James Bentley committed: Update app.py
app.py
CHANGED
@@ -2,10 +2,15 @@ import gradio as gr
 from transformers import pipeline
 from PIL import Image
 import requests
+from transformers import BlipProcessor, BlipForConditionalGeneration
 
 # Initialize the pipeline
 pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
 
+# Initialize processor and model
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
 def image_caption(image, text_prompt=None):
     # Conditional image captioning if text prompt is provided
     if text_prompt:
@@ -19,15 +24,10 @@ def image_caption(image, text_prompt=None):
     caption = processor.decode(out[0], skip_special_tokens=True)
     return caption
 
-# Initialize processor and model
-from transformers import BlipProcessor, BlipForConditionalGeneration
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
-
 # Define the Gradio interface
-image_input = gr.
-text_input = gr.
-output = gr.
+image_input = gr.Image(type="pil", label="Upload an Image")
+text_input = gr.Textbox(lines=1, placeholder="Optional: Enter text prompt", label="Text Prompt")
+output = gr.Textbox(label="Generated Caption")
 
 gr.Interface(
     fn=image_caption,
@@ -35,4 +35,4 @@ gr.Interface(
     outputs=output,
     title="Image Captioning with BLIP",
     description="Upload an image and get a generated caption. Optionally, provide a text prompt for conditional captioning."
-).launch()
+).launch()
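The diff collapses the middle of `image_caption` (old lines 12-18 / new lines 17-23), so only the `if text_prompt:` branch point and the final decode/return are visible. Below is a minimal sketch of how that body is typically written against the `processor`/`model` pair used here, following the usage pattern from the Salesforce BLIP model card; everything between the visible lines is an assumption, not taken from this commit.

```python
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

def image_caption(image, text_prompt=None):
    # Conditional image captioning if text prompt is provided
    if text_prompt:
        inputs = processor(image, text_prompt, return_tensors="pt")
    else:
        # Unconditional captioning: encode the image alone (assumed branch)
        inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption
```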
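Line 34 of the `gr.Interface(...)` call also falls outside the hunks shown, so its `inputs=` argument is not visible. Given the two input components defined in the new version, it presumably passes them as a list; the sketch below continues from the function above, with that one line marked as an assumption.

```python
import gradio as gr

# Components as they appear in the new app.py
image_input = gr.Image(type="pil", label="Upload an Image")
text_input = gr.Textbox(lines=1, placeholder="Optional: Enter text prompt", label="Text Prompt")
output = gr.Textbox(label="Generated Caption")

gr.Interface(
    fn=image_caption,  # the captioning function sketched above
    inputs=[image_input, text_input],  # hidden diff line 34; this wiring is an assumption
    outputs=output,
    title="Image Captioning with BLIP",
    description="Upload an image and get a generated caption. Optionally, provide a text prompt for conditional captioning."
).launch()
```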
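Both versions of the file also keep the `pipe = pipeline(...)` initialization even though `image_caption` calls the processor and model directly. For reference, the `image-to-text` pipeline can produce an unconditional caption on its own; a small usage sketch (the image path is hypothetical):

```python
from transformers import pipeline

pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")

# The pipeline accepts a PIL image, a local path, or an image URL and
# returns a list of dicts with a "generated_text" field.
result = pipe("example.jpg")  # hypothetical local image path
print(result[0]["generated_text"])
```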