File size: 1,495 Bytes
4dca880
 
 
 
76a3399
4dca880
76a3399
4dca880
 
 
 
 
 
 
 
 
 
 
 
76a3399
4dca880
 
 
 
 
76a3399
4dca880
 
 
 
76a3399
4dca880
76a3399
4dca880
 
76a3399
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import gradio as gr

# Load the BLIP image-captioning checkpoint and its paired processor once at
# import time so every request reuses them; the first run downloads the
# weights from the Hugging Face Hub (network access required).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

def generate_caption(image, caption_type, text):
    """Generate a caption for an uploaded image using BLIP.

    Args:
        image: numpy array of shape (H, W, 3) supplied by the Gradio image
            component, or ``None`` when the user submitted without uploading.
        caption_type: ``"Conditional"`` to seed generation with *text*;
            any other value produces an unconditional caption.
        text: prompt prefix, used only for conditional captioning.

    Returns:
        The generated caption string, or an instructive message when no
        image was provided.
    """
    # Gradio passes None when no image is uploaded; without this guard the
    # .astype call below raises AttributeError and the UI shows a raw error.
    if image is None:
        return "Please upload an image."

    raw_image = Image.fromarray(image.astype('uint8'), 'RGB')

    if caption_type == "Conditional":
        return conditional_image_captioning(raw_image, text)
    return unconditional_image_captioning(raw_image)

def conditional_image_captioning(raw_image, text):
    """Caption *raw_image*, seeding generation with the prompt *text*."""
    encoded = processor(raw_image, text, return_tensors="pt")
    generated_ids = model.generate(**encoded)
    return processor.decode(generated_ids[0], skip_special_tokens=True)

def unconditional_image_captioning(raw_image):
    """Caption *raw_image* with no text prompt."""
    encoded = processor(raw_image, return_tensors="pt")
    generated_ids = model.generate(**encoded)
    return processor.decode(generated_ids[0], skip_special_tokens=True)

# Gradio UI wiring. The gr.inputs / gr.outputs namespaces were deprecated in
# Gradio 3.x and removed in 4.x; the top-level component classes are the
# supported API and accept the same labels.
input_image = gr.Image(label="Upload an Image")
input_text = gr.Textbox(label="Enter Text (for Conditional Captioning)")
radio_button = gr.Radio(choices=["Conditional", "Unconditional"], label="Captioning Type")
output_text = gr.Textbox(label="Caption")

# Input order must match generate_caption(image, caption_type, text).
gr.Interface(
    fn=generate_caption,
    inputs=[input_image, radio_button, input_text],
    outputs=output_text,
    title="Image Captioning",
).launch()