Deepakraj2006 committed on
Commit f619ca0 · verified · 1 Parent(s): b6d53ef

Update app.py

Files changed (1):
  1. app.py +27 -27
app.py CHANGED
@@ -1,28 +1,28 @@
- import gradio as gr
- from transformers import BlipProcessor, BlipForConditionalGeneration
- from PIL import Image
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
- model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
- def generate_caption(image):
-     # Now directly using the PIL Image object
-     inputs = processor(images=image, return_tensors="pt")
-     outputs = model.generate(**inputs)
-     caption = processor.decode(outputs[0], skip_special_tokens=True)
-     return caption
- def caption_image(image):
-     """
-     Takes a PIL Image input and returns a caption.
-     """
-     try:
-         caption = generate_caption(image)
-         return caption
-     except Exception as e:
-         return f"An error occurred: {str(e)}"
- iface = gr.Interface(
-     fn=caption_image,
-     inputs=gr.Image(type="pil"),
-     outputs="text",
-     title="Image Captioning with BLIP",
-     description="Upload an image to generate a caption."
- )
+ import gradio as gr
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+ from PIL import Image
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+ def generate_caption(image):
+     # Now directly using the PIL Image object
+     inputs = processor(images=image, return_tensors="pt")
+     outputs = model.generate(**inputs)
+     caption = processor.decode(outputs[0], skip_special_tokens=True)
+     return caption
+ def caption_image(image):
+     """
+     Takes a PIL Image input and returns a caption.
+     """
+     try:
+         caption = generate_caption(image)
+         return caption
+     except Exception as e:
+         return f"An error occurred: {str(e)}"
+ iface = gr.Interface(
+     fn=caption_image,
+     inputs=gr.Image(type="pil"),
+     outputs="text",
+     title="Image Captioning with BLIP",
+     description="Upload an image to generate a caption."
+ )
  iface.launch(share=True)
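
Side note (not part of this commit): a minimal local smoke test for caption_image might look like the sketch below, assuming the definitions from app.py above have already been executed in the current Python session and that a sample image exists at test.jpg (the file path is an assumption for illustration only).

    from PIL import Image

    # Hypothetical local test image; replace with any image file on disk.
    image = Image.open("test.jpg").convert("RGB")

    # Prints the BLIP-generated caption, or the "An error occurred: ..." string on failure.
    print(caption_image(image))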