A-Celsius committed
Commit 2ea83a2 · Parent: 8a9c878

Upload Assignment_1.py

Files changed (1):
  Assignment_1.py +48 -0
Assignment_1.py ADDED
@@ -0,0 +1,48 @@
+ from PIL import Image
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+ import torch
+ import gradio as gr
+
+ model_name = "Salesforce/blip-image-captioning-base"
+
+ caption_processor = BlipProcessor.from_pretrained(model_name)
+ model = BlipForConditionalGeneration.from_pretrained(model_name)
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+
+ def generate_captions(image, num_captions=5, size=(512, 512)):
+     image = image.resize(size)
+     if image.mode != 'RGB':
+         image = image.convert('RGB')
+     pixel_values = caption_processor(image, return_tensors='pt').to(device)
+
+     caption_ids = model.generate(
+         **pixel_values,
+         max_length=30,
+         num_beams=5,
+         num_return_sequences=int(num_captions),
+         temperature=1.0
+     )
+
+     captions = [
+         caption_processor.decode(ids, skip_special_tokens=True)
+         for ids in caption_ids
+     ]
+
+     return "\n".join(captions)
+
+ from gradio.components import Image, Textbox, Slider
+
+ interface = gr.Interface(
+     fn=generate_captions,
+     inputs=[
+         Image(type="pil", label="Input Image"),
+         Slider(minimum=1, maximum=5, step=1, label="Number of Captions")
+     ],
+     outputs=Textbox(type="text", label="Captions"),
+     title="Assignment 1",
+     description="AI tool that creates captions based on the image provided by the user.",
+ )
+
+ interface.launch(share=True)
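
Note: launch(share=True) serves the app on a temporary public Gradio link. For a quick local check without the UI, the captioning function can also be called directly on a PIL image. A minimal sketch, assuming the script above has already been executed in the same session and that example.jpg is a placeholder path for any local image (neither is part of this commit):

from PIL import Image

# example.jpg is a hypothetical local file used only for this test
img = Image.open("example.jpg")
# Prints three beam-search captions, one per line
print(generate_captions(img, num_captions=3))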