ybelkada committed on
Commit
686f14a
·
1 Parent(s): 5b6e7e0

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +28 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import requests
3
+ import gradio as gr
4
+
5
+ from transformers import BlipProcessor, BlipForConditionalGeneration
6
+
7
# BLIP image-captioning checkpoint served by this demo.
model_id = "Salesforce/blip-image-captioning-base"

# Load the processor (image pre-processing + tokenizer) and the captioning
# model from the same checkpoint so their vocabularies match.
processor = BlipProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id)
11
+
12
def parse_input(input):
    """Load an RGB PIL image from either a web link or a local file.

    Parameters
    ----------
    input : str or file-like
        A URL (str) to download, or a path / file object PIL can open.

    Returns
    -------
    PIL.Image.Image
        The loaded image converted to RGB.

    Raises
    ------
    requests.HTTPError
        If downloading the URL returns a non-2xx status.
    """
    # NOTE(review): `input` shadows the builtin; name kept so existing
    # callers are unaffected.
    if isinstance(input, str):
        # A string is treated as a web link. Use a timeout so a dead server
        # cannot hang the app, and fail loudly on HTTP errors instead of
        # handing PIL an HTML error page.
        response = requests.get(input, stream=True, timeout=30)
        response.raise_for_status()
        image = Image.open(response.raw).convert("RGB")
    else:
        # Otherwise it is a path or file object that PIL can open directly.
        image = Image.open(input).convert("RGB")

    return image
21
+
22
def launch(image):
    """Generate a caption for *image* with the BLIP model.

    Parameters
    ----------
    image : str or PIL.Image.Image
        A web link / local path (strings are routed through
        ``parse_input``) or an already-loaded PIL image.

    Returns
    -------
    str
        The decoded caption with special tokens stripped.
    """
    # Bug fix: the Gradio interface is built with inputs="text", so this
    # function receives a raw URL string — but the processor needs an image.
    # parse_input was defined for exactly this and was never called; route
    # string inputs through it. PIL-image callers are unaffected.
    if isinstance(image, str):
        image = parse_input(image)
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)
26
+
27
# Wire the captioning function into a minimal text-in / text-out UI and
# start the Gradio server (blocks until shut down).
iface = gr.Interface(fn=launch, inputs="text", outputs="text")
iface.launch()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ git+https://github.com/younesbelkada/transformers.git@add-blip