tbdatasci committed
Commit abea568 · Parent: a38acb2

Made the description more fun

Files changed (1)
  1. app.py +5 -16
app.py CHANGED
@@ -1,24 +1,13 @@
 from transformers import pipeline
 import gradio as gr
-# import io
-# import IPython.display
-# from PIL import Image
-# import base64
 
 get_completion = pipeline("image-to-text",model="nlpconnect/vit-gpt2-image-captioning")
 
-def summarize(input):
-    output = get_completion(input)
-    return output[0]['generated_text']
-
-# def image_to_base64_str(pil_image):
-#     byte_arr = io.BytesIO()
-#     pil_image.save(byte_arr, format='PNG')
-#     byte_arr = byte_arr.getvalue()
-#     return str(base64.b64encode(byte_arr).decode('utf-8'))
+# def summarize(input):
+#     output = get_completion(input)
+#     return output[0]['generated_text']
 
 def captioner(image):
-    # base64_image = image_to_base64_str(image)
     result = get_completion(image)
     return result[0]['generated_text']
 
@@ -26,8 +15,8 @@ gr.close_all()
 demo = gr.Interface(fn=captioner,
                     inputs=[gr.Image(label="Upload image", type="pil")],
                     outputs=[gr.Textbox(label="Caption")],
-                    title="Image Captioning with BLIP",
-                    description="Caption any image using the BLIP model",
+                    title="Image Captioning!",
+                    description="I'll be back... with captions!",
                     allow_flagging="never")
 
 demo.launch()
 
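For reference, a minimal sketch of how the image-to-text pipeline in app.py behaves outside Gradio. The model name is taken from the diff; example.jpg is a placeholder path. The pipeline returns a list of dicts, which is why captioner() indexes result[0]['generated_text']:

# Minimal sketch, assuming transformers and Pillow are installed
# and example.jpg exists locally (the path is a placeholder).
from transformers import pipeline
from PIL import Image

get_completion = pipeline("image-to-text",
                          model="nlpconnect/vit-gpt2-image-captioning")

image = Image.open("example.jpg")
result = get_completion(image)  # e.g. [{'generated_text': 'a dog sitting on a couch'}]
print(result[0]['generated_text'])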