darshan8950 committed on
Commit
b37e35e
·
1 Parent(s): 3a2facc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import torch
3
  from PIL import Image
4
  from lavis.models import load_model_and_preprocess
5
- import json
6
 
7
  # Load the Blip-Caption model
8
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -20,13 +20,13 @@ def generate_caption(image_file):
20
  captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)
21
  res=" "
22
  for i in captions:
23
- res=res+", "+i
24
  return (res)
25
 
26
  # Set up the Gradio interface
27
  inputs = gr.inputs.Image(type="pil",label="Image")
28
  outputs = gr.Textbox(label="Captions")
29
- interface = gr.Interface(fn=generate_caption, inputs=inputs, outputs="text", title="Blip-Caption")
30
 
31
  # Launch the interface
32
  interface.launch(share=True)
 
2
  import torch
3
  from PIL import Image
4
  from lavis.models import load_model_and_preprocess
5
+
6
 
7
  # Load the Blip-Caption model
8
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
20
  captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)
21
  res=" "
22
  for i in captions:
23
+ res=res+", "+i
24
  return (res)
25
 
26
  # Set up the Gradio interface
27
  inputs = gr.inputs.Image(type="pil",label="Image")
28
  outputs = gr.Textbox(label="Captions")
29
+ interface = gr.Interface(fn=generate_caption, inputs=inputs, outputs=outputs, title="Blip-Caption")
30
 
31
  # Launch the interface
32
  interface.launch(share=True)