Anniek committed on
Commit c09b2b3 · 1 Parent(s): f4b809f

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from transformers import ViltProcessor, ViltForQuestionAnswering
 import torch
 
-torch.hub.download_url_to_file('https://cocodataset.org/#explore?id=531313', 'dog.jpg')
+torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
 torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
 
 def getAnswer(query, image):
@@ -21,7 +21,7 @@ def getAnswer(query, image):
 image = gr.inputs.Image(type="pil")
 question = gr.inputs.Textbox(label="Question about the image")
 answer = gr.outputs.Textbox(label="Predicted answer")
-examples = [["dog.jpg", "Where is the dog lying?"], ["astronaut.jpg", "What's the astronaut riding on?"]]
+examples = [["cats.jpg", "How many cats are there?"], ["astronaut.jpg", "What's the astronaut riding on?"]]
 
 
 title="Visual question and answering"