Irpan committed · Commit b063473 · 1 Parent(s): 13e41f6

app.py CHANGED
@@ -1,11 +1,15 @@
 import gradio as gr
 from transformers import ViltProcessor, ViltForQuestionAnswering
 import torch
+
+import httpcore
+setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
+
 from googletrans import Translator
 from googletrans import LANGCODES
 import re
 
-torch.hub.download_url_to_file('
+torch.hub.download_url_to_file('https://media.istockphoto.com/id/1174602891/photo/two-monkeys-mom-and-cub-eat-bananas.jpg?s=612x612&w=0&k=20&c=r7VXi9d1wHhyq3iAk9D2Z3yTZiOJMlLNtjdVRBEjG7g=', 'monkeys.jpg')
 
 processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
 model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
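The `import httpcore` / `setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')` pair added above the googletrans imports is the widely shared workaround for googletrans 4.0.0rc1 raising `AttributeError: module 'httpcore' has no attribute 'SyncHTTPTransport'` when a newer httpcore release is installed; the attribute appears to be referenced only in a type hint, so any placeholder value lets the import succeed. A minimal sketch of the patched import in use, assuming googletrans 4.0.0rc1 alongside a recent httpcore (the sample French sentence is illustrative and not taken from the app):

import httpcore
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')  # placeholder value; it only needs to exist

from googletrans import Translator

translator = Translator()
# Illustrative call, not from app.py: translate a French question into English.
result = translator.translate("Combien de singes y a-t-il ?", dest="en")
print(result.src, "->", result.text)  # something like: fr -> How many monkeys are there?

The other new line, `torch.hub.download_url_to_file(...)`, simply downloads the stock photo to `monkeys.jpg` so the Gradio example added in the second hunk can reference it.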
@@ -77,7 +81,7 @@ def main(image, text):
 image = gr.inputs.Image(type="pil")
 question = gr.inputs.Textbox(label="Question")
 answer = gr.outputs.Textbox(label="Predicted answer")
-examples = [["
+examples = [["monkeys.jpg", "How many monkeys are there, in French?"]]
 
 title = "Cross-lingual VQA"
 description = "ViLT (Vision and Language Transformer), fine-tuned on VQAv2 "
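The new example pairs the downloaded `monkeys.jpg` with a question that names its target language ("How many monkeys are there, in French?"). The body of `main(image, text)` is outside this diff, so the following is only a guess at the kind of pipeline the imports (`re`, `LANGCODES`, `Translator`) and the ViLT model suggest: parse the requested language out of the question, answer in English with ViLT, then translate the answer. The ViLT inference lines follow the dandelin/vilt-b32-finetuned-vqa model card; the function name, the regex, and the wiring are assumptions.

import re
import torch
from googletrans import Translator, LANGCODES
from transformers import ViltProcessor, ViltForQuestionAnswering

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
translator = Translator()

def answer_question(image, text):  # hypothetical stand-in for the app's main(image, text)
    # Hypothetical parsing: pull a trailing ", in <language>?" out of the question.
    dest = "en"
    match = re.search(r",\s*in\s+(\w+)\?*\s*$", text)
    if match and match.group(1).lower() in LANGCODES:
        dest = LANGCODES[match.group(1).lower()]          # e.g. 'french' -> 'fr'
        text = re.sub(r",\s*in\s+\w+\?*\s*$", "?", text)  # keep a plain English question for ViLT

    # Standard ViLT VQA inference (this part follows the model card).
    encoding = processor(image, text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**encoding).logits
    answer = model.config.id2label[logits.argmax(-1).item()]

    # Translate the English answer into the requested language.
    if dest != "en":
        answer = translator.translate(answer, src="en", dest=dest).text
    return answer

In the app these components are presumably handed to something like `gr.Interface(fn=main, inputs=[image, question], outputs=answer, examples=examples, title=title, description=description)`, the usual Gradio pattern for this kind of demo.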