import gradio as gr
import torch

from transformers import ViltProcessor, ViltForQuestionAnswering

# Download two example images: the COCO cats photo and a DALL-E 2 astronaut sample.
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')

# Load the ViLT processor and the VQA model fine-tuned on VQAv2.
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
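
# Optional (an assumption, not in the original script): move the model to a GPU
# when one is available. If enabled, also move the inputs inside get_answer
# with encoding.to(device).
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)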

def get_answer(image, text):
    # Encode the image-question pair into model inputs (pixel values + token ids).
    encoding = processor(image, text, return_tensors="pt")

    # Inference only, so skip gradient tracking.
    with torch.no_grad():
        outputs = model(**encoding)

    # ViLT treats VQA as classification over a fixed answer vocabulary:
    # the highest-scoring logit indexes the predicted answer string.
    logits = outputs.logits
    idx = logits.argmax(-1).item()
    predicted_answer = model.config.id2label[idx]
    return predicted_answer
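
# Quick sanity check outside the UI (a sketch; assumes the example images above
# downloaded successfully):
# from PIL import Image
# print(get_answer(Image.open("cats.jpg"), "How many cats are there?"))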

# Gradio UI: an image upload, a question box, and a text output for the answer.
image = gr.Image(type="pil")
question = gr.Textbox(label="Question about the image")
answer = gr.Textbox(label="Predicted answer")
examples = [["cats.jpg", "How many cats are there?"], ["astronaut.jpg", "What's the astronaut riding on?"]]

title = "Visual Question Answering"
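
# Variant (a sketch, not part of the original demo): return the top-k answers with
# probabilities instead of a single string, e.g. for a gr.Label output component.
def top_answers(image, text, k=5):
    encoding = processor(image, text, return_tensors="pt")
    with torch.no_grad():
        probs = model(**encoding).logits.softmax(-1)[0]
    values, indices = probs.topk(k)
    return {model.config.id2label[i.item()]: v.item() for v, i in zip(values, indices)}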

# Build the interface and launch the demo with request queuing enabled.
iface = gr.Interface(
    fn=get_answer,
    inputs=[image, question],
    outputs=answer,
    examples=examples,
    title=title,
)
iface.queue().launch(debug=True)