feat: switch VQA model
app.py
CHANGED
@@ -1,25 +1,32 @@
+import torch
 import gradio as gr
-from transformers import pipeline
+from transformers import AutoModel, pipeline, AutoTokenizer
 
-
+inference = pipeline(task="visual-question-answering")
 
 
-def predict(input_img):
-
-
+def predict(input_img, questions):
+    try:
+        predictions = inference(question=questions, image=input_img)
+        return str(predictions[0])
+    except Exception as e:
+        # Catch the exception and turn the error message into a string
+        error_message = str(e)
+        # Raise gradio.Error to show the error in a popup
+        raise gr.Error(error_message, duration=25)
 
 
 gradio_app = gr.Interface(
     predict,
-    inputs=
-
-
-
-
-        gr.Label(label="Result", num_top_classes=2),
+    inputs=[
+        gr.Image(
+            label="Select An Image", sources=["upload", "webcam"], type="pil"
+        ),
+        "text",
     ],
-
+    outputs="text",
+    title="Plz ask me anything",
 )
 
 if __name__ == "__main__":
-    gradio_app.launch()
+    gradio_app.launch(show_error=True, debug=True)
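Note that pipeline(task="visual-question-answering") is called without a model argument, so it falls back to the library's default VQA checkpoint and transformers typically logs a "No model was supplied" warning at startup. Below is a minimal sketch of pinning an explicit checkpoint instead; dandelin/vilt-b32-finetuned-vqa is an assumed example here, not necessarily the model this Space actually resolves to.

# Sketch only: the same pipeline call with an explicit checkpoint.
# The model name below is an assumed example, not taken from the commit.
from transformers import pipeline

inference = pipeline(
    task="visual-question-answering",
    model="dandelin/vilt-b32-finetuned-vqa",
)

# predict() from app.py then works unchanged, e.g.:
# predict(pil_image, "What color is the cat?")

The AutoModel and AutoTokenizer imports in the diff are not used by this code path; the pipeline call loads its own model and processor.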