Update app.py
app.py CHANGED
@@ -53,7 +53,7 @@ def inference(raw_image, model_n, question, strategy):
             caption = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
         else:
             caption = model.generate(image, sample=True, top_p=0.9, max_length=20, min_length=5)
-        return
+        return caption[0]
 
     else:
         image_vq = transform_vq(raw_image).unsqueeze(0).to(device)
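The substantive fix in this hunk is the bare `return` becoming `return caption[0]`: BLIP's `generate()` returns a list of caption strings, so the old branch returned `None` and the demo's output box stayed empty. A minimal sketch of the captioning branch after the commit, assuming the `transform`, `model`, and `device` bindings defined earlier in app.py:

import torch

def caption_image(raw_image, strategy):
    # Preprocess with the captioning transform (assumed from app.py).
    image = transform(raw_image).unsqueeze(0).to(device)
    with torch.no_grad():
        if strategy == "Beam search":
            # Deterministic decoding: explore 3 beams, keep the best sequence.
            caption = model.generate(image, sample=False, num_beams=3,
                                     max_length=20, min_length=5)
        else:
            # Nucleus sampling: draw tokens from the top-p=0.9 probability mass.
            caption = model.generate(image, sample=True, top_p=0.9,
                                     max_length=20, min_length=5)
    # generate() yields a list of strings; return the first caption instead
    # of falling through with a bare `return` (the pre-commit bug).
    return caption[0]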
@@ -71,4 +71,13 @@ description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training f
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>"
 
 
-gr.Interface(inference,
+gr.Interface(inference,
+             inputs,
+             outputs,
+             title=title,
+             description=description,
+             article=article,
+             examples=[['starrynight.jpeg', "Image Captioning", "None", "Nucleus sampling"]],
+             allow_flagging='never',
+             theme="default",
+             cache_examples=False).launch(enable_queue=True, debug=True)
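The second hunk reformats the single-line `gr.Interface(...).launch(...)` call into one argument per line and adds the flagging, theming, and example-caching options. For context, a hedged sketch of how the positional `inputs` and `outputs` might be defined for the four-argument `inference` signature; the component choices below are illustrative assumptions in the Gradio 2.x-era API that `enable_queue=True` implies, not code from this commit:

import gradio as gr

# Hypothetical components matching inference(raw_image, model_n, question, strategy).
inputs = [
    gr.inputs.Image(type='pil', label="Image"),            # raw_image
    gr.inputs.Radio(choices=['Image Captioning', 'Visual Question Answering'],
                    label="Task"),                          # model_n
    gr.inputs.Textbox(lines=2, label="Question"),           # question (VQA only)
    gr.inputs.Radio(choices=['Beam search', 'Nucleus sampling'],
                    label="Caption Decoding Strategy"),     # strategy
]
outputs = gr.outputs.Textbox(label="Output")

With `cache_examples=False` the starrynight.jpeg example runs live instead of being precomputed at startup, and `launch(enable_queue=True)` queues concurrent requests so users do not contend for the model at the same time.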