Update app.py
app.py CHANGED
@@ -124,7 +124,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.bfloat16
 ).to("cuda").eval()
 
-@spaces.GPU
+@spaces.GPU(duration=200)
 def model_inference(input_dict, history):
     text = input_dict["text"]
     files = input_dict["files"]
@@ -181,10 +181,14 @@ def model_inference(input_dict, history):
         time.sleep(0.01)
         yield buffer
 
+examples = [
+    [{"text": "Hint: Please answer the question and provide the final answer at the end. Question: Which number do you have to write in the last daisy?", "files": ["5.jpg"]}]
+]
 
 demo = gr.ChatInterface(
     fn=model_inference,
-    description="# 
+    description="# **🦖 Fancy-MLLM/R1-OneVision-7B**",
+    examples=examples,
     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
     stop_btn="Stop Generation",
     multimodal=True,
@@ -192,3 +196,4 @@ demo = gr.ChatInterface(
 )
 
 demo.launch(debug=True)
+
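For reference, a minimal standalone sketch of the pattern this commit configures: a ZeroGPU-decorated streaming inference function served through a multimodal gr.ChatInterface with preset examples. This is a sketch only; the placeholder response body and the example image path ("example.jpg") are assumptions, not the Space's actual model code.

import gradio as gr
import spaces  # Hugging Face Spaces ZeroGPU helper


@spaces.GPU(duration=200)  # request up to 200 seconds of GPU time per call on ZeroGPU
def model_inference(input_dict, history):
    # gr.MultimodalTextbox delivers {"text": str, "files": [local file paths]}
    text = input_dict["text"]
    files = input_dict["files"]
    # Placeholder streaming response; the real app runs Qwen2.5-VL generation here
    yield f"Received {len(files)} file(s) and prompt: {text}"


# Preset queries shown in the UI; each entry mirrors the MultimodalTextbox payload
examples = [
    [{"text": "Describe this image.", "files": ["example.jpg"]}]  # hypothetical example asset
]

demo = gr.ChatInterface(
    fn=model_inference,
    description="# **🦖 Fancy-MLLM/R1-OneVision-7B**",
    examples=examples,
    textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
    stop_btn="Stop Generation",
    multimodal=True,
)

demo.launch(debug=True)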