Update app.py
--- a/app.py
+++ b/app.py
@@ -77,7 +77,7 @@ processor = AutoProcessor.from_pretrained(MODEL_ID_QWEN, trust_remote_code=True)
 qwen_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_QWEN,
     trust_remote_code=True,
-    torch_dtype=torch.
+    torch_dtype=torch.float16  # Use float16 for more stability
 ).to("cuda").eval()
 
 # -----------------------
@@ -136,6 +136,8 @@ def model_inference(input_dict, history):
         return_tensors="pt",
         padding=True,
     ).to("cuda")
+    # Clear CUDA cache to reduce potential memory fragmentation.
+    torch.cuda.empty_cache()
     # Set up streaming generation.
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
@@ -155,7 +157,7 @@ def model_inference(input_dict, history):
     if not files:
         # Prepare a simple conversation for text-only input.
         conversation = [{"role": "user", "content": text}]
-        #
+        # Use the text tokenizer's chat template method.
         input_ids = text_tokenizer.apply_chat_template(
             conversation, add_generation_prompt=True, return_tensors="pt"
         )
@@ -216,6 +218,8 @@ def model_inference(input_dict, history):
         return_tensors="pt",
         padding=True,
     ).to("cuda")
+    # Clear CUDA cache before generation.
+    torch.cuda.empty_cache()
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
     thread = Thread(target=qwen_model.generate, kwargs=generation_kwargs)
@@ -235,6 +239,7 @@ examples = [
     [{"text": "Tell me a story about a brave knight in a faraway kingdom."}],
     [{"text": "@video-infer Explain the content of the Advertisement", "files": ["example_images/videoplayback.mp4"]}],
     [{"text": "@video-infer Explain the content of the video in detail", "files": ["example_images/breakfast.mp4"]}],
+    [{"text": "@video-infer Explain the content of the video.", "files": ["example_images/sky.mp4"]}],
 
 ]
 
@@ -250,4 +255,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch(debug=True)
+    demo.launch(share=True, debug=True)