Spaces: Running on Zero
Update app.py
app.py CHANGED
```diff
@@ -7,6 +7,7 @@ import torch
 import spaces
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+subprocess.run("pip install av", shell=True)  # Install pyav for video processing
 
 from io import BytesIO
 
@@ -16,114 +17,68 @@ model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B
     torch_dtype=torch.bfloat16).to("cuda:0")
 
 @spaces.GPU
-def model_inference(
-    input_dict, history, max_tokens
-):
+def model_inference(input_dict, history, max_tokens):
     text = input_dict["text"]
-    images = []
-    user_content = []
     media_queue = []
-    if history == []:
-        text = input_dict["text"].strip()
-
-        for file in input_dict.get("files", []):
-            if file.endswith((".png", ".jpg", ".jpeg", ".gif", ".bmp")):
-                media_queue.append({"type": "image", "path": file})
-            elif file.endswith((".mp4", ".mov", ".avi", ".mkv", ".flv")):
-                media_queue.append({"type": "video", "path": file})
-
-        if "<image>" in text or "<video>" in text:
-            parts = re.split(r'(<image>|<video>)', text)
-            for part in parts:
-                if part == "<image>" and media_queue:
-                    user_content.append(media_queue.pop(0))
-                elif part == "<video>" and media_queue:
-                    user_content.append(media_queue.pop(0))
-                elif part.strip():
-                    user_content.append({"type": "text", "text": part.strip()})
-        else:
-            user_content.append({"type": "text", "text": text})
-
-        for media in media_queue:
-            user_content.append(media)
-
-        resulting_messages = [{"role": "user", "content": user_content}]
-
-    elif len(history) > 0:
-        resulting_messages = []
-        user_content = []
-        media_queue = []
-        for hist in history:
-            if hist["role"] == "user" and isinstance(hist["content"], tuple):
-                file_name = hist["content"][0]
-                if file_name.endswith((".png", ".jpg", ".jpeg")):
-                    media_queue.append({"type": "image", "path": file_name})
-                elif file_name.endswith(".mp4"):
-                    media_queue.append({"type": "video", "path": file_name})
-
-        for hist in history:
-            if hist["role"] == "user" and isinstance(hist["content"], str):
-                text = hist["content"]
-                parts = re.split(r'(<image>|<video>)', text)
-
-                for part in parts:
-                    if part == "<image>" and media_queue:
-                        user_content.append(media_queue.pop(0))
-                    elif part == "<video>" and media_queue:
-                        user_content.append(media_queue.pop(0))
-                    elif part.strip():
-                        user_content.append({"type": "text", "text": part.strip()})
-
-            elif hist["role"] == "assistant":
-                resulting_messages.append({
-                    "role": "user",
-                    "content": user_content
-                })
-                resulting_messages.append({
-                    "role": "assistant",
-                    "content": [{"type": "text", "text": hist["content"]}]
-                })
-                user_content = []
+    user_content = []
+
+    for file in input_dict.get("files", []):
+        if file.endswith((".png", ".jpg", ".jpeg", ".gif", ".bmp")):
+            media_queue.append({"type": "image", "path": file})
+        elif file.endswith((".mp4", ".mov", ".avi", ".mkv", ".flv")):
+            media_queue.append({"type": "video", "path": file})
 
-    if text == "" and not images:
-        gr.Error("Please input a query and optionally image(s).")
+    if "<image>" in text or "<video>" in text:
+        parts = re.split(r'(<image>|<video>)', text)
+        for part in parts:
+            if part == "<image>" and media_queue:
+                user_content.append(media_queue.pop(0))
+            elif part == "<video>" and media_queue:
+                user_content.append(media_queue.pop(0))
+            elif part.strip():
+                user_content.append({"type": "text", "text": part.strip()})
+    else:
+        user_content.append({"type": "text", "text": text})
+        user_content.extend(media_queue)
 
-    if text == "" and images:
-        gr.Error("Please input a text query along the images(s).")
+    resulting_messages = [{"role": "user", "content": user_content}]
+
+    if not text and not media_queue:
+        return "Please provide text and/or media input."
+
     inputs = processor.apply_chat_template(
-    )
-
-    inputs = inputs.to(model.device)
+        resulting_messages,
+        add_generation_prompt=True,
+        tokenize=True,
+        return_dict=True,
+        return_tensors="pt",
+    ).to(model.device)
 
-    # Generate
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
     generation_args = dict(inputs, streamer=streamer, max_new_tokens=max_tokens)
-
-
+
     thread = Thread(target=model.generate, kwargs=generation_args)
     thread.start()
-
-    yield "..."
+
+    yield "Generating response..."
     buffer = ""
 
     for new_text in streamer:
         buffer += new_text
-        generated_text_without_prompt = buffer
         time.sleep(0.01)
         yield buffer
 
-demo = gr.ChatInterface(fn=model_inference, title="SmolVLM2: The Smollest Video Model Ever 📺",
-        description="Play with [SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) in this demo. To get started, upload an image and text. This demo doesn't use history for the chat, so every chat you start is a new conversation.",
-        textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", ".mp4"], file_count="multiple"), stop_btn="Stop Generation", multimodal=True,
-        cache_examples=False,
-        additional_inputs=[gr.Slider(minimum=100, maximum=500, step=50, value=200, label="Max Tokens")],
-        type="messages"
-)
 
-demo.
+demo = gr.ChatInterface(
+    fn=model_inference,
+    title="SmolVLM2: The Smallest Video Model Ever 📺",
+    description="Play with SmolVLM2-2.2B-Instruct. Upload an image or video and ask a question.",
+    textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", ".mp4"], file_count="multiple"),
+    stop_btn="Stop Generation",
+    multimodal=True,
+    cache_examples=False,
+    additional_inputs=[gr.Slider(minimum=100, maximum=500, step=50, value=200, label="Max Tokens")],
+    type="messages"
+)
+
+demo.launch(share=True, debug=True)
```
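A few notes on the patterns this change relies on. First, the `<image>`/`<video>` tag handling: `re.split` with a capturing group keeps the delimiters in the result list, which is what lets the loop swap each tag for the next queued upload in order. A standalone sketch of that logic (the `interleave` helper name is invented for illustration, not part of app.py):

```python
import re

# Sketch of the tag-splitting logic above. The capturing group in re.split
# keeps the <image>/<video> delimiters in the result list, so each tag can
# be replaced by the next queued media file in order.
def interleave(text, media_queue):
    user_content = []
    for part in re.split(r'(<image>|<video>)', text):
        if part in ("<image>", "<video>") and media_queue:
            user_content.append(media_queue.pop(0))
        elif part.strip():
            user_content.append({"type": "text", "text": part.strip()})
    return user_content

media = [{"type": "image", "path": "cat.png"},
         {"type": "video", "path": "clip.mp4"}]
print(interleave("Compare <image> with <video> please.", media))
# -> text "Compare", the image, text "with", the video, text "please."
```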
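Second, the `resulting_messages` structure follows the chat-template format the SmolVLM2 processor expects: one entry per turn, with `content` as a list of typed parts. A sketch of a single-turn call, assuming a transformers release with SmolVLM2 support (the video path is illustrative and must point at a real local file):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")

messages = [{
    "role": "user",
    "content": [
        {"type": "video", "path": "example.mp4"},  # hypothetical file
        {"type": "text", "text": "Describe what happens in this clip."},
    ],
}]

# tokenize=True with return_dict=True makes the processor load the media
# and return input_ids/attention_mask/pixel_values in a single batch.
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
)
```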
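Third, the generation step: `model.generate` blocks until the whole sequence is produced, so the app runs it on a worker thread and reads partial text from `TextIteratorStreamer` on the main thread. The same pattern in a self-contained, CPU-friendly form (the gpt2 checkpoint is only a stand-in for SmolVLM2):

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Streaming generation works by", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs on a worker thread; the streamer is an
# iterator that yields decoded text chunks as tokens are produced.
thread = Thread(target=lm.generate,
                kwargs=dict(inputs, streamer=streamer, max_new_tokens=30))
thread.start()

for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```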
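Finally, because `model_inference` is a generator, `gr.ChatInterface` streams it: each `yield` replaces the bot message rendered so far, which is why the function yields a progressively longer `buffer` rather than deltas. A minimal sketch of that contract (`fake_reply` is a made-up stand-in for the real inference function):

```python
import time
import gradio as gr

# ChatInterface detects that fn is a generator and re-renders the bot
# message on every yield, so each yield is the full reply so far.
def fake_reply(message, history):
    buffer = ""
    for word in "this reply streams in one word at a time".split():
        buffer += word + " "
        time.sleep(0.2)
        yield buffer

demo = gr.ChatInterface(fn=fake_reply, type="messages")
demo.launch()
```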