Update app.py

app.py CHANGED
@@ -1,96 +1,114 @@
 from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
 from PIL import Image
-import requests
 import torch
 from threading import Thread
 import gradio as gr
-from gradio import FileData
 import time
 import spaces
 import re
+
 ckpt = "Xkev/Llama-3.2V-11B-cot"
-model = MllamaForConditionalGeneration.from_pretrained(
-    torch_dtype=torch.bfloat16
+model = MllamaForConditionalGeneration.from_pretrained(
+    ckpt, torch_dtype=torch.bfloat16
+).to("cuda").eval()
 processor = AutoProcessor.from_pretrained(ckpt)
+tokenizer = processor.tokenizer
+
+def _build_messages_and_images(history, curr_message):
+    messages = []
+    images = []
+
+    for user_msg, assistant_msg in history:
+        user_text = ""
+        user_image = None
+
+        if isinstance(user_msg, dict):
+            user_text = user_msg.get("text") or ""
+            files = user_msg.get("files") or []
+            if files:
+                fp = files[0] if isinstance(files[0], str) else files[0]["path"]
+                user_image = Image.open(fp).convert("RGB")
+        elif isinstance(user_msg, str):
+            user_text = user_msg
+
+        # user
+        content = [{"type": "text", "text": user_text}]
+        if user_image is not None:
+            content.append({"type": "image"})
+            images.append(user_image)
+        messages.append({"role": "user", "content": content})
+
+        # assistant
+        if isinstance(assistant_msg, str):
+            messages.append({"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]})
+
+    curr_text = curr_message.get("text") or ""
+    files = curr_message.get("files") or []
+    content = [{"type": "text", "text": curr_text}]
+    if len(files) >= 1:
+        fp = files[0] if isinstance(files[0], str) else files[0]["path"]
+        img = Image.open(fp).convert("RGB")
+        images.append(img)
+        content.append({"type": "image"})
+    messages.append({"role": "user", "content": content})
+
+    return messages, images

 @spaces.GPU
 def bot_streaming(message, history, max_new_tokens=250):
-    ... (old lines 18-74, the previous bot_streaming body, are collapsed in the rendered diff) ...
-        yield buffer
-
-
-demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA-CoT",
-                        textbox=gr.MultimodalTextbox(),
-                        additional_inputs = [gr.Slider(
-                            minimum=512,
-                            maximum=1024,
-                            value=512,
-                            step=1,
-                            label="Maximum number of new tokens to generate",
-                        )
-                        ],
-                        examples=[[{"text": "What is on the flower?", "files": ["./Example1.webp"]}, 512],
-                                  [{"text": "How to make this pastry?", "files": ["./Example2.png"]}, 512]],
-                        cache_examples=False,
-                        description="Upload an image, and start chatting about it. To learn more about LLaVA-CoT, visit [our GitHub page](https://github.com/PKU-YuanGroup/LLaVA-CoT).",
-                        stop_btn="Stop Generation",
-                        fill_height=True,
-                        multimodal=True)
-
-demo.launch(debug=True)
+    try:
+        messages, images = _build_messages_and_images(history, message)
+
+        # chat template
+        texts = processor.apply_chat_template(messages, add_generation_prompt=True)
+
+        if images:
+            inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
+        else:
+            inputs = processor(text=texts, return_tensors="pt").to("cuda")
+
+        streamer = TextIteratorStreamer(
+            tokenizer, skip_special_tokens=True, skip_prompt=True)
+
+        generation_kwargs = dict(
+            inputs,
+            streamer=streamer,
+            max_new_tokens=int(max_new_tokens),
+            do_sample=True,
+            temperature=0.6,
+            top_p=0.9,
+        )
+
+        thread = Thread(target=model.generate, kwargs=generation_kwargs, daemon=True)
+        thread.start()
+
+        for new_text in streamer:
+            safe = re.sub(r"</?(\w+)>", lambda m: m.group(0).replace("<", "\\<").replace(">", "\\>"), new_text)
+            yield safe
+            time.sleep(0.005)
+
+        thread.join(timeout=0.1)
+
+    except Exception as e:
+        yield f"[Error] {type(e).__name__}: {e}"
+
+demo = gr.ChatInterface(
+    fn=bot_streaming,
+    title="LLaVA-CoT",
+    textbox=gr.MultimodalTextbox(),
+    additional_inputs=[
+        gr.Slider(minimum=64, maximum=1024, value=512, step=1, label="Maximum number of new tokens")
+    ],
+    examples=[
+        [{"text": "What is on the flower?", "files": ["./Example1.webp"]}, 512],
+        [{"text": "How to make this pastry?", "files": ["./Example2.png"]}, 512],
+    ],
+    cache_examples=False,
+    description="Upload an image, and start chatting about it. To learn more about LLaVA-CoT, visit our GitHub.",
+    stop_btn="Stop Generation",
+    fill_height=True,
+    multimodal=True,
+)
+
+demo.launch(debug=True)
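Note on the message format: `_build_messages_and_images` assembles the interleaved chat structure that `AutoProcessor.apply_chat_template` expects for Llama 3.2 Vision models, using an `{"type": "image"}` placeholder per image while the PIL images themselves are passed separately to the processor call. A minimal sketch of a single-turn request, reusing the Space's checkpoint and example file:

    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("Xkev/Llama-3.2V-11B-cot")
    messages = [{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is on the flower?"},
            {"type": "image"},  # placeholder only; the pixels are passed below
        ],
    }]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=prompt,
                       images=[Image.open("./Example1.webp").convert("RGB")],
                       return_tensors="pt")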
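Note on the streaming loop: the `re.sub` call backslash-escapes anything shaped like a tag (for example LLaVA-CoT's `<SUMMARY>` becomes `\<SUMMARY\>`) so that Gradio's Markdown renderer shows the model's stage markers literally instead of treating them as HTML. One caveat: `gr.ChatInterface` replaces the displayed reply with each yielded value, so yielding only the newest chunk shows fragments rather than the growing answer. A sketch of a cumulative variant (the helper name is ours, not the Space's):

    import re
    import time

    def stream_cumulative(streamer):
        # Each Gradio yield replaces the shown message, so accumulate.
        buffer = ""
        for new_text in streamer:
            safe = re.sub(
                r"</?(\w+)>",
                lambda m: m.group(0).replace("<", "\\<").replace(">", "\\>"),
                new_text,
            )
            buffer += safe
            yield buffer
            time.sleep(0.005)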
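Note on the threading pattern: `model.generate` blocks until generation finishes, so the app runs it on a daemon thread and drains `TextIteratorStreamer`, an iterator fed by the generate call, on the main thread. A self-contained sketch of the same pattern with a small text-only model (the checkpoint here is illustrative, not the Space's):

    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    lm = AutoModelForCausalLM.from_pretrained("gpt2")

    enc = tok("The quick brown fox", return_tensors="pt")
    streamer = TextIteratorStreamer(tok, skip_special_tokens=True, skip_prompt=True)

    # generate() blocks, so it runs on a worker thread
    Thread(target=lm.generate,
           kwargs=dict(enc, streamer=streamer, max_new_tokens=30),
           daemon=True).start()

    for piece in streamer:  # decoded text arrives as tokens are generated
        print(piece, end="", flush=True)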