Xkev committed
Commit 21c3dd2 · verified · 1 Parent(s): 32e9904

Update app.py

Files changed (1)
  1. app.py +101 -83
app.py CHANGED
@@ -1,96 +1,114 @@
  from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
  from PIL import Image
- import requests
  import torch
  from threading import Thread
  import gradio as gr
- from gradio import FileData
  import time
  import spaces
  import re

  ckpt = "Xkev/Llama-3.2V-11B-cot"
- model = MllamaForConditionalGeneration.from_pretrained(ckpt,
-     torch_dtype=torch.bfloat16).to("cuda")
  processor = AutoProcessor.from_pretrained(ckpt)

  @spaces.GPU
  def bot_streaming(message, history, max_new_tokens=250):
-     txt = message["text"]
-     ext_buffer = f"{txt}"
-
-     messages = []
-     images = []
-
-     for i, msg in enumerate(history):
-         if isinstance(msg[0], tuple):
-             messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
-             messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
-             images.append(Image.open(msg[0][0]).convert("RGB"))
-         elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
-             # messages are already handled
-             pass
-         elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):  # text only turn
-             messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
-             messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
-
-     # add current message
-     if len(message["files"]) == 1:
-         if isinstance(message["files"][0], str):  # examples
-             image = Image.open(message["files"][0]).convert("RGB")
-         else:  # regular input
-             image = Image.open(message["files"][0]["path"]).convert("RGB")
-         images.append(image)
-         messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
-     else:
-         messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
-
-     texts = processor.apply_chat_template(messages, add_generation_prompt=True)
-
-     if images == []:
-         inputs = processor(text=texts, return_tensors="pt").to("cuda")
-     else:
-         inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
-
-     streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
-
-     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.6, top_p=0.9)
-     generated_text = ""
-
-     thread = Thread(target=model.generate, kwargs=generation_kwargs)
-     thread.start()
-     buffer = ""
-
-     for new_text in streamer:
-         buffer += new_text
-         generated_text_without_prompt = buffer
-         time.sleep(0.01)
-
-         buffer = re.sub(r"<(\w+)>", r"\<\1\>", buffer)
-         buffer = re.sub(r"</(\w+)>", r"\</\1\>", buffer)
-
-         yield buffer
-
-
- demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA-CoT",
-     textbox=gr.MultimodalTextbox(),
-     additional_inputs=[gr.Slider(
-         minimum=512,
-         maximum=1024,
-         value=512,
-         step=1,
-         label="Maximum number of new tokens to generate",
-     )],
-     examples=[[{"text": "What is on the flower?", "files": ["./Example1.webp"]}, 512],
-               [{"text": "How to make this pastry?", "files": ["./Example2.png"]}, 512]],
-     cache_examples=False,
-     description="Upload an image, and start chatting about it. To learn more about LLaVA-CoT, visit [our GitHub page](https://github.com/PKU-YuanGroup/LLaVA-CoT).",
-     stop_btn="Stop Generation",
-     fill_height=True,
-     multimodal=True)
-
- demo.launch(debug=True)
 
  from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
  from PIL import Image
  import torch
  from threading import Thread
  import gradio as gr
  import time
  import spaces
  import re
+
  ckpt = "Xkev/Llama-3.2V-11B-cot"
+ model = MllamaForConditionalGeneration.from_pretrained(
+     ckpt, torch_dtype=torch.bfloat16
+ ).to("cuda").eval()
  processor = AutoProcessor.from_pretrained(ckpt)
+ tokenizer = processor.tokenizer
+
+ def _build_messages_and_images(history, curr_message):
+     messages = []
+     images = []
+
+     for user_msg, assistant_msg in history:
+         user_text = ""
+         user_image = None
+
+         if isinstance(user_msg, dict):
+             user_text = user_msg.get("text") or ""
+             files = user_msg.get("files") or []
+             if files:
+                 fp = files[0] if isinstance(files[0], str) else files[0]["path"]
+                 user_image = Image.open(fp).convert("RGB")
+         elif isinstance(user_msg, str):
+             user_text = user_msg
+
+         # user
+         content = [{"type": "text", "text": user_text}]
+         if user_image is not None:
+             content.append({"type": "image"})
+             images.append(user_image)
+         messages.append({"role": "user", "content": content})
+
+         # assistant
+         if isinstance(assistant_msg, str):
+             messages.append({"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]})
+
+     curr_text = curr_message.get("text") or ""
+     files = curr_message.get("files") or []
+     content = [{"type": "text", "text": curr_text}]
+     if len(files) >= 1:
+         fp = files[0] if isinstance(files[0], str) else files[0]["path"]
+         img = Image.open(fp).convert("RGB")
+         images.append(img)
+         content.append({"type": "image"})
+     messages.append({"role": "user", "content": content})
+
+     return messages, images

  @spaces.GPU
  def bot_streaming(message, history, max_new_tokens=250):
+     try:
+         messages, images = _build_messages_and_images(history, message)
+
+         # apply the chat template
+         texts = processor.apply_chat_template(messages, add_generation_prompt=True)
+
+         if images:
+             inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
+         else:
+             inputs = processor(text=texts, return_tensors="pt").to("cuda")
+
+         streamer = TextIteratorStreamer(
+             tokenizer, skip_special_tokens=True, skip_prompt=True)
+
+         generation_kwargs = dict(
+             inputs,
+             streamer=streamer,
+             max_new_tokens=int(max_new_tokens),
+             do_sample=True,
+             temperature=0.6,
+             top_p=0.9,
+         )
+
+         thread = Thread(target=model.generate, kwargs=generation_kwargs, daemon=True)
+         thread.start()
+
+         buffer = ""
+         for new_text in streamer:
+             buffer += new_text
+             # Escape <TAG>/</TAG> markers so Gradio's Markdown does not treat them
+             # as HTML, and yield the full text so far: ChatInterface replaces the
+             # displayed message with each yielded value rather than appending.
+             safe = re.sub(r"</?(\w+)>", lambda m: m.group(0).replace("<", "\\<").replace(">", "\\>"), buffer)
+             yield safe
+             time.sleep(0.005)
+
+         thread.join(timeout=0.1)
+
+     except Exception as e:
+         yield f"[Error] {type(e).__name__}: {e}"
+
+ demo = gr.ChatInterface(
+     fn=bot_streaming,
+     title="LLaVA-CoT",
+     textbox=gr.MultimodalTextbox(),
+     additional_inputs=[
+         gr.Slider(minimum=64, maximum=1024, value=512, step=1, label="Maximum number of new tokens")
+     ],
+     examples=[
+         [{"text": "What is on the flower?", "files": ["./Example1.webp"]}, 512],
+         [{"text": "How to make this pastry?", "files": ["./Example2.png"]}, 512],
+     ],
+     cache_examples=False,
+     description="Upload an image, and start chatting about it. To learn more about LLaVA-CoT, visit our GitHub.",
+     stop_btn="Stop Generation",
+     fill_height=True,
+     multimodal=True,
+ )
+
+ demo.launch(debug=True)
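
Not part of the commit: a minimal sketch of how the new _build_messages_and_images helper behaves on a text-only conversation, assuming the helper is importable without triggering the model download (e.g. factored into a hypothetical utils.py); the history pair and question below are made up for illustration.

# Illustration only: exercises the refactored history parsing with a
# Gradio tuple-style history and no attached files.
from utils import _build_messages_and_images  # hypothetical module split

history = [("Describe the picture I sent earlier.", "It shows a bee on a purple flower.")]
current = {"text": "What species might the bee be?", "files": []}

messages, images = _build_messages_and_images(history, current)

assert images == []  # no files anywhere, so no PIL images are collected
assert [m["role"] for m in messages] == ["user", "assistant", "user"]
assert messages[-1]["content"] == [{"type": "text", "text": "What species might the bee be?"}]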