runninglsy committed
Commit 7a6fa0f · 1 Parent(s): 899dc41
README.md CHANGED
@@ -8,6 +8,7 @@ sdk_version: 5.22.0
  app_file: app.py
  pinned: false
  license: apache-2.0
+ short_description: See, read, and reason better together, with much lighter VRAM usage
  ---
  
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,253 @@
+ import os
+ import subprocess
+ # Install flash-attn without compiling its CUDA kernels; keep the host
+ # environment so pip and CUDA variables stay visible to the subprocess.
+ subprocess.run('pip install flash-attn --no-build-isolation',
+                env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': 'TRUE'}, shell=True)
+ subprocess.run('pip install gptqmodel', shell=True)
+ subprocess.run('pip install numpy==1.25.0', shell=True)
+
+ import spaces
+ import re
+ import logging
+ from typing import List, Any
+ from threading import Thread
+
+ import torch
+ import gradio as gr
+ from transformers import TextIteratorStreamer, GenerationConfig
+ from moviepy.editor import VideoFileClip
+ from PIL import Image
+ from gptqmodel import GPTQModel
+
+ model_name = 'AIDC-AI/Ovis2-34B-GPTQ-Int4'
+
+ # Stream tokens from a background thread (True) or run generate() to
+ # completion before streaming (False).
+ use_thread = False
+
+ # Maximum number of visual-token partitions per image.
+ IMAGE_MAX_PARTITION = 16
+
+ # Number of frames sampled from a video, and partitions per frame.
+ VIDEO_FRAME_NUMS = 32
+ VIDEO_MAX_PARTITION = 1
+
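+ # GPTQModel.load returns a wrapper around the underlying transformers model,
+ # which is why the generation config is patched on the inner model.model.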
+ model = GPTQModel.load(model_name, device='cuda', trust_remote_code=True)
+ model.model.generation_config = GenerationConfig.from_pretrained(model_name)
+
+ text_tokenizer = model.get_text_tokenizer()
+ visual_tokenizer = model.get_visual_tokenizer()
+ streamer = TextIteratorStreamer(text_tokenizer, skip_prompt=True, skip_special_tokens=True)
+ image_placeholder = '<image>'
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
+
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
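+ # Decoding is greedy (do_sample=False); top_p/top_k/temperature are set to
+ # None explicitly since they are unused without sampling.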
+ def initialize_gen_kwargs():
+     return {
+         "max_new_tokens": 1536,
+         "do_sample": False,
+         "top_p": None,
+         "top_k": None,
+         "temperature": None,
+         "repetition_penalty": 1.05,
+         "eos_token_id": model.generation_config.eos_token_id,
+         "pad_token_id": text_tokenizer.pad_token_id,
+         "use_cache": True
+     }
+
+ def submit_chat(chatbot, text_input):
+     response = ''
+     chatbot.append((text_input, response))
+     return chatbot, ''
+
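+ # GPU-backed chat handler (ZeroGPU allocates a GPU for the duration of the
+ # call). Partial responses are yielded so the Chatbot updates as tokens
+ # arrive from the streamer.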
+ @spaces.GPU
+ def ovis_chat(chatbot: List[List[str]], image_input: Any, video_input: Any):
+     conversations, model_inputs = prepare_inputs(chatbot, image_input, video_input)
+     gen_kwargs = initialize_gen_kwargs()
+
+     with torch.inference_mode():
+         generate_func = lambda: model.generate(**model_inputs, **gen_kwargs, streamer=streamer)
+
+         if use_thread:
+             thread = Thread(target=generate_func)
+             thread.start()
+         else:
+             generate_func()
+
+         response = ""
+         for new_text in streamer:
+             response += new_text
+             chatbot[-1][1] = response
+             yield chatbot
+
+         if use_thread:
+             thread.join()
+
+     log_conversation(chatbot)
+
+
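+ # Turn the Gradio chat history into Ovis-style conversations and attach
+ # media: one <image> placeholder for a single image, or one per sampled
+ # frame for a video.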
+ def prepare_inputs(chatbot: List[List[str]], image_input: Any, video_input: Any):
+     # conversations = [{
+     #     "from": "system",
+     #     "value": "You are a helpful assistant, and your task is to provide reliable and structured responses to users."
+     # }]
+     conversations = []
+
+     for query, response in chatbot[:-1]:
+         conversations.extend([
+             {"from": "human", "value": query},
+             {"from": "gpt", "value": response}
+         ])
+
+     last_query = chatbot[-1][0].replace(image_placeholder, '')
+     conversations.append({"from": "human", "value": last_query})
+
+     max_partition = IMAGE_MAX_PARTITION
+
+     if image_input is not None:
+         for conv in conversations:
+             if conv["from"] == "human":
+                 conv["value"] = f'{image_placeholder}\n{conv["value"]}'
+                 break
+         max_partition = IMAGE_MAX_PARTITION
+         image_input = [image_input]
+
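+     # Video: prepend one placeholder per frame, then sample VIDEO_FRAME_NUMS
+     # frames at the midpoints of equal-width windows across the clip.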
+     if video_input is not None:
+         for conv in conversations:
+             if conv["from"] == "human":
+                 conv["value"] = f'{image_placeholder}\n' * VIDEO_FRAME_NUMS + f'{conv["value"]}'
+                 break
+         # extract video frames here
+         with VideoFileClip(video_input) as clip:
+             total_frames = int(clip.fps * clip.duration)
+             if total_frames <= VIDEO_FRAME_NUMS:
+                 sampled_indices = range(total_frames)
+             else:
+                 stride = total_frames / VIDEO_FRAME_NUMS
+                 sampled_indices = [min(total_frames - 1, int((stride * i + stride * (i + 1)) / 2)) for i in range(VIDEO_FRAME_NUMS)]
+             frames = [clip.get_frame(index / clip.fps) for index in sampled_indices]
+             frames = [Image.fromarray(frame, mode='RGB') for frame in frames]
+         image_input = frames
+         max_partition = VIDEO_MAX_PARTITION
+
+     logger.info(conversations)
+
+     prompt, input_ids, pixel_values = model.preprocess_inputs(conversations, image_input, max_partition=max_partition)
+     attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)
+
+     model_inputs = {
+         "inputs": input_ids.unsqueeze(0).to(device=model.device),
+         "attention_mask": attention_mask.unsqueeze(0).to(device=model.device),
+         "pixel_values": [pixel_values.to(dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)] if image_input is not None else [None]
+     }
+
+     return conversations, model_inputs
+
+ def log_conversation(chatbot):
+     logger.info("[OVIS_CONV_START]")
+     for i, (request, answer) in enumerate(chatbot, 1):
+         logger.info(f'Q{i}:\n {request}\nA{i}:\n {answer}')
+     logger.info("[OVIS_CONV_END]")
+
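+ # Reset state in the output order (chatbot, image_input, text_input, video_input).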
+ def clear_chat():
+     return [], None, "", None
+
+ with open(f"{cur_dir}/resource/logo.svg", "r", encoding="utf-8") as svg_file:
+     svg_content = svg_file.read()
+ font_size = "2.5em"
+ svg_content = re.sub(r'(<svg[^>]*)(>)', rf'\1 height="{font_size}" style="vertical-align: middle; display: inline-block;"\2', svg_content)
+ html = f"""
+ <p align="center" style="font-size: {font_size}; line-height: 1;">
+     <span style="display: inline-block; vertical-align: middle;">{svg_content}</span>
+     <span style="display: inline-block; vertical-align: middle;">{model_name.split('/')[-1]}</span>
+ </p>
+ <center><font size=3><b>Ovis</b> has been open-sourced on <a href='https://huggingface.co/{model_name}'>😊 Hugging Face</a> and <a href='https://github.com/AIDC-AI/Ovis'>🌟 GitHub</a>. If you find Ovis useful, a like ❤️ or a star 🌟 would be appreciated.</font></center>
+ """
+
+ latex_delimiters_set = [
+     {"left": "\\(", "right": "\\)", "display": False},
+     {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
+     {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
+     {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
+     {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
+     {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
+     {"left": "\\[", "right": "\\]", "display": True},
+ ]
+
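+ # Layout: input-type radio, media inputs, and example galleries on the left;
+ # the streaming chat panel and Send/Clear controls on the right.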
+ text_input = gr.Textbox(label="prompt", placeholder="Enter your text here...", lines=1, container=False)
+ with gr.Blocks(title=model_name.split('/')[-1], theme=gr.themes.Ocean()) as demo:
+     gr.HTML(html)
+     with gr.Row():
+         with gr.Column(scale=3):
+             input_type = gr.Radio(choices=["image + prompt", "video + prompt"], label="Select input type:", value="image + prompt", elem_classes="my_radio")
+
+             image_input = gr.Image(label="image", height=350, type="pil", visible=True)
+             video_input = gr.Video(label="video", height=350, format='mp4', visible=False)
+             with gr.Column(visible=True) as image_examples_col:
+                 image_examples = gr.Examples(
+                     examples=[
+                         [f"{cur_dir}/examples/ovis2_math0.jpg", "Each face of the polyhedron shown is either a triangle or a square. Each square borders 4 triangles, and each triangle borders 3 squares. The polyhedron has 6 squares. How many triangles does it have?\n\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution."],
+                         [f"{cur_dir}/examples/ovis2_math1.jpg", "A large square touches another two squares, as shown in the picture. The numbers inside the smaller squares indicate their areas. What is the area of the largest square?\n\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution."],
+                         [f"{cur_dir}/examples/ovis2_figure0.png", "Explain this model."],
+                         [f"{cur_dir}/examples/ovis2_figure1.png", "Organize the notes about GRPO in the figure."],
+                         [f"{cur_dir}/examples/ovis2_multi0.jpg", "Posso avere un frappuccino e un caffè americano di taglia M? Quanto costa in totale?"],
+                     ],
+                     inputs=[image_input, text_input]
+                 )
+
+             def update_visibility_on_example(video_input, text_input):
+                 return (gr.update(visible=True), text_input)
+
+             with gr.Column(visible=False) as video_examples_col:
+                 video_examples = gr.Examples(
+                     examples=[
+                         [f"{cur_dir}/examples/video_demo_1.mp4", "Describe the video."]
+                     ],
+                     inputs=[video_input, text_input],
+                     fn=update_visibility_on_example,
+                     run_on_click=True,
+                     outputs=[video_input, text_input]
+                 )
+
+         with gr.Column(scale=7):
+             chatbot = gr.Chatbot(label="Ovis", layout="panel", height=600, show_copy_button=True, latex_delimiters=latex_delimiters_set)
+             text_input.render()
+             with gr.Row():
+                 send_btn = gr.Button("Send", variant="primary")
+                 clear_btn = gr.Button("Clear", variant="secondary")
+
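+     # Swap the image/video widgets and example galleries when the input type
+     # changes, clearing the current chat at the same time.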
+     def update_input_and_clear(selected):
+         if selected == "image + prompt":
+             visibility_updates = (gr.update(visible=True), gr.update(visible=False),
+                                   gr.update(visible=True), gr.update(visible=False))
+         else:
+             visibility_updates = (gr.update(visible=False), gr.update(visible=True),
+                                   gr.update(visible=False), gr.update(visible=True))
+         clear_chat_outputs = clear_chat()
+         return visibility_updates + clear_chat_outputs
+
+     input_type.change(fn=update_input_and_clear, inputs=input_type,
+                       outputs=[image_input, video_input, image_examples_col, video_examples_col, chatbot, image_input, text_input, video_input])
+
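+     # Both the Send button and pressing Enter append the user turn first,
+     # then stream the model response into the chatbot.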
+     send_click_event = send_btn.click(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input, video_input], chatbot)
+     submit_event = text_input.submit(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input, video_input], chatbot)
+     clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input, video_input])
+
+ demo.launch()
examples/ovis2_figure0.png ADDED

Git LFS Details

  • SHA256: 80bebf1106831041eaa9baef86d12d443360d5f4e5dd37795d841658853b44fc
  • Pointer size: 132 Bytes
  • Size of remote file: 2.84 MB
examples/ovis2_figure1.png ADDED

Git LFS Details

  • SHA256: af401830ffa31eac748766c49cc678124f859aa5336c38c94b3586fda0e6240c
  • Pointer size: 131 Bytes
  • Size of remote file: 278 kB
examples/ovis2_math0.jpg ADDED

Git LFS Details

  • SHA256: e9feb598f783b0103888fa6db1dea23045e9245d8417895623f8408b783c062e
  • Pointer size: 129 Bytes
  • Size of remote file: 7.46 kB
examples/ovis2_math1.jpg ADDED

Git LFS Details

  • SHA256: d8a7dc778bae422f40e37ecd6e23e99a08be5d1c81b5d92530d4572bc6d8e2b4
  • Pointer size: 129 Bytes
  • Size of remote file: 6.39 kB
examples/ovis2_multi0.jpg ADDED

Git LFS Details

  • SHA256: 66f1f86d24b0f334f039165ebd1ec3e83cefcf7b8bea87e9ec2d42a09c1f84e5
  • Pointer size: 132 Bytes
  • Size of remote file: 3.41 MB
examples/video_demo_1.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b66efe7ca078676d752e04f3388d60a43a1e9416731dfcb1cb52bbc69bd76af4
+ size 5816813
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ numpy==1.25.0
+ torch==2.4.0
+ transformers==4.46.2
+ pillow==10.3.0
+ moviepy==1.0.3
resource/logo.svg ADDED