emigomez committed on
Commit
829b587
·
1 Parent(s): 5ae4477

First commit

app.py ADDED
@@ -0,0 +1,175 @@
+ import gradio as gr
+ from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration, TextIteratorStreamer
+ from transformers.image_utils import load_image
+ from threading import Thread
+ import time
+ import torch
+ import spaces
+ import cv2
+ import numpy as np
+ from PIL import Image
+
+ def progress_bar_html(label: str) -> str:
+     """
+     Returns an HTML snippet for a thin progress bar with a label.
+     The progress bar is styled as a dark animated bar.
+     """
+     return f'''
+     <div style="display: flex; align-items: center;">
+         <span style="margin-right: 10px; font-size: 14px;">{label}</span>
+         <div style="width: 110px; height: 5px; background-color: #9370DB; border-radius: 2px; overflow: hidden;">
+             <div style="width: 100%; height: 100%; background-color: #4B0082; animation: loading 1.5s linear infinite;"></div>
+         </div>
+     </div>
+     <style>
+     @keyframes loading {{
+         0% {{ transform: translateX(-100%); }}
+         100% {{ transform: translateX(100%); }}
+     }}
+     </style>
+     '''
+
+ def downsample_video(video_path):
+     """
+     Downsamples the video to 10 evenly spaced frames.
+     Each frame is converted to a PIL Image along with its timestamp.
+     """
+     vidcap = cv2.VideoCapture(video_path)
+     total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
+     fps = vidcap.get(cv2.CAP_PROP_FPS)
+     frames = []
+     if total_frames <= 0 or fps <= 0:
+         vidcap.release()
+         return frames
+     # Sample 10 evenly spaced frames.
+     frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
+     for i in frame_indices:
+         vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
+         success, image = vidcap.read()
+         if success:
+             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+             pil_image = Image.fromarray(image)
+             timestamp = round(i / fps, 2)
+             frames.append((pil_image, timestamp))
+     vidcap.release()
+     return frames
+
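+ # Load the model and processor once at import time; bfloat16 weights assume a CUDA device is available.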
+ MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"  # Alternatively: "Qwen/Qwen2.5-VL-7B-Instruct"
+ processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+     MODEL_ID,
+     trust_remote_code=True,
+     torch_dtype=torch.bfloat16
+ ).to("cuda").eval()
+
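+ # @spaces.GPU requests GPU time for this call when running on Hugging Face ZeroGPU Spaces; it has no effect elsewhere.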
+ @spaces.GPU
+ def model_inference(input_dict, history):
+     text = input_dict["text"]
+     files = input_dict["files"]
+
+     if text.strip().lower().startswith("@video-infer"):
+         # Remove the tag from the query.
+         text = text[len("@video-infer"):].strip()
+         if not files:
+             raise gr.Error("Please upload a video file along with your @video-infer query.")
+         # Assume the first file is a video.
+         video_path = files[0]
+         frames = downsample_video(video_path)
+         if not frames:
+             raise gr.Error("Could not process video.")
+         # Build messages: start with the text prompt.
+         messages = [
+             {
+                 "role": "user",
+                 "content": [{"type": "text", "text": text}]
+             }
+         ]
+         # Append each frame with a timestamp label.
+         for image, timestamp in frames:
+             messages[0]["content"].append({"type": "text", "text": f"Frame {timestamp}:"})
+             messages[0]["content"].append({"type": "image", "image": image})
+         # Collect only the images from the frames.
+         video_images = [image for image, _ in frames]
+         # Prepare the prompt.
+         prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+         inputs = processor(
+             text=[prompt],
+             images=video_images,
+             return_tensors="pt",
+             padding=True,
+         ).to("cuda")
+         # Set up streaming generation.
+         streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+         generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+         thread = Thread(target=model.generate, kwargs=generation_kwargs)
+         thread.start()
+         buffer = ""
+         yield progress_bar_html("Processing video with Qwen2.5VL Model")
+         for new_text in streamer:
+             buffer += new_text
+             time.sleep(0.01)
+             yield buffer
+         return
+
+     if len(files) > 1:
+         images = [load_image(image) for image in files]
+     elif len(files) == 1:
+         images = [load_image(files[0])]
+     else:
+         images = []
+
+     if text == "" and not images:
+         raise gr.Error("Please input a query and optionally image(s).")
+     if text == "" and images:
+         raise gr.Error("Please input a text query along with the image(s).")
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 *[{"type": "image", "image": image} for image in images],
+                 {"type": "text", "text": text},
+             ],
+         }
+     ]
+     prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     inputs = processor(
+         text=[prompt],
+         images=images if images else None,
+         return_tensors="pt",
+         padding=True,
+     ).to("cuda")
+     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+     buffer = ""
+     yield progress_bar_html("Processing with Qwen2.5VL Model")
+     for new_text in streamer:
+         buffer += new_text
+         time.sleep(0.01)
+         yield buffer
+
+ examples = [
+     [{"text": "Describe the Image?", "files": ["example_images/document.jpg"]}],
+     [{"text": "@video-infer Explain the content of the Advertisement", "files": ["example_images/videoplayback.mp4"]}],
+     [{"text": "@video-infer Explain the content of the video in detail", "files": ["example_images/breakfast.mp4"]}],
+     [{"text": "@video-infer Explain the content of the video.", "files": ["example_images/sky.mp4"]}],
+ ]
+
+ demo = gr.ChatInterface(
+     fn=model_inference,
+     description="# **Qwen2.5-VL-3B-Instruct `@video-infer for video understanding`**",
+     examples=examples,
+     fill_height=True,
+     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", "video"], file_count="multiple"),
+     stop_btn="Stop Generation",
+     multimodal=True,
+     cache_examples=False,
+ )
+
+ demo.launch(debug=True)
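
Note: a minimal client-side sketch (not part of this commit) of how the demo above could be queried programmatically once deployed, using the gradio_client package pinned in requirements.txt. The Space id is a placeholder, and the "/chat" endpoint name and "message" payload shape are assumptions based on how gr.ChatInterface typically exposes its API.

    # Hypothetical usage sketch; the Space id below is a placeholder.
    from gradio_client import Client, handle_file

    client = Client("emigomez/qwen2.5-vl-video-infer")  # placeholder Space id
    result = client.predict(
        message={
            "text": "@video-infer Explain the content of the video.",
            "files": [handle_file("example_images/sky.mp4")],
        },
        api_name="/chat",  # assumed default endpoint exposed by gr.ChatInterface
    )
    print(result)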
example_images/breakfast.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94ed4b09e71f25adca92aa356b8a1e2ca23073349d34f47c9a3cf397fc24b032
+ size 1623938
example_images/campeones.jpg ADDED

Git LFS Details

  • SHA256: 0bb7318e890a7527f3c900531850d3f3b4786c6ae2c43939970e6884553e57ba
  • Pointer size: 131 Bytes
  • Size of remote file: 870 kB
example_images/document.jpg ADDED

Git LFS Details

  • SHA256: b1370554160136244d8aae0a75b6fa1e1dc4fd17a2834470c11578310fc6bbe3
  • Pointer size: 131 Bytes
  • Size of remote file: 171 kB
example_images/dogs.jpg ADDED

Git LFS Details

  • SHA256: f651e3c654f74995a96c2da477670e1574e65c51e29c54c90937f37f9d91ce67
  • Pointer size: 130 Bytes
  • Size of remote file: 91.2 kB
example_images/examples_invoice.png ADDED

Git LFS Details

  • SHA256: 8964e903fe124c791f52992df1046aca5c298b0128c1b93bd03465faa7a00ac2
  • Pointer size: 130 Bytes
  • Size of remote file: 50 kB
example_images/examples_weather_events.png ADDED

Git LFS Details

  • SHA256: 443e28cba26ab4a08e2d4bcc311129c5818608ff8d4976c444bfcdd9918225ca
  • Pointer size: 131 Bytes
  • Size of remote file: 310 kB
example_images/int.png ADDED

Git LFS Details

  • SHA256: 0441848f5026f1fb46c5fdb19a5d201dfeee4d7bae8c089fadb1c642281ed4f8
  • Pointer size: 132 Bytes
  • Size of remote file: 2.28 MB
example_images/math.jpg ADDED

Git LFS Details

  • SHA256: 1367078c5125dc80bd47805ef223658cc5df6fdc791ad310e0483ff86567080d
  • Pointer size: 130 Bytes
  • Size of remote file: 16.2 kB
example_images/newyork.jpg ADDED

Git LFS Details

  • SHA256: 8eec301d6c61741bd4041965788f2f9efda12722f3672d1605d4c01055405c8a
  • Pointer size: 131 Bytes
  • Size of remote file: 557 kB
example_images/s2w_example.png ADDED

Git LFS Details

  • SHA256: 02fa71fb0761ccf26860a0476f7b66ff518421e4624e9132bb6654604eb8a0b6
  • Pointer size: 130 Bytes
  • Size of remote file: 82.8 kB
example_images/sky.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:450f62aa5c0ef6963ae9e71d2178893f849d4b5c57a2c9bfbf751bac61d18b9d
+ size 399942
example_images/videoplayback.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f88b951b809fb56d817a990aba9f41f8e45ca666ec4afe54812d8a7357997367
+ size 2247886
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ gradio_client==1.3.0
+ qwen-vl-utils==0.0.2
+ transformers-stream-generator==0.0.4
+ torch==2.4.0
+ torchvision==0.19.0
+ git+https://github.com/huggingface/transformers.git
+ accelerate
+ av
+ opencv-python