xzerus committed · Commit f3d47d3 · verified · 1 Parent(s): c8a14e6

Update app.py

Files changed (1): app.py (+26 -118)
app.py CHANGED
@@ -6,6 +6,9 @@ from PIL import Image
 from torchvision.transforms.functional import InterpolationMode
 from transformers import AutoModel, AutoTokenizer
 
+# Device Configuration
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
 IMAGENET_MEAN = (0.485, 0.456, 0.406)
 IMAGENET_STD = (0.229, 0.224, 0.225)
 
@@ -38,22 +41,18 @@ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
     orig_width, orig_height = image.size
     aspect_ratio = orig_width / orig_height
 
-    # calculate the existing image aspect ratio
     target_ratios = set(
         (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
         i * j <= max_num and i * j >= min_num)
     target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
 
-    # find the closest aspect ratio to the target
     target_aspect_ratio = find_closest_aspect_ratio(
         aspect_ratio, target_ratios, orig_width, orig_height, image_size)
 
-    # calculate the target width and height
     target_width = image_size * target_aspect_ratio[0]
     target_height = image_size * target_aspect_ratio[1]
     blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
 
-    # resize the image
     resized_img = image.resize((target_width, target_height))
     processed_images = []
     for i in range(blocks):
@@ -63,7 +62,6 @@ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
             ((i % (target_width // image_size)) + 1) * image_size,
             ((i // (target_width // image_size)) + 1) * image_size
         )
-        # split the image
         split_img = resized_img.crop(box)
         processed_images.append(split_img)
     assert len(processed_images) == blocks
@@ -77,108 +75,9 @@ def load_image(image_file, input_size=448, max_num=12):
     transform = build_transform(input_size=input_size)
     images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
     pixel_values = [transform(image) for image in images]
-    pixel_values = torch.stack(pixel_values)
+    pixel_values = torch.stack(pixel_values).to(device)
     return pixel_values
 
-# If you want to load a model using multiple GPUs, please refer to the `Multiple GPUs` section.
-path = 'OpenGVLab/InternVL2_5-1B'
-model = AutoModel.from_pretrained(
-    path,
-    torch_dtype=torch.bfloat16,
-    low_cpu_mem_usage=True,
-    use_flash_attn=True,
-    trust_remote_code=True).eval().cuda()
-tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
-
-# set the max number of tiles in `max_num`
-pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
-generation_config = dict(max_new_tokens=1024, do_sample=True)
-
-# pure-text conversation
-question = 'Hello, who are you?'
-response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-question = 'Can you tell me a story?'
-response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-# single-image single-round conversation
-question = '<image>\nPlease describe the image shortly.'
-response = model.chat(tokenizer, pixel_values, question, generation_config)
-print(f'User: {question}\nAssistant: {response}')
-
-# single-image multi-round conversation
-question = '<image>\nPlease describe the image in detail.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-question = 'Please write a poem according to the image.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-# multi-image multi-round conversation, combined images
-pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
-pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
-pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
-
-question = '<image>\nDescribe the two images in detail.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               history=None, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-question = 'What are the similarities and differences between these two images.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               history=history, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-# multi-image multi-round conversation, separate images
-pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
-pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
-pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
-num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
-
-question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               num_patches_list=num_patches_list,
-                               history=None, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-question = 'What are the similarities and differences between these two images.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               num_patches_list=num_patches_list,
-                               history=history, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
-
-# batch inference, single image per sample
-pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
-pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
-num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
-pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
-
-questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
-responses = model.batch_chat(tokenizer, pixel_values,
-                             num_patches_list=num_patches_list,
-                             questions=questions,
-                             generation_config=generation_config)
-for question, response in zip(questions, responses):
-    print(f'User: {question}\nAssistant: {response}')
-
-# video multi-round conversation
-def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
-    if bound:
-        start, end = bound[0], bound[1]
-    else:
-        start, end = -100000, 100000
-    start_idx = max(first_idx, round(start * fps))
-    end_idx = min(round(end * fps), max_frame)
-    seg_size = float(end_idx - start_idx) / num_segments
-    frame_indices = np.array([
-        int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
-        for idx in range(num_segments)
-    ])
-    return frame_indices
-
 def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
     vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
     max_frame = len(vr) - 1
@@ -197,17 +96,26 @@ def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
     pixel_values = torch.cat(pixel_values_list)
     return pixel_values, num_patches_list
 
-video_path = './examples/red-panda.mp4'
-pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
-pixel_values = pixel_values.to(torch.bfloat16).cuda()
-video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
-question = video_prefix + 'What is the red panda doing?'
-# Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               num_patches_list=num_patches_list, history=None, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
+def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
+    if bound:
+        start, end = bound[0], bound[1]
+    else:
+        start, end = -100000, 100000
+    start_idx = max(first_idx, round(start * fps))
+    end_idx = min(round(end * fps), max_frame)
+    seg_size = float(end_idx - start_idx) / num_segments
+    frame_indices = np.array([
+        int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
+        for idx in range(num_segments)
+    ])
+    return frame_indices
 
-question = 'Describe this video in detail.'
-response, history = model.chat(tokenizer, pixel_values, question, generation_config,
-                               num_patches_list=num_patches_list, history=history, return_history=True)
-print(f'User: {question}\nAssistant: {response}')
+# Load Model
+path = 'OpenGVLab/InternVL2_5-1B'
+model = AutoModel.from_pretrained(
+    path,
+    low_cpu_mem_usage=True,
+    use_flash_attn=False,
+    trust_remote_code=True
+).eval().to(device)
+tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
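
For reference, a minimal usage sketch of the updated, device-agnostic path, not part of the commit: it reuses the `model.chat` call pattern and the './examples/image1.jpg' path from the removed demo code, and the prompt and generation settings are illustrative.

# Hypothetical usage sketch (assumes the repo's ./examples/image1.jpg asset exists).
# load_image now returns a tensor already on `device`, and the model is loaded with
# .to(device) instead of .cuda(), so the same call runs on CPU-only hosts.
pixel_values = load_image('./examples/image1.jpg', max_num=12)
generation_config = dict(max_new_tokens=1024, do_sample=True)
question = '<image>\nPlease describe the image shortly.'
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(f'User: {question}\nAssistant: {response}')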