Wiselnn committed on
Commit 6305af3 · verified · 1 Parent(s): 13cf0f2

d2d35138218eb2dd3999a9fa4f7f93b5fef9ea7fe1e60270b943d1ba85993fec

Files changed (20)
  1. vision_niah_d/needle_datasets/images/teddy_bear_times_square.png +3 -0
  2. vision_niah_d/needle_datasets/images/teddy_bear_times_square_interrupt.png +3 -0
  3. vision_niah_d/needle_datasets/images/ucsd.jpeg +3 -0
  4. vision_niah_d/needle_datasets/images/ucsd_interrupt.png +3 -0
  5. vision_niah_d/needle_datasets/images/zoo.png +3 -0
  6. vision_niah_d/needle_datasets/images/zoo_interrupt.png +3 -0
  7. vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json +452 -0
  8. vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt +1 -0
  9. vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png +3 -0
  10. vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/all_accuracies.json +452 -0
  11. vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt +1 -0
  12. vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/heatmap.png +3 -0
  13. vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json +452 -0
  14. vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt +1 -0
  15. vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png +3 -0
  16. vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json +452 -0
  17. vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt +1 -0
  18. vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png +3 -0
  19. vision_niah_d/produce_haystack_embedding.py +142 -0
  20. vision_niah_d/produce_needle_embedding.py +163 -0
vision_niah_d/needle_datasets/images/teddy_bear_times_square.png ADDED

Git LFS Details

  • SHA256: 51b6e1321ab1230d91eb8d8562195751a5583b41665ac7957122367cde9b221d
  • Pointer size: 132 Bytes
  • Size of remote file: 5.46 MB
vision_niah_d/needle_datasets/images/teddy_bear_times_square_interrupt.png ADDED

Git LFS Details

  • SHA256: 9575a239825d4ea064a78edac78905fbdb031051b332d905a44253214bfc44ef
  • Pointer size: 131 Bytes
  • Size of remote file: 121 kB
vision_niah_d/needle_datasets/images/ucsd.jpeg ADDED

Git LFS Details

  • SHA256: c3c96c65dbaed8e2bd9e8cc8dd4bf264f31109e3a55c108446569496058dd315
  • Pointer size: 131 Bytes
  • Size of remote file: 329 kB
vision_niah_d/needle_datasets/images/ucsd_interrupt.png ADDED

Git LFS Details

  • SHA256: cb810841a1d0d6b83da5a9984079d565aa600e373968982010f8d551c682fca2
  • Pointer size: 130 Bytes
  • Size of remote file: 85 kB
vision_niah_d/needle_datasets/images/zoo.png ADDED

Git LFS Details

  • SHA256: b5fb103488d7a043baad71d001b7a62124a64b547192ffa6d112dd38abec40b3
  • Pointer size: 132 Bytes
  • Size of remote file: 2.48 MB
vision_niah_d/needle_datasets/images/zoo_interrupt.png ADDED

Git LFS Details

  • SHA256: f5ef104d4555027cab2f4161970137da1c49cbb54258a6a364496df8af88c350
  • Pointer size: 130 Bytes
  • Size of remote file: 46.6 kB
vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json ADDED
@@ -0,0 +1,452 @@
+ [
+   {"Num. Frame": 100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 700, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 700, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 700, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 1500, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 1500, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 1500, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 1500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1700, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 1700, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 1700, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 1700, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 1700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1900, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 1900, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 1900, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 1900, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 1900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2100, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 2100, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 2100, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 2100, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 2100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2300, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 2300, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 2300, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 2300, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 2300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2500, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2700, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2900, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 2900, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 2900, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 2900, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 2900, "Frame Depth": 100.0, "Score": 1.0}
+ ]
vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt ADDED
@@ -0,0 +1 @@
+ Average Accuracy: 0.7866666666666665
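Each avg_accuracy.txt appears to be the plain mean of the 90 "Score" cells (15 frame counts × 6 depths) in the sibling all_accuracies.json. A minimal sketch of that reduction, assuming it is run from the repo root against the m_rope file above:

import json
from statistics import mean

# Mean over all (Num. Frame, Frame Depth) cells of the NIAH sweep; for the
# m_rope run this reproduces ~0.7866666666666666 (float rounding aside).
path = "vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json"
with open(path) as f:
    cells = json.load(f)
print("Average Accuracy:", mean(cell["Score"] for cell in cells))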
vision_niah_d/niah_output/Qwen2-VL-m_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png ADDED

Git LFS Details

  • SHA256: b7acaa6cf05408494c0e97a80065b3ab5250894b84eb5732f74df189b20cd689
  • Pointer size: 130 Bytes
  • Size of remote file: 42.4 kB
vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/all_accuracies.json ADDED
@@ -0,0 +1,452 @@
+ [
+   {"Num. Frame": 100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 20.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 40.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 60.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 80.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 20.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 40.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 60.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 80.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 20.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 40.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 60.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 80.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 20.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 40.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 60.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 80.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 20.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 40.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 60.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 80.0, "Score": 1.0},
+   {"Num. Frame": 1500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1700, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 1700, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 1700, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 1700, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 1700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 1900, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 1900, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 1900, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 1900, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 1900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2100, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2100, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2100, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2100, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2500, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2500, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2700, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2700, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 2900, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 2900, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 2900, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 2900, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 2900, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 2900, "Frame Depth": 100.0, "Score": 1.0}
+ ]
vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt ADDED
@@ -0,0 +1 @@
+ Average Accuracy: 0.9111111111111111
vision_niah_d/niah_output/Qwen2-VL-t_scale2_change_freq-128frames-16card_8k-context-330k-llava-video/heatmap.png ADDED

Git LFS Details

  • SHA256: 94268d656f4445e6605bacc60e33724e59c0424cff96d20867574459d9481f26
  • Pointer size: 130 Bytes
  • Size of remote file: 42.3 kB
vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json ADDED
@@ -0,0 +1,452 @@
+ [
+   {"Num. Frame": 100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 0.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 500, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 500, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 500, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 0.0, "Score": 0.2},
+   {"Num. Frame": 700, "Frame Depth": 20.0, "Score": 0.2},
+   {"Num. Frame": 700, "Frame Depth": 40.0, "Score": 0.2},
+   {"Num. Frame": 700, "Frame Depth": 60.0, "Score": 0.2},
+   {"Num. Frame": 700, "Frame Depth": 80.0, "Score": 0.2},
+   {"Num. Frame": 700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 0.0, "Score": 0.8},
+   {"Num. Frame": 900, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 0.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 100.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 1300, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 100.0, "Score": 0.6},
+   {"Num. Frame": 1500, "Frame Depth": 0.0, "Score": 0.4},
+   {"Num. Frame": 1500, "Frame Depth": 20.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 40.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 60.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 80.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 100.0, "Score": 0.2},
+   {"Num. Frame": 1700, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 0.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 100.0, "Score": 0.0}
+ ]
vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt ADDED
@@ -0,0 +1 @@
+ Average Accuracy: 0.29333333333333333
vision_niah_d/niah_output/Qwen2-VL-time_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png ADDED

Git LFS Details

  • SHA256: 5af1e9e2b48fed6fa209ace235c1d5c63715054c999289219263faaca76f5d97
  • Pointer size: 130 Bytes
  • Size of remote file: 42.6 kB
vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/all_accuracies.json ADDED
@@ -0,0 +1,452 @@
+ [
+   {"Num. Frame": 100, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 100, "Frame Depth": 20.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 40.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 60.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 80.0, "Score": 0.8},
+   {"Num. Frame": 100, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 0.0, "Score": 1.0},
+   {"Num. Frame": 300, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 300, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 300, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 300, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 300, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 500, "Frame Depth": 0.0, "Score": 0.8},
+   {"Num. Frame": 500, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 500, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 700, "Frame Depth": 0.0, "Score": 0.4},
+   {"Num. Frame": 700, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 700, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 700, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 700, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 700, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 900, "Frame Depth": 0.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 20.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 40.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 60.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 80.0, "Score": 0.6},
+   {"Num. Frame": 900, "Frame Depth": 100.0, "Score": 1.0},
+   {"Num. Frame": 1100, "Frame Depth": 0.0, "Score": 0.6},
+   {"Num. Frame": 1100, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 1100, "Frame Depth": 100.0, "Score": 0.8},
+   {"Num. Frame": 1300, "Frame Depth": 0.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 20.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 40.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 60.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 80.0, "Score": 0.4},
+   {"Num. Frame": 1300, "Frame Depth": 100.0, "Score": 0.6},
+   {"Num. Frame": 1500, "Frame Depth": 0.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 20.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 40.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 60.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 80.0, "Score": 0.2},
+   {"Num. Frame": 1500, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 1700, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 1900, "Frame Depth": 0.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 20.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 40.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 60.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 80.0, "Score": 0.2},
+   {"Num. Frame": 1900, "Frame Depth": 100.0, "Score": 0.2},
+   {"Num. Frame": 2100, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2100, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2300, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2500, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2700, "Frame Depth": 100.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 0.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 20.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 40.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 60.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 80.0, "Score": 0.0},
+   {"Num. Frame": 2900, "Frame Depth": 100.0, "Score": 0.0}
+ ]
vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/avg_accuracy.txt ADDED
@@ -0,0 +1 @@
+ Average Accuracy: 0.31777777777777777
vision_niah_d/niah_output/Qwen2-VL-vanilla_rope-128frames-16card_8k-context-330k-llava-video/heatmap.png ADDED

Git LFS Details

  • SHA256: c382372c23f66c2707683b72e252ae080c2f953a9c77f525a72730deb470565b
  • Pointer size: 130 Bytes
  • Size of remote file: 42.6 kB
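The heatmap.png files above presumably render the same 15 × 6 grid stored in each all_accuracies.json. A rough re-plot sketch (assumptions: pandas and matplotlib are available, the cell schema is exactly as shown above, and the script is run next to one of the JSON files):

import json
import pandas as pd
import matplotlib.pyplot as plt

# Pivot the flat cell list into a (Frame Depth) x (Num. Frame) score grid.
with open("all_accuracies.json") as f:  # any of the four files above
    cells = pd.DataFrame(json.load(f))
grid = cells.pivot(index="Frame Depth", columns="Num. Frame", values="Score")
plt.imshow(grid, aspect="auto", vmin=0.0, vmax=1.0, cmap="RdYlGn")
plt.colorbar(label="Score")
plt.xticks(range(len(grid.columns)), grid.columns, rotation=45)
plt.yticks(range(len(grid.index)), grid.index)
plt.xlabel("Num. Frame")
plt.ylabel("Frame Depth (%)")
plt.savefig("heatmap.png", bbox_inches="tight")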
vision_niah_d/produce_haystack_embedding.py ADDED
@@ -0,0 +1,142 @@
+ from qwen_vl_utils import process_vision_info
+ from decord import VideoReader, cpu
+ import argparse
+ import os
+ import numpy as np
+ from tqdm import tqdm
+ import torch
+ import transformers
+ import math
+ from PIL import Image
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from torchvision import io, transforms
+ from torchvision.transforms import InterpolationMode
+ 
+ IMAGE_FACTOR = 28
+ MIN_PIXELS = 144 * 28 * 28
+ MAX_PIXELS = 144 * 28 * 28
+ MAX_RATIO = 200
+ 
+ 
+ def load_video_batches(video_path, batch_size):
+     global args
+     vr = VideoReader(video_path, ctx=cpu(0))
+     total_frame_num = len(vr)
+     fps = round(vr.get_avg_fps())
+     frame_idx = [i for i in range(0, len(vr), fps)]  # sample roughly one frame per second
+     for start_idx in range(0, len(frame_idx), batch_size):
+         end_idx = min(start_idx + batch_size, len(frame_idx))  # clamp to the sampled list, not the raw frame count
+         frame_indices = frame_idx[start_idx:end_idx]
+         batch_frames = vr.get_batch(frame_indices).asnumpy()
+         batch_frames = torch.tensor(batch_frames).permute(0, 3, 1, 2)  # NHWC -> NCHW
+         nframes, _, height, width = batch_frames.shape
+         # fixed target size; matches smart_resize(720, 1280) under the 144-token pixel budget
+         resized_height, resized_width = 252, 448
+         batch_frames = transforms.functional.resize(
+             batch_frames,
+             [resized_height, resized_width],
+             interpolation=InterpolationMode.BICUBIC,
+             antialias=True,
+         ).float()
+         yield batch_frames
+ 
+ 
+ def round_by_factor(number: int, factor: int) -> int:
+     """Returns the closest integer to 'number' that is divisible by 'factor'."""
+     return round(number / factor) * factor
+ 
+ 
+ def ceil_by_factor(number: int, factor: int) -> int:
+     """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'."""
+     return math.ceil(number / factor) * factor
+ 
+ 
+ def floor_by_factor(number: int, factor: int) -> int:
+     """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'."""
+     return math.floor(number / factor) * factor
+ 
+ 
+ def smart_resize(
+     height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS
+ ) -> tuple[int, int]:
+     """
+     Rescales the image so that the following conditions are met:
+ 
+     1. Both dimensions (height and width) are divisible by 'factor'.
+ 
+     2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
+ 
+     3. The aspect ratio of the image is maintained as closely as possible.
+     """
+     if max(height, width) / min(height, width) > MAX_RATIO:
+         raise ValueError(
+             f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
+         )
+     h_bar = max(factor, round_by_factor(height, factor))
+     w_bar = max(factor, round_by_factor(width, factor))
+     if h_bar * w_bar > max_pixels:
+         beta = math.sqrt((height * width) / max_pixels)
+         h_bar = floor_by_factor(height / beta, factor)
+         w_bar = floor_by_factor(width / beta, factor)
+     elif h_bar * w_bar < min_pixels:
+         beta = math.sqrt(min_pixels / (height * width))
+         h_bar = ceil_by_factor(height * beta, factor)
+         w_bar = ceil_by_factor(width * beta, factor)
+     return h_bar, w_bar
+ 
+ 
+ def main(args):
+     video_path = args.video_path
+     model_path = args.model
+ 
+     model = Qwen2VLForConditionalGeneration.from_pretrained(
+         model_path,
+         device_map="auto",
+         torch_dtype=torch.bfloat16,
+         attn_implementation="flash_attention_2",
+     )
+     processor = AutoProcessor.from_pretrained("/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct")
+     del model.model.layers  # only the vision tower is used below; free the LLM layers
+     # Process video in batches
+     batch_size = 32
+     total_batches = (args.sampled_frames_num + batch_size - 1) // batch_size
+     image_feature_list = []
+     if args.add_newline_token:
+         newline_token_embedding = model.model.image_newline
+     with torch.inference_mode():
+         for i, video_batch in tqdm(enumerate(load_video_batches(video_path, batch_size)), total=total_batches, desc="Processing Video Batches"):
+             v_test = processor.image_processor(images=None, videos=video_batch)
+             pixel_values_videos, video_grid_thw = torch.from_numpy(v_test['pixel_values_videos']), torch.from_numpy(v_test['video_grid_thw']).to(model.device)
+             pixel_values_videos = pixel_values_videos.type(model.visual.get_dtype()).to(model.device)
+             video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw).to(model.device)
+             print(video_embeds.shape)
+             if args.add_newline_token:
+                 # LLaVA-style learned newline embedding appended after each row of visual tokens
+                 video_embeds = torch.cat([video_embeds, newline_token_embedding.unsqueeze(0).expand(video_embeds.shape[0], 1, -1)], dim=1)
+             image_feature_list.append(video_embeds.to(torch.bfloat16).to("cpu"))
+             if i > total_batches:  # rough cap at the requested number of sampled frames
+                 break
+     image_feature_list = torch.cat(image_feature_list, dim=0)
+     os.makedirs(args.output_dir, exist_ok=True)
+     torch.save(image_feature_list, f"{args.output_dir}/video_embeddings.pt")
+ 
+ 
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model", type=str, default="/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct")
+     parser.add_argument("--video_path", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/LongVA/asset/videos/movie.mp4")
+     parser.add_argument("--sampled_frames_num", type=int, default=6000)
+     parser.add_argument("--output_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/haystack_vicuna_embeddings_6000frames-tune_projector")
+     parser.add_argument("--pooling_size", type=int, default=0)
+     parser.add_argument("--add_newline_token", action="store_true")
+     args = parser.parse_args()
+     main(args)
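A small consumer-side check of the artifact this script writes. The (tokens, hidden) layout is an assumption based on how the model.visual outputs are concatenated along dim 0 above:

import torch

# Load the cached haystack embeddings written by produce_haystack_embedding.py.
emb = torch.load("video_embeddings.pt", map_location="cpu")
print(emb.shape, emb.dtype)  # expected roughly (total_visual_tokens, 3584) in bfloat16 for Qwen2-VL-7B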
vision_niah_d/produce_needle_embedding.py ADDED
@@ -0,0 +1,163 @@
+ from qwen_vl_utils import process_vision_info
+ from decord import VideoReader, cpu
+ import argparse
+ import base64
+ import numpy as np
+ import requests
+ from io import BytesIO
+ from tqdm import tqdm
+ import torch
+ import transformers
+ import math
+ from PIL import Image
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from torchvision import io, transforms
+ from torchvision.transforms import InterpolationMode
+ import os
+ import json
+ 
+ IMAGE_FACTOR = 28
+ MIN_PIXELS = 144 * 28 * 28
+ MAX_PIXELS = 144 * 28 * 28
+ MAX_RATIO = 200
+ 
+ 
+ def round_by_factor(number: int, factor: int) -> int:
+     """Returns the closest integer to 'number' that is divisible by 'factor'."""
+     return round(number / factor) * factor
+ 
+ 
+ def ceil_by_factor(number: int, factor: int) -> int:
+     """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'."""
+     return math.ceil(number / factor) * factor
+ 
+ 
+ def floor_by_factor(number: int, factor: int) -> int:
+     """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'."""
+     return math.floor(number / factor) * factor
+ 
+ 
+ def smart_resize(
+     height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS
+ ) -> tuple[int, int]:
+     """
+     Rescales the image so that the following conditions are met:
+ 
+     1. Both dimensions (height and width) are divisible by 'factor'.
+ 
+     2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
+ 
+     3. The aspect ratio of the image is maintained as closely as possible.
+     """
+     if max(height, width) / min(height, width) > MAX_RATIO:
+         raise ValueError(
+             f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
+         )
+     h_bar = max(factor, round_by_factor(height, factor))
+     w_bar = max(factor, round_by_factor(width, factor))
+     if h_bar * w_bar > max_pixels:
+         beta = math.sqrt((height * width) / max_pixels)
+         h_bar = floor_by_factor(height / beta, factor)
+         w_bar = floor_by_factor(width / beta, factor)
+     elif h_bar * w_bar < min_pixels:
+         beta = math.sqrt(min_pixels / (height * width))
+         h_bar = ceil_by_factor(height * beta, factor)
+         w_bar = ceil_by_factor(width * beta, factor)
+     return h_bar, w_bar
+ 
+ 
+ def read_json_file(file_path):
+     """
+     Read a JSON file and return its contents as a dictionary.
+ 
+     Args:
+         file_path (str): Path to the JSON file.
+ 
+     Returns:
+         dict: The data parsed from the JSON file.
+     """
+     try:
+         # open the file and parse the JSON data
+         with open(file_path, 'r', encoding='utf-8') as file:
+             data = json.load(file)
+         return data
+     except FileNotFoundError:
+         print(f"The file {file_path} was not found.")
+     except json.JSONDecodeError:
+         print(f"Error decoding JSON from file {file_path}.")
+     except Exception as e:
+         print(f"An error occurred: {e}")
+ 
+ 
+ def fetch_image(ele, size_factor: int = IMAGE_FACTOR) -> Image.Image:
+     if "image" in ele:
+         image = ele["image"]
+     else:
+         image = ele["image_url"]
+     image_obj = None
+     if isinstance(image, Image.Image):
+         image_obj = image
+     elif image.startswith("http://") or image.startswith("https://"):
+         image_obj = Image.open(requests.get(image, stream=True).raw)
+     elif image.startswith("file://"):
+         image_obj = Image.open(image[7:])
+     elif image.startswith("data:image"):
+         if "base64," in image:
+             _, base64_data = image.split("base64,", 1)
+             data = base64.b64decode(base64_data)
+             image_obj = Image.open(BytesIO(data))
+     else:
+         image_obj = Image.open(image)
+     if image_obj is None:
+         raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}")
+     image = image_obj.convert("RGB")
+     # resize to a fixed target; matches smart_resize(720, 1280) under the 144-token pixel budget
+     resized_height, resized_width = 252, 448
+     image = image.resize((resized_width, resized_height))
+     return image
+ 
+ 
+ def main(args):
+     model_path = args.model
+     model = Qwen2VLForConditionalGeneration.from_pretrained(
+         model_path,
+         device_map="auto",
+         torch_dtype=torch.bfloat16,
+         attn_implementation="flash_attention_2",
+     )
+     processor = AutoProcessor.from_pretrained("/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct")
+     del model.model.layers  # only the vision tower is used below; free the LLM layers
+     dataset = read_json_file(args.needle_dataset)
+     for index, instance in enumerate(dataset):
+         img = fetch_image({"image": os.path.join('/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/images', instance['path']), "resized_height": 252, "resized_width": 448})
+         image_single = processor.image_processor(images=[img], videos=None)
+         pixel_values, image_grid_thw = torch.from_numpy(image_single['pixel_values']), torch.from_numpy(image_single['image_grid_thw']).to(model.device)
+         pixel_values = pixel_values.type(model.visual.get_dtype()).to(model.device)
+         image_embed = model.visual(pixel_values, grid_thw=image_grid_thw).to(model.device)
+         print(image_embed.shape)
+         os.makedirs(args.output_dir, exist_ok=True)
+         torch.save(image_embed, f"{args.output_dir}/{index}.pt")
+ 
+ 
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model", type=str, default="/mnt/petrelfs/weixilin/cache/Qwen2-VL-7B-Instruct")
+     parser.add_argument("--needle_dataset", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/needle_datasets/dataset_change_format_debug.json")
+     parser.add_argument("--output_dir", type=str, default="/mnt/petrelfs/weixilin/projects/MLLM/Qwen2-VL/vision_niah/video_needle_haystack/data/needle_vicuna_embeddings_144tokens-tune_projector_interrupt_debug")
+     args = parser.parse_args()
+     main(args)
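Both scripts pin needles and haystack frames to 252 × 448, which spends the MIN_PIXELS = MAX_PIXELS = 144 · 28 · 28 = 112,896-pixel budget exactly (and is what smart_resize would target for a 720 × 1280 source). Assuming Qwen2-VL's usual 14 × 14 patches with 2 × 2 token merging (one final token per 28 × 28 pixel area), that size corresponds to the "144tokens" tag in the default --output_dir:

# One merged visual token covers a 28 x 28 pixel area (14 x 14 patches, 2 x 2 merge),
# so a 252 x 448 image maps to a 9 x 16 token grid.
tokens_per_image = (252 // 28) * (448 // 28)
print(tokens_per_image)  # 144
print(252 * 448 == 144 * 28 * 28)  # True: the fixed size uses the pixel budget exactly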