Commit b0a86ce · verified · committed by shubhrapandit
Parent: ee3f8f2

Update README.md

Files changed (1):
  1. README.md +24 -24
README.md CHANGED
@@ -225,12 +225,14 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <th></th>
   <th></th>
   <th></th>
+ <th></th>
   <th style="text-align: center;" colspan="2" >Document Visual Question Answering<br>1680W x 2240H<br>64/128</th>
   <th style="text-align: center;" colspan="2" >Visual Reasoning <br>640W x 480H<br>128/128</th>
   <th style="text-align: center;" colspan="2" >Image Captioning<br>480W x 360H<br>0/128</th>
   </tr>
   <tr>
   <th>Hardware</th>
+ <th>Number of GPUs</th>
   <th>Model</th>
   <th>Average Cost Reduction</th>
   <th>Latency (s)</th>
@@ -243,7 +245,8 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   </thead>
   <tbody>
   <tr>
- <td>A100x4</td>
+ <th rowspan="3" valign="top">A100</td>
+ <td>4</td>
   <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
   <td></td>
   <td>6.4</td>
@@ -254,7 +257,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>113</td>
   </tr>
   <tr>
- <td>A100x2</td>
+ <td>2</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
   <td>1.85</td>
   <td>7.0</td>
@@ -265,7 +268,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>211</td>
   </tr>
   <tr>
- <td>A100x1</td>
+ <td>1</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
   <td>3.33</td>
   <td>9.4</td>
@@ -276,7 +279,8 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>420</td>
   </tr>
   <tr>
- <td>H100x4</td>
+ <th rowspan="3" valign="top">H100</td>
+ <td>4</td>
   <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
   <td></td>
   <td>4.3</td>
@@ -287,7 +291,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>100</td>
   </tr>
   <tr>
- <td>H100x2</td>
+ <td>2</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
   <td>1.79</td>
   <td>4.6</td>
@@ -298,7 +302,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>177</td>
   </tr>
   <tr>
- <td>H100x1</td>
+ <td>1</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
   <td>5.66</td>
   <td>4.3</td>
@@ -341,7 +345,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   </thead>
   <tbody style="text-align: center">
   <tr>
- <td>A100x4</td>
+ <th rowspan="3" valign="top">A100x4</th>
   <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
   <td></td>
   <td>0.4</td>
@@ -352,29 +356,27 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>595</td>
   </tr>
   <tr>
- <td>A100x2</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
   <td>1.80</td>
- <td>0.6</td>
+ <td>1.2</td>
   <td>289</td>
- <td>2.0</td>
+ <td>4.0</td>
   <td>1020</td>
- <td>2.3</td>
+ <td>4.6</td>
   <td>1133</td>
   </tr>
   <tr>
- <td>A100x1</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
   <td>2.75</td>
- <td>0.7</td>
+ <td>2.8</td>
   <td>341</td>
- <td>3.2</td>
+ <td>12.8</td>
   <td>1588</td>
- <td>4.1</td>
+ <td>16.4</td>
   <td>2037</td>
   </tr>
   <tr>
- <td>H100x4</td>
+ <th rowspan="3" valign="top">H100x4</th>
   <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
   <td></td>
   <td>0.5</td>
@@ -385,25 +387,23 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
   <td>379</td>
   </tr>
   <tr>
- <td>H100x2</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
   <td>1.73</td>
- <td>0.9</td>
+ <td>1.8</td>
   <td>247</td>
- <td>2.2</td>
+ <td>4.4</td>
   <td>621</td>
- <td>2.4</td>
+ <td>4.8</td>
   <td>669</td>
   </tr>
   <tr>
- <td>H100x1</td>
   <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
   <td>8.27</td>
- <td>3.3</td>
+ <td>13.2</td>
   <td>913</td>
- <td>3.3</td>
+ <td>13.2</td>
   <td>913</td>
- <td>24.8</td>
+ <td>99.2</td>
   <td>6777</td>
   </tr>
   </tbody>
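
For orientation, the new "Number of GPUs" column corresponds to the tensor-parallel degree used when the model is served with vLLM. The sketch below is a minimal, assumed offline-inference setup, not the harness that produced the numbers above: it loads the w8a8 checkpoint across 2 GPUs (mirroring the 2-GPU A100 row) and runs a text-only prompt, whereas the benchmarked workloads additionally pass images (document VQA, visual reasoning, captioning) through vLLM's multi-modal inputs.

```python
# Minimal sketch, assuming a vLLM build with Qwen2.5-VL support installed.
# tensor_parallel_size mirrors the "Number of GPUs" column in the table above.
from vllm import LLM, SamplingParams

llm = LLM(
    model="neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8",
    tensor_parallel_size=2,  # e.g. the 2xA100 row
    max_model_len=4096,
)

sampling_params = SamplingParams(temperature=0.0, max_tokens=128)

# Text-only smoke test; image workloads would supply multi-modal inputs instead.
outputs = llm.generate(["Summarize what Qwen2.5-VL is designed to do."], sampling_params)
print(outputs[0].outputs[0].text)
```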