{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "At_60t8A92zP"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/ryn_mote/Misc/vlm_with_pooled_for_text_genrec\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/ryn_mote/anaconda3/lib/python3.10/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
" warnings.warn(\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e43c05fa9e374f2587356068d9343e08",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some weights of the model checkpoint at OpenGVLab/InternVL2-4B were not used when initializing InternVLChatModel: ['vision_model.embeddings.class_embedding', 'vision_model.embeddings.patch_embedding.bias', 'vision_model.embeddings.patch_embedding.weight', 'vision_model.embeddings.position_embedding', 'vision_model.encoder.layers.0.attn.proj.bias', 'vision_model.encoder.layers.0.attn.proj.weight', 'vision_model.encoder.layers.0.attn.qkv.bias', 'vision_model.encoder.layers.0.attn.qkv.weight', 'vision_model.encoder.layers.0.ls1', 'vision_model.encoder.layers.0.ls2', 'vision_model.encoder.layers.0.mlp.fc1.bias', 'vision_model.encoder.layers.0.mlp.fc1.weight', 'vision_model.encoder.layers.0.mlp.fc2.bias', 'vision_model.encoder.layers.0.mlp.fc2.weight', 'vision_model.encoder.layers.0.norm1.bias', 'vision_model.encoder.layers.0.norm1.weight', 'vision_model.encoder.layers.0.norm2.bias', 'vision_model.encoder.layers.0.norm2.weight', 'vision_model.encoder.layers.1.attn.proj.bias', 'vision_model.encoder.layers.1.attn.proj.weight', 'vision_model.encoder.layers.1.attn.qkv.bias', 'vision_model.encoder.layers.1.attn.qkv.weight', 'vision_model.encoder.layers.1.ls1', 'vision_model.encoder.layers.1.ls2', 'vision_model.encoder.layers.1.mlp.fc1.bias', 'vision_model.encoder.layers.1.mlp.fc1.weight', 'vision_model.encoder.layers.1.mlp.fc2.bias', 'vision_model.encoder.layers.1.mlp.fc2.weight', 'vision_model.encoder.layers.1.norm1.bias', 'vision_model.encoder.layers.1.norm1.weight', 'vision_model.encoder.layers.1.norm2.bias', 'vision_model.encoder.layers.1.norm2.weight', 'vision_model.encoder.layers.10.attn.proj.bias', 'vision_model.encoder.layers.10.attn.proj.weight', 'vision_model.encoder.layers.10.attn.qkv.bias', 'vision_model.encoder.layers.10.attn.qkv.weight', 'vision_model.encoder.layers.10.ls1', 'vision_model.encoder.layers.10.ls2', 'vision_model.encoder.layers.10.mlp.fc1.bias', 'vision_model.encoder.layers.10.mlp.fc1.weight', 'vision_model.encoder.layers.10.mlp.fc2.bias', 'vision_model.encoder.layers.10.mlp.fc2.weight', 'vision_model.encoder.layers.10.norm1.bias', 'vision_model.encoder.layers.10.norm1.weight', 'vision_model.encoder.layers.10.norm2.bias', 'vision_model.encoder.layers.10.norm2.weight', 'vision_model.encoder.layers.11.attn.proj.bias', 'vision_model.encoder.layers.11.attn.proj.weight', 'vision_model.encoder.layers.11.attn.qkv.bias', 'vision_model.encoder.layers.11.attn.qkv.weight', 'vision_model.encoder.layers.11.ls1', 'vision_model.encoder.layers.11.ls2', 'vision_model.encoder.layers.11.mlp.fc1.bias', 'vision_model.encoder.layers.11.mlp.fc1.weight', 'vision_model.encoder.layers.11.mlp.fc2.bias', 'vision_model.encoder.layers.11.mlp.fc2.weight', 'vision_model.encoder.layers.11.norm1.bias', 'vision_model.encoder.layers.11.norm1.weight', 'vision_model.encoder.layers.11.norm2.bias', 'vision_model.encoder.layers.11.norm2.weight', 'vision_model.encoder.layers.12.attn.proj.bias', 'vision_model.encoder.layers.12.attn.proj.weight', 'vision_model.encoder.layers.12.attn.qkv.bias', 'vision_model.encoder.layers.12.attn.qkv.weight', 'vision_model.encoder.layers.12.ls1', 'vision_model.encoder.layers.12.ls2', 'vision_model.encoder.layers.12.mlp.fc1.bias', 'vision_model.encoder.layers.12.mlp.fc1.weight', 'vision_model.encoder.layers.12.mlp.fc2.bias', 'vision_model.encoder.layers.12.mlp.fc2.weight', 'vision_model.encoder.layers.12.norm1.bias', 'vision_model.encoder.layers.12.norm1.weight', 'vision_model.encoder.layers.12.norm2.bias', 'vision_model.encoder.layers.12.norm2.weight', 
'vision_model.encoder.layers.13.attn.proj.bias', 'vision_model.encoder.layers.13.attn.proj.weight', 'vision_model.encoder.layers.13.attn.qkv.bias', 'vision_model.encoder.layers.13.attn.qkv.weight', 'vision_model.encoder.layers.13.ls1', 'vision_model.encoder.layers.13.ls2', 'vision_model.encoder.layers.13.mlp.fc1.bias', 'vision_model.encoder.layers.13.mlp.fc1.weight', 'vision_model.encoder.layers.13.mlp.fc2.bias', 'vision_model.encoder.layers.13.mlp.fc2.weight', 'vision_model.encoder.layers.13.norm1.bias', 'vision_model.encoder.layers.13.norm1.weight', 'vision_model.encoder.layers.13.norm2.bias', 'vision_model.encoder.layers.13.norm2.weight', 'vision_model.encoder.layers.14.attn.proj.bias', 'vision_model.encoder.layers.14.attn.proj.weight', 'vision_model.encoder.layers.14.attn.qkv.bias', 'vision_model.encoder.layers.14.attn.qkv.weight', 'vision_model.encoder.layers.14.ls1', 'vision_model.encoder.layers.14.ls2', 'vision_model.encoder.layers.14.mlp.fc1.bias', 'vision_model.encoder.layers.14.mlp.fc1.weight', 'vision_model.encoder.layers.14.mlp.fc2.bias', 'vision_model.encoder.layers.14.mlp.fc2.weight', 'vision_model.encoder.layers.14.norm1.bias', 'vision_model.encoder.layers.14.norm1.weight', 'vision_model.encoder.layers.14.norm2.bias', 'vision_model.encoder.layers.14.norm2.weight', 'vision_model.encoder.layers.15.attn.proj.bias', 'vision_model.encoder.layers.15.attn.proj.weight', 'vision_model.encoder.layers.15.attn.qkv.bias', 'vision_model.encoder.layers.15.attn.qkv.weight', 'vision_model.encoder.layers.15.ls1', 'vision_model.encoder.layers.15.ls2', 'vision_model.encoder.layers.15.mlp.fc1.bias', 'vision_model.encoder.layers.15.mlp.fc1.weight', 'vision_model.encoder.layers.15.mlp.fc2.bias', 'vision_model.encoder.layers.15.mlp.fc2.weight', 'vision_model.encoder.layers.15.norm1.bias', 'vision_model.encoder.layers.15.norm1.weight', 'vision_model.encoder.layers.15.norm2.bias', 'vision_model.encoder.layers.15.norm2.weight', 'vision_model.encoder.layers.16.attn.proj.bias', 'vision_model.encoder.layers.16.attn.proj.weight', 'vision_model.encoder.layers.16.attn.qkv.bias', 'vision_model.encoder.layers.16.attn.qkv.weight', 'vision_model.encoder.layers.16.ls1', 'vision_model.encoder.layers.16.ls2', 'vision_model.encoder.layers.16.mlp.fc1.bias', 'vision_model.encoder.layers.16.mlp.fc1.weight', 'vision_model.encoder.layers.16.mlp.fc2.bias', 'vision_model.encoder.layers.16.mlp.fc2.weight', 'vision_model.encoder.layers.16.norm1.bias', 'vision_model.encoder.layers.16.norm1.weight', 'vision_model.encoder.layers.16.norm2.bias', 'vision_model.encoder.layers.16.norm2.weight', 'vision_model.encoder.layers.17.attn.proj.bias', 'vision_model.encoder.layers.17.attn.proj.weight', 'vision_model.encoder.layers.17.attn.qkv.bias', 'vision_model.encoder.layers.17.attn.qkv.weight', 'vision_model.encoder.layers.17.ls1', 'vision_model.encoder.layers.17.ls2', 'vision_model.encoder.layers.17.mlp.fc1.bias', 'vision_model.encoder.layers.17.mlp.fc1.weight', 'vision_model.encoder.layers.17.mlp.fc2.bias', 'vision_model.encoder.layers.17.mlp.fc2.weight', 'vision_model.encoder.layers.17.norm1.bias', 'vision_model.encoder.layers.17.norm1.weight', 'vision_model.encoder.layers.17.norm2.bias', 'vision_model.encoder.layers.17.norm2.weight', 'vision_model.encoder.layers.18.attn.proj.bias', 'vision_model.encoder.layers.18.attn.proj.weight', 'vision_model.encoder.layers.18.attn.qkv.bias', 'vision_model.encoder.layers.18.attn.qkv.weight', 'vision_model.encoder.layers.18.ls1', 'vision_model.encoder.layers.18.ls2', 
'vision_model.encoder.layers.18.mlp.fc1.bias', 'vision_model.encoder.layers.18.mlp.fc1.weight', 'vision_model.encoder.layers.18.mlp.fc2.bias', 'vision_model.encoder.layers.18.mlp.fc2.weight', 'vision_model.encoder.layers.18.norm1.bias', 'vision_model.encoder.layers.18.norm1.weight', 'vision_model.encoder.layers.18.norm2.bias', 'vision_model.encoder.layers.18.norm2.weight', 'vision_model.encoder.layers.19.attn.proj.bias', 'vision_model.encoder.layers.19.attn.proj.weight', 'vision_model.encoder.layers.19.attn.qkv.bias', 'vision_model.encoder.layers.19.attn.qkv.weight', 'vision_model.encoder.layers.19.ls1', 'vision_model.encoder.layers.19.ls2', 'vision_model.encoder.layers.19.mlp.fc1.bias', 'vision_model.encoder.layers.19.mlp.fc1.weight', 'vision_model.encoder.layers.19.mlp.fc2.bias', 'vision_model.encoder.layers.19.mlp.fc2.weight', 'vision_model.encoder.layers.19.norm1.bias', 'vision_model.encoder.layers.19.norm1.weight', 'vision_model.encoder.layers.19.norm2.bias', 'vision_model.encoder.layers.19.norm2.weight', 'vision_model.encoder.layers.2.attn.proj.bias', 'vision_model.encoder.layers.2.attn.proj.weight', 'vision_model.encoder.layers.2.attn.qkv.bias', 'vision_model.encoder.layers.2.attn.qkv.weight', 'vision_model.encoder.layers.2.ls1', 'vision_model.encoder.layers.2.ls2', 'vision_model.encoder.layers.2.mlp.fc1.bias', 'vision_model.encoder.layers.2.mlp.fc1.weight', 'vision_model.encoder.layers.2.mlp.fc2.bias', 'vision_model.encoder.layers.2.mlp.fc2.weight', 'vision_model.encoder.layers.2.norm1.bias', 'vision_model.encoder.layers.2.norm1.weight', 'vision_model.encoder.layers.2.norm2.bias', 'vision_model.encoder.layers.2.norm2.weight', 'vision_model.encoder.layers.20.attn.proj.bias', 'vision_model.encoder.layers.20.attn.proj.weight', 'vision_model.encoder.layers.20.attn.qkv.bias', 'vision_model.encoder.layers.20.attn.qkv.weight', 'vision_model.encoder.layers.20.ls1', 'vision_model.encoder.layers.20.ls2', 'vision_model.encoder.layers.20.mlp.fc1.bias', 'vision_model.encoder.layers.20.mlp.fc1.weight', 'vision_model.encoder.layers.20.mlp.fc2.bias', 'vision_model.encoder.layers.20.mlp.fc2.weight', 'vision_model.encoder.layers.20.norm1.bias', 'vision_model.encoder.layers.20.norm1.weight', 'vision_model.encoder.layers.20.norm2.bias', 'vision_model.encoder.layers.20.norm2.weight', 'vision_model.encoder.layers.21.attn.proj.bias', 'vision_model.encoder.layers.21.attn.proj.weight', 'vision_model.encoder.layers.21.attn.qkv.bias', 'vision_model.encoder.layers.21.attn.qkv.weight', 'vision_model.encoder.layers.21.ls1', 'vision_model.encoder.layers.21.ls2', 'vision_model.encoder.layers.21.mlp.fc1.bias', 'vision_model.encoder.layers.21.mlp.fc1.weight', 'vision_model.encoder.layers.21.mlp.fc2.bias', 'vision_model.encoder.layers.21.mlp.fc2.weight', 'vision_model.encoder.layers.21.norm1.bias', 'vision_model.encoder.layers.21.norm1.weight', 'vision_model.encoder.layers.21.norm2.bias', 'vision_model.encoder.layers.21.norm2.weight', 'vision_model.encoder.layers.22.attn.proj.bias', 'vision_model.encoder.layers.22.attn.proj.weight', 'vision_model.encoder.layers.22.attn.qkv.bias', 'vision_model.encoder.layers.22.attn.qkv.weight', 'vision_model.encoder.layers.22.ls1', 'vision_model.encoder.layers.22.ls2', 'vision_model.encoder.layers.22.mlp.fc1.bias', 'vision_model.encoder.layers.22.mlp.fc1.weight', 'vision_model.encoder.layers.22.mlp.fc2.bias', 'vision_model.encoder.layers.22.mlp.fc2.weight', 'vision_model.encoder.layers.22.norm1.bias', 'vision_model.encoder.layers.22.norm1.weight', 
'vision_model.encoder.layers.22.norm2.bias', 'vision_model.encoder.layers.22.norm2.weight', 'vision_model.encoder.layers.23.attn.proj.bias', 'vision_model.encoder.layers.23.attn.proj.weight', 'vision_model.encoder.layers.23.attn.qkv.bias', 'vision_model.encoder.layers.23.attn.qkv.weight', 'vision_model.encoder.layers.23.ls1', 'vision_model.encoder.layers.23.ls2', 'vision_model.encoder.layers.23.mlp.fc1.bias', 'vision_model.encoder.layers.23.mlp.fc1.weight', 'vision_model.encoder.layers.23.mlp.fc2.bias', 'vision_model.encoder.layers.23.mlp.fc2.weight', 'vision_model.encoder.layers.23.norm1.bias', 'vision_model.encoder.layers.23.norm1.weight', 'vision_model.encoder.layers.23.norm2.bias', 'vision_model.encoder.layers.23.norm2.weight', 'vision_model.encoder.layers.3.attn.proj.bias', 'vision_model.encoder.layers.3.attn.proj.weight', 'vision_model.encoder.layers.3.attn.qkv.bias', 'vision_model.encoder.layers.3.attn.qkv.weight', 'vision_model.encoder.layers.3.ls1', 'vision_model.encoder.layers.3.ls2', 'vision_model.encoder.layers.3.mlp.fc1.bias', 'vision_model.encoder.layers.3.mlp.fc1.weight', 'vision_model.encoder.layers.3.mlp.fc2.bias', 'vision_model.encoder.layers.3.mlp.fc2.weight', 'vision_model.encoder.layers.3.norm1.bias', 'vision_model.encoder.layers.3.norm1.weight', 'vision_model.encoder.layers.3.norm2.bias', 'vision_model.encoder.layers.3.norm2.weight', 'vision_model.encoder.layers.4.attn.proj.bias', 'vision_model.encoder.layers.4.attn.proj.weight', 'vision_model.encoder.layers.4.attn.qkv.bias', 'vision_model.encoder.layers.4.attn.qkv.weight', 'vision_model.encoder.layers.4.ls1', 'vision_model.encoder.layers.4.ls2', 'vision_model.encoder.layers.4.mlp.fc1.bias', 'vision_model.encoder.layers.4.mlp.fc1.weight', 'vision_model.encoder.layers.4.mlp.fc2.bias', 'vision_model.encoder.layers.4.mlp.fc2.weight', 'vision_model.encoder.layers.4.norm1.bias', 'vision_model.encoder.layers.4.norm1.weight', 'vision_model.encoder.layers.4.norm2.bias', 'vision_model.encoder.layers.4.norm2.weight', 'vision_model.encoder.layers.5.attn.proj.bias', 'vision_model.encoder.layers.5.attn.proj.weight', 'vision_model.encoder.layers.5.attn.qkv.bias', 'vision_model.encoder.layers.5.attn.qkv.weight', 'vision_model.encoder.layers.5.ls1', 'vision_model.encoder.layers.5.ls2', 'vision_model.encoder.layers.5.mlp.fc1.bias', 'vision_model.encoder.layers.5.mlp.fc1.weight', 'vision_model.encoder.layers.5.mlp.fc2.bias', 'vision_model.encoder.layers.5.mlp.fc2.weight', 'vision_model.encoder.layers.5.norm1.bias', 'vision_model.encoder.layers.5.norm1.weight', 'vision_model.encoder.layers.5.norm2.bias', 'vision_model.encoder.layers.5.norm2.weight', 'vision_model.encoder.layers.6.attn.proj.bias', 'vision_model.encoder.layers.6.attn.proj.weight', 'vision_model.encoder.layers.6.attn.qkv.bias', 'vision_model.encoder.layers.6.attn.qkv.weight', 'vision_model.encoder.layers.6.ls1', 'vision_model.encoder.layers.6.ls2', 'vision_model.encoder.layers.6.mlp.fc1.bias', 'vision_model.encoder.layers.6.mlp.fc1.weight', 'vision_model.encoder.layers.6.mlp.fc2.bias', 'vision_model.encoder.layers.6.mlp.fc2.weight', 'vision_model.encoder.layers.6.norm1.bias', 'vision_model.encoder.layers.6.norm1.weight', 'vision_model.encoder.layers.6.norm2.bias', 'vision_model.encoder.layers.6.norm2.weight', 'vision_model.encoder.layers.7.attn.proj.bias', 'vision_model.encoder.layers.7.attn.proj.weight', 'vision_model.encoder.layers.7.attn.qkv.bias', 'vision_model.encoder.layers.7.attn.qkv.weight', 'vision_model.encoder.layers.7.ls1', 'vision_model.encoder.layers.7.ls2', 
'vision_model.encoder.layers.7.mlp.fc1.bias', 'vision_model.encoder.layers.7.mlp.fc1.weight', 'vision_model.encoder.layers.7.mlp.fc2.bias', 'vision_model.encoder.layers.7.mlp.fc2.weight', 'vision_model.encoder.layers.7.norm1.bias', 'vision_model.encoder.layers.7.norm1.weight', 'vision_model.encoder.layers.7.norm2.bias', 'vision_model.encoder.layers.7.norm2.weight', 'vision_model.encoder.layers.8.attn.proj.bias', 'vision_model.encoder.layers.8.attn.proj.weight', 'vision_model.encoder.layers.8.attn.qkv.bias', 'vision_model.encoder.layers.8.attn.qkv.weight', 'vision_model.encoder.layers.8.ls1', 'vision_model.encoder.layers.8.ls2', 'vision_model.encoder.layers.8.mlp.fc1.bias', 'vision_model.encoder.layers.8.mlp.fc1.weight', 'vision_model.encoder.layers.8.mlp.fc2.bias', 'vision_model.encoder.layers.8.mlp.fc2.weight', 'vision_model.encoder.layers.8.norm1.bias', 'vision_model.encoder.layers.8.norm1.weight', 'vision_model.encoder.layers.8.norm2.bias', 'vision_model.encoder.layers.8.norm2.weight', 'vision_model.encoder.layers.9.attn.proj.bias', 'vision_model.encoder.layers.9.attn.proj.weight', 'vision_model.encoder.layers.9.attn.qkv.bias', 'vision_model.encoder.layers.9.attn.qkv.weight', 'vision_model.encoder.layers.9.ls1', 'vision_model.encoder.layers.9.ls2', 'vision_model.encoder.layers.9.mlp.fc1.bias', 'vision_model.encoder.layers.9.mlp.fc1.weight', 'vision_model.encoder.layers.9.mlp.fc2.bias', 'vision_model.encoder.layers.9.mlp.fc2.weight', 'vision_model.encoder.layers.9.norm1.bias', 'vision_model.encoder.layers.9.norm1.weight', 'vision_model.encoder.layers.9.norm2.bias', 'vision_model.encoder.layers.9.norm2.weight']\n",
"- This IS expected if you are initializing InternVLChatModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
"- This IS NOT expected if you are initializing InternVLChatModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
"Some weights of InternVLChatModel were not initialized from the model checkpoint at OpenGVLab/InternVL2-4B and are newly initialized: ['vision_model.logit_scale', 'vision_model.text_model.embeddings.position_embedding.weight', 'vision_model.text_model.embeddings.token_embedding.weight', 'vision_model.text_model.encoder.layers.0.layer_norm1.bias', 'vision_model.text_model.encoder.layers.0.layer_norm1.weight', 'vision_model.text_model.encoder.layers.0.layer_norm2.bias', 'vision_model.text_model.encoder.layers.0.layer_norm2.weight', 'vision_model.text_model.encoder.layers.0.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.0.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.0.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.0.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.0.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.0.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.0.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.0.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.0.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.0.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.0.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.0.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.1.layer_norm1.bias', 'vision_model.text_model.encoder.layers.1.layer_norm1.weight', 'vision_model.text_model.encoder.layers.1.layer_norm2.bias', 'vision_model.text_model.encoder.layers.1.layer_norm2.weight', 'vision_model.text_model.encoder.layers.1.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.1.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.1.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.1.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.1.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.1.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.1.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.1.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.1.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.1.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.1.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.1.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.10.layer_norm1.bias', 'vision_model.text_model.encoder.layers.10.layer_norm1.weight', 'vision_model.text_model.encoder.layers.10.layer_norm2.bias', 'vision_model.text_model.encoder.layers.10.layer_norm2.weight', 'vision_model.text_model.encoder.layers.10.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.10.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.10.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.10.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.10.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.10.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.10.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.10.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.10.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.10.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.10.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.10.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.11.layer_norm1.bias', 'vision_model.text_model.encoder.layers.11.layer_norm1.weight', 'vision_model.text_model.encoder.layers.11.layer_norm2.bias', 
'vision_model.text_model.encoder.layers.11.layer_norm2.weight', 'vision_model.text_model.encoder.layers.11.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.11.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.11.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.11.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.11.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.11.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.11.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.11.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.11.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.11.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.11.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.11.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.2.layer_norm1.bias', 'vision_model.text_model.encoder.layers.2.layer_norm1.weight', 'vision_model.text_model.encoder.layers.2.layer_norm2.bias', 'vision_model.text_model.encoder.layers.2.layer_norm2.weight', 'vision_model.text_model.encoder.layers.2.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.2.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.2.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.2.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.2.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.2.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.2.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.2.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.2.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.2.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.2.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.2.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.3.layer_norm1.bias', 'vision_model.text_model.encoder.layers.3.layer_norm1.weight', 'vision_model.text_model.encoder.layers.3.layer_norm2.bias', 'vision_model.text_model.encoder.layers.3.layer_norm2.weight', 'vision_model.text_model.encoder.layers.3.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.3.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.3.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.3.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.3.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.3.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.3.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.3.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.3.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.3.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.3.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.3.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.4.layer_norm1.bias', 'vision_model.text_model.encoder.layers.4.layer_norm1.weight', 'vision_model.text_model.encoder.layers.4.layer_norm2.bias', 'vision_model.text_model.encoder.layers.4.layer_norm2.weight', 'vision_model.text_model.encoder.layers.4.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.4.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.4.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.4.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.4.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.4.self_attn.k_proj.weight', 
'vision_model.text_model.encoder.layers.4.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.4.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.4.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.4.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.4.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.4.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.5.layer_norm1.bias', 'vision_model.text_model.encoder.layers.5.layer_norm1.weight', 'vision_model.text_model.encoder.layers.5.layer_norm2.bias', 'vision_model.text_model.encoder.layers.5.layer_norm2.weight', 'vision_model.text_model.encoder.layers.5.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.5.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.5.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.5.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.5.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.5.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.5.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.5.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.5.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.5.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.5.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.5.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.6.layer_norm1.bias', 'vision_model.text_model.encoder.layers.6.layer_norm1.weight', 'vision_model.text_model.encoder.layers.6.layer_norm2.bias', 'vision_model.text_model.encoder.layers.6.layer_norm2.weight', 'vision_model.text_model.encoder.layers.6.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.6.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.6.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.6.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.6.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.6.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.6.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.6.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.6.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.6.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.6.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.6.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.7.layer_norm1.bias', 'vision_model.text_model.encoder.layers.7.layer_norm1.weight', 'vision_model.text_model.encoder.layers.7.layer_norm2.bias', 'vision_model.text_model.encoder.layers.7.layer_norm2.weight', 'vision_model.text_model.encoder.layers.7.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.7.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.7.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.7.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.7.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.7.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.7.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.7.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.7.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.7.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.7.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.7.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.8.layer_norm1.bias', 
'vision_model.text_model.encoder.layers.8.layer_norm1.weight', 'vision_model.text_model.encoder.layers.8.layer_norm2.bias', 'vision_model.text_model.encoder.layers.8.layer_norm2.weight', 'vision_model.text_model.encoder.layers.8.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.8.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.8.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.8.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.8.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.8.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.8.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.8.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.8.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.8.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.8.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.8.self_attn.v_proj.weight', 'vision_model.text_model.encoder.layers.9.layer_norm1.bias', 'vision_model.text_model.encoder.layers.9.layer_norm1.weight', 'vision_model.text_model.encoder.layers.9.layer_norm2.bias', 'vision_model.text_model.encoder.layers.9.layer_norm2.weight', 'vision_model.text_model.encoder.layers.9.mlp.fc1.bias', 'vision_model.text_model.encoder.layers.9.mlp.fc1.weight', 'vision_model.text_model.encoder.layers.9.mlp.fc2.bias', 'vision_model.text_model.encoder.layers.9.mlp.fc2.weight', 'vision_model.text_model.encoder.layers.9.self_attn.k_proj.bias', 'vision_model.text_model.encoder.layers.9.self_attn.k_proj.weight', 'vision_model.text_model.encoder.layers.9.self_attn.out_proj.bias', 'vision_model.text_model.encoder.layers.9.self_attn.out_proj.weight', 'vision_model.text_model.encoder.layers.9.self_attn.q_proj.bias', 'vision_model.text_model.encoder.layers.9.self_attn.q_proj.weight', 'vision_model.text_model.encoder.layers.9.self_attn.v_proj.bias', 'vision_model.text_model.encoder.layers.9.self_attn.v_proj.weight', 'vision_model.text_model.final_layer_norm.bias', 'vision_model.text_model.final_layer_norm.weight', 'vision_model.text_projection.weight', 'vision_model.vision_model.embeddings.class_embedding', 'vision_model.vision_model.embeddings.patch_embedding.weight', 'vision_model.vision_model.embeddings.position_embedding.weight', 'vision_model.vision_model.encoder.layers.0.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.0.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.0.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.0.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.0.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.0.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.0.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.0.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.0.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.0.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.0.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.0.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.0.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.0.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.0.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.0.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.1.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.1.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.1.layer_norm2.bias', 
'vision_model.vision_model.encoder.layers.1.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.1.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.1.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.1.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.1.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.1.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.1.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.1.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.1.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.1.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.1.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.1.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.1.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.10.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.10.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.10.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.10.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.10.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.10.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.10.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.10.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.10.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.10.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.10.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.10.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.10.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.10.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.10.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.10.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.11.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.11.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.11.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.11.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.11.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.11.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.11.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.11.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.11.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.11.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.11.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.11.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.11.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.11.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.11.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.11.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.2.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.2.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.2.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.2.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.2.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.2.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.2.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.2.mlp.fc2.weight', 
'vision_model.vision_model.encoder.layers.2.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.2.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.2.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.2.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.2.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.2.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.2.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.2.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.3.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.3.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.3.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.3.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.3.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.3.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.3.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.3.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.3.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.3.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.3.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.3.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.3.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.3.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.3.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.3.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.4.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.4.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.4.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.4.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.4.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.4.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.4.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.4.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.4.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.4.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.4.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.4.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.4.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.4.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.4.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.4.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.5.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.5.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.5.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.5.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.5.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.5.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.5.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.5.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.5.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.5.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.5.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.5.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.5.self_attn.q_proj.bias', 
'vision_model.vision_model.encoder.layers.5.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.5.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.5.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.6.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.6.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.6.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.6.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.6.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.6.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.6.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.6.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.6.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.6.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.6.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.6.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.6.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.6.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.6.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.6.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.7.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.7.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.7.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.7.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.7.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.7.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.7.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.7.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.7.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.7.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.7.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.7.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.7.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.7.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.7.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.7.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.8.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.8.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.8.layer_norm2.bias', 'vision_model.vision_model.encoder.layers.8.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.8.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.8.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.8.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.8.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.8.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.8.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.8.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.8.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.8.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.8.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.8.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.8.self_attn.v_proj.weight', 'vision_model.vision_model.encoder.layers.9.layer_norm1.bias', 'vision_model.vision_model.encoder.layers.9.layer_norm1.weight', 'vision_model.vision_model.encoder.layers.9.layer_norm2.bias', 
'vision_model.vision_model.encoder.layers.9.layer_norm2.weight', 'vision_model.vision_model.encoder.layers.9.mlp.fc1.bias', 'vision_model.vision_model.encoder.layers.9.mlp.fc1.weight', 'vision_model.vision_model.encoder.layers.9.mlp.fc2.bias', 'vision_model.vision_model.encoder.layers.9.mlp.fc2.weight', 'vision_model.vision_model.encoder.layers.9.self_attn.k_proj.bias', 'vision_model.vision_model.encoder.layers.9.self_attn.k_proj.weight', 'vision_model.vision_model.encoder.layers.9.self_attn.out_proj.bias', 'vision_model.vision_model.encoder.layers.9.self_attn.out_proj.weight', 'vision_model.vision_model.encoder.layers.9.self_attn.q_proj.bias', 'vision_model.vision_model.encoder.layers.9.self_attn.q_proj.weight', 'vision_model.vision_model.encoder.layers.9.self_attn.v_proj.bias', 'vision_model.vision_model.encoder.layers.9.self_attn.v_proj.weight', 'vision_model.vision_model.post_layernorm.bias', 'vision_model.vision_model.post_layernorm.weight', 'vision_model.vision_model.pre_layrnorm.bias', 'vision_model.vision_model.pre_layrnorm.weight', 'vision_model.visual_projection.weight']\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
"Some weights of InternVLChatModel were not initialized from the model checkpoint at OpenGVLab/InternVL2-4B and are newly initialized because the shapes did not match:\n",
"- mlp1.0.bias: found shape torch.Size([4096]) in the checkpoint and torch.Size([512]) in the model instantiated\n",
"- mlp1.0.weight: found shape torch.Size([4096]) in the checkpoint and torch.Size([512]) in the model instantiated\n",
"- mlp1.1.weight: found shape torch.Size([3072, 4096]) in the checkpoint and torch.Size([3072, 512]) in the model instantiated\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sequential(\n",
" (0): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n",
" (1): Linear(in_features=512, out_features=3072, bias=True)\n",
" (2): GELU(approximate='none')\n",
" (3): Linear(in_features=3072, out_features=3072, bias=True)\n",
")\n",
"Training: [Parameter containing:\n",
"tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
" 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', requires_grad=True), Parameter containing:\n",
"tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
" 0., 0., 0., 0., 0., 0., 0., 0.], device='cuda:0', requires_grad=True), Parameter containing:\n",
"tensor([[ 0.0339, 0.0376, 0.0073, ..., -0.0339, 0.0238, 0.0062],\n",
" [-0.0052, -0.0187, 0.0059, ..., 0.0276, -0.0090, 0.0187],\n",
" [ 0.0234, -0.0322, 0.0179, ..., -0.0114, -0.0010, 0.0280],\n",
" ...,\n",
" [ 0.0104, -0.0007, 0.0242, ..., -0.0311, -0.0272, 0.0166],\n",
" [-0.0249, -0.0128, 0.0104, ..., -0.0432, -0.0334, 0.0048],\n",
" [-0.0300, -0.0374, 0.0021, ..., -0.0104, 0.0311, 0.0000]],\n",
" device='cuda:0', requires_grad=True), Parameter containing:\n",
"tensor([-0.0425, -0.0312, -0.0302, ..., -0.0107, -0.0334, -0.0184],\n",
" device='cuda:0', requires_grad=True), Parameter containing:\n",
"tensor([[ 0.0349, -0.0058, -0.0051, ..., -0.0162, -0.0181, -0.0238],\n",
" [-0.0034, -0.0095, -0.0264, ..., -0.0123, 0.0048, -0.0051],\n",
" [-0.0266, 0.0070, -0.0043, ..., 0.0015, 0.0116, 0.0056],\n",
" ...,\n",
" [-0.0108, -0.0024, -0.0011, ..., 0.0107, -0.0093, -0.0156],\n",
" [-0.0225, 0.0118, -0.0011, ..., 0.0048, 0.0154, 0.0074],\n",
" [-0.0044, -0.0165, 0.0197, ..., 0.0096, 0.0107, -0.0024]],\n",
" device='cuda:0', requires_grad=True), Parameter containing:\n",
"tensor([-0.0131, 0.0132, -0.0184, ..., -0.0135, -0.0006, 0.0028],\n",
" device='cuda:0', requires_grad=True)]\n"
]
}
],
"source": [
"# Work from the repo root so `from model import ...` resolves the local module.\n",
"%cd /home/ryn_mote/Misc/vlm_with_pooled_for_text_genrec/\n",
"\n",
"import numpy as np\n",
"import torch\n",
"import torchvision.transforms as T\n",
"from PIL import Image\n",
"from torchvision.transforms.functional import InterpolationMode\n",
"from transformers import AutoModel, AutoTokenizer\n",
"\n",
"# `model` / `tokenizer` come from the local model.py, which loads InternVL2-4B and\n",
"# appears to swap in a 512-dim vision stack (hence the shape-mismatch warnings above).\n",
"from model import model, tokenizer\n"
]
},
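{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity-check sketch (assumes `model` is the patched InternVLChatModel loaded above).\n",
"# The load warnings show mlp1 was re-initialized for a 512-dim vision feature\n",
"# (LayerNorm(512) -> Linear(512, 3072) -> GELU -> Linear(3072, 3072)); listing the\n",
"# trainable tensors confirms which re-initialized pieces would actually be updated.\n",
"trainable = [n for n, p in model.named_parameters() if p.requires_grad]\n",
"print(f'{len(trainable)} trainable tensors; first few:', trainable[:8])\n"
]
},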
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 243,
"referenced_widgets": [
"dda6bb6b93f446d59d72ff96ff7a853e",
"b44eba0bbe4c43a1b0f721e678b4d283",
"e437079090dc473982e44067232d5a8a",
"c8e975345e394d50835853f171d168b5",
"dce79a5e12054f3b81f81f75ae16e496",
"0a1c58f845d8401f803dc82650ec5067",
"be5c59d8ae394c148dcd0c6cf9103c6c",
"851538e4adf34b9a923d109af48bf10f",
"82f1095705fc42f1a0bd5c419efd5fb5",
"1e8b80093b474466a159d263385af6c6",
"bcd3745ef57746678408765d98eb4e05"
]
},
"id": "Fjz_CIWCgHIN",
"outputId": "c64d772a-5df0-4f7e-e535-387777099423"
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "A3ep4uzx80DJ",
"outputId": "012b5503-8950-4daf-b07f-cbf5c5da2e07"
},
"outputs": [],
"source": [
"from model import load_image\n",
"\n",
"# `max_num` caps how many tiles each image is split into; 1 keeps a single crop.\n",
"with torch.cuda.amp.autocast(enabled=True, dtype=torch.bfloat16):\n",
"    pixel_values = load_image('/home/ryn_mote/Downloads/A polaroid of Silver Forgettings -- This flower does not exist.png', max_num=1).cuda()\n",
"    emb1 = model.extract_feature(pixel_values.to(torch.bfloat16))\n",
"\n",
"    pixel_values = load_image('/home/ryn_mote/Downloads/20230707_160218.jpg', max_num=1).cuda()\n",
"    emb2 = model.extract_feature(pixel_values.to(torch.bfloat16))\n",
"\n",
"    # Blend the two image embeddings with a 3:5 weighted average.\n",
"    w = [3, 5]\n",
"    embs = (emb1 * w[0] + emb2 * w[1]) / sum(w)\n",
"\n",
"    generation_config = dict(max_new_tokens=32, do_sample=False)\n",
"\n",
"    # Single-image, single-round conversation; `visual_features` makes chat() use the\n",
"    # blended embedding instead of re-extracting features from `pixel_values`.\n",
"    question = '\\n '\n",
"    response = model.chat(tokenizer, pixel_values, question, generation_config, visual_features=embs.to(torch.bfloat16))\n",
"    print(f'User: {question}\\nAssistant: {response}')\n"
]
},
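{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Interpolation-sweep sketch (assumes `emb1`, `emb2`, `pixel_values`, and\n",
"# `generation_config` still hold the values from the cell above). Sweeping the\n",
"# blend weight shows how the generation drifts from one image toward the other.\n",
"with torch.cuda.amp.autocast(enabled=True, dtype=torch.bfloat16):\n",
"    for alpha in (0.0, 0.25, 0.5, 0.75, 1.0):\n",
"        blended = (1 - alpha) * emb1 + alpha * emb2\n",
"        response = model.chat(tokenizer, pixel_values, '\\n ', generation_config,\n",
"                              visual_features=blended.to(torch.bfloat16))\n",
"        print(f'alpha={alpha}: {response}')\n"
]
},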
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ncu5qq0wEfKN"
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "UgjhC95CDb26",
"outputId": "709bb3ad-fe29-41a3-dd4d-d3bab9fb21ed"
},
"outputs": [],
"source": []
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "A100",
"machine_shape": "hm",
"provenance": []
},
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "base"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
|