|
import torch
from transformers import (
    AddedToken,
    AutoProcessor,
    Qwen2ForCausalLM,
    Qwen2_5_VLForConditionalGeneration,
)

# Load the Qwen2.5-VL vision-language model as the base.
qwen25_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
llm_device = qwen25_model.model.device

# Load the DeepSeek-R1 distilled model; it shares the Qwen2 decoder
# architecture and vocab size with the VL model's language backbone.
deepseek_model = Qwen2ForCausalLM.from_pretrained(
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
).to(torch.bfloat16).to(llm_device)

# Transplant the DeepSeek decoder and LM head into the VL model,
# leaving the vision tower untouched.
qwen25_model.model.load_state_dict(deepseek_model.model.state_dict())
qwen25_model.lm_head.load_state_dict(deepseek_model.lm_head.state_dict())

# Both checkpoints are already bf16; this cast is a defensive no-op.
qwen25_model = qwen25_model.to(torch.bfloat16)
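# Optional sanity check (a hedged sketch, not in the original script): confirm
# the transplant by comparing the token embeddings of both models. This assumes
# the transformers version where `qwen25_model.model` is the text decoder, as
# the load_state_dict calls above already do.
with torch.no_grad():
    assert torch.equal(
        deepseek_model.model.embed_tokens.weight.cpu(),
        qwen25_model.model.embed_tokens.weight.cpu(),
    ), "decoder embedding weights were not copied"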
|
# Processor with a bounded image-resolution budget (counted in 28x28 patches).
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    min_pixels=min_pixels,
    max_pixels=max_pixels,
    use_fast=False,
)
|
# Remap existing Qwen special-token ids to DeepSeek-R1's special-token strings
# so the transplanted language model sees the chat/think markers it was
# trained on.
ID_TO_NEW_TOKEN = {
    151643: "<|end▁of▁sentence|>",
    151644: "<|User|>",
    151645: "<|Assistant|>",
    151646: "<|begin▁of▁sentence|>",
    151648: "<think>",
    151649: "</think>",
}
NEW_TOKEN_TO_ID = {v: k for k, v in ID_TO_NEW_TOKEN.items()}
|
for old_id, text in ID_TO_NEW_TOKEN.items():
    tok = AddedToken(
        text,
        special=True,
        normalized=False,
        lstrip=False,
        rstrip=False,
        single_word=False,
    )
    # Point the slow tokenizer's internal added-token tables at the DeepSeek
    # strings so the existing ids decode to (and encode from) the new text.
    processor.tokenizer._added_tokens_decoder[old_id] = tok
    processor.tokenizer._added_tokens_encoder[text] = old_id

# Rebuild the tokenizer's matching trie so the remapped strings are split
# out as single special tokens during encoding.
processor.tokenizer._update_trie()
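# Optional round-trip check (a hedged sketch, not in the original script):
# each remapped string should now encode to exactly its overridden id.
for tid, text in ID_TO_NEW_TOKEN.items():
    ids = processor.tokenizer.encode(text, add_special_tokens=False)
    assert ids == [tid], f"{text!r} encoded to {ids}, expected [{tid}]"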
print("Model loaded and moved to GPU")

# Push the merged model and the remapped processor to the Hub.
repo_name = "ahmedheakl/vlm-r1-base2"
qwen25_model.push_to_hub(repo_name)
processor.push_to_hub(repo_name)
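# Optional smoke test (a hedged sketch, not in the original script): a short
# text-only generation to check that the merged model emits DeepSeek-style
# <think> traces. The prompt format assumes DeepSeek-R1's chat markers.
prompt = "<|begin▁of▁sentence|><|User|>What is 2 + 2?<|Assistant|>"
inputs = processor(text=[prompt], return_tensors="pt").to(llm_device)
with torch.no_grad():
    out_ids = qwen25_model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(out_ids, skip_special_tokens=False)[0])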