from transformers import (
    Qwen2ForCausalLM,
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    AddedToken,
)
import torch
from qwen_vl_utils import process_vision_info

# Load the Qwen2.5-VL vision-language model in bfloat16.
qwen25_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct", device_map="auto", torch_dtype=torch.bfloat16
)
llm_device = qwen25_model.model.device

# Load the DeepSeek-R1 distilled Qwen 7B and transplant its language-model
# weights (backbone + lm_head) into the Qwen2.5-VL language tower. The vision
# encoder keeps its original Qwen2.5-VL weights.
deepseek_model = (
    Qwen2ForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
    .to(torch.bfloat16)
    .to(llm_device)
)
qwen25_model.model.load_state_dict(deepseek_model.model.state_dict())
qwen25_model.lm_head.load_state_dict(deepseek_model.lm_head.state_dict())
qwen25_model = qwen25_model.to(torch.bfloat16)

min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    min_pixels=min_pixels,
    max_pixels=max_pixels,
    use_fast=False,
)

# Remap Qwen's special-token IDs to DeepSeek-R1's special-token strings,
# including the <think>/</think> reasoning delimiters.
ID_TO_NEW_TOKEN = {
    151643: "<|end▁of▁sentence|>",
    151644: "<|User|>",
    151645: "<|Assistant|>",
    151646: "<|begin▁of▁sentence|>",
    151648: "<think>",
    151649: "</think>",
}
# The reverse mapping: new text -> old ID
NEW_TOKEN_TO_ID = {v: k for k, v in ID_TO_NEW_TOKEN.items()}

for old_id, text in ID_TO_NEW_TOKEN.items():
    # Create an AddedToken that won't get split:
    # 'special=True' ensures it is recognized as one piece;
    # 'normalized=False' means the normalizer is not run over it,
    # so it is preserved exactly.
    tok = AddedToken(
        text,
        special=True,
        normalized=False,
        lstrip=False,
        rstrip=False,
        single_word=False,
    )
    # Register in the slow tokenizer's internal data structures:
    #   _added_tokens_decoder: maps ID -> AddedToken object
    #   _added_tokens_encoder: maps text -> ID
    # Then update the trie so that it can match them in raw text.
    processor.tokenizer._added_tokens_decoder[old_id] = tok
    processor.tokenizer._added_tokens_encoder[text] = old_id
    processor.tokenizer._update_trie()

print("Model loaded and moved to GPU")

repo_name = "ahmedheakl/vlm-r1-base2"
qwen25_model.push_to_hub(repo_name)
processor.push_to_hub(repo_name)

# messages = [
#     {
#         "role": "user",
#         "content": [
#             # {
#             #     "type": "image",
#             #     "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
#             # },
#             {"type": "text", "text": "What is the integration of cos^2(x)"},
#         ],
#     }
# ]
# text = processor.apply_chat_template(
#     messages, tokenize=False, add_generation_prompt=True
# )
# image_inputs, video_inputs = process_vision_info(messages)
# inputs = processor(
#     text=[text],
#     images=image_inputs,
#     videos=video_inputs,
#     padding=True,
#     return_tensors="pt",
# )
# inputs = inputs.to("cuda")

# # Inference: generation of the output
# generated_ids = qwen25_model.generate(**inputs, max_new_tokens=1000)
# generated_ids_trimmed = [
#     out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
# ]
# output_text = processor.batch_decode(
#     generated_ids_trimmed, skip_special_tokens=False, clean_up_tokenization_spaces=False
# )
# print(output_text[0])
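
# Optional round-trip sanity check (a minimal sketch, assuming the slow
# tokenizer path with use_fast=False used above): each remapped token string
# should encode to its original Qwen ID and decode back to the same text,
# confirming that the trie and the encoder/decoder dicts are consistent.
# It uses only names defined earlier in this script.
for token_id, token_text in ID_TO_NEW_TOKEN.items():
    ids = processor.tokenizer.encode(token_text, add_special_tokens=False)
    assert ids == [token_id], f"{token_text!r} encoded to {ids}, expected [{token_id}]"
    assert processor.tokenizer.decode([token_id]) == token_text
print("All remapped special tokens round-trip correctly")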