vlm_requests/Qwen/Qwen2-VL-2B-Instruct_eval_request_False_float16_Original.json
{
"model": "Qwen/Qwen2-VL-2B-Instruct",
"model_api_url": "",
"model_api_key": "",
"model_api_name": "",
"base_model": "",
"revision": "main",
"precision": "float16",
"private": false,
"weight_type": "Original",
"status": "FINISHED",
"submitted_time": "2025-01-24T02:46:12Z",
"model_type": "\ud83d\udfe2 : pretrained",
"params": 2.209,
"runsh": "#!/bin/bash\ncurrent_file=\"$0\"\ncurrent_dir=\"$(dirname \"$current_file\")\"\nSERVER_IP=$1\nSERVER_PORT=$2\nPYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py --server_ip $SERVER_IP --server_port $SERVER_PORT \"${@:3}\" --cfg $current_dir/meta.json\n",
"adapter": "import torch\nfrom typing import Dict, Any\nimport time\nfrom transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor\nfrom flagevalmm.server import ServerDataset\nfrom flagevalmm.models.base_model_adapter import BaseModelAdapter\nfrom flagevalmm.server.utils import parse_args, process_images_symbol\nfrom qwen_vl_utils import process_vision_info\n\n\nclass CustomDataset(ServerDataset):\n def __getitem__(self, index):\n data = self.get_data(index)\n question_id = data[\"question_id\"]\n img_path = data[\"img_path\"]\n qs = data[\"question\"]\n qs, idx = process_images_symbol(qs)\n idx = set(idx)\n img_path_idx = []\n for i in idx:\n if i < len(img_path):\n img_path_idx.append(img_path[i])\n else:\n print(\"[warning] image index out of range\")\n return question_id, img_path_idx, qs\n\n\nclass ModelAdapter(BaseModelAdapter):\n def model_init(self, task_info: Dict):\n ckpt_path = task_info[\"model_path\"]\n torch.set_grad_enabled(False)\n with self.accelerator.main_process_first():\n tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)\n model = Qwen2VLForConditionalGeneration.from_pretrained(\n ckpt_path,\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n attn_implementation=\"flash_attention_2\",\n )\n\n model = self.accelerator.prepare_model(model, evaluation_mode=True)\n self.tokenizer = tokenizer\n if hasattr(model, \"module\"):\n model = model.module\n self.model = model\n self.processor = AutoProcessor.from_pretrained(ckpt_path)\n\n def build_message(\n self,\n query: str,\n image_paths=[],\n ) -> str:\n messages = []\n messages.append(\n {\n \"role\": \"user\",\n \"content\": [],\n },\n )\n for img_path in image_paths:\n messages[-1][\"content\"].append(\n {\"type\": \"image\", \"image\": img_path},\n )\n # add question\n messages[-1][\"content\"].append(\n {\n \"type\": \"text\",\n \"text\": query,\n },\n )\n return messages\n\n def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):\n results = []\n cnt = 0\n\n data_loader = self.create_data_loader(\n CustomDataset, task_name, batch_size=1, num_workers=0\n )\n for question_id, img_path, qs in data_loader:\n if cnt == 1:\n start_time = time.perf_counter()\n cnt += 1\n\n question_id = question_id[0]\n img_path_flaten = [p[0] for p in img_path]\n qs = qs[0]\n messages = self.build_message(qs, image_paths=img_path_flaten)\n\n text = self.processor.apply_chat_template(\n messages, tokenize=False, add_generation_prompt=True\n )\n image_inputs, video_inputs = process_vision_info(messages)\n inputs = self.processor(\n text=[text],\n images=image_inputs,\n videos=video_inputs,\n padding=True,\n return_tensors=\"pt\",\n )\n inputs = inputs.to(\"cuda\")\n\n # Inference\n generated_ids = self.model.generate(**inputs, max_new_tokens=1024)\n generated_ids_trimmed = [\n out_ids[len(in_ids) :]\n for in_ids, out_ids in zip(inputs.input_ids, generated_ids)\n ]\n response = self.processor.batch_decode(\n generated_ids_trimmed,\n skip_special_tokens=True,\n clean_up_tokenization_spaces=False,\n )[0]\n\n self.accelerator.print(f\"{qs}\\n{response}\\n\\n\")\n results.append(\n {\"question_id\": question_id, \"answer\": response.strip(), \"prompt\": qs}\n )\n rank = self.accelerator.state.local_process_index\n\n self.save_result(results, meta_info, rank=rank)\n self.accelerator.wait_for_everyone()\n\n if self.accelerator.is_main_process:\n correct_num = self.collect_results_and_save(meta_info)\n total_time = time.perf_counter() - start_time\n print(\n f\"Total time: 
{total_time}\\nAverage time:{total_time / cnt}\\nResults_collect number: {correct_num}\"\n )\n\n print(\"rank\", rank, \"finished\")\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n model_adapter = ModelAdapter(\n server_ip=args.server_ip,\n server_port=args.server_port,\n timeout=args.timeout,\n extra_cfg=args.cfg,\n )\n model_adapter.run()\n",
"eval_id": 26049,
"flageval_id": 1054
}
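
For reference, the generation flow encoded in the "adapter" field above can be exercised standalone. The sketch below mirrors that code path (apply_chat_template, process_vision_info, generate, then decoding only the newly generated tokens), assuming transformers and qwen_vl_utils are installed; the image path and question are hypothetical placeholders, and the flash_attention_2 setting is omitted so the sketch runs without that optional dependency.

import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

ckpt_path = "Qwen/Qwen2-VL-2B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    ckpt_path, device_map="auto", torch_dtype=torch.bfloat16
)
processor = AutoProcessor.from_pretrained(ckpt_path)

# Same message structure build_message() produces: image entries first, then the text query.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "example.jpg"},  # hypothetical image path
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs,
    padding=True, return_tensors="pt",
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=1024)
# Strip the prompt tokens so only the model's answer is decoded.
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])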