Update deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json
#3
by daiteng01 - opened
deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json
CHANGED
@@ -1,20 +1,20 @@
 {
-    [previous contents of lines 2-19; not shown in this extract]
+    "model": "deepseek-ai/Janus-Pro-7B",
+    "model_api_url": "",
+    "model_api_key": "",
+    "model_api_name": "",
+    "base_model": "",
+    "revision": "main",
+    "precision": "float16",
+    "private": false,
+    "weight_type": "Original",
+    "status": "RUNNING",
+    "submitted_time": "2025-02-11T08:15:48Z",
+    "model_type": "🟢 : pretrained",
+    "params": 0,
+    "runsh": "#!/bin/bash\ncurrent_file=\"$0\"\ncurrent_dir=\"$(dirname \"$current_file\")\"\nSERVER_IP=$1\nSERVER_PORT=$2\n\ncd /share/project/daiteng01/deepseek/Janus-main\npip install -e . -i http://10.1.1.16/repository/pypi-group/simple --trusted-host 10.1.1.16\ncd -\nPYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py --server_ip $SERVER_IP --server_port $SERVER_PORT \"${@:3}\" --cfg $current_dir/meta.json\n",
+    "adapter": "import time\n\nfrom flagevalmm.server import ServerDataset\nfrom flagevalmm.models.base_model_adapter import BaseModelAdapter\nfrom flagevalmm.server.utils import (\n    parse_args,\n    default_collate_fn,\n    process_images_symbol,\n    load_pil_image,\n)\nfrom typing import Dict, Any\n\nimport torch\nfrom transformers import AutoModelForCausalLM\nfrom janus.models import MultiModalityCausalLM, VLChatProcessor\nfrom janus.utils.io import load_pil_images\n\n\nclass CustomDataset(ServerDataset):\n    def __getitem__(self, index):\n        data = self.get_data(index)\n        qs, idx = process_images_symbol(\n            data[\"question\"], dst_pattern=\"<image_placeholder>\"\n        )\n        question_id = data[\"question_id\"]\n        img_path = data[\"img_path\"]\n        image_list, idx = load_pil_image(\n            img_path, idx, reqiures_img=True, reduplicate=False\n        )\n\n        return question_id, qs, image_list\n\n\nclass ModelAdapter(BaseModelAdapter):\n    def model_init(self, task_info: Dict):\n        ckpt_path = task_info[\"model_path\"]\n\n        torch.set_grad_enabled(False)\n        with self.accelerator.main_process_first():\n            self.vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(ckpt_path)\n            self.tokenizer = self.vl_chat_processor.tokenizer\n\n            vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(\n                ckpt_path, trust_remote_code=True\n            )\n            vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()\n        model = self.accelerator.prepare_model(vl_gpt, evaluation_mode=True)\n        if hasattr(model, \"module\"):\n            model = model.module\n        self.model = model\n\n    def build_message(\n        self,\n        query: str,\n        image_paths=[],\n    ) -> list:\n        messages = [\n            {\n                \"role\": \"<|User|>\",\n                \"content\": f\"<image_placeholder>\\n{query}\",\n                \"images\": image_paths,\n            },\n            {\"role\": \"<|Assistant|>\", \"content\": \"\"},\n        ]\n        return messages\n\n    def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):\n        results = []\n        cnt = 0\n\n        data_loader = self.create_data_loader(\n            CustomDataset,\n            task_name,\n            collate_fn=default_collate_fn,\n            batch_size=1,\n            num_workers=2,\n        )\n        for question_id, question, images in data_loader:\n            if cnt == 1:\n                start_time = time.perf_counter()\n            cnt += 1\n            messages = self.build_message(question[0], images[0])\n            pil_images = load_pil_images(messages)\n            prepare_inputs = self.vl_chat_processor(\n                conversations=messages, images=pil_images, force_batchify=True\n            ).to(self.model.device)\n\n            inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)\n\n            # run the model to get the response\n            outputs = self.model.language_model.generate(\n                inputs_embeds=inputs_embeds,\n                attention_mask=prepare_inputs.attention_mask,\n                pad_token_id=self.tokenizer.eos_token_id,\n                bos_token_id=self.tokenizer.bos_token_id,\n                eos_token_id=self.tokenizer.eos_token_id,\n                max_new_tokens=4096,\n                do_sample=False,\n                use_cache=True,\n            )\n\n            response = self.tokenizer.decode(\n                outputs[0].cpu().tolist(), skip_special_tokens=True\n            )\n\n            self.accelerator.print(f\"{question[0]}\\n{response}\\n\\n\")\n            results.append(\n                {\n                    \"question_id\": question_id[0],\n                    \"answer\": response.strip(),\n                    \"prompt\": question[0],\n                }\n            )\n        rank = self.accelerator.state.local_process_index\n\n        # save results for the rank\n        self.save_result(results, meta_info, rank=rank)\n        self.accelerator.wait_for_everyone()\n\n        if self.accelerator.is_main_process:\n            correct_num = self.collect_results_and_save(meta_info)\n            total_time = time.perf_counter() - start_time\n            print(\n                f\"Total time: {total_time}\\nAverage time:{total_time / cnt}\\nResults_collect number: {correct_num}\"\n            )\n\n        print(\"rank\", rank, \"finished\")\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    model_adapter = ModelAdapter(\n        server_ip=args.server_ip,\n        server_port=args.server_port,\n        timeout=args.timeout,\n        extra_cfg=args.cfg,\n    )\n    model_adapter.run()\n",
+    "eval_id": 26102,
+    "flageval_id": 1056,
+    "failed_status": 45
 }
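For reviewers unfamiliar with the `adapter` payload: it follows the standard Janus inference recipe (chat processor → `prepare_inputs_embeds` → `language_model.generate`). Below is a minimal standalone sketch of that same flow, assuming the `janus` package from the deepseek-ai/Janus repo is installed; the image path and question are placeholders, not part of this submission.

```python
# Minimal sketch of the Janus-Pro generate flow used by the adapter above.
# Assumes the `janus` package is installed; "./example.jpg" is a placeholder.
import torch
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images

model_path = "deepseek-ai/Janus-Pro-7B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

conversation = [
    {
        "role": "<|User|>",
        "content": "<image_placeholder>\nDescribe this image.",
        "images": ["./example.jpg"],  # placeholder image path
    },
    {"role": "<|Assistant|>", "content": ""},
]

# load_pil_images reads the image paths referenced in the conversation
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
    conversations=conversation, images=pil_images, force_batchify=True
).to(vl_gpt.device)

# Janus embeds images and text jointly, then generates with the language model
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,
    use_cache=True,
)
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
print(answer)
```

The submitted adapter wraps this same flow in FlagEvalMM's `BaseModelAdapter`, feeding questions through `create_data_loader` and saving per-rank results before the main process collects them.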