deepspeed --num_nodes=1 --num_gpus=7 --master_port=25001 llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path mistralai/Mistral-7B-Instruct-v0.1 \
    --version v1 \
    --dataset_config llava/configs/promptv1_2_increasing_ablation/finetune_gpt4_prompt_140k.yaml \
    --vision_tower eva-vit-g \
    --vit_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/eva_vit_g.pth \
    --qformer_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/blip2_pretrained_flant5xxl.pth \
    --pretrain_mm_mlp_adapter /mnt/bn/algo-masp-nas-2/baiyi.by/checkpoints/videollava-proj-mistral-pretrain-local-1122/mm_projector.bin \
    --mm_vision_select_layer -2 \
    --mm_use_start_end True \
    --mm_use_patch_token False \
    --image_aspect_ratio pad \
    --bf16 True \
    --output_dir /mnt/bn/data-tns-algo-masp/kaili.zhao/checkpoints/llava-mistral_recodebase_gpt4v_TTneg_140k \
    --num_train_epochs 1 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 2000 \
    --save_total_limit 1 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 1 \
    --lazy_preprocess True \
    --report_to none
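
The launcher reads its ZeRO settings from `./scripts/zero2.json`. That file is not shown here; the sketch below is the stock stage-2 config that LLaVA-style repos typically ship, with the size fields deferred to the HuggingFace Trainer via `"auto"`. Treat the specific values as assumptions, not this repo's actual config.

```json
{
    "fp16": {
        "enabled": "auto"
    },
    "bf16": {
        "enabled": "auto"
    },
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "zero_optimization": {
        "stage": 2,
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto"
    }
}
```

With ZeRO stage 2, optimizer states and gradients are partitioned across the 7 ranks while the model parameters stay replicated. The `"auto"` fields are filled in from the command-line arguments above (`--bf16 True`, `--per_device_train_batch_size 16`, `--gradient_accumulation_steps 1`), so the effective global batch size is 7 × 16 × 1 = 112.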