deepspeed \
    --num_nodes=1 \
    --num_gpus=8 \
    --master_port=25001 \
    llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path mistralai/Mistral-7B-Instruct-v0.1 \
    --version v1 \
    --dataset_config /mnt/bn/algo-masp-nas-2/xiangchen/repo/LLaVA/llava/configs/gpt4v_increasing_ablation/finetune_videollava.yaml \
    --vision_tower google/siglip-large-patch16-256 \
    --pretrain_mm_mlp_adapter /mnt/bn/algo-masp-nas-2/xiangchen/model/masp_models/checkpoints/llava-pretrain-googlesiglip_projector/checkpoint-4000/mm_projector.bin \
    --adapter_module_name none_compress_token_v1_64 \
    --mm_vision_select_layer -2 \
    --mm_use_start_end True \
    --mm_use_patch_token False \
    --image_aspect_ratio pad \
    --num_token_per_image 64 \
    --num_query_token 64 \
    --bf16 True \
    --output_dir /mnt/bn/masp-nas/xiangchen/model/masp_models/checkpoints/llava-mistral-googlesiglip_llava_800k \
    --group_by_modality_length True \
    --num_train_epochs 1 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 2000 \
    --save_total_limit 1 \
    --learning_rate 1e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 2 \
    --lazy_preprocess True \
    --report_to none
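
A quick sanity check on the throughput settings: the effective global batch size is num_gpus × per_device_train_batch_size × gradient_accumulation_steps = 8 × 4 × 4 = 128.

The launcher reads its ZeRO config from ./scripts/zero2.json. For reference, the snippet below writes a minimal ZeRO stage-2 config of the kind the stock LLaVA repo ships. This is a hedged sketch, not the exact file from this fork (verify against your own checkout); the "auto" fields are resolved at runtime by the HuggingFace Trainer's DeepSpeed integration.

# Sketch (assumption): minimal ZeRO stage-2 config; check the fork's own
# scripts/zero2.json before relying on this.
cat > ./scripts/zero2.json <<'EOF'
{
    "bf16": {
        "enabled": "auto"
    },
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "zero_optimization": {
        "stage": 2,
        "overlap_comm": true,
        "contiguous_gradients": true
    }
}
EOF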