from huggingface_hub import notebook_login
import locale
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Colab sometimes reports a non-UTF-8 locale; force UTF-8 so subprocess output decodes cleanly
locale.getpreferredencoding = lambda: "UTF-8"

# Log in to the Hugging Face Hub (needed for push_to_hub below)
notebook_login()

# Load the base model and tokenizer.
# chinese-alpaca-2-7b is a LLaMA-2-based causal language model, so it is loaded with
# AutoModelForCausalLM rather than AutoModelForMaskedLM.
checkpoint = "ziqingyang/chinese-alpaca-2-7b"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# LoRA supervised fine-tuning with LLaMA-Factory.
# --model_name_or_path must be a Hub ID or local path, not the in-memory `model` object,
# and --template should match the base model (llama2_zh for Chinese-Alpaca-2).
# The dataset name `mydata` has to be registered in data/dataset_info.json (see the sketch further below).
!CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage sft \
    --model_name_or_path ziqingyang/chinese-alpaca-2-7b \
    --do_train \
    --dataset mydata \
    --template llama2_zh \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ckpoint \
    --overwrite_cache \
    --auto_find_batch_size=False \
    --per_device_train_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 10 \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --plot_loss \
    --fp16

# Note: `model` here is still the untouched base checkpoint; the LoRA weights produced by the
# training run live under ckpoint/ (see the merge sketch at the end of this notebook).
model.push_to_hub("gutalk_classification")
tokenizer.push_to_hub("gutalk_classification")
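
# --- Hypothetical sketch (not part of the original notebook): registering `mydata` ---
# LLaMA-Factory resolves the --dataset name through data/dataset_info.json. The file name and
# column mapping below are assumptions about how an alpaca-style dataset could be registered;
# adjust them to the actual data file. Writing the whole file as done here would overwrite any
# existing entries, so in practice the entry is usually added by hand.
import json

dataset_info = {
    "mydata": {
        "file_name": "mydata.json",  # records with "instruction" / "input" / "output" fields
        "columns": {"prompt": "instruction", "query": "input", "response": "output"},
    }
}
with open("data/dataset_info.json", "w", encoding="utf-8") as f:
    json.dump(dataset_info, f, ensure_ascii=False, indent=2)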
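
# --- Hypothetical sketch (not part of the original notebook): using the LoRA adapter ---
# The push_to_hub calls above upload the base checkpoint only, because training ran in a separate
# process. A minimal sketch, assuming peft is installed and the adapter was saved under ckpoint/,
# of how the fine-tuned weights could be loaded, smoke-tested, and pushed.
from peft import PeftModel

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

base = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=dtype).to(device)
peft_model = PeftModel.from_pretrained(base, "ckpoint")  # attach the LoRA adapter from output_dir
merged = peft_model.merge_and_unload()                   # fold the adapter into the base weights

# Quick generation check with the fine-tuned model
inputs = tokenizer("Hello, please introduce yourself.", return_tensors="pt").to(device)
outputs = merged.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Pushing `merged` (rather than the untouched base model) uploads the fine-tuned weights:
# merged.push_to_hub("gutalk_classification")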