Uploaded model
- Developed by: kachi129
- License: apache-2.0
- Finetuned from model: llm-jp/llm-jp-3-13b
This llama model was trained 2x faster with Unsloth and Huggingface's TRL library.
Sample Use
'''python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from unsloth import FastLanguageModel
import torch

# unsloth supports RoPE scaling, so the context length can be set freely.
max_seq_length = 512
# None lets the library pick the dtype automatically.
dtype = None
# Load the weights 4-bit quantized to save GPU memory.
# NOTE(review): the original comment said "8B-class model", but the model
# used below is 13B — the intent (fit a large model in memory) is the same.
load_in_4bit = True
# Hugging Face ids: the base model to fine-tune and the name to give the
# fine-tuned model.
# NOTE(review): this account prefix ("kachi-1216") differs from the one used
# at push time at the end of the script ("kachi129") — confirm which account
# is intended.
model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "kachi-1216/llm-jp-3-13b-finetune-2"
FastLanguageModel インスタンスを作成
# Create the FastLanguageModel instance: quantized base model plus tokenizer.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,  # allow custom code shipped in the model repo to run
)
SFT用のモデルを用意
# Attach LoRA adapters to the base model for supervised fine-tuning.
model = FastLanguageModel.get_peft_model(
    model,
    r = 32,  # LoRA rank
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0.05,
    bias = "none",
    use_gradient_checkpointing = "unsloth",  # unsloth's memory-saving checkpointing
    random_state = 3407,
    use_rslora = False,      # plain LoRA scaling, not rank-stabilized
    loftq_config = None,
    max_seq_length = max_seq_length,
)
from datasets import load_dataset

# Training data: the ichikara-instruction corpus uploaded to /content.
data_files = "/content/ichikara-instruction-003-001-1.json"
dataset = load_dataset("json", data_files=data_files)
パスの指定にご注意ください。アップロードしたファイルを右クリックし、パスをコピーをクリック、上記の data_files と合致していることをご確認ください。Omnicampus のディレクトリ構造とは異なるかもしれません。
学習時のプロンプトフォーマットの定義
prompt = """### æ瀺 {}
åç
{}"""
""" formatting_prompts_func: åããŒã¿ãããã³ããã«åããã圢åŒã«åããã """ EOS_TOKEN = tokenizer.eos_token # ããŒã¯ãã€ã¶ãŒã®EOSããŒã¯ã³ïŒææ«ããŒã¯ã³ïŒ def formatting_prompts_func(examples): input = examples["text"] # å ¥åããŒã¿ output = examples["output"] # åºåããŒã¿ text = prompt.format(input, output) + EOS_TOKEN # ããã³ããã®äœæ return { "formatted_text" : text, } # æ°ãããã£ãŒã«ã "formatted_text" ãè¿ã pass
# Apply the formatting function to every record in the dataset.
dataset = dataset.map(
    formatting_prompts_func,
    num_proc= 4,  # number of parallel worker processes
)
# Notebook cell echo: display the resulting DatasetDict.
dataset
データを確認
# Sanity check: print one formatted training sample.
print(dataset["train"]["formatted_text"][3])
""" training_arguments: åŠç¿ã®èšå®
output_dir: -ãã¬ãŒãã³ã°åŸã®ã¢ãã«ãä¿åãããã£ã¬ã¯ããª
per_device_train_batch_size:
- ããã€ã¹ããšã®ãã¬ãŒãã³ã°ããããµã€ãº
per_device_eval_batch_size:
- ããã€ã¹ããšã®è©äŸ¡ããããµã€ãº
gradient_accumulation_steps:
- åŸé ãæŽæ°ããåã«ã¹ããããç©ã¿éããåæ°
optim:
- ãªããã£ãã€ã¶ã®èšå®
num_train_epochs:
- ãšããã¯æ°
eval_strategy:
- è©äŸ¡ã®æŠç¥ ("no"/"steps"/"epoch")
eval_steps:
- eval_strategyã"steps"ã®ãšããè©äŸ¡ãè¡ãstepéé
logging_strategy:
- ãã°èšé²ã®æŠç¥
logging_steps:
- ãã°ãåºåããã¹ãããéé
warmup_steps:
- åŠç¿çã®ãŠã©ãŒã ã¢ããã¹ãããæ°
save_steps:
- ã¢ãã«ãä¿åããã¹ãããéé
save_total_limit:
- ä¿åããŠããcheckpointã®æ°
max_steps:
- ãã¬ãŒãã³ã°ã®æ倧ã¹ãããæ°
learning_rate:
- åŠç¿ç
fp16:
- 16bitæµ®åå°æ°ç¹ã®äœ¿çšèšå®ïŒç¬¬8åæŒç¿ãåèã«ãããšè¯ãã§ãïŒ
bf16:
- BFloat16ã®äœ¿çšèšå®
group_by_length:
- å ¥åã·ãŒã±ã³ã¹ã®é·ãã«ããããããã°ã«ãŒãå (ãã¬ãŒãã³ã°ã®å¹çå)
report_to:
- ãã°ã®éä¿¡å ("wandb"/"tensorboard"ãªã©) """ from trl import SFTTrainer from transformers import TrainingArguments from unsloth import is_bfloat16_supported
trainer = SFTTrainer( model = model, tokenizer = tokenizer, train_dataset=dataset["train"], max_seq_length = max_seq_length, dataset_text_field="formatted_text", packing = False, args = TrainingArguments( per_device_train_batch_size = 2, gradient_accumulation_steps = 4, num_train_epochs = 1, logging_steps = 10, warmup_steps = 10, save_steps=100, save_total_limit=2, max_steps=-1, learning_rate = 2e-4, fp16 = not is_bfloat16_supported(), bf16 = is_bfloat16_supported(), group_by_length=True, seed = 3407, output_dir = "outputs", report_to = "none", ), )
#@title çŸåšã®ã¡ã¢ãªäœ¿çšéã衚瀺 gpu_stats = torch.cuda.get_device_properties(0) start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.") print(f"{start_gpu_memory} GB of memory reserved.")
#@title åŠç¿å®è¡ trainer_stats = trainer.train()
ELYZA-tasks-100-TVの読み込み。事前にファイルをアップロードしてください
データセットの読み込み。
omnicampusの開発環境では、左にタスクのjsonlをドラッグアンドドロップしてから実行。
import json

# Read the ELYZA-tasks-100-TV file. A record may be pretty-printed across
# several lines, so lines are accumulated until the buffer ends with "}"
# and only then parsed as one JSON object.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    buffer = ""
    for raw_line in f:
        buffer += raw_line.strip()
        if buffer.endswith("}"):
            datasets.append(json.loads(buffer))
            buffer = ""
学習したモデルを用いてタスクを実行
from tqdm import tqdm
推論するためにモデルのモードを変更
# Switch the model into unsloth's fast inference mode.
FastLanguageModel.for_inference(model)

# Run every task through the fine-tuned model with greedy decoding.
results = []
for dt in tqdm(datasets):
    task_input = dt["input"]  # renamed from `input`, which shadowed the builtin

    # Same headings as the training template; the answer is recovered by
    # splitting the decoded text on "\n### 回答".
    # NOTE(review): the heading strings were mojibake in the original
    # rendering — restored to the proper Japanese here.
    inference_prompt = f"""### 指示
{task_input}
### 回答
"""

    inputs = tokenizer([inference_prompt], return_tensors = "pt").to(model.device)

    outputs = model.generate(
        **inputs,
        max_new_tokens = 512,    # upper bound on the generated answer length
        use_cache = True,
        do_sample=False,         # deterministic (greedy) decoding
        repetition_penalty=1.2,  # discourage degenerate repetition
    )
    # Keep only the text after the answer heading (drops the echoed prompt).
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]

    results.append({"task_id": dt["task_id"], "input": task_input, "output": prediction})
jsonlで保存
with open(f"{new_model_id}_output.jsonl", 'w', encoding='utf-8') as f: for result in results: json.dump(result, f, ensure_ascii=False) f.write('\n')
モデルとトークナイザーをHugging Faceにアップロード。
一旦privateでアップロードしてください。
最終成果物が決まったらpublicにしますのでお願いします。
現在公開しているModel_Inference_Template.ipynbはunslothを想定していないため、そのままでは動かない可能性があります。
new_model_id = "kachi129/llm-jp-3-13b-finetune-2" model.push_to_hub_merged( new_model_id, tokenizer=tokenizer, save_method="lora", token=HF_TOKEN, private=True ) '''
Model tree for kachi129/llm-jp-3-13b-finetune-2
Base model
llm-jp/llm-jp-3-13b