Uploaded model
- Developed by: fuwafuwa012
- License: apache-2.0
- Finetuned from model: llm-jp/llm-jp-3-13b

This Llama model was trained 2x faster with Unsloth and Hugging Face's TRL library.
Code version 2. This is the inference code used to generate answers for elyza-tasks-100-TV_0.jsonl.
""" from unsloth import FastLanguageModel import torch max_seq_length = 512 # unslothã§ã¯RoPEããµããŒãããŠããã®ã§ã³ã³ããã¹ãé·ã¯èªç±ã«èšå®å¯èœ dtype = None # Noneã«ããŠããã°èªåã§èšå® load_in_4bit = True # ä»åã¯13Bã¢ãã«ãæ±ãããTrue
model_id = "llm-jp/llm-jp-3-13b" new_model_id = "llm-jp-3-13b-it" #Fine-Tuningããã¢ãã«ã«ã€ãããååãit: Instruction Tuning
Create the FastLanguageModel instance.
```python
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)
```
Prepare the model for SFT.
```python
model = FastLanguageModel.get_peft_model(
    model,
    r=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=64,
    lora_dropout=0.10,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
    max_seq_length=max_seq_length,
)
```
Specify the dataset used for training.
This example uses the Ichikara Instruction dataset published by LLM-jp. An application is required to access the data, so please apply only if you intend to use it.

Please refrain from republishing Ichikara Instruction on the Hugging Face Hub.

The data is licensed under CC-BY-NC-SA, so use it on the understanding that the model inherits that license.
After completing the application via the link below, you will find a Google Drive; download the entire folder named Distribution20241221_all.

This example uses "ichikara-instruction-003-001-1.json". Extract the archive if necessary (e.g. with !unzip) and set the dataset path accordingly.
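A minimal sketch of that step, assuming the folder arrived as a zip archive (the archive name below is an assumption; adjust it to the file you actually downloaded):

```python
import zipfile

# Hypothetical archive name; adjust to your actual download.
with zipfile.ZipFile("Distribution20241221_all.zip") as zf:
    zf.extractall(".")  # then point data_files at the extracted JSON file
```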
In the omnicampus development environment, drag and drop the downloaded data into the file pane on the left.

In Google Colab, upload it via drag & drop in the left sidebar.
https://liat-aip.sakura.ne.jp/wp/llmのための日本語インストラクションデータ作成/llmのための日本語インストラクションデータ-公開/
関根聡, 安藤まや, 後藤美知子, 鈴木久美, 河原大輔, 井之上直也, 乾健太郎. ichikara-instruction: LLMのための日本語インストラクションデータの構築. 言語処理学会第30回年次大会 (2024). (Sekine et al., "ichikara-instruction: Constructing Japanese Instruction Data for LLMs," 30th Annual Meeting of the Association for Natural Language Processing, 2024.)
```python
from datasets import load_dataset

dataset = load_dataset("json", data_files="./ichikara-instruction-003-001-1.json")
```
Be careful when specifying the path: right-click the uploaded file, choose "Copy path", and confirm that it matches the data_files argument above. The directory structure may differ from Omnicampus.
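An optional sanity check (a minimal sketch using the same path as above):

```python
import os

data_files = "./ichikara-instruction-003-001-1.json"
# Fail fast if the uploaded file is not where load_dataset expects it.
assert os.path.exists(data_files), f"File not found: {data_files}; re-copy the path"
```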
Define the prompt format used during training.
prompt = """### æ瀺 {}
åç
{}"""
""" formatting_prompts_func: åããŒã¿ãããã³ããã«åããã圢åŒã«åããã """ EOS_TOKEN = tokenizer.eos_token # ããŒã¯ãã€ã¶ãŒã®EOSããŒã¯ã³ïŒææ«ããŒã¯ã³ïŒ def formatting_prompts_func(examples): input = examples["text"] # å ¥åããŒã¿ output = examples["output"] # åºåããŒã¿ text = prompt.format(input, output) + EOS_TOKEN # ããã³ããã®äœæ return { "formatted_text" : text, } # æ°ãããã£ãŒã«ã "formatted_text" ãè¿ã pass
```python
# Apply the formatting to every record
dataset = dataset.map(
    formatting_prompts_func,
    num_proc=4,  # number of parallel workers
)
dataset
```
Inspect the data.
print(dataset["train"]["formatted_text"][3])
```python
from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset["train"],
    max_seq_length=max_seq_length,
    dataset_text_field="formatted_text",
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        num_train_epochs=1,
        logging_steps=10,
        warmup_steps=10,
        save_steps=100,
        save_total_limit=2,
        max_steps=-1,
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        group_by_length=True,
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)
```
```python
#@title Show current memory usage
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")
```
```python
#@title Run training
trainer_stats = trainer.train()
```
Load ELYZA-tasks-100-TV. Upload the file beforehand.
Load the dataset. In the omnicampus development environment, drag and drop the task jsonl into the left pane before running.
```python
import json

datasets = []
with open("/content/elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
```
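The loop above tolerates records that span multiple lines. If every record in elyza-tasks-100-TV_0.jsonl sits on its own line (standard JSONL), a simpler sketch also works:

```python
# Assumes strictly one JSON object per line.
datasets = []
with open("/content/elyza-tasks-100-TV_0.jsonl", "r") as f:
    for line in f:
        line = line.strip()
        if line:
            datasets.append(json.loads(line))
```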
Run the tasks with the trained model.
```python
from tqdm import tqdm
```
Switch the model into inference mode.
```python
FastLanguageModel.for_inference(model)
```
```python
results = []
for dt in tqdm(datasets):
    input = dt["input"]

    prompt = f"""### 指示\n{input}\n### 回答\n"""

    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    # Sampling-based generation; this replaces the earlier greedy
    # (do_sample=False) call, as noted in the change summary below.
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.2,
        use_cache=True,
        do_sample=True,
    )

    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split("\n### 回答")[-1]

    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
```
Save the results as JSONL.
with open(f"{new_model_id}_output.jsonl", 'w', encoding='utf-8') as f: for result in results: json.dump(result, f, ensure_ascii=False) f.write('\n')
"""ã¢ãã«ãšããŒã¯ãã€ã¶ãŒãHugging Faceã«ã¢ããããŒãããŸãã
æ¬ã³ãŒãã§ã¯LoRAã®ã¢ããã¿ã®ã¿ãä¿åããŸãã
ãã®ã¢ããã¿ãçšããæšè«æ¹æ³ã¯Model_Inference_Template_unsloth_20241127.ipynbããåç
§ãã ããã
äžæŠprivateã§ã¢ããããŒãããŠãã ããã
https://docs.unsloth.ai/basics/saving-and-using-models
"""
Save only the LoRA adapter.
```python
model.push_to_hub_merged(
    new_model_id + "_lora",
    tokenizer=tokenizer,
    save_method="lora",
    token=HF_TOKEN,  # a write-scoped Hugging Face token must be set beforehand
    private=True,
)
```
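The referenced notebook is not reproduced here. As a rough sketch, assuming Unsloth can load the pushed adapter repository directly and that HF_TOKEN has read access to the private repo, inference with the saved adapter would look like:

```python
from unsloth import FastLanguageModel

# Load the base model together with the LoRA adapter pushed above.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="fuwafuwa012/llm-jp-3-13b-it_lora",
    dtype=None,
    load_in_4bit=True,
    token=HF_TOKEN,
)
FastLanguageModel.for_inference(model)
```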
This model was trained with the hyperparameter values of the Unsloth sample code changed as follows (summarized in the sketch below):

- lora_alpha was set to 64, increasing the scaling applied to the LoRA updates.
- lora_dropout was raised to 0.10.
- The learning rate was lowered to 1e-5.
- Adding do_sample=True together with temperature and top_p at generation time improved the diversity of the outputs.
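Condensed into one sketch; the "sample" values in the comments are the defaults commonly shipped with the Unsloth notebook and are assumptions, not taken from this card:

```python
# Hyperparameter deltas vs. the Unsloth sample notebook
# (sample-default values in the comments are assumptions):
lora_alpha = 64       # sample default: 32
lora_dropout = 0.10   # sample default: 0.05
learning_rate = 1e-5  # sample default: 2e-4
sampling_kwargs = dict(do_sample=True, temperature=0.7, top_p=0.9)  # sample uses greedy decoding
```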