Uploaded model
- Developed by: semkaz
- License: apache-2.0
- Finetuned from model: llm-jp/llm-jp-3-13b
This Llama-architecture model was trained 2x faster with Unsloth and Hugging Face's TRL library.
Python Code
The following is the code used to generate the answers for elyza-tasks-100-TV_0.jsonl.
!pip uninstall unsloth -y
!pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
!pip install --upgrade torch
!pip install --upgrade xformers
import torch
if torch.cuda.get_device_capability()[0] >= 8:
    # Install FlashAttention 2 only on GPUs with compute capability 8.0 (Ampere) or newer
    !pip install --no-deps packaging ninja einops "flash-attn>=2.6.3"
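## (Optional) sanity check before loading the model. A minimal sketch; prints the
## installed torch build, its CUDA version, and the detected GPU.
print(torch.__version__, torch.version.cuda, torch.cuda.get_device_name(0))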
HF_TOKEN = "your-token"
## Load llm-jp/llm-jp-3-13b with a 4-bit quantized QLoRA configuration.
from unsloth import FastLanguageModel
import torch
max_seq_length = 512  # Unsloth supports RoPE scaling, so the context length can be set freely
dtype = None  # None lets Unsloth choose the dtype automatically
load_in_4bit = True  # True because we are working with a 13B model
model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "llm-jp-3-13b-it-v1"  # name for the fine-tuned model; "it" = Instruction Tuning
## Create a FastLanguageModel instance
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)
## Prepare the model for SFT
model = FastLanguageModel.get_peft_model(
    model,
    r = 8,  # LoRA rank (alternatively 32)
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0.05,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
    max_seq_length = max_seq_length,
)
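## (Optional) verify how many parameters LoRA actually trains. A sketch assuming
## the object returned by get_peft_model exposes the standard PEFT helper:
model.print_trainable_parameters()  # e.g. "trainable params: ... || all params: ..."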
## Specify the dataset used for training
from datasets import load_dataset
dataset = load_dataset("json", data_files="your-source/ichikara-instruction-003-001-1.json")
## Define the prompt format used during training (kept consistent with the inference prompt below)
prompt = """・ 指示
{}
・ 回答
{}"""
EOS_TOKEN = tokenizer.eos_token  # the tokenizer's EOS (end-of-sequence) token
def formatting_prompts_func(examples):
    input = examples["text"]     # input field
    output = examples["output"]  # output field
    text = prompt.format(input, output) + EOS_TOKEN  # build the training prompt
    return { "formatted_text" : text, }  # return the new "formatted_text" field
pass
## Apply the format to each example
dataset = dataset.map(
    formatting_prompts_func,
    num_proc = 4,  # number of parallel processes
)
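## (Optional) inspect one formatted example to confirm the template was applied.
print(dataset["train"][0]["formatted_text"])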
## Training configuration
from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset["train"],
    max_seq_length = max_seq_length,
    dataset_text_field = "formatted_text",
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        num_train_epochs = 1,
        logging_steps = 10,
        warmup_steps = 10,
        save_steps = 100,
        save_total_limit = 2,
        max_steps = -1,
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        group_by_length = True,
        # optim = "adamw_8bit",
        # weight_decay = 0.01,
        # lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
        report_to = "wandb",
    ),
)
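## Note: the effective batch size is per_device_train_batch_size (2) x
## gradient_accumulation_steps (4) = 8 sequences per optimizer step on a single GPU.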
## Run training
trainer_stats = trainer.train()
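## (Optional) trainer.train() returns a TrainOutput; its metrics include the
## final loss, runtime, and throughput.
print(trainer_stats.metrics)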
## Load ELYZA-tasks-100-TV. Upload the file in advance.
import json
datasets = []
with open("your-source/elyza-tasks-100-TV_0.jsonl", "r") as f:
item = ""
for line in f:
line = line.strip()
item += line
if item.endswith("}"):
datasets.append(json.loads(item))
item = ""
## Run the tasks with the fine-tuned model
from tqdm import tqdm
FastLanguageModel.for_inference(model)
results = []
for dt in tqdm(datasets):
    input = dt["input"]
    prompt = f"""### 指示\n{input}\n### 回答\n"""
    inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
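## (Optional) spot-check the first generation before saving everything.
print(results[0]["output"])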
## Save the results as JSONL
file_path = f"your-source/{new_model_id}_output.jsonl"
with open(file_path, 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write('\n')
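## (Optional) read the first line back to confirm the file is valid JSONL.
with open(file_path, encoding='utf-8') as f:
    print(json.loads(f.readline()))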
## Push only the LoRA adapter to the Hub
model.push_to_hub_merged(
    new_model_id + "_lora",
    tokenizer=tokenizer,
    save_method="lora",
    token=HF_TOKEN,
    private=True,
)
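## (Optional) Reloading the adapter later. A minimal sketch, assuming the pushed
## repo is "semkaz/llm-jp-3-13b-it-v1_lora" and that FastLanguageModel.from_pretrained
## can resolve a LoRA adapter repo against its base model:
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "semkaz/" + new_model_id + "_lora",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    token = HF_TOKEN,
)
FastLanguageModel.for_inference(model)  # switch Unsloth to inference mode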