---
base_model: llm-jp/llm-jp-3-13b
tags:
  - text-generation-inference
  - transformers
  - unsloth
  - llama
  - trl
license: apache-2.0
language:
  - en
---

# Uploaded model

- **Developed by:** hzhn
- **License:** apache-2.0
- **Finetuned from model:** llm-jp/llm-jp-3-13b

This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.

## Instruction Tuning

The model was fine-tuned on the following dataset.

| Language | Dataset | Description |
|---|---|---|
| Japanese | ichikara-instruction-003-001-1.json | A manually constructed instruction dataset |

Dataset creation team: Satoshi Sekine, Maya Ando, Michiko Goto, Kumi Suzuki, Daisuke Kawahara, Naoya Inoue, Kentaro Inui. ichikara-instruction: Constructing a Japanese instruction dataset for LLMs. In Proceedings of the 30th Annual Meeting of the Association for Natural Language Processing (2024).

## Usage

The following code was used to generate the answers for Elyza-tasks-100-TV_0.jsonl.

```python
import json
import re
import torch
import bitsandbytes as bnb
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from tqdm import tqdm
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from trl import SFTTrainer

# Hugging Face token (replace with your own)
HF_TOKEN = "your_token"

base_model_id = "llm-jp/llm-jp-3-13b"  # base model to fine-tune
new_model_id = "llm-jp-3-13b-it_lora"  # name for the fine-tuned adapter

# Load the base model in 4-bit NF4 with bfloat16 compute
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto"
)

tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
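```

LoRA adapters are attached to every 4-bit quantized linear layer in the model. The helper below walks the module tree, collects the names of all `bnb.nn.Linear4bit` layers, and excludes `lm_head`, which stays in 16-bit; the resulting names become the `target_modules` of the LoRA config (rank 16, alpha 32).

```python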
def find_all_linear_names(model):
    cls = bnb.nn.Linear4bit  # the 4-bit quantized linear layer class
    lora_module_names = set()  # collected linear layer names

    # Walk every module in the model
    for name, module in model.named_modules():
        if isinstance(module, cls):  # module is a 4-bit quantized linear layer
            names = name.split('.')  # split the module path (handles nesting)
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])  # keep the leaf name

    # 'lm_head' must be excluded because it is kept in 16-bit
    if 'lm_head' in lora_module_names:
        lora_module_names.remove('lm_head')

    return list(lora_module_names)

modules = find_all_linear_names(model)
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=modules,
)

model = get_peft_model(model, peft_config)
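```

The training data is then loaded and each example is rendered into a simple instruction/response prompt ("指示" = instruction, "回答" = answer), with the tokenizer's EOS token appended so the model learns where to stop.

```python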
dataset = load_dataset("json", data_files="./ichikara-instruction-003-001-1.json")

# Prompt format used during training
prompt = """### 指示
{}
### 回答
{}"""

EOS_TOKEN = tokenizer.eos_token  # the tokenizer's end-of-sequence token

def formatting_prompts_func(examples):
    """Render each example into the training prompt format."""
    input = examples["text"]     # instruction text
    output = examples["output"]  # reference answer
    text = prompt.format(input, output) + EOS_TOKEN
    return {"formatted_text": text}  # add a new "formatted_text" field

# Apply the format to every example
dataset = dataset.map(
    formatting_prompts_func,
    num_proc=4,  # number of parallel workers
)
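```

Training runs through TRL's `SFTTrainer` with paged AdamW, gradient accumulation, and a 512-token sequence limit; only the LoRA adapters are updated while the quantized base model stays frozen.

```python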
training_arguments = TrainingArguments(
    output_dir=new_model_id,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=2,
    optim="paged_adamw_32bit",
    num_train_epochs=1,
    logging_strategy="steps",
    logging_steps=10,
    warmup_steps=10,
    save_steps=100,
    save_total_limit=2,
    max_steps=-1,
    learning_rate=5e-5,
    fp16=False,
    bf16=False,
    seed=3407,
    group_by_length=True,
    report_to="none",
)

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset["train"],
    peft_config=peft_config,
    max_seq_length=512,
    dataset_text_field="formatted_text",
    tokenizer=tokenizer,
    args=training_arguments,
    packing=False,
)

model.config.use_cache = False  # disable the KV cache during training
trainer.train()                 # run training
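```

For inference, each task from `elyza-tasks-100-TV_0.jsonl` is wrapped in the same instruction prompt used during training and answered with greedy decoding, then the results are written out as JSONL.

```python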
# Read the evaluation tasks; objects may span multiple lines, so lines are
# accumulated until a complete JSON object is closed
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
model.config.use_cache = True  # re-enable the KV cache for faster generation

results = []
for data in tqdm(datasets):
    input = data["input"]

    # Same prompt format as training (no leading indentation inside the f-string)
    prompt = f"""### 指示
{input}
### 回答
"""

    tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
    attention_mask = torch.ones_like(tokenized_input)

    with torch.no_grad():
        outputs = model.generate(
            tokenized_input,
            attention_mask=attention_mask,
            max_new_tokens=100,
            do_sample=False,            # greedy decoding
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )[0]
    output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})
# Strip any path prefix from the model id to build the output filename
jsonl_id = re.sub(".*/", "", new_model_id)
with open(f"./{jsonl_id}-outputs.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)  # ensure_ascii=False keeps Japanese text readable
        f.write('\n')
```
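
The script above never persists the trained adapter. As a minimal sketch (not part of the original script), the LoRA weights can be saved locally and optionally pushed to the Hub; the repo name simply reuses `new_model_id`, and pushing assumes `HF_TOKEN` has write access.

```python
# Sketch: persist the LoRA adapter (assumes `model` is the PeftModel returned
# by get_peft_model above and that HF_TOKEN has write access for the push)
model.save_pretrained(new_model_id)      # writes adapter_config.json + adapter weights
tokenizer.save_pretrained(new_model_id)

# Optional: upload the adapter as a private repo on the Hugging Face Hub
model.push_to_hub(new_model_id, token=HF_TOKEN, private=True)
tokenizer.push_to_hub(new_model_id, token=HF_TOKEN, private=True)
```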