Uploaded model

  • Developed by: fajoie
  • License: apache-2.0
  • Finetuned from model: llm-jp/llm-jp-3-13b

This Llama-architecture model was trained 2x faster with Unsloth and Hugging Face's TRL library.

Usage

!pip install -U bitsandbytes
!pip install -U transformers
!pip install -U accelerate
!pip install -U datasets
!pip install -U peft
!pip install -U ipywidgets
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
from peft import PeftModel
import torch
from tqdm import tqdm
import json

# Paste the token obtained from Hugging Face here.
HF_TOKEN = "xxx"
# Base model and the trained LoRA adapter.
model_id = "llm-jp/llm-jp-3-13b"
adapter_id = "fajoie/llmjp3_lora"
# QLoRA config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Load model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)
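
# Optional sanity check (an addition, not in the original script): report how
# much memory the 4-bit quantized model occupies, using transformers'
# standard get_memory_footprint() API.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")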

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)

# Attach the trained LoRA adapter to the base model.
model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)

# Load the evaluation dataset. A record may span multiple lines, so lines are
# accumulated until a complete JSON object (one ending in "}") is formed.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
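
# Quick sanity check (added here, not in the original script): confirm the
# records parsed and carry the "task_id" and "input" fields used below.
print(f"{len(datasets)} tasks loaded")
print(datasets[0]["task_id"], datasets[0]["input"][:50])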

# Run inference with the llm-jp model on each task.
results = []
for data in tqdm(datasets):

  input = data["input"]

  prompt = f"""### 指示
  {input}
  ### 回答
  """

  tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
  attention_mask = torch.ones_like(tokenized_input)
  with torch.no_grad():
      outputs = model.generate(
          tokenized_input,
          attention_mask=attention_mask,
          max_new_tokens=100,
          do_sample=False,
          repetition_penalty=1.2,
          pad_token_id=tokenizer.eos_token_id
      )[0]
  output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)

  results.append({"task_id": data["task_id"], "input": input, "output": output})

# Save the results to a JSONL file.
import re
jsonl_id = re.sub(".*/", "", adapter_id)  # strip the "fajoie/" namespace prefix
with open(f"./{jsonl_id}-outputs.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)  # ensure_ascii=False for handling non-ASCII characters
        f.write('\n')
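
To confirm the saved file is well-formed JSONL before submitting it, the file can be re-read; this is a minimal check added here, not part of the original script:

# Every line must parse as JSON and contain the expected keys.
with open(f"./{jsonl_id}-outputs.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        assert {"task_id", "input", "output"} <= record.keys()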

Training procedure

!pip uninstall unsloth -y
!pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
!pip install --upgrade torch
!pip install --upgrade xformers

# Install Flash Attention 2 for softcapping support
import torch
if torch.cuda.get_device_capability()[0] >= 8:
    !pip install --no-deps packaging ninja einops "flash-attn>=2.6.3"

# Set the Hugging Face token.
HF_TOKEN = "xxx"

# Load llm-jp/llm-jp-3-13b in 4-bit quantization for QLoRA training.
from unsloth import FastLanguageModel
import torch
max_seq_length = 512
dtype = None  # None lets Unsloth auto-detect the dtype
load_in_4bit = True  # load the 13B model in 4-bit to save memory

model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "llm-jp-3-13b-it"
# Load the base model and tokenizer with Unsloth's FastLanguageModel.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)

# Prepare the model for SFT by attaching LoRA adapters.
model = FastLanguageModel.get_peft_model(
    model,
    r = 32,
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0.05,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
    max_seq_length = max_seq_length,
)
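
# Optional check (added): get_peft_model returns a PEFT-wrapped model, so the
# standard peft API can report how small a fraction of the 13B parameters is
# actually trainable with r=32 LoRA on the seven projection matrices.
model.print_trainable_parameters()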

from datasets import Dataset, load_dataset, concatenate_datasets

# Paths of the datasets to use: all of the ichikara-instruction files.
data_dir = "/content/"
data_files = [
    "ichikara-instruction-003-001-1.json",
    "ichikara-instruction-003-001-2.1.json",
    "ichikara-instruction-003-001-2.2.json",
    "ichikara-instruction-003-001-5.1.json",
    "ichikara-instruction-003-001-5.2.json",
    "ichikara-instruction-003-003-1.json"
    ]

# Load each file and concatenate them into a single training dataset.
dataset = None
for data_file in data_files:
    tmp = load_dataset("json", data_files=f"{data_dir}{data_file}", split="train", streaming=False)
    dataset = tmp if dataset is None else concatenate_datasets([dataset, tmp])
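
# Sanity check (added): confirm all six ichikara-instruction files were loaded.
print(f"{len(dataset)} training examples loaded")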

# Define the prompt format used during training.
prompt = """### 指示
{}
### 回答
{}"""

EOS_TOKEN = tokenizer.eos_token  # appended so the model learns where to stop
def formatting_prompts_func(examples):
    input = examples["text"]      # instruction
    output = examples["output"]   # reference answer
    text = prompt.format(input, output) + EOS_TOKEN
    return {"formatted_text": text}

# Apply the format to every example.
dataset = dataset.map(
    formatting_prompts_func,
    num_proc= 4, 
)
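
# Inspect one formatted example (added check) to confirm the template and the
# EOS token were applied as expected.
print(dataset[0]["formatted_text"])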

from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported

# Training configuration.
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset=dataset,
    max_seq_length = max_seq_length,
    dataset_text_field="formatted_text",
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 16, # raised because training ran on Google Colab Pro+
        gradient_accumulation_steps = 1, # correspondingly, no gradient accumulation
        num_train_epochs = 1, # more epochs overfit and lowered the score, so one epoch
        logging_steps = 10,
        warmup_steps = 100,
        save_steps=100,
        save_total_limit=2,
        max_steps=-1,
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        group_by_length=True,
        seed = 3407,
        output_dir = "outputs",
        report_to = "none",
    ),
)

# Run training.
trainer_stats = trainer.train()

# Push only the LoRA adapter to the Hub.
model.push_to_hub_merged(
    new_model_id+"_lora_4",
    tokenizer=tokenizer,
    save_method="lora",
    token=HF_TOKEN,
    private=True
)
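
The adapter can also be kept locally instead of, or in addition to, pushing it to the Hub; a minimal sketch using the standard save_pretrained APIs (the directory name here is just an example):

# Save the LoRA adapter weights and tokenizer to a local directory.
model.save_pretrained("llm-jp-3-13b-it_lora")
tokenizer.save_pretrained("llm-jp-3-13b-it_lora")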