Uploaded model
- Developed by: imagfff
- License: apache-2.0
- Finetuned from model: llm-jp/llm-jp-3-13b
This llama model was trained 2x faster with Unsloth and Hugging Face's TRL library.
How to generate the submitted JSONL file
- Install the required libraries
pip install unsloth
pip uninstall unsloth -y && pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
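Before running the script, it can be worth confirming that the reinstall actually picked up the package (a minimal check added here for convenience; `importlib.metadata` is part of the Python standard library):

```python
# Confirm unsloth is importable and show which version ended up installed.
from importlib.metadata import version

print("unsloth", version("unsloth"))
```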
- Run the code below
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List
import torch
from tqdm import tqdm
from unsloth import FastLanguageModel
@dataclass
class ModelConfig:
    model_name: str = "imagfff/llm-jp-3-13b-it"
    max_seq_length: int = 2048
    dtype: Any = None  # None lets Unsloth auto-select float16/bfloat16 for the GPU
    load_in_4bit: bool = True
    token: str = "HF token"  # placeholder: set your Hugging Face access token here
def load_model(config: ModelConfig) -> tuple[Any, Any]:
    """Load the model and tokenizer."""
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=config.model_name,
        max_seq_length=config.max_seq_length,
        dtype=config.dtype,
        load_in_4bit=config.load_in_4bit,
        token=config.token,
    )
    FastLanguageModel.for_inference(model)  # switch Unsloth into inference mode
    return model, tokenizer
def load_datasets(file_path: str) -> List[Dict[str, Any]]:
    """Load the dataset from a JSONL file."""
    datasets = []
    try:
        with open(file_path, encoding="utf-8") as f:
            item = ""
            for line in f:
                # Accumulate lines until a record closes, so JSON objects
                # that happen to span multiple lines are still parsed.
                item += line.strip()
                if item.endswith("}"):
                    datasets.append(json.loads(item))
                    item = ""
        return datasets
    except (FileNotFoundError, json.JSONDecodeError) as e:
        raise RuntimeError(f"Failed to load the dataset: {e}") from e
def generate_prediction(model: Any, tokenizer: Any, input_text: str) -> str:
    """Run inference with the model."""
    # Prompt template used at fine-tuning time ("### 指示" = instruction, "### 回答" = answer).
    prompt = f"### 指示\n{input_text}\n### 回答\n"
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            use_cache=True,
            do_sample=False,  # greedy decoding for reproducible outputs
            repetition_penalty=1.2,
        )
    # Keep only the text after the answer marker.
    return tokenizer.decode(outputs[0], skip_special_tokens=True).split("\n### 回答")[-1]
def save_results(results: List[Dict[str, Any]], output_path: str) -> None:
    """Save the results to a JSONL file."""
    path = Path(output_path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        for result in results:
            json.dump(result, f, ensure_ascii=False)
            f.write("\n")
def main():
    config = ModelConfig()
    model, tokenizer = load_model(config)
    datasets = load_datasets("./elyza-tasks-100-TV_0.jsonl")

    results = []
    for dt in tqdm(datasets, desc="Running inference"):
        prediction = generate_prediction(model, tokenizer, dt["input"])
        results.append(
            {"task_id": dt["task_id"], "input": dt["input"], "output": prediction}
        )

    model_basename = config.model_name.split("/")[-1]
    save_results(results, f"/content/{model_basename}_output.jsonl")


if __name__ == "__main__":
    main()
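After the script finishes, the output file can be sanity-checked before submission. The snippet below is a small sketch of my own, not part of the original workflow: the path assumes the default `model_name` in `ModelConfig` (so the basename is `llm-jp-3-13b-it`), and it only verifies that every record carries the three expected keys.

```python
import json

# Path derived from ModelConfig.model_name above; adjust if you changed it.
output_file = "/content/llm-jp-3-13b-it_output.jsonl"

with open(output_file, encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

# Every submitted record should have task_id, input, and output.
assert all({"task_id", "input", "output"} <= r.keys() for r in records)
print(f"{len(records)} records look well-formed.")
```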