from unsloth import FastModel
import torch
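
# Load the merged Gemma base model. 4-bit/8-bit loading and full finetuning
# are all disabled, so the weights load in 16-bit and training is LoRA-only.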
model, tokenizer = FastModel.from_pretrained(
    model_name = "NewEden/Gemma-Merged-V2",
    max_seq_length = 8192,
    load_in_4bit = False,
    load_in_8bit = False,
    full_finetuning = False,
)
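
# Attach LoRA adapters to the attention and MLP projections. With r=64 and
# lora_alpha=32, adapter updates are scaled by alpha/r = 0.5.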
model = FastModel.get_peft_model(
    model,
    finetune_vision_layers=False,
    finetune_language_layers=True,
    finetune_attention_modules=True,
    finetune_mlp_modules=True,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    r=64,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    random_state=3407,
)
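
# Gemma-3 formats turns as <start_of_turn>user / <start_of_turn>model blocks;
# get_chat_template installs that template on the tokenizer.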
from unsloth.chat_templates import get_chat_template

tokenizer = get_chat_template(
    tokenizer,
    chat_template="gemma-3",
)
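
# Pull the training data from the Hugging Face Hub.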
from datasets import load_dataset, Dataset, Features, Value

print("Loading dataset...")
dataset = load_dataset(
    "NewEden/Light-Novels-Roleplay-Logs-Books-Oh-My",
    split="train"
)
print(f"Dataset loaded with {len(dataset)} examples.")
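
# Normalize each conversation: standardize role names, drop system and empty
# turns, enforce user/model alternation, and make sure it ends on a model turn.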
def validate_and_fix_conversations(examples):
    fixed = []
    for conv in examples["conversations"]:
        if not isinstance(conv, list):
            continue
        cleaned = []
        for turn in conv:
            if not isinstance(turn, dict):
                continue
            # Some datasets use ShareGPT-style "from"/"value" keys instead of
            # "role"/"content", so fall back to those when needed.
            role = (turn.get("role") or turn.get("from") or "").lower()
            content = turn.get("content") or turn.get("value") or ""
            if not isinstance(content, str) or not content.strip():
                continue
            if role == "system":
                continue
            if role in ["assistant", "bot", "chatbot", "gpt"]:
                role = "model"
            elif role in ["human", "usr", "user"]:
                role = "user"
            else:
                continue
            cleaned.append({"role": role, "content": content})

        if len(cleaned) < 2:
            continue

        # Conversations must open with a user turn.
        if cleaned[0]["role"] != "user":
            cleaned = cleaned[1:]

        # Keep only turns that strictly alternate user -> model -> user -> ...
        fixed_conv = []
        expected = "user"
        for turn in cleaned:
            if turn["role"] == expected:
                fixed_conv.append(turn)
                expected = "model" if expected == "user" else "user"

        # A trailing user turn has no response to learn from, so drop it.
        if fixed_conv and fixed_conv[-1]["role"] == "user":
            fixed_conv = fixed_conv[:-1]

        if len(fixed_conv) >= 2:
            fixed.append(fixed_conv)

    return {"conversations": fixed}
print("Validating and fixing conversations...") |
|
dataset = dataset.map( |
|
validate_and_fix_conversations, |
|
batched=True, |
|
desc="Fixing conversations" |
|
) |
|
print(f"Validation complete. {len(dataset)} examples left.") |
|
|
|
|
|
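
# Fallback so the script still runs end to end if everything was filtered out.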
if len(dataset) == 0:
    print("Dataset empty after validation. Creating dummy data...")
    dummy_conversations = [
        [
            {"role": "user", "content": "Hey, what's up?"},
            {"role": "model", "content": "All good! How can I help?"}
        ]
    ]
    # Keep the same "role"/"content" schema the rest of the pipeline reads;
    # ShareGPT-style "from"/"value" keys would break the later steps.
    flat_examples = [{"conversations": conv} for conv in dummy_conversations]
    features = Features({"conversations": [{"role": Value("string"), "content": Value("string")}]})
    dataset = Dataset.from_list(flat_examples, features=features)
    print(f"Dummy dataset created with {len(dataset)} example.")
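
# Final structural check before templating: strictly alternating turns that
# start with user and end with model.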
def enforce_strict_user_model_pairs(examples):
    fixed = []
    for convo in examples["conversations"]:
        if not isinstance(convo, list) or not convo:
            continue
        last = None
        valid = True
        for turn in convo:
            # Reject any conversation where the same role speaks twice in a row.
            if turn["role"] == last:
                valid = False
                break
            last = turn["role"]
        if valid and convo[0]["role"] == "user" and convo[-1]["role"] == "model":
            fixed.append(convo)
    return {"conversations": fixed}
print("Enforcing strict user/model alternation...") |
|
dataset = dataset.map( |
|
enforce_strict_user_model_pairs, |
|
batched=True, |
|
desc="Filtering strict alternation" |
|
) |
|
print(f"After enforcing alternation: {len(dataset)} examples left.") |
|
|
|
|
|
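
# Render each conversation into a single training string with the Gemma-3
# template. tokenize=False returns text rather than token ids, which is what
# SFTTrainer's dataset_text_field expects.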
def apply_chat_template(examples):
    # Strip the leading <bos>: the tokenizer adds it again during training,
    # and a doubled <bos> corrupts the inputs.
    texts = [
        tokenizer.apply_chat_template(convo, tokenize=False).removeprefix("<bos>")
        for convo in examples["conversations"]
    ]
    return {"text": texts}
print("Applying chat template...") |
|
dataset = dataset.map( |
|
apply_chat_template, |
|
batched=True, |
|
desc="Applying chat template" |
|
) |
|
print(f"Chat template applied. {len(dataset)} examples ready.") |
|
print("Sample text after templating:") |
|
print(dataset[0]["text"][:500] + "...") |
|
|
|
|
|
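
# Configure supervised finetuning. Effective batch size is
# per_device_train_batch_size * gradient_accumulation_steps = 1 * 2 = 2.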
from trl import SFTTrainer, SFTConfig

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    eval_dataset=None,
    args=SFTConfig(
        dataset_text_field="text",
        per_device_train_batch_size=1,
        gradient_accumulation_steps=2,
        warmup_steps=35,
        num_train_epochs=4,
        learning_rate=1e-5,
        logging_steps=1,
        optim="paged_adamw_8bit",
        weight_decay=0.02,
        lr_scheduler_type="linear",
        seed=3407,
        report_to="wandb",
    ),
)
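
# Mask the loss on everything except the model's responses, so user turns and
# template markers contribute no gradient.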
from unsloth.chat_templates import train_on_responses_only

print("Setting up response-only training...")
trainer = train_on_responses_only(
    trainer,
    instruction_part="<start_of_turn>user\n",
    response_part="<start_of_turn>model\n",
)
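
# Snapshot GPU memory before training so the LoRA overhead can be reported.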
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name} ({max_memory} GB total)")
print(f"Starting reserved memory = {start_gpu_memory} GB")
print("Starting training...") |
|
trainer_stats = trainer.train() |
|
|
|

used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
used_for_lora = round(used_memory - start_gpu_memory, 3)
print(f"Training took {trainer_stats.metrics['train_runtime']} seconds "
      f"({round(trainer_stats.metrics['train_runtime']/60, 2)} minutes).")
print(f"Peak memory: {used_memory} GB. Used for LoRA: {used_for_lora} GB.")
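
# save_pretrained on a PEFT-wrapped model writes only the LoRA adapter
# weights, not a merged 16-bit checkpoint.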
output_dir = "./gemma-finetuned" |
|
model.save_pretrained(output_dir) |
|
tokenizer.save_pretrained(output_dir) |
|
print(f"Model saved at {output_dir}") |