""" | |
2025.3.13 | |
2025.3.15 | |
4.48.3 | |
0.15.2 | |
__UNSLOTH_VERSIONING__ | |
""" | |
from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import functional as F
from trl.trainer.xpo_trainer import (
    Any, BaseImageProcessor, BasePairwiseJudge, Callable, Dataset, EvalPrediction, F,
    FeatureExtractionMixin, IterableDataset, OnlineDPOTrainer, OptimizerNames, Optional,
    PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, SIMPLE_CHAT_TEMPLATE,
    TrainerCallback, Union, XPOConfig, XPOTrainer, empty_cache, generate_model_card,
    get_comet_experiment_url, get_reward, is_conversational, is_wandb_available, jinja2,
    maybe_apply_chat_template, nn, os, textwrap, torch, truncate_right,
    unwrap_model_for_generation, wandb,
)
import os
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version
import numpy as np
from contextlib import nullcontext
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
# `training_step` below references `amp` when `self.use_apex` is set, so import it
# behind the same availability guard that transformers itself uses.
from transformers.utils import is_apex_available
if is_apex_available():
    from apex import amp
torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}
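# `torch_compile_options` above holds Inductor backend knobs. A minimal sketch of how
# such a dict can be consumed; nothing in this generated file calls the helper below,
# so it is purely illustrative.
def _example_compile(module: nn.Module) -> nn.Module:
    # Compile with the Inductor backend, forwarding the options defined above.
    return torch.compile(module, backend = "inductor", options = torch_compile_options)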
def selective_log_softmax(logits, index):
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # A per-row loop (commented out below) trades speed for lower peak memory:
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
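# Hedged sanity check for `selective_log_softmax`: gathering after a full
# `F.log_softmax` must give the same per-token log-probabilities. The helper name and
# the dummy shapes are illustrative assumptions, not part of the generated module.
def _check_selective_log_softmax():
    logits = torch.randn(2, 5, 32)        # (batch, seq, vocab)
    index = torch.randint(0, 32, (2, 5))  # token ids to score
    fast = selective_log_softmax(logits, index)
    slow = torch.gather(F.log_softmax(logits.float(), dim = -1), -1, index.unsqueeze(-1)).squeeze(-1)
    assert torch.allclose(fast, slow, atol = 1e-5)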
class UnslothXPOConfig(XPOConfig):
    """
    Configuration class for the [`XPOTrainer`].
    Subclass of [`OnlineDPOConfig`]; it inherits all of its arguments and adds the following:
    Parameters:
        alpha (`float` or `list[float]`, *optional*, defaults to `1e-5`):
            Weight of the XPO loss term. If a list of floats is provided, an alpha is selected for each new epoch
            and the last alpha is used for the remaining epochs.
    """
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks: Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = True,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        evaluation_strategy = None,
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        dispatch_batches = None,
        split_batches = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        reward_model_path = None,
        judge = None,
        max_new_tokens = 64,
        max_length = 512,
        temperature = 0.9,
        missing_eos_penalty = None,
        loss_type = 'sigmoid',
        dataset_num_proc = None,
        disable_dropout = True,
        use_vllm = False,
        ds3_gather_for_generation = True,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is less than 1e-7, which is too small! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            evaluation_strategy = evaluation_strategy,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            dispatch_batches = dispatch_batches,
            split_batches = split_batches,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            reward_model_path = reward_model_path,
            judge = judge,
            max_new_tokens = max_new_tokens,
            max_length = max_length,
            temperature = temperature,
            missing_eos_penalty = missing_eos_penalty,
            loss_type = loss_type,
            dataset_num_proc = dataset_num_proc,
            disable_dropout = disable_dropout,
            use_vllm = use_vllm,
            ds3_gather_for_generation = ds3_gather_for_generation,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
pass
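# Illustrative construction of the config; every value below is an assumption made for
# the example, not an additional default. Note that `alpha` (inherited from `XPOConfig`
# via **kwargs) may also be a list, consumed one value per epoch as documented above.
def _example_config() -> UnslothXPOConfig:
    return UnslothXPOConfig(
        output_dir = "outputs",
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        max_new_tokens = 64,
        alpha = [1e-5, 5e-6, 1e-6],  # per-epoch XPO exploration weights
    )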
class _UnslothXPOTrainer(OnlineDPOTrainer):
    r""""""

    _tag_names = ["trl", "xpo"]

    def __init__(
        self,
        model: Union[PreTrainedModel, nn.Module] = None,
        ref_model: Union[PreTrainedModel, nn.Module] = None,
        reward_model: Optional[nn.Module] = None,
        judge: Optional[BasePairwiseJudge] = None,
        args: Optional[XPOConfig] = None,
        data_collator: Optional[Callable] = None,
        train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[
            Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
        ] = None,
        peft_config: Optional[dict] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
    ) -> None:
        super().__init__(
            model=model,
            ref_model=ref_model,
            judge=judge,
            reward_model=reward_model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            reward_processing_class=processing_class,  # for now, XPOTrainer can't use any reward model
            peft_config=peft_config,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )
        self._alpha = self.args.alpha
        # Overwrite the stats dictionary to include XPO specific statistics
        self.stats = {
            # Remove "non_score_reward", "rlhf_reward", "scores"
            # Add "loss/dpo", "loss/xpo"
            "loss/dpo": [],
            "loss/xpo": [],
            "objective/kl": [],
            "objective/entropy": [],
            "rewards/chosen": [],
            "rewards/rejected": [],
            "rewards/accuracies": [],
            "rewards/margins": [],
            "logps/chosen": [],
            "logps/rejected": [],
            # Replace "contain_eos_token" by "model_contain_eos_token" and "ref_contain_eos_token"
            "val/model_contain_eos_token": [],
            "val/ref_contain_eos_token": [],
            "alpha": [],
            "beta": [],
        }
        if self.reward_model is not None:
            # Replace "scores" by "model_scores" and "ref_scores"
            self.stats["objective/model_scores"] = []
            self.stats["objective/ref_scores"] = []
            self.stats["objective/scores_margin"] = []
    @property
    def alpha(self):
        # `alpha` may be a list: use one value per epoch, then keep the last one.
        # The @property decorator is required because `self.alpha` is read as an
        # attribute in `_compute_losses` and `_log_statistics` below.
        if isinstance(self._alpha, list):
            epoch = int(self.state.epoch)  # `state.epoch` is a float; list indices must be ints
            return self._alpha[epoch] if epoch < len(self._alpha) else self._alpha[-1]
        else:
            return self._alpha
    def _generate_completions(self, prompts, model):
        with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
            model_output = unwrapped_model.generate(
                input_ids=prompts["input_ids"],
                attention_mask=prompts["attention_mask"],
                generation_config=self.generation_config,
            )
        ref_model = model if self.ref_model is None else self.ref_model
        with torch.no_grad(), unwrap_model_for_generation(ref_model, self.accelerator) as unwrapped_ref_model:
            ref_output = unwrapped_ref_model.generate(
                input_ids=prompts["input_ids"],
                attention_mask=prompts["attention_mask"],
                generation_config=self.generation_config,
            )
        return model_output, ref_output

    def _process_completions(self, model_output, ref_output, prompts):
        context_length = prompts["input_ids"].shape[1]

        # Process model completions
        model_completion_ids = model_output[:, context_length:]
        model_completion_ids, model_completion_mask = truncate_right(
            model_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
        )
        model_data = {
            "input_ids": torch.cat((prompts["input_ids"], model_completion_ids), dim=1),
            "attention_mask": torch.cat((prompts["attention_mask"], model_completion_mask), dim=1),
            "raw": prompts["raw"],
        }

        # Process reference model completions
        ref_completion_ids = ref_output[:, context_length:]
        ref_completion_ids, ref_completion_mask = truncate_right(
            ref_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
        )
        ref_data = {
            "input_ids": torch.cat((prompts["input_ids"], ref_completion_ids), dim=1),
            "attention_mask": torch.cat((prompts["attention_mask"], ref_completion_mask), dim=1),
            "raw": prompts["raw"],
        }

        return model_data, ref_data

    def _compute_rewards(self, model_data, ref_data, context_length):
        with torch.no_grad():
            _, model_scores, _ = get_reward(
                self.reward_model, model_data["input_ids"], self.processing_class.pad_token_id, context_length
            )
            _, ref_scores, _ = get_reward(
                self.reward_model, ref_data["input_ids"], self.processing_class.pad_token_id, context_length
            )

        # Apply EOS penalty if needed
        if self.args.missing_eos_penalty is not None:
            model_contain_eos = torch.any(model_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
            ref_contain_eos = torch.any(ref_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
            model_scores[~model_contain_eos] -= self.args.missing_eos_penalty
            ref_scores[~ref_contain_eos] -= self.args.missing_eos_penalty

        return model_scores, ref_scores

    def _compute_judge(self, model_data, ref_data, context_length):
        prompts = model_data["raw"]
        model_data_completions = self.processing_class.batch_decode(
            model_data["input_ids"][:, context_length:], skip_special_tokens=True
        )
        model_data_completions = [completion.strip() for completion in model_data_completions]

        ref_data_completions = self.processing_class.batch_decode(
            ref_data["input_ids"][:, context_length:], skip_special_tokens=True
        )
        ref_data_completions = [completion.strip() for completion in ref_data_completions]

        if is_conversational({"prompt": prompts[0]}):
            model_data_completions = [
                [{"role": "assistant", "content": completion}] for completion in model_data_completions
            ]
            environment = jinja2.Environment()
            template = environment.from_string(SIMPLE_CHAT_TEMPLATE)
            prompts = [template.render(messages=message) for message in prompts]
            model_data_completions = [template.render(messages=completion) for completion in model_data_completions]

            ref_data_completions = [
                [{"role": "assistant", "content": completion}] for completion in ref_data_completions
            ]
            ref_data_completions = [template.render(messages=completion) for completion in ref_data_completions]

        ranks_of_first_completion = self.judge.judge(
            prompts,
            list(zip(model_data_completions, ref_data_completions)),
        )

        # convert ranks to a True/False mask:
        # when rank == 0, it means the first completion is the best
        # when rank == 1, it means the second completion is the best
        return torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=model_data["input_ids"].device)
    def _compute_logprobs(self, model, model_data, ref_data, context_length):
        def compute_logprobs_for_data(m, data):
            output = m(data["input_ids"], attention_mask=data["attention_mask"])
            logits = output.logits[:, context_length - 1 : -1]
            token_logprobs = selective_log_softmax(logits, data["input_ids"][:, context_length:])
            return token_logprobs

        # Compute logprobs for model completions
        model_logprobs_model_data = compute_logprobs_for_data(model, model_data)
        # Compute logprobs for model on reference completions (for XPO loss)
        model_logprobs_ref_data = compute_logprobs_for_data(model, ref_data)

        # Compute logprobs for reference model completions
        with torch.no_grad():
            if self.ref_model is None:
                with model.disable_adapter():
                    ref_logprobs_model_data = compute_logprobs_for_data(model, model_data)
                    ref_logprobs_ref_data = compute_logprobs_for_data(model, ref_data)
            else:
                ref_logprobs_model_data = compute_logprobs_for_data(self.ref_model, model_data)
                ref_logprobs_ref_data = compute_logprobs_for_data(self.ref_model, ref_data)

        # Mask padding tokens
        model_padding_mask = model_data["attention_mask"][:, context_length:] == 0
        ref_padding_mask = ref_data["attention_mask"][:, context_length:] == 0
        model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0)
        model_logprobs_ref_data = model_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0)
        ref_logprobs_ref_data = ref_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0)
        ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0)

        return model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data
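    # Note on shapes: each tensor returned by `_compute_logprobs` is
    # (batch_size, completion_length), with padded positions zeroed out so that a plain
    # `.sum(1)` later yields sequence-level log-probabilities without a separate mask.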
    def _compute_losses(
        self,
        model_logprobs_model_data,
        model_logprobs_ref_data,
        ref_logprobs_ref_data,
        ref_logprobs_model_data,
        chosen_mask,
    ):
        # Compute log probs
        model_logprobs_model_data_sum = model_logprobs_model_data.sum(1)
        model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1)
        ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1)
        ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1)

        chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
        chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
        chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs

        rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
        rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
        rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs

        # Compute logits as the difference between chosen and rejected log ratios
        logits = chosen_log_ratios - rejected_log_ratios

        if self.args.loss_type == "sigmoid":
            dpo_losses = -F.logsigmoid(self.beta * logits)
        elif self.args.loss_type == "ipo":
            dpo_losses = (logits - 1 / (2 * self.beta)) ** 2
        else:
            raise NotImplementedError(f"invalid loss type {self.args.loss_type}")

        # Compute XPO specific loss
        xpo_losses = self.alpha * model_logprobs_ref_data_sum

        # Total loss
        loss = (dpo_losses + xpo_losses).mean()

        return loss, dpo_losses, xpo_losses
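    # In symbols, with sequence-level log-ratios r_c (chosen) and r_r (rejected), the
    # sigmoid variant above is  L = -log sigmoid(beta * (r_c - r_r)) + alpha * log pi_theta(y_ref | x).
    # The alpha term is XPO's optimism bonus: minimising it moves probability mass away
    # from the reference model's completions, encouraging exploration beyond them.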
    def _log_statistics(
        self,
        model_data,
        ref_data,
        model_logprobs_model_data,
        model_logprobs_ref_data,
        ref_logprobs_ref_data,
        ref_logprobs_model_data,
        chosen_mask,
        dpo_losses,
        xpo_losses,
        context_length,
        model_scores=None,
        ref_scores=None,
    ):
        # Helper function to gather and compute mean
        def gather_mean(tensor):
            return self.accelerator.gather_for_metrics(tensor).mean().item()

        # Log losses
        self.stats["loss/dpo"].append(gather_mean(dpo_losses))
        self.stats["loss/xpo"].append(gather_mean(xpo_losses))

        # Log scores
        if self.reward_model is not None:
            self.stats["objective/model_scores"].append(gather_mean(model_scores))
            self.stats["objective/ref_scores"].append(gather_mean(ref_scores))
            self.stats["objective/scores_margin"].append(gather_mean(model_scores - ref_scores))

        # Log logprobs
        model_logprobs_model_data_sum = model_logprobs_model_data.sum(1)
        model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1)
        ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1)
        ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1)

        chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
        chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
        chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs

        rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum)
        rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum)
        rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs

        self.stats["logps/chosen"].append(gather_mean(chosen_model_logprobs.mean() + chosen_ref_logprobs.mean()))
        self.stats["logps/rejected"].append(gather_mean(rejected_model_logprobs.mean() + rejected_ref_logprobs.mean()))

        # Log rewards
        chosen_rewards = chosen_log_ratios * self.beta
        rejected_rewards = rejected_log_ratios * self.beta
        self.stats["rewards/chosen"].append(gather_mean(chosen_rewards.mean()))
        self.stats["rewards/rejected"].append(gather_mean(rejected_rewards.mean()))

        # Calculate KL divergence for model and ref data
        kl_model_data = model_logprobs_model_data - ref_logprobs_model_data
        kl_ref_data = model_logprobs_ref_data - ref_logprobs_ref_data
        mean_kl = (kl_model_data.sum(1) + kl_ref_data.sum(1)).mean() / 2
        self.stats["objective/kl"].append(gather_mean(mean_kl))

        # Calculate entropy for model and ref data
        entropy_model_data = -model_logprobs_model_data.sum(1)
        entropy_ref_data = -model_logprobs_ref_data.sum(1)
        mean_entropy = (entropy_model_data.mean() + entropy_ref_data.mean()) / 2
        self.stats["objective/entropy"].append(gather_mean(mean_entropy))

        # Calculate margins
        margin = chosen_rewards - rejected_rewards
        self.stats["rewards/margins"].append(gather_mean(margin.mean()))

        # Calculate accuracy
        accuracy = (margin > 0).float()
        self.stats["rewards/accuracies"].append(gather_mean(accuracy.mean()))

        # Log EOS token statistics
        model_eos = (model_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
        ref_eos = (ref_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
        self.stats["val/model_contain_eos_token"].append(gather_mean(model_eos.float()))
        self.stats["val/ref_contain_eos_token"].append(gather_mean(ref_eos.float()))

        # Log alpha and beta
        self.stats["alpha"].append(self.alpha)
        self.stats["beta"].append(self.beta)
    def training_step(
        self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
    ) -> torch.Tensor:
        model.train()

        # Apply chat template and tokenize the input
        batch_size = len(next(iter(inputs.values())))
        prompts = inputs["prompt"]
        inputs = [{k: v[i] for k, v in inputs.items()} for i in range(batch_size)]
        inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs]
        inputs = [self.tokenize_row(x, self.model.config.is_encoder_decoder, self.processing_class) for x in inputs]
        inputs = self.data_collator(inputs)

        # Only the prompt_* fields are needed from here on
        inputs = self._prepare_inputs(inputs)
        context_length = inputs["prompt_input_ids"].shape[1]
        prompts = {
            "input_ids": inputs["prompt_input_ids"],
            "attention_mask": inputs["prompt_attention_mask"],
            "raw": prompts,
        }
        del inputs

        # Sample completions from both the model and the reference model
        model_output, ref_output = self._generate_completions(prompts, model)

        # Process model completions
        model_data, ref_data = self._process_completions(model_output, ref_output, prompts)

        # Compute rewards
        if self.reward_model is not None:
            model_scores, ref_scores = self._compute_rewards(model_data, ref_data, context_length)
            chosen_mask = model_scores >= ref_scores
        else:
            model_scores, ref_scores = None, None
            chosen_mask = self._compute_judge(model_data, ref_data, context_length)

        # Compute logprobs
        model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data = (
            self._compute_logprobs(model, model_data, ref_data, context_length)
        )

        # Compute loss
        loss, dpo_losses, xpo_losses = self._compute_losses(
            model_logprobs_model_data,
            model_logprobs_ref_data,
            ref_logprobs_ref_data,
            ref_logprobs_model_data,
            chosen_mask,
        )

        # Log everything
        self._log_statistics(
            model_data,
            ref_data,
            model_logprobs_model_data.detach(),
            model_logprobs_ref_data.detach(),
            ref_logprobs_ref_data,
            ref_logprobs_model_data,
            chosen_mask,
            dpo_losses.detach(),
            xpo_losses.detach(),
            context_length,
            model_scores,
            ref_scores,
        )

        if (
            self.args.torch_empty_cache_steps is not None
            and self.state.global_step % self.args.torch_empty_cache_steps == 0
        ):
            empty_cache()

        kwargs = {}
        # For LOMO optimizers you need to explicitly use the learning rate
        if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            kwargs["learning_rate"] = self._get_learning_rate()

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            self.accelerator.backward(loss, **kwargs)

        return loss.detach() / self.args.gradient_accumulation_steps
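    # The detached loss is divided by `gradient_accumulation_steps` only for reporting;
    # `self.accelerator.backward(...)` above has already consumed the unscaled loss.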
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        if not self.is_world_process_zero():
            return

        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")
        citation = textwrap.dedent("""\
        @article{xie2024exploratory,
            title  = {{Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF}},
            author = {Tengyang Xie and Dylan J. Foster and Akshay Krishnamurthy and Corby Rosset and Ahmed Awadallah and Alexander Rakhlin},
            year   = 2024,
            eprint = {arXiv:2405.21046}
        }""")
        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="XPO",
            trainer_citation=citation,
            paper_title="Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF",
            paper_id="2405.21046",
        )
        model_card.save(os.path.join(self.args.output_dir, "README.md"))
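# Self-contained numeric sketch of the loss combination in `_compute_losses`, on dummy
# sequence-level log-probabilities. Every name and value here is an assumption made
# for illustration only.
def _example_xpo_loss(beta: float = 0.1, alpha: float = 1e-5) -> Tensor:
    chosen_log_ratios = torch.tensor([0.4, 0.1])       # log pi_model - log pi_ref on chosen completions
    rejected_log_ratios = torch.tensor([-0.2, 0.3])    # the same ratio on rejected completions
    model_logprobs_ref = torch.tensor([-35.0, -42.0])  # log pi_model of the reference completions
    dpo_losses = -F.logsigmoid(beta * (chosen_log_ratios - rejected_log_ratios))
    xpo_losses = alpha * model_logprobs_ref
    return (dpo_losses + xpo_losses).mean()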
class UnslothXPOTrainer(_UnslothXPOTrainer):
    """
    Initialize XPOTrainer as a subclass of [`OnlineDPOTrainer`].

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation and loss. If no
            reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
        reward_model (`transformers.PreTrainedModel`):
            The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
        judge (`BasePairwiseJudge`):
            The judge to use for pairwise comparison of model completions.
        args (`XPOConfig`):
            The XPO config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used,
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        peft_config (`dict`):
            The peft config to use for training.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return
            a dictionary mapping metric names to metric values.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
    """
    def __init__(
        self,
        model = None,
        ref_model = None,
        reward_model = None,
        judge = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        peft_config = None,
        compute_metrics = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        **kwargs
    ):
        if args is None: args = UnslothXPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []
        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('xpo_trainer', other_metrics)
        super().__init__(
            model = model,
            ref_model = ref_model,
            reward_model = reward_model,
            judge = judge,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            peft_config = peft_config,
            compute_metrics = compute_metrics,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            **kwargs,
        )
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass
pass
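# End-to-end usage sketch, closely following the TRL XPO examples. The checkpoint,
# dataset, and judge below are assumptions (`PairRMJudge` additionally requires the
# llm-blender package); swap in your own components as needed.
def _example_train():
    from datasets import load_dataset
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from trl import PairRMJudge
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    train_dataset = load_dataset("trl-lib/ultrafeedback-prompt", split = "train")
    trainer = UnslothXPOTrainer(
        model = model,
        judge = PairRMJudge(),  # or pass `reward_model=` instead of a judge
        args = UnslothXPOConfig(output_dir = "xpo-qwen", per_device_train_batch_size = 2),
        train_dataset = train_dataset,
        processing_class = tokenizer,
    )
    trainer.train()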