import os

import torch
from torch import nn
from torch.utils.data import Sampler

from transformers import Trainer
from transformers.trainer import (
    is_sagemaker_mp_enabled,
    get_parameter_names,
    has_length,
    ALL_LAYERNORM_LAYERS,
    logger,
)
from typing import List, Optional

from ..utils.train_utils import *


def split_to_even_chunks(indices, lengths, num_chunks):
    """
    Split a list of indices into `num_chunks` chunks of roughly equal total length.
    """

    if len(indices) % num_chunks != 0:
        # The indices cannot be split evenly by count; fall back to a strided split.
        return [indices[i::num_chunks] for i in range(num_chunks)]

    num_indices_per_chunk = len(indices) // num_chunks

    chunks = [[] for _ in range(num_chunks)]
    chunks_lengths = [0 for _ in range(num_chunks)]
    for index in indices:
        # Greedily assign each index to the chunk with the smallest total length.
        shortest_chunk = chunks_lengths.index(min(chunks_lengths))
        chunks[shortest_chunk].append(index)
        chunks_lengths[shortest_chunk] += lengths[index]
        # A full chunk is excluded from further assignment.
        if len(chunks[shortest_chunk]) == num_indices_per_chunk:
            chunks_lengths[shortest_chunk] = float("inf")

    return chunks
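

# A worked example of the greedy split above (values are hypothetical):
#
#     >>> split_to_even_chunks([0, 1, 2, 3, 4, 5], [5, 1, 4, 2, 3, 6], 2)
#     [[0, 3, 5], [1, 2, 4]]
#
# Ties go to the lowest-numbered chunk, so the chunk totals come out to 13 and 8.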


def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None):
    # Signed lengths encode modality: positive values mark multimodal samples,
    # negative values mark language-only samples.
    assert all(l != 0 for l in lengths), "Should not have zero length."
    mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
    lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])

    assert len(mm_indices) > 0, "Should have at least one multimodal sample."
    assert len(lang_indices) > 0, "Should have at least one language sample."

    # Propagate the caller's generator so that the grouping is reproducible;
    # the original code passed `generator=None` here, discarding it.
    mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=generator)]
    lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=generator)]

    megabatch_size = world_size * batch_size
    mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
    lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]

    # Pool the last (possibly incomplete) megabatch of each modality so that
    # every remaining megabatch is full-sized and single-modality.
    last_mm = mm_megabatches[-1]
    last_lang = lang_megabatches[-1]
    additional_batch = last_mm + last_lang
    megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
    megabatch_indices = torch.randperm(len(megabatches), generator=generator)
    megabatches = [megabatches[i] for i in megabatch_indices]

    if len(additional_batch) >= megabatch_size:
        megabatches = [additional_batch[:megabatch_size]] + megabatches
        additional_batch = additional_batch[megabatch_size:]

    if len(additional_batch) > 0:
        megabatches.append(additional_batch)

    return [i for megabatch in megabatches for i in megabatch]
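

# Sketch of the expected input convention (inferred from the sign handling
# above): lengths = [120, -80, 95, -60] would mean samples 0 and 2 are
# multimodal while samples 1 and 3 are text-only with lengths 80 and 60.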


def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True):
    # NOTE: `merge` is currently unused and kept only for signature compatibility.
    # Shuffle once globally, then sort each megabatch by descending length so
    # every device receives samples of similar length.
    indices = torch.randperm(len(lengths), generator=generator)
    megabatch_size = world_size * batch_size
    megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
    megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]
    megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches]

    return [i for megabatch in megabatches for batch in megabatch for i in batch]
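

# For intuition (hypothetical numbers): with 8 samples, batch_size=2, and
# world_size=2, megabatch_size is 4, so the shuffled indices form two
# megabatches of 4; each is sorted by length and split into two per-rank
# chunks of 2 whose totals are as even as the greedy split allows.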


class LengthGroupedSampler(Sampler):
    r"""
    Sampler that samples indices in a way that groups together features of the dataset
    that are of roughly the same length, while keeping a bit of randomness.
    """

    def __init__(
        self,
        batch_size: int,
        world_size: int,
        lengths: Optional[List[int]] = None,
        generator=None,
        group_by_modality: bool = False,
    ):
        if lengths is None:
            raise ValueError("Lengths must be provided.")

        self.batch_size = batch_size
        self.world_size = world_size
        self.lengths = lengths
        self.generator = generator
        self.group_by_modality = group_by_modality

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        if self.group_by_modality:
            indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
        else:
            indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
        return iter(indices)
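

# Minimal usage sketch (hypothetical `my_dataset` and `my_lengths`; `my_lengths`
# is assumed to hold one signed length per sample, as described above):
#
#     sampler = LengthGroupedSampler(
#         batch_size=4,
#         world_size=2,
#         lengths=my_lengths,
#         group_by_modality=True,
#     )
#     loader = torch.utils.data.DataLoader(my_dataset, batch_size=4, sampler=sampler)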


class LLaVATrainer(Trainer):

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None

        if self.args.group_by_modality_length:
            # The dataset is expected to expose one signed length per sample
            # (positive = multimodal, negative = text-only).
            lengths = self.train_dataset.modality_lengths
            return LengthGroupedSampler(
                self.args.train_batch_size,
                world_size=self.args.world_size,
                lengths=lengths,
                group_by_modality=True,
            )
        else:
            return super()._get_train_sampler()
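
    # NOTE: `group_by_modality_length` is assumed to be a custom field on this
    # project's TrainingArguments rather than a stock HF argument; when it is
    # absent or False, sampling falls through to the default HF behavior.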

    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else,
        you can pass a tuple in the Trainer's init through `optimizers`, or override this
        method in a subclass.
        """
        if is_sagemaker_mp_enabled():
            return super().create_optimizer()

        opt_model = self.model

        if self.optimizer is None:
            # LayerNorm weights and all biases are excluded from weight decay.
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if "bias" not in name]
            if self.args.mm_projector_lr is not None:
                # Give the multimodal connector (projector) its own learning rate.
                connector_parameters = [name for name, _ in opt_model.named_parameters() if "connector" in name]
                optimizer_grouped_parameters = [
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in connector_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                        "name": "decay_no_connector_parameters",
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in connector_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                        "name": "no_decay_no_connector_parameters",
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in connector_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                        "lr": self.args.mm_projector_lr,
                        "name": "decay_connector_parameters",
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in connector_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                        "lr": self.args.mm_projector_lr,
                        "name": "no_decay_connector_parameters",
                    },
                ]
            else:
                optimizer_grouped_parameters = [
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                        "name": "decay_parameters",
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                        "name": "no_decay_parameters",
                    },
                ]

            if getattr(self.args, "moe_enable", False):
                # DeepSpeed-MoE requires expert parameters to live in their own groups.
                from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer
                optimizer_grouped_parameters = split_params_into_different_moe_groups_for_optimizer(optimizer_grouped_parameters)
            optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args)

            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
            if optimizer_cls.__name__ == "Adam8bit":
                import bitsandbytes

                manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                # Keep embedding weights in 32-bit optimizer state for stability,
                # as recommended for 8-bit Adam.
                skipped = 0
                for module in opt_model.modules():
                    if isinstance(module, nn.Embedding):
                        skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                        logger.info(f"skipped {module}: {skipped/2**20}M params")
                        manager.register_module_override(module, "weight", {"optim_bits": 32})
                        logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                logger.info(f"skipped: {skipped/2**20}M params")

        return self.optimizer
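
    # Example configuration (hypothetical value): passing `--mm_projector_lr 2e-5`
    # in the training arguments routes every parameter whose name contains
    # "connector" into the two dedicated groups above, so the projector can be
    # tuned at its own rate while the rest of the model keeps the base
    # `--learning_rate`.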