    except ModuleNotFoundError:
        raise Exception(
            "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
            "Please install nemo following the instructions in the README: either with a "
            "NVIDIA PyTorch or NeMo container, or by installing nemo following "
            "https://github.com/NVIDIA/NeMo."
        )
def setup_distributed_environment(trainer):
    # Launching a no-op function forces the strategy's launcher to spawn the
    # distributed worker processes before the environment is initialized.
    def dummy():
        return

    if trainer.strategy.launcher is not None:
        trainer.strategy.launcher.launch(dummy, trainer=trainer)
    trainer.strategy.setup_environment()

    app_state = AppState()
    return app_state
@register_model("nemo_lm")
class NeMoLM(LM):
    def __init__(
        self,
        path: str,
        max_length: int = 4096,
        batch_size: int = 1,
        max_gen_toks: int = 256,
        devices: int = 1,
        num_nodes: int = 1,
        tensor_model_parallel_size: int = 1,
        pipeline_model_parallel_size: int = 1,
        precision: Literal[
            "16-mixed", "bf16-mixed", "32-true", "64-true", 64, 32, 16, "64", "32", "16", "bf16"
        ] = "bf16",
        **kwargs,
    ):
        try:
            from nemo.collections.nlp.modules.common.text_generation_utils import generate
            from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
            from pytorch_lightning.trainer.trainer import Trainer

            self.generate = generate
        except ModuleNotFoundError:
            raise Exception(
                "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
                "Please install nemo following the instructions in the README: either with a "
                "NVIDIA PyTorch or NeMo container, or by installing nemo following "
                "https://github.com/NVIDIA/NeMo."
            )
        super().__init__()

        if tensor_model_parallel_size == 1 and pipeline_model_parallel_size == 1 and devices > 1:
            eval_logger.info(f"The number of data replicas for evaluation is {devices}.")
            eval_logger.info(f"The total number of devices is {devices}.")
            eval_logger.info("No tensor parallelism or pipeline parallelism is applied.")
        elif tensor_model_parallel_size * pipeline_model_parallel_size == devices:
            eval_logger.info(
                f"Setting tensor parallelism to {tensor_model_parallel_size} "
                f"and pipeline parallelism to {pipeline_model_parallel_size}."
            )
            eval_logger.info(f"The total number of devices is {devices}.")
            eval_logger.info("No data parallelism is applied.")
        else:
            raise ValueError(
                "Please set the product of tensor_model_parallel_size and "
                "pipeline_model_parallel_size equal to the specified number of devices."
            )

        if num_nodes > 1:
            raise ValueError(
                "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1."
            )
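        # Worked example of the check above: with devices=8, TP=4 and PP=2 is
        # accepted (4 * 2 == 8), as is TP=1 and PP=1 (eight data replicas);
        # TP=2 with PP=2 would be rejected because 2 * 2 != 8.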
        trainer = Trainer(
            strategy=NLPDDPStrategy(),
            devices=devices,
            accelerator="gpu",
            num_nodes=num_nodes,
            precision=precision,
            logger=False,
            enable_checkpointing=False,
            use_distributed_sampler=False,
        )
        if tensor_model_parallel_size == 1 and pipeline_model_parallel_size == 1 and devices > 1:
            self._device = torch.device(f"cuda:{trainer.global_rank}")
            self._rank = trainer.global_rank
            self._world_size = trainer.world_size

        self.model = load_model(
            path,
            trainer,
            tensor_model_parallel_size=tensor_model_parallel_size,
            pipeline_model_parallel_size=pipeline_model_parallel_size,
        ).cuda()
        self.tokenizer = self.model.tokenizer
        self.app_state = setup_distributed_environment(trainer)

        self._max_length = max_length
        self._batch_size = int(batch_size)
        self._max_gen_toks = max_gen_toks
    @classmethod
    def create_from_arg_string(cls, arg_string, additional_config=None):
        args = simple_parse_args_string(arg_string)
        if additional_config:
            args["batch_size"] = additional_config.get("batch_size", 1)
        return cls(**args)
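    # Usage sketch (the checkpoint path below is hypothetical): a comma-separated
    # argument string, as parsed by simple_parse_args_string, maps directly onto
    # the constructor keywords:
    #   lm = NeMoLM.create_from_arg_string(
    #       "path=/checkpoints/megatron_gpt.nemo,devices=2,tensor_model_parallel_size=2"
    #   )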
    @property
    def eot_token_id(self):
        try:
            return self.tokenizer.eos_id
        except AttributeError:
            return None

    @property
    def max_length(self):
        return self._max_length

    @property
    def max_gen_toks(self):
        return self._max_gen_toks

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    @property
    def accelerator(self):
        return self._Accelerator(self.world_size)
    class _Accelerator:
        def __init__(self, world_size):
            self.world_size = world_size

        def wait_for_everyone(self):
            # Synchronize all ranks at this point.
            torch.distributed.barrier()

        def gather(self, local_tensor):
            # Collect the 1-element tensor from every rank, then concatenate
            # the per-rank results into a single tensor.
            gathered_tensors = [
                torch.zeros(1, dtype=local_tensor.dtype).cuda()
                for _ in range(self.world_size)
            ]
            torch.distributed.all_gather(gathered_tensors, local_tensor)
            return torch.cat(gathered_tensors)
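        # Usage sketch (assumed calling convention): each rank contributes a
        # 1-element CUDA tensor and receives all ranks' values back, e.g.
        #   totals = self.accelerator.gather(torch.tensor([n_local]).cuda())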