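            # batch_size of the form 'auto' or 'auto:N': the optional value after ':' is kept as the batch schedule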
            batch_size = batch_size.split(':')
            self.batch_size_per_gpu = batch_size[0]
            self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
        else:
            self.batch_size_per_gpu = int(batch_size)
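
        # distributed setup: when the model was loaded from a string, optionally wrap it with HF Accelerate for data-parallel evaluation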
        if isinstance(pretrained, str):
            if gpus > 1:
                if parallelize:
                    if accelerator.num_processes > 1:
                        raise RuntimeError(
                            'Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher.'
                        )
                    else:
                        pass
                elif accelerator.num_processes == 1:
                    self._rank = 0
                    self._world_size = 1
                else:
                    if gpus > accelerator.num_processes:
                        eval_logger.warning(f"WARNING: The number of total system GPUs does not match the number of spawned processes. If you would like to use data parallelism, please launch the script with 'accelerate launch *script*'. Current run will proceed with {accelerator.num_processes} devices.")
                    assert accelerator.distributed_type in [
                        DistributedType.FSDP,
                        DistributedType.MULTI_GPU,
                        DistributedType.MULTI_NPU,
                    ], 'Unsupported distributed type provided. Only DDP and FSDP are supported.'
                    if accelerator.distributed_type == DistributedType.FSDP:
                        self._model = accelerator.prepare(self.model)
                    else:
                        self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
                    self._device = torch.device(f'{accelerator.device}')
                    self.accelerator = accelerator

                    if self.accelerator.is_local_main_process:
                        eval_logger.info(f'Using {gpus} devices with data parallelism')

                    self._rank = self.accelerator.local_process_index
                    self._world_size = self.accelerator.num_processes
        else:
            # a PreTrainedModel was passed in directly, so skip the distributed setup above
            eval_logger.warning('Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration')
            self._rank = 0
            self._world_size = 1

        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(f'Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}')

    @property
    def config(self):
        return self._config

    @property
    def model(self):
        # unwrap the Accelerate/DDP wrapper so callers always see the underlying transformers model
        if hasattr(self, 'accelerator'):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        # end-of-text token; the tokenizer's EOS token id is used for this purpose
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # token prepended as context for loglikelihood requests: custom id if set, else BOS, falling back to EOS
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        if self._max_length:  # use the manually set maximum length if provided
            return self._max_length
        seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')
        for attr in seqlen_config_attrs:
            if hasattr(self.model.config, attr):
                return getattr(self.model.config, attr)
        if hasattr(self.tokenizer, 'model_max_length'):
            # transformers sets model_max_length to this very large sentinel when the true limit is unknown
            if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                return self._DEFAULT_MAX_LENGTH
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace('/', '__')

    @property
    def chat_template(self) -> str: