import os
from datetime import timedelta
from typing import Literal, Optional, Union

import torch
import transformers
from accelerate import Accelerator, InitProcessGroupKwargs
from packaging import version
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
)

from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import (
    Collator,
    clear_torch_cache,
    configure_pad_token,
    get_dtype,
    pad_and_concat,
    stop_sequences_criteria,
)
eval_logger = utils.eval_logger

def _get_accelerate_args(
    device_map_option: Optional[str] = 'auto',
    max_memory_per_gpu: Optional[Union[int, str]] = None,
    max_cpu_memory: Optional[Union[int, str]] = None,
    offload_folder: Optional[str] = './offload',
    gpus: Optional[int] = None,
) -> dict:
    """Build `accelerate`-style dispatch kwargs (`device_map`, `max_memory`,
    `offload_folder`) from per-device memory limits. `gpus` must be provided
    when `max_memory_per_gpu` is set."""
    max_memory = {}
    if max_memory_per_gpu is not None:
        # Apply the same per-GPU memory cap to every visible GPU.
        max_memory_per_gpu_map = {device_idx: max_memory_per_gpu for device_idx in range(gpus)}
        max_memory.update(max_memory_per_gpu_map)
    if max_cpu_memory is not None:
        max_memory['cpu'] = max_cpu_memory

    args = {}
    if max_memory:
        args['max_memory'] = max_memory
    args['device_map'] = device_map_option
    args['offload_folder'] = offload_folder
    return args
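
# Illustrative example (not part of the module): with two GPUs and explicit
# memory caps, the helper yields kwargs that can be forwarded to
# `from_pretrained`:
#
#   _get_accelerate_args(max_memory_per_gpu='20GiB', max_cpu_memory='60GiB', gpus=2)
#   # -> {'max_memory': {0: '20GiB', 1: '20GiB', 'cpu': '60GiB'},
#   #     'device_map': 'auto', 'offload_folder': './offload'}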

@register_model('hf-auto', 'hf', 'huggingface')
class HFLM(TemplateLM):
    """An abstracted HuggingFace model class, usable with both
    `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM`
    backends."""

    AUTO_MODEL_CLASS = None
    _DEFAULT_MAX_LENGTH = 2048
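
    # Illustrative usage: the registered aliases let the harness select this
    # class from the CLI (`lm_eval --model hf --model_args pretrained=gpt2`)
    # or directly in code:
    #
    #   lm = HFLM(pretrained='gpt2', device='cuda', batch_size=8)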
    def __init__(
        self,
        pretrained: Union[str, transformers.PreTrainedModel],
        backend: Optional[Literal['default', 'causal', 'seq2seq']] = 'default',
        revision: Optional[str] = 'main',
        subfolder: Optional[str] = None,
        tokenizer: Optional[
            Union[str, transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]
        ] = None,
        truncation: Optional[bool] = False,
        logits_cache: bool = True,
        max_length: Optional[int] = None,
        device: Optional[str] = 'cuda',
        dtype: Optional[Union[str, torch.dtype]] = 'auto',
        batch_size: Optional[Union[int, str]] = 1,
        max_batch_size: Optional[int] = 64,
        trust_remote_code: Optional[bool] = False,
        use_fast_tokenizer: Optional[bool] = True,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        parallelize: Optional[bool] = False,
        device_map_option: Optional[str] = 'auto',
        max_memory_per_gpu: Optional[Union[int, str]] = None,
        max_cpu_memory: Optional[Union[int, str]] = None,
        offload_folder: Optional[Union[str, os.PathLike]] = './offload',
        peft: Optional[str] = None,
        delta: Optional[str] = None,
        autogptq: Optional[Union[bool, str]] = False,
        **kwargs,
    ) -> None:
        super().__init__()
        # Optionally accept an already-instantiated model in place of a name/path.
        if not isinstance(pretrained, str):
            eval_logger.warning(
                '`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. '
                'Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way.'
            )
            assert not parallelize, '`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`'
            self._model = pretrained
            self._device = self._model.device
            self._config = self._model.config
            gpus = 0
            if tokenizer:
                assert isinstance(tokenizer, (transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast))
                self.tokenizer = tokenizer
            else:
                # Fall back to the tokenizer shipped with the model.
                model_name = self._model.name_or_path
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_name, revision=revision, trust_remote_code=trust_remote_code, use_fast=use_fast_tokenizer
                )
        else:
            assert isinstance(device, str)
            assert isinstance(pretrained, str)
            assert isinstance(batch_size, (int, str))
            gpus = torch.cuda.device_count()
            # Use a generous timeout so long-running evaluations do not trip
            # the distributed process-group watchdog.
            accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
            accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
            if accelerator.num_processes > 1:
                self.accelerator = accelerator
            if 'npu' in accelerator.device.type:
                gpus = torch.npu.device_count()
            # If not sharding the model and not multi-process, place it on the
            # single requested device.
            if not (parallelize or accelerator.num_processes > 1):
                device_list = set(
                    ['cuda', 'cpu']
                    + [f'cuda:{i}' for i in range(gpus)]
                    + ['mps', 'mps:0']
                    + [f'npu:{i}' for i in range(gpus)]
                )
                if device and device in device_list:
                    self._device = torch.device(device)
                    eval_logger.info(f"Using device '{device}'")
                    if device in ('mps', 'mps:0') and version.parse(torch.__version__) < version.parse('2.1'):
                        raise RuntimeError(f'mps requires torch >= 2.1. You have {torch.__version__}')
                else:
                    eval_logger.info('Device not specified')
                    eval_logger.info(f'Cuda Available? {torch.cuda.is_available()}')
                    self._device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
            else:
                if device != 'cuda':
                    eval_logger.info(
                        f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
                    )
                # The actual placement is handled by accelerate / `device_map`.
                self._device = torch.device(device)
        revision = revision + ('/' + subfolder if subfolder is not None else '')
        self._get_config(pretrained, revision=revision, trust_remote_code=trust_remote_code)
        self._get_backend(config=self.config, backend=backend, trust_remote_code=trust_remote_code)
        self._create_tokenizer(
            pretrained, tokenizer, revision=revision, trust_remote_code=trust_remote_code,
            use_fast_tokenizer=use_fast_tokenizer,
        )
        # Only instantiate the model here when given a name/path; a
        # pre-initialized model was already stored above.
        if isinstance(pretrained, str):
            self._create_model(
                pretrained=pretrained, revision=revision, dtype=dtype, trust_remote_code=trust_remote_code,
                parallelize=parallelize, gpus=gpus, device_map_option=device_map_option,
                max_memory_per_gpu=max_memory_per_gpu, max_cpu_memory=max_cpu_memory,
                offload_folder=offload_folder, peft=peft, delta=delta, autogptq=autogptq, **kwargs,
            )
        if isinstance(self.model, torch.nn.Module):
            self.model.eval()
            self.model.tie_weights()
        if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == 'mps'):
            # Manually move the model unless accelerate or GPTQ already placed it.
            if not (parallelize or autogptq or hasattr(self, 'accelerator')):
                try:
                    self.model.to(self.device)
                except ValueError:
                    eval_logger.debug(
                        'Failed to place model onto specified device. This may be because the model is quantized '
                        'via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this '
                        'message is safe to ignore.'
                    )
        self.truncation = truncation
        self.logits_cache = logits_cache
        self.vocab_size = self.tokenizer.vocab_size
        # Ensure the tokenizer has a pad token (some models ship without one).
        self.tokenizer = configure_pad_token(self.tokenizer, model_config=self.config)
        self.add_bos_token = add_bos_token
        if 'gemma' in getattr(self.config, 'model_type', ''):
            self.add_bos_token = True
            eval_logger.info(
                f"Model type is '{self.config.model_type}', part of the Gemma family; a BOS token will be used, "
                'as Gemma underperforms without it.'
            )
        self._max_length = max_length
        self.pretrained = pretrained
        self.delta = delta
        self.peft = peft
        self.revision = revision
        self.batch_schedule = 1
        self.batch_sizes = {}
        self.max_batch_size = max_batch_size
        if str(batch_size).startswith('auto'):