@property
def chat_template(self) -> str:
    if self.tokenizer.chat_template is not None:
        return self.tokenizer.chat_template
    return self.tokenizer.default_chat_template
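# Chooses which HF Auto* class to load the model with: an explicit `backend` override
# ('causal' or 'seq2seq') wins; otherwise the config's model_type is looked up in the
# transformers seq2seq/causal registries, defaulting to AutoModelForCausalLM.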
def _get_backend(
    self,
    config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
    backend: Optional[Literal['default', 'causal', 'seq2seq']] = 'default',
    trust_remote_code: Optional[bool] = False,
) -> None:
    assert backend in ['default', 'causal', 'seq2seq']
    if backend != 'default':
        if backend == 'causal':
            self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
        elif backend == 'seq2seq':
            self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
        eval_logger.info(f"Overrode HF model backend type, and using type '{backend}'")
    elif getattr(config, 'model_type') in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:
        self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
    elif getattr(self.config, 'model_type') in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
        self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
    else:
        if not trust_remote_code:
            eval_logger.warning('HF model type is neither marked as CausalLM or Seq2SeqLM. This is expected if your model requires `trust_remote_code=True` but may be an error otherwise.')
        self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
    assert self.AUTO_MODEL_CLASS in [transformers.AutoModelForCausalLM, transformers.AutoModelForSeq2SeqLM]
    return None
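# Loads the model's HF config from the Hub (or a local path) and caches it on self._config.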
def _get_config(self, pretrained: str, revision: str = 'main', trust_remote_code: bool = False) -> None:
    self._config = transformers.AutoConfig.from_pretrained(pretrained, revision=revision, trust_remote_code=trust_remote_code)
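# Instantiates self._model: handles accelerate-style multi-GPU sharding (`parallelize`),
# explicit device_map placement, 4-bit/bitsandbytes kwargs, AutoGPTQ quantized checkpoints,
# and optionally applies a PEFT adapter or additive delta weights on top of the base model.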
def _create_model(
    self,
    pretrained: str,
    revision: Optional[str] = 'main',
    dtype: Optional[Union[str, torch.dtype]] = 'auto',
    trust_remote_code: Optional[bool] = False,
    parallelize: Optional[bool] = False,
    gpus: Optional[int] = None,
    device_map_option: Optional[str] = 'auto',
    max_memory_per_gpu: Optional[Union[int, str]] = None,
    max_cpu_memory: Optional[Union[int, str]] = None,
    offload_folder: Optional[str] = './offload',
    peft: Optional[str] = None,
    delta: Optional[str] = None,
    autogptq: Optional[Union[bool, str]] = False,
    **kwargs,
) -> None:
    model_kwargs = kwargs if kwargs else {}
    if parallelize:
        model_kwargs.update(_get_accelerate_args(device_map_option, max_memory_per_gpu, max_cpu_memory, offload_folder, gpus))
    elif 'device_map' not in model_kwargs:
        # place the whole model on this process's device
        if hasattr(self, 'accelerator'):
            model_kwargs.update({'device_map': {'': f'{self.accelerator.device}'}})
        else:
            model_kwargs.update({'device_map': {'': str(self.device)}})
    if not autogptq:
        if model_kwargs.get('load_in_4bit', None):
            assert version.parse(transformers.__version__) >= version.parse('4.30.0'), 'load_in_4bit requires transformers >= 4.30.0'
        if version.parse(transformers.__version__) >= version.parse('4.30.0'):
            if model_kwargs.get('load_in_4bit', None):
                if model_kwargs.get('bnb_4bit_compute_dtype', None):
                    model_kwargs['bnb_4bit_compute_dtype'] = get_dtype(model_kwargs['bnb_4bit_compute_dtype'])
        self._model = self.AUTO_MODEL_CLASS.from_pretrained(pretrained, revision=revision, torch_dtype=get_dtype(dtype), trust_remote_code=trust_remote_code, **model_kwargs)
    else:
        try:
            from auto_gptq import AutoGPTQForCausalLM
        except ModuleNotFoundError:
            raise Exception('Tried to load auto_gptq, but auto-gptq is not installed. Please install auto-gptq via pip install lm-eval[gptq] or pip install -e .[gptq]')
        self._model = AutoGPTQForCausalLM.from_quantized(
            pretrained,
            trust_remote_code=trust_remote_code,
            model_basename=None if autogptq is True else Path(autogptq).stem,
            use_safetensors=True if autogptq is True else autogptq.endswith('.safetensors'),
            **model_kwargs,
        )
    if peft and delta:
        raise ValueError("Cannot use both 'peft' and 'delta' options at the same time.")
    if peft:
        if model_kwargs.get('load_in_4bit', None):
            if version.parse(PEFT_VERSION) < version.parse('0.4.0'):
                raise AssertionError('load_in_4bit requires peft >= 0.4.0')
        if self._model.config.vocab_size != len(self.tokenizer):
            # log before resizing so the reported model vocab size is the pre-resize value
            eval_logger.info(f"Model config indicates vocab_size='{self._model.config.vocab_size}', but found tokenizer with vocab size '{len(self.tokenizer)}'. Resizing model embedding layer...")
            self._model.resize_token_embeddings(len(self.tokenizer))
        self._model = PeftModel.from_pretrained(self._model, peft, revision=revision)
    elif delta:
        if autogptq:
            eval_logger.warning('Delta weights might trigger unexpected behavior when used with AutoGPTQ.')
        _model_delta = self.AUTO_MODEL_CLASS.from_pretrained(delta, revision=revision, torch_dtype=get_dtype(dtype), trust_remote_code=trust_remote_code, **model_kwargs)
        for name, param in self._model.state_dict().items():
            try:
                param.data += _model_delta.state_dict()[name]
            except KeyError:
                raise KeyError(f'Delta model is missing weights for layer: {name}')
            except Exception as e:
                raise RuntimeError(f'Failed to add delta weights to layer {name}. Error: {e}')
        del _model_delta
    return None
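# Builds self.tokenizer: uses an explicitly provided tokenizer (name or instance) when
# given, otherwise falls back to the pretrained model's own name_or_path.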
def _create_tokenizer(
    self,
    pretrained: Union[str, transformers.PreTrainedModel],
    tokenizer: Optional[Union[str, transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]],
    revision: Optional[str] = 'main',
    trust_remote_code: Optional[bool] = False,
    use_fast_tokenizer: Optional[bool] = True,
) -> None:
    if tokenizer:
        if isinstance(tokenizer, str):
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer, revision=revision, trust_remote_code=trust_remote_code, use_fast=use_fast_tokenizer)
        else:
            assert isinstance(tokenizer, (transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast))
            self.tokenizer = tokenizer
    else:
        if isinstance(pretrained, str):
            model_name = pretrained
        else:
            model_name = self.model.name_or_path
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, use_fast=use_fast_tokenizer)
    return None
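# Probes the largest batch size that runs without OOM, using accelerate's
# find_executable_batch_size starting from self.max_batch_size. If example requests are
# provided, their encoded context/continuation lengths bound the test sequence length.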
def _detect_batch_size(self, requests=None, pos: int = 0):
    if requests:
        _, context_enc, continuation_enc = requests[pos]
        max_length = len((context_enc + continuation_enc)[-(self.max_length + 1):][:-1])
        max_context_enc = len(context_enc[-(self.max_length + 1):])
        max_cont_enc = len(continuation_enc[-(self.max_length + 1):])
    else:
        max_length = self.max_length
        max_context_enc = max_length
        max_cont_enc = max_length

    @find_executable_batch_size(starting_batch_size=self.max_batch_size)