adaptive_batch_size = None
if self.batch_size == 'auto':
    # probe for the largest batch size that fits in memory
    print('Passed argument batch_size = auto. Detecting largest batch size')
    batch_size = self._detect_batch_size()
    print(f'Determined largest batch size: {batch_size}')
    adaptive_batch_size = batch_size
batch_size = self.batch_size if self.batch_size != 'auto' else adaptive_batch_size if adaptive_batch_size is not None else 0
batch_fn = self._batch_scheduler if self.batch_size == 'auto' and not adaptive_batch_size else None
# group requests by their generation kwargs so that differing sampling settings are never mixed in one batch
re_ords = Collator([reg.args for reg in requests], sort_fn=_collate, group_by='gen_kwargs', group_fn=lambda x: x[1])
chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
for chunk in chunks:
    contexts, all_gen_kwargs = zip(*chunk)
    # all requests in a chunk share the same generation kwargs (guaranteed by the grouping above)
    gen_kwargs = all_gen_kwargs[0]
    until = None
    if isinstance(gen_kwargs, dict):
        kwargs = copy.deepcopy(gen_kwargs)
        if 'until' in kwargs.keys():
            until = kwargs.pop('until')
            if isinstance(until, str):
                until = [until]
            elif not isinstance(until, list):
                raise ValueError(f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}")
    else:
        raise ValueError(f'Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}')
    # always stop on the EOS token in addition to any task-specified stop sequences
    eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False)
    if not until:
        until = [eos]
    else:
        until.append(eos)
    if 'max_gen_toks' in kwargs.keys():
        max_gen_toks = kwargs.pop('max_gen_toks')
    else:
        max_gen_toks = self.max_gen_toks
    # set the maximum input length in tokens ("context_enc")
    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
        # decoder-only models must leave room for the generated tokens
        max_ctx_len = self.max_length - max_gen_toks
    elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
        # encoder-decoder models can give the encoder the whole max length
        max_ctx_len = self.max_length
    # encode, pad, and (left-)truncate the contexts for this batch
    context_enc, attn_masks = self.tok_batch_encode(contexts, left_truncate_len=max_ctx_len, truncation=self.truncation)
    context_enc = context_enc.to(self.device)
    attn_masks = attn_masks.to(self.device)
    if 'max_length' not in kwargs:
        kwargs['max_length'] = context_enc.shape[1] + max_gen_toks
    # perform batched generation
    cont = self._model_generate(context=context_enc, attention_mask=attn_masks, stop=until, **kwargs)
    cont_toks_list = cont.tolist()
    for cont_toks, context in zip(cont_toks_list, contexts):
        # discard the context (and left padding) tokens for decoder-only models
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            cont_toks = cont_toks[context_enc.shape[1]:]
        s = self.tok_decode(cont_toks)
        # cut the generation at the first occurrence of any stop sequence
        for term in until:
            if len(term) > 0:
                # skip '' (e.g. the seq2seq case where the decoded EOS token is empty)
                s = s.split(term)[0]
        res.append(s)
        self.cache_hook.add_partial('generate_until', (context, gen_kwargs), s)
        pbar.update(1)
# reorder this group of results back to its original, unsorted order
res = re_ords.get_original(res)
pbar.close()
return res
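# Illustrative sketch (not part of the harness source): the post-hoc stop-sequence
# truncation above keeps only the text before the first stop string. The helper and
# sample strings below are hypothetical, standing in for a decoded generation and its stops.
def _truncate_at_stops(generation: str, stops: list) -> str:
    for term in stops:
        if len(term) > 0:  # skip '' so an empty decoded EOS never truncates everything
            generation = generation.split(term)[0]
    return generation

assert _truncate_at_stops('42\n\nQuestion:', ['\n\n', '</s>']) == '42'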
def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str:
    try:
        chat_templated = self.tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
    except jinja2.exceptions.TemplateError:
        eval_logger.warning('Failed to apply chat template. Removing the system role from the chat history.')
        chat_history = [msg for msg in chat_history if msg['role'] != 'system']
        chat_templated = self.tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
    return chat_templated
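# Illustrative sketch (not part of the harness source): the chat history passed to
# apply_chat_template above is the standard list-of-message-dicts format. The checkpoint
# and messages below are examples only; tokenizers whose Jinja template rejects a system
# role raise the TemplateError that the fallback above handles.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta')  # example checkpoint
history = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'What is the capital of France?'},
]
prompt = tok.apply_chat_template(history, tokenize=False, add_generation_prompt=True)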
def get_model_info(self) -> dict:
    """Collect basic metadata about the underlying model."""

    def get_model_num_params(model) -> int:
        if hasattr(model, 'num_parameters'):
            return model.num_parameters()
        if hasattr(model, 'parameters'):
            return sum(p.numel() for p in model.parameters())
        else:
            return -1

    def get_model_dtype(model) -> str:
        if hasattr(model, 'dtype'):
            return model.dtype
        else:
            return ''

    def get_model_sha(pretrained: str, revision: str) -> str:
        try:
            model_info = HfApi().model_info(repo_id=pretrained, revision=revision)
            return model_info.sha
        except Exception as e:
            eval_logger.warning(f'Failed to get model SHA for {pretrained} at revision {revision}. Error: {e}')
            return ''

    model_info = {
        'model_num_parameters': get_model_num_params(self._model),
        'model_dtype': get_model_dtype(self._model),
        'model_revision': self.revision,
        'model_sha': get_model_sha(self.pretrained, self.revision),
    }
    if self.peft:
        model_info['peft_sha'] = get_model_sha(self.peft, self.revision)
    if self.delta:
        model_info['delta_sha'] = get_model_sha(self.delta, self.revision)
    return model_info
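# Illustrative sketch (not part of the harness source): get_model_sha above relies on
# huggingface_hub's HfApi.model_info, which returns repo metadata including the commit SHA.
# The repo id below is only an example; any public Hub model works.
from huggingface_hub import HfApi

info = HfApi().model_info(repo_id='EleutherAI/pythia-70m', revision='main')
print(info.sha)  # commit hash resolved for the given revision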
# File: lm-evaluation-harness-main/lm_eval/models/mamba_lm.py
from typing import Optional, Union
import torch