        def _collate_gen(_requests):
            return (-len(_requests[0][1]), _requests[0][0])
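        # Sorting longest-first means the first request in a chunk bounds the padded
        # length (so any OOM surfaces immediately), and grouping by gen_kwargs keeps
        # every chunk to a single set of sampling parameters.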
        re_ords = Collator(requests, _collate_gen, group_by='gen_kwargs')
        chunks = re_ords.get_batched(
            n=int(self.batch_size) if self.batch_size != 'auto' else 0, batch_fn=None
        )
        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm or self.rank != 0,
            desc='Running generate_until requests',
        )
        for chunk in chunks:
            context_and_encoding, all_gen_kwargs = zip(*chunk)
            context, context_encoding = zip(*context_and_encoding)
            gen_kwargs = all_gen_kwargs[0]
            until = None
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)
                if 'until' in kwargs.keys():
                    until = kwargs.pop('until')
                    if isinstance(until, str):
                        until = [until]
                    elif not isinstance(until, list):
                        raise ValueError(
                            f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
                        )
            else:
                raise ValueError(
                    f'Expected `kwargs` to be of type `dict` but got {gen_kwargs}'
                )
            eos = self.tokenizer.decode(self.eot_token_id)
            if not until:
                until = [eos]
            else:
                until.append(eos)
            if 'max_gen_toks' in kwargs.keys():
                max_gen_toks = kwargs.pop('max_gen_toks')
            else:
                max_gen_toks = self.max_gen_toks
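            # Contexts are left-truncated so that max_gen_toks new tokens always fit:
            # e.g. (illustrative numbers) max_length=2048 with max_gen_toks=256 keeps
            # only the last 1792 context tokens.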
            max_ctx_len = self.max_length - max_gen_toks
            context_encoding = [x[-max_ctx_len:] for x in context_encoding]
            cont = self._model_generate(
                requests=context_encoding,
                generate=True,
                max_tokens=max_gen_toks,
                stop=until,
                **kwargs,
            )
            for output, context in zip(cont, context):
                generated_text = output.outputs[0].text
                res.append(generated_text)
                self.cache_hook.add_partial(
                    'generate_until', (context, gen_kwargs), generated_text
                )
                pbar.update(1)
        pbar.close()
        return re_ords.get_original(res)

    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
    ) -> List[Tuple[float, bool]]:
        res = []
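        # Sort by descending total token count (context + continuation); the token
        # tuple acts as a deterministic tie-breaker, so identical inputs end up
        # adjacent within a batch.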
        def _collate(x):
            toks = x[1] + x[2]
            return (-len(toks), tuple(toks))

        re_ord = Collator(requests, sort_fn=_collate)
        chunks = re_ord.get_batched(
            n=int(self.batch_size) if self.batch_size != 'auto' else 0, batch_fn=None
        )
        pbar = tqdm(
            total=len(requests),
            disable=disable_tqdm,
            desc='Running loglikelihood requests',
        )
        for chunk in chunks:
            inputs = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                inp = (context_enc + continuation_enc)[-self.max_length:]
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - self.max_length
                )
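                # e.g. (illustrative) 2000 context + 100 continuation tokens with
                # max_length=2048: the overflow is 52, so ctxlen = 2000 - 52 = 1948,
                # i.e. the continuation starts at index 1948 of the truncated input.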
                inputs.append(inp)
                ctxlens.append(ctxlen)

            outputs = self._model_generate(requests=inputs, generate=False)

            for output, ctxlen, (cache_key, _, _), inp in zip(
                outputs, ctxlens, chunk, inputs
            ):
                answer = self._parse_logprobs(tokens=inp, outputs=output, ctxlen=ctxlen)
                res.append(answer)
                if cache_key is not None:
                    self.cache_hook.add_partial('loglikelihood', cache_key, answer)
                pbar.update(1)

        pbar.close()
        return re_ord.get_original(res)

    @staticmethod
    def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]:
        continuation_logprobs_dicts = outputs.prompt_logprobs

        def coerce_logprob_to_num(logprob):
            return getattr(logprob, 'logprob', logprob)

        continuation_logprobs_dicts = [
            {
                token: coerce_logprob_to_num(logprob)
                for token, logprob in logprob_dict.items()
            }
            if logprob_dict is not None
            else None
            for logprob_dict in continuation_logprobs_dicts
        ]
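        # prompt_logprobs[i] holds the log-probabilities predicted for prompt token i
        # given the tokens before it (vLLM returns None for the first prompt position,
        # hence the None handling above), so slicing from ctxlen onward scores exactly
        # the continuation tokens.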
        continuation_logprobs = sum(
            logprob_dict.get(token)
            for token, logprob_dict in zip(
                tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
            )
        )

        is_greedy = True
        for token, logprob_dict in zip(
            tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
        ):
            if logprob_dict:
                top_token = max(logprob_dict, key=logprob_dict.get)
                if top_token != token:
                    is_greedy = False
                    break

        return (continuation_logprobs, is_greedy)

    @staticmethod
    def modify_gen_kwargs(kwargs: dict) -> dict:
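        # Translate HF-style generation kwargs into vLLM sampling arguments,
        # e.g. (illustrative) {'do_sample': False} becomes {'temperature': 0.0,
        # 'skip_special_tokens': False, 'spaces_between_special_tokens': False}.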
        do_sample = kwargs.pop('do_sample', None)
        if do_sample is False and 'temperature' not in kwargs:
            eval_logger.debug(
                'Got `do_sample=False` and no temperature value, setting VLLM temperature to 0.0 ...'
            )
            kwargs['temperature'] = 0.0
        kwargs['skip_special_tokens'] = kwargs.get('skip_special_tokens', False)
        kwargs['spaces_between_special_tokens'] = kwargs.get(
            'spaces_between_special_tokens', False
        )
        return kwargs
# File: lm-evaluation-harness-main/lm_eval/prompts/__init__.py |
import ast |
import os |
from typing import Dict |
from lm_eval import utils |
from lm_eval.utils import eval_logger |