# File: lm-evaluation-harness-main/lm_eval/models/huggingface.py
# (fragment: the file header, imports, and the enclosing HFLM class are
# truncated in this dump; below are the tail of _loglikelihood_tokens and
# the full generate_until method)

            # gather the log-probability of each continuation token -> [1, seq]
            logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)
            # answer pair: (summed continuation logprob, whether greedy
            # decoding reproduces the continuation exactly)
            answer = (float(logits.sum()), bool(max_equal))
            res.append(answer)
            self.cache_hook.add_partial('loglikelihood', cache_key, answer)
    # after all batches are scored, restore the original request order
    return re_ord.get_original(res)
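# Editorial sketch (not from the harness source): a minimal, self-contained
# demo of the gather step above, assuming only PyTorch. From a
# [batch, seq, vocab] log-softmax tensor, pick each target token's
# log-probability and sum them into a continuation log-likelihood score.
import torch

logprobs = torch.log_softmax(torch.randn(1, 4, 10), dim=-1)  # [1, seq=4, vocab=10]
targets = torch.tensor([[1, 5, 2, 7]])                       # [1, seq] token ids
picked = torch.gather(logprobs, 2, targets.unsqueeze(-1)).squeeze(-1)  # [1, seq]
score = float(picked.sum())  # summed log-likelihood of the continuation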
def generate_until(self, requests, disable_tqdm: bool = False):
    res = defaultdict(list)
    re_ords = {}

    def _collate(x):
        # sort by descending token length so the first request in a batch sets
        # the padded length, OOMs surface early, and time estimates are
        # overestimates rather than underestimates
        toks = self.tok_encode(x[0])
        return (-len(toks), x[0])

    # group requests by their generation kwargs, so that e.g. greedy decoding
    # and temperature sampling never end up in the same batch
    grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
    for key, reqs in grouper.get_grouped().items():
        # within each kwargs group, reorder requests by token length, descending
        re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate)

    pbar = tqdm(total=len(requests), disable=disable_tqdm or self.rank != 0)
    # execute each group of same-kwargs requests, batch by batch
    for key, re_ord in re_ords.items():
        chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=self.batch_size)
        for chunk in tqdm(chunks, disable=self.rank != 0):
            contexts, all_gen_kwargs = zip(*chunk)
            # all gen kwargs in the batch are identical; the grouper guarantees it
            gen_kwargs = all_gen_kwargs[0]
            # unpack the stop sequences ("until") from the generation kwargs
            until = None
            if isinstance(gen_kwargs, dict):
                kwargs = copy.deepcopy(gen_kwargs)
                if 'until' in kwargs.keys():
                    until = kwargs.pop('until')
                    if isinstance(until, str):
                        until = [until]
                    elif not isinstance(until, list):
                        raise ValueError(
                            f"Expected `kwargs['until']` to be of type Union[str, list] but got {until}"
                        )
            else:
                raise ValueError(
                    f"Expected `kwargs` to be of type `dict` but got {gen_kwargs}"
                )
            # always include the EOS token among the stop sequences
            eos = self.tok_decode(self.eot_token_id)
            if not until:
                until = [eos]
            else:
                until.append(eos)
            if 'max_gen_toks' in kwargs.keys():
                max_gen_toks = kwargs.pop('max_gen_toks')
            else:
                max_gen_toks = self.max_gen_toks
            # only the primary stop sequence is passed to the model; the rest
            # are applied post hoc. Leave room in the context for generation.
            primary_until = [until[0]]
            max_ctx_len = self.max_length - max_gen_toks
            # encode, pad, and left-truncate the batch of contexts
            context_enc, attn_masks = self.tok_batch_encode(
                contexts, left_truncate_len=max_ctx_len, truncation=self.truncation
            )
            context_enc = context_enc.to(self.device)
            attn_masks = attn_masks.to(self.device)
            if 'max_length' not in kwargs:
                kwargs['max_length'] = context_enc.shape[1] + max_gen_toks
            # batched generation
            cont = self._model_generate(
                context=context_enc, attention_mask=attn_masks, stop=primary_until, **kwargs
            )
            cont_toks_list = cont.tolist()
            for cont_toks, context in zip(cont_toks_list, contexts):
                # discard the context (and left-padding) tokens
                cont_toks = cont_toks[context_enc.shape[1]:]
                s = self.tok_decode(cont_toks)
                # apply the remaining stop sequences post hoc; skip '' for the
                # seq2seq case where the EOS token decodes to an empty string
                for term in until:
                    if len(term) > 0:
                        s = s.split(term)[0]
                res[key].append(s)
                self.cache_hook.add_partial('generate_until', (context, gen_kwargs), s)
                pbar.update(1)
        # restore this group's results to their original (pre-sort) order
        res[key] = re_ord.get_original(res[key])
    pbar.close()
    return grouper.get_original(res)
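# Editorial sketch (not from the harness source): the post-hoc stop-sequence
# trimming used in generate_until, isolated as a pure function. Decoded text
# is cut at the first occurrence of any stop string, mirroring
# `s = s.split(term)[0]` above.
def trim_at_stop_sequences(text: str, until: list) -> str:
    for term in until:
        if len(term) > 0:  # skip '', e.g. a seq2seq EOS that decodes to empty
            text = text.split(term)[0]
    return text

assert trim_at_stop_sequences("42\nQ: next question", ["\nQ:", "</s>"]) == "42"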
# File: lm-evaluation-harness-main/lm_eval/models/openai_completions.py |
import copy |
import os |
from collections import defaultdict |
from importlib.util import find_spec |
from typing import List, Literal, Optional, Tuple |
from tqdm import tqdm |
import lm_eval.models.utils |
from lm_eval import utils |
from lm_eval.api.model import LM, TemplateLM |
from lm_eval.api.registry import register_model |
from lm_eval.models.utils import retry_on_specific_exceptions |
from lm_eval.utils import eval_logger |
def get_result(response) -> Tuple[float, bool]:
    """Sum the continuation log-probabilities in a completions response and
    report whether every continuation token was the model's argmax choice."""
    is_greedy = True
    logprobs = response.logprobs.token_logprobs
    continuation_logprobs = sum(logprobs)
    for i in range(len(logprobs)):
        # the sampled token string at position i, and the argmax candidate
        # among the reported top logprobs; note `tokens[i]` (a string), not
        # `token_logprobs[i]` (a float), so the comparison below is meaningful
        token = response.logprobs.tokens[i]
        top_tokens = response.logprobs.top_logprobs[i]
        top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
        if top_token != token:
            is_greedy = False
            break
    return (continuation_logprobs, is_greedy)
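# Editorial usage sketch (not part of this file): get_result expects the
# legacy OpenAI Completions logprobs layout, i.e. parallel `tokens`,
# `token_logprobs`, and `top_logprobs` lists; the mock below assumes exactly
# that shape.
from types import SimpleNamespace

_mock = SimpleNamespace(logprobs=SimpleNamespace(
    tokens=[' Paris', '.'],
    token_logprobs=[-0.1, -0.05],
    top_logprobs=[{' Paris': -0.1, ' Lyon': -2.3}, {'.': -0.05, '!': -3.0}],
))
assert get_result(_mock) == (sum([-0.1, -0.05]), True)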
def oa_completion(client, chat: bool = False, **kwargs):
    if not find_spec('openai') or not find_spec('tiktoken'):
        raise Exception(
            "attempted to use 'openai' LM type, but package `openai` or `tiktoken` "
            "are not installed. Please install these via `pip install lm-eval[openai]` "
            "or `pip install -e .[openai]`"
        )
    else:
        import openai

    def _exception_callback(e: Exception, sleep_time: float) -> None:
        import traceback
        traceback.print_exc()
    # (remainder of oa_completion is truncated in this dump)