    def __getattr__(self, attr: str):
        lm_attr = getattr(self.lm, attr)
        if attr not in ["loglikelihood", "loglikelihood_rolling", "generate_until"]:
            eval_logger.debug(f"Passing through attribute '{attr}' to underlying LM")
            return lm_attr

        def fn(requests):
            res = []
            remaining_reqs = []
            warned = False
            # Figure out which requests are already cached and which still need to run.
            eval_logger.info(f"Loading '{attr}' responses from cache '{self.cache_db}' where possible...")
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # Non-greedy sampling is non-deterministic, so serving cached
                    # results would wrongly repeat the same "sample" across runs.
                    if not warned:
                        eval_logger.warning(f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests.")
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]
                    assert ob is not None
                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}")
            # Run the underlying LM only on the requests that missed the cache.
            rem_res = getattr(self.lm, attr)(remaining_reqs)
            # Splice fresh results back into the None placeholder slots, caching
            # each one under its argument hash as we go.
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                while res[resptr] is not None:
                    resptr += 1
                res[resptr] = r
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()
            return res

        return fn
    def get_cache_hook(self):
        return CacheHook(self)
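# Illustrative usage sketch (not part of the original file), assuming the
# enclosing class above is lm-eval's CachingLM. `lm` is any LM instance and
# the cache path is an arbitrary example, not a required location.
def _example_cached_loglikelihood(lm, requests, cache_path="lm_cache/example.db"):
    cached_lm = CachingLM(lm, cache_path)
    # Cache misses are computed by the wrapped model once, committed to the
    # SqliteDict, and served from disk on identical requests thereafter.
    return cached_lm.loglikelihood(requests)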
class TemplateLM(LM):

    @property
    @abc.abstractmethod
    def eot_token_id(self):
        pass

    @property
    def prefix_token_id(self):
        # Token used as the sole "context" when scoring a continuation against
        # an empty context; defaults to the end-of-text token.
        return self.eot_token_id

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs):
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(self, requests, **kwargs):
        pass
    def _encode_pair(self, context, continuation):
        # Move any trailing whitespace from the context onto the continuation,
        # so the tokenizer sees the space attached to the continuation text.
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        model_class = getattr(self, "AUTO_MODEL_CLASS", None)
        if model_class == transformers.AutoModelForSeq2SeqLM:
            # Encoder-decoder models consume context and continuation separately.
            context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
        else:
            # For decoder-only models, encode the concatenated string and slice
            # off the context tokens, so that merges spanning the boundary match
            # what the model would actually see at inference time.
            whole_enc = self.tok_encode(context + continuation)
            context_enc = self.tok_encode(context)
            context_enc_len = len(context_enc)
            continuation_enc = whole_enc[context_enc_len:]
        return (context_enc, continuation_enc)
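    # Worked example (illustrative): with a BPE tokenizer, encoding the context
    # "Hello" and the continuation " world" separately can yield different
    # tokens than encoding "Hello world" as one string, because BPE merges can
    # cross the boundary. Slicing whole_enc[len(context_enc):] recovers
    # continuation tokens consistent with the concatenated encoding. Note the
    # slice assumes tok_encode(context) is a prefix of
    # tok_encode(context + continuation), which a boundary-crossing merge can
    # occasionally violate.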
    def loglikelihood(self, requests, disable_tqdm: bool = False) -> List[Tuple[float, bool]]:
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # With an empty context, score the continuation after the
                # prefix token alone.
                context_enc, continuation_enc = (
                    [self.prefix_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)
            new_reqs.append(((context, continuation), context_enc, continuation_enc))
        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)
    @abc.abstractmethod
    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False) -> List[Tuple[float, bool]]:
        pass

    @abc.abstractmethod
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        pass
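# Minimal subclass sketch (illustrative, not part of the original file): shows
# which members a concrete TemplateLM must supply. The word-level "tokenizer"
# and constant scorer below are hypothetical placeholders, not a real model.
class _ExampleLM(TemplateLM):
    @property
    def eot_token_id(self):
        return 0  # placeholder end-of-text token id

    def tok_encode(self, string: str, **kwargs):
        # Stand-in tokenizer: one pseudo-token id per whitespace-separated word.
        return [hash(tok) % 50257 for tok in string.split()]

    def _loglikelihood_tokens(self, requests, **kwargs):
        # Dummy scorer: each request is ((context, continuation), context_enc,
        # continuation_enc); pretend every continuation token costs -1.0 and
        # was not the greedy completion.
        return [(-1.0 * len(cont_enc), False) for (_, _, cont_enc) in requests]

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError

    def generate_until(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError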
# File: lm-evaluation-harness-main/lm_eval/api/registry.py |
import logging |
from typing import Callable, Dict |
import evaluate as hf_evaluate |