# File: lm-evaluation-harness-main/lm_eval/models/textsynth.py
import logging
import os
from tqdm import tqdm
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model

logger = logging.getLogger(__name__)

@register_model('textsynth')
class TextSynthLM(LM):
    def __init__(self, engine, truncate: bool = False, **kwargs) -> None:
        super().__init__()
        self.engine = engine  # TextSynth API engine name, e.g. `gptj_6B`
        self.truncate = truncate
        self.api_url = 'https://api.textsynth.com'
        # The API secret key must be supplied via the environment.
        self.api_key = os.environ['TEXTSYNTH_API_SECRET_KEY']
    @property
    def eot_token_id(self):
        # Not supported: TextSynth does not expose its tokenizer.
        raise NotImplementedError()

    @property
    def max_length(self) -> int:
        return 2048

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        # Not used: requests are sent to the API one at a time.
        raise NotImplementedError()

    @property
    def device(self):
        # Not used: inference happens server-side.
        raise NotImplementedError()

    def tok_encode(self, string: str):
        # Not supported: TextSynth does not expose its tokenizer.
        raise NotImplementedError()

    def tok_decode(self, tokens):
        # Not supported: TextSynth does not expose its tokenizer.
        raise NotImplementedError()
    def loglikelihood(self, requests, disable_tqdm: bool = False):
        res = []
        for context, continuation in tqdm(requests, disable=disable_tqdm):
            response = textsynth_completion(
                url=self.api_url + '/v1/engines/' + self.engine + '/logprob',
                headers={'Authorization': 'Bearer ' + self.api_key},
                json={'context': context, 'continuation': continuation},
            )
            resp = response.json()
            if 'logprob' in resp:
                logprob = resp['logprob']
                is_greedy = resp['is_greedy']
                res.append((logprob, is_greedy))
                self.cache_hook.add_partial(
                    'loglikelihood', (context, continuation), (logprob, is_greedy)
                )
            else:
                logger.error(
                    f'The following response does not contain `logprob`. Got:\n{resp}'
                )
                assert False
        return res

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError(
            '`loglikelihood_rolling` is currently not supported due to lack of '
            'input tokenization support from TextSynth.'
        )
    def generate_until(self, requests, disable_tqdm: bool = False):
        if not requests:
            return []

        res = []
        for request in tqdm(requests, disable=disable_tqdm):
            inp = request[0]
            request_args = request[1]
            until = request_args['until']
            response = textsynth_completion(
                url=self.api_url + '/v1/engines/' + self.engine + '/completions',
                headers={'Authorization': 'Bearer ' + self.api_key},
                json={
                    'prompt': inp,
                    'max_tokens': self.max_gen_toks,
                    'top_k': 1,
                    'stop': until,
                },
            )
            resp = response.json()
            if 'text' in resp:
                s = resp['text']
                res.append(s)
                self.cache_hook.add_partial('generate_until', (inp, request_args), s)
            else:
                logger.error(
                    f'The following response does not contain generated `text`. Got:\n{resp}'
                )
                assert False
        return res
    def _model_call(self, inps):
        # Isn't used because we override `loglikelihood` and `generate_until`.
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override `generate_until`.
        raise NotImplementedError()
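
# The methods above call `textsynth_completion`, which is not included in this
# excerpt. Below is a minimal sketch of such a helper, assuming it is a thin
# wrapper over `requests.post` that retries on transient connection errors; the
# backoff values here are illustrative, not the harness's actual retry policy.
import time as _time

import requests as _requests


def textsynth_completion(**kwargs):
    """POST to the TextSynth API, retrying on transient connection errors."""
    backoff_time = 3
    while True:
        try:
            return _requests.post(**kwargs)
        except _requests.exceptions.RequestException:
            # Illustrative policy: exponential backoff, retry indefinitely.
            _time.sleep(backoff_time)
            backoff_time *= 1.5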
# File: lm-evaluation-harness-main/lm_eval/models/utils.py
import collections
import fnmatch
import gc
import itertools
import time
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator, List, Literal, Optional, Tuple, Type, Union
import torch
import transformers
from lm_eval.utils import eval_logger
if TYPE_CHECKING:
    from transformers import PreTrainedTokenizerBase
    from transformers.configuration_utils import PretrainedConfig
def chunks(iter, n: int = 0, fn=None):
    """Yield successive chunks from `iter`: fixed size `n`, or, when `fn` is
    given, sized by `fn(index, iter)` at each position."""
    arr = []
    for i, x in enumerate(iter):
        arr.append(x)
        if len(arr) == (fn(i, iter) if fn else n):
            yield arr
            # Start a new chunk; without this reset every chunk would keep growing.
            arr = []

    # Yield any trailing items that didn't fill a complete chunk.
    if arr:
        yield arr
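
# A quick usage sketch of `chunks` (hypothetical values, for illustration only):
# fixed-size chunking emits a short final chunk, and passing `fn` lets callers
# vary the chunk size per position.
if __name__ == '__main__':
    print(list(chunks(range(7), n=3)))  # [[0, 1, 2], [3, 4, 5], [6]]
    # Variable-size chunks: size 1 for the first element, 2 afterwards.
    print(list(chunks(range(5), fn=lambda i, _: 1 if i == 0 else 2)))
    # [[0], [1, 2], [3, 4]]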