        if not requests:
            return []
        res = []
        requests = [req.args for req in requests]

        def _collate(x):
            toks = self.tok_encode(x[0])
            return len(toks), x[0]

        re_ord = utils.Reorderer(requests, _collate)
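
        # Batch together consecutive reordered requests that share the same
        # generation arguments, so one API call can serve each chunk.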
        def sameuntil_chunks(xs, size):
            ret = []
            lastuntil = xs[0][1]
            for x in xs:
                if len(ret) >= size or x[1] != lastuntil:
                    yield ret, lastuntil
                    ret = []
                    lastuntil = x[1]
                ret.append(x)

            if ret:
                yield ret, lastuntil
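
        # Each chunk shares a single set of request args, so `stop`,
        # `max_tokens`, etc. can be set once per API call.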
        for chunk, request_args in tqdm(
            list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks)
            for context, _ in chunk:
                context_enc = self.tok_encode(context)
                # keep only the rightmost context tokens, leaving room in the
                # window for the generated tokens
                inp = context_enc[-(self.max_length - self.max_gen_toks) :]
                inps.append(inp)

            until = request_args.get("until", ["<|endoftext|>"])
            request_args["temperature"] = request_args.get("temperature", 0)

            response = oa_completion(
                client=self.client,
                model=self.model,
                prompt=inps,
                max_tokens=self.max_gen_toks,
                stop=until,
                seed=self.seed,
                **{
                    k: v
                    for k, v in request_args.items()
                    if k not in {"do_sample", "max_gen_toks", "until"}
                },
            )
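
            # Truncate each completion at the first stop sequence, then cache
            # and collect the result.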
            for resp, (context, args_) in zip(response.choices, chunk):
                s = getattr(resp, "text")

                until_ = until

                for term in until_:
                    if len(term) > 0:
                        s = s.split(term)[0]

                self.cache_hook.add_partial(
                    "generate_until", (context, {"until": until_}), s
                )

                res.append(s)
        return re_ord.get_original(res)

    def _model_call(self, inps):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override generate_until
        raise NotImplementedError()
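
    # Score each full string by summing log-likelihoods over rolling windows
    # of at most `max_length` tokens.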
    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[float]:
        loglikelihoods = []

        for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
            rolling_token_windows = list(
                map(
                    utils.make_disjoint_window,
                    utils.get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length,
                        context_len=1,
                    ),
                )
            )

            # prepend a None cache key so the tuples match the request shape
            # that _loglikelihood_tokens expects
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]

            string_nll = self._loglikelihood_tokens(
                rolling_token_windows, disable_tqdm=True
            )

            # discard is_greedy
            string_nll = [x[0] for x in string_nll]

            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods

@register_model('openai-chat-completions', 'local-chat-completions')
class OpenaiChatCompletionsLM(LM):
    def __init__(
        self,
        model: str = "gpt-3.5-turbo",
        base_url: str = None,
        truncate: bool = False,
        **kwargs,
    ) -> None:
        super().__init__()
        try:
            import openai
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but the `openai` or "
                "`tiktoken` packages are not installed. Please install them "
                "via `pip install lm-eval[openai]` or `pip install -e .[openai]`"
            )
        self.model = model
        self.base_url = base_url
        self.truncate = truncate

        # The API key is read from the OPENAI_API_KEY environment variable;
        # for a locally hosted server, point `base_url` at it instead.
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()

    @property
    def max_length(self) -> int:
        # context-window budget used when truncating inputs
        return 2048

    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        # Not used: requests are dispatched one at a time through the API
        raise NotImplementedError()

    @property
    def device(self):
        # Not used: this model is accessed through a remote API
        raise NotImplementedError()
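
    # Requests are grouped by their generation kwargs so that e.g. greedy and
    # temperature-sampled requests are never executed in the same batch.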
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        res = defaultdict(list)
        re_ords = {}

        grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
        for key, reqs in grouper.get_grouped().items():
            # within each group, reorder by descending context length
            re_ords[key] = utils.Reorderer(
                [req.args for req in reqs], lambda x: (-len(x[0]), x[0])
            )

        pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
        for key, re_ord in re_ords.items():
            # n=1 because each chat completion call carries a single
            # conversation, not a batch of prompts
            chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1)