import copy
from typing import List, Optional, Tuple, Union

import numpy
from tqdm import tqdm

import deepsparse
from deepsparse.utils.data import numpy_log_softmax

import lm_eval.models.utils
from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.model import LM


class DeepSparseLM(LM):
    # NOTE: the class header and the opening lines of __init__ were missing
    # from this excerpt; this scaffold is reconstructed from how the
    # attributes are used below.

    _DEFAULT_MAX_LENGTH = 2048  # assumed default, not shown in the excerpt

    def __init__(
        self,
        pretrained: str,
        tokenizer: Optional[str] = None,
        batch_size: Optional[Union[int, str]] = 1,
        max_gen_toks: Optional[int] = 256,
        max_length: Optional[int] = None,
    ):
        super().__init__()
        self.batch_size = batch_size
        self._max_length = max_length if max_length else self._DEFAULT_MAX_LENGTH
        self._max_gen_toks = max_gen_toks
        self.batch_sizes = {}

        # Build the DeepSparse pipeline; reuse its tokenizer and config
        # unless a tokenizer was passed in explicitly.
        self.model = deepsparse.TextGeneration(
            model_path=pretrained,
            sequence_length=self._max_length,
            batch_size=batch_size,
        )
        self.tokenizer = tokenizer if tokenizer else self.model.tokenizer
        self.config = self.model.config
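
    # Usage sketch (illustrative only; the model stub below is a placeholder,
    # not taken from this file):
    #
    #   lm = DeepSparseLM(pretrained="zoo:some/sparse-llm-stub", batch_size=2)
    #   text = lm.tok_decode(lm.tok_encode("The capital of France is"))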

    def tok_encode(self, string: str) -> List[int]:
        return self.tokenizer.encode(string)

    def tok_decode(self, tokens: List[int]) -> str:
        return self.tokenizer.decode(tokens)

    @property
    def eot_token_id(self):
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
        # Prefer BOS as the scoring prefix; fall back to EOS if the
        # tokenizer defines no BOS token.
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id

    @property
    def max_length(self) -> int:
        return self._max_length

    @property
    def max_gen_toks(self) -> int:
        return self._max_gen_toks

    def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                raise NotImplementedError("Empty context is not supported yet")
            # _encode_pair is defined elsewhere in the class (not in this excerpt)
            context_enc, continuation_enc = self._encode_pair(context, continuation)
            new_reqs.append(((context, continuation), context_enc, continuation_enc))
        return self._loglikelihood_tokens(new_reqs)
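
    # _loglikelihood_tokens returns one (total_logprob, is_greedy) pair per
    # request: the summed log-probability of the continuation given the
    # context, and whether greedy decoding would reproduce the continuation
    # exactly; these are the values downstream lm-eval metrics consume.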
    def _loglikelihood_tokens(
        self,
        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
        disable_tqdm: bool = False,
    ) -> List[Tuple[float, bool]]:
        res = []

        def _collate(x):
            # Sort key: longest (context + continuation) first, so the
            # largest batches run early and memory issues surface sooner.
            toks = x[1] + x[2]
            return (-len(toks), tuple(toks))

        re_ord = utils.Reorderer(requests, _collate)

        for chunk in tqdm(
            list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            batch_inp = []
            batch_cache_key = []
            batch_continuation_enc = []
            for cache_key, context_enc, continuation_enc in chunk:
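                # The truncation on the next line keeps the last
                # (max_length + 1) tokens of context + continuation and drops
                # the final token, because logits at each position predict
                # the *next* token:
                #
                #            CTX           CONT
                #   inp:    0 1 2 3 | 4 5 6 7 8      <- token 9 dropped by [:-1]
                #   logits:   1 2 3 | 4 5 6 7 8 9    <- row i predicts token i+1
                #
                # Only the rows aligned with the continuation are scored below.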
                inp = (context_enc + continuation_enc)[-(self.max_length + 1):][:-1]

                # The DeepSparse pipeline consumes text rather than token ids,
                # so the truncated ids are decoded back into a prompt string.
                batch_inp.append(self.tokenizer.decode(inp))
                batch_cache_key.append(cache_key)
                batch_continuation_enc.append(continuation_enc)
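
            # With max_new_tokens=0 the pipeline generates nothing and only
            # scores the given prompts; include_prompt_logits=True exposes
            # the per-prompt-token logits on the response object.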
            response = self.model(
                prompt=batch_inp,
                max_new_tokens=0,
                output_scores=True,
                include_prompt_logits=True,
            )

            for resp, continuation_enc, cache_key in zip(
                response.generations, batch_continuation_enc, batch_cache_key
            ):
                # resp.score: (seq_len, vocab_size) raw logits for the prompt
                multi_scores = resp.score
                multi_logits = numpy_log_softmax(multi_scores, axis=1)
                # Keep only the rows aligned with the continuation tokens
                continuation_multi_logits = multi_logits[-len(continuation_enc):]
                # Log-probability assigned to each actual continuation token
                continuation_logits = continuation_multi_logits[
                    numpy.arange(len(continuation_enc)), continuation_enc
                ]
                # Exact-match check: would greedy decoding reproduce the continuation?
                greedy_tokens = continuation_multi_logits.argmax(axis=1)
                max_equal = greedy_tokens.tolist() == continuation_enc

                answer = (float(continuation_logits.sum()), bool(max_equal))
                res.append(answer)

                if cache_key is not None:
                    self.cache_hook.add_partial("loglikelihood", cache_key, answer)

        return re_ord.get_original(res)

    def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]:
        raise NotImplementedError(
            "This method is not required by any of our current task integrations"
        )

    def generate_until(self, requests: List[Instance]) -> List[str]:
        if not requests:
            return []
        res = []
        requests = [req.args for req in requests]

        def _collate(x):
            # Sort key: tokenized context length, for more uniform batches.
            toks = self.tok_encode(x[0])
            return (len(toks), x[0])

        re_ord = utils.Reorderer(requests, _collate)

        def sameuntil_chunks(xs, size):
            # Yield chunks of at most `size` requests that share identical
            # generation kwargs, since a batch runs with one set of kwargs.
            ret = []
            lastuntil = xs[0][1]
            for x in xs:
                if len(ret) >= size or x[1] != lastuntil:
                    yield (ret, lastuntil)
                    ret = []
                    lastuntil = x[1]
                ret.append(x)
            if ret:
                yield (ret, lastuntil)
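
        # request_args is the chunk's shared generation-kwargs dict, e.g.
        # (illustrative): {"until": ["\n\n"], "max_gen_toks": 256}.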
        pbar = tqdm(total=len(requests))
        for chunk, request_args in tqdm(
            list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size))
        ):
            inps = []

            # Deep-copy before pop() below so the caller's kwargs stay intact.
            request_args = copy.deepcopy(request_args)

            self._max_gen_toks = request_args.pop("max_gen_toks", self.max_gen_toks)