# Excerpt from an lm-evaluation-harness model adapter for NVIDIA NeMo. The
# imports below are reconstructed for this excerpt; exact module paths may
# differ across lm-evaluation-harness versions.
from copy import deepcopy
from typing import List

import numpy as np
import torch
from tqdm import tqdm

from lm_eval.api.instance import Instance
from lm_eval.models.utils import Collator
from lm_eval.utils import get_rolling_token_windows, make_disjoint_window
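
# Tail of a preceding helper whose signature and body are truncated in this
# excerpt; it concatenates a list of gathered tensors (presumably collected
# across distributed ranks) into one tensor.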
    return torch.cat(gathered_tensors)
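

# Thin wrappers over the NeMo tokenizer, converting between text and token ids.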
def tok_encode(self, string: str):
    return self.tokenizer.text_to_ids(string)

def tok_decode(self, tokens):
    return self.tokenizer.ids_to_text(tokens)
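

# Encoding a context/continuation pair separately can disagree with encoding the
# concatenated string, because BPE merges may cross the boundary. _encode_pair
# therefore encodes the whole string once and takes the continuation as whatever
# the joint encoding adds beyond the separately-encoded context. Trailing spaces
# are moved from the context onto the continuation first, since a leading space
# typically merges into the continuation's first token.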
def _encode_pair(self, context, continuation):
    n_spaces = len(context) - len(context.rstrip())
    if n_spaces > 0:
        continuation = context[-n_spaces:] + continuation
        context = context[:-n_spaces]

    whole_enc = self.tok_encode(context + continuation)
    context_enc = self.tok_encode(context)

    context_enc_len = len(context_enc)
    continuation_enc = whole_enc[context_enc_len:]

    return context_enc, continuation_enc
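

# loglikelihood: tokenize each (context, continuation) request and defer to
# _loglikelihood_tokens. An empty context is replaced by the end-of-text token
# so the model still has something to condition on.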
def loglikelihood(self, requests):
    new_reqs = []
    for context, continuation in [req.args for req in requests]:
        if context == "":
            context_enc, continuation_enc = (
                [self.eot_token_id],
                self.tok_encode(continuation),
            )
        else:
            context_enc, continuation_enc = self._encode_pair(context, continuation)

        new_reqs.append(((context, continuation), context_enc, continuation_enc))

    return self._loglikelihood_tokens(new_reqs)
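

# loglikelihood_rolling: score a whole string by sliding disjoint windows of at
# most max_length - 1 tokens across it, prefixed with the end-of-text token, so
# that every token is predicted exactly once; the per-window log-likelihoods are
# then summed. Each window gets a None cache key prepended to match the
# (cache_key, context_enc, continuation_enc) triples _loglikelihood_tokens expects.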
def loglikelihood_rolling(
    self, requests: List[Instance], disable_tqdm: bool = False
) -> List[float]:
    loglikelihoods = []
    for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
        rolling_token_windows = list(
            map(
                make_disjoint_window,
                get_rolling_token_windows(
                    token_list=self.tok_encode(string),
                    prefix_token=self.eot_token_id,
                    max_seq_len=self.max_length - 1,
                    context_len=1,
                ),
            )
        )
        rolling_token_windows = [(None,) + x for x in rolling_token_windows]

        string_nll = self._loglikelihood_tokens(rolling_token_windows)
        # Keep only the log-likelihoods; discard the is_greedy flags.
        string_nll = [x[0] for x in string_nll]
        loglikelihoods.append(sum(string_nll))
    return loglikelihoods
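

# _loglikelihood_tokens: the batched scoring core. Requests are sorted longest
# first (padding stays small and any OOM shows up early), batched, and run
# through the model with compute_logprob=True. For each request it returns
# (sum of continuation token log-probs, whether the continuation matches the
# model's greedy argmax decoding), caching results via cache_hook.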
def _loglikelihood_tokens(self, requests, disable_tqdm=False):
    res = []

    def _collate(x):
        # Sort key: longest (context + continuation) first, tie-broken by the
        # token ids themselves.
        toks = x[1] + x[2]
        return -len(toks), tuple(toks)

    re_ord = Collator(requests, sort_fn=_collate)
    chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None)
    pbar = tqdm(
        total=len(requests),
        disable=disable_tqdm or self.rank != 0,
        desc="Running loglikelihood requests",
    )
    for chunk in chunks:
        inps = []
        ctxlens = []
        contlens = []
        for _, context_enc, continuation_enc in chunk:
            # Left-truncate so the sequence fits in max_length - 1, leaving room
            # for the single generated token.
            inp = (context_enc + continuation_enc)[-(self.max_length - 1):]
            # Length of context that survives the truncation.
            ctxlen = len(context_enc) - max(
                0, len(context_enc) + len(continuation_enc) - (self.max_length - 1)
            )
            ctxlens.append(ctxlen)
            contlens.append(len(continuation_enc))
            inps.append(self.tok_decode(inp))

        output = self.generate(
            self.model,
            inputs=inps,
            tokens_to_generate=1,
            min_tokens_to_generate=1,
            compute_logprob=True,
            all_probs=True,
        )

        # Drop the one generated token; only the scored input positions matter.
        batch_token_ids = np.asarray(output["token_ids"])[:, :-1]
        batch_logprobs = output["logprob"][:, :-1]
        batch_full_logprob = output["full_logprob"][:, :-1, :]

        # Run the (batch, seq, vocab) argmax only from the shortest context
        # onward to limit memory use.
        min_ctxlen = min(ctxlens)
        batch_greedy_tokens = (
            torch.argmax(batch_full_logprob[:, min_ctxlen - 1:, :], -1).cpu().numpy()
        )

        for (token_ids, greedy_tokens, logprobs, ctxlen, contlen, (cache_key, _, _)) in zip(
            batch_token_ids, batch_greedy_tokens, batch_logprobs, ctxlens, contlens, chunk
        ):
            # logprob[i] scores token i + 1, so the continuation's scores start
            # at index ctxlen - 1.
            logprobs = logprobs[ctxlen - 1:][:contlen]
            logprob = sum(logprobs).tolist()

            continuation_tokens = token_ids[ctxlen:][:contlen]
            len_diff = ctxlen - min_ctxlen
            is_greedy = continuation_tokens == greedy_tokens[len_diff:][:contlen]
            if not isinstance(is_greedy, bool):
                is_greedy = is_greedy.all()

            answer = (logprob, is_greedy)
            if cache_key is not None:
                self.cache_hook.add_partial("loglikelihood", cache_key, answer)
            res.append(answer)
            pbar.update(1)

    pbar.close()
    return re_ord.get_original(res)
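

# generate_until: free-form generation. Requests are grouped by their generation
# kwargs so each batch shares settings, and every request's stop sequences are
# extended with the tokenizer's end-of-text token.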
def generate_until(self, requests):
    if not requests:
        return []
    res = []

    def get_until(req_args):
        until = deepcopy(req_args.get("until", []))
        # Always stop on the end-of-text token.
        eot = self.tokenizer.ids_to_tokens([self.eot_token_id])[0]
        if eot not in until:
            until.append(eot)
        return until

    def _collate(x):
        # Sort key: prompt length in tokens, tie-broken by the prompt text.
        toks = self.tok_encode(x[0])
        return len(toks), x[0]

    re_ords = Collator(
        [req.args for req in requests], sort_fn=_collate, group_by="gen_kwargs"
    )
    chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
    for chunk in chunks:
        contexts, all_gen_kwargs = zip(*chunk)
        # (The rest of this method is truncated in the source excerpt.)
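

# A minimal usage sketch (hypothetical; names and Instance construction depend
# on the lm-evaluation-harness version in use): with `lm` an instance of this
# adapter, loglikelihood returns one (logprob, is_greedy) pair per request.
#
#     results = lm.loglikelihood(requests)
#     for logprob, is_greedy in results:
#         print(logprob, is_greedy)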