                gathered = self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
                # Pad this rank's windows so every rank runs the same number of forward
                # passes and the collective gather does not hang.
                pad_amnt = max(gathered) - gathered[self.rank]
                if pad_amnt > 0:
                    rolling_token_windows += pad_amnt * [rolling_token_windows[0]]

            string_nll = self._loglikelihood_tokens(
                requests=rolling_token_windows,
                disable_tqdm=True,
                override_bs=adaptive_batch_size,
            )
            if self.world_size > 1 and pad_amnt > 0:
                # Drop the scores contributed by the padding windows.
                string_nll = [x[0] for x in string_nll[:-pad_amnt]]
            else:
                string_nll = [x[0] for x in string_nll]  # discard is_greedy flags
            # Sum per-window log-likelihoods into a single score for the whole string.
            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)

        return loglikelihoods
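
    # Illustrative note on the schedule below (hypothetical numbers): with
    # batch_size="auto:4" (self.batch_schedule == 4) and 1000 reordered requests,
    # positions 0-249 fall in window 0, 250-499 in window 1, and so on. The largest
    # feasible batch size is re-detected once per window, since requests are sorted
    # longest-first and later windows of shorter sequences may fit a larger batch.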
    def _batch_scheduler(self, pos, n_reordered_requests):
        sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
        if sched in self.batch_sizes:
            return self.batch_sizes[sched]
        if len(self.batch_sizes) > 1 and self.batch_sizes[sched - 1] == self.max_batch_size:
            # If the previous window already reached the maximum batch size,
            # reuse it instead of re-running detection.
            self.batch_sizes[sched] = self.max_batch_size
            return self.batch_sizes[sched]
        print(f'Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size')
        self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
        print(f'Determined largest batch size: {self.batch_sizes[sched]}')
        return self.batch_sizes[sched]
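
    # Each request handled below is a tuple
    #   ((context_str, continuation_str), context_enc, continuation_enc)
    # where the *_enc entries are token id lists, and the method returns one
    # (log-likelihood, is_greedy) pair per request, in the original request order.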
    def _loglikelihood_tokens(self, requests: List[Tuple[Tuple[str, str], List[int], List[int]]], disable_tqdm: bool = False, override_bs: int = None) -> List[Tuple[float, bool]]:
        res = []

        def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
            toks = req[1] + req[2]
            return -len(toks), tuple(toks)

        def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
            return req[-2] + req[-1][:-1]

        re_ord = Collator(
            requests,
            sort_fn=_collate,
            group_by='contexts' if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM and self.logits_cache else None,
            group_fn=_lookup_one_token_cont,
        )
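
        # Illustrative example of the sort key: _collate((("ctx", "cont"), [1, 2, 3], [4, 5]))
        # returns (-5, (1, 2, 3, 4, 5)), so the longest context+continuation pairs are
        # scheduled first (any out-of-memory failure surfaces on the first batch) and
        # identical token sequences end up adjacent. Grouping by "contexts" additionally
        # lets requests that share a context and differ only in the final continuation
        # token reuse a single forward pass via the logits cache.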
        n_reordered_requests = len(re_ord)
        # With batch_size="auto", use the override if one was given; otherwise start from 0
        # and let _batch_scheduler detect the largest batch size that fits.
        batch_size = self.batch_size if self.batch_size != 'auto' else override_bs if override_bs is not None else 0
        batch_fn = (
            self._batch_scheduler
            if self.batch_size == 'auto' and n_reordered_requests > 0 and not override_bs
            else None
        )
        chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
        pbar = tqdm(total=len(requests), disable=disable_tqdm or self.rank != 0, desc='Running loglikelihood requests')
        for chunk in chunks:
            inps = []
            cont_toks_list = []
            inplens = []

            conts = []  # only used by seq2seq models
            encoder_attns = []

            padding_len_inp = None
            padding_len_cont = None
            for _, context_enc, continuation_enc in chunk:
                # Sanity checks: non-empty context and continuation, and the continuation
                # must fit within the model's maximum sequence length.
                assert len(context_enc) > 0
                assert len(continuation_enc) > 0
                assert len(continuation_enc) <= self.max_length
                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                    # Decoder-only: feed context + continuation (minus the final token),
                    # truncated from the left to fit the context window.
                    inp = torch.tensor(
                        (context_enc + continuation_enc)[-(self.max_length + 1):][:-1],
                        dtype=torch.long,
                        device=self.device,
                    )
                    (inplen,) = inp.shape
                elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                    # Encoder-decoder: the encoder sees only the context; the continuation
                    # is passed separately as labels.
                    inp = torch.tensor(context_enc[-self.max_length:], dtype=torch.long, device=self.device)
                    (inplen,) = inp.shape
                    encoder_attns.append(torch.ones_like(inp))

                    cont = torch.tensor(continuation_enc[-self.max_length:], dtype=torch.long, device=self.device)
                    (contlen,) = cont.shape
                    conts.append(cont)
                    padding_len_cont = max(padding_len_cont, contlen) if padding_len_cont is not None else contlen

                # Track the longest input seen in this chunk for padding.
                padding_len_inp = max(padding_len_inp, inplen) if padding_len_inp is not None else inplen
                inps.append(inp)
                cont_toks_list.append(continuation_enc)
                inplens.append(inplen)
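
            # Assumption about the helper: pad_and_concat is expected to pad each 1-D
            # tensor up to the target length computed above (padding_len_inp /
            # padding_len_cont) and stack the results into one [batch, target_len] tensor.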
            call_kwargs = {}
            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
                batched_inps = pad_and_concat(padding_len_inp, inps, padding_side='right')  # [batch, padding_len_inp]
            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
                batched_inps = pad_and_concat(padding_len_inp, inps)  # [batch, padding_len_inp]
                batched_conts = pad_and_concat(padding_len_cont, conts)  # [batch, padding_len_cont]
                batched_encoder_mask = pad_and_concat(padding_len_inp, encoder_attns)  # [batch, padding_len_inp]
                call_kwargs = {'attn_mask': batched_encoder_mask, 'labels': batched_conts}

            multi_logits = F.log_softmax(
                self._model_call(batched_inps, **call_kwargs), dim=-1
            )
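            # multi_logits: [batch, padded_seq_len, vocab]. log_softmax over the last
            # dimension yields per-token log-probabilities, so each continuation's
            # log-likelihood below is a sum of gathered entries from this tensor.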
            for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
                chunk, multi_logits, inplens, cont_toks_list
            ):
                contlen = len(cont_toks)
                # ctx_len accounts for cases where the returned logits are longer than the
                # padded input (e.g. prepended virtual tokens from prompt/prefix tuning).
                ctx_len = (
                    inplen + (logits.shape[0] - padding_len_inp)
                    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
                    else None
                )
                logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
                logits = logits.unsqueeze(0)  # [1, seq, vocab]
                greedy_tokens = logits.argmax(dim=-1)

                # get_cache is a no-op unless grouping by "contexts" produced a cache hit,
                # in which case it re-expands the shared logits for each grouped request.
                for request_str, cont_toks, logits in re_ord.get_cache(
                    req_str=request_str, cxt_toks=ctx_tokens, cont_toks=cont_toks, logits=logits
                ):
                    cont_toks = torch.tensor(cont_toks, dtype=torch.long, device=self.device).unsqueeze(0)  # [1, seq]
                    # is_greedy: every continuation token matches the model's argmax prediction.
                    max_equal = (greedy_tokens == cont_toks).all()
                    # Gather the log-prob assigned to each actual continuation token.
                    logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)  # [1, seq]

                    answer = (float(logits.sum()), bool(max_equal))
                    res.append(answer)
                    self.cache_hook.add_partial('loglikelihood', request_str, answer)
                    pbar.update(1)
        pbar.close()
        return re_ord.get_original(res)
    def generate_until(self, requests: List[Instance], disable_tqdm: bool = False) -> List[str]:
        res = []

        def _collate(req: Tuple[str, dict]):
            # Sort by descending prompt length in tokens, tie-breaking on the raw string.
            toks = self.tok_encode(req[0])
            return -len(toks), req[0]

        pbar = tqdm(total=len(requests), disable=disable_tqdm or self.rank != 0, desc='Running generate_until requests')