from typing import List

import torch
import torch.nn.functional as F
from tqdm import tqdm

import lm_eval.models.utils
from lm_eval import utils
from lm_eval.models.utils import stop_sequences_criteria  # import location assumed; older releases keep this helper in lm_eval.utils

# Methods of a causal-LM wrapper class; the class definition and the __init__ that set up
# self.tokenizer, self.model, self.config, etc. precede this excerpt.

def tok_encode(self, string: str, left_truncate_len: int = None):
    add_special_tokens = False or self.add_bos_token
    encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
    # Left-truncate the encoded context to at most `left_truncate_len` tokens.
    if left_truncate_len:
        encoding = encoding[-left_truncate_len:]
    return encoding
def tok_batch_encode(self, strings: List[str], padding_side: str = 'left',
                     left_truncate_len: int = None, truncation: bool = False):
    # Encode a batch of strings, padding to the longest sequence in the batch;
    # unlike tok_encode, this returns tensors plus an attention mask.
    old_padding_side = self.tokenizer.padding_side
    self.tokenizer.padding_side = padding_side
    add_special_tokens = False or self.add_bos_token
    encoding = self.tokenizer(strings, truncation=truncation, padding='longest',
                              return_tensors='pt', add_special_tokens=add_special_tokens)
    if left_truncate_len:
        encoding['input_ids'] = encoding['input_ids'][:, -left_truncate_len:]
        encoding['attention_mask'] = encoding['attention_mask'][:, -left_truncate_len:]
    self.tokenizer.padding_side = old_padding_side
    return (encoding['input_ids'], encoding['attention_mask'])
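# Usage sketch: assuming `lm` is an instance of this wrapper with its tokenizer
# loaded (the instance name is illustrative, not from the source):
#
#   ids = lm.tok_encode('The quick brown fox', left_truncate_len=128)
#   input_ids, attention_mask = lm.tok_batch_encode(
#       ['first prompt', 'a second, longer prompt'], padding_side='left'
#   )
#
# Left padding keeps each prompt's final token in the last column of the batch,
# which is what the generation path below expects.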
def tok_decode(self, tokens):
    return self.tokenizer.decode(tokens)
@wrap_constant_batch_size  # decorator assumed to be defined earlier in this module
def _model_call(self, input_ids: torch.Tensor):
    (_, sequence_length) = input_ids.shape
    with torch.inference_mode():
        # Feed one token at a time with its cache position so the model reuses its KV
        # cache, then concatenate the per-step logits into a [batch, seq, vocab] tensor.
        cache_ids = torch.arange(0, sequence_length, dtype=torch.int32).split(1)
        input_ids_split = input_ids.split(1, dim=1)
        return torch.concat(
            [self.model.forward(input_ids=input_id, cache_ids=cache_id, return_dict=False)[0]
             for (input_id, cache_id) in zip(input_ids_split, cache_ids)],
            dim=1,
        )
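# Shape sketch for the loop above (hypothetical 3-token input, shown only to
# illustrate how `cache_ids` lines up with each column of `input_ids`):
#
#   input_ids  [1, 3]  -> split into three [1, 1] columns, fed one at a time
#   cache_ids  one-element tensors [0], [1], [2]  -> KV-cache positions for those columns
#   outputs    three [1, 1, vocab] logit tensors  -> concatenated to [1, 3, vocab]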
def _model_generate(self, context, max_length, stop, **generation_kwargs):
    with torch.inference_mode():
        if 'do_sample' not in generation_kwargs.keys():
            generation_kwargs['do_sample'] = False
        # Stop on any caller-supplied stop sequence as well as the model's EOS token.
        stopping_criteria = stop_sequences_criteria(
            self.tokenizer, stop + [self.tokenizer.decode([self.config.eos_token_id])],
            1, context.shape[0],
        )
        return self.model.generate(
            input_ids=context, max_length=max_length, stopping_criteria=stopping_criteria,
            pad_token_id=self.eot_token_id, use_cache=True, **generation_kwargs,
        )
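# Call sketch: extra generation_kwargs are forwarded straight to Hugging Face
# `generate`; the surrounding call below is an assumption for illustration, not
# taken from this file:
#
#   lm._model_generate(
#       context=input_ids,                      # left-padded batch
#       max_length=input_ids.shape[1] + 64,
#       stop=['\n\n'],
#       do_sample=True, temperature=0.7,
#   )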
def _select_cont_toks(self, logits, contlen=None, inplen=None):
    assert contlen and inplen, 'Must pass input len and cont. len to select scored logits for causal LM'
    # Keep only the logit positions whose next-token distributions score the continuation.
    logits = logits[inplen - contlen:inplen]
    return logits
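# Worked example with hypothetical lengths: for inplen=10 and contlen=3 the slice
# keeps logits[7:10], i.e. the three positions that predict continuation tokens
# 1, 2 and 3 of the (context + continuation) sequence.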
def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
    loglikelihoods = []
    adaptive_batch_size = None
    for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm or self.rank != 0):
        # Split the tokenized document into disjoint windows of at most max_length
        # tokens so that every token is scored exactly once.
        rolling_token_windows = list(
            map(
                utils.make_disjoint_window,
                utils.get_rolling_token_windows(
                    token_list=self.tok_encode(string),
                    prefix_token=self.prefix_token_id,
                    max_seq_len=self.max_length,
                    context_len=1,
                ),
            )
        )
        rolling_token_windows = [(None,) + x for x in rolling_token_windows]
        pad_amnt = 0
        if self.world_size > 1:
            # Pad the number of windows so every rank runs the same number of batches;
            # the duplicated windows are dropped again after scoring.
            mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
            gathered = self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
            pad_amnt = max(gathered) - gathered[self.rank]
            if pad_amnt > 0:
                rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
        string_nll = self._loglikelihood_tokens(
            rolling_token_windows, disable_tqdm=True, override_bs=adaptive_batch_size
        )
        if self.world_size > 1 and pad_amnt > 0:
            string_nll = [x[0] for x in string_nll[:-pad_amnt]]
        else:
            string_nll = [x[0] for x in string_nll]
        string_nll = sum(string_nll)
        loglikelihoods.append(string_nll)
    return loglikelihoods
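# Windowing sketch with hypothetical numbers: for a 2,500-token document and
# max_length=2048, get_rolling_token_windows yields (context, continuation) pairs
# that together cover the document (the first window is conditioned only on the
# prefix token), and make_disjoint_window trims them so each token appears in
# exactly one continuation. The per-window log-likelihoods are then summed into a
# single document-level value.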
def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False, override_bs=None):
    res = []

    def _collate(x):
        # Sort by total token length (longest first) so similarly sized requests are
        # batched together; the token tuple is a deterministic tie-breaker.
        toks = x[1] + x[2]
        return (-len(toks), tuple(toks))

    re_ord = utils.Reorderer(requests, _collate)
    n_reordered_requests = len(re_ord.get_reordered())
    chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=self.batch_size, fn=None)
    for chunk in tqdm(chunks, disable=disable_tqdm or self.rank != 0):
        inps = []
        cont_toks_list = []
        inplens = []
        conts = []
        encoder_attns = []
        padding_len_inp = None
        padding_len_cont = None
        for (_, context_enc, continuation_enc) in chunk:
            assert len(context_enc) > 0
            assert len(continuation_enc) > 0
            assert len(continuation_enc) <= self.max_length
            # Model input is (context + continuation) truncated to the last
            # max_length + 1 tokens, with the final token dropped: the model must
            # predict it, never see it.
            inp = torch.tensor(
                (context_enc + continuation_enc)[-(self.max_length + 1):][:-1],
                dtype=torch.long,
                device=self.device,
            )
            (inplen,) = inp.shape
            padding_len_inp = max(padding_len_inp, inplen) if padding_len_inp is not None else inplen
            inps.append(inp)
            cont_toks_list.append(continuation_enc)
            inplens.append(inplen)
        call_kwargs = {}
        batched_inps = lm_eval.models.utils.pad_and_concat(padding_len_inp, inps, padding_side='right')
        multi_logits = F.log_softmax(self._model_call(batched_inps, **call_kwargs), dim=-1)
        for ((cache_key, _, _), logits, inplen, cont_toks) in zip(chunk, multi_logits, inplens, cont_toks_list):
            contlen = len(cont_toks)
            # Undo the right-padding: recover this request's true length inside the padded batch.
            ctx_len = inplen + (logits.shape[0] - padding_len_inp)
            logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
            logits = logits.unsqueeze(0)  # [1, contlen, vocab]
            greedy_tokens = logits.argmax(dim=-1)
            cont_toks = torch.tensor(cont_toks, dtype=torch.long, device=self.device).unsqueeze(0)
            max_equal = (greedy_tokens == cont_toks).all()
            # Gather the log-prob assigned to each continuation token and reduce to
            # (summed log-prob, greedy-match flag) per request.
            logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1)  # [1, contlen]
            answer = (float(logits.sum()), bool(max_equal))
            res.append(answer)
    return re_ord.get_original(res)