# Probe for the largest batch size that fits in memory. The accelerate
# decorator retries the wrapped function with a halved batch size whenever
# it sees an out-of-memory error.
@find_executable_batch_size(starting_batch_size=self.max_batch_size)
def forward_batch(batch_size):
    if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
        # Seq2seq: probe with the longer of the context and continuation sides.
        length = max(max_context_enc, max_cont_enc)
        batched_conts = torch.ones((batch_size, length), device=self.device).long()
        test_batch = torch.ones((batch_size, length), device=self.device).long()
        call_kwargs = {'attn_mask': test_batch, 'labels': batched_conts}
    else:
        call_kwargs = {}
        test_batch = torch.ones((batch_size, max_length), device=self.device).long()
    # A few repeated forward passes exercise transient allocations as well.
    for _ in range(5):
        out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)  # noqa: F841
    return batch_size
try:
    batch_size = forward_batch()
except RuntimeError as e:
    # accelerate raises this once the probe has halved its way down to zero.
    if 'No executable batch size found' in str(e):
        batch_size = 1
    else:
        raise

if self.world_size > 1:
    # If multi-GPU, always take the minimum batch size across all ranks.
    max_rnk_bs = torch.tensor([batch_size], device=self.device)
    gathered = self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
    batch_size = min(gathered)
    clear_torch_cache()
    return batch_size

clear_torch_cache()
return batch_size
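
# A minimal, self-contained sketch of how that decorator behaves, assuming only
# `accelerate` is installed. The OOM is simulated: any batch size above 16
# raises the exact error string accelerate looks for, so the probe halves
# 64 -> 32 -> 16 and settles on 16. Had it reached zero, accelerate would raise
# the 'No executable batch size found' RuntimeError handled above.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def _probe(batch_size):
    if batch_size > 16:  # simulated OOM; a real probe would run a forward pass
        raise RuntimeError('CUDA out of memory.')
    return batch_size

assert _probe() == 16  # the decorator supplies batch_size; call with no args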
def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
    """Encode a string, optionally keeping only the newest tokens."""
    special_tokens_kwargs = {}
    # If the caller did not specify, causal models add special tokens only
    # when a BOS token was explicitly requested; otherwise honor the caller.
    if add_special_tokens is None:
        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
            special_tokens_kwargs = {'add_special_tokens': self.add_bos_token}
    else:
        special_tokens_kwargs = {'add_special_tokens': add_special_tokens}
    encoding = self.tokenizer.encode(string, **special_tokens_kwargs)
    # Left-truncate: keep the last `left_truncate_len` tokens.
    if left_truncate_len:
        encoding = encoding[-left_truncate_len:]
    return encoding
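
# Left truncation keeps the newest tokens, matching how a causal LM's context
# window should drop the oldest history first. Toy illustration with stand-in
# token IDs (not from any real tokenizer):
#
#   encoding = [10, 11, 12, 13, 14]
#   encoding[-3:]  ->  [12, 13, 14]   # left_truncate_len=3 drops 10 and 11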
def tok_batch_encode(self, strings: List[str], padding_side: str = 'left', left_truncate_len: int = None, truncation: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
    # Temporarily override the tokenizer's padding side for this batch.
    old_padding_side = self.tokenizer.padding_side
    self.tokenizer.padding_side = padding_side
    add_special_tokens = {}
    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
        add_special_tokens = {'add_special_tokens': self.add_bos_token}
    encoding = self.tokenizer(strings, truncation=truncation, padding='longest', return_tensors='pt', **add_special_tokens)
    if left_truncate_len:
        encoding['input_ids'] = encoding['input_ids'][:, -left_truncate_len:]
        encoding['attention_mask'] = encoding['attention_mask'][:, -left_truncate_len:]
    self.tokenizer.padding_side = old_padding_side
    return encoding['input_ids'], encoding['attention_mask']
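
# Why padding_side='left' for generation: new tokens are appended after the
# prompt, so prompts must sit flush right with pads pushed to the left. Sketch
# assuming the public 'gpt2' tokenizer (GPT-2 ships no pad token, so EOS is
# reused):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained('gpt2')
#   tok.pad_token = tok.eos_token
#   tok.padding_side = 'left'
#   enc = tok(['a b c', 'a'], padding='longest', return_tensors='pt')
#   enc['attention_mask']  ->  tensor([[1, 1, 1],
#                                      [0, 0, 1]])   # pads on the left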
def tok_decode(self, tokens, skip_special_tokens=True):
    return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)
def _model_call(self, inps, attn_mask=None, labels=None):
    with torch.no_grad():
        if attn_mask is not None or labels is not None:
            # Seq2seq path: attention mask and labels must arrive together.
            assert attn_mask is not None and labels is not None
            assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
            return self.model(input_ids=inps, attention_mask=attn_mask, labels=labels).logits
        else:
            # Causal path: a bare forward pass over the packed input.
            assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
            return self.model(inps).logits
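
# Shape contract for the causal path: `inps` is [batch, seq] and the returned
# logits are [batch, seq, vocab]. The logits at position i score the token at
# position i + 1, so callers pack (context + continuation)[:-1] as input to
# line each logit up with the token it predicts.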
def _model_generate(self, context, max_length, stop, **generation_kwargs):
    # Greedy decoding by default: temperature = 0.0 with do_sample unset maps
    # to do_sample=False, and the temperature key is then dropped so HF
    # generate() does not warn about an unused sampling parameter.
    generation_kwargs['temperature'] = generation_kwargs.get('temperature', 0.0)
    do_sample = generation_kwargs.get('do_sample', None)
    if generation_kwargs.get('temperature') == 0.0 and do_sample is None:
        generation_kwargs['do_sample'] = do_sample = False
    if do_sample is False and generation_kwargs.get('temperature') == 0.0:
        generation_kwargs.pop('temperature')
    # Halt generation as soon as any stop sequence appears in the output.
    stopping_criteria = stop_sequences_criteria(self.tokenizer, stop, context.shape[1], context.shape[0])
    return self.model.generate(
        input_ids=context,
        max_length=max_length,
        stopping_criteria=stopping_criteria,
        pad_token_id=self.tokenizer.pad_token_id,
        use_cache=True,
        **generation_kwargs,
    )
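
# `stop_sequences_criteria` is a harness helper not shown in this excerpt. A
# minimal sketch of the idea using transformers' public StoppingCriteria API;
# the class below is illustrative (it only checks batch element 0), not the
# helper's actual implementation:
from transformers import StoppingCriteria

class _StopOnSubstring(StoppingCriteria):
    def __init__(self, tokenizer, stop_string: str, prompt_len: int):
        self.tokenizer = tokenizer
        self.stop_string = stop_string
        self.prompt_len = prompt_len  # skip the prompt; scan only new tokens

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Decode the continuation so stop strings that span token boundaries
        # are still caught, then halt once the string appears.
        text = self.tokenizer.decode(input_ids[0, self.prompt_len:])
        return self.stop_string in text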
def _select_cont_toks(self, logits: torch.Tensor, contlen: int = None, inplen: int = None) -> torch.Tensor:
    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
        assert contlen and inplen, 'Must pass input len and cont. len to select scored logits for causal LM'
        # Causal: the continuation occupies the last `contlen` positions of
        # the `inplen`-token input, so slice that window out of the logits.
        logits = logits[inplen - contlen : inplen]
    elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
        assert contlen and (not inplen), 'Selecting scored logits for Seq2SeqLM requires only cont. len'
        # Seq2seq: decoder logits already start at the continuation.
        logits = logits[:contlen]
    return logits
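
# Worked causal example with toy lengths: a context of 5 tokens and a
# continuation of 3 tokens pack into (5 + 3 - 1) = 7 input tokens, so
# inplen=7, contlen=3 and the scored window is logits[7 - 3 : 7], i.e. the
# logit rows that predict exactly the 3 continuation tokens.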
def loglikelihood_rolling(self, requests: List[Instance], disable_tqdm: bool = False) -> List[float]:
    loglikelihoods = []
    adaptive_batch_size = None
    if self.batch_size == 'auto':
        # Probe the largest workable batch size once up front and reuse it.
        print('Passed argument batch_size = auto. Detecting largest batch size')
        batch_size = self._detect_batch_size()
        print(f'Determined largest batch size: {batch_size}')
        adaptive_batch_size = batch_size
    for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm or self.rank != 0):
        rolling_token_windows = list(
            map(
                utils.make_disjoint_window,
                utils.get_rolling_token_windows(
                    token_list=self.tok_encode(string),
                    prefix_token=self.prefix_token_id,
                    max_seq_len=self.max_length,
                    context_len=1,
                ),
            )
        )
        # Prepend a placeholder cache key; rolling windows are not cached.
        rolling_token_windows = [(None,) + x for x in rolling_token_windows]
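        # Window shapes, illustrated with toy numbers (max_seq_len=4,
        # context_len=1, tokens [1, 2, 3, 4, 5, 6]); after make_disjoint_window
        # every token is scored exactly once:
        #   ([prefix],  [1, 2, 3, 4])   # first window predicts all its tokens
        #   ([2, 3, 4], [5, 6])         # later windows keep context to condition on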
        pad_amnt = 0
        if self.world_size > 1:
            # Same gather pattern as above: learn every rank's window count
            # so shorter ranks can be padded to keep processes in lockstep.
            mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
            gathered = self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
            pad_amnt = max(gathered) - gathered[self.rank]