|
|
|
import logging |
|
|
|
import torch |
|
import torch.nn.functional as F |
|
from torch import Tensor, nn |
|
|
|
from transformers.configuration_bart import BartConfig |
|
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable |
|
from relogic.pretrainkit.models.semparse.modeling_bart_copy import (PretrainedBartModel, BartModel, |
|
BART_INPUTS_DOCSTRING, BART_START_DOCSTRING, BART_GENERATION_EXAMPLE, _reorder_buffer, _make_linear_from_emb, fill_tensor) |
|
|
|
from relogic.logickit.dataflow.semtransparse.grammar.keywords import SKETCH_KEYWORDS |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
@add_start_docstrings( |
|
"The BART Model with a language modeling head. Can be used for summarization.", |
|
BART_START_DOCSTRING + BART_GENERATION_EXAMPLE, |
|
) |
|
class BartForTextToSQL(PretrainedBartModel): |
|
base_model_prefix = "model" |
|
|
|
def __init__(self, config: BartConfig): |
|
super().__init__(config) |
|
base_model = BartModel(config) |
|
        self.model = base_model
        # Buffer used by _resize_final_logits_bias below; without it, resizing the
        # token embeddings would raise an AttributeError.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
|
|
|
|
|
|
|
|
|
|
|
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: |
|
old_num_tokens = self.model.shared.num_embeddings |
|
new_embeddings = super().resize_token_embeddings(new_num_tokens) |
|
self.model.shared = new_embeddings |
|
self._resize_final_logits_bias(new_num_tokens, old_num_tokens) |
|
return new_embeddings |
|
|
|
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None: |
|
if new_num_tokens <= old_num_tokens: |
|
new_bias = self.final_logits_bias[:, :new_num_tokens] |
|
else: |
|
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) |
|
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) |
|
self.register_buffer("final_logits_bias", new_bias) |
|
|
|
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING) |
|
def forward( |
|
self, |
|
input_ids, |
|
column_spans, |
|
copy_span=None, |
|
attention_mask=None, |
|
encoder_outputs=None, |
|
decoder_input_ids=None, |
|
decoder_attention_mask=None, |
|
decoder_cached_states=None, |
|
lm_labels=None, |
|
use_cache=False, |
|
**unused |
|
): |
|
r""" |
|
        lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for tokens
            with labels in ``[0, ..., config.vocab_size]``.
|
|
|
Returns: |
|
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BartConfig`) and inputs:
|
        masked_lm_loss (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
|
Masked language modeling loss. |
|
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`) |
|
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
|
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): |
|
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) |
|
of shape :obj:`(batch_size, sequence_length, hidden_size)`. |
|
|
|
Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
|
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): |
|
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape |
|
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`. |
|
|
|
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention |
|
heads. |
|
|
|
Examples:: |
|
|
|
# Mask filling only works for bart-large |
|
from transformers import BartTokenizer, BartForConditionalGeneration |
|
tokenizer = BartTokenizer.from_pretrained('bart-large') |
|
TXT = "My friends are <mask> but they eat too many carbs." |
|
model = BartForConditionalGeneration.from_pretrained('bart-large') |
|
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids'] |
|
logits = model(input_ids)[0] |
|
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() |
|
probs = logits[0, masked_index].softmax(dim=0) |
|
values, predictions = probs.topk(5) |
|
tokenizer.decode(predictions).split() |
|
# ['good', 'great', 'all', 'really', 'very'] |
|
""" |
|
outputs = self.model( |
|
input_ids=input_ids, |
|
column_spans=column_spans, |
|
copy_span=copy_span, |
|
attention_mask=attention_mask, |
|
decoder_input_ids=decoder_input_ids, |
|
encoder_outputs=encoder_outputs, |
|
decoder_attention_mask=decoder_attention_mask, |
|
decoder_cached_states=decoder_cached_states, |
|
use_cache=use_cache, |
|
) |
|
|
|
|
batch_size = outputs[0].size(0) |
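        # The last element returned by the underlying model is a per-example weight
        # matrix (presumably vocabulary plus schema-column representations from the
        # copy-augmented encoder); decoder states are scored against it below with a
        # batched matrix product instead of a shared LM head.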
|
|
|
|
|
|
|
|
|
weight = outputs[-1] |
|
|
|
|
|
lm_logits = torch.bmm(outputs[0], weight.transpose(-1, -2)) |
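        # Greedy argmax predictions, appended to the returned tuple for convenience.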
|
|
|
generated_ids = lm_logits.argmax(-1) |
|
|
|
outputs = (lm_logits,) + outputs[1:] + (generated_ids, ) |
|
if lm_labels is not None: |
|
loss_fct = nn.CrossEntropyLoss(reduction="sum") |
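            # Sum the token-level cross-entropy and normalize by batch size rather than by token count.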
|
|
|
masked_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1)) / batch_size |
|
outputs = (masked_lm_loss,) + outputs |
|
|
|
return outputs |
|
|
|
def prepare_inputs_for_generation( |
|
self, decoder_input_ids, column_spans, copy_span, past, attention_mask, use_cache, **kwargs): |
|
assert past is not None, "past has to be defined for encoder_outputs" |
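        # On the first decoding step the decoder cache is still empty and `past` only carries the encoder outputs.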
|
|
|
|
|
if not past[1]: |
|
encoder_outputs, decoder_cached_states = past, None |
|
else: |
|
encoder_outputs, decoder_cached_states = past |
|
return { |
|
"input_ids": None, |
|
"copy_span": copy_span, |
|
"column_spans": column_spans, |
|
"encoder_outputs": encoder_outputs, |
|
"decoder_cached_states": decoder_cached_states, |
|
"decoder_input_ids": decoder_input_ids, |
|
"attention_mask": attention_mask, |
|
"use_cache": use_cache, |
|
} |
|
|
|
def prepare_logits_for_generation(self, logits, cur_len, max_length): |
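        # On the last position, force EOS so every sequence terminates.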
|
|
|
|
|
if cur_len == max_length - 1 and self.config.eos_token_id is not None: |
|
self._force_token_ids_generation(logits, self.config.eos_token_id) |
|
return logits |
|
|
|
def _force_token_ids_generation(self, scores, token_ids) -> None: |
|
"""force one of token_ids to be generated by setting prob of all other tokens to 0""" |
|
if isinstance(token_ids, int): |
|
token_ids = [token_ids] |
|
vocab_size = scores.size(-1) |
|
all_but_token_ids_mask = torch.tensor( |
|
[x for x in range(vocab_size) if x not in token_ids], |
|
dtype=torch.long, |
|
device=next(self.parameters()).device, |
|
) |
|
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]" |
|
scores[:, all_but_token_ids_mask] = -float("inf") |
|
|
|
@staticmethod |
|
def _reorder_cache(past, beam_idx): |
|
((enc_out, enc_mask), decoder_cached_states) = past |
|
reordered_past = [] |
|
for layer_past in decoder_cached_states: |
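            # Reorder each layer's cached key/value buffers to follow the selected beams.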
|
|
|
layer_past_new = { |
|
attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() |
|
} |
|
reordered_past.append(layer_past_new) |
|
|
|
new_enc_out = enc_out if enc_out is None else enc_out.index_select(0, beam_idx) |
|
new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(0, beam_idx) |
|
|
|
past = ((new_enc_out, new_enc_mask), reordered_past) |
|
return past |
|
|
|
def get_encoder(self): |
|
return self.model.encoder |
|
|
|
def get_output_embeddings(self): |
|
return _make_linear_from_emb(self.model.shared) |
|
|
|
@torch.no_grad() |
|
def generate( |
|
self, |
|
input_ids=None, |
|
column_spans=None, |
|
copy_span=None, |
|
max_length=None, |
|
min_length=None, |
|
do_sample=None, |
|
early_stopping=None, |
|
num_beams=None, |
|
temperature=None, |
|
top_k=None, |
|
top_p=None, |
|
repetition_penalty=None, |
|
bad_words_ids=None, |
|
bos_token_id=None, |
|
pad_token_id=None, |
|
eos_token_id=None, |
|
length_penalty=None, |
|
no_repeat_ngram_size=None, |
|
num_return_sequences=None, |
|
attention_mask=None, |
|
decoder_start_token_id=None, |
|
use_cache=None, |
|
vocab_size=None, |
|
**model_specific_kwargs |
|
): |
|
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. |
|
|
|
Adapted in part from `Facebook's XLM beam search code`_. |
|
|
|
.. _`Facebook's XLM beam search code`: |
|
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529 |
|
|
|
|
|
Parameters: |
|
|
|
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)` |
|
The sequence used as a prompt for the generation. If `None` the method initializes |
|
it as an empty `torch.LongTensor` of shape `(1,)`. |
|
|
|
max_length: (`optional`) int |
|
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20. |
|
|
|
min_length: (`optional`) int |
|
The min length of the sequence to be generated. Between 0 and infinity. Default to 0. |
|
|
|
do_sample: (`optional`) bool |
|
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`. |
|
|
|
early_stopping: (`optional`) bool |
|
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`. |
|
|
|
num_beams: (`optional`) int |
|
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1. |
|
|
|
temperature: (`optional`) float |
|
                The value used to modulate the next token probabilities. Must be strictly positive. Default to 1.0.
|
|
|
top_k: (`optional`) int |
|
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50. |
|
|
|
top_p: (`optional`) float |
|
                The cumulative probability threshold of the highest-probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
|
|
|
repetition_penalty: (`optional`) float |
|
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0. |
|
|
|
pad_token_id: (`optional`) int |
|
                Padding token. Defaults to the model-specific pad_token_id or None if it does not exist.
|
|
|
bos_token_id: (`optional`) int |
|
                BOS token. Defaults to `bos_token_id` as defined in the model's config.
|
|
|
eos_token_id: (`optional`) int |
|
                EOS token. Defaults to `eos_token_id` as defined in the model's config.
|
|
|
length_penalty: (`optional`) float |
|
Exponential penalty to the length. Default to 1. |
|
|
|
no_repeat_ngram_size: (`optional`) int |
|
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once. |
|
bad_words_ids: (`optional`) list of lists of int |
|
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`. |
|
|
|
num_return_sequences: (`optional`) int |
|
The number of independently computed returned sequences for each element in the batch. Default to 1. |
|
|
|
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids` |
|
Mask to avoid performing attention on padding token indices. |
|
Mask values selected in ``[0, 1]``: |
|
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. |
|
Defaults to `None`. |
|
|
|
`What are attention masks? <../glossary.html#attention-mask>`__ |
|
|
|
            decoder_start_token_id: (`optional`) int
                Start token for the decoder when an encoder-decoder model begins decoding with a token other than BOS.
                Defaults to `None`, in which case `bos_token_id` is used instead.
|
|
|
use_cache: (`optional`) bool |
|
If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`. |
|
|
|
model_specific_kwargs: (`optional`) dict |
|
Additional model specific kwargs will be forwarded to the `forward` function of the model. |
|
|
|
Return: |
|
|
|
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)` |
|
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id` |
|
|
|
Examples:: |
|
|
|
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer |
|
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. |
|
outputs = model.generate(max_length=40) # do greedy decoding |
|
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer |
|
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. |
|
input_context = 'The dog' |
|
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context |
|
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' |
|
for i in range(3): # 3 output sequences were generated |
|
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer |
|
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. |
|
input_context = 'The dog' |
|
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context |
|
            outputs = model.generate(input_ids=input_ids, do_sample=True, max_length=40, temperature=0.7, num_return_sequences=3)  # generate 3 independent sequences by sampling
|
for i in range(3): # 3 output sequences were generated |
|
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer |
|
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. |
|
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl |
|
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context |
|
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences |
|
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer |
|
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. |
|
            input_context = 'My cute dog'
|
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] |
|
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context |
|
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated |
|
""" |
|
|
|
|
|
if self.get_output_embeddings() is None: |
|
raise AttributeError( |
|
"You tried to generate sequences with a model that does not have a LM Head." |
|
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )" |
|
) |
|
|
|
max_length = max_length if max_length is not None else self.config.max_length |
|
min_length = min_length if min_length is not None else self.config.min_length |
|
do_sample = do_sample if do_sample is not None else self.config.do_sample |
|
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping |
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
num_beams = num_beams if num_beams is not None else self.config.num_beams |
|
temperature = temperature if temperature is not None else self.config.temperature |
|
top_k = top_k if top_k is not None else self.config.top_k |
|
top_p = top_p if top_p is not None else self.config.top_p |
|
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty |
|
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id |
|
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id |
|
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id |
|
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty |
|
no_repeat_ngram_size = ( |
|
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size |
|
) |
|
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids |
|
num_return_sequences = ( |
|
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences |
|
) |
|
decoder_start_token_id = ( |
|
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id |
|
) |
|
|
|
if input_ids is not None: |
|
batch_size = input_ids.shape[0] |
|
else: |
|
batch_size = 1 |
|
|
|
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer." |
|
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer." |
|
assert isinstance(do_sample, bool), "`do_sample` should be a boolean." |
|
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." |
|
assert isinstance(use_cache, bool), "`use_cache` should be a boolean." |
|
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." |
|
assert temperature > 0, "`temperature` should be strictly positive." |
|
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." |
|
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." |
|
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." |
|
assert input_ids is not None or ( |
|
isinstance(bos_token_id, int) and bos_token_id >= 0 |
|
), "If input_ids is not defined, `bos_token_id` should be a positive integer." |
|
assert pad_token_id is None or ( |
|
isinstance(pad_token_id, int) and (pad_token_id >= 0) |
|
), "`pad_token_id` should be a positive integer." |
|
assert (eos_token_id is None) or ( |
|
isinstance(eos_token_id, int) and (eos_token_id >= 0) |
|
), "`eos_token_id` should be a positive integer." |
|
assert length_penalty > 0, "`length_penalty` should be strictly positive." |
|
assert ( |
|
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0 |
|
), "`no_repeat_ngram_size` should be a positive integer." |
|
assert ( |
|
isinstance(num_return_sequences, int) and num_return_sequences > 0 |
|
), "`num_return_sequences` should be a strictly positive integer." |
|
assert ( |
|
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) |
|
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" |
|
|
|
if input_ids is None: |
|
assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( |
|
"you should either supply a context to complete as `input_ids` input " |
|
"or a `bos_token_id` (integer >= 0) as a first token to start the generation." |
|
) |
|
input_ids = torch.full( |
|
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device, |
|
) |
|
else: |
|
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." |
|
|
|
|
|
if do_sample is False: |
|
if num_beams == 1: |
|
|
|
assert ( |
|
num_return_sequences == 1 |
|
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" |
|
|
|
else: |
|
|
|
assert ( |
|
num_beams >= num_return_sequences |
|
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" |
|
|
|
|
|
|
|
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids): |
|
attention_mask = input_ids.ne(pad_token_id).long() |
|
elif attention_mask is None: |
|
attention_mask = input_ids.new_ones(input_ids.shape) |
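        # Fall back to eos_token_id as pad_token_id; this must happen after the attention mask is built.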
|
|
|
|
|
|
|
if pad_token_id is None and eos_token_id is not None: |
|
logger.warning( |
|
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id) |
|
) |
|
pad_token_id = eos_token_id |
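        # Infer vocab_size from the (decoder) config when it is not passed explicitly.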
|
|
|
|
|
if vocab_size is None: |
|
if hasattr(self.config, "vocab_size"): |
|
vocab_size = self.config.vocab_size |
|
elif ( |
|
self.config.is_encoder_decoder |
|
and hasattr(self.config, "decoder") |
|
and hasattr(self.config.decoder, "vocab_size") |
|
): |
|
vocab_size = self.config.decoder.vocab_size |
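        # When sampling, the batch is expanded by num_return_sequences so each sequence is drawn independently.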
|
|
|
|
|
|
|
if do_sample: |
|
effective_batch_size = batch_size * num_return_sequences |
|
effective_batch_mult = num_return_sequences |
|
else: |
|
effective_batch_size = batch_size |
|
effective_batch_mult = 1 |
|
|
|
if self.config.is_encoder_decoder: |
|
if decoder_start_token_id is None: |
|
decoder_start_token_id = bos_token_id |
|
|
|
assert ( |
|
decoder_start_token_id is not None |
|
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" |
|
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self) |
|
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder) |
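            # Run the encoder once; its outputs are reused at every decoding step.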
|
|
|
|
|
encoder = self.get_encoder() |
|
|
|
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask) |
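        # Expand input_ids and attention_mask when using beam search or multiple return sequences.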
|
|
|
|
|
|
if num_return_sequences > 1 or num_beams > 1: |
|
input_ids_len = input_ids.shape[-1] |
|
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len) |
|
attention_mask = attention_mask.unsqueeze(1).expand( |
|
batch_size, effective_batch_mult * num_beams, input_ids_len |
|
) |
|
|
|
input_ids = input_ids.contiguous().view( |
|
effective_batch_size * num_beams, input_ids_len |
|
) |
|
attention_mask = attention_mask.contiguous().view( |
|
effective_batch_size * num_beams, input_ids_len |
|
) |
|
|
|
if self.config.is_encoder_decoder: |
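            # Decoding starts from decoder_start_token_id; the encoder prompt is not fed to the decoder.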
|
|
|
input_ids = torch.full( |
|
(effective_batch_size * num_beams, 1), |
|
decoder_start_token_id, |
|
dtype=torch.long, |
|
device=next(self.parameters()).device, |
|
) |
|
cur_len = 1 |
|
|
|
assert ( |
|
batch_size == encoder_outputs[0].shape[0] |
|
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} " |
|
|
|
|
|
expanded_batch_idxs = ( |
|
torch.arange(batch_size) |
|
.view(-1, 1) |
|
.repeat(1, num_beams * effective_batch_mult) |
|
.view(-1) |
|
.to(input_ids.device) |
|
) |
|
|
|
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:]) |
|
|
|
else: |
|
encoder_outputs = None |
|
cur_len = input_ids.shape[-1] |
|
|
|
if num_beams > 1: |
|
output = self._generate_beam_search( |
|
input_ids, |
|
column_spans=column_spans, |
|
copy_span=copy_span, |
|
cur_len=cur_len, |
|
max_length=max_length, |
|
min_length=min_length, |
|
do_sample=do_sample, |
|
early_stopping=early_stopping, |
|
temperature=temperature, |
|
top_k=top_k, |
|
top_p=top_p, |
|
repetition_penalty=repetition_penalty, |
|
no_repeat_ngram_size=no_repeat_ngram_size, |
|
bad_words_ids=bad_words_ids, |
|
bos_token_id=bos_token_id, |
|
pad_token_id=pad_token_id, |
|
decoder_start_token_id=decoder_start_token_id, |
|
eos_token_id=eos_token_id, |
|
batch_size=effective_batch_size, |
|
num_return_sequences=num_return_sequences, |
|
length_penalty=length_penalty, |
|
num_beams=num_beams, |
|
vocab_size=vocab_size, |
|
encoder_outputs=encoder_outputs, |
|
attention_mask=attention_mask, |
|
use_cache=use_cache, |
|
model_specific_kwargs=model_specific_kwargs, |
|
) |
|
else: |
|
output = self._generate_no_beam_search( |
|
input_ids, |
|
column_spans=column_spans, |
|
copy_span=copy_span, |
|
cur_len=cur_len, |
|
max_length=max_length, |
|
min_length=min_length, |
|
do_sample=do_sample, |
|
temperature=temperature, |
|
top_k=top_k, |
|
top_p=top_p, |
|
repetition_penalty=repetition_penalty, |
|
no_repeat_ngram_size=no_repeat_ngram_size, |
|
bad_words_ids=bad_words_ids, |
|
bos_token_id=bos_token_id, |
|
pad_token_id=pad_token_id, |
|
decoder_start_token_id=decoder_start_token_id, |
|
eos_token_id=eos_token_id, |
|
batch_size=effective_batch_size, |
|
encoder_outputs=encoder_outputs, |
|
attention_mask=attention_mask, |
|
use_cache=use_cache, |
|
model_specific_kwargs=model_specific_kwargs, |
|
) |
|
|
|
return output |
|
|
|
def _generate_no_beam_search( |
|
self, |
|
input_ids, |
|
column_spans, |
|
copy_span, |
|
cur_len, |
|
max_length, |
|
min_length, |
|
do_sample, |
|
temperature, |
|
top_k, |
|
top_p, |
|
repetition_penalty, |
|
no_repeat_ngram_size, |
|
bad_words_ids, |
|
bos_token_id, |
|
pad_token_id, |
|
eos_token_id, |
|
decoder_start_token_id, |
|
batch_size, |
|
encoder_outputs, |
|
attention_mask, |
|
use_cache, |
|
model_specific_kwargs, |
|
): |
|
""" Generate sequences for each example without beam search (num_beams == 1). |
|
            All returned sequences are generated independently.
|
""" |
|
|
|
unfinished_sents = input_ids.new(batch_size).fill_(1) |
|
sent_lengths = input_ids.new(batch_size).fill_(max_length) |
|
|
|
past = encoder_outputs |
|
logits_history = [] |
|
while cur_len < max_length: |
|
model_inputs = self.prepare_inputs_for_generation( |
|
input_ids, column_spans=column_spans, copy_span=copy_span, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs |
|
) |
|
|
|
outputs = self(**model_inputs) |
|
next_token_logits = outputs[0][:, -1, :] |
|
|
|
if self._use_cache(outputs, use_cache): |
|
past = outputs[1] |
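            # Repetition penalty from the CTRL paper (https://arxiv.org/abs/1909.05858).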
|
|
|
|
|
if repetition_penalty != 1.0: |
|
self.enforce_repetition_penalty_(next_token_logits, batch_size, 1, input_ids, repetition_penalty) |
|
|
|
if no_repeat_ngram_size > 0: |
|
|
|
|
|
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len) |
|
for batch_idx in range(batch_size): |
|
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf") |
|
|
|
if bad_words_ids is not None: |
|
|
|
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) |
|
|
|
for batch_idx in range(batch_size): |
|
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf") |
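            # Keep EOS unreachable until at least min_length tokens have been generated.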
|
|
|
|
|
if eos_token_id is not None and cur_len < min_length: |
|
next_token_logits[:, eos_token_id] = -float("inf") |
|
|
|
if do_sample: |
|
|
|
if temperature != 1.0: |
|
next_token_logits = next_token_logits / temperature |
|
|
|
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) |
|
|
|
probs = F.softmax(next_token_logits, dim=-1) |
|
next_token = torch.multinomial(probs, num_samples=1).squeeze(1) |
|
else: |
|
|
|
next_token = torch.argmax(next_token_logits, dim=-1) |
|
|
|
|
|
if eos_token_id is not None: |
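                # Finished sentences keep emitting pad tokens.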
|
|
|
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) |
|
else: |
|
tokens_to_add = next_token |
|
|
|
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1) |
|
|
|
if eos_token_id is not None: |
|
eos_in_sents = tokens_to_add == eos_token_id |
|
|
|
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool() |
|
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1) |
|
|
|
unfinished_sents.mul_((~eos_in_sents).long()) |
|
|
|
|
|
if unfinished_sents.max() == 0: |
|
break |
|
|
|
|
|
if self.config.is_encoder_decoder is False: |
|
attention_mask = torch.cat( |
|
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 |
|
) |
|
|
|
cur_len = cur_len + 1 |
|
|
|
|
|
if sent_lengths.min().item() != sent_lengths.max().item(): |
|
            assert pad_token_id is not None, "`pad_token_id` has to be defined if batches have different lengths"
|
|
|
decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id) |
|
else: |
|
decoded = input_ids |
|
|
|
for hypo_idx, hypo in enumerate(input_ids): |
|
decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]] |
|
|
|
return decoded |
|
|
|
def _generate_beam_search( |
|
self, |
|
input_ids, |
|
column_spans, |
|
copy_span, |
|
cur_len, |
|
max_length, |
|
min_length, |
|
do_sample, |
|
early_stopping, |
|
temperature, |
|
top_k, |
|
top_p, |
|
repetition_penalty, |
|
no_repeat_ngram_size, |
|
bad_words_ids, |
|
bos_token_id, |
|
pad_token_id, |
|
eos_token_id, |
|
decoder_start_token_id, |
|
batch_size, |
|
num_return_sequences, |
|
length_penalty, |
|
num_beams, |
|
vocab_size, |
|
encoder_outputs, |
|
attention_mask, |
|
use_cache, |
|
model_specific_kwargs, |
|
): |
|
""" Generate sequences for each example with beam search. |
|
""" |
|
|
|
|
|
generated_hyps = [ |
|
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) |
|
for _ in range(batch_size) |
|
] |
|
|
|
|
|
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) |
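        # For greedy beam search, penalize all but the first beam so the identical initial beams are not selected repeatedly.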
|
|
|
|
|
if do_sample is False: |
|
beam_scores[:, 1:] = -1e9 |
|
beam_scores = beam_scores.view(-1) |
|
|
|
|
|
past = encoder_outputs |
|
|
|
|
|
done = [False for _ in range(batch_size)] |
|
|
|
while cur_len < max_length: |
|
model_inputs = self.prepare_inputs_for_generation( |
|
input_ids, column_spans=column_spans, copy_span=copy_span, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs |
|
) |
|
outputs = self(**model_inputs) |
|
next_token_logits = outputs[0][:, -1, :] |
|
|
|
|
|
if self._use_cache(outputs, use_cache): |
|
past = outputs[1] |
|
|
|
|
|
if repetition_penalty != 1.0: |
|
self.enforce_repetition_penalty_( |
|
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty, |
|
) |
|
|
|
if temperature != 1.0: |
|
next_token_logits = next_token_logits / temperature |
|
|
|
if self.config.is_encoder_decoder and do_sample is False: |
|
|
|
next_token_logits = self.prepare_logits_for_generation( |
|
next_token_logits, cur_len=cur_len, max_length=max_length |
|
) |
|
|
|
scores = F.log_softmax(next_token_logits, dim=-1) |
|
|
|
|
|
if eos_token_id is not None and cur_len < min_length: |
|
scores[:, eos_token_id] = -float("inf") |
|
|
|
if no_repeat_ngram_size > 0: |
|
|
|
num_batch_hypotheses = batch_size * num_beams |
|
|
|
banned_batch_tokens = calc_banned_ngram_tokens( |
|
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len |
|
) |
|
for i, banned_tokens in enumerate(banned_batch_tokens): |
|
scores[i, banned_tokens] = -float("inf") |
|
|
|
if bad_words_ids is not None: |
|
|
|
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) |
|
|
|
for i, banned_tokens in enumerate(banned_tokens): |
|
scores[i, banned_tokens] = -float("inf") |
|
|
|
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format( |
|
scores.shape, (batch_size * num_beams, vocab_size) |
|
) |
|
|
|
if do_sample: |
|
_scores = scores + beam_scores[:, None].expand_as(scores) |
|
|
|
_scores = top_k_top_p_filtering( |
|
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 |
|
) |
|
|
|
_scores = _scores.contiguous().view( |
|
batch_size, num_beams * vocab_size |
|
) |
|
|
|
|
|
probs = F.softmax(_scores, dim=-1) |
|
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) |
|
|
|
next_scores = torch.gather(_scores, -1, next_tokens) |
|
|
|
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1) |
|
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) |
|
|
|
else: |
|
next_scores = scores + beam_scores[:, None].expand_as(scores) |
|
|
|
|
|
next_scores = next_scores.view( |
|
batch_size, num_beams * vocab_size |
|
) |
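                # Keep 2 * num_beams candidates so the beam stays full even if some of them end with EOS.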
|
|
|
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True) |
|
|
|
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams) |
|
|
|
|
|
next_batch_beam = [] |
|
|
|
|
|
for batch_idx in range(batch_size): |
|
|
|
|
|
if done[batch_idx]: |
|
assert ( |
|
len(generated_hyps[batch_idx]) >= num_beams |
|
), "Batch can only be done if at least {} beams have been generated".format(num_beams) |
|
assert ( |
|
eos_token_id is not None and pad_token_id is not None |
|
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" |
|
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) |
|
continue |
|
|
|
|
|
next_sent_beam = [] |
|
|
|
|
|
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( |
|
zip(next_tokens[batch_idx], next_scores[batch_idx]) |
|
): |
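                    # Recover the beam index and the vocabulary token id from the flattened score index.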
|
|
|
beam_id = beam_token_id // vocab_size |
|
token_id = beam_token_id % vocab_size |
|
|
|
effective_beam_id = batch_idx * num_beams + beam_id |
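                    # An EOS token completes the hypothesis; store it unless it ranks below the top num_beams candidates.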
|
|
|
if (eos_token_id is not None) and (token_id.item() == eos_token_id): |
|
|
|
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams |
|
if is_beam_token_worse_than_top_num_beams: |
|
continue |
|
generated_hyps[batch_idx].add( |
|
input_ids[effective_beam_id].clone(), beam_token_score.item(), |
|
) |
|
else: |
|
|
|
next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) |
|
|
|
|
|
if len(next_sent_beam) == num_beams: |
|
break |
|
|
|
|
|
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( |
|
next_scores[batch_idx].max().item(), cur_len=cur_len |
|
) |
|
|
|
|
|
assert len(next_sent_beam) == num_beams, "Beam should always be full" |
|
next_batch_beam.extend(next_sent_beam) |
|
assert len(next_batch_beam) == num_beams * (batch_idx + 1) |
|
|
|
|
|
if all(done): |
|
break |
|
|
|
|
|
assert len(next_batch_beam) == batch_size * num_beams |
|
beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) |
|
beam_tokens = input_ids.new([x[1] for x in next_batch_beam]) |
|
beam_idx = input_ids.new([x[2] for x in next_batch_beam]) |
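            # Reorder the sequences (and the cache below) to follow the surviving beams.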
|
|
|
|
|
input_ids = input_ids[beam_idx, :] |
|
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1) |
|
|
|
if past is not None: |
|
past = self._reorder_cache(past, beam_idx) |
|
|
|
|
|
if self.config.is_encoder_decoder is False: |
|
attention_mask = torch.cat( |
|
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 |
|
) |
|
|
|
|
|
cur_len = cur_len + 1 |
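        # Finalize: add all still-open beams of unfinished batch elements to their hypothesis sets.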
|
|
|
|
|
for batch_idx in range(batch_size): |
|
if done[batch_idx]: |
|
continue |
|
|
|
|
|
if eos_token_id is not None and all( |
|
                (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
|
): |
|
assert torch.all( |
|
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx] |
|
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( |
|
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx], |
|
) |
|
|
|
|
|
for beam_id in range(num_beams): |
|
effective_beam_id = batch_idx * num_beams + beam_id |
|
final_score = beam_scores[effective_beam_id].item() |
|
final_tokens = input_ids[effective_beam_id] |
|
generated_hyps[batch_idx].add(final_tokens, final_score) |
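        # Sampling already expanded the batch by num_return_sequences, so only beam search returns multiple hypotheses here.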
|
|
|
|
|
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences |
|
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences |
|
|
|
|
|
sent_lengths = input_ids.new(output_batch_size) |
|
best = [] |
|
|
|
|
|
for i, hypotheses in enumerate(generated_hyps): |
|
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) |
|
for j in range(output_num_return_sequences_per_batch): |
|
effective_batch_idx = output_num_return_sequences_per_batch * i + j |
|
best_hyp = sorted_hyps.pop()[1] |
|
sent_lengths[effective_batch_idx] = len(best_hyp) |
|
best.append(best_hyp) |
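        # Pad shorter hypotheses and append EOS where there is room.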
|
|
|
|
|
if sent_lengths.min().item() != sent_lengths.max().item(): |
|
            assert pad_token_id is not None, "`pad_token_id` has to be defined"
|
sent_max_len = min(sent_lengths.max().item() + 1, max_length) |
|
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id) |
|
|
|
|
|
for i, hypo in enumerate(best): |
|
decoded[i, : sent_lengths[i]] = hypo |
|
if sent_lengths[i] < max_length: |
|
decoded[i, sent_lengths[i]] = eos_token_id |
|
else: |
|
|
|
            assert all(len(hypo) == max_length for hypo in best)
|
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device) |
|
|
|
return decoded |
|
|
|
|
|
def calc_banned_ngram_tokens(prev_input_ids: Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int) -> list:
|
"""Copied from fairseq for no_repeat_ngram in beam_search""" |
|
if cur_len + 1 < no_repeat_ngram_size: |
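        # Nothing to ban until at least no_repeat_ngram_size tokens have been generated.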
|
|
|
return [[] for _ in range(num_hypos)] |
|
generated_ngrams = [{} for _ in range(num_hypos)] |
|
for idx in range(num_hypos): |
|
gen_tokens = prev_input_ids[idx].tolist() |
|
generated_ngram = generated_ngrams[idx] |
|
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): |
|
prev_ngram_tuple = tuple(ngram[:-1]) |
|
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] |
|
|
|
def _get_generated_ngrams(hypo_idx): |
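        # Ban tokens that would complete an n-gram already present in this hypothesis.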
|
|
|
start_idx = cur_len + 1 - no_repeat_ngram_size |
|
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist()) |
|
return generated_ngrams[hypo_idx].get(ngram_idx, []) |
|
|
|
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] |
|
return banned_tokens |
|
|
|
|
|
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids): |
|
banned_tokens = [] |
|
|
|
def _tokens_match(prev_tokens, tokens): |
|
if len(tokens) == 0: |
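            # A single-token bad word has an empty prefix, which always matches.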
|
|
|
return True |
|
        if len(tokens) > len(prev_tokens):
            # A banned sequence longer than the already generated prefix can never match.
|
|
|
return False |
|
|
|
if prev_tokens[-len(tokens):] == tokens: |
|
|
|
return True |
|
else: |
|
return False |
|
|
|
for prev_input_ids_slice in prev_input_ids: |
|
banned_tokens_slice = [] |
|
|
|
for banned_token_seq in bad_words_ids: |
|
            assert len(banned_token_seq) > 0, "Banned word token sequences {} cannot contain an empty list".format(
|
bad_words_ids |
|
) |
|
|
|
if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False: |
|
|
|
continue |
|
|
|
banned_tokens_slice.append(banned_token_seq[-1]) |
|
|
|
banned_tokens.append(banned_tokens_slice) |
|
|
|
return banned_tokens |
|
|
|
|
|
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): |
|
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering |
|
Args: |
|
logits: logits distribution shape (batch size, vocabulary size) |
|
if top_k > 0: keep only top k tokens with highest probability (top-k filtering). |
|
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). |
|
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) |
|
Make sure we keep at least min_tokens_to_keep per batch example in the output |
|
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 |
|
""" |
|
if top_k > 0: |
|
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) |
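        # Remove every token whose logit is below the smallest logit of the top-k.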
|
|
|
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] |
|
logits[indices_to_remove] = filter_value |
|
|
|
if top_p < 1.0: |
|
sorted_logits, sorted_indices = torch.sort(logits, descending=True) |
|
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) |
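        # Remove tokens whose cumulative probability exceeds top_p; the shift below keeps the first token above the threshold.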
|
|
|
|
|
sorted_indices_to_remove = cumulative_probs > top_p |
|
if min_tokens_to_keep > 1: |
|
|
|
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 |
|
|
|
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() |
|
sorted_indices_to_remove[..., 0] = 0 |
|
|
|
|
|
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) |
|
logits[indices_to_remove] = filter_value |
|
return logits |
|
|
|
|
|
class BeamHypotheses(object): |
|
def __init__(self, num_beams, max_length, length_penalty, early_stopping): |
|
""" |
|
Initialize n-best list of hypotheses. |
|
""" |
|
self.max_length = max_length - 1 |
|
self.length_penalty = length_penalty |
|
self.early_stopping = early_stopping |
|
self.num_beams = num_beams |
|
self.beams = [] |
|
self.worst_score = 1e9 |
|
|
|
def __len__(self): |
|
""" |
|
Number of hypotheses in the list. |
|
""" |
|
return len(self.beams) |
|
|
|
def add(self, hyp, sum_logprobs): |
|
""" |
|
Add a new hypothesis to the list. |
|
""" |
|
score = sum_logprobs / len(hyp) ** self.length_penalty |
|
if len(self) < self.num_beams or score > self.worst_score: |
|
self.beams.append((score, hyp)) |
|
if len(self) > self.num_beams: |
|
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) |
|
del self.beams[sorted_scores[0][1]] |
|
self.worst_score = sorted_scores[1][0] |
|
else: |
|
self.worst_score = min(score, self.worst_score) |
|
|
|
def is_done(self, best_sum_logprobs, cur_len=None): |
|
""" |
|
        If there are enough hypotheses and none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
|
""" |
|
|
|
if len(self) < self.num_beams: |
|
return False |
|
elif self.early_stopping: |
|
return True |
|
else: |
|
if cur_len is None: |
|
cur_len = self.max_length |
|
cur_score = best_sum_logprobs / cur_len ** self.length_penalty |
|
ret = self.worst_score >= cur_score |
|
return ret |
|
|