def forward(self, # pylint: disable=arguments-differ
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None) -> \
Tuple[Union[torch.Tensor, PackedSequence], Tuple[torch.Tensor, torch.Tensor]]:
"""
Parameters
----------
inputs : ``PackedSequence``, required.
A batch first ``PackedSequence`` to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
Returns
-------
output_sequence : PackedSequence
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: Tuple[torch.Tensor, torch.Tensor]
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError("Initial states were passed to forward() but the number of "
"initial states does not match the number of layers.")
else:
hidden_states = list(zip(initial_state[0].split(1, 0),
initial_state[1].split(1, 0)))
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, 'layer_{}'.format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_hidden_state, final_cell_state = tuple(torch.cat(state_list, 0) for state_list in zip(*final_states))
return output_sequence, (final_hidden_state, final_cell_state)
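
As a quick illustration of the per-layer state handling above (a minimal sketch, not part of the library code), ``split(1, 0)`` turns a stacked (num_layers, batch_size, hidden_size) state into the per-layer (1, batch_size, hidden_size) pairs that the loop over ``hidden_states`` expects:

import torch

num_layers, batch_size, hidden_size = 3, 4, 8
initial_hidden = torch.zeros(num_layers, batch_size, hidden_size)
initial_memory = torch.zeros(num_layers, batch_size, hidden_size)
# One (hidden, memory) pair per layer, each of shape (1, batch_size, hidden_size).
per_layer_states = list(zip(initial_hidden.split(1, 0), initial_memory.split(1, 0)))
assert len(per_layer_states) == num_layers
assert per_layer_states[0][0].shape == (1, batch_size, hidden_size)
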
def substitute_any_type(type_: Type, basic_types: Set[BasicType]) -> List[Type]:
"""
Takes a type and a set of basic types, and substitutes all instances of ANY_TYPE with all
possible basic types and returns a list with all possible combinations. Note that this
substitution is unconstrained. That is, if you have a type with placeholders, <#1,#1> for
example, this may substitute the placeholders with different basic types. In that case, you'd
want to use ``_substitute_placeholder_type`` instead.
"""
if type_ == ANY_TYPE:
return list(basic_types)
if isinstance(type_, BasicType):
return [type_]
# If we've made it this far, we have a ComplexType, and we can just call
# `type_.substitute_any_type()`.
return type_.substitute_any_type(basic_types)

def _get_complex_type_production(complex_type: ComplexType,
multi_match_mapping: Dict[Type, List[Type]]) -> List[Tuple[Type, str]]:
"""
Takes a complex type (without any placeholders), gets its return values, and returns productions
(perhaps each with multiple arguments) that produce the return values. This method also takes
care of ``MultiMatchNamedBasicTypes``. If one of the arguments or the return types is a multi
match type, it gets all the substitutions of those types from ``multi_match_mapping`` and forms
a list with all possible combinations of substitutions. If the complex type passed to this method
has no ``MultiMatchNamedBasicTypes``, the returned list will contain a single tuple. For
example, if the complex type is ``<a,<<b,c>,d>>``, and ``a`` is a multi match type that matches
``e`` and ``f``, this gives the following list of tuples: ``[('d', 'd -> [<a,<<b,c>,d>, e,
<b,c>]'), ('d', 'd -> [<a,<<b,c>,d>, f, <b,c>]')]``. Note that we assume there will be no
productions from the multi match type, and the list above does not contain ``('d', 'd ->
[<a,<<b,c>,d>, a, <b,c>]')``.
"""
return_type = complex_type.return_type()
if isinstance(return_type, MultiMatchNamedBasicType):
return_types_matched = list(multi_match_mapping[return_type] if return_type in
multi_match_mapping else return_type.types_to_match)
else:
return_types_matched = [return_type]
arguments = complex_type.argument_types()
argument_types_matched = []
for argument_type in arguments:
if isinstance(argument_type, MultiMatchNamedBasicType):
matched_types = list(multi_match_mapping[argument_type] if argument_type in
multi_match_mapping else argument_type.types_to_match)
argument_types_matched.append(matched_types)
else:
argument_types_matched.append([argument_type])
complex_type_productions: List[Tuple[Type, str]] = []
for matched_return_type in return_types_matched:
for matched_arguments in itertools.product(*argument_types_matched):
complex_type_productions.append((matched_return_type,
_make_production_string(return_type,
[complex_type] + list(matched_arguments))))
return complex_type_productions
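
The ``itertools.product`` call above enumerates every combination of matched argument types. A small standalone sketch of that expansion, using plain strings in place of the actual ``Type`` objects:

import itertools

# Hypothetical matches: the first argument is a multi match type matching 'e' and 'f';
# the second argument is the ordinary type '<b,c>'; the return type is 'd'.
argument_types_matched = [['e', 'f'], ['<b,c>']]
return_types_matched = ['d']
productions = []
for matched_return_type in return_types_matched:
    for matched_arguments in itertools.product(*argument_types_matched):
        rhs = ['<a,<<b,c>,d>>'] + list(matched_arguments)
        productions.append((matched_return_type, f"d -> [{', '.join(rhs)}]"))
# [('d', 'd -> [<a,<<b,c>,d>>, e, <b,c>]'), ('d', 'd -> [<a,<<b,c>,d>>, f, <b,c>]')]
print(productions)
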
def get_valid_actions(name_mapping: Dict[str, str],
type_signatures: Dict[str, Type],
basic_types: Set[Type],
multi_match_mapping: Dict[Type, List[Type]] = None,
valid_starting_types: Set[Type] = None,
num_nested_lambdas: int = 0) -> Dict[str, List[str]]:
"""
Generates all the valid actions starting from each non-terminal. For terminals of a specific
type, we simply add a production from the type to the terminal. For all terminal `functions`,
we additionally add a rule that allows their return type to be generated from an application of
the function. For example, the function ``<e,<r,<d,r>>>``, which takes three arguments and
returns an ``r``, would generate the production rule ``r -> [<e,<r,<d,r>>>, e, r, d]``.
For functions that do not contain ANY_TYPE or placeholder types, this is straightforward.
When there are ANY_TYPES or placeholders, we substitute the ANY_TYPE with all possible basic
types, and then produce a similar rule. For example, the identity function, with type
``<#1,#1>`` and basic types ``e`` and ``r``, would produce the rules ``e -> [<#1,#1>, e]`` and
``r -> [<#1,#1>, r]``.
We additionally add a valid action from the start symbol to all ``valid_starting_types``.
Parameters
----------
name_mapping : ``Dict[str, str]``
The mapping of names that appear in your logical form languages to their aliases for NLTK.
If you are getting all valid actions for a type declaration, this can be the
``COMMON_NAME_MAPPING``.
type_signatures : ``Dict[str, Type]``
The mapping from name aliases to their types. If you are getting all valid actions for a
type declaration, this can be the ``COMMON_TYPE_SIGNATURE``.
basic_types : ``Set[Type]``
Set of all basic types in the type declaration.
multi_match_mapping : ``Dict[Type, List[Type]]`` (optional)
A mapping from `MultiMatchNamedBasicTypes` to the types they can match. This may be
different from the type's ``types_to_match`` field based on the context. While building action
sequences that lead to complex types with ``MultiMatchNamedBasicTypes``, if a type does not
occur in this mapping, the default set of ``types_to_match`` for that type will be used.
valid_starting_types : ``Set[Type]``, optional
These are the valid starting types for your grammar; e.g., what types are we allowed to
parse expressions into? We will add a "START -> TYPE" rule for each of these types. If
this is ``None``, we default to using ``basic_types``.
num_nested_lambdas : ``int`` (optional)
Does the language used permit lambda expressions? And if so, how many nested lambdas do we
need to worry about? We'll add rules like "<r,d> -> ['lambda x', d]" for all complex
types, where the variable is determined by the number of nestings. We currently only
permit up to three levels of nesting, just for ease of implementation.
"""
valid_actions: Dict[str, Set[str]] = defaultdict(set)
valid_starting_types = valid_starting_types or basic_types
for type_ in valid_starting_types:
valid_actions[str(START_TYPE)].add(_make_production_string(START_TYPE, type_))
complex_types = set()
for name, alias in name_mapping.items():
# Lambda functions and variables associated with them get produced in specific contexts. So
# we do not add them to ``valid_actions`` here, and let ``GrammarState`` deal with it.
# ``var`` is a special function that some languages (like LambdaDCS) use within lambda
# functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``)
# We do not have to produce this function outside the scope of lambda. Even within lambdas,
# it is a lot easier to not do it, and let the action sequence to logical form transformation
# logic add it to the output logical forms instead.
if name in ["lambda", "var", "x", "y", "z"]:
continue
name_type = type_signatures[alias]
# Type to terminal productions.
for substituted_type in substitute_any_type(name_type, basic_types):
valid_actions[str(substituted_type)].add(_make_production_string(substituted_type, name))
# Keeping track of complex types.
if isinstance(name_type, ComplexType) and name_type != ANY_TYPE:
complex_types.add(name_type)
for complex_type in complex_types:
for substituted_type in substitute_any_type(complex_type, basic_types):
for head, production in _get_complex_type_production(substituted_type,
multi_match_mapping or {}):
valid_actions[str(head)].add(production)
# We can produce complex types with a lambda expression, though we'll leave out
# placeholder types for now.
for i in range(num_nested_lambdas):
lambda_var = chr(ord('x') + i)
# We'll only allow lambdas to be functions that take and return basic types as their
# arguments, for now. Also, we're doing this for all possible complex types where
# the first and second types are basic types. So we may be overgenerating a bit.
for first_type in basic_types:
for second_type in basic_types:
key = ComplexType(first_type, second_type)
production_string = _make_production_string(key, ['lambda ' + lambda_var, second_type])
valid_actions[str(key)].add(production_string)
valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
return valid_action_strings

def return_type(self) -> Type:
"""
Gives the final return type for this function. If the function takes a single argument,
this is just ``self.second``. If the function takes multiple arguments and returns a basic
type, this should be the final ``.second`` after following all complex types. That is the
implementation here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
"""
return_type = self.second
while isinstance(return_type, ComplexType):
return_type = return_type.second
return return_type

def argument_types(self) -> List[Type]:
"""
Gives the types of all arguments to this function. For functions returning a basic type,
we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic
is implemented here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
"""
arguments = [self.first]
remaining_type = self.second
while isinstance(remaining_type, ComplexType):
arguments.append(remaining_type.first)
remaining_type = remaining_type.second
return arguments
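
For intuition, here is a toy stand-in (not the NLTK-backed ``ComplexType``) showing how the two traversals above unroll a curried function type such as ``<e,<r,<d,r>>>``:

class ToyComplexType:
    """Minimal stand-in with just ``first`` and ``second`` attributes."""
    def __init__(self, first, second):
        self.first = first
        self.second = second

# <e,<r,<d,r>>>: takes e, r and d, and returns r.
toy = ToyComplexType('e', ToyComplexType('r', ToyComplexType('d', 'r')))
return_type = toy.second
while isinstance(return_type, ToyComplexType):
    return_type = return_type.second
assert return_type == 'r'
arguments = [toy.first]
remaining_type = toy.second
while isinstance(remaining_type, ToyComplexType):
    arguments.append(remaining_type.first)
    remaining_type = remaining_type.second
assert arguments == ['e', 'r', 'd']
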
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
"""
Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this
complex type with each of those basic types.
"""
substitutions = []
for first_type in substitute_any_type(self.first, basic_types):
for second_type in substitute_any_type(self.second, basic_types):
substitutions.append(self.__class__(first_type, second_type))
return substitutions

def resolve(self, other) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
other_first = other.first.resolve(other.second)
if not other_first:
return None
other_second = other.second.resolve(other_first)
if not other_second:
return None
return UnaryOpType(other_first, self._allowed_substitutions, self._signature)

def resolve(self, other: Type) -> Optional[Type]:
"""See ``PlaceholderType.resolve``"""
if not isinstance(other, NltkComplexType):
return None
if not isinstance(other.second, NltkComplexType):
return None
other_first = other.first.resolve(other.second.first)
if other_first is None:
return None
other_first = other_first.resolve(other.second.second)
if not other_first:
return None
other_second = other.second.resolve(ComplexType(other_first, other_first))
if not other_second:
return None
return BinaryOpType(other_first, self._allowed_substitutions, self._signature)

def _set_type(self, other_type: Type = ANY_TYPE, signature=None) -> None:
"""
We override this method to do just one thing on top of ``ApplicationExpression._set_type``.
In lambda expressions of the form /x F(x), where the function is F and the argument is x,
we can use the type of F to infer the type of x. That is, if F is of type <a, b>, we can
resolve the type of x against a. We do this as the additional step after setting the type
of F(x).
So why does NLTK not already do this? NLTK assumes all variables (x) are of type entity
(e). So it does not have to resolve the type of x anymore. However, this would cause type
inference failures in our case since x can bind to rows, numbers or cells, each of which
has a different type. To deal with this issue, we made X of type ANY_TYPE. Also, LambdaDCS
(and some other languages) contain a ``var`` function that indicates the usage of variables
within lambda functions. We map ``var`` to V, and make it of type <#1, #1>. We cannot leave X
as ANY_TYPE because that would propagate up the tree. We need to set its type when we have
the information about F. Hence this method. Note that the language may or may not contain
the var function. We deal with both cases below.
"""
super(DynamicTypeApplicationExpression, self)._set_type(other_type, signature)
# TODO(pradeep): Assuming the mapping of "var" function is "V". Do something better.
if isinstance(self.argument, ApplicationExpression) and str(self.argument.function) == "V":
# pylint: disable=protected-access
self.argument.argument._set_type(self.function.type.first)
if str(self.argument) == "X" and str(self.function) != "V":
# pylint: disable=protected-access
self.argument._set_type(self.function.type.first)

def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name
model: Model,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
self.add_train_scalar("parameter_mean/" + name, param.data.mean())
self.add_train_scalar("parameter_std/" + name, param.data.std())
if param.grad is not None:
if param.grad.is_sparse:
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm) |
Send current parameter specific learning rates to tensorboard | def log_learning_rates(self,
model: Model,
optimizer: torch.optim.Optimizer):
"""
Send current parameter-specific learning rates to tensorboard.
"""
if self._should_log_learning_rate:
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in model.named_parameters()}
for group in optimizer.param_groups:
if 'lr' not in group:
continue
rate = group['lr']
for param in group['params']:
# check whether params has requires grad or not
effective_rate = rate * float(param.requires_grad)
self.add_train_scalar("learning_rate/" + names[param], effective_rate) |
Send histograms of parameters to tensorboard. | def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param)

def log_metrics(self,
train_metrics: dict,
val_metrics: dict = None,
epoch: int = None,
log_to_console: bool = False) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
# For logging to the console
if log_to_console:
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
name_length = max([len(x) for x in metric_names])
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in metric_names:
# Log to tensorboard
train_metric = train_metrics.get(name)
if train_metric is not None:
self.add_train_scalar(name, train_metric, timestep=epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self.add_validation_scalar(name, val_metric, timestep=epoch)
# And maybe log to console
if log_to_console and val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric)
elif log_to_console and val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif log_to_console and train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")

def get_explanation(logical_form: str,
world_extractions: JsonDict,
answer_index: int,
world: QuarelWorld) -> List[JsonDict]:
"""
Create explanation (as a list of header/content entries) for an answer
"""
output = []
nl_world = {}
if world_extractions['world1'] != "N/A" and world_extractions['world1'] != ["N/A"]:
nl_world['world1'] = nl_world_string(world_extractions['world1'])
nl_world['world2'] = nl_world_string(world_extractions['world2'])
output.append({
"header": "Identified two worlds",
"content": [f'''world1 = {nl_world['world1']}''',
f'''world2 = {nl_world['world2']}''']
})
else:
nl_world['world1'] = 'world1'
nl_world['world2'] = 'world2'
parse = semparse_util.lisp_to_nested_expression(logical_form)
if parse[0] != "infer":
return None
setup = parse[1]
output.append({
"header": "The question is stating",
"content": nl_arg(setup, nl_world)
})
answers = parse[2:]
output.append({
"header": "The answer options are stating",
"content": ["A: " + " and ".join(nl_arg(answers[0], nl_world)),
"B: " + " and ".join(nl_arg(answers[1], nl_world))]
})
setup_core = setup
if setup[0] == 'and':
setup_core = setup[1]
s_attr = setup_core[0]
s_dir = world.qr_size[setup_core[1]]
s_world = nl_world[setup_core[2]]
a_attr = answers[answer_index][0]
qr_dir = world._get_qr_coeff(strip_entity_type(s_attr), strip_entity_type(a_attr)) # pylint: disable=protected-access
a_dir = s_dir * qr_dir
a_world = nl_world[answers[answer_index][2]]
content = [f'When {nl_attr(s_attr)} is {nl_dir(s_dir)} ' +
f'then {nl_attr(a_attr)} is {nl_dir(a_dir)} (for {s_world})']
if a_world != s_world:
content.append(f'''Therefore {nl_attr(a_attr)} is {nl_dir(-a_dir)} for {a_world}''')
content.append(f"Therefore {chr(65+answer_index)} is the correct answer")
output.append({
"header": "Theory used",
"content": content
})
return output

def align_entities(extracted: List[str],
literals: JsonDict,
stemmer: NltkPorterStemmer) -> List[str]:
"""
Use stemming to attempt alignment between extracted world and given world literals.
If more words align to one world vs the other, it's considered aligned.
"""
literal_keys = list(literals.keys())
literal_values = list(literals.values())
overlaps = [get_stem_overlaps(extract, literal_values, stemmer) for extract in extracted]
worlds = []
for overlap in overlaps:
if overlap[0] > overlap[1]:
worlds.append(literal_keys[0])
elif overlap[0] < overlap[1]:
worlds.append(literal_keys[1])
else:
worlds.append(None)
return worlds
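
The overlap counting above relies on a ``get_stem_overlaps`` helper that is not shown here. A rough sketch of that idea (a hypothetical helper, assuming NLTK's ``PorterStemmer``), not the library's actual implementation:

from nltk.stem import PorterStemmer

def get_stem_overlaps_sketch(extract, literal_values, stemmer):
    """Count how many stemmed words of ``extract`` appear in each literal."""
    extract_stems = {stemmer.stem(word) for word in extract.split()}
    overlaps = []
    for literal in literal_values:
        literal_stems = {stemmer.stem(word) for word in str(literal).split()}
        overlaps.append(len(extract_stems & literal_stems))
    return overlaps

stemmer = PorterStemmer()
# 'the red car' vs. candidate worlds 'red car' and 'blue truck' -> [2, 0]
print(get_stem_overlaps_sketch('the red car', ['red car', 'blue truck'], stemmer))
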
def multi_perspective_match(vector1: torch.Tensor,
vector2: torch.Tensor,
weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate multi-perspective cosine matching between time-steps of vectors
of the same length.
Parameters
----------
vector1 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len, hidden_size)``
vector2 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len or 1, hidden_size)``
weight : ``torch.Tensor``
A tensor of shape ``(num_perspectives, hidden_size)``
Returns
-------
A tuple of two tensors consisting of multi-perspective matching results.
The first one is of the shape (batch, seq_len, 1), the second one is of shape
(batch, seq_len, num_perspectives)
"""
assert vector1.size(0) == vector2.size(0)
assert weight.size(1) == vector1.size(2) == vector2.size(2)
# (batch, seq_len, 1)
similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)
# (1, 1, num_perspectives, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(0)
# (batch, seq_len, num_perspectives, hidden_size)
vector1 = weight * vector1.unsqueeze(2)
vector2 = weight * vector2.unsqueeze(2)
similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)
return similarity_single, similarity_multi
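
Assuming ``multi_perspective_match`` above is in scope, a quick shape check with random tensors (a sketch, not a test from the library):

import torch

batch, seq_len, hidden_size, num_perspectives = 2, 5, 7, 3
vector1 = torch.randn(batch, seq_len, hidden_size)
vector2 = torch.randn(batch, seq_len, hidden_size)
weight = torch.randn(num_perspectives, hidden_size)
single, multi = multi_perspective_match(vector1, vector2, weight)
assert single.shape == (batch, seq_len, 1)
assert multi.shape == (batch, seq_len, num_perspectives)
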
def multi_perspective_match_pairwise(vector1: torch.Tensor,
vector2: torch.Tensor,
weight: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
"""
Calculate multi-perspective cosine matching between each time step of
one vector and each time step of another vector.
Parameters
----------
vector1 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len1, hidden_size)``
vector2 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len2, hidden_size)``
weight : ``torch.Tensor``
A tensor of shape ``(num_perspectives, hidden_size)``
eps : ``float`` optional, (default = 1e-8)
A small value to avoid zero division problem
Returns
-------
A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
multi-perspective matching results.
"""
num_perspectives = weight.size(0)
# (1, num_perspectives, 1, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(2)
# (batch, num_perspectives, seq_len*, hidden_size)
vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
# (batch, num_perspectives, seq_len*, 1)
vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)
# (batch, num_perspectives, seq_len1, seq_len2)
mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
norm_value = vector1_norm * vector2_norm.transpose(2, 3)
# (batch, seq_len1, seq_len2, num_perspectives)
return (mul_result / norm_value.clamp(min=eps)).permute(0, 2, 3, 1)
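
Likewise for the pairwise variant, the shape contract can be sketched as:

import torch

batch, seq_len1, seq_len2, hidden_size, num_perspectives = 2, 4, 6, 7, 3
vector1 = torch.randn(batch, seq_len1, hidden_size)
vector2 = torch.randn(batch, seq_len2, hidden_size)
weight = torch.randn(num_perspectives, hidden_size)
pairwise = multi_perspective_match_pairwise(vector1, vector2, weight)
assert pairwise.shape == (batch, seq_len1, seq_len2, num_perspectives)
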
def forward(self,
context_1: torch.Tensor,
mask_1: torch.Tensor,
context_2: torch.Tensor,
mask_2: torch.Tensor) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
# pylint: disable=arguments-differ
"""
Given the forward (or backward) representations of sentence1 and sentence2, apply four bilateral
matching functions between them in one direction.
Parameters
----------
context_1 : ``torch.Tensor``
Tensor of shape (batch_size, seq_len1, hidden_dim) representing the encoding of the first sentence.
mask_1 : ``torch.Tensor``
Binary Tensor of shape (batch_size, seq_len1), indicating which
positions in the first sentence are padding (0) and which are not (1).
context_2 : ``torch.Tensor``
Tensor of shape (batch_size, seq_len2, hidden_dim) representing the encoding of the second sentence.
mask_2 : ``torch.Tensor``
Binary Tensor of shape (batch_size, seq_len2), indicating which
positions in the second sentence are padding (0) and which are not (1).
Returns
-------
A tuple of matching vectors for the two sentences, each of which is a list of
matching vectors of shape (batch, seq_len, num_perspectives or 1).
"""
assert (not mask_2.requires_grad) and (not mask_1.requires_grad)
assert context_1.size(-1) == context_2.size(-1) == self.hidden_dim
# (batch,)
len_1 = get_lengths_from_binary_sequence_mask(mask_1)
len_2 = get_lengths_from_binary_sequence_mask(mask_2)
# (batch, seq_len*)
mask_1, mask_2 = mask_1.float(), mask_2.float()
# explicitly set masked weights to zero
# (batch_size, seq_len*, hidden_dim)
context_1 = context_1 * mask_1.unsqueeze(-1)
context_2 = context_2 * mask_2.unsqueeze(-1)
# array to keep the matching vectors for the two sentences
matching_vector_1: List[torch.Tensor] = []
matching_vector_2: List[torch.Tensor] = []
# Step 0. unweighted cosine
# First calculate the cosine similarities between each forward
# (or backward) contextual embedding and every forward (or backward)
# contextual embedding of the other sentence.
# (batch, seq_len1, seq_len2)
cosine_sim = F.cosine_similarity(context_1.unsqueeze(-2), context_2.unsqueeze(-3), dim=3)
# (batch, seq_len*, 1)
cosine_max_1 = masked_max(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_mean_1 = masked_mean(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_max_2 = masked_max(cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True)
cosine_mean_2 = masked_mean(cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True)
matching_vector_1.extend([cosine_max_1, cosine_mean_1])
matching_vector_2.extend([cosine_max_2, cosine_mean_2])
# Step 1. Full-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with the last time step of the forward (or backward)
# contextual embedding of the other sentence
if self.with_full_match:
# (batch, 1, hidden_dim)
if self.is_forward:
# (batch, 1, hidden_dim)
last_position_1 = (len_1 - 1).clamp(min=0)
last_position_1 = last_position_1.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
last_position_2 = (len_2 - 1).clamp(min=0)
last_position_2 = last_position_2.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
context_1_last = context_1.gather(1, last_position_1)
context_2_last = context_2.gather(1, last_position_2)
else:
context_1_last = context_1[:, 0:1, :]
context_2_last = context_2[:, 0:1, :]
# (batch, seq_len*, num_perspectives)
matching_vector_1_full = multi_perspective_match(context_1,
context_2_last,
self.full_match_weights)
matching_vector_2_full = multi_perspective_match(context_2,
context_1_last,
self.full_match_weights_reversed)
matching_vector_1.extend(matching_vector_1_full)
matching_vector_2.extend(matching_vector_2_full)
# Step 2. Maxpooling-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with every time step of the forward (or backward)
# contextual embedding of the other sentence, and only the max value of each
# dimension is retained.
if self.with_maxpool_match:
# (batch, seq_len1, seq_len2, num_perspectives)
matching_vector_max = multi_perspective_match_pairwise(context_1,
context_2,
self.maxpool_match_weights)
# (batch, seq_len*, num_perspectives)
matching_vector_1_max = masked_max(matching_vector_max,
mask_2.unsqueeze(-2).unsqueeze(-1),
dim=2)
matching_vector_1_mean = masked_mean(matching_vector_max,
mask_2.unsqueeze(-2).unsqueeze(-1),
dim=2)
matching_vector_2_max = masked_max(matching_vector_max.permute(0, 2, 1, 3),
mask_1.unsqueeze(-2).unsqueeze(-1),
dim=2)
matching_vector_2_mean = masked_mean(matching_vector_max.permute(0, 2, 1, 3),
mask_1.unsqueeze(-2).unsqueeze(-1),
dim=2)
matching_vector_1.extend([matching_vector_1_max, matching_vector_1_mean])
matching_vector_2.extend([matching_vector_2_max, matching_vector_2_mean])
# Step 3. Attentive-Matching
# Each forward (or backward) similarity is taken as the weight
# of the forward (or backward) contextual embedding, and calculate an
# attentive vector for the sentence by weighted summing all its
# contextual embeddings.
# Finally match each forward (or backward) contextual embedding
# with its corresponding attentive vector.
# (batch, seq_len1, seq_len2, hidden_dim)
att_2 = context_2.unsqueeze(-3) * cosine_sim.unsqueeze(-1)
# (batch, seq_len1, seq_len2, hidden_dim)
att_1 = context_1.unsqueeze(-2) * cosine_sim.unsqueeze(-1)
if self.with_attentive_match:
# (batch, seq_len*, hidden_dim)
att_mean_2 = masked_softmax(att_2.sum(dim=2), mask_1.unsqueeze(-1))
att_mean_1 = masked_softmax(att_1.sum(dim=1), mask_2.unsqueeze(-1))
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_mean = multi_perspective_match(context_1,
att_mean_2,
self.attentive_match_weights)
matching_vector_2_att_mean = multi_perspective_match(context_2,
att_mean_1,
self.attentive_match_weights_reversed)
matching_vector_1.extend(matching_vector_1_att_mean)
matching_vector_2.extend(matching_vector_2_att_mean)
# Step 4. Max-Attentive-Matching
# Pick the contextual embeddings with the highest cosine similarity as the attentive
# vector, and match each forward (or backward) contextual embedding with its
# corresponding attentive vector.
if self.with_max_attentive_match:
# (batch, seq_len*, hidden_dim)
att_max_2 = masked_max(att_2, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2)
att_max_1 = masked_max(att_1.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2)
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_max = multi_perspective_match(context_1,
att_max_2,
self.max_attentive_match_weights)
matching_vector_2_att_max = multi_perspective_match(context_2,
att_max_1,
self.max_attentive_match_weights_reversed)
matching_vector_1.extend(matching_vector_1_att_max)
matching_vector_2.extend(matching_vector_2_att_max)
return matching_vector_1, matching_vector_2

def parse_example_line(lisp_string: str) -> Dict:
"""
Training data in WikitableQuestions comes with examples in the form of lisp strings in the format:
(example (id <example-id>)
(utterance <question>)
(context (graph tables.TableKnowledgeGraph <table-filename>))
(targetValue (list (description <answer1>) (description <answer2>) ...)))
We parse such strings and return the parsed information here.
"""
id_piece, rest = lisp_string.split(') (utterance "')
example_id = id_piece.split('(id ')[1]
question, rest = rest.split('") (context (graph tables.TableKnowledgeGraph ')
table_filename, rest = rest.split(')) (targetValue (list')
target_value_strings = rest.strip().split("(description")
target_values = []
for string in target_value_strings:
string = string.replace(")", "").replace('"', '').strip()
if string != "":
target_values.append(string)
return {'id': example_id,
'question': question,
'table_filename': table_filename,
'target_values': target_values}
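
A quick usage sketch on a made-up example line (the id, question, filename and answer below are illustrative only):

example_line = ('(example (id nt-0) (utterance "what was the last year?") '
                '(context (graph tables.TableKnowledgeGraph csv/204.csv)) '
                '(targetValue (list (description "2004"))))')
parsed = parse_example_line(example_line)
# {'id': 'nt-0', 'question': 'what was the last year?',
#  'table_filename': 'csv/204.csv', 'target_values': ['2004']}
print(parsed)
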
def execute(self, lf_raw: str) -> int:
"""
Very basic model for executing friction logical forms. For now returns answer index (or
-1 if no answer can be concluded)
"""
# Remove "a:" prefixes from attributes (hack)
logical_form = re.sub(r"\(a:", r"(", lf_raw)
parse = semparse_util.lisp_to_nested_expression(logical_form)
if len(parse) < 2:
return -1
if parse[0] == 'infer':
args = [self._exec_and(arg) for arg in parse[1:]]
if None in args:
return -1
return self._exec_infer(*args)
return -1

def make_vocab_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to params.
"""
parameter_path = args.param_path
overrides = args.overrides
serialization_dir = args.serialization_dir
params = Params.from_file(parameter_path, overrides)
make_vocab_from_params(params, serialization_dir)

def get_times_from_utterance(utterance: str,
char_offset_to_token_index: Dict[int, int],
indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
"""
Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``.
"""
pm_linking_dict = _time_regex_match(r'\d+pm',
utterance,
char_offset_to_token_index,
pm_map_match_to_query_value,
indices_of_approximate_words)
am_linking_dict = _time_regex_match(r'\d+am',
utterance,
char_offset_to_token_index,
am_map_match_to_query_value,
indices_of_approximate_words)
oclock_linking_dict = _time_regex_match(r"\d+ o'clock",
utterance,
char_offset_to_token_index,
lambda match: digit_to_query_time(match.rstrip(" o'clock")),
indices_of_approximate_words)
hours_linking_dict = _time_regex_match(r"\d+ hours",
utterance,
char_offset_to_token_index,
lambda match: [int(match.rstrip(" hours"))],
indices_of_approximate_words)
times_linking_dict: Dict[str, List[int]] = defaultdict(list)
linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]
for linking_dict in linking_dicts:
for key, value in linking_dict.items():
times_linking_dict[key].extend(value)
return times_linking_dict

def get_date_from_utterance(tokenized_utterance: List[Token],
year: int = 1993) -> List[datetime]:
"""
When the year is not explicitly mentioned in the utterance, the query assumes that
it is 1993 so we do the same here. If there is no mention of the month or day then
we do not return any dates from the utterance.
"""
dates = []
utterance = ' '.join([token.text for token in tokenized_utterance])
year_result = re.findall(r'199[0-4]', utterance)
if year_result:
year = int(year_result[0])
trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for month, tens, digit in trigrams:
# This will match something like ``september twenty first``.
day = ' '.join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for month, day in bigrams:
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
# This will match something like ``september first``.
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
fivegrams = ngrams([token.text for token in tokenized_utterance], 5)
for tens, digit, _, year_match, month in fivegrams:
# This will match something like ``twenty first of 1993 july``.
day = ' '.join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))
except ValueError:
print('invalid month day')
return dates

def get_numbers_from_utterance(utterance: str, tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
"""
Given an utterance, this function finds all the numbers that are in the action space. Since we need to
keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
representation of the number and the values are lists of the token indices that trigger that number.
"""
# When we use a regex to find numbers or strings, we need a mapping from
# the character to which token triggered it.
char_offset_to_token_index = {token.idx : token_index
for token_index, token in enumerate(tokenized_utterance)}
# We want to look up later for each time whether it appears after a word
# such as "about" or "approximately".
indices_of_approximate_words = {index for index, token in enumerate(tokenized_utterance)
if token.text in APPROX_WORDS}
indices_of_words_preceding_time = {index for index, token in enumerate(tokenized_utterance)
if token.text in WORDS_PRECEDING_TIME}
indices_of_am_pm = {index for index, token in enumerate(tokenized_utterance)
if token.text in {'am', 'pm'}}
number_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token.text.isdigit():
if token_index - 1 in indices_of_words_preceding_time and token_index + 1 not in indices_of_am_pm:
for time in digit_to_query_time(token.text):
number_linking_dict[str(time)].append(token_index)
times_linking_dict = get_times_from_utterance(utterance,
char_offset_to_token_index,
indices_of_approximate_words)
for key, value in times_linking_dict.items():
number_linking_dict[key].extend(value)
for index, token in enumerate(tokenized_utterance):
for number in NUMBER_TRIGGER_DICT.get(token.text, []):
if index - 1 in indices_of_approximate_words:
for approx_time in get_approximate_times([int(number)]):
number_linking_dict[str(approx_time)].append(index)
else:
number_linking_dict[number].append(index)
return number_linking_dict

def digit_to_query_time(digit: str) -> List[int]:
"""
Given a digit in the utterance, return a list of the times that it corresponds to.
"""
if len(digit) > 2:
return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
elif int(digit) % 12 == 0:
return [0, 1200, 2400]
return [int(digit) * HOUR_TO_TWENTY_FOUR,
(int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY]
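
Assuming the usual ATIS constants (``HOUR_TO_TWENTY_FOUR = 100``, ``TWELVE_TO_TWENTY_FOUR = 1200``, ``HOURS_IN_DAY = 2400``), the mapping behaves as sketched below; verify the constant values against the actual module.

# '7' can mean 7am or 7pm; '12' is noon or midnight; '830' already looks like a clock time.
assert digit_to_query_time('7') == [700, 1900]
assert digit_to_query_time('12') == [0, 1200, 2400]
assert digit_to_query_time('830') == [830, 2030]
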
def get_approximate_times(times: List[int]) -> List[int]:
"""
Given a list of times that follow a word such as ``about``,
we return a list of times that could appear in the query as a result
of this. For example if ``about 7pm`` appears in the utterance, then
we also want to add ``1830`` and ``1930``.
"""
approximate_times = []
for time in times:
hour = int(time/HOUR_TO_TWENTY_FOUR) % 24
minute = time % HOUR_TO_TWENTY_FOUR
approximate_time = datetime.now()
approximate_time = approximate_time.replace(hour=hour, minute=minute)
start_time_range = approximate_time - timedelta(minutes=30)
end_time_range = approximate_time + timedelta(minutes=30)
approximate_times.extend([start_time_range.hour * HOUR_TO_TWENTY_FOUR + start_time_range.minute,
end_time_range.hour * HOUR_TO_TWENTY_FOUR + end_time_range.minute])
return approximate_times
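
Continuing the docstring's example, under the same constant assumptions as above:

# 1900 ('7pm') widens to half an hour on either side.
assert get_approximate_times([1900]) == [1830, 1930]
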
r"""
Given a regex for matching times in the utterance, we want to convert the matches
to the values that appear in the query and token indices they correspond to.
``char_offset_to_token_index`` is a dictionary that maps from the character offset to
the token index, we use this to look up what token a regex match corresponds to.
``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
``approximately``. We use this to check if a regex match is preceded by one of these words.
If it is, we also want to add the times that define this approximate time range.
``map_match_to_query_value`` is a function that converts the regex matches to the
values that appear in the query. For example, we may pass in a regex such as ``\d+pm``
that matches times such as ``7pm``. ``map_match_to_query_value`` would be a function that
takes ``7pm`` as input and returns ``1900``. | def _time_regex_match(regex: str,
utterance: str,
char_offset_to_token_index: Dict[int, int],
map_match_to_query_value: Callable[[str], List[int]],
indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
r"""
Given a regex for matching times in the utterance, we want to convert the matches
to the values that appear in the query and token indices they correspond to.
``char_offset_to_token_index`` is a dictionary that maps from the character offset to
the token index, we use this to look up what token a regex match corresponds to.
``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
``approximately``. We use this to check if a regex match is preceded by one of these words.
If it is, we also want to add the times that define this approximate time range.
``map_match_to_query_value`` is a function that converts the regex matches to the
values that appear in the query. For example, we may pass in a regex such as ``\d+pm``
that matches times such as ``7pm``. ``map_match_to_query_value`` would be a function that
takes ``7pm`` as input and returns ``1900``.
"""
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile(regex)
for match in number_regex.finditer(utterance):
query_values = map_match_to_query_value(match.group())
# If the time appears after a word like ``about`` then we also add
# the times that mark the start and end of the allowed range.
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()],
char_offset_to_token_index[match.start()] + 1])
return linking_scores_dict

def _evaluate_sql_query_subprocess(self, predicted_query: str, sql_query_labels: List[str]) -> int:
"""
We evaluate here whether the predicted query and the query label evaluate to the
exact same table. This method is only called by the subprocess, so we just exit with
1 if it is correct and 0 otherwise.
"""
postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)
try:
self._cursor.execute(postprocessed_predicted_query)
predicted_rows = self._cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f'Error executing predicted: {error}')
exit(0)
# If predicted table matches any of the reference tables then it is counted as correct.
target_rows = None
for sql_query_label in sql_query_labels:
postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
try:
self._cursor.execute(postprocessed_sql_query_label)
target_rows = self._cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f'Error executing query label: {error}')
if predicted_rows == target_rows:
exit(1)
exit(0)

def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
"""
Formats a dictionary of production rules into the string format expected
by the Parsimonious Grammar class.
"""
grammar_string = '\n'.join([f"{nonterminal} = {' / '.join(right_hand_side)}"
for nonterminal, right_hand_side in grammar_dictionary.items()])
return grammar_string.replace("\\", "\\\\")
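
For example, with a toy two-rule grammar dictionary (illustrative rule names only):

grammar_dictionary = {'statement': ['(query ws ";" ws)'],
                      'query': ['(select_core ws orderby_clause)', 'select_core']}
print(format_grammar_string(grammar_dictionary))
# statement = (query ws ";" ws)
# query = (select_core ws orderby_clause) / select_core
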
def initialize_valid_actions(grammar: Grammar,
keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
"""
We initialize the valid actions with the global actions. These include the
valid actions that result from the grammar and also those that result from
the tables provided. The keys represent the nonterminals in the grammar
and the values are lists of the valid actions of that nonterminal.
"""
valid_actions: Dict[str, Set[str]] = defaultdict(set)
for key in grammar:
rhs = grammar[key]
# Sequence represents a series of expressions that match pieces of the text in order.
# Eg. A -> B C
if isinstance(rhs, Sequence):
valid_actions[key].add(format_action(key, " ".join(rhs._unicode_members()), # pylint: disable=protected-access
keywords_to_uppercase=keywords_to_uppercase))
# OneOf represents a series of expressions, one of which matches the text.
# Eg. A -> B / C
elif isinstance(rhs, OneOf):
for option in rhs._unicode_members(): # pylint: disable=protected-access
valid_actions[key].add(format_action(key, option,
keywords_to_uppercase=keywords_to_uppercase))
# A string literal, eg. "A"
elif isinstance(rhs, Literal):
if rhs.literal != "":
valid_actions[key].add(format_action(key, repr(rhs.literal),
keywords_to_uppercase=keywords_to_uppercase))
else:
valid_actions[key] = set()
valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
return valid_action_strings

def format_action(nonterminal: str,
right_hand_side: str,
is_string: bool = False,
is_number: bool = False,
keywords_to_uppercase: List[str] = None) -> str:
"""
This function formats an action as it appears in models. It
splits productions based on the special `ws` and `wsp` rules,
which are used in grammars to denote whitespace, and then
rejoins these tokens into a formatted, comma-separated list.
Importantly, note that it `does not` split on spaces in
the grammar string, because these might not correspond
to spaces in the language the grammar recognises.
Parameters
----------
nonterminal : ``str``, required.
The nonterminal in the action.
right_hand_side : ``str``, required.
The right hand side of the action
(i.e the thing which is produced).
is_string : ``bool``, optional (default = False).
Whether the production produces a string.
If it does, it is formatted as ``nonterminal -> ['string']``
is_number : ``bool``, optional, (default = False).
Whether the production produces a number.
If it does, it is formatted as ``nonterminal -> ['number']``
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
"""
keywords_to_uppercase = keywords_to_uppercase or []
if right_hand_side.upper() in keywords_to_uppercase:
right_hand_side = right_hand_side.upper()
if is_string:
return f'{nonterminal} -> ["\'{right_hand_side}\'"]'
elif is_number:
return f'{nonterminal} -> ["{right_hand_side}"]'
else:
right_hand_side = right_hand_side.lstrip("(").rstrip(")")
child_strings = [token for token in WHITESPACE_REGEX.split(right_hand_side) if token]
child_strings = [tok.upper() if tok.upper() in keywords_to_uppercase else tok for tok in child_strings]
return f"{nonterminal} -> [{', '.join(child_strings)}]" |
For each node, we accumulate the rules that generated its children in a list. | def add_action(self, node: Node) -> None:
"""
For each node, we accumulate the rules that generated its children in a list.
"""
if node.expr.name and node.expr.name not in ['ws', 'wsp']:
nonterminal = f'{node.expr.name} -> '
if isinstance(node.expr, Literal):
right_hand_side = f'["{node.text}"]'
else:
child_strings = []
for child in node.__iter__():
if child.expr.name in ['ws', 'wsp']:
continue
if child.expr.name != '':
child_strings.append(child.expr.name)
else:
child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(")") # pylint: disable=protected-access
child_right_side_list = [tok for tok in
WHITESPACE_REGEX.split(child_right_side_string) if tok]
child_right_side_list = [tok.upper() if tok.upper() in
self.keywords_to_uppercase else tok
for tok in child_right_side_list]
child_strings.extend(child_right_side_list)
right_hand_side = "[" + ", ".join(child_strings) + "]"
rule = nonterminal + right_hand_side
self.action_sequence = [rule] + self.action_sequence |
See the ``NodeVisitor`` visit method. This just changes the order in which
we visit nonterminals from right to left to left to right. | def visit(self, node):
"""
See the ``NodeVisitor`` visit method. This just changes the order in which
we visit nonterminals from right to left to left to right.
"""
method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
# Changing this to reverse here!
return method(node, [self.visit(child) for child in reversed(list(node))])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception: # pylint: disable=broad-except
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, traceback = exc_info()
reraise(VisitationError, VisitationError(exc, exc_class, node), traceback) |
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However, it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s. | def forward(self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None,
token_type_ids: torch.LongTensor = None) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However, it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s.
"""
# pylint: disable=arguments-differ
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
all_encoder_layers, _ = self.bert_model(input_ids=util.combine_initial_dims(input_ids),
token_type_ids=util.combine_initial_dims(token_type_ids),
attention_mask=util.combine_initial_dims(input_mask))
if self._scalar_mix is not None:
mix = self._scalar_mix(all_encoder_layers, input_mask)
else:
mix = all_encoder_layers[-1]
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
return util.uncombine_initial_dims(mix, input_ids.size())
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(offsets2d.size(0),
device=util.get_device_of(mix)).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = mix[range_vector, offsets2d]
return util.uncombine_initial_dims(selected_embeddings, offsets.size()) |
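As a concrete illustration of the ``offsets`` convention described above, here is a small standalone sketch (not part of this module) that derives "last wordpiece" offsets for the docstring's example:

def last_wordpiece_offsets(wordpieces):
    # A piece starting with "##" continues the previous original token.
    offsets = []
    for i, piece in enumerate(wordpieces):
        if piece.startswith("##"):
            offsets[-1] = i      # extend the current token to this wordpiece
        else:
            offsets.append(i)    # a new original token starts here
    return offsets

last_wordpiece_offsets(["Def", "##in", "##ite", "##ly", "not"])  # [3, 4]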
SQL is a predominantly variable-free language in terms of simple usage, in the
sense that most queries do not create references to variables which are not
already static tables in a dataset. However, it is possible to do this via
derived tables. If we don't require this functionality, we can tighten the
grammar, because we don't need to support aliased tables. | def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]):
"""
SQL is a predominantly variable-free language in terms of simple usage, in the
sense that most queries do not create references to variables which are not
already static tables in a dataset. However, it is possible to do this via
derived tables. If we don't require this functionality, we can tighten the
grammar, because we don't need to support aliased tables.
"""
# Tables in variable free grammars cannot be aliased, so we
# remove this functionality from the grammar.
grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', 'expr']
# Similarly, collapse the definition of a source table
# to not contain aliases and modify references to subqueries.
grammar_dictionary["single_source"] = ['table_name', '("(" ws query ws ")")']
del grammar_dictionary["source_subq"]
del grammar_dictionary["source_table"]
grammar_dictionary["expr"] = ['in_expr',
'(value wsp "LIKE" wsp string)',
'(value ws "BETWEEN" wsp value ws "AND" wsp value)',
'(value ws binaryop wsp expr)',
'(unaryop ws expr)',
'(col_ref ws "IS" ws "NOT" ws "NULL")',
'(col_ref ws "IS" ws "NULL")',
# This used to be source_subq - now
# we don't need aliases, we can collapse it to queries.
'("(" ws query ws ")")',
'value']
# Finally, remove the ability to reference an arbitrary name,
# because now we don't have aliased tables, we don't need
# to recognise new variables.
del grammar_dictionary["name"] |
Variables can be treated as numbers or strings if their type can be inferred -
however, that can be difficult, so instead, we can just treat them all as values
and be a bit looser on the typing we allow in our grammar. Here we just remove
all references to number and string from the grammar, replacing them with value. | def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None:
"""
Variables can be treated as numbers or strings if their type can be inferred -
however, that can be difficult, so instead, we can just treat them all as values
and be a bit looser on the typing we allow in our grammar. Here we just remove
all references to number and string from the grammar, replacing them with value.
"""
grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', 'value']
grammar_dictionary["value"].remove('string')
grammar_dictionary["value"].remove('number')
grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)']
grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)'
del grammar_dictionary["string"]
del grammar_dictionary["number"] |
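A minimal before/after sketch of this transformation on a toy grammar dictionary (the rule strings are illustrative, not the real grammar):

grammar = {"value": ["string", "number", "col_ref"],
           "expr": ["in_expr", '(value wsp "LIKE" wsp string)'],
           "string": ["string_literal"], "number": ["number_literal"],
           "string_set_vals": [], "limit": []}
update_grammar_with_untyped_entities(grammar)
# Now grammar["value"] == ["col_ref"], the LIKE rule refers to value,
# and the "string" and "number" nonterminals have been removed.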
Ensembles don't have vocabularies or weights of their own, so they override _load. | def _load(cls,
config: Params,
serialization_dir: str,
weights_file: str = None,
cuda_device: int = -1) -> 'Model':
"""
Ensembles don't have vocabularies or weights of their own, so they override _load.
"""
model_params = config.get('model')
# The experiment config tells us how to _train_ a model, including where to get pre-trained
# embeddings from. We're now _loading_ the model, so those embeddings will already be
# stored in our weights. We don't need any pretrained weight file anymore, and we don't
# want the code to look for it, so we remove it from the parameters here.
remove_pretrained_embedding_params(model_params)
model = Model.from_params(vocab=None, params=model_params)
# Force model to cpu or gpu, as appropriate, to make sure that the embeddings are
# in sync with the weights
if cuda_device >= 0:
model.cuda(cuda_device)
else:
model.cpu()
return model |
Apply text standardization following original implementation. | def text_standardize(text):
"""
Apply text standardization following original implementation.
"""
text = text.replace('—', '-')
text = text.replace('–', '-')
text = text.replace('―', '-')
text = text.replace('…', '...')
text = text.replace('´', "'")
text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
text = re.sub(r'\s*\n\s*', ' \n ', text)
text = re.sub(r'[^\S\n]+', ' ', text)
return text.strip() |
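Two illustrative calls (the outputs follow directly from the replacements and regexes above):

text_standardize("state—of—the—art")  # 'state - of - the - art'
text_standardize("wait…   what?")     # 'wait... what ?'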
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag. | def main(prog: str = None,
subcommand_overrides: Dict[str, Subcommand] = {}) -> None:
"""
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag.
"""
# pylint: disable=dangerous-default-value
parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(title='Commands', metavar='')
subcommands = {
# Default commands
"configure": Configure(),
"train": Train(),
"evaluate": Evaluate(),
"predict": Predict(),
"make-vocab": MakeVocab(),
"elmo": Elmo(),
"fine-tune": FineTune(),
"dry-run": DryRun(),
"test-install": TestInstall(),
"find-lr": FindLearningRate(),
"print-results": PrintResults(),
# Superseded by overrides
**subcommand_overrides
}
for name, subcommand in subcommands.items():
subparser = subcommand.add_subparser(name, subparsers)
# configure doesn't need include-package because it imports
# whatever classes it needs.
if name != "configure":
subparser.add_argument('--include-package',
type=str,
action='append',
default=[],
help='additional packages to include')
args = parser.parse_args()
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if 'func' in dir(args):
# Import any additional modules needed (to register custom classes).
for package_name in getattr(args, 'include_package', ()):
import_submodules(package_name)
args.func(args)
else:
parser.print_help() |
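For example, assuming the package installs a console script named ``allennlp`` that calls this ``main``, classes registered in a hypothetical ``my_library`` package can be pulled in before a subcommand runs:

import sys
# Equivalent to running `allennlp test-install --include-package my_library` from a shell.
sys.argv = ["allennlp", "test-install", "--include-package", "my_library"]
main(prog="allennlp")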
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays. | def get_padding_lengths(self) -> Dict[str, int]:
"""
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
(potentially) several ``TokenIndexers``. This method gets the max length (over tokens)
associated with each of these arrays.
"""
# Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens
# for each indexer separately. Then we will combine the results for each indexer into a single
# dictionary, resolving any (unlikely) key conflicts by taking a max.
lengths = []
if self._indexed_tokens is None:
raise ConfigurationError("You must call .index(vocabulary) on a "
"field before determining padding lengths.")
# Each indexer can return a different sequence length, and for indexers that return
# multiple arrays each can have a different length. We'll keep track of them here.
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = {}
for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:
# This is a list of dicts, one for each token in the field.
token_lengths = [indexer.get_padding_lengths(token)
for token in self._indexed_tokens[indexed_tokens_key]]
if not token_lengths:
# This is a padding edge case and occurs when we want to pad a ListField of
# TextFields. In order to pad the list field, we need to be able to have an
# _empty_ TextField, but if this is the case, token_lengths will be an empty
# list, so we add the default empty padding dictionary to the list instead.
token_lengths = [{}]
# Iterate over the keys and find the maximum token length.
# It's fine to iterate over the keys of the first token since all tokens have the same keys.
for key in token_lengths[0]:
indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)
lengths.append(indexer_lengths)
padding_lengths = {}
num_tokens = set()
for indexer_name, token_list in self._indexed_tokens.items():
padding_lengths[f"{indexer_name}_length"] = len(token_list)
num_tokens.add(len(token_list))
# We don't actually use this for padding anywhere, but we used to. We add this key back in
# so that older configs still work if they sorted by this key in a BucketIterator. Taking
# the max of all of these should be fine for that purpose.
padding_lengths['num_tokens'] = max(num_tokens)
# Get all keys which have been used for padding for each indexer and take the max if there are duplicates.
padding_keys = {key for d in lengths for key in d.keys()}
for padding_key in padding_keys:
padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0 for x in lengths)
return padding_lengths |
Creates ELMo word representations from a vocabulary file. These
word representations are _independent_ - they are the result of running
the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
ELMo requires 2 additional tokens: <S> and </S>. The first token
in this file is assumed to be an unknown token.
This script produces two artifacts: A new vocabulary file
with the <S> and </S> tokens inserted and a glove formatted embedding
file containing word : vector pairs, one per line, with all values
separated by a space. | def main(vocab_path: str,
elmo_config_path: str,
elmo_weights_path: str,
output_dir: str,
batch_size: int,
device: int,
use_custom_oov_token: bool = False):
"""
Creates ELMo word representations from a vocabulary file. These
word representations are _independent_ - they are the result of running
the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
ELMo requires 2 additional tokens: <S> and </S>. The first token
in this file is assumed to be an unknown token.
This script produces two artifacts: A new vocabulary file
with the <S> and </S> tokens inserted and a glove formatted embedding
file containing word : vector pairs, one per line, with all values
separated by a space.
"""
# Load the vocabulary words and convert to char ids
with open(vocab_path, 'r') as vocab_file:
tokens = vocab_file.read().strip().split('\n')
# Insert the sentence boundary tokens which elmo uses at positions 1 and 2.
if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
raise ConfigurationError("ELMo embeddings require the use of a OOV token.")
tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token(token) for token in tokens], Vocabulary(), "indices")["indices"]
sentences = []
for k in range((len(indices) // 50) + 1):
sentences.append(indexer.pad_token_sequence(indices[(k * 50):((k + 1) * 50)],
desired_num_tokens=50,
padding_lengths={}))
last_batch_remainder = 50 - (len(indices) % 50)
if device != -1:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path,
elmo_weights_path).cuda(device)
else:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path,
elmo_weights_path)
all_embeddings = []
for i in range((len(sentences) // batch_size) + 1):
array = numpy.array(sentences[i * batch_size: (i + 1) * batch_size])
if device != -1:
batch = torch.from_numpy(array).cuda(device)
else:
batch = torch.from_numpy(array)
token_embedding = elmo_token_embedder(batch)['token_embedding'].data
# Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
# We also need to remove the <S>, </S> tokens appended by the encoder.
per_word_embeddings = token_embedding[:, 1:-1, :].contiguous().view(-1, token_embedding.size(-1))
all_embeddings.append(per_word_embeddings)
# Remove the embeddings associated with padding in the last batch.
all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]
embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()
# Write out the embedding in a glove format.
os.makedirs(output_dir, exist_ok=True)
with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"), 'wb') as embeddings_file:
for i, word in enumerate(tokens):
string_array = " ".join([str(x) for x in list(embedding_weight[i, :])])
embeddings_file.write(f"{word} {string_array}\n".encode('utf-8'))
# Write out the new vocab with the <S> and </S> tokens.
_, vocab_file_name = os.path.split(vocab_path)
with open(os.path.join(output_dir, vocab_file_name), "w") as new_vocab_file:
for word in tokens:
new_vocab_file.write(f"{word}\n") |
Sorts the instances by their padding lengths, using the keys in
``sorting_keys`` (in the order in which they are provided). ``sorting_keys`` is a list of
``(field_name, padding_key)`` tuples. | def sort_by_padding(instances: List[Instance],
sorting_keys: List[Tuple[str, str]], # pylint: disable=invalid-sequence-index
vocab: Vocabulary,
padding_noise: float = 0.0) -> List[Instance]:
"""
Sorts the instances by their padding lengths, using the keys in
``sorting_keys`` (in the order in which they are provided). ``sorting_keys`` is a list of
``(field_name, padding_key)`` tuples.
"""
instances_with_lengths = []
for instance in instances:
# Make sure instance is indexed before calling .get_padding_lengths()
instance.index_fields(vocab)
padding_lengths = cast(Dict[str, Dict[str, float]], instance.get_padding_lengths())
if padding_noise > 0.0:
noisy_lengths = {}
for field_name, field_lengths in padding_lengths.items():
noisy_lengths[field_name] = add_noise_to_dict_values(field_lengths, padding_noise)
padding_lengths = noisy_lengths
instance_with_lengths = ([padding_lengths[field_name][padding_key]
for (field_name, padding_key) in sorting_keys],
instance)
instances_with_lengths.append(instance_with_lengths)
instances_with_lengths.sort(key=lambda x: x[0])
return [instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths] |
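The effect of the sort, sketched on plain dictionaries rather than real ``Instance`` s (the field and key names are hypothetical):

# Each entry mimics instance.get_padding_lengths() for a single "tokens" field.
padding_lengths = [{"tokens": {"num_tokens": 12}},
                   {"tokens": {"num_tokens": 3}},
                   {"tokens": {"num_tokens": 7}}]
sorting_keys = [("tokens", "num_tokens")]
order = sorted(range(len(padding_lengths)),
               key=lambda i: [padding_lengths[i][f][k] for f, k in sorting_keys])
# order == [1, 2, 0]: the shortest instance comes first, as in sort_by_padding.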
Take the question and check if it is compatible with either of the answer choices. | def infer(self, setup: QuaRelType, answer_0: QuaRelType, answer_1: QuaRelType) -> int:
"""
Take the question and check if it is compatible with either of the answer choices.
"""
if self._check_quarels_compatible(setup, answer_0):
if self._check_quarels_compatible(setup, answer_1):
# Found two answers
return -2
else:
return 0
elif self._check_quarels_compatible(setup, answer_1):
return 1
else:
return -1 |
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model. | def _html(title: str, field_names: List[str]) -> str:
"""
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model.
"""
inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name)
for field_name in field_names)
quoted_field_names = [f"'{field_name}'" for field_name in field_names]
quoted_field_list = f"[{','.join(quoted_field_names)}]"
return _PAGE_TEMPLATE.substitute(title=title,
css=_CSS,
inputs=inputs,
qfl=quoted_field_list) |
Creates a Flask app that serves up the provided ``Predictor``
along with a front-end for interacting with it.
If you want to use the built-in bare-bones HTML, you must provide the
field names for the inputs (which will be used both as labels
and as the keys in the JSON that gets sent to the predictor).
If you would rather create your own HTML, call it index.html
and provide its directory as ``static_dir``. In that case you
don't need to supply the field names -- that information should
be implicit in your demo site. (Probably the easiest thing to do
is just start with the bare-bones HTML and modify it.)
In addition, if you want to somehow transform the JSON prediction
(e.g. by removing probabilities or logits)
you can do that by passing in a ``sanitizer`` function. | def make_app(predictor: Predictor,
field_names: List[str] = None,
static_dir: str = None,
sanitizer: Callable[[JsonDict], JsonDict] = None,
title: str = "AllenNLP Demo") -> Flask:
"""
Creates a Flask app that serves up the provided ``Predictor``
along with a front-end for interacting with it.
If you want to use the built-in bare-bones HTML, you must provide the
field names for the inputs (which will be used both as labels
and as the keys in the JSON that gets sent to the predictor).
If you would rather create your own HTML, call it index.html
and provide its directory as ``static_dir``. In that case you
don't need to supply the field names -- that information should
be implicit in your demo site. (Probably the easiest thing to do
is just start with the bare-bones HTML and modify it.)
In addition, if you want to somehow transform the JSON prediction
(e.g. by removing probabilities or logits)
you can do that by passing in a ``sanitizer`` function.
"""
if static_dir is not None:
static_dir = os.path.abspath(static_dir)
if not os.path.exists(static_dir):
logger.error("app directory %s does not exist, aborting", static_dir)
sys.exit(-1)
elif static_dir is None and field_names is None:
print("Neither build_dir nor field_names passed. Demo won't render on this port.\n"
"You must use nodejs + react app to interact with the server.")
app = Flask(__name__) # pylint: disable=invalid-name
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index() -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_file(os.path.join(static_dir, 'index.html'))
else:
html = _html(title, field_names)
return Response(response=html, status=200)
@app.route('/predict', methods=['POST', 'OPTIONS'])
def predict() -> Response: # pylint: disable=unused-variable
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_json(data)
if sanitizer is not None:
prediction = sanitizer(prediction)
log_blob = {"inputs": data, "outputs": prediction}
logger.info("prediction: %s", json.dumps(log_blob))
return jsonify(prediction)
@app.route('/predict_batch', methods=['POST', 'OPTIONS'])
def predict_batch() -> Response: # pylint: disable=unused-variable
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_batch_json(data)
if sanitizer is not None:
prediction = [sanitizer(p) for p in prediction]
return jsonify(prediction)
@app.route('/<path:path>')
def static_proxy(path: str) -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_from_directory(static_dir, path)
else:
raise ServerError("static_dir not specified", 404)
return app |
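A minimal usage sketch, assuming ``predictor`` is an already-constructed ``Predictor`` whose JSON input has a single ``sentence`` field (the field name is illustrative):

app = make_app(predictor, field_names=["sentence"], title="My Demo")
app.run(host="0.0.0.0", port=8000)  # standard Flask development server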
Returns the valid actions in the current grammar state. See the class docstring for a
description of what we're returning here. | def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]:
"""
Returns the valid actions in the current grammar state. See the class docstring for a
description of what we're returning here.
"""
actions = self._valid_actions[self._nonterminal_stack[-1]]
context_actions = []
for type_, variable in self._lambda_stacks:
if self._nonterminal_stack[-1] == type_:
production_string = f"{type_} -> {variable}"
context_actions.append(self._context_actions[production_string])
if context_actions:
input_tensor, output_tensor, action_ids = actions['global']
new_inputs = [input_tensor] + [x[0] for x in context_actions]
input_tensor = torch.cat(new_inputs, dim=0)
new_outputs = [output_tensor] + [x[1] for x in context_actions]
output_tensor = torch.cat(new_outputs, dim=0)
new_action_ids = action_ids + [x[2] for x in context_actions]
# We can't just reassign to actions['global'], because that would modify the state of
# self._valid_actions. Instead, we need to construct a new actions dictionary.
new_actions = {**actions}
new_actions['global'] = (input_tensor, output_tensor, new_action_ids)
actions = new_actions
return actions |
Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack and the context-dependent actions. Updating the
non-terminal stack involves popping the non-terminal that was expanded off of the stack,
then pushing on any non-terminals in the production rule back on the stack. We push the
non-terminals on in `reverse` order, so that the first non-terminal in the production rule
gets popped off the stack first.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``. | def take_action(self, production_rule: str) -> 'LambdaGrammarStatelet':
"""
Takes an action in the current grammar state, returning a new grammar state with whatever
updates are necessary. The production rule is assumed to be formatted as "LHS -> RHS".
This will update the non-terminal stack and the context-dependent actions. Updating the
non-terminal stack involves popping the non-terminal that was expanded off of the stack,
then pushing on any non-terminals in the production rule back on the stack. We push the
non-terminals on in `reverse` order, so that the first non-terminal in the production rule
gets popped off the stack first.
For example, if our current ``nonterminal_stack`` is ``["r", "<e,r>", "d"]``, and
``action`` is ``d -> [<e,d>, e]``, the resulting stack will be ``["r", "<e,r>", "e",
"<e,d>"]``.
"""
left_side, right_side = production_rule.split(' -> ')
assert self._nonterminal_stack[-1] == left_side, (f"Tried to expand {self._nonterminal_stack[-1]}"
f"but got rule {left_side} -> {right_side}")
assert all(self._lambda_stacks[key][-1] == left_side for key in self._lambda_stacks)
new_stack = self._nonterminal_stack[:-1]
new_lambda_stacks = {key: self._lambda_stacks[key][:-1] for key in self._lambda_stacks}
productions = self._get_productions_from_string(right_side)
# Looking for lambda productions, but not for cells or columns with the word "lambda" in
# them.
if 'lambda' in productions[0] and 'fb:' not in productions[0]:
production = productions[0]
if production[0] == "'" and production[-1] == "'":
# The production rule with a lambda is typically "<t,d> -> ['lambda x', d]". We
# need to strip the quotes.
production = production[1:-1]
lambda_variable = production.split(' ')[1]
# The left side must be formatted as "<t,d>", where "t" is the type of the lambda
# variable, and "d" is the return type of the lambda function. We need to pull out the
# "t" here. TODO(mattg): this is pretty limiting, but I'm not sure how general we
# should make this.
if len(left_side) != 5:
raise NotImplementedError("Can't handle this type yet:", left_side)
lambda_type = left_side[1]
new_lambda_stacks[(lambda_type, lambda_variable)] = []
for production in reversed(productions):
if self._is_nonterminal(production):
new_stack.append(production)
for lambda_stack in new_lambda_stacks.values():
lambda_stack.append(production)
# If any of the lambda stacks have now become empty, we remove them from our dictionary.
new_lambda_stacks = {key: new_lambda_stacks[key]
for key in new_lambda_stacks if new_lambda_stacks[key]}
return LambdaGrammarStatelet(nonterminal_stack=new_stack,
lambda_stacks=new_lambda_stacks,
valid_actions=self._valid_actions,
context_actions=self._context_actions,
is_nonterminal=self._is_nonterminal) |
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
Parameters
----------
energy : ``numpy.ndarray``, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is ``False``,
the tensor should have shape (timesteps, timesteps) instead.
length : ``int``, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : ``bool``, optional, (default = True)
Whether the graph has labels or not. | def decode_mst(energy: numpy.ndarray,
length: int,
has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
Parameters
----------
energy : ``numpy.ndarray``, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is ``False``,
the tensor should have shape (timesteps, timesteps) instead.
length : ``int``, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : ``bool``, optional, (default = True)
Whether the graph has labels or not.
"""
if has_labels and energy.ndim != 3:
raise ConfigurationError("The dimension of the energy array is not equal to 3.")
elif not has_labels and energy.ndim != 2:
raise ConfigurationError("The dimension of the energy array is not equal to 2.")
input_shape = energy.shape
max_length = input_shape[-1]
# Our energy matrix might have been batched -
# here we clip it to contain only non padded tokens.
if has_labels:
energy = energy[:, :length, :length]
# get best label for each edge.
label_id_matrix = energy.argmax(axis=0)
energy = energy.max(axis=0)
else:
energy = energy[:length, :length]
label_id_matrix = None
# get original score matrix
original_score_matrix = energy
# initialize score matrix to original score matrix
score_matrix = numpy.array(original_score_matrix, copy=True)
old_input = numpy.zeros([length, length], dtype=numpy.int32)
old_output = numpy.zeros([length, length], dtype=numpy.int32)
current_nodes = [True for _ in range(length)]
representatives: List[Set[int]] = []
for node1 in range(length):
original_score_matrix[node1, node1] = 0.0
score_matrix[node1, node1] = 0.0
representatives.append({node1})
for node2 in range(node1 + 1, length):
old_input[node1, node2] = node1
old_output[node1, node2] = node2
old_input[node2, node1] = node2
old_output[node2, node1] = node1
final_edges: Dict[int, int] = {}
# The main algorithm operates inplace.
chu_liu_edmonds(length, score_matrix, current_nodes,
final_edges, old_input, old_output, representatives)
heads = numpy.zeros([max_length], numpy.int32)
if has_labels:
head_type = numpy.ones([max_length], numpy.int32)
else:
head_type = None
for child, parent in final_edges.items():
heads[child] = parent
if has_labels:
head_type[child] = label_id_matrix[parent, child]
return heads, head_type |
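A small unlabeled example (``has_labels=False``) showing the calling convention; the exact tree follows from the scores, which are made up here:

import numpy
# scores[i, j] is the score of an edge from head i to dependent j; node 0 is the root.
scores = numpy.array([[0.0, 9.0, 1.0],
                      [0.0, 0.0, 8.0],
                      [0.0, 2.0, 0.0]])
heads, _ = decode_mst(scores, length=3, has_labels=False)
# heads[j] is the predicted head of node j; here node 1 attaches to the root
# and node 2 attaches to node 1.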
Applies the chu-liu-edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
Parameters
----------
length : ``int``, required.
The number of nodes.
score_matrix : ``numpy.ndarray``, required.
The score matrix representing the scores for pairs
of nodes.
current_nodes : ``List[bool]``, required.
The nodes which are representatives in the graph.
A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges: ``Dict[int, int]``, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input: ``numpy.ndarray``, required.
old_output: ``numpy.ndarray``, required.
representatives : ``List[Set[int]]``, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
Returns
-------
Nothing - all variables are modified in place. | def chu_liu_edmonds(length: int,
score_matrix: numpy.ndarray,
current_nodes: List[bool],
final_edges: Dict[int, int],
old_input: numpy.ndarray,
old_output: numpy.ndarray,
representatives: List[Set[int]]):
"""
Applies the chu-liu-edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
Parameters
----------
length : ``int``, required.
The number of nodes.
score_matrix : ``numpy.ndarray``, required.
The score matrix representing the scores for pairs
of nodes.
current_nodes : ``List[bool]``, required.
The nodes which are representatives in the graph.
A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges: ``Dict[int, int]``, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input: ``numpy.ndarray``, required.
old_output: ``numpy.ndarray``, required.
representatives : ``List[Set[int]]``, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
Returns
-------
Nothing - all variables are modified in place.
"""
# Set the initial graph to be the greedy best one.
parents = [-1]
for node1 in range(1, length):
parents.append(0)
if current_nodes[node1]:
max_score = score_matrix[0, node1]
for node2 in range(1, length):
if node2 == node1 or not current_nodes[node2]:
continue
new_score = score_matrix[node2, node1]
if new_score > max_score:
max_score = new_score
parents[node1] = node2
# Check if this solution has a cycle.
has_cycle, cycle = _find_cycle(parents, length, current_nodes)
# If there are no cycles, find all edges and return.
if not has_cycle:
final_edges[0] = -1
for node in range(1, length):
if not current_nodes[node]:
continue
parent = old_input[parents[node], node]
child = old_output[parents[node], node]
final_edges[child] = parent
return
# Otherwise, we have a cycle so we need to remove an edge.
# From here until the recursive call is the contraction stage of the algorithm.
cycle_weight = 0.0
# Find the weight of the cycle.
index = 0
for node in cycle:
index += 1
cycle_weight += score_matrix[parents[node], node]
# For each node in the graph, find the maximum weight incoming
# and outgoing edge into the cycle.
cycle_representative = cycle[0]
for node in range(length):
if not current_nodes[node] or node in cycle:
continue
in_edge_weight = float("-inf")
in_edge = -1
out_edge_weight = float("-inf")
out_edge = -1
for node_in_cycle in cycle:
if score_matrix[node_in_cycle, node] > in_edge_weight:
in_edge_weight = score_matrix[node_in_cycle, node]
in_edge = node_in_cycle
# Add the new edge score to the cycle weight
# and subtract the edge we're considering removing.
score = (cycle_weight +
score_matrix[node, node_in_cycle] -
score_matrix[parents[node_in_cycle], node_in_cycle])
if score > out_edge_weight:
out_edge_weight = score
out_edge = node_in_cycle
score_matrix[cycle_representative, node] = in_edge_weight
old_input[cycle_representative, node] = old_input[in_edge, node]
old_output[cycle_representative, node] = old_output[in_edge, node]
score_matrix[node, cycle_representative] = out_edge_weight
old_output[node, cycle_representative] = old_output[node, out_edge]
old_input[node, cycle_representative] = old_input[node, out_edge]
# For the next recursive iteration, we want to consider the cycle as a
# single node. Here we collapse the cycle into the first node in the
# cycle (first node is arbitrary), and set all the other nodes to not be
# considered in the next iteration. We also keep track of which
# representatives we are considering this iteration because we need
# them below to check if we're done.
considered_representatives: List[Set[int]] = []
for i, node_in_cycle in enumerate(cycle):
considered_representatives.append(set())
if i > 0:
# We need to consider at least one
# node in the cycle, arbitrarily choose
# the first.
current_nodes[node_in_cycle] = False
for node in representatives[node_in_cycle]:
considered_representatives[i].add(node)
if i > 0:
representatives[cycle_representative].add(node)
chu_liu_edmonds(length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives)
# Expansion stage.
# Check each node in the cycle; if one of its representatives
# is a key in final_edges, it is the one we need.
found = False
key_node = -1
for i, node in enumerate(cycle):
for cycle_rep in considered_representatives[i]:
if cycle_rep in final_edges:
key_node = node
found = True
break
if found:
break
previous = parents[key_node]
while previous != key_node:
child = old_output[parents[previous], previous]
parent = old_input[parents[previous], previous]
final_edges[child] = parent
previous = parents[previous] |
Replace all the parameter values with the averages.
Save the current parameter values to restore later. | def assign_average_value(self) -> None:
"""
Replace all the parameter values with the averages.
Save the current parameter values to restore later.
"""
for name, parameter in self._parameters:
self._backups[name].copy_(parameter.data)
parameter.data.copy_(self._shadows[name]) |
Restore the backed-up (non-average) parameter values. | def restore(self) -> None:
"""
Restore the backed-up (non-average) parameter values.
"""
for name, parameter in self._parameters:
parameter.data.copy_(self._backups[name]) |
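The intended usage pattern for these two methods, as a sketch (``ema`` stands for whatever object holds the shadow parameters, and ``evaluate`` is a hypothetical evaluation call):

ema.assign_average_value()            # parameters <- shadow averages (originals backed up)
validation_metrics = evaluate(model)  # evaluate with the averaged weights
ema.restore()                         # parameters <- the backed-up training weights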
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``. | def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:
# pylint: disable=arguments-differ
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension
and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
"""
raise NotImplementedError |
This method can be used to prune the set of unfinished states on a beam or finished states
at the end of search. In the former case, the states need not be sorted because they all come
from the same decoding step, which does the sorting. However, if the states are finished and
this method is called at the end of the search, they need to be sorted because they come
from different decoding steps. | def _prune_beam(states: List[State],
beam_size: int,
sort_states: bool = False) -> List[State]:
"""
This method can be used to prune the set of unfinished states on a beam or finished states
at the end of search. In the former case, the states need not be sorted because they all come
from the same decoding step, which does the sorting. However, if the states are finished and
this method is called at the end of the search, they need to be sorted because they come
from different decoding steps.
"""
states_by_batch_index: Dict[int, List[State]] = defaultdict(list)
for state in states:
assert len(state.batch_indices) == 1
batch_index = state.batch_indices[0]
states_by_batch_index[batch_index].append(state)
pruned_states = []
for _, instance_states in states_by_batch_index.items():
if sort_states:
scores = torch.cat([state.score[0].view(-1) for state in instance_states])
_, sorted_indices = scores.sort(-1, descending=True)
sorted_states = [instance_states[i] for i in sorted_indices.detach().cpu().numpy()]
instance_states = sorted_states
for state in instance_states[:beam_size]:
pruned_states.append(state)
return pruned_states |
Returns the best finished states for each batch instance based on model scores. We return
at most ``self._max_num_decoded_sequences`` number of sequences per instance. | def _get_best_final_states(self, finished_states: List[StateType]) -> Dict[int, List[StateType]]:
"""
Returns the best finished states for each batch instance based on model scores. We return
at most ``self._max_num_decoded_sequences`` number of sequences per instance.
"""
batch_states: Dict[int, List[StateType]] = defaultdict(list)
for state in finished_states:
batch_states[state.batch_indices[0]].append(state)
best_states: Dict[int, List[StateType]] = {}
for batch_index, states in batch_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]]
return best_states |
Read pre-trained word vectors from a text file (possibly compressed, possibly contained
inside an archive with multiple files). The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than ``embedding_dim`` raise a warning and are skipped.
The remainder of the docstring is identical to ``_read_pretrained_embeddings_file``. | def _read_embeddings_from_text_file(file_uri: str,
embedding_dim: int,
vocab: Vocabulary,
namespace: str = "tokens") -> torch.FloatTensor:
"""
Read pre-trained word vectors from a text file (possibly compressed, possibly contained
inside an archive with multiple files). The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than ``embedding_dim`` raise a warning and are skipped.
The remainder of the docstring is identical to ``_read_pretrained_embeddings_file``.
"""
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
vocab_size = vocab.get_vocab_size(namespace)
embeddings = {}
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading pretrained embeddings from file")
with EmbeddingsTextFile(file_uri) as embeddings_file:
for line in Tqdm.tqdm(embeddings_file):
token = line.split(' ', 1)[0]
if token in tokens_to_keep:
fields = line.rstrip().split(' ')
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning("Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim, len(fields) - 1, line)
continue
vector = numpy.asarray(fields[1:], dtype='float32')
embeddings[token] = vector
if not embeddings:
raise ConfigurationError("No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary")
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
embeddings_std)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug("Token %s was not found in the embedding file. Initialising randomly.", token)
logger.info("Pretrained embeddings were found for %d out of %d tokens",
num_tokens_found, vocab_size)
return embedding_matrix |
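A toy example of the expected on-disk format, one "<token> <dim 1> ... <dim n>" line per word (values are made up; ``embedding_dim`` would be 3 here):

with open("toy_vectors.txt", "w", encoding="utf-8") as vector_file:
    vector_file.write("the 0.418 0.249 -0.412\n")
    vector_file.write("cat 0.981 -0.401 0.520\n")
# "toy_vectors.txt" could then be passed as file_uri with embedding_dim=3.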
Reads from a hdf5 formatted file. The embedding matrix is assumed to
be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``. | def _read_embeddings_from_hdf5(embeddings_filename: str,
embedding_dim: int,
vocab: Vocabulary,
namespace: str = "tokens") -> torch.FloatTensor:
"""
Reads from a hdf5 formatted file. The embedding matrix is assumed to
be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``.
"""
with h5py.File(embeddings_filename, 'r') as fin:
embeddings = fin['embedding'][...]
if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
raise ConfigurationError(
"Read shape {0} embeddings from the file, but expected {1}".format(
list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]))
return torch.FloatTensor(embeddings) |
This function takes a string as input and, if it contains 1 or 2 integers, assumes the
largest one is the number of tokens. Returns None if the line doesn't match that pattern. | def _get_num_tokens_from_first_line(line: str) -> Optional[int]:
""" This function takes a string as input and, if it contains 1 or 2 integers, assumes the
largest one is the number of tokens. Returns None if the line doesn't match that pattern. """
fields = line.split(' ')
if 1 <= len(fields) <= 2:
try:
int_fields = [int(x) for x in fields]
except ValueError:
return None
else:
num_tokens = max(int_fields)
logger.info('Recognized a header line in the embedding file with number of tokens: %d',
num_tokens)
return num_tokens
return None |
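Behaviour on a few example first lines (the commented values follow from the logic above):

_get_num_tokens_from_first_line("400000 300")              # 400000 (header: vocab size and dim)
_get_num_tokens_from_first_line("the 0.418 0.249 -0.412")  # None (an ordinary vector line)
_get_num_tokens_from_first_line("foo bar")                 # None (fields are not integers)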
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and
returns their sum for the decoder to add it to the predicted embedding to bias the
prediction towards missing actions. | def _get_predicted_embedding_addition(self,
checklist_state: ChecklistStatelet,
action_ids: List[int],
action_embeddings: torch.Tensor) -> torch.Tensor:
"""
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and
returns their sum for the decoder to add it to the predicted embedding to bias the
prediction towards missing actions.
"""
# Our basic approach here will be to figure out which actions we want to bias, by doing
# some fancy indexing work, then multiply the action embeddings by a mask for those
# actions, and return the sum of the result.
# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the
# checklist, and 0 otherwise.
checklist_balance = checklist_state.get_balance().clamp(min=0)
# (num_terminal_actions, 1)
actions_in_agenda = checklist_state.terminal_actions
# (1, num_current_actions)
action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0)
# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the
# terminal action i is our current action j, and a value of 0 otherwise. Because both sets
# of actions are free of duplicates, there will be at most one non-zero value per current
# action, and per terminal action.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()
# Shape: (num_current_actions,). With the inner multiplication, we remove any current
# agenda actions that are not in our checklist balance, then we sum over the terminal
# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,
# where a 1 means to encourage the current action in that position.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)
# Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want
# the model to prefer.
embedding_addition = torch.sum(action_embeddings * actions_to_encourage.unsqueeze(1),
dim=0,
keepdim=False)
if self._add_action_bias:
# If we're adding an action bias, the last dimension of the action embedding is a bias
# weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so
# we zero out that dimension here.
embedding_addition[-1] = 0
return embedding_addition |
Pulls at most ``max_instances_in_memory`` from the input_queue,
groups them into batches of size ``batch_size``, converts them
to ``TensorDict`` s, and puts them on the ``output_queue``. | def _create_tensor_dicts(input_queue: Queue,
output_queue: Queue,
iterator: DataIterator,
shuffle: bool,
index: int) -> None:
"""
Pulls at most ``max_instances_in_memory`` from the input_queue,
groups them into batches of size ``batch_size``, converts them
to ``TensorDict`` s, and puts them on the ``output_queue``.
"""
def instances() -> Iterator[Instance]:
instance = input_queue.get()
while instance is not None:
yield instance
instance = input_queue.get()
for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle):
output_queue.put(tensor_dict)
output_queue.put(index) |
Reads Instances from the iterable and puts them in the input_queue. | def _queuer(instances: Iterable[Instance],
input_queue: Queue,
num_workers: int,
num_epochs: Optional[int]) -> None:
"""
Reads Instances from the iterable and puts them in the input_queue.
"""
epoch = 0
while num_epochs is None or epoch < num_epochs:
epoch += 1
for instance in instances:
input_queue.put(instance)
# Now put a None for each worker, since each needs to receive one
# to know that it's done.
for _ in range(num_workers):
input_queue.put(None) |
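The sentinel-based handshake between ``_queuer`` and its workers, reduced to a self-contained sketch with no AllenNLP objects involved:

from multiprocessing import Process, Queue

def _double(in_queue: Queue, out_queue: Queue) -> None:
    item = in_queue.get()
    while item is not None:       # a None sentinel means "no more work"
        out_queue.put(item * 2)
        item = in_queue.get()

if __name__ == "__main__":
    in_queue, out_queue = Queue(), Queue()
    workers = [Process(target=_double, args=(in_queue, out_queue)) for _ in range(2)]
    for process in workers:
        process.start()
    for i in range(5):
        in_queue.put(i)
    for _ in workers:             # one sentinel per worker, exactly as in _queuer above
        in_queue.put(None)
    for process in workers:
        process.join()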
Returns a list of valid actions for each element of the group. | def get_valid_actions(self) -> List[Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]:
"""
Returns a list of valid actions for each element of the group.
"""
return [state.get_valid_actions() for state in self.grammar_state] |
A worker that pulls filenames off the input queue, uses the dataset reader
to read them, and places the generated instances on the output queue.
When there are no filenames left on the input queue, it puts its ``index``
on the output queue and doesn't do anything else. | def _worker(reader: DatasetReader,
input_queue: Queue,
output_queue: Queue,
index: int) -> None:
"""
A worker that pulls filenames off the input queue, uses the dataset reader
to read them, and places the generated instances on the output queue.
When there are no filenames left on the input queue, it puts its ``index``
on the output queue and doesn't do anything else.
"""
# Keep going until you get a file_path that's None.
while True:
file_path = input_queue.get()
if file_path is None:
# Put my index on the queue to signify that I'm finished
output_queue.put(index)
break
logger.info(f"reading instances from {file_path}")
for instance in reader.read(file_path):
output_queue.put(instance) |
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : ``Dict[int, str]``, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
Returns
-------
``List[Tuple[int, int]]``
The allowed transitions (from_label_id, to_label_id). | def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : ``Dict[int, str]``, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
Returns
-------
``List[Tuple[int, int]]``
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity,
to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed |
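A small usage example with a three-label BIO vocabulary (the label ids are arbitrary):

labels = {0: "B-PER", 1: "I-PER", 2: "O"}
allowed = allowed_transitions("BIO", labels)
# Ids 3 and 4 are the implicit START and END states added above.
assert (0, 1) in allowed      # B-PER -> I-PER is allowed
assert (2, 1) not in allowed  # O -> I-PER is not (I- must follow B-/I- of the same entity)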
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : ``str``, required
The tag that the transition originates from. For example, if the
label is ``I-PER``, the ``from_tag`` is ``I``.
from_entity: ``str``, required
The entity corresponding to the ``from_tag``. For example, if the
label is ``I-PER``, the ``from_entity`` is ``PER``.
to_tag : ``str``, required
The tag that the transition leads to. For example, if the
label is ``I-PER``, the ``to_tag`` is ``I``.
to_entity: ``str``, required
The entity corresponding to the ``to_tag``. For example, if the
label is ``I-PER``, the ``to_entity`` is ``PER``.
Returns
-------
``bool``
Whether the transition is allowed under the given ``constraint_type``. | def is_transition_allowed(constraint_type: str,
from_tag: str,
from_entity: str,
to_tag: str,
to_entity: str):
"""
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : ``str``, required
The tag that the transition originates from. For example, if the
label is ``I-PER``, the ``from_tag`` is ``I``.
from_entity: ``str``, required
The entity corresponding to the ``from_tag``. For example, if the
label is ``I-PER``, the ``from_entity`` is ``PER``.
to_tag : ``str``, required
The tag that the transition leads to. For example, if the
label is ``I-PER``, the ``to_tag`` is ``I``.
to_entity: ``str``, required
The entity corresponding to the ``to_tag``. For example, if the
label is ``I-PER``, the ``to_entity`` is ``PER``.
Returns
-------
``bool``
Whether the transition is allowed under the given ``constraint_type``.
"""
# pylint: disable=too-many-return-statements
if to_tag == "START" or from_tag == "END":
# Cannot transition into START or from END
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ('O', 'B', 'U')
if to_tag == "END":
return from_tag in ('O', 'L', 'U')
return any([
# O can transition to O, B-* or U-*
# L-x can transition to O, B-*, or U-*
# U-x can transition to O, B-*, or U-*
from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'),
# B-x can only transition to I-x or L-x
# I-x can only transition to I-x or L-x
from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity
])
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ('O', 'B')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
# Can always transition to O or B-x
to_tag in ('O', 'B'),
# Can only transition to I-x from B-x or I-x
to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ('O', 'I')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
# Can always transition to O or I-x
to_tag in ('O', 'I'),
# Can only transition to B-x from B-x or I-x, where
# x is the same tag.
to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ('B', 'S')
if to_tag == "END":
return from_tag in ('E', 'S')
return any([
# Can only transition to B or S from E or S.
to_tag in ('B', 'S') and from_tag in ('E', 'S'),
# Can only transition to M-x from B-x, where
# x is the same tag.
to_tag == 'M' and from_tag in ('B', 'M') and from_entity == to_entity,
# Can only transition to E-x from B-x or M-x, where
# x is the same tag.
to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity,
])
else:
raise ConfigurationError(f"Unknown constraint type: {constraint_type}") |
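For example, under the "BIOUL" scheme an entity may only be closed by an ``L`` tag of the same entity type (a small illustrative check, not from the original source):

assert is_transition_allowed("BIOUL", "B", "PER", "L", "PER")      # close the PER span
assert not is_transition_allowed("BIOUL", "B", "PER", "L", "LOC")  # entity types differ
assert not is_transition_allowed("BIOUL", "O", "", "I", "PER")     # I must follow B or I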
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences. | def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
# Transpose batch size and sequence dimensions
mask = mask.float().transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
# transitions to the initial states and the logits for the first timestep.
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
# For each i we compute logits for the transitions from timestep i-1 to timestep i.
# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
# (instance, current_tag, next_tag)
for i in range(1, sequence_length):
# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
emit_scores = logits[i].view(batch_size, 1, num_tags)
# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
transition_scores = self.transitions.view(1, num_tags, num_tags)
# Alpha is for the current_tag, so we broadcast along the next_tag axis.
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
# Add all the scores together and logsumexp over the current_tag axis
inner = broadcast_alpha + emit_scores + transition_scores
# In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension
# of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.
alpha = (util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
alpha * (1 - mask[i]).view(batch_size, 1))
# Every sequence needs to end with a transition to the stop_tag.
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
return util.logsumexp(stops) |
Computes the numerator term for the log-likelihood, which is just score(inputs, tags) | def _joint_likelihood(self,
logits: torch.Tensor,
tags: torch.Tensor,
mask: torch.LongTensor) -> torch.Tensor:
"""
Computes the numerator term for the log-likelihood, which is just score(inputs, tags)
"""
batch_size, sequence_length, _ = logits.data.shape
# Transpose batch size and sequence dimensions:
logits = logits.transpose(0, 1).contiguous()
mask = mask.float().transpose(0, 1).contiguous()
tags = tags.transpose(0, 1).contiguous()
# Start with the transition scores from start_tag to the first tag in each input
if self.include_start_end_transitions:
score = self.start_transitions.index_select(0, tags[0])
else:
score = 0.0
# Add up the scores for the observed transitions and all the inputs but the last
for i in range(sequence_length - 1):
# Each is shape (batch_size,)
current_tag, next_tag = tags[i], tags[i+1]
# The scores for transitioning from current_tag to next_tag
transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
# The score for using current_tag
emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
# Include transition score if next element is unmasked,
# input_score if this element is unmasked.
score = score + transition_score * mask[i + 1] + emit_score * mask[i]
# Transition from last state to "stop" state. To start with, we need to find the last tag
# for each instance.
last_tag_index = mask.sum(0).long() - 1
last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
# Compute score of transitioning to `stop_tag` from each "last tag".
if self.include_start_end_transitions:
last_transition_score = self.end_transitions.index_select(0, last_tags)
else:
last_transition_score = 0.0
# Add the last input if it's not masked.
last_inputs = logits[-1] # (batch_size, num_tags)
last_input_score = last_inputs.gather(1, last_tags.view(-1, 1)) # (batch_size, 1)
last_input_score = last_input_score.squeeze() # (batch_size,)
score = score + last_transition_score + last_input_score * mask[-1]
return score |
Computes the log likelihood. | def forward(self,
inputs: torch.Tensor,
tags: torch.Tensor,
mask: torch.ByteTensor = None) -> torch.Tensor:
"""
Computes the log likelihood.
"""
# pylint: disable=arguments-differ
if mask is None:
mask = torch.ones(*tags.size(), dtype=torch.long, device=tags.device)
log_denominator = self._input_likelihood(inputs, mask)
log_numerator = self._joint_likelihood(inputs, tags, mask)
return torch.sum(log_numerator - log_denominator) |
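A small sketch tying these methods together, assuming the enclosing class is AllenNLP's ``ConditionalRandomField``. The forward pass returns the summed log likelihood (its negation is the usual training loss), and for one short sequence the denominator can be checked against brute-force enumeration:

import itertools
import torch
from allennlp.modules.conditional_random_field import ConditionalRandomField  # assumed enclosing class

num_tags = 3
crf = ConditionalRandomField(num_tags)
logits = torch.randn(1, 4, num_tags)          # (batch_size, seq_len, num_tags)
tags = torch.randint(0, num_tags, (1, 4))     # gold tag indices
mask = torch.ones(1, 4, dtype=torch.long)
loss = -crf(logits, tags, mask)               # negative log likelihood

# The denominator equals the log-sum-exp of the joint score over all tag sequences.
scores = [crf._joint_likelihood(logits, torch.tensor([seq]), mask)
          for seq in itertools.product(range(num_tags), repeat=4)]
brute_force = torch.logsumexp(torch.stack(scores).squeeze(-1), dim=0)
assert torch.allclose(crf._input_likelihood(logits, mask), brute_force)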
Uses the Viterbi algorithm to find the most likely tags for the given inputs.
If constraints are applied, disallows all other transitions. | def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
"""
Uses the Viterbi algorithm to find the most likely tags for the given inputs.
If constraints are applied, disallows all other transitions.
"""
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions matrix with start and end transitions
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
# Apply transition constraints
constrained_transitions = (
self.transitions * self._constraint_mask[:num_tags, :num_tags] +
-10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
)
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[start_tag, :num_tags] = (
self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
)
transitions[:num_tags, end_tag] = (
self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
-10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
)
else:
transitions[start_tag, :num_tags] = (-10000.0 *
(1 - self._constraint_mask[start_tag, :num_tags].detach()))
transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
best_paths = []
# Pad the max sequence length by 2 to account for start_tag + end_tag.
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
sequence_length = torch.sum(prediction_mask)
# Start with everything totally unlikely
tag_sequence.fill_(-10000.)
# At timestep 0 we must have the START_TAG
tag_sequence[0, start_tag] = 0.
# At steps 1, ..., sequence_length we just use the incoming prediction
tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
# And at the last timestep we must have the END_TAG
tag_sequence[sequence_length + 1, end_tag] = 0.
# We pass the tags and the transitions to ``viterbi_decode``.
viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions)
# Get rid of START and END sentinels and append.
viterbi_path = viterbi_path[1:-1]
best_paths.append((viterbi_path, viterbi_score.item()))
return best_paths |
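Decoding with the constrained transition matrix, again assuming the enclosing ``ConditionalRandomField`` class (shapes are illustrative):

import torch
from allennlp.modules.conditional_random_field import ConditionalRandomField  # assumed enclosing class

crf = ConditionalRandomField(num_tags=4)
logits = torch.randn(1, 6, 4)
mask = torch.ones(1, 6, dtype=torch.long)
tag_indices, score = crf.viterbi_tags(logits, mask)[0]
# `tag_indices` is a list of 6 tag ids; `score` is the (unnormalized) Viterbi score.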
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``. | def search(self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionType) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``.
"""
batch_size = start_predictions.size()[0]
# List of (batch_size, beam_size) tensors. One for each time step. Does not
# include the start symbols, which are implicit.
predictions: List[torch.Tensor] = []
# List of (batch_size, beam_size) tensors. One for each time step. None for
# the first. Stores the index n for the parent prediction, i.e.
# predictions[t-1][i][n], that it came from.
backpointers: List[torch.Tensor] = []
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_log_probabilities, state = step(start_predictions, start_state)
num_classes = start_class_log_probabilities.size()[1]
# Make sure `per_node_beam_size` is not larger than `num_classes`.
if self.per_node_beam_size > num_classes:
raise ConfigurationError(f"Target vocab size ({num_classes:d}) too small "
f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
f"Please decrease beam_size or per_node_beam_size.")
# shape: (batch_size, beam_size), (batch_size, beam_size)
start_top_log_probabilities, start_predicted_classes = \
start_class_log_probabilities.topk(self.beam_size)
if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
warnings.warn("Empty sequences predicted. You may want to increase the beam size or ensure "
"your step function is working properly.",
RuntimeWarning)
return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities
# The log probabilities for the last time step.
# shape: (batch_size, beam_size)
last_log_probabilities = start_top_log_probabilities
# shape: [(batch_size, beam_size)]
predictions.append(start_predicted_classes)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
log_probs_after_end = start_class_log_probabilities.new_full(
(batch_size * self.beam_size, num_classes),
float("-inf")
)
log_probs_after_end[:, self._end_index] = 0.
# Set the same state for each element in the beam.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
unsqueeze(1).\
expand(batch_size, self.beam_size, *last_dims).\
reshape(batch_size * self.beam_size, *last_dims)
for timestep in range(self.max_steps - 1):
# shape: (batch_size * beam_size,)
last_predictions = predictions[-1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._end_index`,
# then we can stop early.
if (last_predictions == self._end_index).all():
break
# Take a step. This get the predicted log probs of the next classes
# and updates the state.
# shape: (batch_size * beam_size, num_classes)
class_log_probabilities, state = step(last_predictions, state)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size,
num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
# shape: (batch_size * beam_size, num_classes)
cleaned_log_probabilities = torch.where(
last_predictions_expanded == self._end_index,
log_probs_after_end,
class_log_probabilities
)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_log_probabilities, predicted_classes = \
cleaned_log_probabilities.topk(self.per_node_beam_size)
# Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
# so that we can add them to the current log probs for this timestep.
# This lets us maintain the log probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_log_probabilities = last_log_probabilities.\
unsqueeze(2).\
expand(batch_size, self.beam_size, self.per_node_beam_size).\
reshape(batch_size * self.beam_size, self.per_node_beam_size)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_log_probabilities.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# Keep only the top `beam_size` beam indices.
# shape: (batch_size, beam_size), (batch_size, beam_size)
restricted_beam_log_probs, restricted_beam_indices = reshaped_summed.topk(self.beam_size)
# Use the beam indices to extract the corresponding classes.
# shape: (batch_size, beam_size)
restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)
predictions.append(restricted_predicted_classes)
# shape: (batch_size, beam_size)
last_log_probabilities = restricted_beam_log_probs
# The beam indices come from a `beam_size * per_node_beam_size` dimension where the
# indices with a common ancestor are grouped together. Hence
# dividing by per_node_beam_size gives the ancestor. (We use floor division so
# the result stays an integer tensor suitable for indexing.)
# shape: (batch_size, beam_size)
backpointer = restricted_beam_indices // self.per_node_beam_size
backpointers.append(backpointer)
# Keep only the pieces of the state tensors corresponding to the
# ancestors created this iteration.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size, beam_size, *)
expanded_backpointer = backpointer.\
view(batch_size, self.beam_size, *([1] * len(last_dims))).\
expand(batch_size, self.beam_size, *last_dims)
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
reshape(batch_size, self.beam_size, *last_dims).\
gather(1, expanded_backpointer).\
reshape(batch_size * self.beam_size, *last_dims)
if not torch.isfinite(last_log_probabilities).all():
warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. "
"This can happen when the beam size is larger than the number of valid (non-zero "
"probability) transitions that the step function produces.",
RuntimeWarning)
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[-1]
for timestep in range(len(predictions) - 2, 0, -1):
# shape: (batch_size, beam_size, 1)
cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(cur_preds)
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)
# shape: (batch_size, beam_size, 1)
final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(final_preds)
# shape: (batch_size, beam_size, max_steps)
all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)
return all_predictions, last_log_probabilities |
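A toy sketch of driving the search with a state-independent step function, assuming the enclosing class is AllenNLP's ``BeamSearch`` (the import path and constructor arguments are assumptions about its API):

import torch
import torch.nn.functional as F
from allennlp.nn.beam_search import BeamSearch  # assumed location of the enclosing class

# Fixed next-token distribution over a 4-word vocabulary; a real step function
# would condition on the state and on the last predictions.
log_probs = F.log_softmax(torch.tensor([[0.1, 3.0, 1.0, 2.0]]), dim=-1)

def step(last_predictions, state):
    return log_probs.expand(last_predictions.size(0), -1), state

beam_search = BeamSearch(end_index=0, max_steps=5, beam_size=2)
start_predictions = torch.zeros(3, dtype=torch.long)  # batch_size = 3
start_state = {"hidden": torch.zeros(3, 8)}           # any (batch_size, *) tensors
predictions, log_probabilities = beam_search.search(start_predictions, start_state, step)
# predictions: (3, 2, <=5), log_probabilities: (3, 2)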
Parameters
----------
data_directory : str, required.
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data
which has been preprocessed using scripts/reformat_text2sql_data.py.
dataset : str, optional.
The dataset to parse. By default all are parsed.
filter_by : str, optional
Compute statistics about a particular error and only print errors which don't contain this string.
verbose : bool, optional.
Whether to print information about incorrectly parsed SQL. | def main(data_directory: str, dataset: str = None, filter_by: str = None, verbose: bool = False) -> None:
"""
Parameters
----------
data_directory : str, required.
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data
which has been preprocessed using scripts/reformat_text2sql_data.py.
dataset : str, optional.
The dataset to parse. By default all are parsed.
filter_by : str, optional
Compute statistics about a particular error and only print errors which don't contain this string.
verbose : bool, optional.
Whether to print information about incorrectly parsed SQL.
"""
directory_dict = {path: files for path, names, files in os.walk(data_directory) if files}
for directory, data_files in directory_dict.items():
if "query_split" in directory or (dataset is not None and dataset not in directory):
continue
print(f"Parsing dataset at {directory}")
parsed = 0
total_non_aliases = 0
total_as_count = 0
total_queries_with_weird_as = 0
total = 0
for json_file in data_files:
print(f"\tParsing split at {json_file}")
file_path = os.path.join(directory, json_file)
num_parsed, num_queries, filtered_errors, non_basic_as_aliases, as_count, queries_with_weird_as = parse_dataset(file_path, filter_by, verbose)
parsed += num_parsed
total += num_queries
total_non_aliases += non_basic_as_aliases
total_as_count += as_count
total_queries_with_weird_as += queries_with_weird_as
print(f"\tParsed {parsed} out of {total} queries, coverage {parsed/total}")
print(f"\tFound {total_non_aliases} out of {total_as_count} non simple AS aliases. percentage: {total_non_aliases/total_as_count}")
print(f"\tFound {total_queries_with_weird_as} out of {total} queries with > 1 weird AS. percentage: {total_queries_with_weird_as/total}")
if filter_by is not None:
print(f"\tOf {total - parsed} errors, {filtered_errors/ (total - parsed + 1e-13)} contain {filter_by}") |
Checks whether the provided obj takes a certain arg.
If it's a class, we're really checking whether its constructor does.
If it's a function or method, we're checking the object itself.
Otherwise, we raise an error. | def takes_arg(obj, arg: str) -> bool:
"""
Checks whether the provided obj takes a certain arg.
If it's a class, we're really checking whether its constructor does.
If it's a function or method, we're checking the object itself.
Otherwise, we raise an error.
"""
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
return arg in signature.parameters |
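For example (the class and function below are hypothetical):

class Greeter:
    def __init__(self, name: str, excited: bool = False) -> None:
        self.name = name
        self.excited = excited

def shout(message: str) -> str:
    return message.upper()

assert takes_arg(Greeter, "name")        # inspects Greeter.__init__
assert not takes_arg(Greeter, "volume")
assert takes_arg(shout, "message")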
Checks whether a provided object takes in any keyword arguments (i.e. ``**kwargs``).
Similar to takes_arg, we do this for either the __init__ function of
the class or a function / method.
Otherwise, we raise an error. | def takes_kwargs(obj) -> bool:
"""
Checks whether a provided object takes in any keyword arguments (i.e. ``**kwargs``).
Similar to takes_arg, we do this for either the __init__ function of
the class or a function / method.
Otherwise, we raise an error.
"""
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
return bool(any([p.kind == inspect.Parameter.VAR_KEYWORD # type: ignore
for p in signature.parameters.values()])) |
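For example (both functions are hypothetical):

def strict(a: int) -> int:
    return a

def flexible(a: int, **extra) -> int:
    return a

assert not takes_kwargs(strict)
assert takes_kwargs(flexible)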
Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away. | def remove_optional(annotation: type):
"""
Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away.
"""
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', ())
if origin == Union and len(args) == 2 and args[1] == type(None):
return args[0]
else:
return annotation |
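For example:

from typing import Dict, Optional

assert remove_optional(Optional[int]) == int
assert remove_optional(Dict[str, int]) == Dict[str, int]  # non-Optional annotations pass through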
Given some class, a `Params` object, and potentially other keyword arguments,
create a dict of keyword args suitable for passing to the class's constructor.
The function does this by finding the class's constructor, matching the constructor
arguments to entries in the `params` object, and instantiating values for the parameters
using the type annotation and possibly a from_params method.
Any values that are provided in the `extras` will just be used as is.
For instance, you might provide an existing `Vocabulary` this way. | def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]:
"""
Given some class, a `Params` object, and potentially other keyword arguments,
create a dict of keyword args suitable for passing to the class's constructor.
The function does this by finding the class's constructor, matching the constructor
arguments to entries in the `params` object, and instantiating values for the parameters
using the type annotation and possibly a from_params method.
Any values that are provided in the `extras` will just be used as is.
For instance, you might provide an existing `Vocabulary` this way.
"""
# Get the signature of the constructor.
signature = inspect.signature(cls.__init__)
kwargs: Dict[str, Any] = {}
# Iterate over all the constructor parameters and their annotations.
for name, param in signature.parameters.items():
# Skip "self". You're not *required* to call the first parameter "self",
# so in theory this logic is fragile, but if you don't call the self parameter
# "self" you kind of deserve what happens.
if name == "self":
continue
# If the annotation is a compound type like typing.Dict[str, int],
# it will have an __origin__ field indicating `typing.Dict`
# and an __args__ field indicating `(str, int)`. We capture both.
annotation = remove_optional(param.annotation)
kwargs[name] = construct_arg(cls, name, annotation, param.default, params, **extras)
params.assert_empty(cls.__name__)
return kwargs |
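A small sketch with a hypothetical class whose constructor has only primitive annotations:

from allennlp.common import Params

class Widget:  # hypothetical class, not part of the library
    def __init__(self, size: int, name: str = "widget") -> None:
        self.size = size
        self.name = name

kwargs = create_kwargs(Widget, Params({"size": 3}))
widget = Widget(**kwargs)  # kwargs == {"size": 3, "name": "widget"}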
Given a dictionary of extra arguments, returns a dictionary of
kwargs that actually are a part of the signature of the cls.from_params
(or cls) method. | def create_extras(cls: Type[T],
extras: Dict[str, Any]) -> Dict[str, Any]:
"""
Given a dictionary of extra arguments, returns a dictionary of
kwargs that actually are a part of the signature of the cls.from_params
(or cls) method.
"""
subextras: Dict[str, Any] = {}
if hasattr(cls, "from_params"):
from_params_method = cls.from_params # type: ignore
else:
# In some rare cases, we get a registered subclass that does _not_ have a
# from_params method (this happens with Activations, for instance, where we
# register pytorch modules directly). This is a bit of a hack to make those work,
# instead of adding a `from_params` method for them somehow. Then the extras
# in the class constructor are what we are looking for, to pass on.
from_params_method = cls
if takes_kwargs(from_params_method):
# If annotation.params accepts **kwargs, we need to pass them all along.
# For example, `BasicTextFieldEmbedder.from_params` requires a Vocabulary
# object, but `TextFieldEmbedder.from_params` does not.
subextras = extras
else:
# Otherwise, only supply the ones that are actual args; any additional ones
# will cause a TypeError.
subextras = {k: v for k, v in extras.items()
if takes_arg(from_params_method, k)}
return subextras |
Does the work of actually constructing an individual argument for :func:`create_kwargs`.
Here we're in the inner loop of iterating over the parameters to a particular constructor,
trying to construct just one of them. The information we get for that parameter is its name,
its type annotation, and its default value; we also get the full set of ``Params`` for
constructing the object (which we may mutate), and any ``extras`` that the constructor might
need.
We take the type annotation and default value here separately, instead of using an
``inspect.Parameter`` object directly, so that we can handle ``Union`` types using recursion on
this method, trying the different annotation types in the union in turn. | def construct_arg(cls: Type[T], # pylint: disable=inconsistent-return-statements,too-many-return-statements
param_name: str,
annotation: Type,
default: Any,
params: Params,
**extras) -> Any:
"""
Does the work of actually constructing an individual argument for :func:`create_kwargs`.
Here we're in the inner loop of iterating over the parameters to a particular constructor,
trying to construct just one of them. The information we get for that parameter is its name,
its type annotation, and its default value; we also get the full set of ``Params`` for
constructing the object (which we may mutate), and any ``extras`` that the constructor might
need.
We take the type annotation and default value here separately, instead of using an
``inspect.Parameter`` object directly, so that we can handle ``Union`` types using recursion on
this method, trying the different annotation types in the union in turn.
"""
from allennlp.models.archival import load_archive # import here to avoid circular imports
# We used `param_name` as the method argument to avoid conflicts with 'name' being a key in
# `extras`, which isn't _that_ unlikely. Now that we are inside the method, we can switch back
# to using `name`.
name = param_name
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', [])
# The parameter is optional if its default value is not the "no default" sentinel.
optional = default != _NO_DEFAULT
# Some constructors expect extra non-parameter items, e.g. vocab: Vocabulary.
# We check the provided `extras` for these and just use them if they exist.
if name in extras:
return extras[name]
# Next case is when argument should be loaded from pretrained archive.
elif name in params and isinstance(params.get(name), Params) and "_pretrained" in params.get(name):
load_module_params = params.pop(name).pop("_pretrained")
archive_file = load_module_params.pop("archive_file")
module_path = load_module_params.pop("module_path")
freeze = load_module_params.pop("freeze", True)
archive = load_archive(archive_file)
result = archive.extract_module(module_path, freeze) # pylint: disable=no-member
if not isinstance(result, annotation):
raise ConfigurationError(f"The module from model at {archive_file} at path {module_path} "
f"was expected of type {annotation} but is of type {type(result)}")
return result
# The next case is when the parameter type is itself constructible from_params.
elif hasattr(annotation, 'from_params'):
if name in params:
# Our params have an entry for this, so we use that.
subparams = params.pop(name)
subextras = create_extras(annotation, extras)
# In some cases we allow a string instead of a param dict, so
# we need to handle that case separately.
if isinstance(subparams, str):
return annotation.by_name(subparams)()
else:
return annotation.from_params(params=subparams, **subextras)
elif not optional:
# Not optional and not supplied, that's an error!
raise ConfigurationError(f"expected key {name} for {cls.__name__}")
else:
return default
# If the parameter type is a Python primitive, just pop it off
# using the correct casting pop_xyz operation.
elif annotation == str:
return params.pop(name, default) if optional else params.pop(name)
elif annotation == int:
return params.pop_int(name, default) if optional else params.pop_int(name)
elif annotation == bool:
return params.pop_bool(name, default) if optional else params.pop_bool(name)
elif annotation == float:
return params.pop_float(name, default) if optional else params.pop_float(name)
# This is special logic for handling types like Dict[str, TokenIndexer],
# List[TokenIndexer], Tuple[TokenIndexer, Tokenizer], and Set[TokenIndexer],
# which it creates by instantiating each value from_params and returning the resulting structure.
elif origin in (Dict, dict) and len(args) == 2 and hasattr(args[-1], 'from_params'):
value_cls = annotation.__args__[-1]
value_dict = {}
for key, value_params in params.pop(name, Params({})).items():
subextras = create_extras(value_cls, extras)
value_dict[key] = value_cls.from_params(params=value_params, **subextras)
return value_dict
elif origin in (List, list) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_list = []
for value_params in params.pop(name, Params({})):
subextras = create_extras(value_cls, extras)
value_list.append(value_cls.from_params(params=value_params, **subextras))
return value_list
elif origin in (Tuple, tuple) and all(hasattr(arg, 'from_params') for arg in args):
value_list = []
for value_cls, value_params in zip(annotation.__args__, params.pop(name, Params({}))):
subextras = create_extras(value_cls, extras)
value_list.append(value_cls.from_params(params=value_params, **subextras))
return tuple(value_list)
elif origin in (Set, set) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_set = set()
for value_params in params.pop(name, Params({})):
subextras = create_extras(value_cls, extras)
value_set.add(value_cls.from_params(params=value_params, **subextras))
return value_set
elif origin == Union:
# Storing this so we can recover it later if we need to.
param_value = params.get(name, Params({}))
if isinstance(param_value, Params):
param_value = param_value.duplicate()
# We'll try each of the given types in the union sequentially, returning the first one that
# succeeds.
for arg in args:
try:
return construct_arg(cls, name, arg, default, params, **extras)
except (ValueError, TypeError, ConfigurationError, AttributeError):
# Our attempt to construct the argument may have popped `params[name]`, so we
# restore it here.
params[name] = param_value
if isinstance(param_value, Params):
param_value = param_value.duplicate()
continue
# If none of them succeeded, we crash.
raise ConfigurationError(f"Failed to construct argument {name} with type {annotation}")
else:
# Pass it on as is and hope for the best. ¯\_(ツ)_/¯
if optional:
return params.pop(name, default)
else:
return params.pop(name) |
This is the automatic implementation of `from_params`. Any class that subclasses `FromParams`
(or `Registrable`, which itself subclasses `FromParams`) gets this implementation for free.
If you want your class to be instantiated from params in the "obvious" way -- pop off parameters
and hand them to your constructor with the same names -- this provides that functionality.
If you need more complex logic in your `from_params` method, you'll have to implement
your own method that overrides this one. | def from_params(cls: Type[T], params: Params, **extras) -> T:
"""
This is the automatic implementation of `from_params`. Any class that subclasses `FromParams`
(or `Registrable`, which itself subclasses `FromParams`) gets this implementation for free.
If you want your class to be instantiated from params in the "obvious" way -- pop off parameters
and hand them to your constructor with the same names -- this provides that functionality.
If you need more complex logic in your `from_params` method, you'll have to implement
your own method that overrides this one.
"""
# pylint: disable=protected-access
from allennlp.common.registrable import Registrable # import here to avoid circular imports
logger.info(f"instantiating class {cls} from params {getattr(params, 'params', params)} "
f"and extras {set(extras.keys())}")
if params is None:
return None
if isinstance(params, str):
params = Params({"type": params})
registered_subclasses = Registrable._registry.get(cls)
if registered_subclasses is not None:
# We know ``cls`` inherits from Registrable, so we'll use a cast to make mypy happy.
# We have to use a disable to make pylint happy.
# pylint: disable=no-member
as_registrable = cast(Type[Registrable], cls)
default_to_first_choice = as_registrable.default_implementation is not None
choice = params.pop_choice("type",
choices=as_registrable.list_available(),
default_to_first_choice=default_to_first_choice)
subclass = registered_subclasses[choice]
if hasattr(subclass, 'from_params'):
# We want to call subclass.from_params
extras = create_extras(subclass, extras)
return subclass.from_params(params=params, **extras)
else:
# In some rare cases, we get a registered subclass that does _not_ have a
# from_params method (this happens with Activations, for instance, where we
# register pytorch modules directly). This is a bit of a hack to make those work,
# instead of adding a `from_params` method for them somehow. We just trust that
# you've done the right thing in passing your parameters, and nothing else needs to
# be recursively constructed.
extras = create_extras(subclass, extras)
constructor_args = {**params, **extras}
return subclass(**constructor_args)
else:
# This is not a base class, so convert our params and extras into a dict of kwargs.
if cls.__init__ == object.__init__:
# This class does not have an explicit constructor, so don't give it any kwargs.
# Without this logic, create_kwargs will look at object.__init__ and see that
# it takes *args and **kwargs and look for those.
kwargs: Dict[str, Any] = {}
else:
# This class has a constructor, so create kwargs for it.
kwargs = create_kwargs(cls, params, **extras)
return cls(**kwargs) |
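A minimal sketch of the non-registrable happy path, with a hypothetical ``FromParams`` subclass:

from allennlp.common import Params
from allennlp.common.from_params import FromParams

class Schedule(FromParams):  # hypothetical class for illustration
    def __init__(self, warmup_steps: int, decay: float = 0.5) -> None:
        self.warmup_steps = warmup_steps
        self.decay = decay

schedule = Schedule.from_params(Params({"warmup_steps": 100}))
assert schedule.warmup_steps == 100 and schedule.decay == 0.5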
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` return states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score. | def take_step(self,
state: StateType,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[StateType]:
"""
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` return states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
"""
raise NotImplementedError |
In PyTorch 1.0, Tensor._sparse_mask was changed to Tensor.sparse_mask.
This wrapper allows AllenNLP to (temporarily) work with both 1.0 and 0.4.1. | def _safe_sparse_mask(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
In PyTorch 1.0, Tensor._sparse_mask was changed to Tensor.sparse_mask.
This wrapper allows AllenNLP to (temporarily) work with both 1.0 and 0.4.1.
"""
# pylint: disable=protected-access
try:
return tensor.sparse_mask(mask)
except AttributeError:
# TODO(joelgrus): remove this and/or warn at some point
return tensor._sparse_mask(mask) |
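A small sketch (the tensors below are made up; ``_safe_sparse_mask`` simply dispatches to whichever method name the installed PyTorch provides):

import torch

dense = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
indices = torch.tensor([[0, 1], [0, 1]])  # keep positions (0, 0) and (1, 1)
mask = torch.sparse_coo_tensor(indices, torch.ones(2), (2, 2)).coalesce()
masked = _safe_sparse_mask(dense, mask)
# `masked` is sparse and holds dense[0, 0] == 1.0 and dense[1, 1] == 4.0.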
Parses a chunk of text in the SemEval SDP format.
Each word in the sentence is returned as a dictionary with the following
format:
'id': '1',
'form': 'Pierre',
'lemma': 'Pierre',
'pos': 'NNP',
'head': '2', # Note that this is the `syntactic` head.
'deprel': 'nn',
'top': '-',
'pred': '+',
'frame': 'named:x-c'
Along with a list of arcs and their corresponding tags. Note that
in semantic dependency parsing words can have more than one head
(it is not a tree), meaning that the lists of arcs and tags are
not tied to the length of the sentence. | def parse_sentence(sentence_blob: str) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]:
"""
Parses a chunk of text in the SemEval SDP format.
Each word in the sentence is returned as a dictionary with the following
format:
'id': '1',
'form': 'Pierre',
'lemma': 'Pierre',
'pos': 'NNP',
'head': '2', # Note that this is the `syntactic` head.
'deprel': 'nn',
'top': '-',
'pred': '+',
'frame': 'named:x-c'
Along with a list of arcs and their corresponding tags. Note that
in semantic dependency parsing words can have more than one head
(it is not a tree), meaning that the lists of arcs and tags are
not tied to the length of the sentence.
"""
annotated_sentence = []
arc_indices = []
arc_tags = []
predicates = []
lines = [line.split("\t") for line in sentence_blob.split("\n")
if line and not line.strip().startswith("#")]
for line_idx, line in enumerate(lines):
annotated_token = {k:v for k, v in zip(FIELDS, line)}
if annotated_token['pred'] == "+":
predicates.append(line_idx)
annotated_sentence.append(annotated_token)
for line_idx, line in enumerate(lines):
for predicate_idx, arg in enumerate(line[len(FIELDS):]):
if arg != "_":
arc_indices.append((line_idx, predicates[predicate_idx]))
arc_tags.append(arg)
return annotated_sentence, arc_indices, arc_tags |
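A small illustrative call, assuming ``FIELDS`` is exactly the nine columns listed in the docstring above; the two-token blob and its extra argument column are made up:

blob = "\n".join([
        "\t".join(["1", "Pierre", "Pierre", "NNP", "2", "nn", "-", "+", "named:x-c", "_"]),
        "\t".join(["2", "Vinken", "Vinken", "NNP", "0", "root", "-", "-", "_", "compound"]),
])
annotated, arc_indices, arc_tags = parse_sentence(blob)
# Token 1 is the only predicate ("pred" == "+"), and token 2 fills an argument
# column for it, so arc_indices == [(1, 0)] and arc_tags == ["compound"].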
Just converts from an ``argparse.Namespace`` object to string paths. | def fine_tune_model_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to string paths.
"""
fine_tune_model_from_file_paths(model_archive_path=args.model_archive,
config_file=args.config_file,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
extend_vocab=args.extend_vocab,
file_friendly_logging=args.file_friendly_logging,
batch_weight_key=args.batch_weight_key,
embedding_sources_mapping=args.embedding_sources_mapping) |
Disambiguates single GPU and multiple GPU settings for cuda_device param. | def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]:
"""
Disambiguates single GPU and multiple GPU settings for cuda_device param.
"""
def from_list(strings):
if len(strings) > 1:
return [int(d) for d in strings]
elif len(strings) == 1:
return int(strings[0])
else:
return -1
if isinstance(cuda_device, str):
return from_list(re.split(r',\s*', cuda_device))
elif isinstance(cuda_device, int):
return cuda_device
elif isinstance(cuda_device, list):
return from_list(cuda_device)
else:
# TODO(brendanr): Determine why mypy can't tell that this matches the Union.
return int(cuda_device) |
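For example:

assert parse_cuda_device("0,1") == [0, 1]  # comma-separated string -> list of device ids
assert parse_cuda_device("0") == 0         # a single device collapses to an int
assert parse_cuda_device([2]) == 2
assert parse_cuda_device(-1) == -1         # CPU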
A wrapper around :func:`fine_tune_model` which loads the model archive from a file.
Parameters
----------
model_archive_path : ``str``
Path to a saved model archive that is the result of running the ``train`` command.
config_file : ``str``
A configuration file specifying how to continue training. The format is identical to the
configuration file for the ``train`` command, but any contents in the ``model`` section is
ignored (as we are using the provided model archive instead).
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`fine_tune_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
extend_vocab: ``bool``, optional (default=False)
If ``True``, we use the new instances to extend your vocabulary.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`fine_tune_model`.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
embedding_sources_mapping: ``str``, optional (default="")
JSON string to define dict mapping from embedding paths used during training to
the corresponding embedding filepaths available during fine-tuning. | def fine_tune_model_from_file_paths(model_archive_path: str,
config_file: str,
serialization_dir: str,
overrides: str = "",
extend_vocab: bool = False,
file_friendly_logging: bool = False,
batch_weight_key: str = "",
embedding_sources_mapping: str = "") -> Model:
"""
A wrapper around :func:`fine_tune_model` which loads the model archive from a file.
Parameters
----------
model_archive_path : ``str``
Path to a saved model archive that is the result of running the ``train`` command.
config_file : ``str``
A configuration file specifying how to continue training. The format is identical to the
configuration file for the ``train`` command, but any contents in the ``model`` section is
ignored (as we are using the provided model archive instead).
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`fine_tune_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
extend_vocab: ``bool``, optional (default=False)
If ``True``, we use the new instances to extend your vocabulary.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`fine_tune_model`.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
embedding_sources_mapping: ``str``, optional (default="")
JSON string to define dict mapping from embedding paths used during training to
the corresponding embedding filepaths available during fine-tuning.
"""
# We don't need to pass in `cuda_device` here, because the trainer will call `model.cuda()` if
# necessary.
archive = load_archive(model_archive_path)
params = Params.from_file(config_file, overrides)
embedding_sources: Dict[str, str] = json.loads(embedding_sources_mapping) if embedding_sources_mapping else {}
return fine_tune_model(model=archive.model,
params=params,
serialization_dir=serialization_dir,
extend_vocab=extend_vocab,
file_friendly_logging=file_friendly_logging,
batch_weight_key=batch_weight_key,
embedding_sources_mapping=embedding_sources) |
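For example, a programmatic call mirroring what the ``allennlp fine-tune`` command does (all paths below are hypothetical):

model = fine_tune_model_from_file_paths(
        model_archive_path="/path/to/model.tar.gz",
        config_file="fine_tune_config.jsonnet",
        serialization_dir="/tmp/fine_tune_run",
        extend_vocab=True)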
Fine tunes the given model, using a set of parameters that is largely identical to those used
for :func:`~allennlp.commands.train.train_model`, except that the ``model`` section is ignored,
if it is present (as we are already given a ``Model`` here).
The main difference between the logic done here and the logic done in ``train_model`` is that
here we do not worry about vocabulary construction or creating the model object. Everything
else is the same.
Parameters
----------
model : ``Model``
A model to fine tune.
params : ``Params``
A parameter object specifying an AllenNLP Experiment
serialization_dir : ``str``
The directory in which to save results and logs.
extend_vocab: ``bool``, optional (default=False)
If ``True``, we use the new instances to extend your vocabulary.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
embedding_sources_mapping: ``Dict[str, str]``, optional (default=None)
mapping from model paths to the pretrained embedding filepaths
used during fine-tuning. | def fine_tune_model(model: Model,
params: Params,
serialization_dir: str,
extend_vocab: bool = False,
file_friendly_logging: bool = False,
batch_weight_key: str = "",
embedding_sources_mapping: Dict[str, str] = None) -> Model:
"""
Fine tunes the given model, using a set of parameters that is largely identical to those used
for :func:`~allennlp.commands.train.train_model`, except that the ``model`` section is ignored,
if it is present (as we are already given a ``Model`` here).
The main difference between the logic done here and the logic done in ``train_model`` is that
here we do not worry about vocabulary construction or creating the model object. Everything
else is the same.
Parameters
----------
model : ``Model``
A model to fine tune.
params : ``Params``
A parameter object specifying an AllenNLP Experiment
serialization_dir : ``str``
The directory in which to save results and logs.
extend_vocab: ``bool``, optional (default=False)
If ``True``, we use the new instances to extend your vocabulary.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
embedding_sources_mapping: ``Dict[str, str]``, optional (default=None)
mapping from model paths to the pretrained embedding filepaths
used during fine-tuning.
"""
prepare_environment(params)
if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
raise ConfigurationError(f"Serialization directory ({serialization_dir}) "
f"already exists and is not empty.")
os.makedirs(serialization_dir, exist_ok=True)
prepare_global_logging(serialization_dir, file_friendly_logging)
serialization_params = deepcopy(params).as_dict(quiet=True)
with open(os.path.join(serialization_dir, CONFIG_NAME), "w") as param_file:
json.dump(serialization_params, param_file, indent=4)
if params.pop('model', None):
logger.warning("You passed parameters for the model in your configuration file, but we "
"are ignoring them, using instead the model parameters in the archive.")
vocabulary_params = params.pop('vocabulary', {})
if vocabulary_params.get('directory_path', None):
logger.warning("You passed `directory_path` in parameters for the vocabulary in "
"your configuration file, but it will be ignored. ")
all_datasets = datasets_from_params(params)
vocab = model.vocab
if extend_vocab:
datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
for dataset in datasets_for_vocab_creation:
if dataset not in all_datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
logger.info("Extending model vocabulary using %s data.", ", ".join(datasets_for_vocab_creation))
vocab.extend_from_instances(vocabulary_params,
(instance for key, dataset in all_datasets.items()
for instance in dataset
if key in datasets_for_vocab_creation))
model.extend_embedder_vocab(embedding_sources_mapping)
vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
iterator = DataIterator.from_params(params.pop("iterator"))
iterator.index_with(model.vocab)
validation_iterator_params = params.pop("validation_iterator", None)
if validation_iterator_params:
validation_iterator = DataIterator.from_params(validation_iterator_params)
validation_iterator.index_with(vocab)
else:
validation_iterator = None
train_data = all_datasets['train']
validation_data = all_datasets.get('validation')
test_data = all_datasets.get('test')
trainer_params = params.pop("trainer")
no_grad_regexes = trainer_params.pop("no_grad", ())
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad_regexes):
parameter.requires_grad_(False)
frozen_parameter_names, tunable_parameter_names = \
get_frozen_and_tunable_parameter_names(model)
logger.info("Following parameters are Frozen (without gradient):")
for name in frozen_parameter_names:
logger.info(name)
logger.info("Following parameters are Tunable (with gradient):")
for name in tunable_parameter_names:
logger.info(name)
trainer_type = trainer_params.pop("type", "default")
if trainer_type == "default":
trainer = Trainer.from_params(model=model,
serialization_dir=serialization_dir,
iterator=iterator,
train_data=train_data,
validation_data=validation_data,
params=trainer_params,
validation_iterator=validation_iterator)
else:
raise ConfigurationError("currently fine-tune only works with the default Trainer")
evaluate_on_test = params.pop_bool("evaluate_on_test", False)
params.assert_empty('base train command')
try:
metrics = trainer.train()
except KeyboardInterrupt:
# if we have completed an epoch, try to create a model archive.
if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info("Fine-tuning interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights.")
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
raise
# Evaluate
if test_data and evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
test_metrics = evaluate(model, test_data, validation_iterator or iterator,
                                cuda_device=trainer._cuda_devices[0],  # pylint: disable=protected-access
batch_weight_key=batch_weight_key)
for key, value in test_metrics.items():
metrics["test_" + key] = value
elif test_data:
logger.info("To evaluate on the test set after training, pass the "
"'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
# Now tar up results
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
metrics_json = json.dumps(metrics, indent=2)
with open(os.path.join(serialization_dir, "metrics.json"), "w") as metrics_file:
metrics_file.write(metrics_json)
logger.info("Metrics: %s", metrics_json)
return model |
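A hedged usage sketch of the function above. The file paths are placeholders, and the import locations (``allennlp.commands.fine_tune``, ``allennlp.common.params``, ``allennlp.models.archival``) are assumed rather than taken from this excerpt:
# Hypothetical paths; adjust to your own archive and config.
from allennlp.commands.fine_tune import fine_tune_model
from allennlp.common.params import Params
from allennlp.models.archival import load_archive

archive = load_archive("model.tar.gz")            # previously trained model
params = Params.from_file("fine_tune.jsonnet")    # config with new data paths, iterator, trainer
fine_tuned_model = fine_tune_model(model=archive.model,
                                   params=params,
                                   serialization_dir="fine_tune_output",
                                   extend_vocab=True)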
Extracts the top-k scoring items with respect to the scorer. We additionally return
the indices of the top-k in their original order, not ordered by score, so that downstream
components can rely on the original ordering (e.g., for knowing what spans are valid
antecedents in a coreference resolution model). May use the same k for all sentences in
minibatch, or different k for each.
Parameters
----------
embeddings : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, num_items, embedding_size), containing an embedding for
each item in the list that we want to prune.
mask : ``torch.LongTensor``, required.
A tensor of shape (batch_size, num_items), denoting unpadded elements of
``embeddings``.
num_items_to_keep : ``Union[int, torch.LongTensor]``, required.
If a tensor of shape (batch_size), specifies the number of items to keep for each
individual sentence in minibatch.
If an int, keep the same number of items for all sentences.
Returns
-------
top_embeddings : ``torch.FloatTensor``
The representations of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, embedding_size).
top_mask : ``torch.LongTensor``
The corresponding mask for ``top_embeddings``.
Has shape (batch_size, max_num_items_to_keep).
top_indices : ``torch.IntTensor``
The indices of the top-k scoring items into the original ``embeddings``
tensor. This is returned because it can be useful to retain pointers to
the original items, if each item is being scored by multiple distinct
scorers, for instance. Has shape (batch_size, max_num_items_to_keep).
top_item_scores : ``torch.FloatTensor``
The values of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, 1). | def forward(self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor]) -> Tuple[torch.FloatTensor, torch.LongTensor,
torch.LongTensor, torch.FloatTensor]:
"""
Extracts the top-k scoring items with respect to the scorer. We additionally return
the indices of the top-k in their original order, not ordered by score, so that downstream
components can rely on the original ordering (e.g., for knowing what spans are valid
antecedents in a coreference resolution model). May use the same k for all sentences in
minibatch, or different k for each.
Parameters
----------
embeddings : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, num_items, embedding_size), containing an embedding for
each item in the list that we want to prune.
mask : ``torch.LongTensor``, required.
A tensor of shape (batch_size, num_items), denoting unpadded elements of
``embeddings``.
num_items_to_keep : ``Union[int, torch.LongTensor]``, required.
If a tensor of shape (batch_size), specifies the number of items to keep for each
individual sentence in minibatch.
If an int, keep the same number of items for all sentences.
Returns
-------
top_embeddings : ``torch.FloatTensor``
The representations of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, embedding_size).
top_mask : ``torch.LongTensor``
The corresponding mask for ``top_embeddings``.
Has shape (batch_size, max_num_items_to_keep).
top_indices : ``torch.IntTensor``
The indices of the top-k scoring items into the original ``embeddings``
tensor. This is returned because it can be useful to retain pointers to
the original items, if each item is being scored by multiple distinct
scorers, for instance. Has shape (batch_size, max_num_items_to_keep).
top_item_scores : ``torch.FloatTensor``
The values of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, 1).
"""
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(num_items_to_keep, int):
batch_size = mask.size(0)
# Put the tensor on same device as the mask.
num_items_to_keep = num_items_to_keep * torch.ones([batch_size], dtype=torch.long,
device=mask.device)
max_items_to_keep = num_items_to_keep.max()
mask = mask.unsqueeze(-1)
num_items = embeddings.size(1)
# Shape: (batch_size, num_items, 1)
scores = self._scorer(embeddings)
if scores.size(-1) != 1 or scores.dim() != 3:
        raise ValueError(f"The scorer passed to Pruner must produce a tensor of shape "
                         f"(batch_size, num_items, 1), but found shape {scores.size()}")
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
scores = util.replace_masked_values(scores, mask, -1e20)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(num_items_to_keep, max_items_to_keep)
top_indices_mask = top_indices_mask.byte()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(embeddings, top_indices, flat_top_indices)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).byte()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores |
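The core trick in the method above, selecting the top-k items by score and then re-sorting the surviving indices so they keep their original order, can be illustrated with plain PyTorch (the shapes and random scores below are illustrative only):
import torch

batch_size, num_items, embedding_size = 2, 5, 4
embeddings = torch.randn(batch_size, num_items, embedding_size)
scores = torch.randn(batch_size, num_items)

k = 3
_, top_indices = scores.topk(k, dim=1)      # indices of the k highest-scoring items
top_indices, _ = top_indices.sort(dim=1)    # restore the items' original ordering
top_embeddings = torch.gather(
        embeddings, 1, top_indices.unsqueeze(-1).expand(-1, -1, embedding_size))
print(top_embeddings.shape)                 # torch.Size([2, 3, 4])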
Add the epoch number to the batch instances as a MetadataField. | def add_epoch_number(batch: Batch, epoch: int) -> Batch:
"""
Add the epoch number to the batch instances as a MetadataField.
"""
for instance in batch.instances:
instance.fields['epoch_num'] = MetadataField(epoch)
return batch |
Take the next `max_instances` instances from the given dataset.
If `max_instances` is `None`, then just take all instances from the dataset.
If `max_instances` is not `None`, each call resumes where the previous one
left off, and when you get to the end of the dataset you start again from the beginning. | def _take_instances(self,
instances: Iterable[Instance],
max_instances: Optional[int] = None) -> Iterator[Instance]:
"""
Take the next `max_instances` instances from the given dataset.
If `max_instances` is `None`, then just take all instances from the dataset.
If `max_instances` is not `None`, each call resumes where the previous one
left off, and when you get to the end of the dataset you start again from the beginning.
"""
# If max_instances isn't specified, just iterate once over the whole dataset
if max_instances is None:
yield from iter(instances)
else:
# If we don't have a cursor for this dataset, create one. We use ``id()``
# for the key because ``instances`` could be a list, which can't be used as a key.
key = id(instances)
iterator = self._cursors.get(key, iter(instances))
while max_instances > 0:
try:
# If there are instances left on this iterator,
# yield one and decrement max_instances.
yield next(iterator)
max_instances -= 1
except StopIteration:
# None left, so start over again at the beginning of the dataset.
iterator = iter(instances)
# We may have a new iterator, so update the cursor.
self._cursors[key] = iterator |
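A framework-free sketch of the resuming-cursor behaviour described in the docstring; the names below are illustrative, not part of the AllenNLP API:
from typing import Dict, Iterator, List

_cursors: Dict[int, Iterator[int]] = {}

def take(items: List[int], max_items: int) -> Iterator[int]:
    key = id(items)
    iterator = _cursors.get(key, iter(items))
    while max_items > 0:
        try:
            yield next(iterator)
            max_items -= 1
        except StopIteration:
            iterator = iter(items)   # wrap around to the start of the dataset
    _cursors[key] = iterator         # remember where we stopped for the next call

data = [10, 20, 30]
print(list(take(data, 2)))  # [10, 20]
print(list(take(data, 2)))  # [30, 10]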
Breaks the dataset into "memory-sized" lists of instances,
which it yields up one at a time until it gets through a full epoch.
For example, if the dataset is already an in-memory list, and each epoch
represents one pass through the dataset, it just yields back the dataset.
Whereas if the dataset is lazily read from disk and we've specified to
load 1000 instances at a time, then it yields lists of 1000 instances each. | def _memory_sized_lists(self,
instances: Iterable[Instance]) -> Iterable[List[Instance]]:
"""
Breaks the dataset into "memory-sized" lists of instances,
which it yields up one at a time until it gets through a full epoch.
For example, if the dataset is already an in-memory list, and each epoch
represents one pass through the dataset, it just yields back the dataset.
Whereas if the dataset is lazily read from disk and we've specified to
load 1000 instances at a time, then it yields lists of 1000 instances each.
"""
lazy = is_lazy(instances)
# Get an iterator over the next epoch worth of instances.
iterator = self._take_instances(instances, self._instances_per_epoch)
# We have four different cases to deal with:
# With lazy instances and no guidance about how many to load into memory,
# we just load ``batch_size`` instances at a time:
if lazy and self._max_instances_in_memory is None:
yield from lazy_groups_of(iterator, self._batch_size)
# If we specified max instances in memory, lazy or not, we just
# load ``max_instances_in_memory`` instances at a time:
elif self._max_instances_in_memory is not None:
yield from lazy_groups_of(iterator, self._max_instances_in_memory)
# If we have non-lazy instances, and we want all instances each epoch,
# then we just yield back the list of instances:
elif self._instances_per_epoch is None:
yield ensure_list(instances)
# In the final case we have non-lazy instances, we want a specific number
# of instances each epoch, and we didn't specify how to many instances to load
# into memory. So we convert the whole iterator to a list:
else:
yield list(iterator) |
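The chunking helper relied on above can be sketched in a few lines (this is an equivalent stand-in, not the library's own implementation):
from itertools import islice
from typing import Iterator, List, TypeVar

T = TypeVar("T")

def lazy_groups_of(iterator: Iterator[T], group_size: int) -> Iterator[List[T]]:
    """Chunk an iterator into lists of at most ``group_size`` items, lazily."""
    while True:
        group = list(islice(iterator, group_size))
        if not group:
            return
        yield group

print(list(lazy_groups_of(iter(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]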
If self._maximum_samples_per_batch is specified, then split the batch
into smaller sub-batches if it exceeds the maximum size.
Parameters
----------
batch_instances : ``Iterable[Instance]``
A candidate batch.
excess : ``Deque[Instance]``
Instances that were not sufficient to form an entire batch
previously. They will be used as part of the first sub-batch. This
will be populated with instances from the end of batch_instances
that do not consist of more than self._maximum_samples_per_batch
samples or self._batch_size instances. It is the caller's
responsibility to place these in a batch too, which may, of course,
be done in part with subsequent calls to this method.
WARNING: Mutated in place! | def _ensure_batch_is_sufficiently_small(
self,
batch_instances: Iterable[Instance],
excess: Deque[Instance]) -> List[List[Instance]]:
"""
If self._maximum_samples_per_batch is specified, then split the batch
into smaller sub-batches if it exceeds the maximum size.
Parameters
----------
batch_instances : ``Iterable[Instance]``
A candidate batch.
excess : ``Deque[Instance]``
Instances that were not sufficient to form an entire batch
previously. They will be used as part of the first sub-batch. This
will be populated with instances from the end of batch_instances
that do not consist of more than self._maximum_samples_per_batch
samples or self._batch_size instances. It is the caller's
responsibility to place these in a batch too, which may, of course,
be done in part with subsequent calls to this method.
WARNING: Mutated in place!
"""
if self._maximum_samples_per_batch is None:
assert not excess
return [list(batch_instances)]
key, limit = self._maximum_samples_per_batch
batches: List[List[Instance]] = []
batch: List[Instance] = []
padding_length = -1
excess.extend(batch_instances)
while excess:
instance = excess.popleft()
if self.vocab is not None:
# we index here to ensure that shape information is available,
# as in some cases (with self._maximum_samples_per_batch)
# we need access to shaping information before batches are constructed)
instance.index_fields(self.vocab)
field_lengths = instance.get_padding_lengths()
for _, lengths in field_lengths.items():
try:
padding_length = max(padding_length,
lengths[key])
except KeyError:
pass
proposed_batch_size = len(batch) + 1
# Adding the current instance would exceed the batch size or sample size.
if proposed_batch_size >= self._batch_size or padding_length * proposed_batch_size > limit:
# Output the already existing batch
batches.append(batch)
# Put the current instance back, reset state.
excess.appendleft(instance)
batch = []
padding_length = -1
else:
batch.append(instance)
# Keep the current batch as excess.
excess.extend(batch)
return batches |
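A simplified, framework-free illustration of the splitting criterion above: with a per-batch sample budget (say, 60 for a hypothetical ``"num_tokens"`` key) and a batch-size cap, a new sub-batch starts as soon as ``max_padding_length * proposed_batch_size`` would exceed the budget, and the trailing items are left over as "excess":
lengths = [20, 25, 30, 10]        # padding-relevant length of each candidate instance
limit, batch_size_cap = 60, 4     # illustrative sample budget and batch-size cap

batches, batch, padding_length = [], [], -1
for length in lengths:
    padding_length = max(padding_length, length)
    proposed_batch_size = len(batch) + 1
    if proposed_batch_size >= batch_size_cap or padding_length * proposed_batch_size > limit:
        batches.append(batch)                 # close the current sub-batch
        batch, padding_length = [length], length
    else:
        batch.append(length)
print(batches, batch)  # [[20, 25]] [30, 10]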
Returns the number of batches that ``dataset`` will be split into; if you want to track
progress through the batch with the generator produced by ``__call__``, this could be
useful. | def get_num_batches(self, instances: Iterable[Instance]) -> int:
"""
Returns the number of batches that ``dataset`` will be split into; if you want to track
progress through the batch with the generator produced by ``__call__``, this could be
useful.
"""
if is_lazy(instances) and self._instances_per_epoch is None:
# Unable to compute num batches, so just return 1.
return 1
elif self._instances_per_epoch is not None:
return math.ceil(self._instances_per_epoch / self._batch_size)
else:
# Not lazy, so can compute the list length.
return math.ceil(len(ensure_list(instances)) / self._batch_size) |
This method should return one epoch worth of batches. | def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
"""
This method should return one epoch worth of batches.
"""
raise NotImplementedError |
TQDM and requests use carriage returns to get the training line to update for each batch
without adding more lines to the terminal output. Displaying those in a file won't work
correctly, so we'll just make sure that each batch shows up on its own line.
:param message: the message to process
:return: the message with carriage returns replaced with newlines | def replace_cr_with_newline(message: str):
"""
TQDM and requests use carriage returns to get the training line to update for each batch
without adding more lines to the terminal output. Displaying those in a file won't work
    correctly, so we'll just make sure that each batch shows up on its own line.
    :param message: the message to process
:return: the message with carriage returns replaced with newlines
"""
if '\r' in message:
message = message.replace('\r', '')
if not message or message[-1] != '\n':
message += '\n'
return message |
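For example, a tqdm-style progress message that ends in a carriage return comes out newline-terminated instead:
print(repr(replace_cr_with_newline("epoch 1: 45%\r")))  # 'epoch 1: 45%\n'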
Converts a list of JSON objects into a list of :class:`~allennlp.data.instance.Instance`s.
By default, this expects that a "batch" consists of a list of JSON blobs which would
individually be predicted by :func:`predict_json`. In order to use this method for
batch prediction, :func:`_json_to_instance` should be implemented by the subclass, or
if the instances have some dependency on each other, this method should be overridden
directly. | def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
"""
Converts a list of JSON objects into a list of :class:`~allennlp.data.instance.Instance`s.
By default, this expects that a "batch" consists of a list of JSON blobs which would
individually be predicted by :func:`predict_json`. In order to use this method for
batch prediction, :func:`_json_to_instance` should be implemented by the subclass, or
if the instances have some dependency on each other, this method should be overridden
directly.
"""
instances = []
for json_dict in json_dicts:
instances.append(self._json_to_instance(json_dict))
return instances |
Context manager that captures the internal-module outputs of
this predictor's model. The idea is that you could use it as follows:
.. code-block:: python
with predictor.capture_model_internals() as internals:
outputs = predictor.predict_json(inputs)
return {**outputs, "model_internals": internals} | def capture_model_internals(self) -> Iterator[dict]:
"""
Context manager that captures the internal-module outputs of
this predictor's model. The idea is that you could use it as follows:
.. code-block:: python
with predictor.capture_model_internals() as internals:
outputs = predictor.predict_json(inputs)
return {**outputs, "model_internals": internals}
"""
results = {}
hooks = []
# First we'll register hooks to add the outputs of each module to the results dict.
def add_output(idx: int):
def _add_output(mod, _, outputs):
results[idx] = {"name": str(mod), "output": sanitize(outputs)}
return _add_output
for idx, module in enumerate(self._model.modules()):
if module != self._model:
hook = module.register_forward_hook(add_output(idx))
hooks.append(hook)
# If you capture the return value of the context manager, you get the results dict.
yield results
# And then when you exit the context we remove all the hooks.
for hook in hooks:
hook.remove() |
Instantiate a :class:`Predictor` from an archive path.
If you need more detailed configuration options, such as running the predictor on the GPU,
please use `from_archive`.
Parameters
----------
archive_path : ``str``
    The path to the archive.
predictor_name : ``str``, optional (default=None)
    Name of the :class:`Predictor` subclass to use. If not provided, the default predictor
    registered for the model type will be used.
Returns
-------
A Predictor instance. | def from_path(cls, archive_path: str, predictor_name: str = None) -> 'Predictor':
"""
Instantiate a :class:`Predictor` from an archive path.
If you need more detailed configuration options, such as running the predictor on the GPU,
please use `from_archive`.
Parameters
----------
    archive_path : ``str``
        The path to the archive.
    predictor_name : ``str``, optional (default=None)
        Name of the :class:`Predictor` subclass to use. If not provided, the default
        predictor registered for the model type will be used.
Returns
-------
A Predictor instance.
"""
return Predictor.from_archive(load_archive(archive_path), predictor_name) |
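A hedged usage sketch; the archive path, predictor name, and input key below are placeholders:
from allennlp.predictors import Predictor

predictor = Predictor.from_path("model.tar.gz", predictor_name="sentence-tagger")
result = predictor.predict_json({"sentence": "AllenNLP helps you build NLP models."})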
Instantiate a :class:`Predictor` from an :class:`~allennlp.models.archival.Archive`;
that is, from the result of training a model. Optionally specify which `Predictor`
subclass; otherwise, the default one for the model will be used. | def from_archive(cls, archive: Archive, predictor_name: str = None) -> 'Predictor':
"""
Instantiate a :class:`Predictor` from an :class:`~allennlp.models.archival.Archive`;
that is, from the result of training a model. Optionally specify which `Predictor`
subclass; otherwise, the default one for the model will be used.
"""
# Duplicate the config so that the config inside the archive doesn't get consumed
config = archive.config.duplicate()
if not predictor_name:
model_type = config.get("model").get("type")
        if model_type not in DEFAULT_PREDICTORS:
            raise ConfigurationError(f"No default predictor for model type {model_type}.\n"
                                     f"Please specify a predictor explicitly.")
predictor_name = DEFAULT_PREDICTORS[model_type]
dataset_reader_params = config["dataset_reader"]
dataset_reader = DatasetReader.from_params(dataset_reader_params)
model = archive.model
model.eval()
return Predictor.by_name(predictor_name)(model, dataset_reader) |
Compute 'Scaled Dot Product Attention' | def attention(query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: torch.Tensor = None,
dropout: Callable = None) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn |
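The function above computes softmax(QKᵀ / √d_k) · V. A quick shape check (the tensor sizes are arbitrary):
import torch

batch, heads, seq_len, d_k = 2, 8, 10, 64
query = torch.randn(batch, heads, seq_len, d_k)
key = torch.randn(batch, heads, seq_len, d_k)
value = torch.randn(batch, heads, seq_len, d_k)

context, weights = attention(query, key, value)
print(context.shape)  # torch.Size([2, 8, 10, 64])
print(weights.shape)  # torch.Size([2, 8, 10, 10])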
Mask out subsequent positions. | def subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor:
"""Mask out subsequent positions."""
mask = torch.tril(torch.ones(size, size, device=device, dtype=torch.int32)).unsqueeze(0)
return mask |
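For example, ``subsequent_mask(4)`` produces a lower-triangular mask of shape (1, 4, 4), so position i may only attend to positions 0..i:
print(subsequent_mask(4))
# tensor([[[1, 0, 0, 0],
#          [1, 1, 0, 0],
#          [1, 1, 1, 0],
#          [1, 1, 1, 1]]], dtype=torch.int32)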
Helper: Construct a model from hyperparameters. | def make_model(num_layers: int = 6,
input_size: int = 512, # Attention size
hidden_size: int = 2048, # FF layer size
heads: int = 8,
dropout: float = 0.1,
return_all_layers: bool = False) -> TransformerEncoder:
"""Helper: Construct a model from hyperparameters."""
attn = MultiHeadedAttention(heads, input_size, dropout)
ff = PositionwiseFeedForward(input_size, hidden_size, dropout)
model = TransformerEncoder(EncoderLayer(input_size, attn, ff, dropout),
num_layers,
return_all_layers=return_all_layers)
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
return model |
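A small usage sketch of the helper above: build a two-layer encoder and count its parameters. It assumes the layer classes referenced by ``make_model`` (``MultiHeadedAttention``, ``PositionwiseFeedForward``, ``EncoderLayer``, ``TransformerEncoder``) are importable alongside it; the hyperparameters are arbitrary.
encoder = make_model(num_layers=2, input_size=512, hidden_size=1024, heads=8)
num_params = sum(p.numel() for p in encoder.parameters())
print(f"{num_params:,} parameters")  # sanity-check the model size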