text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
<END_TASK>
<USER_TASK:>
Description:
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
""" |
lengths = {}
for field_name, field in self.fields.items():
lengths[field_name] = field.get_padding_lengths()
return lengths |
<SYSTEM_TASK:>
Inspect the docstring and get the comments for each parameter.
<END_TASK>
<USER_TASK:>
Description:
def _docspec_comments(obj) -> Dict[str, str]:
"""
Inspect the docstring and get the comments for each parameter.
""" |
# Sometimes our docstring is on the class, and sometimes it's on the initializer,
# so we've got to check both.
class_docstring = getattr(obj, '__doc__', None)
init_docstring = getattr(obj.__init__, '__doc__', None) if hasattr(obj, '__init__') else None
docstring = class_docstring or init_docstring or ''
doc = NumpyDocString(docstring)
params = doc["Parameters"]
comments: Dict[str, str] = {}
for line in params:
# It looks like when there's not a space after the parameter name,
# numpydocstring parses it incorrectly.
name_bad = line[0]
name = name_bad.split(":")[0]
# Sometimes the line has 3 fields, sometimes it has 4 fields.
comment = "\n".join(line[-1])
comments[name] = comment
return comments |
<SYSTEM_TASK:>
Render a single config item, with the provided indent
<END_TASK>
<USER_TASK:>
Description:
def _render(item: ConfigItem, indent: str = "") -> str:
"""
Render a single config item, with the provided indent
""" |
optional = item.default_value != _NO_DEFAULT
if is_configurable(item.annotation):
rendered_annotation = f"{item.annotation} (configurable)"
else:
rendered_annotation = str(item.annotation)
rendered_item = "".join([
# rendered_comment,
indent,
"// " if optional else "",
f'"{item.name}": ',
rendered_annotation,
f" (default: {item.default_value} )" if optional else "",
f" // {item.comment}" if item.comment else "",
"\n"
])
return rendered_item |
<SYSTEM_TASK:>
Convert `url` into a hashed filename in a repeatable way.
<END_TASK>
<USER_TASK:>
Description:
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
""" |
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename |
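A minimal usage sketch, assuming the function above is in scope together with ``from hashlib import sha256``; the URL and ETag below are made up for illustration:
# Hashing is deterministic, so repeated calls give the same cache filename,
# and the ETag hash is appended after a period.
name_plain = url_to_filename("https://example.com/data.txt")
name_tagged = url_to_filename("https://example.com/data.txt", etag='"abc123"')
assert name_tagged.startswith(name_plain + ".") and name_plain != name_tagged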
<SYSTEM_TASK:>
Split a full s3 path into the bucket name and path.
<END_TASK>
<USER_TASK:>
Description:
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path.""" |
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path |
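A quick sanity check, assuming ``from urllib.parse import urlparse`` is imported where this helper lives; the bucket and key are hypothetical:
bucket, key = split_s3_path("s3://my-bucket/path/to/file.txt")
assert bucket == "my-bucket"
assert key == "path/to/file.txt"   # the leading '/' has been stripped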
<SYSTEM_TASK:>
Wrapper function for s3 requests in order to create more helpful error
<END_TASK>
<USER_TASK:>
Description:
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
""" |
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper |
<SYSTEM_TASK:>
Pull a file directly from S3.
<END_TASK>
<USER_TASK:>
Description:
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3.""" |
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) |
<SYSTEM_TASK:>
Given a URL, look for the corresponding dataset in the local cache.
<END_TASK>
<USER_TASK:>
Description:
def get_from_cache(url: str, cache_dir: str = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
""" |
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path |
<SYSTEM_TASK:>
This method lets you take advantage of spacy's batch processing.
<END_TASK>
<USER_TASK:>
Description:
def batch_split_sentences(self, texts: List[str]) -> List[List[str]]:
"""
This method lets you take advantage of spacy's batch processing.
Default implementation is to just iterate over the texts and call ``split_sentences``.
""" |
return [self.split_sentences(text) for text in texts] |
<SYSTEM_TASK:>
An iterator over the entire dataset, yielding all sentences processed.
<END_TASK>
<USER_TASK:>
Description:
def dataset_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
An iterator over the entire dataset, yielding all sentences processed.
""" |
for conll_file in self.dataset_path_iterator(file_path):
yield from self.sentence_iterator(conll_file) |
<SYSTEM_TASK:>
An iterator returning file_paths in a directory
<END_TASK>
<USER_TASK:>
Description:
def dataset_path_iterator(file_path: str) -> Iterator[str]:
"""
An iterator returning file_paths in a directory
containing CONLL-formatted files.
""" |
logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
for root, _, files in list(os.walk(file_path)):
for data_file in files:
# These are a relic of the dataset pre-processing. Every
# file will be duplicated - one file called filename.gold_skel
# and one generated from the preprocessing called filename.gold_conll.
if not data_file.endswith("gold_conll"):
continue
yield os.path.join(root, data_file) |
<SYSTEM_TASK:>
An iterator over CONLL formatted files which yields documents, regardless
<END_TASK>
<USER_TASK:>
Description:
def dataset_document_iterator(self, file_path: str) -> Iterator[List[OntonotesSentence]]:
"""
An iterator over CONLL formatted files which yields documents, regardless
of the number of document annotations in a particular file. This is useful
for conll data which has been preprocessed, such as the preprocessing which
takes place for the 2012 CONLL Coreference Resolution task.
""" |
with codecs.open(file_path, 'r', encoding='utf8') as open_file:
conll_rows = []
document: List[OntonotesSentence] = []
for line in open_file:
line = line.strip()
if line != '' and not line.startswith('#'):
# Non-empty line. Collect the annotation.
conll_rows.append(line)
else:
if conll_rows:
document.append(self._conll_rows_to_sentence(conll_rows))
conll_rows = []
if line.startswith("#end document"):
yield document
document = []
if document:
# Collect any stragglers or files which might not
# have the '#end document' format for the end of the file.
yield document |
<SYSTEM_TASK:>
An iterator over the sentences in an individual CONLL formatted file.
<END_TASK>
<USER_TASK:>
Description:
def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
"""
An iterator over the sentences in an individual CONLL formatted file.
""" |
for document in self.dataset_document_iterator(file_path):
for sentence in document:
yield sentence |
<SYSTEM_TASK:>
Given a sequence of different label types for a single word and the current
<END_TASK>
<USER_TASK:>
Description:
def _process_span_annotations_for_word(annotations: List[str],
span_labels: List[List[str]],
current_span_labels: List[Optional[str]]) -> None:
"""
Given a sequence of different label types for a single word and the current
span label we are inside, compute the BIO tag for each label and append to a list.
Parameters
----------
annotations: ``List[str]``
A list of labels to compute BIO tags for.
span_labels : ``List[List[str]]``
A list of lists, one for each annotation, to incrementally collect
the BIO tags for a sequence.
current_span_labels : ``List[Optional[str]]``
The currently open span per annotation type, or ``None`` if there is no open span.
""" |
for annotation_index, annotation in enumerate(annotations):
# strip all bracketing information to
# get the actual propbank label.
label = annotation.strip("()*")
if "(" in annotation:
# Entering into a span for a particular semantic role label.
# We append the label and set the current span for this annotation.
bio_label = "B-" + label
span_labels[annotation_index].append(bio_label)
current_span_labels[annotation_index] = label
elif current_span_labels[annotation_index] is not None:
# If there's no '(' token, but the current_span_label is not None,
# then we are inside a span.
bio_label = "I-" + current_span_labels[annotation_index]
span_labels[annotation_index].append(bio_label)
else:
# We're outside a span.
span_labels[annotation_index].append("O")
# Exiting a span, so we reset the current span label for this annotation.
if ")" in annotation:
current_span_labels[annotation_index] = None |
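A small walk-through, assuming the helper above can be called directly (in AllenNLP it is a static method of the Ontonotes reader); the bracketed annotations are a hypothetical single-column SRL sequence for a four-word sentence:
span_labels = [[]]              # one list per annotation column
current_span_labels = [None]
for annotation in ["(ARG0*", "*)", "*", "(V*)"]:
    _process_span_annotations_for_word([annotation], span_labels, current_span_labels)
assert span_labels[0] == ["B-ARG0", "I-ARG0", "O", "B-V"]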
<SYSTEM_TASK:>
Apply dropout to input tensor.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, input_tensor):
# pylint: disable=arguments-differ
"""
Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
""" |
ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
if self.inplace:
input_tensor *= dropout_mask.unsqueeze(1)
return None
else:
return dropout_mask.unsqueeze(1) * input_tensor |
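A minimal sketch of calling this ``forward``, under the assumption that the surrounding class is AllenNLP's ``InputVariationalDropout`` (a subclass of ``torch.nn.Dropout``), so one dropout mask is shared across every timestep:
import torch
dropout = InputVariationalDropout(p=0.5)   # assumed class name
x = torch.randn(3, 7, 16)                  # (batch_size, num_timesteps, embedding_dim)
out = dropout(x)
assert out.shape == x.shape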
<SYSTEM_TASK:>
If you actually passed gradient-tracking Tensors to a Metric, there will be
<END_TASK>
<USER_TASK:>
Description:
def unwrap_to_tensors(*tensors: torch.Tensor):
"""
If you actually passed gradient-tracking Tensors to a Metric, there will be
a huge memory leak, because it will prevent garbage collection for the computation
graph. This method ensures that you're using tensors directly and that they are on
the CPU.
""" |
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors) |
<SYSTEM_TASK:>
Replaces abstract variables in text with their concrete counterparts.
<END_TASK>
<USER_TASK:>
Description:
def replace_variables(sentence: List[str],
sentence_variables: Dict[str, str]) -> Tuple[List[str], List[str]]:
"""
Replaces abstract variables in text with their concrete counterparts.
""" |
tokens = []
tags = []
for token in sentence:
if token not in sentence_variables:
tokens.append(token)
tags.append("O")
else:
for word in sentence_variables[token].split():
tokens.append(word)
tags.append(token)
return tokens, tags |
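A usage sketch with a made-up text2sql example; variable tokens are replaced by their concrete words and tagged with the variable name, everything else is tagged ``O``:
sentence = ["what", "is", "the", "population", "of", "city_name0", "?"]
variables = {"city_name0": "new york"}
tokens, tags = replace_variables(sentence, variables)
assert tokens == ["what", "is", "the", "population", "of", "new", "york", "?"]
assert tags == ["O", "O", "O", "O", "O", "city_name0", "city_name0", "O"]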
<SYSTEM_TASK:>
Cleans up and unifies a SQL query. This involves unifying quoted strings
<END_TASK>
<USER_TASK:>
Description:
def clean_and_split_sql(sql: str) -> List[str]:
"""
Cleans up and unifies a SQL query. This involves unifying quoted strings
and splitting brackets which aren't formatted consistently in the data.
""" |
sql_tokens: List[str] = []
for token in sql.strip().split():
token = token.replace('"', "'").replace("%", "")
if token.endswith("(") and len(token) > 1:
sql_tokens.extend(split_table_and_column_names(token[:-1]))
sql_tokens.extend(split_table_and_column_names(token[-1]))
else:
sql_tokens.extend(split_table_and_column_names(token))
return sql_tokens |
<SYSTEM_TASK:>
Some examples in the text2sql datasets use ID as a column reference to the
<END_TASK>
<USER_TASK:>
Description:
def resolve_primary_keys_in_schema(sql_tokens: List[str],
schema: Dict[str, List[TableColumn]]) -> List[str]:
"""
Some examples in the text2sql datasets use ID as a column reference to the
column of a table which has a primary key. This causes problems if you are trying
to constrain a grammar to only produce the column names directly, because you don't
know what ID refers to. So instead of dealing with that, we just replace it.
""" |
primary_keys_for_tables = {name: max(columns, key=lambda x: x.is_primary_key).name
for name, columns in schema.items()}
resolved_tokens = []
for i, token in enumerate(sql_tokens):
if i > 2:
table_name = sql_tokens[i - 2]
if token == "ID" and table_name in primary_keys_for_tables.keys():
token = primary_keys_for_tables[table_name]
resolved_tokens.append(token)
return resolved_tokens |
<SYSTEM_TASK:>
This function exists because Pytorch RNNs require that their inputs be sorted
<END_TASK>
<USER_TASK:>
Description:
def sort_and_run_forward(self,
module: Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]],
inputs: torch.Tensor,
mask: torch.Tensor,
hidden_state: Optional[RnnState] = None):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
indices into the batch dimension required to restore the tensor to its correct,
unsorted order and the number of valid batch elements (i.e. the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
Parameters
----------
module : ``Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
inputs : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
the inputs to the Encoder.
mask : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length)``, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : ``Optional[RnnState]``, (default = None).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
state of an RNN, or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
Returns
-------
module_output : ``Union[torch.Tensor, PackedSequence]``.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to ``num_valid``, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : ``Optional[RnnState]``
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : ``torch.LongTensor``
A tensor of shape ``(batch_size,)``, describing the re-indexing required to transform
the outputs back to their original batch order.
""" |
# In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().item()
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices =\
sort_batch_by_length(inputs, sequence_lengths)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
for state in hidden_state]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices |
<SYSTEM_TASK:>
Returns an initial state for use in an RNN. Additionally, this method handles
<END_TASK>
<USER_TASK:>
Description:
def _get_initial_states(self,
batch_size: int,
num_valid: int,
sorting_indices: torch.LongTensor) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices : ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that different types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
""" |
# We don't know the state sizes the first time forward is called,
# so we let the module define what its initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than any previous one,
# so resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0),
num_states_to_concat,
state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous()
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices)
for state in correctly_shaped_states]
return tuple(state[:, :num_valid, :].contiguous() for state in sorted_states) |
<SYSTEM_TASK:>
After the RNN has run forward, the states need to be updated.
<END_TASK>
<USER_TASK:>
Description:
def _update_states(self,
final_states: RnnStateStorage,
restoration_indices: torch.LongTensor) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
""" |
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices)
for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
# Masks for the unused states of shape (1, new_batch_size, 1)
used_new_rows_mask = [(state[0, :, :].sum(-1)
!= 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
# Carry over the old state for any rows which weren't used in this batch.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
# this never happens, because the states themselves are mutated
# by appending zeros when calling _get_initial_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states) |
<SYSTEM_TASK:>
Convert a list of strings to a list of Values
<END_TASK>
<USER_TASK:>
Description:
def to_value_list(original_strings, corenlp_values=None):
"""Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
""" |
assert isinstance(original_strings, (list, tuple, set))
if corenlp_values is not None:
assert isinstance(corenlp_values, (list, tuple, set))
assert len(original_strings) == len(corenlp_values)
return list(set(to_value(x, y) for (x, y)
in zip(original_strings, corenlp_values)))
else:
return list(set(to_value(x) for x in original_strings)) |
<SYSTEM_TASK:>
Return True if the predicted denotation is correct.
<END_TASK>
<USER_TASK:>
Description:
def check_denotation(target_values, predicted_values):
"""Return True if the predicted denotation is correct.
Args:
target_values (list[Value])
predicted_values (list[Value])
Returns:
bool
""" |
# Check size
if len(target_values) != len(predicted_values):
return False
# Check items
for target in target_values:
if not any(target.match(pred) for pred in predicted_values):
return False
return True |
<SYSTEM_TASK:>
Try to parse into a number.
<END_TASK>
<USER_TASK:>
Description:
def parse(text):
"""Try to parse into a number.
Return:
the number (int or float) if successful; otherwise None.
""" |
try:
return int(text)
except ValueError:
try:
amount = float(text)
assert not isnan(amount) and not isinf(amount)
return amount
except (ValueError, AssertionError):
return None |
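A few illustrative calls, assuming this ``parse`` can be called directly (in the official evaluator it is a static method) and that ``isnan`` and ``isinf`` are imported from ``math``:
assert parse("42") == 42
assert parse("3.14") == 3.14
assert parse("not a number") is None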
<SYSTEM_TASK:>
Try to parse into a date.
<END_TASK>
<USER_TASK:>
Description:
def parse(text):
"""Try to parse into a date.
Return:
tuple (year, month, date) if successful; otherwise None.
""" |
try:
ymd = text.lower().split('-')
assert len(ymd) == 3
year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
month = -1 if ymd[1] == 'xx' else int(ymd[1])
day = -1 if ymd[2] == 'xx' else int(ymd[2])
assert not year == month == day == -1
assert month == -1 or 1 <= month <= 12
assert day == -1 or 1 <= day <= 31
return (year, month, day)
except (ValueError, AssertionError):
return None |
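Illustrative calls under the same assumption that ``parse`` is callable directly; ``xx`` placeholders map to -1, and an out-of-range month or day returns None:
assert parse("2013-07-18") == (2013, 7, 18)
assert parse("xx-07-xx") == (-1, 7, -1)
assert parse("2013-13-01") is None   # month out of range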
<SYSTEM_TASK:>
Given a sequence tensor, extract spans and return representations of
<END_TASK>
<USER_TASK:>
Description:
def forward(self, # pylint: disable=arguments-differ
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.LongTensor = None,
span_indices_mask: torch.LongTensor = None):
"""
Given a sequence tensor, extract spans and return representations of
them. Span representation can be computed in many different ways,
such as concatenation of the start and end spans, attention over the
vectors contained inside the span, etc.
Parameters
----------
sequence_tensor : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, sequence_length, embedding_size)
representing an embedded sequence of words.
span_indices : ``torch.LongTensor``, required.
A tensor of shape ``(batch_size, num_spans, 2)``, where the last
dimension represents the inclusive start and end indices of the
span to be extracted from the ``sequence_tensor``.
sequence_mask : ``torch.LongTensor``, optional (default = ``None``).
A tensor of shape (batch_size, sequence_length) representing padded
elements of the sequence.
span_indices_mask : ``torch.LongTensor``, optional (default = ``None``).
A tensor of shape (batch_size, num_spans) representing the valid
spans in the ``indices`` tensor. This mask is optional because
sometimes it's easier to worry about masking after calling this
function, rather than passing a mask directly.
Returns
-------
A tensor of shape ``(batch_size, num_spans, embedded_span_size)``,
where ``embedded_span_size`` depends on the way spans are represented.
""" |
raise NotImplementedError |
<SYSTEM_TASK:>
Takes an initial state object, a means of transitioning from state to state, and a
<END_TASK>
<USER_TASK:>
Description:
def decode(self,
initial_state: State,
transition_function: TransitionFunction,
supervision: SupervisionType) -> Dict[str, torch.Tensor]:
"""
Takes an initial state object, a means of transitioning from state to state, and a
supervision signal, and uses the supervision to train the transition function to pick
"good" states.
This function should typically return a ``loss`` key during training, which the ``Model``
will use as its loss.
Parameters
----------
initial_state : ``State``
This is the initial state for decoding, typically initialized after running some kind
of encoder on some inputs.
transition_function : ``TransitionFunction``
This is the transition function that scores all possible actions that can be taken in a
given state, and returns a ranked list of next states at each step of decoding.
supervision : ``SupervisionType``
This is the supervision that is used to train the ``transition_function`` function to
pick "good" states. You can use whatever kind of supervision you want (e.g., a single
"gold" action sequence, a set of possible "gold" action sequences, a reward function,
etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
with how you've matched the supervision that you provide in the model to the
``DecoderTrainer`` that you want to use.
""" |
raise NotImplementedError |
<SYSTEM_TASK:>
Returns the state of the scheduler as a ``dict``.
<END_TASK>
<USER_TASK:>
Description:
def state_dict(self) -> Dict[str, Any]:
"""
Returns the state of the scheduler as a ``dict``.
""" |
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} |
<SYSTEM_TASK:>
Load the scheduler's state.
<END_TASK>
<USER_TASK:>
Description:
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
Load the scheduler's state.
Parameters
----------
state_dict : ``Dict[str, Any]``
Scheduler state. Should be an object returned from a call to ``state_dict``.
""" |
self.__dict__.update(state_dict) |
<SYSTEM_TASK:>
Identifies the best prediction given the results from the submodels.
<END_TASK>
<USER_TASK:>
Description:
def ensemble(subresults: List[Dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Identifies the best prediction given the results from the submodels.
Parameters
----------
subresults : List[Dict[str, torch.Tensor]]
Results of each submodel.
Returns
-------
The index of the best submodel.
""" |
# Choose the highest average confidence span.
span_start_probs = sum(subresult['span_start_probs'] for subresult in subresults) / len(subresults)
span_end_probs = sum(subresult['span_end_probs'] for subresult in subresults) / len(subresults)
return get_best_span(span_start_probs.log(), span_end_probs.log()) |
<SYSTEM_TASK:>
Load the pre-trained weights from the file.
<END_TASK>
<USER_TASK:>
Description:
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
""" |
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), 'r') as fin:
for i_layer, lstms in enumerate(
zip(self.forward_layers, self.backward_layers)
):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer
]['LSTMCell']
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset['W_0'][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights]]:
torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]
torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset['B'][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size):(3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size):(2 * cell_size)
] = tf_bias[(2 * cell_size):(3 * cell_size)]
torch_bias[(2 * cell_size):(3 * cell_size)
] = tf_bias[(1 * cell_size):(2 * cell_size)]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset['W_P_0'][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad |
<SYSTEM_TASK:>
Gives the final return type for this function. If the function takes a single argument,
<END_TASK>
<USER_TASK:>
Description:
def return_type(self) -> Type:
"""
Gives the final return type for this function. If the function takes a single argument,
this is just ``self.second``. If the function takes multiple arguments and returns a basic
type, this should be the final ``.second`` after following all complex types. That is the
implementation here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
""" |
return_type = self.second
while isinstance(return_type, ComplexType):
return_type = return_type.second
return return_type |
<SYSTEM_TASK:>
Gives the types of all arguments to this function. For functions returning a basic type,
<END_TASK>
<USER_TASK:>
Description:
def argument_types(self) -> List[Type]:
"""
Gives the types of all arguments to this function. For functions returning a basic type,
we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic
is implemented here in the base class. If you have a higher-order function that returns a
function itself, you need to override this method.
""" |
arguments = [self.first]
remaining_type = self.second
while isinstance(remaining_type, ComplexType):
arguments.append(remaining_type.first)
remaining_type = remaining_type.second
return arguments |
<SYSTEM_TASK:>
Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this
<END_TASK>
<USER_TASK:>
Description:
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]:
"""
Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this
complex type with each of those basic types.
""" |
substitutions = []
for first_type in substitute_any_type(self.first, basic_types):
for second_type in substitute_any_type(self.second, basic_types):
substitutions.append(self.__class__(first_type, second_type))
return substitutions |
<SYSTEM_TASK:>
Send the mean and std of all parameters and gradients to tensorboard, as well
<END_TASK>
<USER_TASK:>
Description:
def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name
model: Model,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
""" |
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
self.add_train_scalar("parameter_mean/" + name, param.data.mean())
self.add_train_scalar("parameter_std/" + name, param.data.std())
if param.grad is not None:
if param.grad.is_sparse:
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm) |
<SYSTEM_TASK:>
Send current parameter specific learning rates to tensorboard
<END_TASK>
<USER_TASK:>
Description:
def log_learning_rates(self,
model: Model,
optimizer: torch.optim.Optimizer):
"""
Send current parameter specific learning rates to tensorboard
""" |
if self._should_log_learning_rate:
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in model.named_parameters()}
for group in optimizer.param_groups:
if 'lr' not in group:
continue
rate = group['lr']
for param in group['params']:
# check whether params has requires grad or not
effective_rate = rate * float(param.requires_grad)
self.add_train_scalar("learning_rate/" + names[param], effective_rate) |
<SYSTEM_TASK:>
Use stemming to attempt alignment between extracted world and given world literals.
<END_TASK>
<USER_TASK:>
Description:
def align_entities(extracted: List[str],
literals: JsonDict,
stemmer: NltkPorterStemmer) -> List[str]:
"""
Use stemming to attempt alignment between extracted world and given world literals.
If more words align to one world vs the other, it's considered aligned.
""" |
literal_keys = list(literals.keys())
literal_values = list(literals.values())
overlaps = [get_stem_overlaps(extract, literal_values, stemmer) for extract in extracted]
worlds = []
for overlap in overlaps:
if overlap[0] > overlap[1]:
worlds.append(literal_keys[0])
elif overlap[0] < overlap[1]:
worlds.append(literal_keys[1])
else:
worlds.append(None)
return worlds |
<SYSTEM_TASK:>
Calculate multi-perspective cosine matching between time-steps of vectors
<END_TASK>
<USER_TASK:>
Description:
def multi_perspective_match(vector1: torch.Tensor,
vector2: torch.Tensor,
weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate multi-perspective cosine matching between time-steps of vectors
of the same length.
Parameters
----------
vector1 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len, hidden_size)``
vector2 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len or 1, hidden_size)``
weight : ``torch.Tensor``
A tensor of shape ``(num_perspectives, hidden_size)``
Returns
-------
A tuple of two tensors consisting of multi-perspective matching results.
The first one is of the shape (batch, seq_len, 1), the second one is of shape
(batch, seq_len, num_perspectives)
""" |
assert vector1.size(0) == vector2.size(0)
assert weight.size(1) == vector1.size(2) == vector2.size(2)
# (batch, seq_len, 1)
similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)
# (1, 1, num_perspectives, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(0)
# (batch, seq_len, num_perspectives, hidden_size)
vector1 = weight * vector1.unsqueeze(2)
vector2 = weight * vector2.unsqueeze(2)
similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)
return similarity_single, similarity_multi |
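A shape check, assuming the function above is in scope with ``torch`` and ``torch.nn.functional as F`` imported; the dimensions are arbitrary:
import torch
vector1 = torch.randn(2, 5, 8)    # (batch, seq_len, hidden_size)
vector2 = torch.randn(2, 5, 8)
weight = torch.randn(4, 8)        # (num_perspectives, hidden_size)
single, multi = multi_perspective_match(vector1, vector2, weight)
assert single.shape == (2, 5, 1)
assert multi.shape == (2, 5, 4)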
<SYSTEM_TASK:>
Calculate multi-perspective cosine matching between each time step of
<END_TASK>
<USER_TASK:>
Description:
def multi_perspective_match_pairwise(vector1: torch.Tensor,
vector2: torch.Tensor,
weight: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
"""
Calculate multi-perspective cosine matching between each time step of
one vector and each time step of another vector.
Parameters
----------
vector1 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len1, hidden_size)``
vector2 : ``torch.Tensor``
A tensor of shape ``(batch, seq_len2, hidden_size)``
weight : ``torch.Tensor``
A tensor of shape ``(num_perspectives, hidden_size)``
eps : ``float`` optional, (default = 1e-8)
A small value to avoid zero division problem
Returns
-------
A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
multi-perspective matching results
""" |
num_perspectives = weight.size(0)
# (1, num_perspectives, 1, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(2)
# (batch, num_perspectives, seq_len*, hidden_size)
vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
# (batch, num_perspectives, seq_len*, 1)
vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)
# (batch, num_perspectives, seq_len1, seq_len2)
mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
norm_value = vector1_norm * vector2_norm.transpose(2, 3)
# (batch, seq_len1, seq_len2, num_perspectives)
return (mul_result / norm_value.clamp(min=eps)).permute(0, 2, 3, 1) |
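The pairwise variant compares every timestep of one sequence against every timestep of the other; a shape check with arbitrary dimensions, assuming ``torch`` is imported:
import torch
vector1 = torch.randn(2, 5, 8)    # (batch, seq_len1, hidden_size)
vector2 = torch.randn(2, 7, 8)    # (batch, seq_len2, hidden_size)
weight = torch.randn(4, 8)        # (num_perspectives, hidden_size)
matched = multi_perspective_match_pairwise(vector1, vector2, weight)
assert matched.shape == (2, 5, 7, 4)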
<SYSTEM_TASK:>
When the year is not explicitly mentioned in the utterance, the query assumes that
<END_TASK>
<USER_TASK:>
Description:
def get_date_from_utterance(tokenized_utterance: List[Token],
year: int = 1993) -> List[datetime]:
"""
When the year is not explicitly mentioned in the utterance, the query assumes that
it is 1993 so we do the same here. If there is no mention of the month or day then
we do not return any dates from the utterance.
""" |
dates = []
utterance = ' '.join([token.text for token in tokenized_utterance])
year_result = re.findall(r'199[0-4]', utterance)
if year_result:
year = int(year_result[0])
trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for month, tens, digit in trigrams:
# This will match something like ``september twenty first``.
day = ' '.join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
bigrams = ngrams([token.text for token in tokenized_utterance], 2)
for month, day in bigrams:
if month in MONTH_NUMBERS and day in DAY_NUMBERS:
# This will match something like ``september first``.
try:
dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
fivegrams = ngrams([token.text for token in tokenized_utterance], 5)
for tens, digit, _, year_match, month in fivegrams:
# This will match something like ``twenty first of 1993 july``.
day = ' '.join([tens, digit])
if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))
except ValueError:
print('invalid month day')
if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():
try:
dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))
except ValueError:
print('invalid month day')
return dates |
<SYSTEM_TASK:>
Given an utterance, this function finds all the numbers that are in the action space. Since we need to
<END_TASK>
<USER_TASK:>
Description:
def get_numbers_from_utterance(utterance: str, tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
"""
Given an utterance, this function finds all the numbers that are in the action space. Since we need to
keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
representation of the number and the values are lists of the token indices that triggers that number.
""" |
# When we use a regex to find numbers or strings, we need a mapping from
# the character to which token triggered it.
char_offset_to_token_index = {token.idx : token_index
for token_index, token in enumerate(tokenized_utterance)}
# Later, for each time value, we want to look up whether it appears after a
# word such as "about" or "approximately".
indices_of_approximate_words = {index for index, token in enumerate(tokenized_utterance)
if token.text in APPROX_WORDS}
indices_of_words_preceding_time = {index for index, token in enumerate(tokenized_utterance)
if token.text in WORDS_PRECEDING_TIME}
indices_of_am_pm = {index for index, token in enumerate(tokenized_utterance)
if token.text in {'am', 'pm'}}
number_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token.text.isdigit():
if token_index - 1 in indices_of_words_preceding_time and token_index + 1 not in indices_of_am_pm:
for time in digit_to_query_time(token.text):
number_linking_dict[str(time)].append(token_index)
times_linking_dict = get_times_from_utterance(utterance,
char_offset_to_token_index,
indices_of_approximate_words)
for key, value in times_linking_dict.items():
number_linking_dict[key].extend(value)
for index, token in enumerate(tokenized_utterance):
for number in NUMBER_TRIGGER_DICT.get(token.text, []):
if index - 1 in indices_of_approximate_words:
for approx_time in get_approximate_times([int(number)]):
number_linking_dict[str(approx_time)].append(index)
else:
number_linking_dict[number].append(index)
return number_linking_dict |
<SYSTEM_TASK:>
Given a digit in the utterance, return a list of the times that it corresponds to.
<END_TASK>
<USER_TASK:>
Description:
def digit_to_query_time(digit: str) -> List[int]:
"""
Given a digit in the utterance, return a list of the times that it corresponds to.
""" |
if len(digit) > 2:
return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
elif int(digit) % 12 == 0:
return [0, 1200, 2400]
return [int(digit) * HOUR_TO_TWENTY_FOUR,
(int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY] |
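Example outputs, assuming the module constants are ``HOUR_TO_TWENTY_FOUR = 100``, ``TWELVE_TO_TWENTY_FOUR = 1200`` and ``HOURS_IN_DAY = 2400`` as in the ATIS utilities:
assert digit_to_query_time("7") == [700, 1900]     # 7am and 7pm
assert digit_to_query_time("12") == [0, 1200, 2400]
assert digit_to_query_time("830") == [830, 2030]   # already a clock time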
<SYSTEM_TASK:>
Given a list of times that follow a word such as ``about``,
<END_TASK>
<USER_TASK:>
Description:
def get_approximate_times(times: List[int]) -> List[int]:
"""
Given a list of times that follow a word such as ``about``,
we return a list of times that could appear in the query as a result
of this. For example if ``about 7pm`` appears in the utterance, then
we also want to add ``1830`` and ``1930``.
""" |
approximate_times = []
for time in times:
hour = int(time/HOUR_TO_TWENTY_FOUR) % 24
minute = time % HOUR_TO_TWENTY_FOUR
approximate_time = datetime.now()
approximate_time = approximate_time.replace(hour=hour, minute=minute)
start_time_range = approximate_time - timedelta(minutes=30)
end_time_range = approximate_time + timedelta(minutes=30)
approximate_times.extend([start_time_range.hour * HOUR_TO_TWENTY_FOUR + start_time_range.minute,
end_time_range.hour * HOUR_TO_TWENTY_FOUR + end_time_range.minute])
return approximate_times |
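A quick check matching the docstring's example, under the same ``HOUR_TO_TWENTY_FOUR = 100`` assumption; ``about 7pm`` (1900) widens to half an hour on either side:
assert get_approximate_times([1900]) == [1830, 1930]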
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def _time_regex_match(regex: str,
utterance: str,
char_offset_to_token_index: Dict[int, int],
map_match_to_query_value: Callable[[str], List[int]],
indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
r"""
Given a regex for matching times in the utterance, we want to convert the matches
to the values that appear in the query and token indices they correspond to.
``char_offset_to_token_index`` is a dictionary that maps from the character offset to
the token index, we use this to look up what token a regex match corresponds to.
``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
``approximately``. We use this to check if a regex match is preceded by one of these words.
If it is, we also want to add the times that define this approximate time range.
``map_match_to_query_value`` is a function that converts the regex matches to the
values that appear in the query. For example, we may pass in a regex such as ``\d+pm``
that matches times such as ``7pm``. ``map_match_to_query_value`` would be a function that
takes ``7pm`` as input and returns ``1900``.
""" |
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile(regex)
for match in number_regex.finditer(utterance):
query_values = map_match_to_query_value(match.group())
# If the time appears after a word like ``about`` then we also add
# the times that mark the start and end of the allowed range.
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()],
char_offset_to_token_index[match.start()] + 1])
return linking_scores_dict |
<SYSTEM_TASK:>
We evaluate here whether the predicted query and the query label evaluate to the
<END_TASK>
<USER_TASK:>
Description:
def _evaluate_sql_query_subprocess(self, predicted_query: str, sql_query_labels: List[str]) -> int:
"""
We evaluate here whether the predicted query and the query label evaluate to the
exact same table. This method is only called by the subprocess, so we just exit with
1 if it is correct and 0 otherwise.
""" |
postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)
try:
self._cursor.execute(postprocessed_predicted_query)
predicted_rows = self._cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f'Error executing predicted: {error}')
exit(0)
# If predicted table matches any of the reference tables then it is counted as correct.
target_rows = None
for sql_query_label in sql_query_labels:
postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
try:
self._cursor.execute(postprocessed_sql_query_label)
target_rows = self._cursor.fetchall()
except sqlite3.Error as error:
logger.warning(f'Error executing predicted: {error}')
if predicted_rows == target_rows:
exit(1)
exit(0) |
<SYSTEM_TASK:>
Formats a dictionary of production rules into the string format expected
<END_TASK>
<USER_TASK:>
Description:
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
"""
Formats a dictionary of production rules into the string format expected
by the Parsimonious Grammar class.
""" |
grammar_string = '\n'.join([f"{nonterminal} = {' / '.join(right_hand_side)}"
for nonterminal, right_hand_side in grammar_dictionary.items()])
return grammar_string.replace("\\", "\\\\") |
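A toy illustration with a two-rule dictionary (the real SQL grammar has many more productions); each key becomes one Parsimonious rule, with alternatives joined by ``/``:
grammar = {"statement": ['(query ws ";")', 'query'],
           "query": ['(ws select_core ws)']}
assert format_grammar_string(grammar) == ('statement = (query ws ";") / query\n'
                                          'query = (ws select_core ws)')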
<SYSTEM_TASK:>
We initialize the valid actions with the global actions. These include the
<END_TASK>
<USER_TASK:>
Description:
def initialize_valid_actions(grammar: Grammar,
keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
"""
We initialize the valid actions with the global actions. These include the
valid actions that result from the grammar and also those that result from
the tables provided. The keys represent the nonterminals in the grammar
and the values are lists of the valid actions of that nonterminal.
""" |
valid_actions: Dict[str, Set[str]] = defaultdict(set)
for key in grammar:
rhs = grammar[key]
# Sequence represents a series of expressions that match pieces of the text in order.
# Eg. A -> B C
if isinstance(rhs, Sequence):
valid_actions[key].add(format_action(key, " ".join(rhs._unicode_members()), # pylint: disable=protected-access
keywords_to_uppercase=keywords_to_uppercase))
# OneOf represents a series of expressions, one of which matches the text.
# Eg. A -> B / C
elif isinstance(rhs, OneOf):
for option in rhs._unicode_members(): # pylint: disable=protected-access
valid_actions[key].add(format_action(key, option,
keywords_to_uppercase=keywords_to_uppercase))
# A string literal, eg. "A"
elif isinstance(rhs, Literal):
if rhs.literal != "":
valid_actions[key].add(format_action(key, repr(rhs.literal),
keywords_to_uppercase=keywords_to_uppercase))
else:
valid_actions[key] = set()
valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
return valid_action_strings |
<SYSTEM_TASK:>
This function formats an action as it appears in models. It
<END_TASK>
<USER_TASK:>
Description:
def format_action(nonterminal: str,
right_hand_side: str,
is_string: bool = False,
is_number: bool = False,
keywords_to_uppercase: List[str] = None) -> str:
"""
This function formats an action as it appears in models. It
splits productions based on the special `ws` and `wsp` rules,
which are used in grammars to denote whitespace, and then
rejoins these tokens into a formatted, comma-separated list.
Importantly, note that it `does not` split on spaces in
the grammar string, because these might not correspond
to spaces in the language the grammar recognises.
Parameters
----------
nonterminal : ``str``, required.
The nonterminal in the action.
right_hand_side : ``str``, required.
The right hand side of the action
(i.e the thing which is produced).
is_string : ``bool``, optional (default = False).
Whether the production produces a string.
If it does, it is formatted as ``nonterminal -> ['string']``
is_number : ``bool``, optional, (default = False).
Whether the production produces a number.
If it does, it is formatted as ``nonterminal -> ['number']``
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
""" |
keywords_to_uppercase = keywords_to_uppercase or []
if right_hand_side.upper() in keywords_to_uppercase:
right_hand_side = right_hand_side.upper()
if is_string:
return f'{nonterminal} -> ["\'{right_hand_side}\'"]'
elif is_number:
return f'{nonterminal} -> ["{right_hand_side}"]'
else:
right_hand_side = right_hand_side.lstrip("(").rstrip(")")
child_strings = [token for token in WHITESPACE_REGEX.split(right_hand_side) if token]
child_strings = [tok.upper() if tok.upper() in keywords_to_uppercase else tok for tok in child_strings]
return f"{nonterminal} -> [{', '.join(child_strings)}]" |
<SYSTEM_TASK:>
For each node, we accumulate the rules that generated its children in a list.
<END_TASK>
<USER_TASK:>
Description:
def add_action(self, node: Node) -> None:
"""
For each node, we accumulate the rules that generated its children in a list.
""" |
if node.expr.name and node.expr.name not in ['ws', 'wsp']:
nonterminal = f'{node.expr.name} -> '
if isinstance(node.expr, Literal):
right_hand_side = f'["{node.text}"]'
else:
child_strings = []
for child in node.__iter__():
if child.expr.name in ['ws', 'wsp']:
continue
if child.expr.name != '':
child_strings.append(child.expr.name)
else:
child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(")") # pylint: disable=protected-access
child_right_side_list = [tok for tok in
WHITESPACE_REGEX.split(child_right_side_string) if tok]
child_right_side_list = [tok.upper() if tok.upper() in
self.keywords_to_uppercase else tok
for tok in child_right_side_list]
child_strings.extend(child_right_side_list)
right_hand_side = "[" + ", ".join(child_strings) + "]"
rule = nonterminal + right_hand_side
self.action_sequence = [rule] + self.action_sequence |
<SYSTEM_TASK:>
See the ``NodeVisitor`` visit method. This just changes the order in which
<END_TASK>
<USER_TASK:>
Description:
def visit(self, node):
"""
See the ``NodeVisitor`` visit method. This just changes the order in which
we visit nonterminals from right-to-left to left-to-right.
""" |
method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
# Changing this to reverse here!
return method(node, [self.visit(child) for child in reversed(list(node))])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception: # pylint: disable=broad-except
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, traceback = exc_info()
reraise(VisitationError, VisitationError(exc, exc_class, node), traceback) |
<SYSTEM_TASK:>
SQL is a predominantly variable-free language in terms of simple usage, in the
<END_TASK>
<USER_TASK:>
Description:
def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]):
"""
    SQL is a predominantly variable-free language in terms of simple usage, in the
sense that most queries do not create references to variables which are not
already static tables in a dataset. However, it is possible to do this via
derived tables. If we don't require this functionality, we can tighten the
grammar, because we don't need to support aliased tables.
""" |
# Tables in variable free grammars cannot be aliased, so we
# remove this functionality from the grammar.
grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', 'expr']
# Similarly, collapse the definition of a source table
# to not contain aliases and modify references to subqueries.
grammar_dictionary["single_source"] = ['table_name', '("(" ws query ws ")")']
del grammar_dictionary["source_subq"]
del grammar_dictionary["source_table"]
grammar_dictionary["expr"] = ['in_expr',
'(value wsp "LIKE" wsp string)',
'(value ws "BETWEEN" wsp value ws "AND" wsp value)',
'(value ws binaryop wsp expr)',
'(unaryop ws expr)',
'(col_ref ws "IS" ws "NOT" ws "NULL")',
'(col_ref ws "IS" ws "NULL")',
# This used to be source_subq - now
                                  # we don't need aliases, so we can collapse it to queries.
'("(" ws query ws ")")',
'value']
# Finally, remove the ability to reference an arbitrary name,
# because now we don't have aliased tables, we don't need
# to recognise new variables.
del grammar_dictionary["name"] |
<SYSTEM_TASK:>
Variables can be treated as numbers or strings if their type can be inferred -
<END_TASK>
<USER_TASK:>
Description:
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None:
"""
Variables can be treated as numbers or strings if their type can be inferred -
however, that can be difficult, so instead, we can just treat them all as values
and be a bit looser on the typing we allow in our grammar. Here we just remove
all references to number and string from the grammar, replacing them with value.
""" |
grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', 'value']
grammar_dictionary["value"].remove('string')
grammar_dictionary["value"].remove('number')
grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)']
grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)'
del grammar_dictionary["string"]
del grammar_dictionary["number"] |
<SYSTEM_TASK:>
Ensembles don't have vocabularies or weights of their own, so they override _load.
<END_TASK>
<USER_TASK:>
Description:
def _load(cls,
config: Params,
serialization_dir: str,
weights_file: str = None,
cuda_device: int = -1) -> 'Model':
"""
Ensembles don't have vocabularies or weights of their own, so they override _load.
""" |
model_params = config.get('model')
# The experiment config tells us how to _train_ a model, including where to get pre-trained
# embeddings from. We're now _loading_ the model, so those embeddings will already be
# stored in our weights. We don't need any pretrained weight file anymore, and we don't
# want the code to look for it, so we remove it from the parameters here.
remove_pretrained_embedding_params(model_params)
model = Model.from_params(vocab=None, params=model_params)
# Force model to cpu or gpu, as appropriate, to make sure that the embeddings are
# in sync with the weights
if cuda_device >= 0:
model.cuda(cuda_device)
else:
model.cpu()
return model |
<SYSTEM_TASK:>
Take the question and check if it is compatible with either of the answer choices.
<END_TASK>
<USER_TASK:>
Description:
def infer(self, setup: QuaRelType, answer_0: QuaRelType, answer_1: QuaRelType) -> int:
"""
Take the question and check if it is compatible with either of the answer choices.
""" |
if self._check_quarels_compatible(setup, answer_0):
if self._check_quarels_compatible(setup, answer_1):
# Found two answers
return -2
else:
return 0
elif self._check_quarels_compatible(setup, answer_1):
return 1
else:
return -1 |
<SYSTEM_TASK:>
Creates a Flask app that serves up the provided ``Predictor``
<END_TASK>
<USER_TASK:>
Description:
def make_app(predictor: Predictor,
field_names: List[str] = None,
static_dir: str = None,
sanitizer: Callable[[JsonDict], JsonDict] = None,
title: str = "AllenNLP Demo") -> Flask:
"""
Creates a Flask app that serves up the provided ``Predictor``
along with a front-end for interacting with it.
If you want to use the built-in bare-bones HTML, you must provide the
field names for the inputs (which will be used both as labels
and as the keys in the JSON that gets sent to the predictor).
If you would rather create your own HTML, call it index.html
and provide its directory as ``static_dir``. In that case you
don't need to supply the field names -- that information should
be implicit in your demo site. (Probably the easiest thing to do
is just start with the bare-bones HTML and modify it.)
    In addition, if you want to somehow transform the JSON prediction
(e.g. by removing probabilities or logits)
you can do that by passing in a ``sanitizer`` function.
""" |
if static_dir is not None:
static_dir = os.path.abspath(static_dir)
if not os.path.exists(static_dir):
logger.error("app directory %s does not exist, aborting", static_dir)
sys.exit(-1)
elif static_dir is None and field_names is None:
print("Neither build_dir nor field_names passed. Demo won't render on this port.\n"
"You must use nodejs + react app to interact with the server.")
app = Flask(__name__) # pylint: disable=invalid-name
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index() -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_file(os.path.join(static_dir, 'index.html'))
else:
html = _html(title, field_names)
return Response(response=html, status=200)
@app.route('/predict', methods=['POST', 'OPTIONS'])
def predict() -> Response: # pylint: disable=unused-variable
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_json(data)
if sanitizer is not None:
prediction = sanitizer(prediction)
log_blob = {"inputs": data, "outputs": prediction}
logger.info("prediction: %s", json.dumps(log_blob))
return jsonify(prediction)
@app.route('/predict_batch', methods=['POST', 'OPTIONS'])
def predict_batch() -> Response: # pylint: disable=unused-variable
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_batch_json(data)
if sanitizer is not None:
prediction = [sanitizer(p) for p in prediction]
return jsonify(prediction)
@app.route('/<path:path>')
def static_proxy(path: str) -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_from_directory(static_dir, path)
else:
raise ServerError("static_dir not specified", 404)
return app |
<SYSTEM_TASK:>
Returns bare bones HTML for serving up an input form with the
<END_TASK>
<USER_TASK:>
Description:
def _html(title: str, field_names: List[str]) -> str:
"""
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model.
""" |
inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name)
for field_name in field_names)
quoted_field_names = [f"'{field_name}'" for field_name in field_names]
quoted_field_list = f"[{','.join(quoted_field_names)}]"
return _PAGE_TEMPLATE.substitute(title=title,
css=_CSS,
inputs=inputs,
qfl=quoted_field_list) |
<SYSTEM_TASK:>
Returns the valid actions in the current grammar state. See the class docstring for a
<END_TASK>
<USER_TASK:>
Description:
def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]:
"""
Returns the valid actions in the current grammar state. See the class docstring for a
description of what we're returning here.
""" |
actions = self._valid_actions[self._nonterminal_stack[-1]]
context_actions = []
for type_, variable in self._lambda_stacks:
if self._nonterminal_stack[-1] == type_:
production_string = f"{type_} -> {variable}"
context_actions.append(self._context_actions[production_string])
if context_actions:
input_tensor, output_tensor, action_ids = actions['global']
new_inputs = [input_tensor] + [x[0] for x in context_actions]
input_tensor = torch.cat(new_inputs, dim=0)
new_outputs = [output_tensor] + [x[1] for x in context_actions]
output_tensor = torch.cat(new_outputs, dim=0)
new_action_ids = action_ids + [x[2] for x in context_actions]
# We can't just reassign to actions['global'], because that would modify the state of
# self._valid_actions. Instead, we need to construct a new actions dictionary.
new_actions = {**actions}
new_actions['global'] = (input_tensor, output_tensor, new_action_ids)
actions = new_actions
return actions |
<SYSTEM_TASK:>
Replace all the parameter values with the averages.
<END_TASK>
<USER_TASK:>
Description:
def assign_average_value(self) -> None:
"""
Replace all the parameter values with the averages.
Save the current parameter values to restore later.
""" |
for name, parameter in self._parameters:
self._backups[name].copy_(parameter.data)
parameter.data.copy_(self._shadows[name]) |
<SYSTEM_TASK:>
This method can be used to prune the set of unfinished states on a beam or finished states
<END_TASK>
<USER_TASK:>
Description:
def _prune_beam(states: List[State],
beam_size: int,
sort_states: bool = False) -> List[State]:
"""
This method can be used to prune the set of unfinished states on a beam or finished states
    at the end of search. In the former case, the states need not be sorted because they all come
from the same decoding step, which does the sorting. However, if the states are finished and
this method is called at the end of the search, they need to be sorted because they come
from different decoding steps.
""" |
states_by_batch_index: Dict[int, List[State]] = defaultdict(list)
for state in states:
assert len(state.batch_indices) == 1
batch_index = state.batch_indices[0]
states_by_batch_index[batch_index].append(state)
pruned_states = []
for _, instance_states in states_by_batch_index.items():
if sort_states:
scores = torch.cat([state.score[0].view(-1) for state in instance_states])
_, sorted_indices = scores.sort(-1, descending=True)
sorted_states = [instance_states[i] for i in sorted_indices.detach().cpu().numpy()]
instance_states = sorted_states
for state in instance_states[:beam_size]:
pruned_states.append(state)
return pruned_states |
<SYSTEM_TASK:>
Returns the best finished states for each batch instance based on model scores. We return
<END_TASK>
<USER_TASK:>
Description:
def _get_best_final_states(self, finished_states: List[StateType]) -> Dict[int, List[StateType]]:
"""
Returns the best finished states for each batch instance based on model scores. We return
        at most ``self._beam_size`` sequences per instance.
""" |
batch_states: Dict[int, List[StateType]] = defaultdict(list)
for state in finished_states:
batch_states[state.batch_indices[0]].append(state)
best_states: Dict[int, List[StateType]] = {}
for batch_index, states in batch_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]]
return best_states |
<SYSTEM_TASK:>
Returns an embedding matrix for the given vocabulary using the pretrained embeddings
<END_TASK>
<USER_TASK:>
Description:
def _read_pretrained_embeddings_file(file_uri: str,
embedding_dim: int,
vocab: Vocabulary,
namespace: str = "tokens") -> torch.FloatTensor:
"""
    Returns an embedding matrix for the given vocabulary using the pretrained embeddings
contained in the given file. Embeddings for tokens not found in the pretrained embedding file
are randomly initialized using a normal distribution with mean and standard deviation equal to
those of the pretrained embeddings.
We support two file formats:
* text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ...
    The text file may optionally be compressed, and may even reside in an archive with multiple files.
If the file resides in an archive with other files, then ``embeddings_filename`` must
be a URI "(archive_uri)#file_path_inside_the_archive"
* hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor.
If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume
text format.
Parameters
----------
file_uri : str, required.
It can be:
        * a file system path or a URL of an optionally compressed text file or a zip/tar archive
containing a single file.
* URI of the type ``(archive_path_or_url)#file_path_inside_archive`` if the text file
is contained in a multi-file archive.
    embedding_dim : int, required.
        The dimensionality of the embeddings to read or initialize.
    vocab : Vocabulary, required.
        A Vocabulary object.
    namespace : str, (optional, default=tokens)
        The namespace of the vocabulary to find pretrained embeddings for.
Returns
-------
A weight matrix with embeddings initialized from the read file. The matrix has shape
``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in
the pretrained embedding file are initialized to the pretrained embedding value.
""" |
file_ext = get_file_extension(file_uri)
if file_ext in ['.h5', '.hdf5']:
return _read_embeddings_from_hdf5(file_uri,
embedding_dim,
vocab, namespace)
return _read_embeddings_from_text_file(file_uri,
embedding_dim,
vocab, namespace) |
<SYSTEM_TASK:>
This function takes a string as input and, if it contains 1 or 2 integers, assumes the
<END_TASK>
<USER_TASK:>
Description:
def _get_num_tokens_from_first_line(line: str) -> Optional[int]:
""" This function takes in input a string and if it contains 1 or 2 integers, it assumes the
largest one it the number of tokens. Returns None if the line doesn't match that pattern. """ |
fields = line.split(' ')
if 1 <= len(fields) <= 2:
try:
int_fields = [int(x) for x in fields]
except ValueError:
return None
else:
num_tokens = max(int_fields)
logger.info('Recognized a header line in the embedding file with number of tokens: %d',
num_tokens)
return num_tokens
return None |
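A brief sketch of the header-detection behaviour on hypothetical first lines of an embedding file:
_get_num_tokens_from_first_line("400000 300")     # returns 400000 (vocab size and dimension header)
_get_num_tokens_from_first_line("the 0.41 0.24")  # returns None (three fields, so treated as a vector line)
_get_num_tokens_from_first_line("word 0.25")      # returns None (a field is not an integer)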
<SYSTEM_TASK:>
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and
<END_TASK>
<USER_TASK:>
Description:
def _get_predicted_embedding_addition(self,
checklist_state: ChecklistStatelet,
action_ids: List[int],
action_embeddings: torch.Tensor) -> torch.Tensor:
"""
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and
returns their sum for the decoder to add it to the predicted embedding to bias the
prediction towards missing actions.
""" |
# Our basic approach here will be to figure out which actions we want to bias, by doing
# some fancy indexing work, then multiply the action embeddings by a mask for those
# actions, and return the sum of the result.
# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the
# checklist, and 0 otherwise.
checklist_balance = checklist_state.get_balance().clamp(min=0)
# (num_terminal_actions, 1)
actions_in_agenda = checklist_state.terminal_actions
# (1, num_current_actions)
action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0)
# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the
# terminal action i is our current action j, and a value of 0 otherwise. Because both sets
# of actions are free of duplicates, there will be at most one non-zero value per current
# action, and per terminal action.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()
# Shape: (num_current_actions,). With the inner multiplication, we remove any current
# agenda actions that are not in our checklist balance, then we sum over the terminal
# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,
# where a 1 means to encourage the current action in that position.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)
# Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want
# the model to prefer.
embedding_addition = torch.sum(action_embeddings * actions_to_encourage.unsqueeze(1),
dim=0,
keepdim=False)
if self._add_action_bias:
# If we're adding an action bias, the last dimension of the action embedding is a bias
# weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so
# we zero out that dimension here.
embedding_addition[-1] = 0
return embedding_addition |
<SYSTEM_TASK:>
Pulls at most ``max_instances_in_memory`` from the input_queue,
<END_TASK>
<USER_TASK:>
Description:
def _create_tensor_dicts(input_queue: Queue,
output_queue: Queue,
iterator: DataIterator,
shuffle: bool,
index: int) -> None:
"""
Pulls at most ``max_instances_in_memory`` from the input_queue,
groups them into batches of size ``batch_size``, converts them
to ``TensorDict`` s, and puts them on the ``output_queue``.
""" |
def instances() -> Iterator[Instance]:
instance = input_queue.get()
while instance is not None:
yield instance
instance = input_queue.get()
for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle):
output_queue.put(tensor_dict)
output_queue.put(index) |
<SYSTEM_TASK:>
Reads Instances from the iterable and puts them in the input_queue.
<END_TASK>
<USER_TASK:>
Description:
def _queuer(instances: Iterable[Instance],
input_queue: Queue,
num_workers: int,
num_epochs: Optional[int]) -> None:
"""
Reads Instances from the iterable and puts them in the input_queue.
""" |
epoch = 0
while num_epochs is None or epoch < num_epochs:
epoch += 1
for instance in instances:
input_queue.put(instance)
# Now put a None for each worker, since each needs to receive one
# to know that it's done.
for _ in range(num_workers):
input_queue.put(None) |
<SYSTEM_TASK:>
Returns a list of valid actions for each element of the group.
<END_TASK>
<USER_TASK:>
Description:
def get_valid_actions(self) -> List[Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]:
"""
Returns a list of valid actions for each element of the group.
""" |
return [state.get_valid_actions() for state in self.grammar_state] |
<SYSTEM_TASK:>
A worker that pulls filenames off the input queue, uses the dataset reader
<END_TASK>
<USER_TASK:>
Description:
def _worker(reader: DatasetReader,
input_queue: Queue,
output_queue: Queue,
index: int) -> None:
"""
A worker that pulls filenames off the input queue, uses the dataset reader
to read them, and places the generated instances on the output queue.
When there are no filenames left on the input queue, it puts its ``index``
on the output queue and doesn't do anything else.
""" |
# Keep going until you get a file_path that's None.
while True:
file_path = input_queue.get()
if file_path is None:
# Put my index on the queue to signify that I'm finished
output_queue.put(index)
break
logger.info(f"reading instances from {file_path}")
for instance in reader.read(file_path):
output_queue.put(instance) |
<SYSTEM_TASK:>
Given labels and a constraint type, returns the allowed transitions. It will
<END_TASK>
<USER_TASK:>
Description:
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : ``Dict[int, str]``, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
Returns
-------
``List[Tuple[int, int]]``
The allowed transitions (from_label_id, to_label_id).
""" |
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity,
to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed |
<SYSTEM_TASK:>
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
<END_TASK>
<USER_TASK:>
Description:
def is_transition_allowed(constraint_type: str,
from_tag: str,
from_entity: str,
to_tag: str,
to_entity: str):
"""
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : ``str``, required
The tag that the transition originates from. For example, if the
label is ``I-PER``, the ``from_tag`` is ``I``.
from_entity: ``str``, required
The entity corresponding to the ``from_tag``. For example, if the
label is ``I-PER``, the ``from_entity`` is ``PER``.
to_tag : ``str``, required
The tag that the transition leads to. For example, if the
label is ``I-PER``, the ``to_tag`` is ``I``.
to_entity: ``str``, required
The entity corresponding to the ``to_tag``. For example, if the
label is ``I-PER``, the ``to_entity`` is ``PER``.
Returns
-------
``bool``
Whether the transition is allowed under the given ``constraint_type``.
""" |
# pylint: disable=too-many-return-statements
if to_tag == "START" or from_tag == "END":
# Cannot transition into START or from END
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ('O', 'B', 'U')
if to_tag == "END":
return from_tag in ('O', 'L', 'U')
return any([
# O can transition to O, B-* or U-*
# L-x can transition to O, B-*, or U-*
# U-x can transition to O, B-*, or U-*
from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'),
# B-x can only transition to I-x or L-x
# I-x can only transition to I-x or L-x
from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity
])
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ('O', 'B')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
# Can always transition to O or B-x
to_tag in ('O', 'B'),
# Can only transition to I-x from B-x or I-x
to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ('O', 'I')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
# Can always transition to O or I-x
to_tag in ('O', 'I'),
# Can only transition to B-x from B-x or I-x, where
# x is the same tag.
to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ('B', 'S')
if to_tag == "END":
return from_tag in ('E', 'S')
return any([
# Can only transition to B or S from E or S.
to_tag in ('B', 'S') and from_tag in ('E', 'S'),
# Can only transition to M-x from B-x, where
# x is the same tag.
to_tag == 'M' and from_tag in ('B', 'M') and from_entity == to_entity,
# Can only transition to E-x from B-x or M-x, where
# x is the same tag.
to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity,
])
else:
raise ConfigurationError(f"Unknown constraint type: {constraint_type}") |
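A small sketch of how the two functions above fit together, using a hypothetical three-label BIO tagset:
labels = {0: "O", 1: "B-PER", 2: "I-PER"}
allowed = allowed_transitions("BIO", labels)
# (1, 2) is included because B-PER may be followed by I-PER,
# while (0, 2) is excluded because O cannot transition directly to I-PER.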
<SYSTEM_TASK:>
Uses viterbi algorithm to find most likely tags for the given inputs.
<END_TASK>
<USER_TASK:>
Description:
def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
"""
Uses viterbi algorithm to find most likely tags for the given inputs.
If constraints are applied, disallows all other transitions.
""" |
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions matrix with start and end transitions
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
# Apply transition constraints
constrained_transitions = (
self.transitions * self._constraint_mask[:num_tags, :num_tags] +
-10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
)
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[start_tag, :num_tags] = (
self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
)
transitions[:num_tags, end_tag] = (
self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
-10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
)
else:
transitions[start_tag, :num_tags] = (-10000.0 *
(1 - self._constraint_mask[start_tag, :num_tags].detach()))
transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
best_paths = []
# Pad the max sequence length by 2 to account for start_tag + end_tag.
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
sequence_length = torch.sum(prediction_mask)
# Start with everything totally unlikely
tag_sequence.fill_(-10000.)
# At timestep 0 we must have the START_TAG
tag_sequence[0, start_tag] = 0.
# At steps 1, ..., sequence_length we just use the incoming prediction
tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
# And at the last timestep we must have the END_TAG
tag_sequence[sequence_length + 1, end_tag] = 0.
# We pass the tags and the transitions to ``viterbi_decode``.
viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions)
# Get rid of START and END sentinels and append.
viterbi_path = viterbi_path[1:-1]
best_paths.append((viterbi_path, viterbi_score.item()))
return best_paths |
<SYSTEM_TASK:>
Checks whether the provided obj takes a certain arg.
<END_TASK>
<USER_TASK:>
Description:
def takes_arg(obj, arg: str) -> bool:
"""
Checks whether the provided obj takes a certain arg.
If it's a class, we're really checking whether its constructor does.
If it's a function or method, we're checking the object itself.
Otherwise, we raise an error.
""" |
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
return arg in signature.parameters |
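A quick sketch of ``takes_arg`` against a hypothetical function:
def greet(name: str, punctuation: str = "!") -> str:
    return name + punctuation
takes_arg(greet, "punctuation")   # True
takes_arg(greet, "volume")        # False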
<SYSTEM_TASK:>
Given some class, a `Params` object, and potentially other keyword arguments,
<END_TASK>
<USER_TASK:>
Description:
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]:
"""
Given some class, a `Params` object, and potentially other keyword arguments,
create a dict of keyword args suitable for passing to the class's constructor.
The function does this by finding the class's constructor, matching the constructor
arguments to entries in the `params` object, and instantiating values for the parameters
using the type annotation and possibly a from_params method.
Any values that are provided in the `extras` will just be used as is.
For instance, you might provide an existing `Vocabulary` this way.
""" |
# Get the signature of the constructor.
signature = inspect.signature(cls.__init__)
kwargs: Dict[str, Any] = {}
# Iterate over all the constructor parameters and their annotations.
for name, param in signature.parameters.items():
# Skip "self". You're not *required* to call the first parameter "self",
# so in theory this logic is fragile, but if you don't call the self parameter
# "self" you kind of deserve what happens.
if name == "self":
continue
# If the annotation is a compound type like typing.Dict[str, int],
# it will have an __origin__ field indicating `typing.Dict`
# and an __args__ field indicating `(str, int)`. We capture both.
annotation = remove_optional(param.annotation)
kwargs[name] = construct_arg(cls, name, annotation, param.default, params, **extras)
params.assert_empty(cls.__name__)
return kwargs |
<SYSTEM_TASK:>
The main method in the ``TransitionFunction`` API. This function defines the computation
<END_TASK>
<USER_TASK:>
Description:
def take_step(self,
state: StateType,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[StateType]:
"""
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
        IMPORTANT: This method `must` return states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
""" |
raise NotImplementedError |
<SYSTEM_TASK:>
Parses a chunk of text in the SemEval SDP format.
<END_TASK>
<USER_TASK:>
Description:
def parse_sentence(sentence_blob: str) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]:
"""
Parses a chunk of text in the SemEval SDP format.
Each word in the sentence is returned as a dictionary with the following
format:
'id': '1',
'form': 'Pierre',
'lemma': 'Pierre',
'pos': 'NNP',
'head': '2', # Note that this is the `syntactic` head.
'deprel': 'nn',
'top': '-',
'pred': '+',
'frame': 'named:x-c'
Along with a list of arcs and their corresponding tags. Note that
in semantic dependency parsing words can have more than one head
(it is not a tree), meaning that the list of arcs and tags are
not tied to the length of the sentence.
""" |
annotated_sentence = []
arc_indices = []
arc_tags = []
predicates = []
lines = [line.split("\t") for line in sentence_blob.split("\n")
if line and not line.strip().startswith("#")]
for line_idx, line in enumerate(lines):
annotated_token = {k:v for k, v in zip(FIELDS, line)}
if annotated_token['pred'] == "+":
predicates.append(line_idx)
annotated_sentence.append(annotated_token)
for line_idx, line in enumerate(lines):
for predicate_idx, arg in enumerate(line[len(FIELDS):]):
if arg != "_":
arc_indices.append((line_idx, predicates[predicate_idx]))
arc_tags.append(arg)
return annotated_sentence, arc_indices, arc_tags |
<SYSTEM_TASK:>
Disambiguates single GPU and multiple GPU settings for cuda_device param.
<END_TASK>
<USER_TASK:>
Description:
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]:
"""
Disambiguates single GPU and multiple GPU settings for cuda_device param.
""" |
def from_list(strings):
if len(strings) > 1:
return [int(d) for d in strings]
elif len(strings) == 1:
return int(strings[0])
else:
return -1
if isinstance(cuda_device, str):
return from_list(re.split(r',\s*', cuda_device))
elif isinstance(cuda_device, int):
return cuda_device
elif isinstance(cuda_device, list):
return from_list(cuda_device)
else:
# TODO(brendanr): Determine why mypy can't tell that this matches the Union.
return int(cuda_device) |
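For illustration, a few hypothetical inputs and the devices they resolve to:
parse_cuda_device("0,1,2")   # [0, 1, 2]
parse_cuda_device("3")       # 3
parse_cuda_device([0])       # 0 (a single-element list collapses to an int)
parse_cuda_device(-1)        # -1 (CPU)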
<SYSTEM_TASK:>
Add the epoch number to the batch instances as a MetadataField.
<END_TASK>
<USER_TASK:>
Description:
def add_epoch_number(batch: Batch, epoch: int) -> Batch:
"""
Add the epoch number to the batch instances as a MetadataField.
""" |
for instance in batch.instances:
instance.fields['epoch_num'] = MetadataField(epoch)
return batch |
<SYSTEM_TASK:>
Take the next `max_instances` instances from the given dataset.
<END_TASK>
<USER_TASK:>
Description:
def _take_instances(self,
instances: Iterable[Instance],
max_instances: Optional[int] = None) -> Iterator[Instance]:
"""
Take the next `max_instances` instances from the given dataset.
If `max_instances` is `None`, then just take all instances from the dataset.
If `max_instances` is not `None`, each call resumes where the previous one
left off, and when you get to the end of the dataset you start again from the beginning.
""" |
# If max_instances isn't specified, just iterate once over the whole dataset
if max_instances is None:
yield from iter(instances)
else:
# If we don't have a cursor for this dataset, create one. We use ``id()``
# for the key because ``instances`` could be a list, which can't be used as a key.
key = id(instances)
iterator = self._cursors.get(key, iter(instances))
while max_instances > 0:
try:
# If there are instances left on this iterator,
# yield one and decrement max_instances.
yield next(iterator)
max_instances -= 1
except StopIteration:
# None left, so start over again at the beginning of the dataset.
iterator = iter(instances)
# We may have a new iterator, so update the cursor.
self._cursors[key] = iterator |
<SYSTEM_TASK:>
Breaks the dataset into "memory-sized" lists of instances,
<END_TASK>
<USER_TASK:>
Description:
def _memory_sized_lists(self,
instances: Iterable[Instance]) -> Iterable[List[Instance]]:
"""
Breaks the dataset into "memory-sized" lists of instances,
which it yields up one at a time until it gets through a full epoch.
For example, if the dataset is already an in-memory list, and each epoch
represents one pass through the dataset, it just yields back the dataset.
Whereas if the dataset is lazily read from disk and we've specified to
load 1000 instances at a time, then it yields lists of 1000 instances each.
""" |
lazy = is_lazy(instances)
# Get an iterator over the next epoch worth of instances.
iterator = self._take_instances(instances, self._instances_per_epoch)
# We have four different cases to deal with:
# With lazy instances and no guidance about how many to load into memory,
# we just load ``batch_size`` instances at a time:
if lazy and self._max_instances_in_memory is None:
yield from lazy_groups_of(iterator, self._batch_size)
# If we specified max instances in memory, lazy or not, we just
# load ``max_instances_in_memory`` instances at a time:
elif self._max_instances_in_memory is not None:
yield from lazy_groups_of(iterator, self._max_instances_in_memory)
# If we have non-lazy instances, and we want all instances each epoch,
# then we just yield back the list of instances:
elif self._instances_per_epoch is None:
yield ensure_list(instances)
# In the final case we have non-lazy instances, we want a specific number
        # of instances each epoch, and we didn't specify how many instances to load
# into memory. So we convert the whole iterator to a list:
else:
yield list(iterator) |
<SYSTEM_TASK:>
If self._maximum_samples_per_batch is specified, then split the batch
<END_TASK>
<USER_TASK:>
Description:
def _ensure_batch_is_sufficiently_small(
self,
batch_instances: Iterable[Instance],
excess: Deque[Instance]) -> List[List[Instance]]:
"""
If self._maximum_samples_per_batch is specified, then split the batch
into smaller sub-batches if it exceeds the maximum size.
Parameters
----------
batch_instances : ``Iterable[Instance]``
A candidate batch.
excess : ``Deque[Instance]``
Instances that were not sufficient to form an entire batch
previously. They will be used as part of the first sub-batch. This
will be populated with instances from the end of batch_instances
that do not consist of more than self._maximum_samples_per_batch
samples or self._batch_size instances. It is the caller's
responsibility to place these in a batch too, which may, of course,
be done in part with subsequent calls to this method.
WARNING: Mutated in place!
""" |
if self._maximum_samples_per_batch is None:
assert not excess
return [list(batch_instances)]
key, limit = self._maximum_samples_per_batch
batches: List[List[Instance]] = []
batch: List[Instance] = []
padding_length = -1
excess.extend(batch_instances)
while excess:
instance = excess.popleft()
if self.vocab is not None:
# we index here to ensure that shape information is available,
# as in some cases (with self._maximum_samples_per_batch)
                # we need access to shaping information before batches are constructed
instance.index_fields(self.vocab)
field_lengths = instance.get_padding_lengths()
for _, lengths in field_lengths.items():
try:
padding_length = max(padding_length,
lengths[key])
except KeyError:
pass
proposed_batch_size = len(batch) + 1
# Adding the current instance would exceed the batch size or sample size.
if proposed_batch_size >= self._batch_size or padding_length * proposed_batch_size > limit:
# Output the already existing batch
batches.append(batch)
# Put the current instance back, reset state.
excess.appendleft(instance)
batch = []
padding_length = -1
else:
batch.append(instance)
# Keep the current batch as excess.
excess.extend(batch)
return batches |
<SYSTEM_TASK:>
This method should return one epoch worth of batches.
<END_TASK>
<USER_TASK:>
Description:
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
"""
This method should return one epoch worth of batches.
""" |
raise NotImplementedError |
<SYSTEM_TASK:>
Mask out subsequent positions.
<END_TASK>
<USER_TASK:>
Description:
def subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor:
"""Mask out subsequent positions.""" |
mask = torch.tril(torch.ones(size, size, device=device, dtype=torch.int32)).unsqueeze(0)
return mask |
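For example, the mask for a length-3 sequence is lower triangular with a leading batch dimension:
subsequent_mask(3)
# tensor([[[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]]], dtype=torch.int32)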
<SYSTEM_TASK:>
Apply residual connection to any sublayer with the same size.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, x: torch.Tensor, sublayer: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:
"""Apply residual connection to any sublayer with the same size.""" |
return x + self.dropout(sublayer(self.norm(x))) |
<SYSTEM_TASK:>
An initializer which allows initializing model parameters in "blocks". This is helpful
<END_TASK>
<USER_TASK:>
Description:
def block_orthogonal(tensor: torch.Tensor,
split_sizes: List[int],
gain: float = 1.0) -> None:
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent models which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
Parameters
----------
tensor : ``torch.Tensor``, required.
A tensor to initialize.
split_sizes : List[int], required.
A list of length ``tensor.ndim()`` specifying the size of the
blocks along that particular dimension. E.g. ``[10, 20]`` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : float, optional (default = 1.0)
The gain (scaling) applied to the orthogonal initialization.
""" |
data = tensor.data
sizes = list(tensor.size())
if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
raise ConfigurationError("tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes))
indexes = [list(range(0, max_size, split))
for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple([slice(start_index, start_index + step)
for start_index, step in index_and_step_tuples])
data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain) |
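A minimal sketch on a hypothetical 10 x 20 parameter, split into 5 x 10 blocks:
weight = torch.empty(10, 20)
block_orthogonal(weight, split_sizes=[5, 10])
# The tensor is treated as four independent 5 x 10 blocks, each given its own
# orthogonal initialization (scaled by ``gain``).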
<SYSTEM_TASK:>
Initialize the biases of the forget gate to 1, and all other gates to 0,
<END_TASK>
<USER_TASK:>
Description:
def lstm_hidden_bias(tensor: torch.Tensor) -> None:
"""
Initialize the biases of the forget gate to 1, and all other gates to 0,
following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures
""" |
# gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)
tensor.data.zero_()
hidden_size = tensor.shape[0] // 4
tensor.data[hidden_size:(2 * hidden_size)] = 1.0 |
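A short sketch with a hypothetical hidden size of 4, so the bias has 16 entries laid out as (b_hi | b_hf | b_hg | b_ho):
bias = torch.zeros(16)
lstm_hidden_bias(bias)
# bias[4:8] (the forget-gate slice) is now 1.0; every other entry is 0.0.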
<SYSTEM_TASK:>
Returns true if there is any cell in this column that can be split.
<END_TASK>
<USER_TASK:>
Description:
def _should_split_column_cells(cls, column_cells: List[str]) -> bool:
"""
Returns true if there is any cell in this column that can be split.
""" |
return any(cls._should_split_cell(cell_text) for cell_text in column_cells) |
<SYSTEM_TASK:>
Checks whether the cell should be split. We're just doing the same thing that SEMPRE did
<END_TASK>
<USER_TASK:>
Description:
def _should_split_cell(cls, cell_text: str) -> bool:
"""
Checks whether the cell should be split. We're just doing the same thing that SEMPRE did
here.
""" |
if ', ' in cell_text or '\n' in cell_text or '/' in cell_text:
return True
return False |
<SYSTEM_TASK:>
Returns entities that can be linked to spans in the question, that should be in the agenda,
<END_TASK>
<USER_TASK:>
Description:
def get_linked_agenda_items(self) -> List[str]:
"""
Returns entities that can be linked to spans in the question, that should be in the agenda,
for training a coverage based semantic parser. This method essentially does a heuristic
entity linking, to provide weak supervision for a learning to search parser.
""" |
agenda_items: List[str] = []
for entity in self._get_longest_span_matching_entities():
agenda_items.append(entity)
# If the entity is a cell, we need to add the column to the agenda as well,
# because the answer most likely involves getting the row with the cell.
if 'fb:cell' in entity:
agenda_items.append(self.neighbors[entity][0])
return agenda_items |
<SYSTEM_TASK:>
Ensure single word predicate
<END_TASK>
<USER_TASK:>
Description:
def split_predicate(ex: Extraction) -> Extraction:
"""
Ensure single word predicate
by adding "before-predicate" and "after-predicate"
arguments.
""" |
rel_toks = ex.toks[char_to_word_index(ex.rel.span[0], ex.sent) \
: char_to_word_index(ex.rel.span[1], ex.sent) + 1]
if not rel_toks:
return ex
verb_inds = [tok_ind for (tok_ind, tok)
in enumerate(rel_toks)
if tok.tag_.startswith('VB')]
last_verb_ind = verb_inds[-1] if verb_inds \
else (len(rel_toks) - 1)
rel_parts = [element_from_span([rel_toks[last_verb_ind]],
'V')]
before_verb = rel_toks[ : last_verb_ind]
after_verb = rel_toks[last_verb_ind + 1 : ]
if before_verb:
rel_parts.append(element_from_span(before_verb, "BV"))
if after_verb:
rel_parts.append(element_from_span(after_verb, "AV"))
return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence) |
<SYSTEM_TASK:>
Construct an Element instance from regexp
<END_TASK>
<USER_TASK:>
Description:
def interpret_element(element_type: str, text: str, span: str) -> Element:
"""
Construct an Element instance from regexp
groups.
""" |
return Element(element_type,
interpret_span(span),
text) |
<SYSTEM_TASK:>
Given a list of extractions for a single sentence -
<END_TASK>
<USER_TASK:>
Description:
def convert_sent_to_conll(sent_ls: List[Extraction]):
"""
Given a list of extractions for a single sentence -
convert it to conll representation.
""" |
# Sanity check - make sure all extractions are on the same sentence
assert(len(set([ex.sent for ex in sent_ls])) == 1)
toks = sent_ls[0].sent.split(' ')
return safe_zip(*[range(len(toks)),
toks] + \
[extraction_to_conll(ex)
for ex in sent_ls]) |
<SYSTEM_TASK:>
Pad line to conform to ontonotes representation.
<END_TASK>
<USER_TASK:>
Description:
def pad_line_to_ontonotes(line, domain) -> List[str]:
"""
Pad line to conform to ontonotes representation.
""" |
word_ind, word = line[ : 2]
pos = 'XX'
oie_tags = line[2 : ]
line_num = 0
parse = "-"
lemma = "-"
return [domain, line_num, word_ind, word, pos, parse, lemma, '-',\
'-', '-', '*'] + list(oie_tags) + ['-', ] |
<SYSTEM_TASK:>
Given a dictionary from sentence -> extractions,
<END_TASK>
<USER_TASK:>
Description:
def convert_sent_dict_to_conll(sent_dic, domain) -> str:
"""
Given a dictionary from sentence -> extractions,
return a corresponding CoNLL representation.
""" |
return '\n\n'.join(['\n'.join(['\t'.join(map(str, pad_line_to_ontonotes(line, domain)))
for line in convert_sent_to_conll(sent_ls)])
for sent_ls
                        in sent_dic.items()])
<SYSTEM_TASK:>
Parses a S3 Uri into a dictionary of the Bucket, Key, and VersionId
<END_TASK>
<USER_TASK:>
Description:
def parse_s3_uri(uri):
"""Parses a S3 Uri into a dictionary of the Bucket, Key, and VersionId
:return: a BodyS3Location dict or None if not an S3 Uri
:rtype: dict
""" |
if not isinstance(uri, string_types):
return None
url = urlparse(uri)
query = parse_qs(url.query)
if url.scheme == 's3' and url.netloc and url.path:
s3_pointer = {
'Bucket': url.netloc,
'Key': url.path.lstrip('/')
}
if 'versionId' in query and len(query['versionId']) == 1:
s3_pointer['Version'] = query['versionId'][0]
return s3_pointer
else:
return None |
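A couple of hypothetical URIs and how they are parsed:
parse_s3_uri("s3://my-bucket/path/code.zip?versionId=abc123")
# {'Bucket': 'my-bucket', 'Key': 'path/code.zip', 'Version': 'abc123'}
parse_s3_uri("https://example.com/code.zip")   # None, since the scheme is not s3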
<SYSTEM_TASK:>
Constructs a S3 URI string from given code dictionary
<END_TASK>
<USER_TASK:>
Description:
def to_s3_uri(code_dict):
"""Constructs a S3 URI string from given code dictionary
:param dict code_dict: Dictionary containing Lambda function Code S3 location of the form
{S3Bucket, S3Key, S3ObjectVersion}
:return: S3 URI of form s3://bucket/key?versionId=version
    :rtype: string
""" |
try:
uri = "s3://{bucket}/{key}".format(bucket=code_dict["S3Bucket"], key=code_dict["S3Key"])
version = code_dict.get("S3ObjectVersion", None)
except (TypeError, AttributeError):
raise TypeError("Code location should be a dictionary")
if version:
uri += "?versionId=" + version
return uri |
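The inverse direction, with a hypothetical code dictionary:
to_s3_uri({"S3Bucket": "my-bucket", "S3Key": "code.zip", "S3ObjectVersion": "abc123"})
# 's3://my-bucket/code.zip?versionId=abc123'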
<SYSTEM_TASK:>
Constructs a Lambda `Code` or `Content` property, from the SAM `CodeUri` or `ContentUri` property.
<END_TASK>
<USER_TASK:>
Description:
def construct_s3_location_object(location_uri, logical_id, property_name):
"""Constructs a Lambda `Code` or `Content` property, from the SAM `CodeUri` or `ContentUri` property.
This follows the current scheme for Lambda Functions and LayerVersions.
:param dict or string location_uri: s3 location dict or string
:param string logical_id: logical_id of the resource calling this function
:param string property_name: name of the property which is used as an input to this function.
:returns: a Code dict, containing the S3 Bucket, Key, and Version of the Lambda layer code
:rtype: dict
""" |
if isinstance(location_uri, dict):
if not location_uri.get("Bucket") or not location_uri.get("Key"):
# location_uri is a dictionary but does not contain Bucket or Key property
raise InvalidResourceException(logical_id,
"'{}' requires Bucket and Key properties to be "
"specified".format(property_name))
s3_pointer = location_uri
else:
# location_uri is NOT a dictionary. Parse it as a string
s3_pointer = parse_s3_uri(location_uri)
if s3_pointer is None:
raise InvalidResourceException(logical_id,
'\'{}\' is not a valid S3 Uri of the form '
'"s3://bucket/key" with optional versionId query '
'parameter.'.format(property_name))
code = {
'S3Bucket': s3_pointer['Bucket'],
'S3Key': s3_pointer['Key']
}
if 'Version' in s3_pointer:
code['S3ObjectVersion'] = s3_pointer['Version']
return code |
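A sketch of both accepted input shapes, using hypothetical values:
construct_s3_location_object("s3://my-bucket/layer.zip?versionId=v1",
                             logical_id="MyLayer", property_name="ContentUri")
# {'S3Bucket': 'my-bucket', 'S3Key': 'layer.zip', 'S3ObjectVersion': 'v1'}
construct_s3_location_object({"Bucket": "my-bucket", "Key": "layer.zip"},
                             logical_id="MyLayer", property_name="ContentUri")
# {'S3Bucket': 'my-bucket', 'S3Key': 'layer.zip'}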
<SYSTEM_TASK:>
Returns a list of policies from the resource properties. This method knows how to interpret and handle
<END_TASK>
<USER_TASK:>
Description:
def _get_policies(self, resource_properties):
"""
Returns a list of policies from the resource properties. This method knows how to interpret and handle
polymorphic nature of the policies property.
Policies can be one of the following:
* Managed policy name: string
* List of managed policy names: list of strings
* IAM Policy document: dict containing Statement key
* List of IAM Policy documents: list of IAM Policy Document
* Policy Template: dict with only one key where key is in list of supported policy template names
* List of Policy Templates: list of Policy Template
:param dict resource_properties: Dictionary of resource properties containing the policies property.
It is assumed that this is already a dictionary and contains policies key.
:return list of PolicyEntry: List of policies, where each item is an instance of named tuple `PolicyEntry`
""" |
policies = None
if self._contains_policies(resource_properties):
policies = resource_properties[self.POLICIES_PROPERTY_NAME]
if not policies:
# Policies is None or empty
return []
if not isinstance(policies, list):
            # Just a single entry. Make it into a list for convenience
policies = [policies]
result = []
for policy in policies:
policy_type = self._get_type(policy)
entry = PolicyEntry(data=policy, type=policy_type)
result.append(entry)
return result |
<SYSTEM_TASK:>
Is there policy data in this resource?
<END_TASK>
<USER_TASK:>
Description:
def _contains_policies(self, resource_properties):
"""
        Is there policy data in this resource?
:param dict resource_properties: Properties of the resource
:return: True if we can process this resource. False, otherwise
""" |
return resource_properties is not None \
and isinstance(resource_properties, dict) \
and self.POLICIES_PROPERTY_NAME in resource_properties |
<SYSTEM_TASK:>
Returns the type of the given policy
<END_TASK>
<USER_TASK:>
Description:
def _get_type(self, policy):
"""
Returns the type of the given policy
:param string or dict policy: Policy data
:return PolicyTypes: Type of the given policy. None, if type could not be inferred
""" |
# Must handle intrinsic functions. Policy could be a primitive type or an intrinsic function
# Managed policies are either string or an intrinsic function that resolves to a string
if isinstance(policy, string_types) or is_instrinsic(policy):
return PolicyTypes.MANAGED_POLICY
# Policy statement is a dictionary with the key "Statement" in it
if isinstance(policy, dict) and "Statement" in policy:
return PolicyTypes.POLICY_STATEMENT
# This could be a policy template then.
if self._is_policy_template(policy):
return PolicyTypes.POLICY_TEMPLATE
# Nothing matches. Don't take opinions on how to handle it. Instead just set the appropriate type.
return PolicyTypes.UNKNOWN |
<SYSTEM_TASK:>
Is the given policy data a policy template? Policy templates is a dictionary with one key which is the name
<END_TASK>
<USER_TASK:>
Description:
def _is_policy_template(self, policy):
"""
Is the given policy data a policy template? Policy templates is a dictionary with one key which is the name
of the template.
:param dict policy: Policy data
:return: True, if this is a policy template. False if it is not
""" |
return self._policy_template_processor is not None and \
isinstance(policy, dict) and \
len(policy) == 1 and \
self._policy_template_processor.has(list(policy.keys())[0]) is True |