Dataset columns: text_prompt (string, lengths 100 to 17.7k) and code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Takes a list of rows and a column and returns the mean of the values under that column in <END_TASK> <USER_TASK:> Description: def average(self, rows: List[Row], column: NumberColumn) -> Number: """ Takes a list of rows and a column and returns the mean of the values under that column in those rows. """
cell_values = [row.values[column.name] for row in rows]
if not cell_values:
    return 0.0  # type: ignore
return sum(cell_values) / len(cell_values)
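A minimal, self-contained sketch of how the mean above behaves; the Row and Column namedtuples here are hypothetical stand-ins for the real table types.

from collections import namedtuple

# Hypothetical stand-ins for the table Row and NumberColumn types.
Row = namedtuple("Row", ["values"])
Column = namedtuple("Column", ["name"])

rows = [Row({"year": 2001.0}), Row({"year": 2005.0})]
column = Column("year")

cell_values = [row.values[column.name] for row in rows]
mean = sum(cell_values) / len(cell_values) if cell_values else 0.0
print(mean)  # 2003.0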
<SYSTEM_TASK:> Takes two rows and a number column and returns the difference between the values under <END_TASK> <USER_TASK:> Description: def diff(self, first_row: List[Row], second_row: List[Row], column: NumberColumn) -> Number: """ Takes two rows and a number column and returns the difference between the values under that column in those two rows. """
if not first_row or not second_row:
    return 0.0  # type: ignore
first_value = first_row[0].values[column.name]
second_value = second_row[0].values[column.name]
if isinstance(first_value, float) and isinstance(second_value, float):
    return first_value - second_value  # type: ignore
else:
    raise ExecutionError(f"Invalid column for diff: {column.name}")
<SYSTEM_TASK:> This function will be called on nodes of a logical form tree, which are either non-terminal <END_TASK> <USER_TASK:> Description: def is_terminal(self, symbol: str) -> bool: """ This function will be called on nodes of a logical form tree, which are either non-terminal symbols that can be expanded or terminal symbols that must be leaf nodes. Returns ``True`` if the given symbol is a terminal symbol. """
# We special-case 'lambda' here because it behaves weirdly in action sequences.
return (symbol in self.global_name_mapping
        or symbol in self.local_name_mapping
        or 'lambda' in symbol)
<SYSTEM_TASK:> Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it <END_TASK> <USER_TASK:> Description: def get_multi_match_mapping(self) -> Dict[Type, List[Type]]: """ Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches. """
if self._multi_match_mapping is None:
    self._multi_match_mapping = {}
    basic_types = self.get_basic_types()
    for basic_type in basic_types:
        if isinstance(basic_type, types.MultiMatchNamedBasicType):
            matched_types: List[str] = []
            # We need to check if each type in the `types_to_match` field for the given
            # MultiMatchNamedBasic type is itself in the set of basic types allowed in this
            # world, and add it to the mapping only if it is. Some basic types that the
            # multi match type can match with may be disallowed in the world due to the
            # instance-specific context.
            for type_ in basic_type.types_to_match:
                if type_ in basic_types:
                    matched_types.append(type_)
            self._multi_match_mapping[basic_type] = matched_types
return self._multi_match_mapping
<SYSTEM_TASK:> Takes a logical form as a string, maps its tokens using the mapping and returns a parsed expression. <END_TASK> <USER_TASK:> Description: def parse_logical_form(self, logical_form: str, remove_var_function: bool = True) -> Expression: """ Takes a logical form as a string, maps its tokens using the mapping and returns a parsed expression. Parameters ---------- logical_form : ``str`` Logical form to parse remove_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the usage of a variable. If your language uses it, and you do not want to include it in the parsed expression, set this flag. You may want to do this if you are generating an action sequence from this parsed expression, because it is easier to let the decoder not produce this function due to the way constrained decoding is currently implemented. """
if not logical_form.startswith("("):
    logical_form = f"({logical_form})"
if remove_var_function:
    # Replace "(x)" with "x"
    logical_form = re.sub(r'\(([x-z])\)', r'\1', logical_form)
    # Replace "(var x)" with "(x)"
    logical_form = re.sub(r'\(var ([x-z])\)', r'(\1)', logical_form)
parsed_lisp = semparse_util.lisp_to_nested_expression(logical_form)
translated_string = self._process_nested_expression(parsed_lisp)
type_signature = self.local_type_signatures.copy()
type_signature.update(self.global_type_signatures)
return self._logic_parser.parse(translated_string, signature=type_signature)
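The two ``var``-stripping substitutions can be checked in isolation; this is a standalone sketch on a made-up logical form, using only the regular expressions shown above.

import re

logical_form = "(reverse (lambda x (fb:row.row.year (var x))))"
# Replace "(x)" with "x", then "(var x)" with "(x)", as in parse_logical_form above.
step_one = re.sub(r'\(([x-z])\)', r'\1', logical_form)
step_two = re.sub(r'\(var ([x-z])\)', r'(\1)', step_one)
print(step_two)  # (reverse (lambda x (fb:row.row.year (x))))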
<SYSTEM_TASK:> Takes an action sequence and constructs a logical form from it. This is useful if you want <END_TASK> <USER_TASK:> Description: def get_logical_form(self, action_sequence: List[str], add_var_function: bool = True) -> str: """ Takes an action sequence and constructs a logical form from it. This is useful if you want to get a logical form from a decoded sequence of actions generated by a transition based semantic parser. Parameters ---------- action_sequence : ``List[str]`` The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``). add_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``). Due to the way constrained decoding is currently implemented, it is easier for the decoder to not produce these functions. In that case, setting this flag adds the function in the logical form even though it is not present in the action sequence. """
# Basic outline: we assume that the bracketing that we get in the RHS of each action is the
# correct bracketing for reconstructing the logical form. This is true when there is no
# currying in the action sequence. Given this assumption, we just need to construct a tree
# from the action sequence, then output all of the leaves in the tree, with brackets around
# the children of all non-terminal nodes.
remaining_actions = [action.split(" -> ") for action in action_sequence]
tree = Tree(remaining_actions[0][1], [])
try:
    remaining_actions = self._construct_node_from_actions(tree,
                                                          remaining_actions[1:],
                                                          add_var_function)
except ParsingError:
    logger.error("Error parsing action sequence: %s", action_sequence)
    raise
if remaining_actions:
    logger.error("Error parsing action sequence: %s", action_sequence)
    logger.error("Remaining actions were: %s", remaining_actions)
    raise ParsingError("Extra actions in action sequence")
return nltk_tree_to_logical_form(tree)
<SYSTEM_TASK:> ``nested_expression`` is the result of parsing a logical form in Lisp format. <END_TASK> <USER_TASK:> Description: def _process_nested_expression(self, nested_expression) -> str: """ ``nested_expression`` is the result of parsing a logical form in Lisp format. We process it recursively and return a string in the format that NLTK's ``LogicParser`` would understand. """
expression_is_list = isinstance(nested_expression, list)
expression_size = len(nested_expression)
if expression_is_list and expression_size == 1 and isinstance(nested_expression[0], list):
    return self._process_nested_expression(nested_expression[0])
elements_are_leaves = [isinstance(element, str) for element in nested_expression]
if all(elements_are_leaves):
    mapped_names = [self._map_name(name) for name in nested_expression]
else:
    mapped_names = []
    for element, is_leaf in zip(nested_expression, elements_are_leaves):
        if is_leaf:
            mapped_names.append(self._map_name(element))
        else:
            mapped_names.append(self._process_nested_expression(element))
if mapped_names[0] == "\\":
    # This means the predicate is lambda. NLTK wants the variable name to not be within parentheses,
    # so we only add parentheses around the arguments that follow the variable.
    arguments = [mapped_names[1]] + [f"({name})" for name in mapped_names[2:]]
else:
    arguments = [f"({name})" for name in mapped_names[1:]]
return f'({mapped_names[0]} {" ".join(arguments)})'
<SYSTEM_TASK:> Utility method to add a name and its translation to the local name mapping, and the corresponding <END_TASK> <USER_TASK:> Description: def _add_name_mapping(self, name: str, translated_name: str, name_type: Type = None): """ Utility method to add a name and its translation to the local name mapping, and the corresponding signature, if available, to the local type signatures. This method also updates the reverse name mapping. """
self.local_name_mapping[name] = translated_name
self.reverse_name_mapping[translated_name] = name
if name_type:
    self.local_type_signatures[translated_name] = name_type
<SYSTEM_TASK:> Creates a server running SEMPRE that we can send logical forms to for evaluation. This <END_TASK> <USER_TASK:> Description: def _create_sempre_executor(self) -> None: """ Creates a server running SEMPRE that we can send logical forms to for evaluation. This uses inter-process communication, because SEMPRE is java code. We also need to be careful to clean up the process when our program exits. """
if self._executor_process:
    return
# It'd be much nicer to just use `cached_path` for these files. However, the SEMPRE jar
# that we're using expects to find these files in a particular location, so we need to make
# sure we put the files in that location.
os.makedirs(SEMPRE_DIR, exist_ok=True)
abbreviations_path = os.path.join(SEMPRE_DIR, 'abbreviations.tsv')
if not os.path.exists(abbreviations_path):
    result = requests.get(ABBREVIATIONS_FILE)
    with open(abbreviations_path, 'wb') as downloaded_file:
        downloaded_file.write(result.content)
grammar_path = os.path.join(SEMPRE_DIR, 'grow.grammar')
if not os.path.exists(grammar_path):
    result = requests.get(GROW_FILE)
    with open(grammar_path, 'wb') as downloaded_file:
        downloaded_file.write(result.content)
if not check_for_java():
    raise RuntimeError('Java is not installed properly.')
args = ['java', '-jar', cached_path(SEMPRE_EXECUTOR_JAR), 'serve', self._table_directory]
self._executor_process = subprocess.Popen(args,
                                          stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          bufsize=1)
lines = []
for _ in range(6):
    # SEMPRE outputs six lines of stuff when it loads that I can't disable. So, we clear
    # that here.
    lines.append(str(self._executor_process.stdout.readline()))
assert 'Parser' in lines[-1], "SEMPRE server output unexpected; the server may have changed"
logger.info("Started SEMPRE server for evaluating logical forms")
# This is supposed to ensure that the subprocess gets killed when python exits.
atexit.register(self._stop_sempre_executor)
<SYSTEM_TASK:> Subroutine for ceafe. Computes the mention F measure between gold and <END_TASK> <USER_TASK:> Description: def phi4(gold_clustering, predicted_clustering): """ Subroutine for ceafe. Computes the mention F measure between gold and predicted mentions in a cluster. """
return (2 * len([mention for mention in gold_clustering if mention in predicted_clustering])
        / float(len(gold_clustering) + len(predicted_clustering)))
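A quick standalone check of the formula above on made-up clusters that share two of their three mentions each.

gold_cluster = ["a", "b", "c"]
predicted_cluster = ["b", "c", "d"]

# Same formula as phi4 above: 2 * |overlap| / (|gold| + |predicted|).
overlap = len([m for m in gold_cluster if m in predicted_cluster])
score = 2 * overlap / float(len(gold_cluster) + len(predicted_cluster))
print(score)  # 0.666...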
<SYSTEM_TASK:> Clips gradient norm of an iterable of parameters. <END_TASK> <USER_TASK:> Description: def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float: """Clips gradient norm of an iterable of parameters. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. Supports sparse gradients. Parameters ---------- parameters : ``(Iterable[torch.Tensor])`` An iterable of Tensors that will have gradients normalized. max_norm : ``float`` The max norm of the gradients. norm_type : ``float`` The type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns ------- Total norm of the parameters (viewed as a single vector). """
# pylint: disable=invalid-name,protected-access
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float('inf'):
    total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
    total_norm = 0
    for p in parameters:
        if p.grad.is_sparse:
            # need to coalesce the repeated indices before finding norm
            grad = p.grad.data.coalesce()
            param_norm = grad._values().norm(norm_type)
        else:
            param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm ** norm_type
    total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
    for p in parameters:
        if p.grad.is_sparse:
            p.grad.data._values().mul_(clip_coef)
        else:
            p.grad.data.mul_(clip_coef)
return total_norm
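For intuition, a standalone sketch of the rescaling rule only (dense gradients, 2-norm), not the sparse-aware implementation above: compute the total norm of all gradients and scale them down if it exceeds max_norm.

import torch

params = [torch.nn.Parameter(torch.randn(3)) for _ in range(2)]
loss = sum((p ** 2).sum() for p in params)
loss.backward()

max_norm = 1.0
total_norm = sum(p.grad.norm(2) ** 2 for p in params) ** 0.5
clip_coef = max_norm / (float(total_norm) + 1e-6)
if clip_coef < 1:
    for p in params:
        p.grad.mul_(clip_coef)  # in-place rescale, as in sparse_clip_norm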
<SYSTEM_TASK:> Move the optimizer state to GPU, if necessary. <END_TASK> <USER_TASK:> Description: def move_optimizer_to_cuda(optimizer): """ Move the optimizer state to GPU, if necessary. After calling, any parameter specific state in the optimizer will be located on the same device as the parameter. """
for param_group in optimizer.param_groups:
    for param in param_group['params']:
        if param.is_cuda:
            param_state = optimizer.state[param]
            for k in param_state.keys():
                if isinstance(param_state[k], torch.Tensor):
                    param_state[k] = param_state[k].cuda(device=param.get_device())
<SYSTEM_TASK:> Returns the size of the batch dimension. Assumes a well-formed batch, <END_TASK> <USER_TASK:> Description: def get_batch_size(batch: Union[Dict, torch.Tensor]) -> int: """ Returns the size of the batch dimension. Assumes a well-formed batch, returns 0 otherwise. """
if isinstance(batch, torch.Tensor):
    return batch.size(0)  # type: ignore
elif isinstance(batch, Dict):
    return get_batch_size(next(iter(batch.values())))
else:
    return 0
<SYSTEM_TASK:> Convert seconds past Epoch to human readable string. <END_TASK> <USER_TASK:> Description: def time_to_str(timestamp: int) -> str: """ Convert seconds past Epoch to human readable string. """
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
        datetimestamp.year, datetimestamp.month, datetimestamp.day,
        datetimestamp.hour, datetimestamp.minute, datetimestamp.second
)
<SYSTEM_TASK:> Convert human readable string to datetime.datetime. <END_TASK> <USER_TASK:> Description: def str_to_time(time_str: str) -> datetime.datetime: """ Convert human readable string to datetime.datetime. """
pieces: Any = [int(piece) for piece in time_str.split('-')]
return datetime.datetime(*pieces)
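The two helpers above are inverses of each other up to whole seconds; this standalone round-trip sketch uses only the standard library rather than importing the originals.

import datetime

timestamp = 1500000000  # some seconds-past-epoch value
dt = datetime.datetime.fromtimestamp(timestamp)
encoded = '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
decoded = datetime.datetime(*[int(piece) for piece in encoded.split('-')])
assert decoded == dt.replace(microsecond=0)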
<SYSTEM_TASK:> Load all the datasets specified by the config. <END_TASK> <USER_TASK:> Description: def datasets_from_params(params: Params, cache_directory: str = None, cache_prefix: str = None) -> Dict[str, Iterable[Instance]]: """ Load all the datasets specified by the config. Parameters ---------- params : ``Params`` cache_directory : ``str``, optional If given, we will instruct the ``DatasetReaders`` that we construct to cache their instances in this location (or read their instances from caches in this location, if a suitable cache already exists). This is essentially a `base` directory for the cache, as we will additionally add the ``cache_prefix`` to this directory, giving an actual cache location of ``cache_directory + cache_prefix``. cache_prefix : ``str``, optional This works in conjunction with the ``cache_directory``. The idea is that the ``cache_directory`` contains caches for all different parameter settings, while the ``cache_prefix`` captures a specific set of parameters that led to a particular cache file. That is, if you change the tokenization settings inside your ``DatasetReader``, you don't want to read cached data that used the old settings. In order to avoid this, we compute a hash of the parameters used to construct each ``DatasetReader`` and use that as a "prefix" to the cache files inside the base ``cache_directory``. So, a given ``input_file`` would be cached essentially as ``cache_directory + cache_prefix + input_file``, where you specify a ``cache_directory``, the ``cache_prefix`` is based on the dataset reader parameters, and the ``input_file`` is whatever path you provided to ``DatasetReader.read()``. In order to allow you to give recognizable names to these prefixes if you want them, you can manually specify the ``cache_prefix``. Note that in some rare cases this can be dangerous, as we'll use the `same` prefix for both train and validation dataset readers. """
dataset_reader_params = params.pop('dataset_reader')
validation_dataset_reader_params = params.pop('validation_dataset_reader', None)

train_cache_dir, validation_cache_dir = _set_up_cache_files(dataset_reader_params,
                                                            validation_dataset_reader_params,
                                                            cache_directory,
                                                            cache_prefix)

dataset_reader = DatasetReader.from_params(dataset_reader_params)

validation_and_test_dataset_reader: DatasetReader = dataset_reader
if validation_dataset_reader_params is not None:
    logger.info("Using a separate dataset reader to load validation and test data.")
    validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)

if train_cache_dir:
    dataset_reader.cache_data(train_cache_dir)
    validation_and_test_dataset_reader.cache_data(validation_cache_dir)

train_data_path = params.pop('train_data_path')
logger.info("Reading training data from %s", train_data_path)
train_data = dataset_reader.read(train_data_path)

datasets: Dict[str, Iterable[Instance]] = {"train": train_data}

validation_data_path = params.pop('validation_data_path', None)
if validation_data_path is not None:
    logger.info("Reading validation data from %s", validation_data_path)
    validation_data = validation_and_test_dataset_reader.read(validation_data_path)
    datasets["validation"] = validation_data

test_data_path = params.pop("test_data_path", None)
if test_data_path is not None:
    logger.info("Reading test data from %s", test_data_path)
    test_data = validation_and_test_dataset_reader.read(test_data_path)
    datasets["test"] = test_data

return datasets
<SYSTEM_TASK:> This function creates the serialization directory if it doesn't exist. If it already exists <END_TASK> <USER_TASK:> Description: def create_serialization_dir( params: Params, serialization_dir: str, recover: bool, force: bool) -> None: """ This function creates the serialization directory if it doesn't exist. If it already exists and is non-empty, then it verifies that we're recovering from a training with an identical configuration. Parameters ---------- params: ``Params`` A parameter object specifying an AllenNLP Experiment. serialization_dir: ``str`` The directory in which to save results and logs. recover: ``bool`` If ``True``, we will try to recover from an existing serialization directory, and crash if the directory doesn't exist, or doesn't match the configuration we're given. force: ``bool`` If ``True``, we will overwrite the serialization directory if it already exists. """
if recover and force:
    raise ConfigurationError("Illegal arguments: both force and recover are true.")

if os.path.exists(serialization_dir) and force:
    shutil.rmtree(serialization_dir)

if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
    if not recover:
        raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                 f"not empty. Specify --recover to recover training from existing output.")

    logger.info(f"Recovering from prior training at {serialization_dir}.")

    recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
    if not os.path.exists(recovered_config_file):
        raise ConfigurationError("The serialization directory already exists but doesn't "
                                 "contain a config.json. You probably gave the wrong directory.")
    else:
        loaded_params = Params.from_file(recovered_config_file)

        # Check whether any of the training configuration differs from the configuration we are
        # resuming. If so, warn the user that training may fail.
        fail = False
        flat_params = params.as_flat_dict()
        flat_loaded = loaded_params.as_flat_dict()
        for key in flat_params.keys() - flat_loaded.keys():
            logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                         f"directory we're recovering from.")
            fail = True
        for key in flat_loaded.keys() - flat_params.keys():
            logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                         f"but not in the training config.")
            fail = True
        for key in flat_params.keys():
            if flat_params.get(key, None) != flat_loaded.get(key, None):
                logger.error(f"Value for '{key}' in training configuration does not match the value in "
                             f"the serialization directory we're recovering from: "
                             f"{flat_params[key]} != {flat_loaded[key]}")
                fail = True
        if fail:
            raise ConfigurationError("Training configuration does not match the configuration we're "
                                     "recovering from.")
else:
    if recover:
        raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                 "does not exist. There is nothing to recover from.")
    os.makedirs(serialization_dir, exist_ok=True)
<SYSTEM_TASK:> Performs a forward pass using multiple GPUs. This is a simplification <END_TASK> <USER_TASK:> Description: def data_parallel(batch_group: List[TensorDict], model: Model, cuda_devices: List) -> Dict[str, torch.Tensor]: """ Performs a forward pass using multiple GPUs. This is a simplification of torch.nn.parallel.data_parallel to support the allennlp model interface. """
assert len(batch_group) <= len(cuda_devices)

moved = [nn_util.move_to_device(batch, device)
         for batch, device in zip(batch_group, cuda_devices)]

used_device_ids = cuda_devices[:len(moved)]
# Counterintuitively, it appears replicate expects the source device id to be the first element
# in the device id list. See torch.cuda.comm.broadcast_coalesced, which is called indirectly.
replicas = replicate(model, used_device_ids)

# We pass all our arguments as kwargs. Create a list of empty tuples of the
# correct shape to serve as (non-existent) positional arguments.
inputs = [()] * len(batch_group)
outputs = parallel_apply(replicas, inputs, moved, used_device_ids)

# Only the 'loss' is needed.
# a (num_gpu, ) tensor with loss on each GPU
losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
return {'loss': losses.mean()}
<SYSTEM_TASK:> Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled. <END_TASK> <USER_TASK:> Description: def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]: """ Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled. """
if grad_norm:
    parameters_to_clip = [p for p in model.parameters() if p.grad is not None]
    return sparse_clip_norm(parameters_to_clip, grad_norm)
return None
<SYSTEM_TASK:> Gets the metrics but sets ``"loss"`` to <END_TASK> <USER_TASK:> Description: def get_metrics(model: Model, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]: """ Gets the metrics but sets ``"loss"`` to the total loss divided by the ``num_batches`` so that the ``"loss"`` metric is "average loss per batch". """
metrics = model.get_metrics(reset=reset)
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
return metrics
<SYSTEM_TASK:> Parse all dependencies out of the requirements.txt file. <END_TASK> <USER_TASK:> Description: def parse_requirements() -> Tuple[PackagesType, PackagesType, Set[str]]: """Parse all dependencies out of the requirements.txt file."""
essential_packages: PackagesType = {}
other_packages: PackagesType = {}
duplicates: Set[str] = set()
with open("requirements.txt", "r") as req_file:
    section: str = ""
    for line in req_file:
        line = line.strip()

        if line.startswith("####"):
            # Line is a section name.
            section = parse_section_name(line)
            continue

        if not line or line.startswith("#"):
            # Line is empty or just regular comment.
            continue

        module, version = parse_package(line)
        if module in essential_packages or module in other_packages:
            duplicates.add(module)

        if section.startswith("ESSENTIAL"):
            essential_packages[module] = version
        else:
            other_packages[module] = version

return essential_packages, other_packages, duplicates
<SYSTEM_TASK:> Parse all dependencies out of the setup.py script. <END_TASK> <USER_TASK:> Description: def parse_setup() -> Tuple[PackagesType, PackagesType, Set[str], Set[str]]: """Parse all dependencies out of the setup.py script."""
essential_packages: PackagesType = {}
test_packages: PackagesType = {}
essential_duplicates: Set[str] = set()
test_duplicates: Set[str] = set()

with open('setup.py') as setup_file:
    contents = setup_file.read()

# Parse out essential packages.
package_string = re.search(r"""install_requires=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                           contents, re.DOTALL).groups()[0].strip()
for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
    module, version = parse_package(package)
    if module in essential_packages:
        essential_duplicates.add(module)
    else:
        essential_packages[module] = version

# Parse packages only needed for testing.
package_string = re.search(r"""tests_require=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                           contents, re.DOTALL).groups()[0].strip()
for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
    module, version = parse_package(package)
    if module in test_packages:
        test_duplicates.add(module)
    else:
        test_packages[module] = version

return essential_packages, test_packages, essential_duplicates, test_duplicates
<SYSTEM_TASK:> Given a sentence, return all token spans within the sentence. Spans are `inclusive`. <END_TASK> <USER_TASK:> Description: def enumerate_spans(sentence: List[T], offset: int = 0, max_span_width: int = None, min_span_width: int = 1, filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]: """ Given a sentence, return all token spans within the sentence. Spans are `inclusive`. Additionally, you can provide a maximum and minimum span width, which will be used to exclude spans outside of this range. Finally, you can provide a function mapping ``List[T] -> bool``, which will be applied to every span to decide whether that span should be included. This allows filtering by length, regex matches, pos tags or any Spacy ``Token`` attributes, for example. Parameters ---------- sentence : ``List[T]``, required. The sentence to generate spans for. The type is generic, as this function can be used with strings, or Spacy ``Tokens`` or other sequences. offset : ``int``, optional (default = 0) A numeric offset to add to all span start and end indices. This is helpful if the sentence is part of a larger structure, such as a document, which the indices need to respect. max_span_width : ``int``, optional (default = None) The maximum length of spans which should be included. Defaults to len(sentence). min_span_width : ``int``, optional (default = 1) The minimum length of spans which should be included. Defaults to 1. filter_function : ``Callable[[List[T]], bool]``, optional (default = None) A function mapping sequences of the passed type T to a boolean value. If ``True``, the span is included in the returned spans from the sentence, otherwise it is excluded.. """
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []

for start_index in range(len(sentence)):
    last_end_index = min(start_index + max_span_width, len(sentence))
    first_end_index = min(start_index + min_span_width - 1, len(sentence))
    for end_index in range(first_end_index, last_end_index):
        start = offset + start_index
        end = offset + end_index
        # add 1 to end index because span indices are inclusive.
        if filter_function(sentence[slice(start_index, end_index + 1)]):
            spans.append((start, end))
return spans
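For intuition, a standalone sketch that enumerates the same inclusive spans for a three-token sentence with a maximum width of 2, mirroring the loop above rather than importing it.

sentence = ["The", "cat", "sat"]
max_span_width = 2

spans = []
for start in range(len(sentence)):
    for end in range(start, min(start + max_span_width, len(sentence))):
        spans.append((start, end))

print(spans)  # [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]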
<SYSTEM_TASK:> Given a tag sequence encoded with IOB1 labels, recode to BIOUL. <END_TASK> <USER_TASK:> Description: def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]: """ Given a tag sequence encoded with IOB1 labels, recode to BIOUL. In the IOB1 scheme, I is a token inside a span, O is a token outside a span and B is the beginning of a span immediately following another span of the same type. In the BIO scheme, I is a token inside a span, O is a token outside a span and B is the beginning of a span. Parameters ---------- tag_sequence : ``List[str]``, required. The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"]. encoding : `str`, optional, (default = ``IOB1``). The encoding type to convert from. Must be either "IOB1" or "BIO". Returns ------- bioul_sequence: ``List[str]`` The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"]. """
if encoding not in {"IOB1", "BIO"}:
    raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")

# pylint: disable=len-as-condition

def replace_label(full_label, new_label):
    # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
    parts = list(full_label.partition('-'))
    parts[0] = new_label
    return ''.join(parts)

def pop_replace_append(in_stack, out_stack, new_label):
    # pop the last element from in_stack, replace the label, append
    # to out_stack
    tag = in_stack.pop()
    new_tag = replace_label(tag, new_label)
    out_stack.append(new_tag)

def process_stack(stack, out_stack):
    # process a stack of labels, add them to out_stack
    if len(stack) == 1:
        # just a U token
        pop_replace_append(stack, out_stack, 'U')
    else:
        # need to code as BIL
        recoded_stack = []
        pop_replace_append(stack, recoded_stack, 'L')
        while len(stack) >= 2:
            pop_replace_append(stack, recoded_stack, 'I')
        pop_replace_append(stack, recoded_stack, 'B')
        recoded_stack.reverse()
        out_stack.extend(recoded_stack)

# Process the tag_sequence one tag at a time, adding spans to a stack,
# then recode them.
bioul_sequence = []
stack: List[str] = []

for label in tag_sequence:
    # need to make a dict like
    # token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
    #          'gold': 'I-PER'}
    # where 'gold' is the raw value from the CoNLL data set
    if label == 'O' and len(stack) == 0:
        bioul_sequence.append(label)
    elif label == 'O' and len(stack) > 0:
        # need to process the entries on the stack plus this one
        process_stack(stack, bioul_sequence)
        bioul_sequence.append(label)
    elif label[0] == 'I':
        # check if the previous type is the same as this one
        # if it is then append to stack
        # otherwise this starts a new entity if the type is different
        if len(stack) == 0:
            if encoding == "BIO":
                raise InvalidTagSequence(tag_sequence)
            stack.append(label)
        else:
            # check if the previous type is the same as this one
            this_type = label.partition('-')[2]
            prev_type = stack[-1].partition('-')[2]
            if this_type == prev_type:
                stack.append(label)
            else:
                if encoding == "BIO":
                    raise InvalidTagSequence(tag_sequence)
                # a new entity
                process_stack(stack, bioul_sequence)
                stack.append(label)
    elif label[0] == 'B':
        if len(stack) > 0:
            process_stack(stack, bioul_sequence)
        stack.append(label)
    else:
        raise InvalidTagSequence(tag_sequence)

# process the stack
if len(stack) > 0:
    process_stack(stack, bioul_sequence)

return bioul_sequence
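A usage sketch of the recoding above; the import path is an assumption (it matches the 0.8-era allennlp layout) and may differ in other versions.

# Assumed import path; adjust to your allennlp version.
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul

print(to_bioul(["I-PER", "I-PER", "O"]))   # expected: ['B-PER', 'L-PER', 'O']
print(to_bioul(["I-ORG", "O", "I-PER"]))   # expected: ['U-ORG', 'O', 'U-PER']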
<SYSTEM_TASK:> Check if a file in this repository exists. <END_TASK> <USER_TASK:> Description: def path_ok(match_tuple: MatchTuple) -> bool: """Check if a file in this repository exists."""
relative_path = match_tuple.link.split("#")[0]
full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path)
<SYSTEM_TASK:> Wraps `os.environ` to filter out non-encodable values. <END_TASK> <USER_TASK:> Description: def _environment_variables() -> Dict[str, str]: """ Wraps `os.environ` to filter out non-encodable values. """
return {key: value for key, value in os.environ.items() if _is_encodable(value)}
<SYSTEM_TASK:> Deep merge two dicts, preferring values from `preferred`. <END_TASK> <USER_TASK:> Description: def with_fallback(preferred: Dict[str, Any], fallback: Dict[str, Any]) -> Dict[str, Any]: """ Deep merge two dicts, preferring values from `preferred`. """
def merge(preferred_value: Any, fallback_value: Any) -> Any:
    if isinstance(preferred_value, dict) and isinstance(fallback_value, dict):
        return with_fallback(preferred_value, fallback_value)
    elif isinstance(preferred_value, dict) and isinstance(fallback_value, list):
        # treat preferred_value as a sparse list, where each key is an index to be overridden
        merged_list = fallback_value
        for elem_key, preferred_element in preferred_value.items():
            try:
                index = int(elem_key)
                merged_list[index] = merge(preferred_element, fallback_value[index])
            except ValueError:
                raise ConfigurationError("could not merge dicts - the preferred dict contains "
                                         f"invalid keys (key {elem_key} is not a valid list index)")
            except IndexError:
                raise ConfigurationError("could not merge dicts - the preferred dict contains "
                                         f"invalid keys (key {index} is out of bounds)")
        return merged_list
    else:
        return copy.deepcopy(preferred_value)

preferred_keys = set(preferred.keys())
fallback_keys = set(fallback.keys())
common_keys = preferred_keys & fallback_keys

merged: Dict[str, Any] = {}

for key in preferred_keys - fallback_keys:
    merged[key] = copy.deepcopy(preferred[key])
for key in fallback_keys - preferred_keys:
    merged[key] = copy.deepcopy(fallback[key])
for key in common_keys:
    preferred_value = preferred[key]
    fallback_value = fallback[key]
    merged[key] = merge(preferred_value, fallback_value)
return merged
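For intuition, a standalone sketch of the same preferred-over-fallback deep merge on plain dicts, covering only the dict-over-dict case (not the sparse-list handling above).

import copy

def deep_merge(preferred, fallback):
    # Keys in preferred win; nested dicts are merged recursively.
    merged = copy.deepcopy(fallback)
    for key, value in preferred.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(value, merged[key])
        else:
            merged[key] = copy.deepcopy(value)
    return merged

overrides = {"model": {"embedding_dim": 10}}
config = {"model": {"embedding_dim": 300, "type": "lstm"}, "trainer": {"num_epochs": 5}}
print(deep_merge(overrides, config))
# {'model': {'embedding_dim': 10, 'type': 'lstm'}, 'trainer': {'num_epochs': 5}}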
<SYSTEM_TASK:> Any class in its ``from_params`` method can request that some of its <END_TASK> <USER_TASK:> Description: def add_file_to_archive(self, name: str) -> None: """ Any class in its ``from_params`` method can request that some of its input files be added to the archive by calling this method. For example, if some class ``A`` had an ``input_file`` parameter, it could call ``` params.add_file_to_archive("input_file") ``` which would store the supplied value for ``input_file`` at the key ``previous.history.and.then.input_file``. The ``files_to_archive`` dict is shared with child instances via the ``_check_is_dict`` method, so that the final mapping can be retrieved from the top-level ``Params`` object. NOTE: You must call ``add_file_to_archive`` before you ``pop()`` the parameter, because the ``Params`` instance looks up the value of the filename inside itself. If the ``loading_from_archive`` flag is True, this will be a no-op. """
if not self.loading_from_archive:
    self.files_to_archive[f"{self.history}{name}"] = cached_path(self.get(name))
<SYSTEM_TASK:> Performs a pop and coerces to an int. <END_TASK> <USER_TASK:> Description: def pop_int(self, key: str, default: Any = DEFAULT) -> int: """ Performs a pop and coerces to an int. """
value = self.pop(key, default)
if value is None:
    return None
else:
    return int(value)
<SYSTEM_TASK:> Performs a pop and coerces to a float. <END_TASK> <USER_TASK:> Description: def pop_float(self, key: str, default: Any = DEFAULT) -> float: """ Performs a pop and coerces to a float. """
value = self.pop(key, default)
if value is None:
    return None
else:
    return float(value)
<SYSTEM_TASK:> Performs a pop and coerces to a bool. <END_TASK> <USER_TASK:> Description: def pop_bool(self, key: str, default: Any = DEFAULT) -> bool: """ Performs a pop and coerces to a bool. """
value = self.pop(key, default)
if value is None:
    return None
elif isinstance(value, bool):
    return value
elif value == "true":
    return True
elif value == "false":
    return False
else:
    raise ValueError("Cannot convert variable to bool: " + value)
<SYSTEM_TASK:> Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of <END_TASK> <USER_TASK:> Description: def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool = False) -> Any: """ Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of the given choices. Note that this `pops` the key from params, modifying the dictionary, consistent with how parameters are processed in this codebase. Parameters ---------- key: str Key to get the value from in the param dictionary choices: List[Any] A list of valid options for values corresponding to ``key``. For example, if you're specifying the type of encoder to use for some part of your model, the choices might be the list of encoder classes we know about and can instantiate. If the value we find in the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because the user specified an invalid value in their parameter file. default_to_first_choice: bool, optional (default=False) If this is ``True``, we allow the ``key`` to not be present in the parameter dictionary. If the key is not present, we will use the first choice in the ``choices`` list as the value. If this is ``False``, we raise a ``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to specify your model class when running an experiment, but you can feel free to use default settings for encoders if you want). """
default = choices[0] if default_to_first_choice else self.DEFAULT
value = self.pop(key, default)
if value not in choices:
    key_str = self.history + key
    message = '%s not in acceptable choices for %s: %s' % (value, key_str, str(choices))
    raise ConfigurationError(message)
return value
<SYSTEM_TASK:> Sometimes we need to just represent the parameters as a dict, for instance when we pass <END_TASK> <USER_TASK:> Description: def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False): """ Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, optional (default = False) If True, we infer types and cast (e.g. things that look like floats to floats). """
if infer_type_and_cast:
    params_as_dict = infer_and_cast(self.params)
else:
    params_as_dict = self.params

if quiet:
    return params_as_dict

def log_recursively(parameters, history):
    for key, value in parameters.items():
        if isinstance(value, dict):
            new_local_history = history + key + "."
            log_recursively(value, new_local_history)
        else:
            logger.info(history + key + " = " + str(value))

logger.info("Converting Params object to dict; logging of default "
            "values will not occur when dictionary parameters are "
            "used subsequently.")
logger.info("CURRENTLY DEFINED PARAMETERS: ")
log_recursively(self.params, self.history)
return params_as_dict
<SYSTEM_TASK:> Returns the parameters of a flat dictionary from keys to values. <END_TASK> <USER_TASK:> Description: def as_flat_dict(self): """ Returns the parameters of a flat dictionary from keys to values. Nested structure is collapsed with periods. """
flat_params = {}

def recurse(parameters, path):
    for key, value in parameters.items():
        newpath = path + [key]
        if isinstance(value, dict):
            recurse(value, newpath)
        else:
            flat_params['.'.join(newpath)] = value

recurse(self.params, [])
return flat_params
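A standalone sketch of the same flattening on a plain nested dict; keys are joined with periods exactly as in as_flat_dict above.

def flatten(parameters, path=()):
    flat = {}
    for key, value in parameters.items():
        newpath = path + (key,)
        if isinstance(value, dict):
            flat.update(flatten(value, newpath))
        else:
            flat['.'.join(newpath)] = value
    return flat

print(flatten({"model": {"type": "lstm", "hidden_size": 200}, "seed": 13}))
# {'model.type': 'lstm', 'model.hidden_size': 200, 'seed': 13}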
<SYSTEM_TASK:> Load a `Params` object from a configuration file. <END_TASK> <USER_TASK:> Description: def from_file(params_file: str, params_overrides: str = "", ext_vars: dict = None) -> 'Params': """ Load a `Params` object from a configuration file. Parameters ---------- params_file : ``str`` The path to the configuration file to load. params_overrides : ``str``, optional A dict of overrides that can be applied to final object. e.g. {"model.embedding_dim": 10} ext_vars : ``dict``, optional Our config files are Jsonnet, which allows specifying external variables for later substitution. Typically we substitute these using environment variables; however, you can also specify them here, in which case they take priority over environment variables. e.g. {"HOME_DIR": "/Users/allennlp/home"} """
if ext_vars is None:
    ext_vars = {}

# redirect to cache, if necessary
params_file = cached_path(params_file)
ext_vars = {**_environment_variables(), **ext_vars}

file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars))

overrides_dict = parse_overrides(params_overrides)
param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict)

return Params(param_dict)
<SYSTEM_TASK:> Returns Ordered Dict of Params from list of partial order preferences. <END_TASK> <USER_TASK:> Description: def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict: """ Returns Ordered Dict of Params from list of partial order preferences. Parameters ---------- preference_orders: List[List[str]], optional ``preference_orders`` is a list of partial preference orders. ["A", "B", "C"] means "A" > "B" > "C". When multiple preference orders are given, earlier ones take precedence. Keys not found in any preference order are sorted last, alphabetically. Default Preferences: ``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"], ["type"]]`` """
params_dict = self.as_dict(quiet=True)

if not preference_orders:
    preference_orders = []
    preference_orders.append(["dataset_reader", "iterator", "model",
                              "train_data_path", "validation_data_path", "test_data_path",
                              "trainer", "vocabulary"])
    preference_orders.append(["type"])

def order_func(key):
    # Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,
    # followed by the key itself. This gives us integer sorting if you have a key in one of the
    # `preference_orders`, followed by alphabetical ordering if not.
    order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders]
    return order_tuple + [key]

def order_dict(dictionary, order_func):
    # Recursively orders dictionary according to scoring order_func
    result = OrderedDict()
    for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
        result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
    return result

return order_dict(params_dict, order_func)
<SYSTEM_TASK:> Clears out the tracked metrics, but keeps the patience and should_decrease settings. <END_TASK> <USER_TASK:> Description: def clear(self) -> None: """ Clears out the tracked metrics, but keeps the patience and should_decrease settings. """
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
<SYSTEM_TASK:> A ``Trainer`` can use this to serialize the state of the metric tracker. <END_TASK> <USER_TASK:> Description: def state_dict(self) -> Dict[str, Any]: """ A ``Trainer`` can use this to serialize the state of the metric tracker. """
return {
    "best_so_far": self._best_so_far,
    "patience": self._patience,
    "epochs_with_no_improvement": self._epochs_with_no_improvement,
    "is_best_so_far": self._is_best_so_far,
    "should_decrease": self._should_decrease,
    "best_epoch_metrics": self.best_epoch_metrics,
    "epoch_number": self._epoch_number,
    "best_epoch": self.best_epoch
}
<SYSTEM_TASK:> Record a new value of the metric and update the various things that depend on it. <END_TASK> <USER_TASK:> Description: def add_metric(self, metric: float) -> None: """ Record a new value of the metric and update the various things that depend on it. """
new_best = ((self._best_so_far is None) or
            (self._should_decrease and metric < self._best_so_far) or
            (not self._should_decrease and metric > self._best_so_far))

if new_best:
    self.best_epoch = self._epoch_number
    self._is_best_so_far = True
    self._best_so_far = metric
    self._epochs_with_no_improvement = 0
else:
    self._is_best_so_far = False
    self._epochs_with_no_improvement += 1
self._epoch_number += 1
<SYSTEM_TASK:> Helper to add multiple metrics at once. <END_TASK> <USER_TASK:> Description: def add_metrics(self, metrics: Iterable[float]) -> None: """ Helper to add multiple metrics at once. """
for metric in metrics: self.add_metric(metric)
<SYSTEM_TASK:> Returns true if improvement has stopped for long enough. <END_TASK> <USER_TASK:> Description: def should_stop_early(self) -> bool: """ Returns true if improvement has stopped for long enough. """
if self._patience is None:
    return False
else:
    return self._epochs_with_no_improvement >= self._patience
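Putting add_metric and should_stop_early together: a simplified, hypothetical stand-in (not the MetricTracker itself) showing how patience behaves for a metric that should increase.

class SimpleTracker:
    # Simplified stand-in: tracks a metric that should increase, with a patience window.
    def __init__(self, patience):
        self.patience = patience
        self.best = None
        self.bad_epochs = 0

    def add_metric(self, metric):
        if self.best is None or metric > self.best:
            self.best = metric
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1

    def should_stop_early(self):
        return self.bad_epochs >= self.patience

tracker = SimpleTracker(patience=2)
for accuracy in [0.70, 0.75, 0.74, 0.73, 0.76]:
    tracker.add_metric(accuracy)
    print(accuracy, tracker.should_stop_early())
# Stopping is signalled after two epochs without improvement (0.74, 0.73);
# the 0.76 epoch resets the counter.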
<SYSTEM_TASK:> Archive the model weights, its training configuration, and its <END_TASK> <USER_TASK:> Description: def archive_model(serialization_dir: str, weights: str = _DEFAULT_WEIGHTS, files_to_archive: Dict[str, str] = None, archive_path: str = None) -> None: """ Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`. Include the additional ``files_to_archive`` if provided. Parameters ---------- serialization_dir: ``str`` The directory where the weights and vocabulary are written out. weights: ``str``, optional (default=_DEFAULT_WEIGHTS) Which weights file to include in the archive. The default is ``best.th``. files_to_archive: ``Dict[str, str]``, optional (default=None) A mapping {flattened_key -> filename} of supplementary files to include in the archive. That is, if you wanted to include ``params['model']['weights']`` then you would specify the key as `"model.weights"`. archive_path : ``str``, optional, (default = None) A full path to serialize the model to. The default is "model.tar.gz" inside the serialization_dir. If you pass a directory here, we'll serialize the model to "model.tar.gz" inside the directory. """
weights_file = os.path.join(serialization_dir, weights)
if not os.path.exists(weights_file):
    logger.error("weights file %s does not exist, unable to archive model", weights_file)
    return

config_file = os.path.join(serialization_dir, CONFIG_NAME)
if not os.path.exists(config_file):
    logger.error("config file %s does not exist, unable to archive model", config_file)

# If there are files we want to archive, write out the mapping
# so that we can use it during de-archiving.
if files_to_archive:
    fta_filename = os.path.join(serialization_dir, _FTA_NAME)
    with open(fta_filename, 'w') as fta_file:
        fta_file.write(json.dumps(files_to_archive))

if archive_path is not None:
    archive_file = archive_path
    if os.path.isdir(archive_file):
        archive_file = os.path.join(archive_file, "model.tar.gz")
else:
    archive_file = os.path.join(serialization_dir, "model.tar.gz")

logger.info("archiving weights and vocabulary to %s", archive_file)
with tarfile.open(archive_file, 'w:gz') as archive:
    archive.add(config_file, arcname=CONFIG_NAME)
    archive.add(weights_file, arcname=_WEIGHTS_NAME)
    archive.add(os.path.join(serialization_dir, "vocabulary"), arcname="vocabulary")

    # If there are supplemental files to archive:
    if files_to_archive:
        # Archive the { flattened_key -> original_filename } mapping.
        archive.add(fta_filename, arcname=_FTA_NAME)
        # And add each requested file to the archive.
        for key, filename in files_to_archive.items():
            archive.add(filename, arcname=f"fta/{key}")
<SYSTEM_TASK:> Instantiates an Archive from an archived `tar.gz` file. <END_TASK> <USER_TASK:> Description: def load_archive(archive_file: str, cuda_device: int = -1, overrides: str = "", weights_file: str = None) -> Archive: """ Instantiates an Archive from an archived `tar.gz` file. Parameters ---------- archive_file: ``str`` The archive file to load the model from. weights_file: ``str``, optional (default = None) The weights file to use. If unspecified, weights.th in the archive_file will be used. cuda_device: ``int``, optional (default = -1) If `cuda_device` is >= 0, the model will be loaded onto the corresponding GPU. Otherwise it will be loaded onto the CPU. overrides: ``str``, optional (default = "") JSON overrides to apply to the unarchived ``Params`` object. """
# redirect to the cache, if necessary
resolved_archive_file = cached_path(archive_file)

if resolved_archive_file == archive_file:
    logger.info(f"loading archive file {archive_file}")
else:
    logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}")

if os.path.isdir(resolved_archive_file):
    serialization_dir = resolved_archive_file
else:
    # Extract archive to temp dir
    tempdir = tempfile.mkdtemp()
    logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}")
    with tarfile.open(resolved_archive_file, 'r:gz') as archive:
        archive.extractall(tempdir)
    # Postpone cleanup until exit in case the unarchived contents are needed outside
    # this function.
    atexit.register(_cleanup_archive_dir, tempdir)

    serialization_dir = tempdir

# Check for supplemental files in archive
fta_filename = os.path.join(serialization_dir, _FTA_NAME)
if os.path.exists(fta_filename):
    with open(fta_filename, 'r') as fta_file:
        files_to_archive = json.loads(fta_file.read())

    # Add these replacements to overrides
    replacements_dict: Dict[str, Any] = {}
    for key, original_filename in files_to_archive.items():
        replacement_filename = os.path.join(serialization_dir, f"fta/{key}")
        if os.path.exists(replacement_filename):
            replacements_dict[key] = replacement_filename
        else:
            logger.warning(f"Archived file {replacement_filename} not found! At train time "
                           f"this file was located at {original_filename}. This may be "
                           "because you are loading a serialization directory. Attempting to "
                           "load the file from its train-time location.")

    overrides_dict = parse_overrides(overrides)
    combined_dict = with_fallback(preferred=overrides_dict, fallback=unflatten(replacements_dict))
    overrides = json.dumps(combined_dict)

# Load config
config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides)
config.loading_from_archive = True

if weights_file:
    weights_path = weights_file
else:
    weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME)
    # Fallback for serialization directories.
    if not os.path.exists(weights_path):
        weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS)

# Instantiate model. Use a duplicate of the config, as it will get consumed.
model = Model.load(config.duplicate(),
                   weights_file=weights_path,
                   serialization_dir=serialization_dir,
                   cuda_device=cuda_device)

return Archive(model=model, config=config)
<SYSTEM_TASK:> This method can be used to load a module from the pretrained model archive. <END_TASK> <USER_TASK:> Description: def extract_module(self, path: str, freeze: bool = True) -> Module: """ This method can be used to load a module from the pretrained model archive. It is also used implicitly in FromParams based construction. So instead of using standard params to construct a module, you can instead load a pretrained module from the model archive directly. For example, instead of using params like {"type": "module_type", ...}, you can use the following template:: { "_pretrained": { "archive_file": "../path/to/model.tar.gz", "path": "path.to.module.in.model", "freeze": False } } If you use this feature with FromParams, take care of the following caveat: a call to initializer(self) at the end of the model initializer can potentially wipe the transferred parameters by reinitializing them. This can happen if you have set up an initializer regex that also matches parameters of the transferred module. To safeguard against this, you can either update your initializer regex to prevent a conflicting match or add an extra initializer:: [[".*transferred_module_name.*", "prevent"]] Parameters ---------- path : ``str``, required Path of target module to be loaded from the model. Eg. "_textfield_embedder.token_embedder_tokens" freeze : ``bool``, optional (default=True) Whether to freeze the module parameters or not. """
modules_dict = {path: module for path, module in self.model.named_modules()}
module = modules_dict.get(path, None)

if not module:
    raise ConfigurationError(f"You asked to transfer module at path {path} from "
                             f"the model {type(self.model)}. But it's not present.")
if not isinstance(module, Module):
    raise ConfigurationError(f"The transferred object from model {type(self.model)} at path "
                             f"{path} is not a PyTorch Module.")

for parameter in module.parameters():  # type: ignore
    parameter.requires_grad_(not freeze)
return module
<SYSTEM_TASK:> Takes a list of possible actions and indices of decoded actions into those possible actions <END_TASK> <USER_TASK:> Description: def _get_action_strings(cls, possible_actions: List[List[ProductionRule]], action_indices: Dict[int, List[List[int]]]) -> List[List[List[str]]]: """ Takes a list of possible actions and indices of decoded actions into those possible actions for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict mapping batch indices to k-best decoded sequence lists. """
all_action_strings: List[List[List[str]]] = []
batch_size = len(possible_actions)
for i in range(batch_size):
    batch_actions = possible_actions[i]
    batch_best_sequences = action_indices[i] if i in action_indices else []
    # This will append an empty list to ``all_action_strings`` if ``batch_best_sequences``
    # is empty.
    action_strings = [[batch_actions[rule_id][0] for rule_id in sequence]
                      for sequence in batch_best_sequences]
    all_action_strings.append(action_strings)
return all_action_strings
<SYSTEM_TASK:> This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test <END_TASK> <USER_TASK:> Description: def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """ This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here. """
best_action_strings = output_dict["best_action_strings"]
# Instantiating an empty world for getting logical forms.
world = NlvrLanguage(set())
logical_forms = []
for instance_action_sequences in best_action_strings:
    instance_logical_forms = []
    for action_strings in instance_action_sequences:
        if action_strings:
            instance_logical_forms.append(world.action_sequence_to_logical_form(action_strings))
        else:
            instance_logical_forms.append('')
    logical_forms.append(instance_logical_forms)

action_mapping = output_dict['action_mapping']
best_actions = output_dict['best_action_strings']
debug_infos = output_dict['debug_info']
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):
    instance_action_info = []
    for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info):
        action_info = {}
        action_info['predicted_action'] = predicted_action
        considered_actions = action_debug_info['considered_actions']
        probabilities = action_debug_info['probabilities']
        actions = []
        for action, probability in zip(considered_actions, probabilities):
            if action != -1:
                actions.append((action_mapping[(batch_index, action)], probability))
        actions.sort()
        considered_actions, probabilities = zip(*actions)
        action_info['considered_actions'] = considered_actions
        action_info['action_probabilities'] = probabilities
        action_info['question_attention'] = action_debug_info.get('question_attention', [])
        instance_action_info.append(action_info)
    batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
output_dict["logical_form"] = logical_forms
return output_dict
<SYSTEM_TASK:> Returns whether action history in the state evaluates to the correct denotations over all <END_TASK> <USER_TASK:> Description: def _check_state_denotations(self, state: GrammarBasedState, worlds: List[NlvrLanguage]) -> List[bool]: """ Returns whether action history in the state evaluates to the correct denotations over all worlds. Only defined when the state is finished. """
assert state.is_finished(), "Cannot compute denotations for unfinished states!"
# Since this is a finished state, its group size must be 1.
batch_index = state.batch_indices[0]
instance_label_strings = state.extras[batch_index]
history = state.action_history[0]
all_actions = state.possible_actions[0]
action_sequence = [all_actions[action][0] for action in history]
return self._check_denotation(action_sequence, instance_label_strings, worlds)
<SYSTEM_TASK:> Start learning rate finder for given args <END_TASK> <USER_TASK:> Description: def find_learning_rate_from_args(args: argparse.Namespace) -> None: """ Start learning rate finder for given args """
params = Params.from_file(args.param_path, args.overrides)
find_learning_rate_model(params, args.serialization_dir,
                         start_lr=args.start_lr,
                         end_lr=args.end_lr,
                         num_batches=args.num_batches,
                         linear_steps=args.linear,
                         stopping_factor=args.stopping_factor,
                         force=args.force)
<SYSTEM_TASK:> Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir`` <END_TASK> <USER_TASK:> Description: def find_learning_rate_model(params: Params, serialization_dir: str, start_lr: float = 1e-5, end_lr: float = 10, num_batches: int = 100, linear_steps: bool = False, stopping_factor: float = None, force: bool = False) -> None: """ Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir`` Parameters ---------- params : ``Params`` A parameter object specifying an AllenNLP Experiment. serialization_dir : ``str`` The directory in which to save results. start_lr: ``float`` Learning rate at which to start the search. end_lr: ``float`` Learning rate up to which the search is done. num_batches: ``int`` Number of mini-batches to run the learning rate finder for. linear_steps: ``bool`` If ``True``, increase the learning rate linearly; otherwise increase it exponentially. stopping_factor: ``float`` Stop the search when the current loss exceeds the best loss recorded by a multiple of the stopping factor. If ``None``, the search proceeds till the ``end_lr``. force: ``bool`` If True and the serialization directory already exists, everything in it will be removed prior to finding the learning rate. """
if os.path.exists(serialization_dir) and force:
    shutil.rmtree(serialization_dir)

if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
    raise ConfigurationError(f'Serialization directory {serialization_dir} already exists and is '
                             f'not empty.')
else:
    os.makedirs(serialization_dir, exist_ok=True)

prepare_environment(params)

cuda_device = params.params.get('trainer').get('cuda_device', -1)
check_for_gpu(cuda_device)

all_datasets = datasets_from_params(params)
datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))

for dataset in datasets_for_vocab_creation:
    if dataset not in all_datasets:
        raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")

logger.info("From dataset instances, %s will be considered for vocabulary creation.",
            ", ".join(datasets_for_vocab_creation))
vocab = Vocabulary.from_params(
        params.pop("vocabulary", {}),
        (instance for key, dataset in all_datasets.items()
         for instance in dataset
         if key in datasets_for_vocab_creation)
)

model = Model.from_params(vocab=vocab, params=params.pop('model'))
iterator = DataIterator.from_params(params.pop("iterator"))
iterator.index_with(vocab)

train_data = all_datasets['train']

trainer_params = params.pop("trainer")
no_grad_regexes = trainer_params.pop("no_grad", ())
for name, parameter in model.named_parameters():
    if any(re.search(regex, name) for regex in no_grad_regexes):
        parameter.requires_grad_(False)

trainer_choice = trainer_params.pop("type", "default")
if trainer_choice != "default":
    raise ConfigurationError("currently find-learning-rate only works with the default Trainer")
trainer = Trainer.from_params(model=model,
                              serialization_dir=serialization_dir,
                              iterator=iterator,
                              train_data=train_data,
                              validation_data=None,
                              params=trainer_params,
                              validation_iterator=None)

logger.info(f'Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations.')
learning_rates, losses = search_learning_rate(trainer,
                                              start_lr=start_lr,
                                              end_lr=end_lr,
                                              num_batches=num_batches,
                                              linear_steps=linear_steps,
                                              stopping_factor=stopping_factor)
logger.info(f'Finished learning rate search.')
losses = _smooth(losses, 0.98)

_save_plot(learning_rates, losses, os.path.join(serialization_dir, 'lr-losses.png'))
<SYSTEM_TASK:> Compute a weighted average of the ``tensors``. The input tensors can be any shape <END_TASK> <USER_TASK:> Description: def forward(self, tensors: List[torch.Tensor], # pylint: disable=arguments-differ mask: torch.Tensor = None) -> torch.Tensor: """ Compute a weighted average of the ``tensors``. The input tensors can be any shape with at least two dimensions, but must all be the same shape. When ``do_layer_norm=True``, the ``mask`` is a required input. If the ``tensors`` are dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned ``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape ``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``. When ``do_layer_norm=False`` the ``mask`` is ignored. """
if len(tensors) != self.mixture_size: raise ConfigurationError("{} tensors were passed, but the module was initialized to " "mix {} tensors.".format(len(tensors), self.mixture_size)) def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked): tensor_masked = tensor * broadcast_mask mean = torch.sum(tensor_masked) / num_elements_not_masked variance = torch.sum(((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked return (tensor - mean) / torch.sqrt(variance + 1E-12) normed_weights = torch.nn.functional.softmax(torch.cat([parameter for parameter in self.scalar_parameters]), dim=0) normed_weights = torch.split(normed_weights, split_size_or_sections=1) if not self.do_layer_norm: pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * tensor) return self.gamma * sum(pieces) else: mask_float = mask.float() broadcast_mask = mask_float.unsqueeze(-1) input_dim = tensors[0].size(-1) num_elements_not_masked = torch.sum(mask_float) * input_dim pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked)) return self.gamma * sum(pieces)
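A minimal usage sketch (not part of the original source), assuming the module above is the scalar mix exposed as allennlp.modules.scalar_mix.ScalarMix; with the default do_layer_norm=False no mask is needed.
import torch
from allennlp.modules.scalar_mix import ScalarMix  # assumed import path

mixture = ScalarMix(mixture_size=3)                # must match the number of tensors passed in
layers = [torch.randn(2, 5, 7) for _ in range(3)]  # three tensors of shape (batch, timesteps, dim)
mixed = mixture(layers)                            # weighted average, same shape as each input
assert mixed.shape == (2, 5, 7)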
<SYSTEM_TASK:> Executes a logical form, using whatever predicates you have defined. <END_TASK> <USER_TASK:> Description: def execute(self, logical_form: str): """Executes a logical form, using whatever predicates you have defined."""
if not hasattr(self, '_functions'): raise RuntimeError("You must call super().__init__() in your Language constructor") logical_form = logical_form.replace(",", " ") expression = util.lisp_to_nested_expression(logical_form) return self._execute_expression(expression)
<SYSTEM_TASK:> Induces a grammar from the defined collection of predicates in this language and returns <END_TASK> <USER_TASK:> Description: def get_nonterminal_productions(self) -> Dict[str, List[str]]: """ Induces a grammar from the defined collection of predicates in this language and returns all productions in that grammar, keyed by the non-terminal they are expanding. This includes terminal productions implied by each predicate as well as productions for the `return type` of each defined predicate. For example, defining a "multiply" predicate adds a "<int,int:int> -> multiply" terminal production to the grammar, and `also` a "int -> [<int,int:int>, int, int]" non-terminal production, because I can use the "multiply" predicate to produce an int. """
if not self._nonterminal_productions: actions: Dict[str, Set[str]] = defaultdict(set) # If you didn't give us a set of valid start types, we'll assume all types we know # about (including functional types) are valid start types. if self._start_types: start_types = self._start_types else: start_types = set() for type_list in self._function_types.values(): start_types.update(type_list) for start_type in start_types: actions[START_SYMBOL].add(f"{START_SYMBOL} -> {start_type}") for name, function_type_list in self._function_types.items(): for function_type in function_type_list: actions[str(function_type)].add(f"{function_type} -> {name}") if isinstance(function_type, FunctionType): return_type = function_type.return_type arg_types = function_type.argument_types right_side = f"[{function_type}, {', '.join(str(arg_type) for arg_type in arg_types)}]" actions[str(return_type)].add(f"{return_type} -> {right_side}") self._nonterminal_productions = {key: sorted(value) for key, value in actions.items()} return self._nonterminal_productions
<SYSTEM_TASK:> Converts a logical form into a linearization of the production rules from its abstract <END_TASK> <USER_TASK:> Description: def logical_form_to_action_sequence(self, logical_form: str) -> List[str]: """ Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``. """
expression = util.lisp_to_nested_expression(logical_form) try: transitions, start_type = self._get_transitions(expression, expected_type=None) if self._start_types and start_type not in self._start_types: raise ParsingError(f"Expression had unallowed start type of {start_type}: {expression}") except ParsingError: logger.error(f'Error parsing logical form: {logical_form}') raise transitions.insert(0, f'@start@ -> {start_type}') return transitions
<SYSTEM_TASK:> Determines whether an input symbol is a valid non-terminal in the grammar. <END_TASK> <USER_TASK:> Description: def is_nonterminal(self, symbol: str) -> bool: """ Determines whether an input symbol is a valid non-terminal in the grammar. """
nonterminal_productions = self.get_nonterminal_productions() return symbol in nonterminal_productions
<SYSTEM_TASK:> This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the <END_TASK> <USER_TASK:> Description: def pad_token_sequence(self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int]) -> Dict[str, List[TokenType]]: """ This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the input tokens. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. """
raise NotImplementedError
<SYSTEM_TASK:> The CONLL 2012 data includes 2 annotated spans which are identical, <END_TASK> <USER_TASK:> Description: def canonicalize_clusters(clusters: DefaultDict[int, List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]: """ The CONLL 2012 data includes 2 annotated spans which are identical, but have different ids. This checks all clusters for spans which are identical, and if it finds any, merges the clusters containing the identical spans. """
merged_clusters: List[Set[Tuple[int, int]]] = [] for cluster in clusters.values(): cluster_with_overlapping_mention = None for mention in cluster: # Look at clusters we have already processed to # see if they contain a mention in the current # cluster for comparison. for cluster2 in merged_clusters: if mention in cluster2: # first cluster in merged clusters # which contains this mention. cluster_with_overlapping_mention = cluster2 break # Already encountered overlap - no need to keep looking. if cluster_with_overlapping_mention is not None: break if cluster_with_overlapping_mention is not None: # Merge cluster we are currently processing into # the cluster in the processed list. cluster_with_overlapping_mention.update(cluster) else: merged_clusters.append(set(cluster)) return [list(c) for c in merged_clusters]
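A small worked example with hypothetical spans (assuming the canonicalize_clusters function above is in scope): two clusters share the span (4, 5) and are therefore merged.
from collections import defaultdict

clusters = defaultdict(list)
clusters[0] = [(0, 1), (4, 5)]
clusters[1] = [(4, 5), (8, 9)]   # duplicates the (4, 5) span under a different id
clusters[2] = [(12, 13)]

merged = canonicalize_clusters(clusters)
# merged holds two clusters: one with (0, 1), (4, 5), (8, 9) and one with (12, 13)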
<SYSTEM_TASK:> Return the word indices of a predicate in BIO tags. <END_TASK> <USER_TASK:> Description: def get_predicate_indices(tags: List[str]) -> List[int]: """ Return the word indices of a predicate in BIO tags. """
return [ind for ind, tag in enumerate(tags) if 'V' in tag]
<SYSTEM_TASK:> Get the predicate in this prediction. <END_TASK> <USER_TASK:> Description: def get_predicate_text(sent_tokens: List[Token], tags: List[str]) -> str: """ Get the predicate in this prediction. """
return " ".join([sent_tokens[pred_id].text for pred_id in get_predicate_indices(tags)])
<SYSTEM_TASK:> Tests whether the predicates in BIO tags1 overlap <END_TASK> <USER_TASK:> Description: def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool: """ Tests whether the predicates in BIO tags1 overlap with those of tags2. """
# Get predicate word indices from both predictions pred_ind1 = get_predicate_indices(tags1) pred_ind2 = get_predicate_indices(tags2) # Return whether pred_ind1 and pred_ind2 overlap return any(set.intersection(set(pred_ind1), set(pred_ind2)))
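A quick sketch with made-up BIO tags (assuming get_predicate_indices and predicates_overlap above are in scope): both sequences mark token 1 as part of the predicate, so they overlap.
tags_a = ["B-ARG0", "B-V", "I-V", "O"]
tags_b = ["O", "B-V", "O", "B-ARG1"]

print(get_predicate_indices(tags_a))       # [1, 2]
print(get_predicate_indices(tags_b))       # [1]
print(predicates_overlap(tags_a, tags_b))  # True - index 1 is shared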
<SYSTEM_TASK:> Generate a coherent tag, given previous tag and current label. <END_TASK> <USER_TASK:> Description: def get_coherent_next_tag(prev_label: str, cur_label: str) -> str: """ Generate a coherent tag, given previous tag and current label. """
if cur_label == "O": # Don't need to add prefix to an "O" label return "O" if prev_label == cur_label: return f"I-{cur_label}" else: return f"B-{cur_label}"
<SYSTEM_TASK:> Merge two predictions into one. Assumes the predicate in tags1 overlap with <END_TASK> <USER_TASK:> Description: def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]: """ Merge two predictions into one. Assumes the predicate in tags1 overlap with the predicate of tags2. """
ret_sequence = [] prev_label = "O" # Build a coherent sequence out of two # spans whose predicates overlap for tag1, tag2 in zip(tags1, tags2): label1 = tag1.split("-")[-1] label2 = tag2.split("-")[-1] if (label1 == "V") or (label2 == "V"): # Construct maximal predicate length - # add a predicate tag if either sequence predicts it cur_label = "V" # Else - prefer an argument over 'O' label elif label1 != "O": cur_label = label1 else: cur_label = label2 # Append cur tag to the returned sequence cur_tag = get_coherent_next_tag(prev_label, cur_label) prev_label = cur_label ret_sequence.append(cur_tag) return ret_sequence
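A worked example with hypothetical tag sequences (assuming the two helpers above are in scope); the merged output keeps the maximal predicate span and prefers argument labels over 'O'.
tags1 = ["B-ARG0", "B-V", "O", "O"]
tags2 = ["O", "B-V", "I-V", "B-ARG1"]

print(merge_overlapping_predictions(tags1, tags2))
# ['B-ARG0', 'B-V', 'I-V', 'B-ARG1']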
<SYSTEM_TASK:> Sanitize a BIO label - this deals with OIE <END_TASK> <USER_TASK:> Description: def sanitize_label(label: str) -> str: """ Sanitize a BIO label - this deals with OIE labels sometimes having some noise, such as parentheses. """
if "-" in label: prefix, suffix = label.split("-") suffix = suffix.split("(")[-1] return f"{prefix}-{suffix}" else: return label
<SYSTEM_TASK:> Given a list of tokens, this method precomputes word representations <END_TASK> <USER_TASK:> Description: def create_cached_cnn_embeddings(self, tokens: List[str]) -> None: """ Given a list of tokens, this method precomputes word representations by running just the character convolutions and highway layers of elmo, essentially creating uncontextual word vectors. On subsequent forward passes, the word ids are looked up from an embedding, rather than being computed on the fly via the CNN encoder. This function sets 3 attributes: _word_embedding : ``torch.Tensor`` The word embedding for each word in the tokens passed to this method. _bos_embedding : ``torch.Tensor`` The embedding for the BOS token. _eos_embedding : ``torch.Tensor`` The embedding for the EOS token. Parameters ---------- tokens : ``List[str]``, required. A list of tokens to precompute character convolutions for. """
tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens timesteps = 32 batch_size = 32 chunked_tokens = lazy_groups_of(iter(tokens), timesteps) all_embeddings = [] device = get_device_of(next(self.parameters())) for batch in lazy_groups_of(chunked_tokens, batch_size): # Shape (batch_size, timesteps, 50) batched_tensor = batch_to_ids(batch) # NOTE: This device check is for when a user calls this method having # already placed the model on a device. If this is called in the # constructor, it will probably happen on the CPU. This isn't too bad, # because it's only a few convolutions and will likely be very fast. if device >= 0: batched_tensor = batched_tensor.cuda(device) output = self._token_embedder(batched_tensor) token_embedding = output["token_embedding"] mask = output["mask"] token_embedding, _ = remove_sentence_boundaries(token_embedding, mask) all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1))) full_embedding = torch.cat(all_embeddings, 0) # We might have some trailing embeddings from padding in the batch, so # we clip the embedding and lookup to the right size. full_embedding = full_embedding[:len(tokens), :] embedding = full_embedding[2:len(tokens), :] vocab_size, embedding_dim = list(embedding.size()) from allennlp.modules.token_embedders import Embedding # type: ignore self._bos_embedding = full_embedding[0, :] self._eos_embedding = full_embedding[1, :] self._word_embedding = Embedding(vocab_size, # type: ignore embedding_dim, weight=embedding.data, trainable=self._requires_grad, padding_index=0)
<SYSTEM_TASK:> Performs a normalization that is very similar to that done by the normalization functions in <END_TASK> <USER_TASK:> Description: def normalize_text(text: str) -> str: """ Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation. """
return ' '.join([token for token in text.lower().strip(STRIPPED_CHARACTERS).split() if token not in IGNORED_TOKENS])
<SYSTEM_TASK:> Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This <END_TASK> <USER_TASK:> Description: def find_valid_answer_spans(passage_tokens: List[Token], answer_texts: List[str]) -> List[Tuple[int, int]]: """ Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official evaluation scripts, which do some normalization of the input text. Note that this could return duplicate spans! The caller is expected to be able to handle possible duplicates (as already happens in the SQuAD dev set, for instance). """
normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens] # Because there could be many `answer_texts`, we'll do the most expensive pre-processing # step once. This gives us a map from tokens to the position in the passage they appear. word_positions: Dict[str, List[int]] = defaultdict(list) for i, token in enumerate(normalized_tokens): word_positions[token].append(i) spans = [] for answer_text in answer_texts: # For each answer, we'll first find all valid start positions in the passage. Then # we'll grow each span to the same length as the number of answer tokens, and see if we # have a match. We're a little tricky as we grow the span, skipping words that are # already pruned from the normalized answer text, and stopping early if we don't match. answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split() num_answer_tokens = len(answer_tokens) for span_start in word_positions[answer_tokens[0]]: span_end = span_start # span_end is _inclusive_ answer_index = 1 while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens): token = normalized_tokens[span_end + 1] if answer_tokens[answer_index] == token: answer_index += 1 span_end += 1 elif token in IGNORED_TOKENS: span_end += 1 else: break if num_answer_tokens == answer_index: spans.append((span_start, span_end)) return spans
<SYSTEM_TASK:> Process a list of reference answers. <END_TASK> <USER_TASK:> Description: def handle_cannot(reference_answers: List[str]): """ Process a list of reference answers. If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold. Otherwise, return answers that are not "CANNOTANSWER". """
num_cannot = 0 num_spans = 0 for ref in reference_answers: if ref == 'CANNOTANSWER': num_cannot += 1 else: num_spans += 1 if num_cannot >= num_spans: reference_answers = ['CANNOTANSWER'] else: reference_answers = [x for x in reference_answers if x != 'CANNOTANSWER'] return reference_answers
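Two illustrative calls with hypothetical reference answers (assuming handle_cannot above is in scope):
print(handle_cannot(["CANNOTANSWER", "Paris", "in Paris"]))
# ['Paris', 'in Paris'] - the minority CANNOTANSWER votes are dropped
print(handle_cannot(["CANNOTANSWER", "CANNOTANSWER", "Paris"]))
# ['CANNOTANSWER'] - at least half of the references say the question is unanswerable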
<SYSTEM_TASK:> Spacy needs to do batch processing, or it can be really slow. This method lets you take <END_TASK> <USER_TASK:> Description: def batch_split_words(self, sentences: List[str]) -> List[List[Token]]: """ Spacy needs to do batch processing, or it can be really slow. This method lets you take advantage of that if you want. The default implementation just iterates over the sentences and calls ``split_words``, but the ``SpacyWordSplitter`` will actually do batched processing. """
return [self.split_words(sentence) for sentence in sentences]
<SYSTEM_TASK:> Return a new BeamSearch instance that's like this one but with the specified constraint. <END_TASK> <USER_TASK:> Description: def constrained_to(self, initial_sequence: torch.Tensor, keep_beam_details: bool = True) -> 'BeamSearch': """ Return a new BeamSearch instance that's like this one but with the specified constraint. """
return BeamSearch(self._beam_size, self._per_node_beam_size, initial_sequence, keep_beam_details)
<SYSTEM_TASK:> Lower text and remove punctuation, articles and extra whitespace. <END_TASK> <USER_TASK:> Description: def _normalize_answer(text: str) -> str: """Lower text and remove punctuation, articles and extra whitespace."""
parts = [_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) for token in _tokenize(text)] parts = [part for part in parts if part.strip()] normalized = ' '.join(parts).strip() return normalized
<SYSTEM_TASK:> Takes gold and predicted answer sets and first finds a greedy 1-1 alignment <END_TASK> <USER_TASK:> Description: def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: """ Takes gold and predicted answer sets and first finds a greedy 1-1 alignment between them and gets maximum metric values over all the answers """
f1_scores = [] for gold_index, gold_item in enumerate(gold): max_f1 = 0.0 max_index = None best_alignment: Tuple[Set[str], Set[str]] = (set(), set()) if predicted: for pred_index, pred_item in enumerate(predicted): current_f1 = _compute_f1(pred_item, gold_item) if current_f1 >= max_f1: best_alignment = (gold_item, pred_item) max_f1 = current_f1 max_index = pred_index match_flag = _match_numbers_if_present(*best_alignment) gold[gold_index] = set() predicted[max_index] = set() else: match_flag = False if match_flag: f1_scores.append(max_f1) else: f1_scores.append(0.0) return f1_scores
<SYSTEM_TASK:> Takes an answer JSON blob from the DROP data release and converts it into strings used for <END_TASK> <USER_TASK:> Description: def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: """ Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation. """
if "number" in answer and answer["number"]: return tuple([str(answer["number"])]), "number" elif "spans" in answer and answer["spans"]: return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans" elif "date" in answer: return tuple(["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"])]), "date" else: raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}")
<SYSTEM_TASK:> Returns an ``Iterable`` containing all the instances <END_TASK> <USER_TASK:> Description: def read(self, file_path: str) -> Iterable[Instance]: """ Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable. """
lazy = getattr(self, 'lazy', None) if lazy is None: logger.warning("DatasetReader.lazy is not set, " "did you forget to call the superclass constructor?") if self._cache_directory: cache_file = self._get_cache_location_for_file_path(file_path) else: cache_file = None if lazy: return _LazyInstances(lambda: self._read(file_path), cache_file, self.deserialize_instance, self.serialize_instance) else: # First we read the instances, either from a cache or from the original file. if cache_file and os.path.exists(cache_file): instances = self._instances_from_cache_file(cache_file) else: instances = self._read(file_path) # Then some validation. if not isinstance(instances, list): instances = [instance for instance in Tqdm.tqdm(instances)] if not instances: raise ConfigurationError("No instances were read from the given filepath {}. " "Is the path correct?".format(file_path)) # And finally we write to the cache if we need to. if cache_file and not os.path.exists(cache_file): logger.info(f"Caching instances to {cache_file}") with open(cache_file, 'w') as cache: for instance in Tqdm.tqdm(instances): cache.write(self.serialize_instance(instance) + '\n') return instances
<SYSTEM_TASK:> Prints predicate argument predictions and gold labels for a single verbal <END_TASK> <USER_TASK:> Description: def write_to_conll_eval_file(prediction_file: TextIO, gold_file: TextIO, verb_index: Optional[int], sentence: List[str], prediction: List[str], gold_labels: List[str]): """ Prints predicate argument predictions and gold labels for a single verbal predicate in a sentence to two provided file references. Parameters ---------- prediction_file : TextIO, required. A file reference to print predictions to. gold_file : TextIO, required. A file reference to print gold labels to. verb_index : Optional[int], required. The index of the verbal predicate in the sentence which the gold labels are the arguments for, or None if the sentence contains no verbal predicate. sentence : List[str], required. The word tokens. prediction : List[str], required. The predicted BIO labels. gold_labels : List[str], required. The gold BIO labels. """
verb_only_sentence = ["-"] * len(sentence) if verb_index is not None: verb_only_sentence[verb_index] = sentence[verb_index] conll_format_predictions = convert_bio_tags_to_conll_format(prediction) conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels) for word, predicted, gold in zip(verb_only_sentence, conll_format_predictions, conll_format_gold_labels): prediction_file.write(word.ljust(15)) prediction_file.write(predicted.rjust(15) + "\n") gold_file.write(word.ljust(15)) gold_file.write(gold.rjust(15) + "\n") prediction_file.write("\n") gold_file.write("\n")
<SYSTEM_TASK:> Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The <END_TASK> <USER_TASK:> Description: def get_agenda_for_sentence(self, sentence: str) -> List[str]: """ Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The ``agenda`` can be used by a parser to guide the decoder. This is a simplistic mapping at this point, and can be expanded. Parameters ---------- sentence : ``str`` The sentence for which an agenda will be produced. """
agenda = [] sentence = sentence.lower() if sentence.startswith("there is a box") or sentence.startswith("there is a tower "): agenda.append(self.terminal_productions["box_exists"]) elif sentence.startswith("there is a "): agenda.append(self.terminal_productions["object_exists"]) if "<Set[Box]:bool> -> box_exists" not in agenda: # These are object filters and do not apply if we have a box_exists at the top. if "touch" in sentence: if "top" in sentence: agenda.append(self.terminal_productions["touch_top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["touch_bottom"]) elif "corner" in sentence: agenda.append(self.terminal_productions["touch_corner"]) elif "right" in sentence: agenda.append(self.terminal_productions["touch_right"]) elif "left" in sentence: agenda.append(self.terminal_productions["touch_left"]) elif "wall" in sentence or "edge" in sentence: agenda.append(self.terminal_productions["touch_wall"]) else: agenda.append(self.terminal_productions["touch_object"]) else: # The words "top" and "bottom" may be referring to top and bottom blocks in a tower. if "top" in sentence: agenda.append(self.terminal_productions["top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["bottom"]) if " not " in sentence: agenda.append(self.terminal_productions["negate_filter"]) if " contains " in sentence or " has " in sentence: agenda.append(self.terminal_productions["all_boxes"]) # This takes care of shapes, colors, top, bottom, big, small etc. for constant, production in self.terminal_productions.items(): # TODO(pradeep): Deal with constant names with underscores. if "top" in constant or "bottom" in constant: # We already dealt with top, bottom, touch_top and touch_bottom above. continue if constant in sentence: if "<Set[Object]:Set[Object]> ->" in production and "<Set[Box]:bool> -> box_exists" in agenda: if constant in ["square", "circle", "triangle"]: agenda.append(self.terminal_productions[f"shape_{constant}"]) elif constant in ["yellow", "blue", "black"]: agenda.append(self.terminal_productions[f"color_{constant}"]) else: continue else: agenda.append(production) # TODO (pradeep): Rules for "member_*" productions ("tower" or "box" followed by a color, # shape or number...) number_productions = self._get_number_productions(sentence) for production in number_productions: agenda.append(production) if not agenda: # None of the rules above was triggered! if "box" in sentence: agenda.append(self.terminal_productions["all_boxes"]) else: agenda.append(self.terminal_productions["all_objects"]) return agenda
<SYSTEM_TASK:> Gathers all the numbers in the sentence, and returns productions that lead to them. <END_TASK> <USER_TASK:> Description: def _get_number_productions(sentence: str) -> List[str]: """ Gathers all the numbers in the sentence, and returns productions that lead to them. """
# The mapping here is very simple and limited, which also shouldn't be a problem # because numbers seem to be represented fairly regularly. number_strings = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10"} number_productions = [] tokens = sentence.split() numbers = number_strings.values() for token in tokens: if token in numbers: number_productions.append(f"int -> {token}") elif token in number_strings: number_productions.append(f"int -> {number_strings[token]}") return number_productions
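Two hypothetical sentences (assuming the static method above is in scope as _get_number_productions):
print(_get_number_productions("there are exactly three yellow squares"))
# ['int -> 3'] - the word "three" is mapped to its digit form
print(_get_number_productions("there are 2 boxes with 1 black circle"))
# ['int -> 2', 'int -> 1'] - digit tokens are kept as-is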
<SYSTEM_TASK:> Returns all objects that touch the given set of objects. <END_TASK> <USER_TASK:> Description: def touch_object(self, objects: Set[Object]) -> Set[Object]: """ Returns all objects that touch the given set of objects. """
objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box, box_objects in objects_per_box.items(): candidate_objects = box.objects for object_ in box_objects: for candidate_object in candidate_objects: if self._objects_touch_each_other(object_, candidate_object): return_set.add(candidate_object) return return_set
<SYSTEM_TASK:> Returns the set of objects in the same boxes that are above the given objects. That is, if <END_TASK> <USER_TASK:> Description: def above(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are above the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects above the first object in the first box, and those above the second object in the second box. """
objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # min_y_loc corresponds to the top-most object. min_y_loc = min([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc < min_y_loc: return_set.add(candidate_obj) return return_set
<SYSTEM_TASK:> Returns the set of objects in the same boxes that are below the given objects. That is, if <END_TASK> <USER_TASK:> Description: def below(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are below the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects below the first object in the first box, and those below the second object in the second box. """
objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # max_y_loc corresponds to the bottom-most object. max_y_loc = max([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc > max_y_loc: return_set.add(candidate_obj) return return_set
<SYSTEM_TASK:> Returns true iff the objects touch each other. <END_TASK> <USER_TASK:> Description: def _objects_touch_each_other(self, object1: Object, object2: Object) -> bool: """ Returns true iff the objects touch each other. """
in_vertical_range = object1.y_loc <= object2.y_loc + object2.size and \ object1.y_loc + object1.size >= object2.y_loc in_horizontal_range = object1.x_loc <= object2.x_loc + object2.size and \ object1.x_loc + object1.size >= object2.x_loc touch_side = object1.x_loc + object1.size == object2.x_loc or \ object2.x_loc + object2.size == object1.x_loc touch_top_or_bottom = object1.y_loc + object1.size == object2.y_loc or \ object2.y_loc + object2.size == object1.y_loc return (in_vertical_range and touch_side) or (in_horizontal_range and touch_top_or_bottom)
<SYSTEM_TASK:> Given a set of objects, separate them by the boxes they belong to and return a dict. <END_TASK> <USER_TASK:> Description: def _separate_objects_by_boxes(self, objects: Set[Object]) -> Dict[Box, List[Object]]: """ Given a set of objects, separate them by the boxes they belong to and return a dict. """
objects_per_box: Dict[Box, List[Object]] = defaultdict(list) for box in self.boxes: for object_ in objects: if object_ in box.objects: objects_per_box[box].append(object_) return objects_per_box
<SYSTEM_TASK:> Returns the set of objects for which the attribute function returns an attribute value that <END_TASK> <USER_TASK:> Description: def _get_objects_with_same_attribute(self, objects: Set[Object], attribute_function: Callable[[Object], str]) -> Set[Object]: """ Returns the set of objects for which the attribute function returns an attribute value that is most frequent in the initial set, if the frequency is greater than 1. If not, all objects have different attribute values, and this method returns an empty set. """
objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set) for entity in objects: objects_of_attribute[attribute_function(entity)].add(entity) if not objects_of_attribute: return set() most_frequent_attribute = max(objects_of_attribute, key=lambda x: len(objects_of_attribute[x])) if len(objects_of_attribute[most_frequent_attribute]) <= 1: return set() return objects_of_attribute[most_frequent_attribute]
<SYSTEM_TASK:> Given a possibly complex data structure, <END_TASK> <USER_TASK:> Description: def has_tensor(obj) -> bool: """ Given a possibly complex data structure, check if it has any torch.Tensors in it. """
if isinstance(obj, torch.Tensor): return True elif isinstance(obj, dict): return any(has_tensor(value) for value in obj.values()) elif isinstance(obj, (list, tuple)): return any(has_tensor(item) for item in obj) else: return False
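A couple of illustrative calls (assuming has_tensor above is in scope):
import torch

print(has_tensor({"a": [1, 2, {"b": torch.tensor([1.0])}]}))  # True - a tensor is nested inside
print(has_tensor([1, "x", {"y": 2}]))                         # False - no tensors anywhere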
<SYSTEM_TASK:> Supports sparse and dense tensors. <END_TASK> <USER_TASK:> Description: def clamp_tensor(tensor, minimum, maximum): """ Supports sparse and dense tensors. Returns a tensor with values clamped between the provided minimum and maximum, without modifying the original tensor. """
if tensor.is_sparse: coalesced_tensor = tensor.coalesce() # pylint: disable=protected-access coalesced_tensor._values().clamp_(minimum, maximum) return coalesced_tensor else: return tensor.clamp(minimum, maximum)
<SYSTEM_TASK:> Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys, <END_TASK> <USER_TASK:> Description: def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]], remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]: """ Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys, and returns a single dictionary with all tensors with the same key batched together. Parameters ---------- tensor_dicts : ``List[Dict[str, torch.Tensor]]`` The list of tensor dictionaries to batch. remove_trailing_dimension : ``bool`` If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being batched, and remove it if we find it. """
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list) for tensor_dict in tensor_dicts: for key, tensor in tensor_dict.items(): key_to_tensors[key].append(tensor) batched_tensors = {} for key, tensor_list in key_to_tensors.items(): batched_tensor = torch.stack(tensor_list) if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list): batched_tensor = batched_tensor.squeeze(-1) batched_tensors[key] = batched_tensor return batched_tensors
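A minimal sketch (assuming batch_tensor_dicts above is in scope): two instance-level tensor dictionaries are stacked into one batched dictionary.
import torch

instance_tensors = [
    {"tokens": torch.tensor([1, 2, 3]), "label": torch.tensor([1])},
    {"tokens": torch.tensor([4, 5, 6]), "label": torch.tensor([0])},
]
batched = batch_tensor_dicts(instance_tensors, remove_trailing_dimension=True)
# batched["tokens"] has shape (2, 3); batched["label"] has shape (2,) after the trailing 1 is squeezed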
<SYSTEM_TASK:> Sort a batch first tensor by some specified lengths. <END_TASK> <USER_TASK:> Description: def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor): """ Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first Pytorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering. """
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor): raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.") sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True) sorted_tensor = tensor.index_select(0, permutation_index) index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device) # This is the equivalent of zipping with index, sorting by the original # sequence lengths and returning the now sorted indices. _, reverse_mapping = permutation_index.sort(0, descending=False) restoration_indices = index_range.index_select(0, reverse_mapping) return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
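A quick sketch (assuming sort_batch_by_length above is in scope) showing how the restoration indices recover the original ordering:
import torch

tensor = torch.randn(3, 5, 4)              # (batch, timesteps, dim)
lengths = torch.tensor([2, 5, 3])
sorted_tensor, sorted_lengths, restoration, permutation = sort_batch_by_length(tensor, lengths)
# sorted_lengths is [5, 3, 2]
assert torch.equal(sorted_tensor.index_select(0, restoration), tensor)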
<SYSTEM_TASK:> Computes and returns an element-wise dropout mask for a given tensor, where <END_TASK> <USER_TASK:> Description: def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor): """ Computes and returns an element-wise dropout mask for a given tensor, where each element in the mask is dropped out with probability dropout_probability. Note that the mask is NOT applied to the tensor - the tensor is passed to retain the correct CUDA tensor type for the mask. Parameters ---------- dropout_probability : float, required. Probability of dropping a dimension of the input. tensor_for_masking : torch.Tensor, required. Returns ------- A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability). This scaling ensures expected values and variances of the output of applying this mask and the original tensor are the same. """
binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device) # Scale mask by 1/keep_prob to preserve output statistics. dropout_mask = binary_mask.float().div(1.0 - dropout_probability) return dropout_mask
<SYSTEM_TASK:> To calculate max along certain dimensions on masked values <END_TASK> <USER_TASK:> Description: def masked_max(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, min_val: float = -1e7) -> torch.Tensor: """ To calculate max along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate max, assume unmasked parts are already zeros mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate max keepdim : ``bool`` Whether to keep the dimension min_val : ``float`` The minimal value for paddings Returns ------- A ``torch.Tensor`` containing the maximum values. """
one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, min_val) max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim) return max_value
<SYSTEM_TASK:> To calculate mean along certain dimensions on masked values <END_TASK> <USER_TASK:> Description: def masked_mean(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, eps: float = 1e-8) -> torch.Tensor: """ To calculate mean along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate mean. mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate mean keepdim : ``bool`` Whether to keep the dimension eps : ``float`` A small value to avoid the zero-division problem. Returns ------- A ``torch.Tensor`` containing the mean values. """
one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, 0.0) value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim) value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim) return value_sum / value_count.clamp(min=eps)
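A small numeric check of the two masked reductions above (assuming masked_max and masked_mean are in scope); the padded 100.0 entry is ignored by both.
import torch

vector = torch.tensor([[1.0, 2.0, 100.0]])
mask = torch.tensor([[1.0, 1.0, 0.0]])

print(masked_max(vector, mask, dim=-1))   # tensor([2.])
print(masked_mean(vector, mask, dim=-1))  # tensor([1.5000])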
<SYSTEM_TASK:> Flips a padded tensor along the time dimension without affecting masked entries. <END_TASK> <USER_TASK:> Description: def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor: """ Flips a padded tensor along the time dimension without affecting masked entries. Parameters ---------- padded_sequence : ``torch.Tensor`` The tensor to flip along the time dimension. Assumed to be of dimensions (batch size, num timesteps, ...) sequence_lengths : ``List[int]`` A list containing the lengths of each unpadded sequence in the batch. Returns ------- A ``torch.Tensor`` of the same shape as padded_sequence. """
assert padded_sequence.size(0) == len(sequence_lengths), \ f'sequence_lengths length {len(sequence_lengths)} does not match batch size {padded_sequence.size(0)}' num_timesteps = padded_sequence.size(1) flipped_padded_sequence = torch.flip(padded_sequence, [1]) sequences = [flipped_padded_sequence[i, num_timesteps - length:] for i, length in enumerate(sequence_lengths)] return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
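A worked example (assuming masked_flip above is in scope): each sequence is reversed in time while its padding stays at the end.
import torch

padded = torch.tensor([[1., 2., 0.],
                       [3., 4., 5.]]).unsqueeze(-1)   # (batch=2, timesteps=3, dim=1)
flipped = masked_flip(padded, [2, 3])
# row 0 becomes [2, 1, 0] (only its 2 real steps are reversed), row 1 becomes [5, 4, 3]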
<SYSTEM_TASK:> Takes the dictionary of tensors produced by a ``TextField`` and returns a mask <END_TASK> <USER_TASK:> Description: def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor], num_wrapping_dims: int = 0) -> torch.LongTensor: """ Takes the dictionary of tensors produced by a ``TextField`` and returns a mask with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields`` wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields`` is given by ``num_wrapping_dims``. If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``. If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra dimensions, so the shape will be ``(batch_size, ..., num_tokens)``. There could be several entries in the tensor dictionary with different shapes (e.g., one for word ids, one for character ids). In order to get a token mask, we use the tensor in the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``, if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``, and use it for the mask. If instead it has three dimensions, we assume it has shape ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce the mask. Most frequently this will be a character id tensor, but it could also be a featurized representation of each token, etc. If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask. TODO(joelgrus): can we change this? NOTE: Our functions for generating masks create torch.LongTensors, because using torch.ByteTensors makes it easy to run into overflow errors when doing mask manipulation, such as summing to get the lengths of sequences - see below. >>> mask = torch.ones([260]).byte() >>> mask.sum() # equals 260. >>> var_mask = torch.autograd.Variable(mask) >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows. """
if "mask" in text_field_tensors: return text_field_tensors["mask"] tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()] tensor_dims.sort(key=lambda x: x[0]) smallest_dim = tensor_dims[0][0] - num_wrapping_dims if smallest_dim == 2: token_tensor = tensor_dims[0][1] return (token_tensor != 0).long() elif smallest_dim == 3: character_tensor = tensor_dims[0][1] return ((character_tensor > 0).long().sum(dim=-1) > 0).long() else: raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
<SYSTEM_TASK:> Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a <END_TASK> <USER_TASK:> Description: def _rindex(sequence: Sequence[T], obj: T) -> int: """ Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a ValueError if there is no such item. Parameters ---------- sequence : ``Sequence[T]`` obj : ``T`` Returns ------- zero-based index associated to the position of the last item equal to obj """
for i in range(len(sequence) - 1, -1, -1): if sequence[i] == obj: return i raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
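Two illustrative calls (assuming _rindex above is in scope):
print(_rindex([1, 2, 3, 2, 1], 2))   # 3 - index of the last occurrence of 2
# _rindex([1, 2, 3], 7) would raise ValueError, since 7 never occurs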
<SYSTEM_TASK:> Returns a range vector with the desired size, starting at 0. The CUDA implementation <END_TASK> <USER_TASK:> Description: def get_range_vector(size: int, device: int) -> torch.Tensor: """ Returns a range vector with the desired size, starting at 0. The CUDA implementation is meant to avoid copying data from CPU to GPU. """
if device > -1: return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1 else: return torch.arange(0, size, dtype=torch.long)
<SYSTEM_TASK:> Checks if the string occurs in the table, and if it does, returns the names of the columns <END_TASK> <USER_TASK:> Description: def _string_in_table(self, candidate: str) -> List[str]: """ Checks if the string occurs in the table, and if it does, returns the names of the columns under which it occurs. If it does not, returns an empty list. """
candidate_column_names: List[str] = [] # First check if the entire candidate occurs as a cell. if candidate in self._string_column_mapping: candidate_column_names = self._string_column_mapping[candidate] # If not, check if it is a substring of any cell value. if not candidate_column_names: for cell_value, column_names in self._string_column_mapping.items(): if candidate in cell_value: candidate_column_names.extend(column_names) candidate_column_names = list(set(candidate_column_names)) return candidate_column_names
<SYSTEM_TASK:> Computes the ELMo embeddings for a single tokenized sentence. <END_TASK> <USER_TASK:> Description: def embed_sentence(self, sentence: List[str]) -> numpy.ndarray: """ Computes the ELMo embeddings for a single tokenized sentence. Please note that ELMo has internal state and will give different results for the same input. See the comment under the class definition. Parameters ---------- sentence : ``List[str]``, required A tokenized sentence. Returns ------- A tensor containing the ELMo vectors. """
return self.embed_batch([sentence])[0]
<SYSTEM_TASK:> Computes the ELMo embeddings for a batch of tokenized sentences. <END_TASK> <USER_TASK:> Description: def embed_batch(self, batch: List[List[str]]) -> List[numpy.ndarray]: """ Computes the ELMo embeddings for a batch of tokenized sentences. Please note that ELMo has internal state and will give different results for the same input. See the comment under the class definition. Parameters ---------- batch : ``List[List[str]]``, required A list of tokenized sentences. Returns ------- A list of tensors, each representing the ELMo vectors for the input sentence at the same index. """
elmo_embeddings = [] # Batches with only an empty sentence will throw an exception inside AllenNLP, so we handle this case # and return an empty embedding instead. if batch == [[]]: elmo_embeddings.append(empty_embedding()) else: embeddings, mask = self.batch_to_embeddings(batch) for i in range(len(batch)): length = int(mask[i, :].sum()) # Slicing the embedding :0 throws an exception so we need to special case for empty sentences. if length == 0: elmo_embeddings.append(empty_embedding()) else: elmo_embeddings.append(embeddings[i, :, :length, :].detach().cpu().numpy()) return elmo_embeddings
<SYSTEM_TASK:> Computes the ELMo embeddings for an iterable of sentences. <END_TASK> <USER_TASK:> Description: def embed_sentences(self, sentences: Iterable[List[str]], batch_size: int = DEFAULT_BATCH_SIZE) -> Iterable[numpy.ndarray]: """ Computes the ELMo embeddings for an iterable of sentences. Please note that ELMo has internal state and will give different results for the same input. See the comment under the class definition. Parameters ---------- sentences : ``Iterable[List[str]]``, required An iterable of tokenized sentences. batch_size : ``int``, required The number of sentences ELMo should process at once. Returns ------- A list of tensors, each representing the ELMo vectors for the input sentence at the same index. """
for batch in lazy_groups_of(iter(sentences), batch_size): yield from self.embed_batch(batch)
<SYSTEM_TASK:> Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace. <END_TASK> <USER_TASK:> Description: def embed_file(self, input_file: IO, output_file_path: str, output_format: str = "all", batch_size: int = DEFAULT_BATCH_SIZE, forget_sentences: bool = False, use_sentence_keys: bool = False) -> None: """ Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace. The ELMo embeddings are written out in HDF5 format, where each sentence embedding is saved in a dataset with the line number in the original file as the key. Parameters ---------- input_file : ``IO``, required A file with one tokenized sentence per line. output_file_path : ``str``, required A path to the output hdf5 file. output_format : ``str``, optional, (default = "all") The embeddings to output. Must be one of "all", "top", or "average". batch_size : ``int``, optional, (default = 64) The number of sentences to process in ELMo at one time. forget_sentences : ``bool``, optional, (default = False). If use_sentence_keys is False, whether or not to include a string serialized JSON dictionary that associates sentences with their line number (its HDF5 key). The mapping is placed in the "sentence_to_index" HDF5 key. This is useful if you want to use the embeddings without keeping the original file of sentences around. use_sentence_keys : ``bool``, optional, (default = False). Whether or not to use full sentences as keys. By default, the line numbers of the input file are used as ids, which is more robust. """
assert output_format in ["all", "top", "average"] # Tokenizes the sentences. sentences = [line.strip() for line in input_file] blank_lines = [i for (i, line) in enumerate(sentences) if line == ""] if blank_lines: raise ConfigurationError(f"Your input file contains empty lines at indexes " f"{blank_lines}. Please remove them.") split_sentences = [sentence.split() for sentence in sentences] # Uses the sentence index as the key. if use_sentence_keys: logger.warning("Using sentences as keys can fail if sentences " "contain forward slashes or colons. Use with caution.") embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size)) else: embedded_sentences = ((str(i), x) for i, x in enumerate(self.embed_sentences(split_sentences, batch_size))) sentence_to_index = {} logger.info("Processing sentences.") with h5py.File(output_file_path, 'w') as fout: for key, embeddings in Tqdm.tqdm(embedded_sentences): if use_sentence_keys and key in fout.keys(): raise ConfigurationError(f"Key already exists in {output_file_path}. " f"To encode duplicate sentences, do not pass " f"the --use-sentence-keys flag.") if not forget_sentences and not use_sentence_keys: sentence = sentences[int(key)] sentence_to_index[sentence] = key if output_format == "all": output = embeddings elif output_format == "top": output = embeddings[-1] elif output_format == "average": output = numpy.average(embeddings, axis=0) fout.create_dataset( str(key), output.shape, dtype='float32', data=output ) if not forget_sentences and not use_sentence_keys: sentence_index_dataset = fout.create_dataset( "sentence_to_index", (1,), dtype=h5py.special_dtype(vlen=str)) sentence_index_dataset[0] = json.dumps(sentence_to_index) input_file.close()
<SYSTEM_TASK:> Add the field to the existing fields mapping. <END_TASK> <USER_TASK:> Description: def add_field(self, field_name: str, field: Field, vocab: Vocabulary = None) -> None: """ Add the field to the existing fields mapping. If we have already indexed the Instance, then we also index `field`, so it is necessary to supply the vocab. """
self.fields[field_name] = field if self.indexed: field.index(vocab)
<SYSTEM_TASK:> Increments counts in the given ``counter`` for all of the vocabulary items in all of the <END_TASK> <USER_TASK:> Description: def count_vocab_items(self, counter: Dict[str, Dict[str, int]]): """ Increments counts in the given ``counter`` for all of the vocabulary items in all of the ``Fields`` in this ``Instance``. """
for field in self.fields.values(): field.count_vocab_items(counter)
<SYSTEM_TASK:> Indexes all fields in this ``Instance`` using the provided ``Vocabulary``. <END_TASK> <USER_TASK:> Description: def index_fields(self, vocab: Vocabulary) -> None: """ Indexes all fields in this ``Instance`` using the provided ``Vocabulary``. This `mutates` the current object, it does not return a new ``Instance``. A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed`` flag to make sure that indexing only happens once. This means that if for some reason you modify your vocabulary after you've indexed your instances, you might get unexpected behavior. """
if not self.indexed: self.indexed = True for field in self.fields.values(): field.index(vocab)