Dataset columns: text_prompt (string, lengths 100 to 17.7k characters) and code_prompt (string, lengths 7 to 9.86k characters).
<SYSTEM_TASK:> Get the label of the Dataset. <END_TASK> <USER_TASK:> Description: def get_label(self): """Get the label of the Dataset. Returns ------- label : numpy array or None The label information from the Dataset. """
if self.label is None: self.label = self.get_field('label') return self.label
<SYSTEM_TASK:> Get the weight of the Dataset. <END_TASK> <USER_TASK:> Description: def get_weight(self): """Get the weight of the Dataset. Returns ------- weight : numpy array or None Weight for each data point from the Dataset. """
if self.weight is None: self.weight = self.get_field('weight') return self.weight
<SYSTEM_TASK:> Get the feature penalty of the Dataset. <END_TASK> <USER_TASK:> Description: def get_feature_penalty(self): """Get the feature penalty of the Dataset. Returns ------- feature_penalty : numpy array or None Feature penalty for each feature in the Dataset. """
if self.feature_penalty is None: self.feature_penalty = self.get_field('feature_penalty') return self.feature_penalty
<SYSTEM_TASK:> Get the monotone constraints of the Dataset. <END_TASK> <USER_TASK:> Description: def get_monotone_constraints(self): """Get the monotone constraints of the Dataset. Returns ------- monotone_constraints : numpy array or None Monotone constraints: -1, 0 or 1, for each feature in the Dataset. """
if self.monotone_constraints is None: self.monotone_constraints = self.get_field('monotone_constraints') return self.monotone_constraints
<SYSTEM_TASK:> Get the initial score of the Dataset. <END_TASK> <USER_TASK:> Description: def get_init_score(self): """Get the initial score of the Dataset. Returns ------- init_score : numpy array or None Init score of Booster. """
if self.init_score is None: self.init_score = self.get_field('init_score') return self.init_score
<SYSTEM_TASK:> Get the raw data of the Dataset. <END_TASK> <USER_TASK:> Description: def get_data(self): """Get the raw data of the Dataset. Returns ------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None Raw data used in the Dataset construction. """
if self.handle is None: raise Exception("Cannot get data before construct Dataset") if self.data is not None and self.used_indices is not None and self.need_slice: if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data): self.data = self.data[self.used_indices, :] elif isinstance(self.data, DataFrame): self.data = self.data.iloc[self.used_indices].copy() elif isinstance(self.data, DataTable): self.data = self.data[self.used_indices, :] else: warnings.warn("Cannot subset {} type of raw data.\n" "Returning original raw data".format(type(self.data).__name__)) self.need_slice = False return self.data
<SYSTEM_TASK:> Get the group of the Dataset. <END_TASK> <USER_TASK:> Description: def get_group(self): """Get the group of the Dataset. Returns ------- group : numpy array or None Group size of each group. """
if self.group is None: self.group = self.get_field('group') if self.group is not None: # group data from LightGBM is boundaries data, need to convert to group size self.group = np.diff(self.group) return self.group
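As a quick illustration of the boundaries-to-sizes conversion performed above (a sketch with made-up numbers):

    import numpy as np

    # LightGBM stores query groups internally as cumulative boundaries ...
    boundaries = np.array([0, 4, 7, 10])
    # ... while get_group() exposes them as per-group sizes via np.diff
    group_sizes = np.diff(boundaries)   # array([4, 3, 3])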
<SYSTEM_TASK:> Get the number of rows in the Dataset. <END_TASK> <USER_TASK:> Description: def num_data(self): """Get the number of rows in the Dataset. Returns ------- number_of_rows : int The number of rows in the Dataset. """
if self.handle is not None: ret = ctypes.c_int() _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle, ctypes.byref(ret))) return ret.value else: raise LightGBMError("Cannot get num_data before construct dataset")
<SYSTEM_TASK:> Get a chain of Dataset objects. <END_TASK> <USER_TASK:> Description: def get_ref_chain(self, ref_limit=100): """Get a chain of Dataset objects. Starts with r, then goes to r.reference (if exists), then to r.reference.reference, etc. until we hit ``ref_limit`` or a reference loop. Parameters ---------- ref_limit : int, optional (default=100) The limit number of references. Returns ------- ref_chain : set of Dataset Chain of references of the Datasets. """
head = self ref_chain = set() while len(ref_chain) < ref_limit: if isinstance(head, Dataset): ref_chain.add(head) if (head.reference is not None) and (head.reference not in ref_chain): head = head.reference else: break else: break return ref_chain
<SYSTEM_TASK:> Add features from other Dataset to the current Dataset. <END_TASK> <USER_TASK:> Description: def add_features_from(self, other): """Add features from other Dataset to the current Dataset. Both Datasets must be constructed before calling this method. Parameters ---------- other : Dataset The Dataset to take features from. Returns ------- self : Dataset Dataset with the new features added. """
if self.handle is None or other.handle is None: raise ValueError('Both source and target Datasets must be constructed before adding features') _safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle)) return self
<SYSTEM_TASK:> Save Dataset to a text file. <END_TASK> <USER_TASK:> Description: def dump_text(self, filename): """Save Dataset to a text file. This format cannot be loaded back in by LightGBM, but is useful for debugging purposes. Parameters ---------- filename : string Name of the output file. Returns ------- self : Dataset Returns self. """
_safe_call(_LIB.LGBM_DatasetDumpText( self.construct().handle, c_str(filename))) return self
<SYSTEM_TASK:> Set the network configuration. <END_TASK> <USER_TASK:> Description: def set_network(self, machines, local_listen_port=12400, listen_time_out=120, num_machines=1): """Set the network configuration. Parameters ---------- machines : list, set or string Names of machines. local_listen_port : int, optional (default=12400) TCP listen port for local machines. listen_time_out : int, optional (default=120) Socket time-out in minutes. num_machines : int, optional (default=1) The number of machines for parallel learning application. Returns ------- self : Booster Booster with set network. """
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines), ctypes.c_int(local_listen_port), ctypes.c_int(listen_time_out), ctypes.c_int(num_machines))) self.network = True return self
<SYSTEM_TASK:> Add validation data. <END_TASK> <USER_TASK:> Description: def add_valid(self, data, name): """Add validation data. Parameters ---------- data : Dataset Validation data. name : string Name of validation data. Returns ------- self : Booster Booster with set validation data. """
if not isinstance(data, Dataset): raise TypeError('Validation data should be Dataset instance, met {}' .format(type(data).__name__)) if data._predictor is not self.__init_predictor: raise LightGBMError("Add validation data failed, " "you should use same predictor for these data") _safe_call(_LIB.LGBM_BoosterAddValidData( self.handle, data.construct().handle)) self.valid_sets.append(data) self.name_valid_sets.append(name) self.__num_dataset += 1 self.__inner_predict_buffer.append(None) self.__is_predicted_cur_iter.append(False) return self
<SYSTEM_TASK:> Reset parameters of Booster. <END_TASK> <USER_TASK:> Description: def reset_parameter(self, params): """Reset parameters of Booster. Parameters ---------- params : dict New parameters for Booster. Returns ------- self : Booster Booster with new parameters. """
if any(metric_alias in params for metric_alias in ('metric', 'metrics', 'metric_types')): self.__need_reload_eval_info = True params_str = param_dict_to_str(params) if params_str: _safe_call(_LIB.LGBM_BoosterResetParameter( self.handle, c_str(params_str))) self.params.update(params) return self
<SYSTEM_TASK:> Update Booster for one iteration. <END_TASK> <USER_TASK:> Description: def update(self, train_set=None, fobj=None): """Update Booster for one iteration. Parameters ---------- train_set : Dataset or None, optional (default=None) Training data. If None, last training data is used. fobj : callable or None, optional (default=None) Customized objective function. For multi-class task, the score is group by class_id first, then group by row_id. If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. Returns ------- is_finished : bool Whether the update was successfully finished. """
# need reset training data if train_set is not None and train_set is not self.train_set: if not isinstance(train_set, Dataset): raise TypeError('Training data should be Dataset instance, met {}' .format(type(train_set).__name__)) if train_set._predictor is not self.__init_predictor: raise LightGBMError("Replace training data failed, " "you should use same predictor for these data") self.train_set = train_set _safe_call(_LIB.LGBM_BoosterResetTrainingData( self.handle, self.train_set.construct().handle)) self.__inner_predict_buffer[0] = None is_finished = ctypes.c_int(0) if fobj is None: if self.__set_objective_to_none: raise LightGBMError('Cannot update due to null objective function.') _safe_call(_LIB.LGBM_BoosterUpdateOneIter( self.handle, ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return is_finished.value == 1 else: if not self.__set_objective_to_none: self.reset_parameter({"objective": "none"}).__set_objective_to_none = True grad, hess = fobj(self.__inner_predict(0), self.train_set) return self.__boost(grad, hess)
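As a sketch of the custom-objective path, the snippet below defines a binary logistic ``fobj`` returning the per-row gradient and Hessian of the raw score; passing it to ``update()`` routes training through the custom-boost branch. The function and variable names are illustrative, not part of the library:

    import numpy as np

    def logistic_obj(preds, train_data):
        """Gradient and Hessian of binary log loss with respect to the raw score."""
        labels = train_data.get_label()
        probs = 1.0 / (1.0 + np.exp(-preds))
        grad = probs - labels           # first-order derivative
        hess = probs * (1.0 - probs)    # second-order derivative
        return grad, hess

    # one boosting round with the custom objective (booster is an existing Booster)
    # booster.update(fobj=logistic_obj)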
<SYSTEM_TASK:> Boost Booster for one iteration with customized gradient statistics. <END_TASK> <USER_TASK:> Description: def __boost(self, grad, hess): """Boost Booster for one iteration with customized gradient statistics. Note ---- For multi-class task, the score is group by class_id first, then group by row_id. If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. Parameters ---------- grad : 1-D numpy array or 1-D list The first order derivative (gradient). hess : 1-D numpy array or 1-D list The second order derivative (Hessian). Returns ------- is_finished : bool Whether the boost was successfully finished. """
grad = list_to_1d_numpy(grad, name='gradient') hess = list_to_1d_numpy(hess, name='hessian') assert grad.flags.c_contiguous assert hess.flags.c_contiguous if len(grad) != len(hess): raise ValueError("Lengths of gradient({}) and hessian({}) don't match" .format(len(grad), len(hess))) is_finished = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom( self.handle, grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return is_finished.value == 1
<SYSTEM_TASK:> Rollback one iteration. <END_TASK> <USER_TASK:> Description: def rollback_one_iter(self): """Rollback one iteration. Returns ------- self : Booster Booster with rolled back one iteration. """
_safe_call(_LIB.LGBM_BoosterRollbackOneIter( self.handle)) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return self
<SYSTEM_TASK:> Get the index of the current iteration. <END_TASK> <USER_TASK:> Description: def current_iteration(self): """Get the index of the current iteration. Returns ------- cur_iter : int The index of the current iteration. """
out_cur_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetCurrentIteration( self.handle, ctypes.byref(out_cur_iter))) return out_cur_iter.value
<SYSTEM_TASK:> Evaluate for data. <END_TASK> <USER_TASK:> Description: def eval(self, data, name, feval=None): """Evaluate for data. Parameters ---------- data : Dataset Data for the evaluating. name : string Name of the data. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results. """
if not isinstance(data, Dataset): raise TypeError("Can only eval for Dataset instance") data_idx = -1 if data is self.train_set: data_idx = 0 else: for i in range_(len(self.valid_sets)): if data is self.valid_sets[i]: data_idx = i + 1 break # need to push new valid data if data_idx == -1: self.add_valid(data, name) data_idx = self.__num_dataset - 1 return self.__inner_eval(name, data_idx, feval)
<SYSTEM_TASK:> Evaluate for validation data. <END_TASK> <USER_TASK:> Description: def eval_valid(self, feval=None): """Evaluate for validation data. Parameters ---------- feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results. """
return [item for i in range_(1, self.__num_dataset) for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
<SYSTEM_TASK:> Save Booster to file. <END_TASK> <USER_TASK:> Description: def save_model(self, filename, num_iteration=None, start_iteration=0): """Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. Returns ------- self : Booster Returns self. """
if num_iteration is None: num_iteration = self.best_iteration _safe_call(_LIB.LGBM_BoosterSaveModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(filename))) _dump_pandas_categorical(self.pandas_categorical, filename) return self
<SYSTEM_TASK:> Shuffle models. <END_TASK> <USER_TASK:> Description: def shuffle_models(self, start_iteration=0, end_iteration=-1): """Shuffle models. Parameters ---------- start_iteration : int, optional (default=0) The first iteration that will be shuffled. end_iteration : int, optional (default=-1) The last iteration that will be shuffled. If <= 0, means the last available iteration. Returns ------- self : Booster Booster with shuffled models. """
_safe_call(_LIB.LGBM_BoosterShuffleModels( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(end_iteration))) return self
<SYSTEM_TASK:> Load Booster from a string. <END_TASK> <USER_TASK:> Description: def model_from_string(self, model_str, verbose=True): """Load Booster from a string. Parameters ---------- model_str : string Model will be loaded from this string. verbose : bool, optional (default=True) Whether to print messages while loading model. Returns ------- self : Booster Loaded Booster object. """
if self.handle is not None: _safe_call(_LIB.LGBM_BoosterFree(self.handle)) self._free_buffer() self.handle = ctypes.c_void_p() out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterLoadModelFromString( c_str(model_str), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) if verbose: print('Finished loading model, total used %d iterations' % int(out_num_iterations.value)) self.__num_class = out_num_class.value self.pandas_categorical = _load_pandas_categorical(model_str=model_str) return self
<SYSTEM_TASK:> Save Booster to string. <END_TASK> <USER_TASK:> Description: def model_to_string(self, num_iteration=None, start_iteration=0): """Save Booster to string. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. Returns ------- str_repr : string String representation of Booster. """
if num_iteration is None: num_iteration = self.best_iteration buffer_len = 1 << 20 tmp_out_len = ctypes.c_int64(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int64(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value # if buffer length is not long enough, re-allocate a buffer if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int64(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) ret = string_buffer.value.decode() ret += _dump_pandas_categorical(self.pandas_categorical) return ret
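A hedged round-trip sketch: serialize a trained Booster to a string and rebuild an equivalent one from it. The toy data and parameter choices are assumptions:

    import numpy as np
    import lightgbm as lgb

    X, y = np.random.rand(100, 5), np.random.randint(2, size=100)
    bst = lgb.train({'objective': 'binary', 'verbose': -1},
                    lgb.Dataset(X, label=y), num_boost_round=5)

    model_str = bst.model_to_string()          # all iterations, since no best iteration is set
    bst2 = lgb.Booster(model_str=model_str)    # or an existing Booster's model_from_string()
    assert np.allclose(bst.predict(X), bst2.predict(X))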
<SYSTEM_TASK:> Dump Booster to JSON format. <END_TASK> <USER_TASK:> Description: def dump_model(self, num_iteration=None, start_iteration=0): """Dump Booster to JSON format. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be dumped. If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped. If <= 0, all iterations are dumped. start_iteration : int, optional (default=0) Start index of the iteration that should be dumped. Returns ------- json_repr : dict JSON format of Booster. """
if num_iteration is None: num_iteration = self.best_iteration buffer_len = 1 << 20 tmp_out_len = ctypes.c_int64(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int64(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value # if buffer length is not long enough, reallocate a buffer if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int64(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) ret = json.loads(string_buffer.value.decode()) ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical, default=json_default_with_numpy)) return ret
<SYSTEM_TASK:> Make a prediction. <END_TASK> <USER_TASK:> Description: def predict(self, data, num_iteration=None, raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False, is_reshape=True, **kwargs): """Make a prediction. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for prediction. If string, it represents the path to txt file. num_iteration : int or None, optional (default=None) Limit number of iterations in the prediction. If None, if the best iteration exists, it is used; otherwise, all iterations are used. If <= 0, all iterations are used (no limits). raw_score : bool, optional (default=False) Whether to predict raw scores. pred_leaf : bool, optional (default=False) Whether to predict leaf index. pred_contrib : bool, optional (default=False) Whether to predict feature contributions. Note ---- If you want to get more explanations for your model's predictions using SHAP values, like SHAP interaction values, you can install the shap package (https://github.com/slundberg/shap). Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra column, where the last column is the expected value. data_has_header : bool, optional (default=False) Whether the data has header. Used only if data is string. is_reshape : bool, optional (default=True) If True, result is reshaped to [nrow, ncol]. **kwargs Other parameters for the prediction. Returns ------- result : numpy array Prediction result. """
predictor = self._to_predictor(copy.deepcopy(kwargs)) if num_iteration is None: num_iteration = self.best_iteration return predictor.predict(data, num_iteration, raw_score, pred_leaf, pred_contrib, data_has_header, is_reshape)
<SYSTEM_TASK:> Refit the existing Booster by new data. <END_TASK> <USER_TASK:> Description: def refit(self, data, label, decay_rate=0.9, **kwargs): """Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. """
if self.__set_objective_to_none: raise LightGBMError('Cannot refit due to null objective function.') predictor = self._to_predictor(copy.deepcopy(kwargs)) leaf_preds = predictor.predict(data, -1, pred_leaf=True) nrow, ncol = leaf_preds.shape train_set = Dataset(data, label, silent=True) new_booster = Booster(self.params, train_set, silent=True) # Copy models _safe_call(_LIB.LGBM_BoosterMerge( new_booster.handle, predictor.handle)) leaf_preds = leaf_preds.reshape(-1) ptr_data, type_ptr_data, _ = c_int_array(leaf_preds) _safe_call(_LIB.LGBM_BoosterRefit( new_booster.handle, ptr_data, ctypes.c_int(nrow), ctypes.c_int(ncol))) new_booster.network = self.network new_booster.__attr = self.__attr.copy() return new_booster
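Continuing with ``bst`` from the round-trip sketch above and treating a fresh draw as new data (names are placeholders):

    X_new, y_new = np.random.rand(50, 5), np.random.randint(2, size=50)
    # keep 90% of each old leaf output, move 10% toward the new data's optimum
    refitted = bst.refit(X_new, y_new, decay_rate=0.9)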
<SYSTEM_TASK:> Get the output of a leaf. <END_TASK> <USER_TASK:> Description: def get_leaf_output(self, tree_id, leaf_id): """Get the output of a leaf. Parameters ---------- tree_id : int The index of the tree. leaf_id : int The index of the leaf in the tree. Returns ------- result : float The output of the leaf. """
ret = ctypes.c_double(0) _safe_call(_LIB.LGBM_BoosterGetLeafValue( self.handle, ctypes.c_int(tree_id), ctypes.c_int(leaf_id), ctypes.byref(ret))) return ret.value
<SYSTEM_TASK:> Get names of features. <END_TASK> <USER_TASK:> Description: def feature_name(self): """Get names of features. Returns ------- result : list List with names of features. """
num_feature = self.num_feature() # Get name of features tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal with num_feature") return [string_buffers[i].value.decode() for i in range_(num_feature)]
<SYSTEM_TASK:> Get split value histogram for the specified feature. <END_TASK> <USER_TASK:> Description: def get_split_value_histogram(self, feature, bins=None, xgboost_style=False): """Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature. """
def add(root): """Recursively add thresholds.""" if 'split_index' in root: # non-leaf if feature_names is not None and isinstance(feature, string_type): split_feature = feature_names[root['split_feature']] else: split_feature = root['split_feature'] if split_feature == feature: if isinstance(root['threshold'], string_type): raise LightGBMError('Cannot compute split value histogram for the categorical feature') else: values.append(root['threshold']) add(root['left_child']) add(root['right_child']) model = self.dump_model() feature_names = model.get('feature_names') tree_infos = model['tree_info'] values = [] for tree_info in tree_infos: add(tree_info['tree_structure']) if bins is None or isinstance(bins, integer_types) and xgboost_style: n_unique = len(np.unique(values)) bins = max(min(n_unique, bins) if bins is not None else n_unique, 1) hist, bin_edges = np.histogram(values, bins=bins) if xgboost_style: ret = np.column_stack((bin_edges[1:], hist)) ret = ret[ret[:, 1] > 0] if PANDAS_INSTALLED: return DataFrame(ret, columns=['SplitValue', 'Count']) else: return ret else: return hist, bin_edges
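A small usage sketch for both return styles, continuing with the Booster ``bst`` trained on a numpy array above (so features get the default names ``Column_0``, ``Column_1``, ...):

    # numpy.histogram-style: counts plus bin edges
    hist, bin_edges = bst.get_split_value_histogram('Column_0', bins=10)

    # XGBoost-style: only non-empty bins, returned as a DataFrame if pandas is installed
    df = bst.get_split_value_histogram('Column_0', bins=10, xgboost_style=True)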
<SYSTEM_TASK:> Evaluate training or validation data. <END_TASK> <USER_TASK:> Description: def __inner_eval(self, data_name, data_idx, feval=None): """Evaluate training or validation data."""
if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") self.__get_eval_info() ret = [] if self.__num_inner_eval > 0: result = np.zeros(self.__num_inner_eval, dtype=np.float64) tmp_out_len = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetEval( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if tmp_out_len.value != self.__num_inner_eval: raise ValueError("Wrong length of eval results") for i in range_(self.__num_inner_eval): ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i])) if feval is not None: if data_idx == 0: cur_data = self.train_set else: cur_data = self.valid_sets[data_idx - 1] feval_ret = feval(self.__inner_predict(data_idx), cur_data) if isinstance(feval_ret, list): for eval_name, val, is_higher_better in feval_ret: ret.append((data_name, eval_name, val, is_higher_better)) else: eval_name, val, is_higher_better = feval_ret ret.append((data_name, eval_name, val, is_higher_better)) return ret
<SYSTEM_TASK:> Predict for training and validation dataset. <END_TASK> <USER_TASK:> Description: def __inner_predict(self, data_idx): """Predict for training and validation dataset."""
if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") if self.__inner_predict_buffer[data_idx] is None: if data_idx == 0: n_preds = self.train_set.num_data() * self.__num_class else: n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64) # avoid to predict many time in one iteration if not self.__is_predicted_cur_iter[data_idx]: tmp_out_len = ctypes.c_int64(0) data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double)) _safe_call(_LIB.LGBM_BoosterGetPredict( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), data_ptr)) if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]): raise ValueError("Wrong length of predict results for data %d" % (data_idx)) self.__is_predicted_cur_iter[data_idx] = True return self.__inner_predict_buffer[data_idx]
<SYSTEM_TASK:> Get inner evaluation count and names. <END_TASK> <USER_TASK:> Description: def __get_eval_info(self): """Get inner evaluation count and names."""
if self.__need_reload_eval_info: self.__need_reload_eval_info = False out_num_eval = ctypes.c_int(0) # Get num of inner evals _safe_call(_LIB.LGBM_BoosterGetEvalCounts( self.handle, ctypes.byref(out_num_eval))) self.__num_inner_eval = out_num_eval.value if self.__num_inner_eval > 0: # Get name of evals tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)] ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetEvalNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if self.__num_inner_eval != tmp_out_len.value: raise ValueError("Length of eval names doesn't equal with num_evals") self.__name_inner_eval = \ [string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)] self.__higher_better_inner_eval = \ [name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]
<SYSTEM_TASK:> Set attributes to the Booster. <END_TASK> <USER_TASK:> Description: def set_attr(self, **kwargs): """Set attributes to the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. Returns ------- self : Booster Booster with set attributes. """
for key, value in kwargs.items(): if value is not None: if not isinstance(value, string_type): raise ValueError("Only string values are accepted") self.__attr[key] = value else: self.__attr.pop(key, None) return self
<SYSTEM_TASK:> Find the path to LightGBM library files. <END_TASK> <USER_TASK:> Description: def find_lib_path(): """Find the path to LightGBM library files. Returns ------- lib_path: list of strings List of all found library paths to LightGBM. """
if os.environ.get('LIGHTGBM_BUILD_DOC', False): # we don't need lib_lightgbm while building docs return [] curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) dll_path = [curr_path, os.path.join(curr_path, '../../'), os.path.join(curr_path, 'compile'), os.path.join(curr_path, '../compile'), os.path.join(curr_path, '../../lib/')] if system() in ('Windows', 'Microsoft'): dll_path.append(os.path.join(curr_path, '../compile/Release/')) dll_path.append(os.path.join(curr_path, '../compile/windows/x64/DLL/')) dll_path.append(os.path.join(curr_path, '../../Release/')) dll_path.append(os.path.join(curr_path, '../../windows/x64/DLL/')) dll_path = [os.path.join(p, 'lib_lightgbm.dll') for p in dll_path] else: dll_path = [os.path.join(p, 'lib_lightgbm.so') for p in dll_path] lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)] if not lib_path: dll_path = [os.path.realpath(p) for p in dll_path] raise Exception('Cannot find lightgbm library file in following paths:\n' + '\n'.join(dll_path)) return lib_path
<SYSTEM_TASK:> Create a callback that prints the evaluation results. <END_TASK> <USER_TASK:> Description: def print_evaluation(period=1, show_stdv=True): """Create a callback that prints the evaluation results. Parameters ---------- period : int, optional (default=1) The period to print the evaluation results. show_stdv : bool, optional (default=True) Whether to show stdv (if provided). Returns ------- callback : function The callback that prints the evaluation results every ``period`` iteration(s). """
def _callback(env): if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0: result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list]) print('[%d]\t%s' % (env.iteration + 1, result)) _callback.order = 10 return _callback
<SYSTEM_TASK:> Create a callback that records the evaluation history into ``eval_result``. <END_TASK> <USER_TASK:> Description: def record_evaluation(eval_result): """Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary. """
if not isinstance(eval_result, dict): raise TypeError('Eval_result should be a dictionary') eval_result.clear() def _init(env): for data_name, _, _, _ in env.evaluation_result_list: eval_result.setdefault(data_name, collections.defaultdict(list)) def _callback(env): if not eval_result: _init(env) for data_name, eval_name, result, _ in env.evaluation_result_list: eval_result[data_name][eval_name].append(result) _callback.order = 20 return _callback
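A hedged sketch of the callback in use with ``lightgbm.train()``, on a small synthetic binary dataset:

    import numpy as np
    import lightgbm as lgb

    X, y = np.random.rand(200, 5), np.random.randint(2, size=200)
    train_set = lgb.Dataset(X[:150], label=y[:150])
    valid_set = lgb.Dataset(X[150:], label=y[150:], reference=train_set)

    evals_result = {}
    bst = lgb.train({'objective': 'binary', 'verbose': -1}, train_set,
                    num_boost_round=50, valid_sets=[valid_set], valid_names=['valid'],
                    callbacks=[lgb.record_evaluation(evals_result)])
    # evals_result now looks like {'valid': {'binary_logloss': [<50 values>]}},
    # with the metric name depending on the configured objective/metric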
<SYSTEM_TASK:> Create a callback that resets the parameter after the first iteration. <END_TASK> <USER_TASK:> Description: def reset_parameter(**kwargs): """Create a callback that resets the parameter after the first iteration. Note ---- The initial parameter will still take in-effect on first iteration. Parameters ---------- **kwargs : value should be list or function List of parameters for each boosting round or a customized function that calculates the parameter in terms of current number of round (e.g. yields learning rate decay). If list lst, parameter = lst[current_round]. If function func, parameter = func(current_round). Returns ------- callback : function The callback that resets the parameter after the first iteration. """
def _callback(env): new_parameters = {} for key, value in kwargs.items(): if key in ['num_class', 'num_classes', 'boosting', 'boost', 'boosting_type', 'metric', 'metrics', 'metric_types']: raise RuntimeError("cannot reset {} during training".format(repr(key))) if isinstance(value, list): if len(value) != env.end_iteration - env.begin_iteration: raise ValueError("Length of list {} has to equal to 'num_boost_round'." .format(repr(key))) new_param = value[env.iteration - env.begin_iteration] else: new_param = value(env.iteration - env.begin_iteration) if new_param != env.params.get(key, None): new_parameters[key] = new_param if new_parameters: env.model.reset_parameter(new_parameters) env.params.update(new_parameters) _callback.before_iteration = True _callback.order = 10 return _callback
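Reusing the synthetic ``train_set`` from the sketch above, a decaying learning rate can be supplied either as a function of the round index or as a per-round list (the decay constants are arbitrary):

    n_rounds = 100
    lr_schedule = lgb.reset_parameter(learning_rate=lambda round_num: 0.1 * (0.99 ** round_num))
    # equivalently: lgb.reset_parameter(learning_rate=[0.1 * (0.99 ** i) for i in range(n_rounds)])
    bst = lgb.train({'objective': 'binary', 'verbose': -1}, train_set,
                    num_boost_round=n_rounds, callbacks=[lr_schedule])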
<SYSTEM_TASK:> Create a callback that activates early stopping. <END_TASK> <USER_TASK:> Description: def early_stopping(stopping_rounds, first_metric_only=False, verbose=True): """Create a callback that activates early stopping. Note ---- Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation data and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric set ``first_metric_only`` to True. Parameters ---------- stopping_rounds : int The possible number of rounds without the trend occurrence. first_metric_only : bool, optional (default=False) Whether to use only the first metric for early stopping. verbose : bool, optional (default=True) Whether to print message with early stopping information. Returns ------- callback : function The callback that activates early stopping. """
best_score = [] best_iter = [] best_score_list = [] cmp_op = [] enabled = [True] def _init(env): enabled[0] = not any((boost_alias in env.params and env.params[boost_alias] == 'dart') for boost_alias in ('boosting', 'boosting_type', 'boost')) if not enabled[0]: warnings.warn('Early stopping is not available in dart mode') return if not env.evaluation_result_list: raise ValueError('For early stopping, ' 'at least one dataset and eval metric is required for evaluation') if verbose: msg = "Training until validation scores don't improve for {} rounds." print(msg.format(stopping_rounds)) for eval_ret in env.evaluation_result_list: best_iter.append(0) best_score_list.append(None) if eval_ret[3]: best_score.append(float('-inf')) cmp_op.append(gt) else: best_score.append(float('inf')) cmp_op.append(lt) def _callback(env): if not cmp_op: _init(env) if not enabled[0]: return for i in range_(len(env.evaluation_result_list)): score = env.evaluation_result_list[i][2] if best_score_list[i] is None or cmp_op[i](score, best_score[i]): best_score[i] = score best_iter[i] = env.iteration best_score_list[i] = env.evaluation_result_list elif env.iteration - best_iter[i] >= stopping_rounds: if verbose: print('Early stopping, best iteration is:\n[%d]\t%s' % ( best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]]))) raise EarlyStopException(best_iter[i], best_score_list[i]) if env.iteration == env.end_iteration - 1: if verbose: print('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % ( best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]]))) raise EarlyStopException(best_iter[i], best_score_list[i]) if first_metric_only: # the only first metric is used for early stopping break _callback.order = 30 return _callback
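Typical usage, again with the synthetic data above: training stops once the validation metric has not improved for 20 consecutive rounds, and ``best_iteration`` records where the best score occurred.

    bst = lgb.train({'objective': 'binary', 'verbose': -1}, train_set,
                    num_boost_round=1000, valid_sets=[valid_set], valid_names=['valid'],
                    callbacks=[lgb.early_stopping(stopping_rounds=20)])
    print(bst.best_iteration)  # the round with the best validation score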
<SYSTEM_TASK:> Logarithmic loss with non-necessarily-binary labels. <END_TASK> <USER_TASK:> Description: def log_loss(preds, labels): """Logarithmic loss with non-necessarily-binary labels."""
log_likelihood = np.sum(labels * np.log(preds)) / len(preds) return -log_likelihood
<SYSTEM_TASK:> Measure performance of an objective. <END_TASK> <USER_TASK:> Description: def experiment(objective, label_type, data): """Measure performance of an objective. Parameters ---------- objective : string 'binary' or 'xentropy' Objective function. label_type : string 'binary' or 'probability' Type of the label. data : dict Data for training. Returns ------- result : dict Experiment summary stats. """
np.random.seed(0) nrounds = 5 lgb_data = data['lgb_with_' + label_type + '_labels'] params = { 'objective': objective, 'feature_fraction': 1, 'bagging_fraction': 1, 'verbose': -1 } time_zero = time.time() gbm = lgb.train(params, lgb_data, num_boost_round=nrounds) y_fitted = gbm.predict(data['X']) y_true = data[label_type + '_labels'] duration = time.time() - time_zero return { 'time': duration, 'correlation': np.corrcoef(y_fitted, y_true)[0, 1], 'logloss': log_loss(y_fitted, y_true) }
<SYSTEM_TASK:> Check object is not tuple or does not have 2 elements. <END_TASK> <USER_TASK:> Description: def _check_not_tuple_of_2_elements(obj, obj_name='obj'): """Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
<SYSTEM_TASK:> Plot model's feature importances. <END_TASK> <USER_TASK:> Description: def plot_importance(booster, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', xlabel='Feature importance', ylabel='Features', importance_type='split', max_num_features=None, ignore_zero=True, figsize=None, grid=True, precision=None, **kwargs): """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. """
if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot importance.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip_(*tuples) if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip_(values, ylocs): ax.text(x + 1, y, _float2str(x, precision) if importance_type == 'gain' else x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
<SYSTEM_TASK:> Plot one metric during training. <END_TASK> <USER_TASK:> Description: def plot_metric(booster, metric=None, dataset_names=None, ax=None, xlim=None, ylim=None, title='Metric during training', xlabel='Iterations', ylabel='auto', figsize=None, grid=True): """Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : string or None, optional (default=None) The metric name to plot. Only one metric supported because different metrics have various scales. If None, first metric picked from dictionary (according to hashcode). dataset_names : list of strings or None, optional (default=None) List of the dataset names which are used to calculate metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Iterations") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="auto") Y-axis title label. If 'auto', metric name is used. If None, title is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with metric's history over the training. """
if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot metric.') if isinstance(booster, LGBMModel): eval_results = deepcopy(booster.evals_result_) elif isinstance(booster, dict): eval_results = deepcopy(booster) else: raise TypeError('booster must be dict or LGBMModel.') num_data = len(eval_results) if not num_data: raise ValueError('eval results cannot be empty.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) if dataset_names is None: dataset_names = iter(eval_results.keys()) elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names: raise ValueError('dataset_names should be iterable and cannot be empty') else: dataset_names = iter(dataset_names) name = next(dataset_names) # take one as sample metrics_for_one = eval_results[name] num_metric = len(metrics_for_one) if metric is None: if num_metric > 1: msg = """more than one metric available, picking one to plot.""" warnings.warn(msg, stacklevel=2) metric, results = metrics_for_one.popitem() else: if metric not in metrics_for_one: raise KeyError('No given metric in eval results.') results = metrics_for_one[metric] num_iteration, max_result, min_result = len(results), max(results), min(results) x_ = range_(num_iteration) ax.plot(x_, results, label=name) for name in dataset_names: metrics_for_one = eval_results[name] results = metrics_for_one[metric] max_result, min_result = max(max(results), max_result), min(min(results), min_result) ax.plot(x_, results, label=name) ax.legend(loc='best') if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, num_iteration) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: range_result = max_result - min_result ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2) ax.set_ylim(ylim) if ylabel == 'auto': ylabel = metric if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
<SYSTEM_TASK:> Create a digraph representation of specified tree. <END_TASK> <USER_TASK:> Description: def create_tree_digraph(booster, tree_index=0, show_info=None, precision=None, old_name=None, old_comment=None, old_filename=None, old_directory=None, old_format=None, old_engine=None, old_encoding=None, old_graph_attr=None, old_node_attr=None, old_edge_attr=None, old_body=None, old_strict=False, **kwargs): """Create a digraph representation of specified tree. Note ---- For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- graph : graphviz.Digraph The digraph representation of specified tree. """
if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') for param_name in ['old_name', 'old_comment', 'old_filename', 'old_directory', 'old_format', 'old_engine', 'old_encoding', 'old_graph_attr', 'old_node_attr', 'old_edge_attr', 'old_body']: param = locals().get(param_name) if param is not None: warnings.warn('{0} parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass {1} parameter.'.format(param_name, param_name[4:]), LGBMDeprecationWarning) if param_name[4:] not in kwargs: kwargs[param_name[4:]] = param if locals().get('strict'): warnings.warn('old_strict parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass strict parameter.', LGBMDeprecationWarning) if 'strict' not in kwargs: kwargs['strict'] = True model = booster.dump_model() tree_infos = model['tree_info'] if 'feature_names' in model: feature_names = model['feature_names'] else: feature_names = None if tree_index < len(tree_infos): tree_info = tree_infos[tree_index] else: raise IndexError('tree_index is out of range.') if show_info is None: show_info = [] graph = _to_graphviz(tree_info, show_info, feature_names, precision, **kwargs) return graph
<SYSTEM_TASK:> Train a supervised model and return a model object. <END_TASK> <USER_TASK:> Description: def train_supervised( input, lr=0.1, dim=100, ws=5, epoch=5, minCount=1, minCountLabel=0, minn=0, maxn=0, neg=5, wordNgrams=1, loss="softmax", bucket=2000000, thread=multiprocessing.cpu_count() - 1, lrUpdateRate=100, t=1e-4, label="__label__", verbose=2, pretrainedVectors="", ): """ Train a supervised model and return a model object. input must be a filepath. The input text does not need to be tokenized as per the tokenize function, but it must be preprocessed and encoded as UTF-8. You might want to consult standard preprocessing scripts such as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html The input file must contain at least one label per line. For an example consult the example datasets which are part of the fastText repository such as the dataset pulled by classification-example.sh. """
model = "supervised" a = _build_args(locals()) ft = _FastText() fasttext.train(ft.f, a) return ft
<SYSTEM_TASK:> Get the vector representation of word. <END_TASK> <USER_TASK:> Description: def get_word_vector(self, word): """Get the vector representation of word."""
dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getWordVector(b, word) return np.array(b)
<SYSTEM_TASK:> Given a word, get the subwords and their indices. <END_TASK> <USER_TASK:> Description: def get_subwords(self, word, on_unicode_error='strict'): """ Given a word, get the subwords and their indices. """
pair = self.f.getSubwords(word, on_unicode_error) return pair[0], np.array(pair[1])
<SYSTEM_TASK:> Given an index, get the corresponding vector of the Input Matrix. <END_TASK> <USER_TASK:> Description: def get_input_vector(self, ind): """ Given an index, get the corresponding vector of the Input Matrix. """
dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getInputVector(b, ind) return np.array(b)
<SYSTEM_TASK:> Given a string, get a list of labels and a list of <END_TASK> <USER_TASK:> Description: def predict(self, text, k=1, threshold=0.0, on_unicode_error='strict'): """ Given a string, get a list of labels and a list of corresponding probabilities. k controls the number of returned labels. A choice of 5, will return the 5 most probable labels. By default this returns only the most likely label and probability. threshold filters the returned labels by a threshold on probability. A choice of 0.5 will return labels with at least 0.5 probability. k and threshold will be applied together to determine the returned labels. This function assumes to be given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. If the model is not supervised, this function will throw a ValueError. If given a list of strings, it will return a list of results as usually received for a single line of text. """
def check(entry): if entry.find('\n') != -1: raise ValueError( "predict processes one line at a time (remove \'\\n\')" ) entry += "\n" return entry if type(text) == list: text = [check(entry) for entry in text] predictions = self.f.multilinePredict(text, k, threshold, on_unicode_error) dt = np.dtype([('probability', 'float64'), ('label', 'object')]) result_as_pair = np.array(predictions, dtype=dt) return result_as_pair['label'].tolist(), result_as_pair['probability'] else: text = check(text) predictions = self.f.predict(text, k, threshold, on_unicode_error) probs, labels = zip(*predictions) return labels, np.array(probs, copy=False)
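A short usage sketch, assuming a training file in the ``__label__<name> <text>`` format at a hypothetical path:

    import fasttext

    # 'train.txt' is a placeholder path; each line looks like "__label__positive great movie"
    model = fasttext.train_supervised(input='train.txt')
    labels, probs = model.predict("this movie was great", k=2)
    # example output: labels -> ('__label__positive', '__label__negative'), probs -> array([...])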
<SYSTEM_TASK:> Get a copy of the full input matrix of a Model. This only <END_TASK> <USER_TASK:> Description: def get_input_matrix(self): """ Get a copy of the full input matrix of a Model. This only works if the model is not quantized. """
if self.f.isQuant(): raise ValueError("Can't get quantized Matrix") return np.array(self.f.getInputMatrix())
<SYSTEM_TASK:> Get a copy of the full output matrix of a Model. This only <END_TASK> <USER_TASK:> Description: def get_output_matrix(self): """ Get a copy of the full output matrix of a Model. This only works if the model is not quantized. """
if self.f.isQuant(): raise ValueError("Can't get quantized Matrix") return np.array(self.f.getOutputMatrix())
<SYSTEM_TASK:> Get the entire list of words of the dictionary optionally <END_TASK> <USER_TASK:> Description: def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. """
pair = self.f.getVocab(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0]
<SYSTEM_TASK:> Get the entire list of labels of the dictionary optionally <END_TASK> <USER_TASK:> Description: def get_labels(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of labels of the dictionary optionally including the frequency of the individual labels. Unsupervised models use words as labels, which is why get_labels will call and return get_words for this type of model. """
a = self.f.getArgs() if a.model == model_name.supervised: pair = self.f.getLabels(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0] else: return self.get_words(include_freq)
<SYSTEM_TASK:> Quantize the model reducing the size of the model and <END_TASK> <USER_TASK:> Description: def quantize( self, input=None, qout=False, cutoff=0, retrain=False, epoch=None, lr=None, thread=None, verbose=None, dsub=2, qnorm=False ): """ Quantize the model reducing the size of the model and it's memory footprint. """
a = self.f.getArgs() if not epoch: epoch = a.epoch if not lr: lr = a.lr if not thread: thread = a.thread if not verbose: verbose = a.verbose if retrain and not input: raise ValueError("Need input file path if retraining") if input is None: input = "" self.f.quantize( input, qout, cutoff, retrain, epoch, lr, thread, verbose, dsub, qnorm )
<SYSTEM_TASK:> Sanitize turns PyTorch and Numpy types into basic Python types so they <END_TASK> <USER_TASK:> Description: def sanitize(x: Any) -> Any: # pylint: disable=invalid-name,too-many-return-statements """ Sanitize turns PyTorch and Numpy types into basic Python types so they can be serialized into JSON. """
if isinstance(x, (str, float, int, bool)): # x is already serializable return x elif isinstance(x, torch.Tensor): # tensor needs to be converted to a list (and moved to cpu if necessary) return x.cpu().tolist() elif isinstance(x, numpy.ndarray): # array needs to be converted to a list return x.tolist() elif isinstance(x, numpy.number): # pylint: disable=no-member # NumPy numbers need to be converted to Python numbers return x.item() elif isinstance(x, dict): # Dicts need their values sanitized return {key: sanitize(value) for key, value in x.items()} elif isinstance(x, (spacy.tokens.Token, allennlp.data.Token)): # Tokens get sanitized to just their text. return x.text elif isinstance(x, (list, tuple)): # Lists and Tuples need their values sanitized return [sanitize(x_i) for x_i in x] elif x is None: return "None" elif hasattr(x, 'to_json'): return x.to_json() else: raise ValueError(f"Cannot sanitize {x} of type {type(x)}. " "If this is your own custom class, add a `to_json(self)` method " "that returns a JSON-like object.")
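A minimal sketch of ``sanitize`` flattening framework types into JSON-friendly ones; the import path ``allennlp.common.util`` is an assumption:

    import json
    import numpy
    import torch
    from allennlp.common.util import sanitize  # assumed location

    output = {"logits": torch.tensor([0.1, 0.9]), "label": numpy.int64(1)}
    # tensors become lists, numpy scalars become Python ints/floats (values approximate)
    json.dumps(sanitize(output))  # '{"logits": [0.1, 0.9], "label": 1}'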
<SYSTEM_TASK:> Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the <END_TASK> <USER_TASK:> Description: def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]: """ Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the list at the end if the list is not divisible by ``count``. For example: >>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0) [[1, 2, 3], [4, 5, 6], [7, 0, 0]] This is a short method, but it's complicated and hard to remember as a one-liner, so we just make a function out of it. """
return [list(l) for l in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)]
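The one-liner above leans on a standard-library idiom that is easy to forget: repeating the *same* iterator object ``count`` times, so that ``zip_longest`` draws consecutive elements into each output tuple. A small illustrative sketch (the variable names here are ours, not from the source):

from itertools import zip_longest

it = iter([1, 2, 3, 4, 5, 6, 7])
# zip_longest sees the same iterator three times, so each tuple it builds
# advances that iterator by three elements; the last tuple is padded with 0.
groups = [list(group) for group in zip_longest(it, it, it, fillvalue=0)]
print(groups)   # [[1, 2, 3], [4, 5, 6], [7, 0, 0]]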
<SYSTEM_TASK:> Takes an iterator and batches the individual instances into lists of the <END_TASK> <USER_TASK:> Description: def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]: """ Takes an iterator and batches the individual instances into lists of the specified size. The last list may be smaller if there are instances left over. """
return iter(lambda: list(islice(iterator, 0, group_size)), [])
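The two-argument form of ``iter`` used here calls the lambda repeatedly and stops as soon as it returns the sentinel value ``[]``, i.e. once the underlying iterator is exhausted. A usage sketch assuming the ``lazy_groups_of`` definition above (the data is made up):

instances = iter(range(7))
for group in lazy_groups_of(instances, 3):
    print(group)
# [0, 1, 2]
# [3, 4, 5]
# [6]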
<SYSTEM_TASK:> Takes a list of objects and pads it to the desired length, returning the padded list. The <END_TASK> <USER_TASK:> Description: def pad_sequence_to_length(sequence: List, desired_length: int, default_value: Callable[[], Any] = lambda: 0, padding_on_right: bool = True) -> List: """ Takes a list of objects and pads it to the desired length, returning the padded list. The original list is not modified. Parameters ---------- sequence : List A list of objects to be padded. desired_length : int Maximum length of each sequence. Longer sequences are truncated to this length, and shorter ones are padded to it. default_value: Callable, default=lambda: 0 Callable that outputs a default value (of any type) to use as padding values. This is a lambda to avoid using the same object when the default value is more complex, like a list. padding_on_right : bool, default=True When we add padding tokens (or truncate the sequence), should we do it on the right or the left? Returns ------- padded_sequence : List """
# Truncates the sequence to the desired length.
if padding_on_right:
    padded_sequence = sequence[:desired_length]
else:
    padded_sequence = sequence[-desired_length:]
# Continues to pad with default_value() until we reach the desired length.
for _ in range(desired_length - len(padded_sequence)):
    if padding_on_right:
        padded_sequence.append(default_value())
    else:
        padded_sequence.insert(0, default_value())
return padded_sequence
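A few illustrative calls, assuming the definition above; note that the ``default_value`` callable is what makes padding with mutable objects (e.g. a fresh list per padded slot) safe:

print(pad_sequence_to_length([1, 2, 3], 5))             # [1, 2, 3, 0, 0]
print(pad_sequence_to_length([1, 2, 3, 4, 5, 6], 4))    # [1, 2, 3, 4]  (truncated)
print(pad_sequence_to_length(['a'], 3,
                             default_value=lambda: '<pad>',
                             padding_on_right=False))    # ['<pad>', '<pad>', 'a']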
<SYSTEM_TASK:> Returns a new dictionary with noise added to every key in ``dictionary``. The noise is <END_TASK> <USER_TASK:> Description: def add_noise_to_dict_values(dictionary: Dict[A, float], noise_param: float) -> Dict[A, float]: """ Returns a new dictionary with noise added to every key in ``dictionary``. The noise is uniformly distributed within ``noise_param`` percent of the value for every value in the dictionary. """
new_dict = {}
for key, value in dictionary.items():
    noise_value = value * noise_param
    noise = random.uniform(-noise_value, noise_value)
    new_dict[key] = value + noise
return new_dict
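A quick sketch of the noise bounds, assuming the function above: with ``noise_param=0.2``, a value of 10.0 always comes back somewhere in [8.0, 12.0].

import random

random.seed(0)
noisy = add_noise_to_dict_values({'sentence_length': 10.0}, 0.2)
print(noisy)   # e.g. {'sentence_length': 11.3...}; always within 20% of the original value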
<SYSTEM_TASK:> This function configures 3 global logging attributes - streaming stdout and stderr <END_TASK> <USER_TASK:> Description: def prepare_global_logging(serialization_dir: str, file_friendly_logging: bool) -> logging.FileHandler: """ This function configures 3 global logging attributes - streaming stdout and stderr to a file as well as the terminal, setting the formatting for the python logging library and setting the interval frequency for the Tqdm progress bar. Note that this function does not set the logging level, which is set in ``allennlp/run.py``. Parameters ---------- serialization_dir : ``str``, required. The directory to stream logs to. file_friendly_logging : ``bool``, required. Whether logs should clean the output to prevent carriage returns (used to update progress bars on a single terminal line). This option is typically only used if you are running in an environment without a terminal. Returns ------- ``logging.FileHandler`` A logging file handler that can later be closed and removed from the global logger. """
# If we don't have a terminal as stdout,
# force tqdm to be nicer.
if not sys.stdout.isatty():
    file_friendly_logging = True

Tqdm.set_slower_interval(file_friendly_logging)
std_out_file = os.path.join(serialization_dir, "stdout.log")
sys.stdout = TeeLogger(std_out_file,  # type: ignore
                       sys.stdout,
                       file_friendly_logging)
sys.stderr = TeeLogger(os.path.join(serialization_dir, "stderr.log"),  # type: ignore
                       sys.stderr,
                       file_friendly_logging)

stdout_handler = logging.FileHandler(std_out_file)
stdout_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
logging.getLogger().addHandler(stdout_handler)
return stdout_handler
<SYSTEM_TASK:> This function closes any open file handles and logs set up by `prepare_global_logging`. <END_TASK> <USER_TASK:> Description: def cleanup_global_logging(stdout_handler: logging.FileHandler) -> None: """ This function closes any open file handles and logs set up by `prepare_global_logging`. Parameters ---------- stdout_handler : ``logging.FileHandler``, required. The file handler returned from `prepare_global_logging`, attached to the global logger. """
stdout_handler.close()
logging.getLogger().removeHandler(stdout_handler)
if isinstance(sys.stdout, TeeLogger):
    sys.stdout = sys.stdout.cleanup()
if isinstance(sys.stderr, TeeLogger):
    sys.stderr = sys.stderr.cleanup()
<SYSTEM_TASK:> In order to avoid loading spacy models a whole bunch of times, we'll save references to them, <END_TASK> <USER_TASK:> Description: def get_spacy_model(spacy_model_name: str, pos_tags: bool, parse: bool, ner: bool) -> SpacyModelType: """ In order to avoid loading spacy models a whole bunch of times, we'll save references to them, keyed by the options we used to create the spacy model, so any particular configuration only gets loaded once. """
options = (spacy_model_name, pos_tags, parse, ner)
if options not in LOADED_SPACY_MODELS:
    disable = ['vectors', 'textcat']
    if not pos_tags:
        disable.append('tagger')
    if not parse:
        disable.append('parser')
    if not ner:
        disable.append('ner')
    try:
        spacy_model = spacy.load(spacy_model_name, disable=disable)
    except OSError:
        logger.warning(f"Spacy models '{spacy_model_name}' not found. Downloading and installing.")
        spacy_download(spacy_model_name)
        # NOTE(mattg): The following four lines are a workaround suggested by Ines for spacy
        # 2.1.0, which removed the linking that was done in spacy 2.0. importlib doesn't find
        # packages that were installed in the same python session, so the way `spacy_download`
        # works in 2.1.0 is broken for this use case. These four lines can probably be removed
        # at some point in the future, once spacy has figured out a better way to handle this.
        # See https://github.com/explosion/spaCy/issues/3435.
        from spacy.cli import link
        from spacy.util import get_package_path
        package_path = get_package_path(spacy_model_name)
        link(spacy_model_name, spacy_model_name, model_path=package_path)
        spacy_model = spacy.load(spacy_model_name, disable=disable)

    LOADED_SPACY_MODELS[options] = spacy_model
return LOADED_SPACY_MODELS[options]
<SYSTEM_TASK:> Import all submodules under the given package. <END_TASK> <USER_TASK:> Description: def import_submodules(package_name: str) -> None: """ Import all submodules under the given package. Primarily useful so that people using AllenNLP as a library can specify their own custom packages and have their custom classes get loaded and registered. """
importlib.invalidate_caches()

# For some reason, python doesn't always add this by default to your path, but you pretty much
# always want it when using `--include-package`. And if it's already there, adding it again at
# the end won't hurt anything.
sys.path.append('.')

# Import at top level
module = importlib.import_module(package_name)
path = getattr(module, '__path__', [])
path_string = '' if not path else path[0]

# walk_packages only finds immediate children, so need to recurse.
for module_finder, name, _ in pkgutil.walk_packages(path):
    # Sometimes when you import third-party libraries that are on your path,
    # `pkgutil.walk_packages` returns those too, so we need to skip them.
    if path_string and module_finder.path != path_string:
        continue
    subpackage = f"{package_name}.{name}"
    import_submodules(subpackage)
<SYSTEM_TASK:> An Iterable may be a list or a generator. <END_TASK> <USER_TASK:> Description: def ensure_list(iterable: Iterable[A]) -> List[A]: """ An Iterable may be a list or a generator. This ensures we get a list without making an unnecessary copy. """
if isinstance(iterable, list):
    return iterable
else:
    return list(iterable)
<SYSTEM_TASK:> Takes an action index, updates checklist and returns an updated state. <END_TASK> <USER_TASK:> Description: def update(self, action: torch.Tensor) -> 'ChecklistStatelet': """ Takes an action index, updates checklist and returns an updated state. """
checklist_addition = (self.terminal_actions == action).float()
new_checklist = self.checklist + checklist_addition
new_checklist_state = ChecklistStatelet(terminal_actions=self.terminal_actions,
                                        checklist_target=self.checklist_target,
                                        checklist_mask=self.checklist_mask,
                                        checklist=new_checklist,
                                        terminal_indices_dict=self.terminal_indices_dict)
return new_checklist_state
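A minimal tensor sketch of the comparison trick above (shapes and action ids are illustrative, not from the source): comparing the column of terminal action ids against the chosen action id yields a one-hot column that is added to the running checklist counts.

import torch

terminal_actions = torch.tensor([[3], [7], [9], [12]])   # (num_terminal_actions, 1)
checklist = torch.zeros(4, 1)
action = torch.tensor(7)                                 # the action just taken
checklist = checklist + (terminal_actions == action).float()
print(checklist.squeeze(1).tolist())                     # [0.0, 1.0, 0.0, 0.0]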
<SYSTEM_TASK:> Finds the production rule matching the filter function in the given type's valid action <END_TASK> <USER_TASK:> Description: def _remove_action_from_type(valid_actions: Dict[str, List[str]], type_: str, filter_function: Callable[[str], bool]) -> None: """ Finds the production rule matching the filter function in the given type's valid action list, and removes it. If there is more than one matching function, we crash. """
action_list = valid_actions[type_]
matching_action_index = [i for i, action in enumerate(action_list) if filter_function(action)]
assert len(matching_action_index) == 1, "Filter function didn't find one action"
action_list.pop(matching_action_index[0])
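An illustrative call with a made-up grammar fragment; the production strings below are hypothetical, not from the source:

valid_actions = {'<r,r>': ['<r,r> -> first', '<r,r> -> last', '<r,r> -> next']}
_remove_action_from_type(valid_actions, '<r,r>', lambda action: action.endswith('last'))
print(valid_actions['<r,r>'])   # ['<r,r> -> first', '<r,r> -> next']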
<SYSTEM_TASK:> This method returns the indices of each entity's neighbors. A tensor <END_TASK> <USER_TASK:> Description: def _get_neighbor_indices(worlds: List[WikiTablesWorld], num_entities: int, tensor: torch.Tensor) -> torch.LongTensor: """ This method returns the indices of each entity's neighbors. A tensor is accepted as a parameter for copying purposes. Parameters ---------- worlds : ``List[WikiTablesWorld]`` num_entities : ``int`` tensor : ``torch.Tensor`` Used for copying the constructed list onto the right device. Returns ------- A ``torch.LongTensor`` with shape ``(batch_size, num_entities, num_neighbors)``. It is padded with -1 instead of 0, since 0 is a valid neighbor index. """
num_neighbors = 0
for world in worlds:
    for entity in world.table_graph.entities:
        if len(world.table_graph.neighbors[entity]) > num_neighbors:
            num_neighbors = len(world.table_graph.neighbors[entity])

batch_neighbors = []
for world in worlds:
    # Each batch instance has its own world, which has a corresponding table.
    entities = world.table_graph.entities
    entity2index = {entity: i for i, entity in enumerate(entities)}
    entity2neighbors = world.table_graph.neighbors
    neighbor_indexes = []
    for entity in entities:
        entity_neighbors = [entity2index[n] for n in entity2neighbors[entity]]
        # Pad with -1 instead of 0, since 0 represents a neighbor index.
        padded = pad_sequence_to_length(entity_neighbors, num_neighbors, lambda: -1)
        neighbor_indexes.append(padded)
    neighbor_indexes = pad_sequence_to_length(neighbor_indexes, num_entities,
                                              lambda: [-1] * num_neighbors)
    batch_neighbors.append(neighbor_indexes)
return tensor.new_tensor(batch_neighbors, dtype=torch.long)
<SYSTEM_TASK:> Produces the probability of an entity given a question word and type. The logic below <END_TASK> <USER_TASK:> Description: def _get_linking_probabilities(self, worlds: List[WikiTablesWorld], linking_scores: torch.FloatTensor, question_mask: torch.LongTensor, entity_type_dict: Dict[int, int]) -> torch.FloatTensor: """ Produces the probability of an entity given a question word and type. The logic below separates the entities by type since the softmax normalization term sums over entities of a single type. Parameters ---------- worlds : ``List[WikiTablesWorld]`` linking_scores : ``torch.FloatTensor`` Has shape (batch_size, num_question_tokens, num_entities). question_mask: ``torch.LongTensor`` Has shape (batch_size, num_question_tokens). entity_type_dict : ``Dict[int, int]`` This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id. Returns ------- batch_probabilities : ``torch.FloatTensor`` Has shape ``(batch_size, num_question_tokens, num_entities)``. Contains all the probabilities for an entity given a question word. """
_, num_question_tokens, num_entities = linking_scores.size()
batch_probabilities = []

for batch_index, world in enumerate(worlds):
    all_probabilities = []
    num_entities_in_instance = 0

    # NOTE: The way that we're doing this here relies on the fact that entities are
    # implicitly sorted by their types when we sort them by name, and that numbers come
    # before "fb:cell", and "fb:cell" comes before "fb:row". This is not a great
    # assumption, and could easily break later, but it should work for now.
    for type_index in range(self._num_entity_types):
        # This index of 0 is for the null entity for each type, representing the case where a
        # word doesn't link to any entity.
        entity_indices = [0]
        entities = world.table_graph.entities
        for entity_index, _ in enumerate(entities):
            if entity_type_dict[batch_index * num_entities + entity_index] == type_index:
                entity_indices.append(entity_index)

        if len(entity_indices) == 1:
            # No entities of this type; move along...
            continue

        # We're subtracting one here because of the null entity we added above.
        num_entities_in_instance += len(entity_indices) - 1

        # We separate the scores by type, since normalization is done per type. There's an
        # extra "null" entity per type, also, so we have `num_entities_per_type + 1`. We're
        # selecting from a (num_question_tokens, num_entities) linking tensor on _dimension 1_,
        # so we get back something of shape (num_question_tokens,) for each index we're
        # selecting. All of the selected indices together then make a tensor of shape
        # (num_question_tokens, num_entities_per_type + 1).
        indices = linking_scores.new_tensor(entity_indices, dtype=torch.long)
        entity_scores = linking_scores[batch_index].index_select(1, indices)

        # We used index 0 for the null entity, so this will actually have some values in it.
        # But we want the null entity's score to be 0, so we set that here.
        entity_scores[:, 0] = 0

        # No need for a mask here, as this is done per batch instance, with no padding.
        type_probabilities = torch.nn.functional.softmax(entity_scores, dim=1)
        all_probabilities.append(type_probabilities[:, 1:])

    # We need to add padding here if we don't have the right number of entities.
    if num_entities_in_instance != num_entities:
        zeros = linking_scores.new_zeros(num_question_tokens,
                                         num_entities - num_entities_in_instance)
        all_probabilities.append(zeros)

    # (num_question_tokens, num_entities)
    probabilities = torch.cat(all_probabilities, dim=1)
    batch_probabilities.append(probabilities)
batch_probabilities = torch.stack(batch_probabilities, dim=0)
return batch_probabilities * question_mask.unsqueeze(-1).float()
<SYSTEM_TASK:> This method creates the LambdaGrammarStatelet object that's used for decoding. Part of <END_TASK> <USER_TASK:> Description: def _create_grammar_state(self, world: WikiTablesWorld, possible_actions: List[ProductionRule], linking_scores: torch.Tensor, entity_types: torch.Tensor) -> LambdaGrammarStatelet: """ This method creates the LambdaGrammarStatelet object that's used for decoding. Part of creating that is creating the `valid_actions` dictionary, which contains embedded representations of all of the valid actions. So, we create that here as well. The way we represent the valid expansions is a little complicated: we use a dictionary of `action types`, where the key is the action type (like "global", "linked", or whatever your model is expecting), and the value is a tuple representing all actions of that type. The tuple is (input tensor, output tensor, action id). The input tensor has the representation that is used when `selecting` actions, for all actions of this type. The output tensor has the representation that is used when feeding the action to the next step of the decoder (this could just be the same as the input tensor). The action ids are a list of indices into the main action list for each batch instance. The inputs to this method are for a `single instance in the batch`; none of the tensors we create here are batched. We grab the global action ids from the input ``ProductionRules``, and we use those to embed the valid actions for every non-terminal type. We use the input ``linking_scores`` for non-global actions. Parameters ---------- world : ``WikiTablesWorld`` From the input to ``forward`` for a single batch instance. possible_actions : ``List[ProductionRule]`` From the input to ``forward`` for a single batch instance. linking_scores : ``torch.Tensor`` Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch dimension). entity_types : ``torch.Tensor`` Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension). """
# TODO(mattg): Move the "valid_actions" construction to another method.
action_map = {}
for action_index, action in enumerate(possible_actions):
    action_string = action[0]
    action_map[action_string] = action_index
entity_map = {}
for entity_index, entity in enumerate(world.table_graph.entities):
    entity_map[entity] = entity_index

valid_actions = world.get_valid_actions()
translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}
for key, action_strings in valid_actions.items():
    translated_valid_actions[key] = {}
    # `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
    # productions of that non-terminal. We'll first split those productions by global vs.
    # linked action.
    action_indices = [action_map[action_string] for action_string in action_strings]
    production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
    global_actions = []
    linked_actions = []
    for production_rule_array, action_index in production_rule_arrays:
        if production_rule_array[1]:
            global_actions.append((production_rule_array[2], action_index))
        else:
            linked_actions.append((production_rule_array[0], action_index))

    # Then we get the embedded representations of the global actions.
    global_action_tensors, global_action_ids = zip(*global_actions)
    global_action_tensor = torch.cat(global_action_tensors, dim=0)
    global_input_embeddings = self._action_embedder(global_action_tensor)
    if self._add_action_bias:
        global_action_biases = self._action_biases(global_action_tensor)
        global_input_embeddings = torch.cat([global_input_embeddings, global_action_biases], dim=-1)
    global_output_embeddings = self._output_action_embedder(global_action_tensor)
    translated_valid_actions[key]['global'] = (global_input_embeddings,
                                               global_output_embeddings,
                                               list(global_action_ids))

    # Then the representations of the linked actions.
    if linked_actions:
        linked_rules, linked_action_ids = zip(*linked_actions)
        entities = [rule.split(' -> ')[1] for rule in linked_rules]
        entity_ids = [entity_map[entity] for entity in entities]
        # (num_linked_actions, num_question_tokens)
        entity_linking_scores = linking_scores[entity_ids]
        # (num_linked_actions,)
        entity_type_tensor = entity_types[entity_ids]
        # (num_linked_actions, entity_type_embedding_dim)
        entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)
        translated_valid_actions[key]['linked'] = (entity_linking_scores,
                                                   entity_type_embeddings,
                                                   list(linked_action_ids))

# Lastly, we need to also create embedded representations of context-specific actions. In
# this case, those are only variable productions, like "r -> x". Note that our language
# only permits one lambda at a time, so we don't need to worry about how nested lambdas
# might impact this.
context_actions = {}
for action_id, action in enumerate(possible_actions):
    if action[0].endswith(" -> x"):
        input_embedding = self._action_embedder(action[2])
        if self._add_action_bias:
            input_bias = self._action_biases(action[2])
            input_embedding = torch.cat([input_embedding, input_bias], dim=-1)
        output_embedding = self._output_action_embedder(action[2])
        context_actions[action[0]] = (input_embedding, output_embedding, action_id)

return LambdaGrammarStatelet([START_SYMBOL],
                             {},
                             translated_valid_actions,
                             context_actions,
                             type_declaration.is_nonterminal)
<SYSTEM_TASK:> Gets the logits of desired terminal actions yet to be produced by the decoder, and <END_TASK> <USER_TASK:> Description: def _get_linked_logits_addition(checklist_state: ChecklistStatelet, action_ids: List[int], action_logits: torch.Tensor) -> torch.Tensor: """ Gets the logits of desired terminal actions yet to be produced by the decoder, and returns them for the decoder to add to the prior action logits, biasing the model towards predicting missing linked actions. """
# Our basic approach here will be to figure out which actions we want to bias, by doing
# some fancy indexing work, then multiply the action embeddings by a mask for those
# actions, and return the sum of the result.

# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the
# checklist, and 0 otherwise.
checklist_balance = checklist_state.get_balance().clamp(min=0)

# (num_terminal_actions, 1)
actions_in_agenda = checklist_state.terminal_actions
# (1, num_current_actions)
action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0)
# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the
# terminal action i is our current action j, and a value of 0 otherwise. Because both sets
# of actions are free of duplicates, there will be at most one non-zero value per current
# action, and per terminal action.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()

# Shape: (num_current_actions,). With the inner multiplication, we remove any current
# agenda actions that are not in our checklist balance, then we sum over the terminal
# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,
# where a 1 means to encourage the current action in that position.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)

# Shape: (num_current_actions,). This is the sum of the action embeddings that we want
# the model to prefer.
logit_addition = action_logits * actions_to_encourage
return logit_addition
<SYSTEM_TASK:> Walk over action space to collect completed paths of at most ``self._max_path_length`` steps. <END_TASK> <USER_TASK:> Description: def _walk(self) -> None: """ Walk over action space to collect completed paths of at most ``self._max_path_length`` steps. """
# Buffer of NTs to expand, previous actions
incomplete_paths = [([str(type_)], [f"{START_SYMBOL} -> {type_}"])
                    for type_ in self._world.get_valid_starting_types()]

self._completed_paths = []
actions = self._world.get_valid_actions()
# Keeps track of `MultiMatchNamedBasicTypes` to substitute them with appropriate types.
multi_match_substitutions = self._world.get_multi_match_mapping()

# Overview: We keep track of the buffer of non-terminals to expand, and the action history
# for each incomplete path. At every iteration in the while loop below, we iterate over all
# incomplete paths, expand one non-terminal from the buffer in a depth-first fashion, get
# all possible next actions triggered by that non-terminal and add to the paths. Then, we
# check the expanded paths, to see if they are 1) complete, in which case they are
# added to completed_paths, 2) longer than max_path_length, in which case they are
# discarded, or 3) neither, in which case they are used to form the incomplete_paths for the
# next iteration of this while loop.
# While the non-terminal expansion is done in a depth-first fashion, note that the search over
# the action space itself is breadth-first.
while incomplete_paths:
    next_paths = []
    for nonterminal_buffer, history in incomplete_paths:
        # Taking the last non-terminal added to the buffer. We're going depth-first.
        nonterminal = nonterminal_buffer.pop()
        next_actions = []
        if nonterminal in multi_match_substitutions:
            for current_nonterminal in [nonterminal] + multi_match_substitutions[nonterminal]:
                if current_nonterminal in actions:
                    next_actions.extend(actions[current_nonterminal])
        elif nonterminal not in actions:
            # This happens when the nonterminal corresponds to a type that does not exist in
            # the context. For example, in the variable free variant of the WikiTables
            # world, there are nonterminals for specific column types (like date). Say we
            # produced a path containing "filter_date_greater" already, and we do not have
            # any columns of type "date", then this condition would be triggered. We should
            # just discard those paths.
            continue
        else:
            next_actions.extend(actions[nonterminal])
        # Iterating over all possible next actions.
        for action in next_actions:
            new_history = history + [action]
            new_nonterminal_buffer = nonterminal_buffer[:]
            # Since we expand the last action added to the buffer, the left child should be
            # added after the right child.
            for right_side_part in reversed(self._get_right_side_parts(action)):
                if types.is_nonterminal(right_side_part):
                    new_nonterminal_buffer.append(right_side_part)
            next_paths.append((new_nonterminal_buffer, new_history))
    incomplete_paths = []
    for nonterminal_buffer, path in next_paths:
        # An empty buffer means that we've completed this path.
        if not nonterminal_buffer:
            # Indexing completed paths by the nonterminals they contain.
            next_path_index = len(self._completed_paths)
            for action in path:
                for value in self._get_right_side_parts(action):
                    if not types.is_nonterminal(value):
                        self._terminal_path_index[action].add(next_path_index)
            self._completed_paths.append(path)
        # We're adding to incomplete_paths for the next iteration, only those paths that are
        # shorter than the max_path_length. The remaining paths will be discarded.
        elif len(path) <= self._max_path_length:
            incomplete_paths.append((nonterminal_buffer, path))
<SYSTEM_TASK:> Check that all the instances have the same types. <END_TASK> <USER_TASK:> Description: def _check_types(self) -> None: """ Check that all the instances have the same types. """
all_instance_fields_and_types: List[Dict[str, str]] = [{k: v.__class__.__name__ for k, v in x.fields.items()}
                                                       for x in self.instances]
# Check all the field names and Field types are the same for every instance.
if not all([all_instance_fields_and_types[0] == x for x in all_instance_fields_and_types]):
    raise ConfigurationError("You cannot construct a Batch with non-homogeneous Instances.")
<SYSTEM_TASK:> This method converts this ``Batch`` into a set of pytorch Tensors that can be passed <END_TASK> <USER_TASK:> Description: def as_tensor_dict(self, padding_lengths: Dict[str, Dict[str, int]] = None, verbose: bool = False) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]: # This complex return type is actually predefined elsewhere as a DataArray, # but we can't use it because mypy doesn't like it. """ This method converts this ``Batch`` into a set of pytorch Tensors that can be passed through a model. In order for the tensors to be valid tensors, all ``Instances`` in this batch need to be padded to the same lengths wherever padding is necessary, so we do that first, then we combine all of the tensors for each field in each instance into a set of batched tensors for each field. Parameters ---------- padding_lengths : ``Dict[str, Dict[str, int]]`` If a key is present in this dictionary with a non-``None`` value, we will pad to that length instead of the length calculated from the data. This lets you, e.g., set a maximum value for sentence length if you want to throw out long sequences. Entries in this dictionary are keyed first by field name (e.g., "question"), then by padding key (e.g., "num_tokens"). verbose : ``bool``, optional (default=``False``) Should we output logging information when we're doing this padding? If the batch is large, this is nice to have, because padding a large batch could take a long time. But if you're doing this inside of a data generator, having all of this output per batch is a bit obnoxious (and really slow). Returns ------- tensors : ``Dict[str, DataArray]`` A dictionary of tensors, keyed by field name, suitable for passing as input to a model. This is a `batch` of instances, so, e.g., if the instances have a "question" field and an "answer" field, the "question" fields for all of the instances will be grouped together into a single tensor, and the "answer" fields for all instances will be similarly grouped in a parallel set of tensors, for batched computation. Additionally, for complex ``Fields``, the value of the dictionary key is not necessarily a single tensor. For example, with the ``TextField``, the output is a dictionary mapping ``TokenIndexer`` keys to tensors. The number of elements in this sub-dictionary therefore corresponds to the number of ``TokenIndexers`` used to index the ``TextField``. Each ``Field`` class is responsible for batching its own output. """
if padding_lengths is None:
    padding_lengths = defaultdict(dict)

# First we need to decide _how much_ to pad. To do that, we find the max length for all
# relevant padding decisions from the instances themselves. Then we check whether we were
# given a max length for a particular field and padding key. If we were, we use that
# instead of the instance-based one.
if verbose:
    logger.info("Padding batch of size %d to lengths %s", len(self.instances), str(padding_lengths))
    logger.info("Getting max lengths from instances")
instance_padding_lengths = self.get_padding_lengths()
if verbose:
    logger.info("Instance max lengths: %s", str(instance_padding_lengths))
lengths_to_use: Dict[str, Dict[str, int]] = defaultdict(dict)
for field_name, instance_field_lengths in instance_padding_lengths.items():
    for padding_key in instance_field_lengths.keys():
        if padding_lengths[field_name].get(padding_key) is not None:
            lengths_to_use[field_name][padding_key] = padding_lengths[field_name][padding_key]
        else:
            lengths_to_use[field_name][padding_key] = instance_field_lengths[padding_key]

# Now we actually pad the instances to tensors.
field_tensors: Dict[str, list] = defaultdict(list)
if verbose:
    logger.info("Now actually padding instances to length: %s", str(lengths_to_use))
for instance in self.instances:
    for field, tensors in instance.as_tensor_dict(lengths_to_use).items():
        field_tensors[field].append(tensors)

# Finally, we combine the tensors that we got for each instance into one big tensor (or set
# of tensors) per field. The `Field` classes themselves have the logic for batching the
# tensors together, so we grab a dictionary of field_name -> field class from the first
# instance in the batch.
field_classes = self.instances[0].fields
final_fields = {}
for field_name, field_tensor_list in field_tensors.items():
    final_fields[field_name] = field_classes[field_name].batch_tensors(field_tensor_list)
return final_fields
<SYSTEM_TASK:> Based on the current utterance, return a dictionary where the keys are the strings in <END_TASK> <USER_TASK:> Description: def get_strings_from_utterance(tokenized_utterance: List[Token]) -> Dict[str, List[int]]: """ Based on the current utterance, return a dictionary where the keys are the strings in the database that map to lists of the token indices that they are linked to. """
string_linking_scores: Dict[str, List[int]] = defaultdict(list)

for index, token in enumerate(tokenized_utterance):
    for string in ATIS_TRIGGER_DICT.get(token.text.lower(), []):
        string_linking_scores[string].append(index)

token_bigrams = bigrams([token.text for token in tokenized_utterance])
for index, token_bigram in enumerate(token_bigrams):
    for string in ATIS_TRIGGER_DICT.get(' '.join(token_bigram).lower(), []):
        string_linking_scores[string].extend([index, index + 1])

trigrams = ngrams([token.text for token in tokenized_utterance], 3)
for index, trigram in enumerate(trigrams):
    if trigram[0] == 'st':
        natural_language_key = f'st. {trigram[2]}'.lower()
    else:
        natural_language_key = ' '.join(trigram).lower()
    for string in ATIS_TRIGGER_DICT.get(natural_language_key, []):
        string_linking_scores[string].extend([index, index + 1, index + 2])
return string_linking_scores
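A self-contained sketch of the same unigram/bigram linking pattern, with a hypothetical trigger dictionary standing in for ``ATIS_TRIGGER_DICT`` (the entries are made up):

from collections import defaultdict
from nltk import bigrams

trigger_dict = {'morning': ['0', '1200'], 'san francisco': ['SFO']}
tokens = ['morning', 'flights', 'to', 'san', 'francisco']
scores = defaultdict(list)
for index, token in enumerate(tokens):
    for string in trigger_dict.get(token, []):
        scores[string].append(index)
for index, bigram in enumerate(bigrams(tokens)):
    for string in trigger_dict.get(' '.join(bigram), []):
        scores[string].extend([index, index + 1])
print(dict(scores))   # {'0': [0], '1200': [0], 'SFO': [3, 4]}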
<SYSTEM_TASK:> We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also <END_TASK> <USER_TASK:> Description: def _update_grammar(self): """ We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also has the new entities that are extracted from the utterance. Stitching together the expressions to form the grammar is a little tedious here, but it is worth it because we don't have to create a new grammar from scratch. Creating a new grammar is expensive because we have many production rules that have all database values in the column on the right hand side. We update the expressions bottom up, since the higher level expressions may refer to the lower level ones. For example, the ternary expression will refer to the start and end times. """
# This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
# contains ``Expression`` objects that have tuples containing the members of that expression.
# We have to create new sub-expression objects so that original grammar is not mutated.
new_grammar = copy(AtisWorld.sql_table_context.grammar)

for numeric_nonterminal in NUMERIC_NONTERMINALS:
    self._add_numeric_nonterminal_to_grammar(numeric_nonterminal, new_grammar)
self._update_expression_reference(new_grammar, 'pos_value', 'number')

ternary_expressions = [
        self._get_sequence_with_spacing(new_grammar,
                                        [new_grammar['col_ref'], Literal('BETWEEN'),
                                         new_grammar['time_range_start'], Literal(f'AND'),
                                         new_grammar['time_range_end']]),
        self._get_sequence_with_spacing(new_grammar,
                                        [new_grammar['col_ref'], Literal('NOT'), Literal('BETWEEN'),
                                         new_grammar['time_range_start'], Literal(f'AND'),
                                         new_grammar['time_range_end']]),
        self._get_sequence_with_spacing(new_grammar,
                                        [new_grammar['col_ref'], Literal('not'), Literal('BETWEEN'),
                                         new_grammar['time_range_start'], Literal(f'AND'),
                                         new_grammar['time_range_end']])]

new_grammar['ternaryexpr'] = OneOf(*ternary_expressions, name='ternaryexpr')
self._update_expression_reference(new_grammar, 'condition', 'ternaryexpr')

new_binary_expressions = []

fare_round_trip_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'), Literal('.'), Literal('round_trip_cost'),
                                         new_grammar['binaryop'], new_grammar['fare_round_trip_cost']])
new_binary_expressions.append(fare_round_trip_cost_expression)

fare_one_direction_cost_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('fare'), Literal('.'), Literal('one_direction_cost'),
                                         new_grammar['binaryop'], new_grammar['fare_one_direction_cost']])
new_binary_expressions.append(fare_one_direction_cost_expression)

flight_number_expression = \
        self._get_sequence_with_spacing(new_grammar,
                                        [Literal('flight'), Literal('.'), Literal('flight_number'),
                                         new_grammar['binaryop'], new_grammar['flight_number']])
new_binary_expressions.append(flight_number_expression)

if self.dates:
    year_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                             [Literal('date_day'), Literal('.'),
                                                              Literal('year'), new_grammar['binaryop'],
                                                              new_grammar['year_number']])
    month_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                              [Literal('date_day'), Literal('.'),
                                                               Literal('month_number'), new_grammar['binaryop'],
                                                               new_grammar['month_number']])
    day_binary_expression = self._get_sequence_with_spacing(new_grammar,
                                                            [Literal('date_day'), Literal('.'),
                                                             Literal('day_number'), new_grammar['binaryop'],
                                                             new_grammar['day_number']])
    new_binary_expressions.extend([year_binary_expression,
                                   month_binary_expression,
                                   day_binary_expression])

new_binary_expressions = new_binary_expressions + list(new_grammar['biexpr'].members)
new_grammar['biexpr'] = OneOf(*new_binary_expressions, name='biexpr')
self._update_expression_reference(new_grammar, 'condition', 'biexpr')
return new_grammar
<SYSTEM_TASK:> When we add a new expression, there may be other expressions that refer to <END_TASK> <USER_TASK:> Description: def _update_expression_reference(self, # pylint: disable=no-self-use grammar: Grammar, parent_expression_nonterminal: str, child_expression_nonterminal: str) -> None: """ When we add a new expression, there may be other expressions that refer to it, and we need to update those to point to the new expression. """
grammar[parent_expression_nonterminal].members = \
        [member if member.name != child_expression_nonterminal
         else grammar[child_expression_nonterminal]
         for member in grammar[parent_expression_nonterminal].members]
<SYSTEM_TASK:> This is a helper method for generating sequences, since we often want a list of expressions <END_TASK> <USER_TASK:> Description: def _get_sequence_with_spacing(self, # pylint: disable=no-self-use new_grammar, expressions: List[Expression], name: str = '') -> Sequence: """ This is a helper method for generating sequences, since we often want a list of expressions with whitespaces between them. """
expressions = [subexpression
               for expression in expressions
               for subexpression in (expression, new_grammar['ws'])]
return Sequence(*expressions, name=name)
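The nested comprehension above interleaves a whitespace expression after every element. A sketch with plain strings standing in for parsimonious ``Expression`` objects:

expressions = ['SELECT', 'col_ref', 'FROM', 'table_name']
ws = ' '   # stands in for new_grammar['ws']
interleaved = [part for expression in expressions for part in (expression, ws)]
print(interleaved)   # ['SELECT', ' ', 'col_ref', ' ', 'FROM', ' ', 'table_name', ' ']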
<SYSTEM_TASK:> This method gets entities from the current utterance and finds which tokens they are linked to. <END_TASK> <USER_TASK:> Description: def _get_linked_entities(self) -> Dict[str, Dict[str, Tuple[str, str, List[int]]]]: """ This method gets entities from the current utterance and finds which tokens they are linked to. The entities are divided into two main groups, ``numbers`` and ``strings``. We rely on these entities later for updating the valid actions and the grammar. """
current_tokenized_utterance = [] if not self.tokenized_utterances \
        else self.tokenized_utterances[-1]

# We generate a dictionary where the key is the type eg. ``number`` or ``string``.
# The value is another dictionary where the key is the action and the value is a tuple
# of the nonterminal, the string value and the linking score.
entity_linking_scores: Dict[str, Dict[str, Tuple[str, str, List[int]]]] = {}

number_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}
string_linking_scores: Dict[str, Tuple[str, str, List[int]]] = {}

# Get time range start
self.add_to_number_linking_scores({'0'}, number_linking_scores,
                                  get_time_range_start_from_utterance,
                                  current_tokenized_utterance, 'time_range_start')

self.add_to_number_linking_scores({'1200'}, number_linking_scores,
                                  get_time_range_end_from_utterance,
                                  current_tokenized_utterance, 'time_range_end')

self.add_to_number_linking_scores({'0', '1', '60', '41'}, number_linking_scores,
                                  get_numbers_from_utterance,
                                  current_tokenized_utterance, 'number')

self.add_to_number_linking_scores({'0'}, number_linking_scores,
                                  get_costs_from_utterance,
                                  current_tokenized_utterance, 'fare_round_trip_cost')

self.add_to_number_linking_scores({'0'}, number_linking_scores,
                                  get_costs_from_utterance,
                                  current_tokenized_utterance, 'fare_one_direction_cost')

self.add_to_number_linking_scores({'0'}, number_linking_scores,
                                  get_flight_numbers_from_utterance,
                                  current_tokenized_utterance, 'flight_number')

self.add_dates_to_number_linking_scores(number_linking_scores,
                                        current_tokenized_utterance)

# Add string linking dict.
string_linking_dict: Dict[str, List[int]] = {}
for tokenized_utterance in self.tokenized_utterances:
    string_linking_dict = get_strings_from_utterance(tokenized_utterance)

strings_list = AtisWorld.sql_table_context.strings_list
strings_list.append(('flight_airline_code_string -> ["\'EA\'"]', 'EA'))
strings_list.append(('airline_airline_name_string-> ["\'EA\'"]', 'EA'))
# We construct the linking scores for strings from the ``string_linking_dict`` here.
for string in strings_list:
    entity_linking = [0 for token in current_tokenized_utterance]
    # string_linking_dict has the strings and linking scores from the last utterance.
    # If the string is not in the last utterance, then the linking scores will be all 0.
    for token_index in string_linking_dict.get(string[1], []):
        entity_linking[token_index] = 1
    action = string[0]
    string_linking_scores[action] = (action.split(' -> ')[0], string[1], entity_linking)

entity_linking_scores['number'] = number_linking_scores
entity_linking_scores['string'] = string_linking_scores
return entity_linking_scores
<SYSTEM_TASK:> Creates a Flask app that serves up a simple configuration wizard. <END_TASK> <USER_TASK:> Description: def make_app(include_packages: Sequence[str] = ()) -> Flask: """ Creates a Flask app that serves up a simple configuration wizard. """
# Load modules
for package_name in include_packages:
    import_submodules(package_name)

app = Flask(__name__)  # pylint: disable=invalid-name

@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response:  # pylint: disable=unused-variable
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response

@app.route('/')
def index() -> Response:  # pylint: disable=unused-variable
    return send_file('config_explorer.html')

@app.route('/api/config/')
def api_config() -> Response:  # pylint: disable=unused-variable
    """
    There are basically two things that can happen here. If this method is called with a
    ``Registrable`` class (e.g. ``Model``), it should return the list of possible ``Model``
    subclasses. If it is called with an instantiable subclass (e.g. ``CrfTagger``), it should
    return the config for that subclass.

    This is complicated by the fact that some Registrable base classes (e.g. Vocabulary, Trainer)
    are _themselves_ instantiable. We handle this in two ways: first, we insist that the first
    case include an extra ``get_choices`` parameter. That is, if you call this method for
    ``Trainer`` with get_choices=true, you get the list of Trainer subclasses. If you call it
    without that extra flag, you get the config for the class itself.

    There are basically two UX situations in which this API is called. The first is when you have
    a dropdown list of choices (e.g. Model types) and you select one. Such an API request is made
    *without* the get_choices flag, which means that the config is returned *even if the class in
    question is a Registrable class that has subclass choices*.

    The second is when you click a "Configure" button, which configures a class that may (e.g.
    ``Model``) or may not (e.g. ``FeedForward``) have registrable subclasses. In this case the
    API request is made with the "get_choices" flag, but will return the corresponding config
    object if no choices are available (e.g. in the ``FeedForward``) case.

    This is not elegant, but it works.
    """
    class_name = request.args.get('class', '')
    get_choices = request.args.get('get_choices', None)

    # Get the configuration for this class name
    config = configure(class_name)

    try:
        # May not have choices
        choice5 = choices(class_name)
    except ValueError:
        choice5 = []

    if get_choices and choice5:
        return jsonify({
                "className": class_name,
                "choices": choice5
        })
    else:
        return jsonify({
                "className": class_name,
                "config": config.to_json()
        })

return app
<SYSTEM_TASK:> Performs division and handles divide-by-zero. <END_TASK> <USER_TASK:> Description: def _prf_divide(numerator, denominator): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero. """
result = numerator / denominator
mask = denominator == 0.0
if not mask.any():
    return result

# remove nan
result[mask] = 0.0
return result
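An illustrative call, assuming the function above operates on numpy arrays (e.g. true positives over predicted positives, where one class was never predicted):

import numpy as np

numerator = np.array([3.0, 0.0, 5.0])
denominator = np.array([4.0, 0.0, 5.0])
with np.errstate(divide='ignore', invalid='ignore'):
    print(_prf_divide(numerator, denominator))   # [0.75 0.   1.  ]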
<SYSTEM_TASK:> One sentence per line, formatted like <END_TASK> <USER_TASK:> Description: def load_data(file_path: str) -> Tuple[List[str], List[str]]: """ One sentence per line, formatted like The###DET dog###NN ate###V the###DET apple###NN Returns a list of pairs (tokenized_sentence, tags) """
data = []
with open(file_path) as f:
    for line in f:
        pairs = line.strip().split()
        sentence, tags = zip(*(pair.split("###") for pair in pairs))
        data.append((sentence, tags))
return data
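A usage sketch assuming the function above; the file path and contents are made up to match the format shown in the docstring:

with open('/tmp/pos_data.txt', 'w') as f:
    f.write("The###DET dog###NN ate###V the###DET apple###NN\n")

for sentence, tags in load_data('/tmp/pos_data.txt'):
    print(sentence)   # ('The', 'dog', 'ate', 'the', 'apple')
    print(tags)       # ('DET', 'NN', 'V', 'DET', 'NN')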
<SYSTEM_TASK:> Persist this Vocabulary to files so it can be reloaded later. <END_TASK> <USER_TASK:> Description: def save_to_files(self, directory: str) -> None: """ Persist this Vocabulary to files so it can be reloaded later. Each namespace corresponds to one file. Parameters ---------- directory : ``str`` The directory where we save the serialized vocabulary. """
os.makedirs(directory, exist_ok=True)
if os.listdir(directory):
    logging.warning("vocabulary serialization directory %s is not empty", directory)

with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', 'utf-8') as namespace_file:
    for namespace_str in self._non_padded_namespaces:
        print(namespace_str, file=namespace_file)

for namespace, mapping in self._index_to_token.items():
    # Each namespace gets written to its own file, in index order.
    with codecs.open(os.path.join(directory, namespace + '.txt'), 'w', 'utf-8') as token_file:
        num_tokens = len(mapping)
        start_index = 1 if mapping[0] == self._padding_token else 0
        for i in range(start_index, num_tokens):
            print(mapping[i].replace('\n', '@@NEWLINE@@'), file=token_file)
<SYSTEM_TASK:> Loads a ``Vocabulary`` that was serialized using ``save_to_files``. <END_TASK> <USER_TASK:> Description: def from_files(cls, directory: str) -> 'Vocabulary': """ Loads a ``Vocabulary`` that was serialized using ``save_to_files``. Parameters ---------- directory : ``str`` The directory containing the serialized vocabulary. """
logger.info("Loading token dictionary from %s.", directory)
with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', 'utf-8') as namespace_file:
    non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]

vocab = cls(non_padded_namespaces=non_padded_namespaces)

# Check every file in the directory.
for namespace_filename in os.listdir(directory):
    if namespace_filename == NAMESPACE_PADDING_FILE:
        continue
    if namespace_filename.startswith("."):
        continue
    namespace = namespace_filename.replace('.txt', '')
    if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
        is_padded = False
    else:
        is_padded = True
    filename = os.path.join(directory, namespace_filename)
    vocab.set_from_file(filename, is_padded, namespace=namespace)

return vocab
<SYSTEM_TASK:> If you already have a vocabulary file for a trained model somewhere, and you really want to <END_TASK> <USER_TASK:> Description: def set_from_file(self, filename: str, is_padded: bool = True, oov_token: str = DEFAULT_OOV_TOKEN, namespace: str = "tokens"): """ If you already have a vocabulary file for a trained model somewhere, and you really want to use that vocabulary file instead of just setting the vocabulary from a dataset, for whatever reason, you can do that with this method. You must specify the namespace to use, and we assume that you want to use padding and OOV tokens for this. Parameters ---------- filename : ``str`` The file containing the vocabulary to load. It should be formatted as one token per line, with nothing else in the line. The index we assign to the token is the line number in the file (1-indexed if ``is_padded``, 0-indexed otherwise). Note that this file should contain the OOV token string! is_padded : ``bool``, optional (default=True) Is this vocabulary padded? For token / word / character vocabularies, this should be ``True``; while for tag or label vocabularies, this should typically be ``False``. If ``True``, we add a padding token with index 0, and we enforce that the ``oov_token`` is present in the file. oov_token : ``str``, optional (default=DEFAULT_OOV_TOKEN) What token does this vocabulary use to represent out-of-vocabulary characters? This must show up as a line in the vocabulary file. When we find it, we replace ``oov_token`` with ``self._oov_token``, because we only use one OOV token across namespaces. namespace : ``str``, optional (default="tokens") What namespace should we overwrite with this vocab file? """
if is_padded:
    self._token_to_index[namespace] = {self._padding_token: 0}
    self._index_to_token[namespace] = {0: self._padding_token}
else:
    self._token_to_index[namespace] = {}
    self._index_to_token[namespace] = {}
with codecs.open(filename, 'r', 'utf-8') as input_file:
    lines = input_file.read().split('\n')
    # Be flexible about having final newline or not
    if lines and lines[-1] == '':
        lines = lines[:-1]
    for i, line in enumerate(lines):
        index = i + 1 if is_padded else i
        token = line.replace('@@NEWLINE@@', '\n')
        if token == oov_token:
            token = self._oov_token
        self._token_to_index[namespace][token] = index
        self._index_to_token[namespace][index] = token
if is_padded:
    assert self._oov_token in self._token_to_index[namespace], "OOV token not found!"
<SYSTEM_TASK:> Extends an already generated vocabulary using a collection of instances. <END_TASK> <USER_TASK:> Description: def extend_from_instances(self, params: Params, instances: Iterable['adi.Instance'] = ()) -> None: """ Extends an already generated vocabulary using a collection of instances. """
min_count = params.pop("min_count", None)
max_vocab_size = pop_max_vocab_size(params)
non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
pretrained_files = params.pop("pretrained_files", {})
min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
tokens_to_add = params.pop("tokens_to_add", None)
params.assert_empty("Vocabulary - from dataset")

logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
    instance.count_vocab_items(namespace_token_counts)
self._extend(counter=namespace_token_counts,
             min_count=min_count,
             max_vocab_size=max_vocab_size,
             non_padded_namespaces=non_padded_namespaces,
             pretrained_files=pretrained_files,
             only_include_pretrained_words=only_include_pretrained_words,
             tokens_to_add=tokens_to_add,
             min_pretrained_embeddings=min_pretrained_embeddings)
<SYSTEM_TASK:> Returns whether or not there are padding and OOV tokens added to the given namespace. <END_TASK> <USER_TASK:> Description: def is_padded(self, namespace: str) -> bool: """ Returns whether or not there are padding and OOV tokens added to the given namespace. """
return self._index_to_token[namespace][0] == self._padding_token
<SYSTEM_TASK:> Adds ``token`` to the index, if it is not already present. Either way, we return the index of <END_TASK> <USER_TASK:> Description: def add_token_to_namespace(self, token: str, namespace: str = 'tokens') -> int: """ Adds ``token`` to the index, if it is not already present. Either way, we return the index of the token. """
if not isinstance(token, str):
    raise ValueError("Vocabulary tokens must be strings, or saving and loading will break."
                     " Got %s (with type %s)" % (repr(token), type(token)))
if token not in self._token_to_index[namespace]:
    index = len(self._token_to_index[namespace])
    self._token_to_index[namespace][token] = index
    self._index_to_token[namespace][index] = token
    return index
else:
    return self._token_to_index[namespace][token]
<SYSTEM_TASK:> Computes the regularization penalty for the model. <END_TASK> <USER_TASK:> Description: def get_regularization_penalty(self) -> Union[float, torch.Tensor]: """ Computes the regularization penalty for the model. Returns 0 if the model was not configured to use regularization. """
if self._regularizer is None:
    return 0.0
else:
    return self._regularizer(self)
<SYSTEM_TASK:> This method checks the device of the model parameters to determine the cuda_device <END_TASK> <USER_TASK:> Description: def _get_prediction_device(self) -> int: """ This method checks the device of the model parameters to determine the cuda_device this model should be run on for predictions. If there are no parameters, it returns -1. Returns ------- The cuda device this model should run on for predictions. """
devices = {util.get_device_of(param) for param in self.parameters()}

if len(devices) > 1:
    devices_string = ", ".join(str(x) for x in devices)
    raise ConfigurationError(f"Parameters have mismatching cuda_devices: {devices_string}")
elif len(devices) == 1:
    return devices.pop()
else:
    return -1
<SYSTEM_TASK:> This method warns once if a user implements a model which returns a dictionary with <END_TASK> <USER_TASK:> Description: def _maybe_warn_for_unseparable_batches(self, output_key: str): """ This method warns once if a user implements a model which returns a dictionary with values which we are unable to split back up into elements of the batch. This is controlled by a class attribute ``_warn_for_unseparable_batches`` because it would be extremely verbose otherwise. """
if output_key not in self._warn_for_unseparable_batches:
    logger.warning(f"Encountered the {output_key} key in the model's return dictionary which "
                   "couldn't be split by the batch size. Key will be ignored.")
    # We only want to warn once for this key,
    # so we set this to false so we don't warn again.
    self._warn_for_unseparable_batches.add(output_key)
<SYSTEM_TASK:> Takes a logical form, and the list of target values as strings from the original lisp <END_TASK> <USER_TASK:> Description: def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool: """ Takes a logical form, and the list of target values as strings from the original lisp string, and returns True iff the logical form executes to the target list, using the official WikiTableQuestions evaluation script. """
normalized_target_list = [TableQuestionContext.normalize_string(value) for value in target_list]
target_value_list = evaluator.to_value_list(normalized_target_list)
try:
    denotation = self.execute(logical_form)
except ExecutionError:
    logger.warning(f'Failed to execute: {logical_form}')
    return False
if isinstance(denotation, list):
    denotation_list = [str(denotation_item) for denotation_item in denotation]
else:
    denotation_list = [str(denotation)]
denotation_value_list = evaluator.to_value_list(denotation_list)
return evaluator.check_denotation(target_value_list, denotation_value_list)
<SYSTEM_TASK:> Select function takes a list of rows and a column name and returns a list of strings as <END_TASK> <USER_TASK:> Description: def select_string(self, rows: List[Row], column: StringColumn) -> List[str]: """ Select function takes a list of rows and a column name and returns a list of strings as in cells. """
return [str(row.values[column.name]) for row in rows if row.values[column.name] is not None]
<SYSTEM_TASK:> Select function takes a row as a list and a column name and returns the date in that column. <END_TASK> <USER_TASK:> Description: def select_date(self, rows: List[Row], column: DateColumn) -> Date: """ Select function takes a row as a list and a column name and returns the date in that column. """
dates: List[Date] = []
for row in rows:
    cell_value = row.values[column.name]
    if isinstance(cell_value, Date):
        dates.append(cell_value)
return dates[0] if dates else Date(-1, -1, -1)
<SYSTEM_TASK:> Takes a row and a column and returns a list of rows from the full set of rows that contain <END_TASK> <USER_TASK:> Description: def same_as(self, rows: List[Row], column: Column) -> List[Row]: """ Takes a row and a column and returns a list of rows from the full set of rows that contain the same value under the given column as the given row. """
cell_value = rows[0].values[column.name]
return_list = []
for table_row in self.table_data:
    if table_row.values[column.name] == cell_value:
        return_list.append(table_row)
return return_list
<SYSTEM_TASK:> Takes three numbers and returns a ``Date`` object whose year, month, and day are the three <END_TASK> <USER_TASK:> Description: def date(self, year: Number, month: Number, day: Number) -> Date: """ Takes three numbers and returns a ``Date`` object whose year, month, and day are the three numbers in that order. """
return Date(year, month, day)
<SYSTEM_TASK:> Takes an expression that evaluates to a list of rows, and returns the first one in that <END_TASK> <USER_TASK:> Description: def first(self, rows: List[Row]) -> List[Row]: """ Takes an expression that evaluates to a list of rows, and returns the first one in that list. """
if not rows:
    logger.warning("Trying to get first row from an empty list")
    return []
return [rows[0]]
<SYSTEM_TASK:> Takes an expression that evaluates to a list of rows, and returns the last one in that <END_TASK> <USER_TASK:> Description: def last(self, rows: List[Row]) -> List[Row]: """ Takes an expression that evaluates to a list of rows, and returns the last one in that list. """
if not rows:
    logger.warning("Trying to get last row from an empty list")
    return []
return [rows[-1]]
<SYSTEM_TASK:> Takes an expression that evaluates to a single row, and returns the row that occurs before <END_TASK> <USER_TASK:> Description: def previous(self, rows: List[Row]) -> List[Row]: """ Takes an expression that evaluates to a single row, and returns the row that occurs before the input row in the original set of rows. If the input row happens to be the top row, we will return an empty list. """
if not rows:
    return []
input_row_index = self._get_row_index(rows[0])
if input_row_index > 0:
    return [self.table_data[input_row_index - 1]]
return []
<SYSTEM_TASK:> Takes an expression that evaluates to a single row, and returns the row that occurs after <END_TASK> <USER_TASK:> Description: def next(self, rows: List[Row]) -> List[Row]: """ Takes an expression that evaluates to a single row, and returns the row that occurs after the input row in the original set of rows. If the input row happens to be the last row, we will return an empty list. """
if not rows:
    return []
input_row_index = self._get_row_index(rows[0])
if input_row_index < len(self.table_data) - 1 and input_row_index != -1:
    return [self.table_data[input_row_index + 1]]
return []