Columns: Code (string lengths 103 to 85.9k), Summary (sequence lengths 0 to 94)
Please provide a description of the function:def attr(self, key): ret = ctypes.c_char_p() success = ctypes.c_int() _check_call(_LIB.XGBoosterGetAttr( self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success))) if success.value != 0: return py_str(ret.value) return None
[ "Get attribute string from the Booster.\n\n Parameters\n ----------\n key : str\n The key to get attribute from.\n\n Returns\n -------\n value : str\n The attribute value of the key, returns None if attribute do not exist.\n " ]
Please provide a description of the function:def attributes(self): length = c_bst_ulong() sarr = ctypes.POINTER(ctypes.c_char_p)() _check_call(_LIB.XGBoosterGetAttrNames(self.handle, ctypes.byref(length), ctypes.byref(sarr))) attr_names = from_cstr_to_pystr(sarr, length) return {n: self.attr(n) for n in attr_names}
[ "Get attributes stored in the Booster as a dictionary.\n\n Returns\n -------\n result : dictionary of attribute_name: attribute_value pairs of strings.\n Returns an empty dict if there's no attributes.\n " ]
Please provide a description of the function:def set_attr(self, **kwargs): for key, value in kwargs.items(): if value is not None: if not isinstance(value, STRING_TYPES): raise ValueError("Set Attr only accepts string values") value = c_str(str(value)) _check_call(_LIB.XGBoosterSetAttr( self.handle, c_str(key), value))
[ "Set the attribute of the Booster.\n\n Parameters\n ----------\n **kwargs\n The attributes to set. Setting a value to None deletes an attribute.\n " ]
Please provide a description of the function:def set_param(self, params, value=None): if isinstance(params, Mapping): params = params.items() elif isinstance(params, STRING_TYPES) and value is not None: params = [(params, value)] for key, val in params: _check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
[ "Set parameters into the Booster.\n\n Parameters\n ----------\n params: dict/list/str\n list of key,value pairs, dict of key to value or simply str key\n value: optional\n value of the specified parameter, when params is str key\n " ]
Please provide a description of the function:def eval(self, data, name='eval', iteration=0): self._validate_features(data) return self.eval_set([(data, name)], iteration)
[ "Evaluate the model on mat.\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n name : str, optional\n The name of the dataset.\n\n iteration : int, optional\n The current iteration number.\n\n Returns\n -------\n result: str\n Evaluation result string.\n " ]
Please provide a description of the function:def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False, pred_contribs=False, approx_contribs=False, pred_interactions=False, validate_features=True): option_mask = 0x00 if output_margin: option_mask |= 0x01 if pred_leaf: option_mask |= 0x02 if pred_contribs: option_mask |= 0x04 if approx_contribs: option_mask |= 0x08 if pred_interactions: option_mask |= 0x10 if validate_features: self._validate_features(data) length = c_bst_ulong() preds = ctypes.POINTER(ctypes.c_float)() _check_call(_LIB.XGBoosterPredict(self.handle, data.handle, ctypes.c_int(option_mask), ctypes.c_uint(ntree_limit), ctypes.byref(length), ctypes.byref(preds))) preds = ctypes2numpy(preds, length.value, np.float32) if pred_leaf: preds = preds.astype(np.int32) nrow = data.num_row() if preds.size != nrow and preds.size % nrow == 0: chunk_size = int(preds.size / nrow) if pred_interactions: ngroup = int(chunk_size / ((data.num_col() + 1) * (data.num_col() + 1))) if ngroup == 1: preds = preds.reshape(nrow, data.num_col() + 1, data.num_col() + 1) else: preds = preds.reshape(nrow, ngroup, data.num_col() + 1, data.num_col() + 1) elif pred_contribs: ngroup = int(chunk_size / (data.num_col() + 1)) if ngroup == 1: preds = preds.reshape(nrow, data.num_col() + 1) else: preds = preds.reshape(nrow, ngroup, data.num_col() + 1) else: preds = preds.reshape(nrow, chunk_size) return preds
[ "\n Predict with data.\n\n .. note:: This function is not thread safe.\n\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call ``bst.copy()`` to make copies\n of model object and then call ``predict()``.\n\n .. note:: Using ``predict()`` with DART booster\n\n If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only\n some of the trees will be evaluated. This will produce incorrect results if ``data`` is\n not the training data. To obtain correct results on test sets, set ``ntree_limit`` to\n a nonzero value, e.g.\n\n .. code-block:: python\n\n preds = bst.predict(dtest, ntree_limit=num_round)\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n\n output_margin : bool\n Whether to output the raw untransformed margin value.\n\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n\n pred_leaf : bool\n When this option is on, the output will be a matrix of (nsample, ntrees)\n with each record indicating the predicted leaf index of each sample in each tree.\n Note that the leaf index of a tree is unique per tree, so you may find leaf 1\n in both tree 1 and tree 0.\n\n pred_contribs : bool\n When this is True the output will be a matrix of size (nsample, nfeats + 1)\n with each record indicating the feature contributions (SHAP values) for that\n prediction. The sum of all feature contributions is equal to the raw untransformed\n margin value of the prediction. Note the final column is the bias term.\n\n approx_contribs : bool\n Approximate the contributions of each feature\n\n pred_interactions : bool\n When this is True the output will be a matrix of size (nsample, nfeats + 1, nfeats + 1)\n indicating the SHAP interaction values for each pair of features. The sum of each\n row (or column) of the interaction values equals the corresponding SHAP value (from\n pred_contribs), and the sum of the entire matrix equals the raw untransformed margin\n value of the prediction. Note the last row and column correspond to the bias term.\n\n validate_features : bool\n When this is True, validate that the Booster's and data's feature_names are identical.\n Otherwise, it is assumed that the feature_names are the same.\n\n Returns\n -------\n prediction : numpy array\n " ]
Please provide a description of the function:def save_model(self, fname): if isinstance(fname, STRING_TYPES): # assume file name _check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname))) else: raise TypeError("fname must be a string")
[ "\n Save the model to a file.\n\n The model is saved in an XGBoost internal binary format which is\n universal among the various XGBoost interfaces. Auxiliary attributes of\n the Python Booster object (such as feature_names) will not be saved.\n To preserve all attributes, pickle the Booster object.\n\n Parameters\n ----------\n fname : string\n Output file name\n " ]
Please provide a description of the function:def load_model(self, fname): if isinstance(fname, STRING_TYPES): # assume file name, cannot use os.path.exist to check, file can be from URL. _check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname))) else: buf = fname length = c_bst_ulong(len(buf)) ptr = (ctypes.c_char * len(buf)).from_buffer(buf) _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
[ "\n Load the model from a file.\n\n The model is loaded from an XGBoost internal binary format which is\n universal among the various XGBoost interfaces. Auxiliary attributes of\n the Python Booster object (such as feature_names) will not be loaded.\n To preserve all attributes, pickle the Booster object.\n\n Parameters\n ----------\n fname : string or a memory buffer\n Input file name or memory buffer(see also save_raw)\n " ]
Please provide a description of the function:def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"): if isinstance(fout, STRING_TYPES): fout = open(fout, 'w') need_close = True else: need_close = False ret = self.get_dump(fmap, with_stats, dump_format) if dump_format == 'json': fout.write('[\n') for i, _ in enumerate(ret): fout.write(ret[i]) if i < len(ret) - 1: fout.write(",\n") fout.write('\n]') else: for i, _ in enumerate(ret): fout.write('booster[{}]:\n'.format(i)) fout.write(ret[i]) if need_close: fout.close()
[ "\n Dump model into a text or JSON file.\n\n Parameters\n ----------\n fout : string\n Output file name.\n fmap : string, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump file. Can be 'text' or 'json'.\n " ]
Please provide a description of the function:def get_dump(self, fmap='', with_stats=False, dump_format="text"): length = c_bst_ulong() sarr = ctypes.POINTER(ctypes.c_char_p)() if self.feature_names is not None and fmap == '': flen = len(self.feature_names) fname = from_pystr_to_cstr(self.feature_names) if self.feature_types is None: # use quantitative as default # {'q': quantitative, 'i': indicator} ftype = from_pystr_to_cstr(['q'] * flen) else: ftype = from_pystr_to_cstr(self.feature_types) _check_call(_LIB.XGBoosterDumpModelExWithFeatures( self.handle, ctypes.c_int(flen), fname, ftype, ctypes.c_int(with_stats), c_str(dump_format), ctypes.byref(length), ctypes.byref(sarr))) else: if fmap != '' and not os.path.exists(fmap): raise ValueError("No such file: {0}".format(fmap)) _check_call(_LIB.XGBoosterDumpModelEx(self.handle, c_str(fmap), ctypes.c_int(with_stats), c_str(dump_format), ctypes.byref(length), ctypes.byref(sarr))) res = from_cstr_to_pystr(sarr, length) return res
[ "\n Returns the model dump as a list of strings.\n\n Parameters\n ----------\n fmap : string, optional\n Name of the file containing feature map names.\n with_stats : bool, optional\n Controls whether the split statistics are output.\n dump_format : string, optional\n Format of model dump. Can be 'text' or 'json'.\n " ]
Please provide a description of the function:def get_score(self, fmap='', importance_type='weight'): if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}: raise ValueError('Feature importance is not defined for Booster type {}' .format(self.booster)) allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover'] if importance_type not in allowed_importance_types: msg = ("importance_type mismatch, got '{}', expected one of " + repr(allowed_importance_types)) raise ValueError(msg.format(importance_type)) # if it's weight, then omap stores the number of missing values if importance_type == 'weight': # do a simpler tree dump to save time trees = self.get_dump(fmap, with_stats=False) fmap = {} for tree in trees: for line in tree.split('\n'): # look for the opening square bracket arr = line.split('[') # if no opening bracket (leaf node), ignore this line if len(arr) == 1: continue # extract feature name from string between [] fid = arr[1].split(']')[0].split('<')[0] if fid not in fmap: # if the feature hasn't been seen yet fmap[fid] = 1 else: fmap[fid] += 1 return fmap average_over_splits = True if importance_type == 'total_gain': importance_type = 'gain' average_over_splits = False elif importance_type == 'total_cover': importance_type = 'cover' average_over_splits = False trees = self.get_dump(fmap, with_stats=True) importance_type += '=' fmap = {} gmap = {} for tree in trees: for line in tree.split('\n'): # look for the opening square bracket arr = line.split('[') # if no opening bracket (leaf node), ignore this line if len(arr) == 1: continue # look for the closing bracket, extract only info within that bracket fid = arr[1].split(']') # extract gain or cover from string after closing bracket g = float(fid[1].split(importance_type)[1].split(',')[0]) # extract feature name from string before closing bracket fid = fid[0].split('<')[0] if fid not in fmap: # if the feature hasn't been seen yet fmap[fid] = 1 gmap[fid] = g else: fmap[fid] += 1 gmap[fid] += g # calculate average value (gain/cover) for each feature if average_over_splits: for fid in gmap: gmap[fid] = gmap[fid] / fmap[fid] return gmap
[ "Get feature importance of each feature.\n Importance type can be defined as:\n\n * 'weight': the number of times a feature is used to split the data across all trees.\n * 'gain': the average gain across all splits the feature is used in.\n * 'cover': the average coverage across all splits the feature is used in.\n * 'total_gain': the total gain across all splits the feature is used in.\n * 'total_cover': the total coverage across all splits the feature is used in.\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str (optional)\n The name of feature map file.\n importance_type: str, default 'weight'\n One of the importance types defined above.\n " ]
Please provide a description of the function:def trees_to_dataframe(self, fmap=''): # pylint: disable=too-many-locals if not PANDAS_INSTALLED: raise Exception(('pandas must be available to use this method.' 'Install pandas before calling again.')) if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}: raise ValueError('This method is not defined for Booster type {}' .format(self.booster)) tree_ids = [] node_ids = [] fids = [] splits = [] y_directs = [] n_directs = [] missings = [] gains = [] covers = [] trees = self.get_dump(fmap, with_stats=True) for i, tree in enumerate(trees): for line in tree.split('\n'): arr = line.split('[') # Leaf node if len(arr) == 1: # Last element of line.split is an empy string if arr == ['']: continue # parse string parse = arr[0].split(':') stats = re.split('=|,', parse[1]) # append to lists tree_ids.append(i) node_ids.append(int(re.findall(r'\b\d+\b', parse[0])[0])) fids.append('Leaf') splits.append(float('NAN')) y_directs.append(float('NAN')) n_directs.append(float('NAN')) missings.append(float('NAN')) gains.append(float(stats[1])) covers.append(float(stats[3])) # Not a Leaf Node else: # parse string fid = arr[1].split(']') parse = fid[0].split('<') stats = re.split('=|,', fid[1]) # append to lists tree_ids.append(i) node_ids.append(int(re.findall(r'\b\d+\b', arr[0])[0])) fids.append(parse[0]) splits.append(float(parse[1])) str_i = str(i) y_directs.append(str_i + '-' + stats[1]) n_directs.append(str_i + '-' + stats[3]) missings.append(str_i + '-' + stats[5]) gains.append(float(stats[7])) covers.append(float(stats[9])) ids = [str(t_id) + '-' + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)] df = DataFrame({'Tree': tree_ids, 'Node': node_ids, 'ID': ids, 'Feature': fids, 'Split': splits, 'Yes': y_directs, 'No': n_directs, 'Missing': missings, 'Gain': gains, 'Cover': covers}) if callable(getattr(df, 'sort_values', None)): # pylint: disable=no-member return df.sort_values(['Tree', 'Node']).reset_index(drop=True) # pylint: disable=no-member return df.sort(['Tree', 'Node']).reset_index(drop=True)
[ "Parse a boosted tree model text dump into a pandas DataFrame structure.\n\n This feature is only defined when the decision tree model is chosen as base\n learner (`booster in {gbtree, dart}`). It is not defined for other base learner\n types, such as linear learners (`booster=gblinear`).\n\n Parameters\n ----------\n fmap: str (optional)\n The name of feature map file.\n " ]
Please provide a description of the function:def _validate_features(self, data): if self.feature_names is None: self.feature_names = data.feature_names self.feature_types = data.feature_types else: # Booster can't accept data with different feature names if self.feature_names != data.feature_names: dat_missing = set(self.feature_names) - set(data.feature_names) my_missing = set(data.feature_names) - set(self.feature_names) msg = 'feature_names mismatch: {0} {1}' if dat_missing: msg += ('\nexpected ' + ', '.join(str(s) for s in dat_missing) + ' in input data') if my_missing: msg += ('\ntraining data did not have the following fields: ' + ', '.join(str(s) for s in my_missing)) raise ValueError(msg.format(self.feature_names, data.feature_names))
[ "\n Validate Booster and data's feature_names are identical.\n Set feature_names and feature_types from DMatrix\n " ]
Please provide a description of the function:def get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True): xgdump = self.get_dump(fmap=fmap) values = [] regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature)) for i, _ in enumerate(xgdump): m = re.findall(regexp, xgdump[i]) values.extend([float(x) for x in m]) n_unique = len(np.unique(values)) bins = max(min(n_unique, bins) if bins is not None else n_unique, 1) nph = np.histogram(values, bins=bins) nph = np.column_stack((nph[1][1:], nph[0])) nph = nph[nph[:, 1] > 0] if as_pandas and PANDAS_INSTALLED: return DataFrame(nph, columns=['SplitValue', 'Count']) if as_pandas and not PANDAS_INSTALLED: sys.stderr.write( "Returning histogram as ndarray (as_pandas == True, but pandas is not installed).") return nph
[ "Get split value histogram of a feature\n\n Parameters\n ----------\n feature: str\n The name of the feature.\n fmap: str (optional)\n The name of feature map file.\n bin: int, default None\n The maximum number of bins.\n Number of bins equals number of unique split values n_unique,\n if bins == None or bins > n_unique.\n as_pandas: bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return numpy ndarray.\n\n Returns\n -------\n a histogram of used splitting values for the specified feature\n either as numpy array or pandas DataFrame.\n " ]
Please provide a description of the function:def plot_importance(booster, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', xlabel='F score', ylabel='Features', importance_type='weight', max_num_features=None, grid=True, show_values=True, **kwargs): try: import matplotlib.pyplot as plt except ImportError: raise ImportError('You must install matplotlib to plot importance') if isinstance(booster, XGBModel): importance = booster.get_booster().get_score(importance_type=importance_type) elif isinstance(booster, Booster): importance = booster.get_score(importance_type=importance_type) elif isinstance(booster, dict): importance = booster else: raise ValueError('tree must be Booster, XGBModel or dict instance') if not importance: raise ValueError('Booster.get_score() results in empty') tuples = [(k, importance[k]) for k in importance] if max_num_features is not None: # pylint: disable=invalid-unary-operand-type tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:] else: tuples = sorted(tuples, key=lambda x: x[1]) labels, values = zip(*tuples) if ax is None: _, ax = plt.subplots(1, 1) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) if show_values is True: for x, y in zip(values, ylocs): ax.text(x + 1, y, x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: if not isinstance(xlim, tuple) or len(xlim) != 2: raise ValueError('xlim must be a tuple of 2 elements') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: if not isinstance(ylim, tuple) or len(ylim) != 2: raise ValueError('ylim must be a tuple of 2 elements') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
[ "Plot importance based on fitted trees.\n\n Parameters\n ----------\n booster : Booster, XGBModel or dict\n Booster or XGBModel instance, or dict taken by Booster.get_fscore()\n ax : matplotlib Axes, default None\n Target axes instance. If None, new figure and axes will be created.\n grid : bool, Turn the axes grids on or off. Default is True (On).\n importance_type : str, default \"weight\"\n How the importance is calculated: either \"weight\", \"gain\", or \"cover\"\n\n * \"weight\" is the number of times a feature appears in a tree\n * \"gain\" is the average gain of splits which use the feature\n * \"cover\" is the average coverage of splits which use the feature\n where coverage is defined as the number of samples affected by the split\n max_num_features : int, default None\n Maximum number of top features displayed on plot. If None, all features will be displayed.\n height : float, default 0.2\n Bar height, passed to ax.barh()\n xlim : tuple, default None\n Tuple passed to axes.xlim()\n ylim : tuple, default None\n Tuple passed to axes.ylim()\n title : str, default \"Feature importance\"\n Axes title. To disable, pass None.\n xlabel : str, default \"F score\"\n X axis title label. To disable, pass None.\n ylabel : str, default \"Features\"\n Y axis title label. To disable, pass None.\n show_values : bool, default True\n Show values on plot. To disable, pass False.\n kwargs :\n Other keywords passed to ax.barh()\n\n Returns\n -------\n ax : matplotlib Axes\n " ]
Please provide a description of the function:def _parse_node(graph, text, condition_node_params, leaf_node_params): match = _NODEPAT.match(text) if match is not None: node = match.group(1) graph.node(node, label=match.group(2), **condition_node_params) return node match = _LEAFPAT.match(text) if match is not None: node = match.group(1) graph.node(node, label=match.group(2), **leaf_node_params) return node raise ValueError('Unable to parse node: {0}'.format(text))
[ "parse dumped node" ]
Please provide a description of the function:def _parse_edge(graph, node, text, yes_color='#0000FF', no_color='#FF0000'): try: match = _EDGEPAT.match(text) if match is not None: yes, no, missing = match.groups() if yes == missing: graph.edge(node, yes, label='yes, missing', color=yes_color) graph.edge(node, no, label='no', color=no_color) else: graph.edge(node, yes, label='yes', color=yes_color) graph.edge(node, no, label='no, missing', color=no_color) return except ValueError: pass match = _EDGEPAT2.match(text) if match is not None: yes, no = match.groups() graph.edge(node, yes, label='yes', color=yes_color) graph.edge(node, no, label='no', color=no_color) return raise ValueError('Unable to parse edge: {0}'.format(text))
[ "parse dumped edge" ]
Please provide a description of the function:def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT', yes_color='#0000FF', no_color='#FF0000', condition_node_params=None, leaf_node_params=None, **kwargs): if condition_node_params is None: condition_node_params = {} if leaf_node_params is None: leaf_node_params = {} try: from graphviz import Digraph except ImportError: raise ImportError('You must install graphviz to plot tree') if not isinstance(booster, (Booster, XGBModel)): raise ValueError('booster must be Booster or XGBModel instance') if isinstance(booster, XGBModel): booster = booster.get_booster() tree = booster.get_dump(fmap=fmap)[num_trees] tree = tree.split() kwargs = kwargs.copy() kwargs.update({'rankdir': rankdir}) graph = Digraph(graph_attr=kwargs) for i, text in enumerate(tree): if text[0].isdigit(): node = _parse_node( graph, text, condition_node_params=condition_node_params, leaf_node_params=leaf_node_params) else: if i == 0: # 1st string must be node raise ValueError('Unable to parse given string as tree') _parse_edge(graph, node, text, yes_color=yes_color, no_color=no_color) return graph
[ "Convert specified tree to graphviz instance. IPython can automatically plot the\n returned graphiz instance. Otherwise, you should call .render() method\n of the returned graphiz instance.\n\n Parameters\n ----------\n booster : Booster, XGBModel\n Booster or XGBModel instance\n fmap: str (optional)\n The name of feature map file\n num_trees : int, default 0\n Specify the ordinal number of target tree\n rankdir : str, default \"UT\"\n Passed to graphiz via graph_attr\n yes_color : str, default '#0000FF'\n Edge color when meets the node condition.\n no_color : str, default '#FF0000'\n Edge color when doesn't meet the node condition.\n condition_node_params : dict (optional)\n condition node configuration,\n {'shape':'box',\n 'style':'filled,rounded',\n 'fillcolor':'#78bceb'\n }\n leaf_node_params : dict (optional)\n leaf node configuration\n {'shape':'box',\n 'style':'filled',\n 'fillcolor':'#e48038'\n }\n kwargs :\n Other keywords passed to graphviz graph_attr\n\n Returns\n -------\n ax : matplotlib Axes\n " ]
Please provide a description of the function:def newAction(parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): a = QAction(text, parent) if icon is not None: a.setIcon(newIcon(icon)) if shortcut is not None: if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if tip is not None: a.setToolTip(tip) a.setStatusTip(tip) if slot is not None: a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a
[ "Create a new action and assign callbacks, shortcuts, etc." ]
Please provide a description of the function:def natural_sort(list, key=lambda s:s): def get_alphanum_key_func(key): convert = lambda text: int(text) if text.isdigit() else text return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))] sort_key = get_alphanum_key_func(key) list.sort(key=sort_key)
[ "\n Sort the list into natural alphanumeric order.\n " ]
Please provide a description of the function:def mouseMoveEvent(self, ev): pos = self.transformPos(ev.pos()) # Update coordinates in status bar if image is opened window = self.parent().window() if window.filePath is not None: self.parent().window().labelCoordinates.setText( 'X: %d; Y: %d' % (pos.x(), pos.y())) # Polygon drawing. if self.drawing(): self.overrideCursor(CURSOR_DRAW) if self.current: color = self.drawingLineColor if self.outOfPixmap(pos): # Don't allow the user to draw outside the pixmap. # Project the point to the pixmap's edges. pos = self.intersectionPoint(self.current[-1], pos) elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]): # Attract line to starting point and colorise to alert the # user: pos = self.current[0] color = self.current.line_color self.overrideCursor(CURSOR_POINT) self.current.highlightVertex(0, Shape.NEAR_VERTEX) if self.drawSquare: initPos = self.current[0] minX = initPos.x() minY = initPos.y() min_size = min(abs(pos.x() - minX), abs(pos.y() - minY)) directionX = -1 if pos.x() - minX < 0 else 1 directionY = -1 if pos.y() - minY < 0 else 1 self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size) else: self.line[1] = pos self.line.line_color = color self.prevPoint = QPointF() self.current.highlightClear() else: self.prevPoint = pos self.repaint() return # Polygon copy moving. if Qt.RightButton & ev.buttons(): if self.selectedShapeCopy and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShapeCopy, pos) self.repaint() elif self.selectedShape: self.selectedShapeCopy = self.selectedShape.copy() self.repaint() return # Polygon/Vertex moving. if Qt.LeftButton & ev.buttons(): if self.selectedVertex(): self.boundedMoveVertex(pos) self.shapeMoved.emit() self.repaint() elif self.selectedShape and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShape, pos) self.shapeMoved.emit() self.repaint() return # Just hovering over the canvas, 2 posibilities: # - Highlight shapes # - Highlight vertex # Update shape/vertex fill and tooltip value accordingly. self.setToolTip("Image") for shape in reversed([s for s in self.shapes if self.isVisible(s)]): # Look for a nearby vertex to highlight. If that fails, # check if we happen to be inside a shape. index = shape.nearestVertex(pos, self.epsilon) if index is not None: if self.selectedVertex(): self.hShape.highlightClear() self.hVertex, self.hShape = index, shape shape.highlightVertex(index, shape.MOVE_VERTEX) self.overrideCursor(CURSOR_POINT) self.setToolTip("Click & drag to move point") self.setStatusTip(self.toolTip()) self.update() break elif shape.containsPoint(pos): if self.selectedVertex(): self.hShape.highlightClear() self.hVertex, self.hShape = None, shape self.setToolTip( "Click & drag to move shape '%s'" % shape.label) self.setStatusTip(self.toolTip()) self.overrideCursor(CURSOR_GRAB) self.update() break else: # Nothing found, clear highlights, reset state. if self.hShape: self.hShape.highlightClear() self.update() self.hVertex, self.hShape = None, None self.overrideCursor(CURSOR_DEFAULT)
[ "Update line with last point and current coordinates." ]
Please provide a description of the function:def selectShapePoint(self, point): self.deSelectShape() if self.selectedVertex(): # A vertex is marked for selection. index, shape = self.hVertex, self.hShape shape.highlightVertex(index, shape.MOVE_VERTEX) self.selectShape(shape) return for shape in reversed(self.shapes): if self.isVisible(shape) and shape.containsPoint(point): self.selectShape(shape) self.calculateOffsets(shape, point) return
[ "Select the first shape created which contains this point." ]
Please provide a description of the function:def snapPointToCanvas(self, x, y): if x < 0 or x > self.pixmap.width() or y < 0 or y > self.pixmap.height(): x = max(x, 0) y = max(y, 0) x = min(x, self.pixmap.width()) y = min(y, self.pixmap.height()) return x, y, True return x, y, False
[ "\n Moves a point x,y to within the boundaries of the canvas.\n :return: (x,y,snapped) where snapped is True if x or y were changed, False if not.\n " ]
Please provide a description of the function:def get_main_app(argv=[]): app = QApplication(argv) app.setApplicationName(__appname__) app.setWindowIcon(newIcon("app")) # Tzutalin 201705+: Accept extra arguments to change predefined class file # Usage : labelImg.py image predefClassFile saveDir win = MainWindow(argv[1] if len(argv) >= 2 else None, argv[2] if len(argv) >= 3 else os.path.join( os.path.dirname(sys.argv[0]), 'data', 'predefined_classes.txt'), argv[3] if len(argv) >= 4 else None) win.show() return app, win
[ "\n Standard boilerplate Qt application code.\n Do everything but app.exec_() -- so that we can test the application in one thread\n " ]
Please provide a description of the function:def toggleActions(self, value=True): for z in self.actions.zoomActions: z.setEnabled(value) for action in self.actions.onLoadActive: action.setEnabled(value)
[ "Enable/Disable widgets which depend on an opened image." ]
Please provide a description of the function:def toggleDrawingSensitive(self, drawing=True): self.actions.editMode.setEnabled(not drawing) if not drawing and self.beginner(): # Cancel creation. print('Cancel creation.') self.canvas.setEditing(True) self.canvas.restoreCursor() self.actions.create.setEnabled(True)
[ "In the middle of drawing, toggling between modes should be disabled." ]
Please provide a description of the function:def btnstate(self, item= None): if not self.canvas.editing(): return item = self.currentItem() if not item: # If not selected Item, take the first one item = self.labelList.item(self.labelList.count()-1) difficult = self.diffcButton.isChecked() try: shape = self.itemsToShapes[item] except: pass # Checked and Update try: if difficult != shape.difficult: shape.difficult = difficult self.setDirty() else: # User probably changed item visibility self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked) except: pass
[ " Function to handle difficult examples\n Update on each object " ]
Please provide a description of the function:def newShape(self): if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text(): if len(self.labelHist) > 0: self.labelDialog = LabelDialog( parent=self, listItem=self.labelHist) # Sync single class mode from PR#106 if self.singleClassMode.isChecked() and self.lastLabel: text = self.lastLabel else: text = self.labelDialog.popUp(text=self.prevLabelText) self.lastLabel = text else: text = self.defaultLabelTextLine.text() # Add Chris self.diffcButton.setChecked(False) if text is not None: self.prevLabelText = text generate_color = generateColorByText(text) shape = self.canvas.setLastLabel(text, generate_color, generate_color) self.addLabel(shape) if self.beginner(): # Switch to edit mode. self.canvas.setEditing(True) self.actions.create.setEnabled(True) else: self.actions.editMode.setEnabled(True) self.setDirty() if text not in self.labelHist: self.labelHist.append(text) else: # self.canvas.undoLastLine() self.canvas.resetAllLines()
[ "Pop-up and give focus to the label editor.\n\n position MUST be in global coordinates.\n " ]
Please provide a description of the function:def loadFile(self, filePath=None): self.resetState() self.canvas.setEnabled(False) if filePath is None: filePath = self.settings.get(SETTING_FILENAME) # Make sure that filePath is a regular python string, rather than QString filePath = ustr(filePath) unicodeFilePath = ustr(filePath) # Tzutalin 20160906 : Add file list and dock to move faster # Highlight the file item if unicodeFilePath and self.fileListWidget.count() > 0: index = self.mImgList.index(unicodeFilePath) fileWidgetItem = self.fileListWidget.item(index) fileWidgetItem.setSelected(True) if unicodeFilePath and os.path.exists(unicodeFilePath): if LabelFile.isLabelFile(unicodeFilePath): try: self.labelFile = LabelFile(unicodeFilePath) except LabelFileError as e: self.errorMessage(u'Error opening file', (u"<p><b>%s</b></p>" u"<p>Make sure <i>%s</i> is a valid label file.") % (e, unicodeFilePath)) self.status("Error reading %s" % unicodeFilePath) return False self.imageData = self.labelFile.imageData self.lineColor = QColor(*self.labelFile.lineColor) self.fillColor = QColor(*self.labelFile.fillColor) self.canvas.verified = self.labelFile.verified else: # Load image: # read data first and store for saving into label file. self.imageData = read(unicodeFilePath, None) self.labelFile = None self.canvas.verified = False image = QImage.fromData(self.imageData) if image.isNull(): self.errorMessage(u'Error opening file', u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath) self.status("Error reading %s" % unicodeFilePath) return False self.status("Loaded %s" % os.path.basename(unicodeFilePath)) self.image = image self.filePath = unicodeFilePath self.canvas.loadPixmap(QPixmap.fromImage(image)) if self.labelFile: self.loadLabels(self.labelFile.shapes) self.setClean() self.canvas.setEnabled(True) self.adjustScale(initial=True) self.paintCanvas() self.addRecentFile(self.filePath) self.toggleActions(True) # Label xml file and show bound box according to its filename # if self.usingPascalVocFormat is True: if self.defaultSaveDir is not None: basename = os.path.basename( os.path.splitext(self.filePath)[0]) xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT) txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT) if os.path.isfile(xmlPath): self.loadPascalXMLByFilename(xmlPath) elif os.path.isfile(txtPath): self.loadYOLOTXTByFilename(txtPath) else: xmlPath = os.path.splitext(filePath)[0] + XML_EXT txtPath = os.path.splitext(filePath)[0] + TXT_EXT if os.path.isfile(xmlPath): self.loadPascalXMLByFilename(xmlPath) elif os.path.isfile(txtPath): self.loadYOLOTXTByFilename(txtPath) self.setWindowTitle(__appname__ + ' ' + filePath) # Default : select last item if there is at least one item if self.labelList.count(): self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1)) self.labelList.item(self.labelList.count()-1).setSelected(True) self.canvas.setFocus(True) return True return False
[ "Load the specified file, or the last opened file if None.", "Annotation file priority:\n PascalXML > YOLO\n " ]
Please provide a description of the function:def scaleFitWindow(self): e = 2.0 # So that no scrollbars are generated. w1 = self.centralWidget().width() - e h1 = self.centralWidget().height() - e a1 = w1 / h1 # Calculate a new scale value based on the pixmap's aspect ratio. w2 = self.canvas.pixmap.width() - 0.0 h2 = self.canvas.pixmap.height() - 0.0 a2 = w2 / h2 return w1 / w2 if a2 >= a1 else h1 / h2
[ "Figure out the size of the pixmap in order to fit the main widget." ]
Please provide a description of the function:def ustr(x): '''py2/py3 unicode helper''' if sys.version_info < (3, 0, 0): from PyQt4.QtCore import QString if type(x) == str: return x.decode(DEFAULT_ENCODING) if type(x) == QString: #https://blog.csdn.net/friendan/article/details/51088476 #https://blog.csdn.net/xxm524/article/details/74937308 return unicode(x.toUtf8(), DEFAULT_ENCODING, 'ignore') return x else: return x
[]
Please provide a description of the function:def prettify(self, elem): rough_string = ElementTree.tostring(elem, 'utf8') root = etree.fromstring(rough_string) return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(" ".encode(), "\t".encode()) # minidom does not support UTF-8 '''reparsed = minidom.parseString(rough_string) return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'''
[ "\n Return a pretty-printed XML string for the Element.\n " ]
Please provide a description of the function:def genXML(self): # Check conditions if self.filename is None or \ self.foldername is None or \ self.imgSize is None: return None top = Element('annotation') if self.verified: top.set('verified', 'yes') folder = SubElement(top, 'folder') folder.text = self.foldername filename = SubElement(top, 'filename') filename.text = self.filename if self.localImgPath is not None: localImgPath = SubElement(top, 'path') localImgPath.text = self.localImgPath source = SubElement(top, 'source') database = SubElement(source, 'database') database.text = self.databaseSrc size_part = SubElement(top, 'size') width = SubElement(size_part, 'width') height = SubElement(size_part, 'height') depth = SubElement(size_part, 'depth') width.text = str(self.imgSize[1]) height.text = str(self.imgSize[0]) if len(self.imgSize) == 3: depth.text = str(self.imgSize[2]) else: depth.text = '1' segmented = SubElement(top, 'segmented') segmented.text = '0' return top
[ "\n Return XML root\n " ]
Please provide a description of the function:async def fetch(self, url, method='GET', headers=None, body=None): request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, headers, body) encoded_body = body.encode() if body else None session_method = getattr(self.session, method.lower()) response = None http_response = None json_response = None try: async with session_method(yarl.URL(url, encoded=True), data=encoded_body, headers=request_headers, timeout=(self.timeout / 1000), proxy=self.aiohttp_proxy) as response: http_response = await response.text() json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None headers = response.headers if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.enableLastJsonResponse: self.last_json_response = json_response if self.verbose: print("\nResponse:", method, url, response.status, headers, http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, headers, http_response) except socket.gaierror as e: self.raise_error(ExchangeNotAvailable, url, method, e, None) except concurrent.futures._base.TimeoutError as e: self.raise_error(RequestTimeout, method, url, e, None) except aiohttp.client_exceptions.ClientConnectionError as e: self.raise_error(ExchangeNotAvailable, url, method, e, None) except aiohttp.client_exceptions.ClientError as e: # base exception class self.raise_error(ExchangeError, url, method, e, None) self.handle_errors(response.status, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(None, response.status, http_response, url, method) self.handle_rest_response(http_response, json_response, url, method, headers, body) if json_response is not None: return json_response return http_response
[ "Perform a HTTP request and return decoded JSON data" ]
Please provide a description of the function:def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body'])
[ "A better wrapper over request for deferred signing" ]
Please provide a description of the function:def request(self, path, api='public', method='GET', params={}, headers=None, body=None): return self.fetch2(path, api, method, params, headers, body)
[ "Exchange.request is the entry point for all generated methods" ]
Please provide a description of the function:def find_broadly_matched_key(self, broad, string): keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if string.find(key) >= 0: return key return None
[ "A helper method for matching error strings exactly vs broadly" ]
Please provide a description of the function:def fetch(self, url, method='GET', headers=None, body=None): request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, request_headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None json_response = None try: response = self.session.request( method, url, data=body, headers=request_headers, timeout=int(self.timeout / 1000), proxies=self.proxies ) http_response = response.text json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None headers = response.headers # FIXME remove last_x_responses from subclasses if self.enableLastHttpResponse: self.last_http_response = http_response if self.enableLastJsonResponse: self.last_json_response = json_response if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print("\nResponse:", method, url, response.status_code, headers, http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: # base exception class error_string = str(e) if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response) self.handle_rest_response(http_response, json_response, url, method, headers, body) if json_response is not None: return json_response return http_response
[ "Perform a HTTP request and return decoded JSON data" ]
Please provide a description of the function:def safe_either(method, dictionary, key1, key2, default_value=None): value = method(dictionary, key1) return value if value is not None else method(dictionary, key2, default_value)
[ "A helper-wrapper for the safe_value_2() family." ]
Please provide a description of the function:def truncate(num, precision=0): if precision > 0: decimal_precision = math.pow(10, precision) return math.trunc(num * decimal_precision) / decimal_precision return int(Exchange.truncate_to_string(num, precision))
[ "Deprecated, use decimal_to_precision instead" ]
Please provide a description of the function:def truncate_to_string(num, precision=0): if precision > 0: parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = decimal_digits if len(decimal_digits) else '0' return parts[0] + '.' + decimal_digits return ('%d' % num)
[ "Deprecated, todo: remove references from subclasses" ]
Please provide a description of the function:def check_address(self, address): if address is None: self.raise_error(InvalidAddress, details='address is None') if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address: self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"') return address
[ "Checks an address is not the same character repeated or an empty sequence" ]
Please provide a description of the function:def reduce_filename(f): r''' Expects something like /tmp/tmpAjry4Gdsbench/test.weights.e5.XXX.YYY.pb Where XXX is a variation on the model size for example And where YYY is a const related to the training dataset ''' f = os.path.basename(f).split('.') return keep_only_digits(f[-3])
[]
Please provide a description of the function:def keep_only_digits(s): r''' local helper to just keep digits ''' fs = '' for c in s: if c.isdigit(): fs += c return int(fs)
[]
Please provide a description of the function:def parse_stm_file(stm_file): stm_segments = [] with codecs.open(stm_file, encoding="utf-8") as stm_lines: for stm_line in stm_lines: stmSegment = STMSegment(stm_line) if not "ignore_time_segment_in_scoring" == stmSegment.transcript: stm_segments.append(stmSegment) return stm_segments
[ "\n Parses an STM file at ``stm_file`` into a list of :class:`STMSegment`.\n " ]
Please provide a description of the function:def read_wave(path): with contextlib.closing(wave.open(path, 'rb')) as wf: num_channels = wf.getnchannels() assert num_channels == 1 sample_width = wf.getsampwidth() assert sample_width == 2 sample_rate = wf.getframerate() assert sample_rate in (8000, 16000, 32000) frames = wf.getnframes() pcm_data = wf.readframes(frames) duration = frames / sample_rate return pcm_data, sample_rate, duration
[ "Reads a .wav file.\n\n Takes the path, and returns (PCM audio data, sample rate).\n " ]
Please provide a description of the function:def write_wave(path, audio, sample_rate): with contextlib.closing(wave.open(path, 'wb')) as wf: wf.setnchannels(1) wf.setsampwidth(2) wf.setframerate(sample_rate) wf.writeframes(audio)
[ "Writes a .wav file.\n\n Takes path, PCM audio data, and sample rate.\n " ]
Please provide a description of the function:def frame_generator(frame_duration_ms, audio, sample_rate): n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) offset = 0 timestamp = 0.0 duration = (float(n) / sample_rate) / 2.0 while offset + n < len(audio): yield Frame(audio[offset:offset + n], timestamp, duration) timestamp += duration offset += n
[ "Generates audio frames from PCM audio data.\n\n Takes the desired frame duration in milliseconds, the PCM data, and\n the sample rate.\n\n Yields Frames of the requested duration.\n " ]
Please provide a description of the function:def run(self): ''' Initialise the runner function with the passed args, kwargs ''' # Retrieve args/kwargs here; and fire up the processing using them try: transcript = self.fn(*self.args, **self.kwargs) except: traceback.print_exc() exctype, value = sys.exc_info()[:2] self.signals.error.emit((exctype, value, traceback.format_exc())) else: # Return the result of the processing self.signals.result.emit(transcript) finally: # Done self.signals.finished.emit()
[]
Please provide a description of the function:def exec_command(command, cwd=None): r''' Helper to exec locally (subprocess) or remotely (paramiko) ''' rc = None stdout = stderr = None if ssh_conn is None: ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')} p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd) stdout, stderr = p.communicate() rc = p.returncode else: # environment= requires paramiko >= 2.1 (fails with 2.0.2) final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command) ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command) stdout = ''.join(ssh_stdout.readlines()) stderr = ''.join(ssh_stderr.readlines()) rc = ssh_stdout.channel.recv_exit_status() return rc, stdout, stderr
[]
Please provide a description of the function:def get_arch_string(): r''' Check local or remote system arch, to produce TaskCluster proper link. ''' rc, stdout, stderr = exec_command('uname -sm') if rc > 0: raise AssertionError('Error checking OS') stdout = stdout.lower().strip() if not 'linux' in stdout: raise AssertionError('Unsupported OS') if 'armv7l' in stdout: return 'arm' if 'x86_64' in stdout: nv_rc, nv_stdout, nv_stderr = exec_command('nvidia-smi') nv_stdout = nv_stdout.lower().strip() if 'NVIDIA-SMI' in nv_stdout: return 'gpu' else: return 'cpu' raise AssertionError('Unsupported arch:', stdout)
[]
Please provide a description of the function:def extract_native_client_tarball(dir): r''' Download a native_client.tar.xz file from TaskCluster and extract it to dir. ''' assert_valid_dir(dir) target_tarball = os.path.join(dir, 'native_client.tar.xz') if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0: return subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir) subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir) os.unlink(os.path.join(dir, 'native_client.tar')) open(target_tarball, 'w').close()
[]
Please provide a description of the function:def is_zip_file(models): r''' Ensure that a path is a zip file by: - checking length is 1 - checking extension is '.zip' ''' ext = os.path.splitext(models[0])[1] return (len(models) == 1) and (ext == '.zip')
[]
Please provide a description of the function:def maybe_inspect_zip(models): r''' Detect if models is a list of protocolbuffer files or a ZIP file. If the latter, then unzip it and return the list of protocolbuffer files that were inside. ''' if not(is_zip_file(models)): return models if len(models) > 1: return models if len(models) < 1: raise AssertionError('No models at all') return zipfile.ZipFile(models[0]).namelist()
[]
Please provide a description of the function:def all_files(models=[]): r''' Return a list of full path of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except one variable component e.g. the list, test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb ''' def nsort(a, b): fa = os.path.basename(a).split('.') fb = os.path.basename(b).split('.') elements_to_remove = [] assert len(fa) == len(fb) for i in range(0, len(fa)): if fa[i] == fb[i]: elements_to_remove.append(fa[i]) for e in elements_to_remove: fa.remove(e) fb.remove(e) assert len(fa) == len(fb) assert len(fa) == 1 fa = keep_only_digits(fa[0]) fb = keep_only_digits(fb[0]) if fa < fb: return -1 if fa == fb: return 0 if fa > fb: return 1 base = list(map(lambda x: os.path.abspath(x), maybe_inspect_zip(models))) base.sort(cmp=nsort) return base
[]
Please provide a description of the function:def setup_tempdir(dir, models, wav, alphabet, lm_binary, trie, binaries): r''' Copy models, libs and binary to a directory (new one if dir is None) ''' if dir is None: dir = tempfile.mkdtemp(suffix='dsbench') sorted_models = all_files(models=models) if binaries is None: maybe_download_binaries(dir) else: print('Using local binaries: %s' % (binaries)) shutil.copy2(binaries, dir) extract_native_client_tarball(dir) filenames = map(lambda x: os.path.join(dir, os.path.basename(x)), sorted_models) missing_models = filter(lambda x: not os.path.isfile(x), filenames) if len(missing_models) > 0: # If we have a ZIP file, directly extract it to the proper path if is_zip_file(models): print('Extracting %s to %s' % (models[0], dir)) zipfile.ZipFile(models[0]).extractall(path=dir) print('Extracted %s.' % models[0]) else: # If one model is missing, let's copy everything again. Be safe. for f in sorted_models: print('Copying %s to %s' % (f, dir)) shutil.copy2(f, dir) for extra_file in [ wav, alphabet, lm_binary, trie ]: if extra_file and not os.path.isfile(os.path.join(dir, os.path.basename(extra_file))): print('Copying %s to %s' % (extra_file, dir)) shutil.copy2(extra_file, dir) if ssh_conn: copy_tree(dir) return dir, sorted_models
[]
Please provide a description of the function:def teardown_tempdir(dir): r''' Cleanup temporary directory. ''' if ssh_conn: delete_tree(dir) assert_valid_dir(dir) shutil.rmtree(dir)
[]
Please provide a description of the function:def get_sshconfig(): r''' Read user's SSH configuration file ''' with open(os.path.expanduser('~/.ssh/config')) as f: cfg = paramiko.SSHConfig() cfg.parse(f) ret_dict = {} for d in cfg._config: _copy = dict(d) # Avoid buggy behavior with strange host definitions, we need # Hostname and not Host. del _copy['host'] for host in d['host']: ret_dict[host] = _copy['config'] return ret_dict
[]
Please provide a description of the function:def establish_ssh(target=None, auto_trust=False, allow_agent=True, look_keys=True): r''' Establish a SSH connection to a remote host. It should be able to use SSH's config file Host name declarations. By default, will not automatically add trust for hosts, will use SSH agent and will try to load keys. ''' def password_prompt(username, hostname): r''' If the Host is relying on password authentication, lets ask it. Relying on SSH itself to take care of that would not work when the remote authentication is password behind a SSH-key+2FA jumphost. ''' return getpass.getpass('No SSH key for %s@%s, please provide password: ' % (username, hostname)) ssh_conn = None if target is not None: ssh_conf = get_sshconfig() cfg = { 'hostname': None, 'port': 22, 'allow_agent': allow_agent, 'look_for_keys': look_keys } if ssh_conf.has_key(target): user_config = ssh_conf.get(target) # If ssh_config file's Host defined 'User' instead of 'Username' if user_config.has_key('user') and not user_config.has_key('username'): user_config['username'] = user_config['user'] del user_config['user'] for k in ('username', 'hostname', 'port'): if k in user_config: cfg[k] = user_config[k] # Assume Password auth. If we don't do that, then when connecting # through a jumphost we will run into issues and the user will # not be able to input his password to the SSH prompt. if 'identityfile' in user_config: cfg['key_filename'] = user_config['identityfile'] else: cfg['password'] = password_prompt(cfg['username'], cfg['hostname'] or target) # Should be the last one, since ProxyCommand will issue connection to remote host if 'proxycommand' in user_config: cfg['sock'] = paramiko.ProxyCommand(user_config['proxycommand']) else: cfg['username'] = target.split('@')[0] cfg['hostname'] = target.split('@')[1].split(':')[0] cfg['password'] = password_prompt(cfg['username'], cfg['hostname']) try: cfg['port'] = int(target.split('@')[1].split(':')[1]) except IndexError: # IndexError will happen if no :PORT is there. # Default value 22 is defined above in 'cfg'. pass ssh_conn = paramiko.SSHClient() if auto_trust: ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh_conn.connect(**cfg) return ssh_conn
[]
Please provide a description of the function:def run_benchmarks(dir, models, wav, alphabet, lm_binary=None, trie=None, iters=-1): r''' Core of the running of the benchmarks. We will run on all of models, against the WAV file provided as wav, and the provided alphabet. ''' assert_valid_dir(dir) inference_times = [ ] for model in models: model_filename = model current_model = { 'name': model, 'iters': [ ], 'mean': numpy.infty, 'stddev': numpy.infty } if lm_binary and trie: cmdline = './deepspeech --model "%s" --alphabet "%s" --lm "%s" --trie "%s" --audio "%s" -t' % (model_filename, alphabet, lm_binary, trie, wav) else: cmdline = './deepspeech --model "%s" --alphabet "%s" --audio "%s" -t' % (model_filename, alphabet, wav) for it in range(iters): sys.stdout.write('\rRunning %s: %d/%d' % (os.path.basename(model), (it+1), iters)) sys.stdout.flush() rc, stdout, stderr = exec_command(cmdline, cwd=dir) if rc == 0: inference_time = float(stdout.split('\n')[1].split('=')[-1]) # print("[%d] model=%s inference=%f" % (it, model, inference_time)) current_model['iters'].append(inference_time) else: print('exec_command("%s") failed with rc=%d' % (cmdline, rc)) print('stdout: %s' % stdout) print('stderr: %s' % stderr) raise AssertionError('Execution failure: rc=%d' % (rc)) sys.stdout.write('\n') sys.stdout.flush() current_model['mean'] = numpy.mean(current_model['iters']) current_model['stddev'] = numpy.std(current_model['iters']) inference_times.append(current_model) return inference_times
[]
Please provide a description of the function:def produce_csv(input, output):
    r'''
    Take the input list of per-model benchmark results and write them as CSV
    to the file-like object output.
    '''
    output.write('"model","mean","std"\n')
    for model_data in input:
        output.write('"%s",%f,%f\n' % (model_data['name'], model_data['mean'], model_data['stddev']))
    output.flush()
    output.close()
    print("Wrote as %s" % output.name)
[]
Please provide a description of the function:def to_sparse_tuple(sequence): r indices = np.asarray(list(zip([0]*len(sequence), range(len(sequence)))), dtype=np.int64) shape = np.asarray([1, len(sequence)], dtype=np.int64) return indices, sequence, shape
[ "Creates a sparse representention of ``sequence``.\n Returns a tuple with (indices, values, shape)\n " ]
Please provide a description of the function:def _parallel_downloader(voxforge_url, archive_dir, total, counter): def download(d): (i, file) = d download_url = voxforge_url + '/' + file c = counter.increment() print('Downloading file {} ({}/{})...'.format(i+1, c, total)) maybe_download(filename_of(download_url), archive_dir, download_url) return download
[ "Generate a function to download a file based on given parameters\n This works by currying the above given arguments into a closure\n in the form of the following function.\n\n :param voxforge_url: the base voxforge URL\n :param archive_dir: the location to store the downloaded file\n :param total: the total number of files to download\n :param counter: an atomic counter to keep track of # of downloaded files\n :return: a function that actually downloads a file given these params\n ", "Binds voxforge_url, archive_dir, total, and counter into this scope\n Downloads the given file\n :param d: a tuple consisting of (index, file) where index is the index\n of the file to download and file is the name of the file to download\n " ]
Please provide a description of the function:def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter): def extract(d): (i, archive) = d if i < number_of_test: dataset_dir = path.join(data_dir, "test") elif i<number_of_test+number_of_dev: dataset_dir = path.join(data_dir, "dev") else: dataset_dir = path.join(data_dir, "train") if not gfile.Exists(path.join(dataset_dir, '.'.join(filename_of(archive).split(".")[:-1]))): c = counter.increment() print('Extracting file {} ({}/{})...'.format(i+1, c, total)) tar = tarfile.open(archive) tar.extractall(dataset_dir) tar.close() return extract
[ "Generate a function to extract a tar file based on given parameters\n This works by currying the above given arguments into a closure\n in the form of the following function.\n\n :param data_dir: the target directory to extract into\n :param number_of_test: the number of files to keep as the test set\n :param number_of_dev: the number of files to keep as the dev set\n :param total: the total number of files to extract\n :param counter: an atomic counter to keep track of # of extracted files\n :return: a function that actually extracts a tar file given these params\n ", "Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope\n Extracts the given file\n :param d: a tuple consisting of (index, file) where index is the index\n of the file to extract and file is the name of the file to extract\n " ]
Please provide a description of the function:def increment(self, amount=1): self.__lock.acquire() self.__count += amount v = self.value() self.__lock.release() return v
[ "Increments the counter by the given amount\n :param amount: the amount to increment by (default 1)\n :return: the incremented value of the counter\n " ]
Please provide a description of the function:def calculate_report(labels, decodings, distances, losses): r''' This routine will calculate a WER report. It'll compute the `mean` WER and create ``Sample`` objects of the ``report_count`` top lowest loss items from the provided WER results tuple (only items with WER!=0 and ordered by their WER). ''' samples = pmap(process_decode_result, zip(labels, decodings, distances, losses)) # Getting the WER and CER from the accumulated edit distances and lengths samples_wer, samples_cer = wer_cer_batch(labels, decodings) # Order the remaining items by their loss (lowest loss on top) samples.sort(key=lambda s: s.loss) # Then order by WER (highest WER on top) samples.sort(key=lambda s: s.wer, reverse=True) return samples_wer, samples_cer, samples
[]
Please provide a description of the function:def sparse_tensor_value_to_texts(value, alphabet): r return sparse_tuple_to_texts((value.indices, value.values, value.dense_shape), alphabet)
[ "\n Given a :class:`tf.SparseTensor` ``value``, return an array of Python strings\n representing its values, converting tokens to strings using ``alphabet``.\n " ]
Please provide a description of the function:def parse_args(args): parser = argparse.ArgumentParser( description="Imports GramVaani data for Deep Speech" ) parser.add_argument( "--version", action="version", version="GramVaaniImporter {ver}".format(ver=__version__), ) parser.add_argument( "-v", "--verbose", action="store_const", required=False, help="set loglevel to INFO", dest="loglevel", const=logging.INFO, ) parser.add_argument( "-vv", "--very-verbose", action="store_const", required=False, help="set loglevel to DEBUG", dest="loglevel", const=logging.DEBUG, ) parser.add_argument( "-c", "--csv_filename", required=True, help="Path to the GramVaani csv", dest="csv_filename", ) parser.add_argument( "-t", "--target_dir", required=True, help="Directory in which to save the importer GramVaani data", dest="target_dir", ) return parser.parse_args(args)
[ "Parse command line parameters\n Args:\n args ([str]): Command line parameters as list of strings\n Returns:\n :obj:`argparse.Namespace`: command line parameters namespace\n " ]
Please provide a description of the function:def setup_logging(level): format = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s" logging.basicConfig( level=level, stream=sys.stdout, format=format, datefmt="%Y-%m-%d %H:%M:%S" )
[ "Setup basic logging\n Args:\n level (int): minimum log level for emitting messages\n " ]
Please provide a description of the function:def main(args): args = parse_args(args) setup_logging(args.loglevel) _logger.info("Starting GramVaani importer...") _logger.info("Starting loading GramVaani csv...") csv = GramVaaniCSV(args.csv_filename) _logger.info("Starting downloading GramVaani mp3's...") downloader = GramVaaniDownloader(csv, args.target_dir) mp3_directory = downloader.download() _logger.info("Starting converting GramVaani mp3's to wav's...") converter = GramVaaniConverter(args.target_dir, mp3_directory) wav_directory = converter.convert() datasets = GramVaaniDataSets(args.target_dir, wav_directory, csv) datasets.create() datasets.save() _logger.info("Finished GramVaani importer...")
[ "Main entry point allowing external calls\n Args:\n args ([str]): command line parameter list\n " ]
Please provide a description of the function:def download(self): mp3_directory = self._pre_download() self.data.swifter.apply(func=lambda arg: self._download(*arg, mp3_directory), axis=1, raw=True) return mp3_directory
[ "Downloads the data associated with this instance\n Return:\n mp3_directory (os.path): The directory into which the associated mp3's were downloaded\n " ]
Please provide a description of the function:def convert(self): wav_directory = self._pre_convert() for mp3_filename in self.mp3_directory.glob('**/*.mp3'): wav_filename = path.join(wav_directory, os.path.splitext(os.path.basename(mp3_filename))[0] + ".wav") if not path.exists(wav_filename): _logger.debug("Converting mp3 file %s to wav file %s" % (mp3_filename, wav_filename)) transformer = Transformer() transformer.convert(samplerate=SAMPLE_RATE, n_channels=N_CHANNELS, bitdepth=BITDEPTH) transformer.build(str(mp3_filename), str(wav_filename)) else: _logger.debug("Already converted mp3 file %s to wav file %s" % (mp3_filename, wav_filename)) return wav_directory
[ "Converts the mp3's associated with this instance to wav's\n Return:\n wav_directory (os.path): The directory into which the associated wav's were downloaded\n " ]
Please provide a description of the function:def text_to_char_array(original, alphabet): r return np.asarray([alphabet.label_from_string(c) for c in original])
[ "\n Given a Python string ``original``, remove unsupported characters, map characters\n to integers and return a numpy array representing the processed string.\n " ]
Please provide a description of the function:def wer_cer_batch(originals, results): r # The WER is calculated on word (and NOT on character) level. # Therefore we split the strings into words first assert len(originals) == len(results) total_cer = 0.0 total_char_length = 0.0 total_wer = 0.0 total_word_length = 0.0 for original, result in zip(originals, results): total_cer += levenshtein(original, result) total_char_length += len(original) total_wer += levenshtein(original.split(), result.split()) total_word_length += len(original.split()) return total_wer / total_word_length, total_cer / total_char_length
[ "\n The WER is defined as the editing/Levenshtein distance on word level\n divided by the amount of words in the original text.\n In case of the original having more words (N) than the result and both\n being totally different (all N words resulting in 1 edit operation each),\n the WER will always be 1 (N / N = 1).\n " ]
Please provide a description of the function:def variable_on_cpu(name, shape, initializer): r # Use the /cpu:0 device for scoped operations with tf.device(Config.cpu_device): # Create or get apropos variable var = tf.get_variable(name=name, shape=shape, initializer=initializer) return var
[ "\n Next we concern ourselves with graph creation.\n However, before we do so we must introduce a utility function ``variable_on_cpu()``\n used to create a variable in CPU memory.\n " ]
Please provide a description of the function:def calculate_mean_edit_distance_and_loss(iterator, dropout, reuse):
    r'''
    This routine computes the CTC loss for one mini-batch taken from the
    iterator and returns the average loss across that batch. Despite its
    name, this version performs no decoding and computes no edit distance.
    '''
    # Obtain the next batch of data
    (batch_x, batch_seq_len), batch_y = iterator.get_next()

    # Calculate the logits of the batch
    logits, _ = create_model(batch_x, batch_seq_len, dropout, reuse=reuse)

    # Compute the CTC loss using TensorFlow's `ctc_loss`
    total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)

    # Calculate the average loss across the batch
    avg_loss = tf.reduce_mean(total_loss)

    # Finally we return the average loss
    return avg_loss
[]
Please provide a description of the function:def get_tower_results(iterator, optimizer, dropout_rates): r''' With this preliminary step out of the way, we can for each GPU introduce a tower for which's batch we calculate and return the optimization gradients and the average loss across towers. ''' # To calculate the mean of the losses tower_avg_losses = [] # Tower gradients to return tower_gradients = [] with tf.variable_scope(tf.get_variable_scope()): # Loop over available_devices for i in range(len(Config.available_devices)): # Execute operations of tower i on device i device = Config.available_devices[i] with tf.device(device): # Create a scope for all operations of tower i with tf.name_scope('tower_%d' % i): # Calculate the avg_loss and mean_edit_distance and retrieve the decoded # batch along with the original batch's labels (Y) of this tower avg_loss = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0) # Allow for variables to be re-used by the next tower tf.get_variable_scope().reuse_variables() # Retain tower's avg losses tower_avg_losses.append(avg_loss) # Compute gradients for model parameters using tower's mini-batch gradients = optimizer.compute_gradients(avg_loss) # Retain tower's gradients tower_gradients.append(gradients) avg_loss_across_towers = tf.reduce_mean(tower_avg_losses, 0) tf.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries']) # Return gradients and the average loss return tower_gradients, avg_loss_across_towers
[]
Please provide a description of the function:def average_gradients(tower_gradients): r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion. ''' # List of average gradients to return to the caller average_grads = [] # Run this on cpu_device to conserve GPU memory with tf.device(Config.cpu_device): # Loop over gradient/variable pairs from all towers for grad_and_vars in zip(*tower_gradients): # Introduce grads to store the gradients for the current variable grads = [] # Loop over the gradients for the current variable for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) # Create a gradient/variable tuple for the current variable with its average gradient grad_and_var = (grad, grad_and_vars[0][1]) # Add the current tuple to average_grads average_grads.append(grad_and_var) # Return result to caller return average_grads
[]
Please provide a description of the function:def log_variable(variable, gradient=None):
    r'''
    We introduce a function for logging a tensor variable's current state.
    It logs scalar values for the mean, standard deviation, minimum and maximum.
    Furthermore it logs a histogram of its state and (if given)
    of an optimization gradient.
    '''
    name = variable.name.replace(':', '_')
    mean = tf.reduce_mean(variable)
    tf.summary.scalar(name='%s/mean' % name, tensor=mean)
    tf.summary.scalar(name='%s/stddev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
    tf.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(variable))
    tf.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(variable))
    tf.summary.histogram(name=name, values=variable)
    if gradient is not None:
        if isinstance(gradient, tf.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            tf.summary.histogram(name='%s/gradients' % name, values=grad_values)
[]
Please provide a description of the function:def export(): r''' Restores the trained variables into a simpler graph that will be exported for serving. ''' log_info('Exporting the model...') from tensorflow.python.framework.ops import Tensor, Operation inputs, outputs, _ = create_inference_graph(batch_size=FLAGS.export_batch_size, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite) output_names_tensors = [tensor.op.name for tensor in outputs.values() if isinstance(tensor, Tensor)] output_names_ops = [op.name for op in outputs.values() if isinstance(op, Operation)] output_names = ",".join(output_names_tensors + output_names_ops) if not FLAGS.export_tflite: mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')} else: # Create a saver using variables from the above newly created graph def fixup(name): if name.startswith('rnn/lstm_cell/'): return name.replace('rnn/lstm_cell/', 'lstm_fused_cell/') return name mapping = {fixup(v.op.name): v for v in tf.global_variables()} saver = tf.train.Saver(mapping) # Restore variables from training checkpoint checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) checkpoint_path = checkpoint.model_checkpoint_path output_filename = 'output_graph.pb' if FLAGS.remove_export: if os.path.isdir(FLAGS.export_dir): log_info('Removing old export') shutil.rmtree(FLAGS.export_dir) try: output_graph_path = os.path.join(FLAGS.export_dir, output_filename) if not os.path.isdir(FLAGS.export_dir): os.makedirs(FLAGS.export_dir) def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None): return freeze_graph.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=saver.as_saver_def(), input_checkpoint=checkpoint_path, output_node_names=output_node_names, restore_op_name=None, filename_tensor_name=None, output_graph=output_file, clear_devices=False, variable_names_blacklist=variables_blacklist, initializer_nodes='') if not FLAGS.export_tflite: frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h') frozen_graph.version = int(file_relative_read('GRAPH_VERSION').strip()) # Add a no-op node to the graph with metadata information to be loaded by the native client metadata = frozen_graph.node.add() metadata.name = 'model_metadata' metadata.op = 'NoOp' metadata.attr['sample_rate'].i = FLAGS.audio_sample_rate metadata.attr['feature_win_len'].i = FLAGS.feature_win_len metadata.attr['feature_win_step'].i = FLAGS.feature_win_step if FLAGS.export_language: metadata.attr['language'].s = FLAGS.export_language.encode('ascii') with open(output_graph_path, 'wb') as fout: fout.write(frozen_graph.SerializeToString()) else: frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='') output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite')) converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values()) converter.post_training_quantize = True # AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite converter.allow_custom_ops = True tflite_model = converter.convert() with open(output_tflite_path, 'wb') as fout: fout.write(tflite_model) log_info('Exported model for TF Lite engine as {}'.format(os.path.basename(output_tflite_path))) log_info('Models exported at %s' % (FLAGS.export_dir)) except RuntimeError as e: log_error(str(e))
[]
Please provide a description of the function:def ctc_beam_search_decoder(probs_seq, alphabet, beam_size, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): beam_results = swigwrapper.ctc_beam_search_decoder( probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n, scorer) beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] return beam_results
[ "Wrapper for the CTC Beam Search Decoder.\n\n :param probs_seq: 2-D list of probability distributions over each time\n step, with each element being a list of normalized\n probabilities over alphabet and blank.\n :type probs_seq: 2-D list\n :param alphabet: alphabet list.\n :alphabet: Alphabet\n :param beam_size: Width for beam search.\n :type beam_size: int\n :param cutoff_prob: Cutoff probability in pruning,\n default 1.0, no pruning.\n :type cutoff_prob: float\n :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n\n characters with highest probs in alphabet will be\n used in beam search, default 40.\n :type cutoff_top_n: int\n :param scorer: External scorer for partially decoded sentence, e.g. word\n count or language model.\n :type scorer: Scorer\n :return: List of tuples of log probability and sentence as decoding\n results, in descending order of the probability.\n :rtype: list\n " ]
Please provide a description of the function:def ctc_beam_search_decoder_batch(probs_seq, seq_lengths, alphabet, beam_size, num_processes, cutoff_prob=1.0, cutoff_top_n=40, scorer=None): batch_beam_results = swigwrapper.ctc_beam_search_decoder_batch( probs_seq, seq_lengths, alphabet.config_file(), beam_size, num_processes, cutoff_prob, cutoff_top_n, scorer) batch_beam_results = [ [(res.probability, alphabet.decode(res.tokens)) for res in beam_results] for beam_results in batch_beam_results ] return batch_beam_results
[ "Wrapper for the batched CTC beam search decoder.\n\n :param probs_seq: 3-D list with each element as an instance of 2-D list\n of probabilities used by ctc_beam_search_decoder().\n :type probs_seq: 3-D list\n :param alphabet: alphabet list.\n :alphabet: Alphabet\n :param beam_size: Width for beam search.\n :type beam_size: int\n :param num_processes: Number of parallel processes.\n :type num_processes: int\n :param cutoff_prob: Cutoff probability in alphabet pruning,\n default 1.0, no pruning.\n :type cutoff_prob: float\n :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n\n characters with highest probs in alphabet will be\n used in beam search, default 40.\n :type cutoff_top_n: int\n :param num_processes: Number of parallel processes.\n :type num_processes: int\n :param scorer: External scorer for partially decoded sentence, e.g. word\n count or language model.\n :type scorer: Scorer\n :return: List of tuples of log probability and sentence as decoding\n results, in descending order of the probability.\n :rtype: list\n " ]
Please provide a description of the function:def resample(self, data, input_rate):
    # Interpret the raw byte buffer as 16-bit samples
    data16 = np.frombuffer(buffer=data, dtype=np.int16)
    resample_size = int(len(data16) / input_rate * self.RATE_PROCESS)
    resample = signal.resample(data16, resample_size)
    resample16 = np.array(resample, dtype=np.int16)
    return resample16.tobytes()
[ "\n Microphone may not support our native processing sampling rate, so\n resample from input_rate to RATE_PROCESS here for webrtcvad and\n deepspeech\n\n Args:\n data (binary): Input audio stream\n input_rate (int): Input audio rate to resample from\n " ]
Please provide a description of the function:def read_resampled(self): return self.resample(data=self.buffer_queue.get(), input_rate=self.input_rate)
[ "Return a block of audio data resampled to 16000hz, blocking if necessary." ]
Please provide a description of the function:def frame_generator(self): if self.input_rate == self.RATE_PROCESS: while True: yield self.read() else: while True: yield self.read_resampled()
[ "Generator that yields all audio frames from microphone." ]
Please provide a description of the function:def vad_collector(self, padding_ms=300, ratio=0.75, frames=None): if frames is None: frames = self.frame_generator() num_padding_frames = padding_ms // self.frame_duration_ms ring_buffer = collections.deque(maxlen=num_padding_frames) triggered = False for frame in frames: is_speech = self.vad.is_speech(frame, self.sample_rate) if not triggered: ring_buffer.append((frame, is_speech)) num_voiced = len([f for f, speech in ring_buffer if speech]) if num_voiced > ratio * ring_buffer.maxlen: triggered = True for f, s in ring_buffer: yield f ring_buffer.clear() else: yield frame ring_buffer.append((frame, is_speech)) num_unvoiced = len([f for f, speech in ring_buffer if not speech]) if num_unvoiced > ratio * ring_buffer.maxlen: triggered = False yield None ring_buffer.clear()
[ "Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.\n Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.\n Example: (frame, ..., frame, None, frame, ..., frame, None, ...)\n |---utterence---| |---utterence---|\n " ]
Please provide a description of the function:def cut(sentence, HMM=True): global dt if jieba.pool is None: for w in dt.cut(sentence, HMM=HMM): yield w else: parts = strdecode(sentence).splitlines(True) if HMM: result = jieba.pool.map(_lcut_internal, parts) else: result = jieba.pool.map(_lcut_internal_no_hmm, parts) for r in result: for w in r: yield w
[ "\n Global `cut` function that supports parallel processing.\n\n Note that this only works using dt, custom POSTokenizer\n instances are not supported.\n " ]
Please provide a description of the function:def enable_parallel(processnum=None): global pool, dt, cut, cut_for_search from multiprocessing import cpu_count if os.name == 'nt': raise NotImplementedError( "jieba: parallel mode only supports posix system") else: from multiprocessing import Pool dt.check_initialized() if processnum is None: processnum = cpu_count() pool = Pool(processnum) cut = _pcut cut_for_search = _pcut_for_search
[ "\n Change the module's `cut` and `cut_for_search` functions to the\n parallel version.\n\n Note that this only works using dt, custom Tokenizer\n instances are not supported.\n " ]
Please provide a description of the function:def cut(self, sentence, cut_all=False, HMM=True): ''' The main function that segments an entire sentence that contains Chinese characters into separated words. Parameter: - sentence: The str(unicode) to be segmented. - cut_all: Model type. True for full pattern, False for accurate pattern. - HMM: Whether to use the Hidden Markov Model. ''' sentence = strdecode(sentence) if cut_all: re_han = re_han_cut_all re_skip = re_skip_cut_all else: re_han = re_han_default re_skip = re_skip_default if cut_all: cut_block = self.__cut_all elif HMM: cut_block = self.__cut_DAG else: cut_block = self.__cut_DAG_NO_HMM blocks = re_han.split(sentence) for blk in blocks: if not blk: continue if re_han.match(blk): for word in cut_block(blk): yield word else: tmp = re_skip.split(blk) for x in tmp: if re_skip.match(x): yield x elif not cut_all: for xx in x: yield xx else: yield x
[]
Please provide a description of the function:def cut_for_search(self, sentence, HMM=True): words = self.cut(sentence, HMM=HMM) for w in words: if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield gram2 if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield gram3 yield w
[ "\n Finer segmentation for search engines.\n " ]
Please provide a description of the function:def load_userdict(self, f): ''' Load personalized dict to improve detect rate. Parameter: - f : A plain text file contains words and their ocurrences. Can be a file-like object, or the path of the dictionary file, whose encoding must be utf-8. Structure of dict file: word1 freq1 word_type1 word2 freq2 word_type2 ... Word type may be ignored ''' self.check_initialized() if isinstance(f, string_types): f_name = f f = open(f, 'rb') else: f_name = resolve_filename(f) for lineno, ln in enumerate(f, 1): line = ln.strip() if not isinstance(line, text_type): try: line = line.decode('utf-8').lstrip('\ufeff') except UnicodeDecodeError: raise ValueError('dictionary file %s must be utf-8' % f_name) if not line: continue # match won't be None because there's at least one character word, freq, tag = re_userdict.match(line).groups() if freq is not None: freq = freq.strip() if tag is not None: tag = tag.strip() self.add_word(word, freq, tag)
[]
Please provide a description of the function:def add_word(self, word, freq=None, tag=None): self.check_initialized() word = strdecode(word) freq = int(freq) if freq is not None else self.suggest_freq(word, False) self.FREQ[word] = freq self.total += freq if tag: self.user_word_tag_tab[word] = tag for ch in xrange(len(word)): wfrag = word[:ch + 1] if wfrag not in self.FREQ: self.FREQ[wfrag] = 0 if freq == 0: finalseg.add_force_split(word)
[ "\n Add a word to dictionary.\n\n freq and tag can be omitted, freq defaults to be a calculated value\n that ensures the word can be cut out.\n " ]
Please provide a description of the function:def suggest_freq(self, segment, tune=False):
    self.check_initialized()
    ftotal = float(self.total)
    freq = 1
    if isinstance(segment, string_types):
        word = segment
        for seg in self.cut(word, HMM=False):
            freq *= self.FREQ.get(seg, 1) / ftotal
        freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1))
    else:
        segment = tuple(map(strdecode, segment))
        word = ''.join(segment)
        for seg in segment:
            freq *= self.FREQ.get(seg, 1) / ftotal
        freq = min(int(freq * self.total), self.FREQ.get(word, 0))
    if tune:
        self.add_word(word, freq)
    return freq
[ "\n Suggest word frequency to force the characters in a word to be\n joined or splitted.\n\n Parameter:\n - segment : The segments that the word is expected to be cut into,\n If the word should be treated as a whole, use a str.\n - tune : If True, tune the word frequency.\n\n Note that HMM may affect the final result. If the result doesn't change,\n set HMM=False.\n " ]
Please provide a description of the function:def tokenize(self, unicode_sentence, mode="default", HMM=True): if not isinstance(unicode_sentence, text_type): raise ValueError("jieba: the input parameter should be unicode.") start = 0 if mode == 'default': for w in self.cut(unicode_sentence, HMM=HMM): width = len(w) yield (w, start, start + width) start += width else: for w in self.cut(unicode_sentence, HMM=HMM): width = len(w) if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield (gram2, start + i, start + i + 2) if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield (gram3, start + i, start + i + 3) yield (w, start, start + width) start += width
[ "\n Tokenize a sentence and yields tuples of (word, start, end)\n\n Parameter:\n - sentence: the str(unicode) to be segmented.\n - mode: \"default\" or \"search\", \"search\" is for finer segmentation.\n - HMM: whether to use the Hidden Markov Model.\n " ]
Please provide a description of the function:def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False): self.pos_filt = frozenset(allowPOS) g = UndirectWeightedGraph() cm = defaultdict(int) words = tuple(self.tokenizer.cut(sentence)) for i, wp in enumerate(words): if self.pairfilter(wp): for j in xrange(i + 1, i + self.span): if j >= len(words): break if not self.pairfilter(words[j]): continue if allowPOS and withFlag: cm[(wp, words[j])] += 1 else: cm[(wp.word, words[j].word)] += 1 for terms, w in cm.items(): g.addEdge(terms[0], terms[1], w) nodes_rank = g.rank() if withWeight: tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True) else: tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True) if topK: return tags[:topK] else: return tags
[ "\n Extract keywords from sentence using TextRank algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].\n if the POS of w is not in this list, it will be filtered.\n - withFlag: if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n " ]
Please provide a description of the function:def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False): if allowPOS: allowPOS = frozenset(allowPOS) words = self.postokenizer.cut(sentence) else: words = self.tokenizer.cut(sentence) freq = {} for w in words: if allowPOS: if w.flag not in allowPOS: continue elif not withFlag: w = w.word wc = w.word if allowPOS and withFlag else w if len(wc.strip()) < 2 or wc.lower() in self.stop_words: continue freq[w] = freq.get(w, 0.0) + 1.0 total = sum(freq.values()) for k in freq: kw = k.word if allowPOS and withFlag else k freq[k] *= self.idf_freq.get(kw, self.median_idf) / total if withWeight: tags = sorted(freq.items(), key=itemgetter(1), reverse=True) else: tags = sorted(freq, key=freq.__getitem__, reverse=True) if topK: return tags[:topK] else: return tags
[ "\n Extract keywords from sentence using TF-IDF algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n - withFlag: only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n " ]
Please provide a description of the function:def paracrawl_v3_pairs(paracrawl_file): raw_sentences = _raw_sentences(paracrawl_file) for s_en in raw_sentences: try: s_xx = next(raw_sentences) if s_en and s_xx: # Prevent empty string examples. yield s_en, s_xx except StopIteration: tf.logging.error( 'Unmatched final sentence while reading in sentence pairs: [%s]', s_en)
[ "Generates raw (English, other) pairs from a ParaCrawl V3.0 data file.\n\n Args:\n paracrawl_file: A ParaCrawl V3.0 en-.. data file.\n Yields:\n Pairs of (sentence_en, sentence_xx), as Unicode strings.\n Raises:\n StopIteration: If the file ends while this method is in the middle of\n creating a translation pair.\n " ]
Please provide a description of the function:def _raw_sentences(paracrawl_file): for line_utf8 in paracrawl_file: line_uni = line_utf8.decode('UTF-8') text_match = re.match(r' +<seg>(.*)</seg>$', line_uni) if text_match: txt = text_match.group(1) txt = re.sub(r'&amp;', r'&', txt) txt = re.sub(r'& ?amp;', r'&', txt) txt = re.sub(r'& ?apos;', r"'", txt) txt = re.sub(r'& ?quot;', r'"', txt) txt = re.sub(r'& ?lt;', r'<', txt) txt = re.sub(r'& ?gt;', r'>', txt) yield txt
[ "Generates Unicode strings, one for each <seg> in a ParaCrawl data file.\n\n Also decodes some of the most common HTML entities found in ParaCrawl data.\n\n Args:\n paracrawl_file: A ParaCrawl V3.0 en-.. data file.\n Yields:\n One Unicode string for each <seg> element in the ParaCrawl data file.\n " ]
Please provide a description of the function:def clean_en_xx_pairs(en_xx_pairs):
    for s1, s2 in en_xx_pairs:
        if _regex_filter(s1):
            continue
        s1_list, s2_list = _split_sentences(s1, s2)
        if len(s1_list) != len(s2_list):
            continue  # discard this pair
        elif len(s1_list) == 1:
            yield s1, s2
        else:
            # zip works on both Python 2 and 3 (itertools.izip is Python 2 only)
            for s1_subsentence, s2_subsentence in zip(s1_list, s2_list):
                if _regex_filter(s1_subsentence):
                    continue
                yield s1_subsentence, s2_subsentence
[ "Generates a cleaned-up stream of (English, other) translation pairs.\n\n Cleaning includes both filtering and simplistic sentence splitting, with\n minimal assumptions on the non-English pair member: (1) All filtering is\n done based on the English member of the pair, and (2) sentence splitting\n assumes only that sentences can end with one of '.!?' and begin with an\n ASCII uppercase letter. Input pairs that would get split into different\n numbers of sentences (e.g., three English sentences vs. two German ones) are\n discarded.\n\n Args:\n en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the\n stream should be a (sentence_en, sentence_xx) pair.\n Yields:\n Cleaned-up (sentence_en, sentence_xx) pairs.\n " ]
Please provide a description of the function:def _get_case_file_paths(tmp_dir, case, training_fraction=0.95): paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir) if not paths: raise ValueError("Search of tmp_dir (%s) " % tmp_dir, "for subimage paths yielded an empty list, ", "can't proceed with returning training/eval split.") split_index = int(math.floor(len(paths)*training_fraction)) if split_index >= len(paths): raise ValueError("For a path list of size %s " "and a training_fraction of %s " "the resulting split_index of the paths list, " "%s, would leave no elements for the eval " "condition." % (len(paths), training_fraction, split_index)) if case: return paths[:split_index] else: return paths[split_index:]
[ "Obtain a list of image paths corresponding to training or eval case.\n\n Args:\n tmp_dir: str, the root path to which raw images were written, at the\n top level having meta/ and raw/ subdirs.\n case: bool, whether obtaining file paths for training (true) or eval\n (false).\n training_fraction: float, the fraction of the sub-image path list to\n consider as the basis for training examples.\n\n Returns:\n list: A list of file paths.\n\n Raises:\n ValueError: if images not found in tmp_dir, or if training_fraction would\n leave no examples for eval.\n " ]