Dataset schema (one line per column: name, dtype, observed range):

    repo              string   length 7 to 55
    path              string   length 4 to 223
    url               string   length 87 to 315
    code              string   length 75 to 104k
    code_tokens       list
    docstring         string   length 1 to 46.9k
    docstring_tokens  list
    language          string   1 distinct value
    partition         string   3 distinct values
    avg_line_len      float64  7.91 to 980

repo: lmjohns3/theanets
path: theanets/layers/base.py
url: https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/base.py#L370-L394

def find(self, key):
    '''Get a shared variable for a parameter by name.

    Parameters
    ----------
    key : str or int
        The name of the parameter to look up, or the index of the
        parameter in our parameter list. These are both dependent on the
        implementation of the layer.

    Returns
    -------
    param : shared variable
        A shared variable containing values for the given parameter.

    Raises
    ------
    KeyError
        If a param with the given name does not exist.
    '''
    name = self._fmt(str(key))
    for i, p in enumerate(self._params):
        if key == i or name == p.name:
            return p
    raise KeyError(key)
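
A brief usage sketch (the `layer` object and the parameter name 'w' are hypothetical; actual names depend on the layer implementation):

weights = layer.find('w')    # look up by name (run through self._fmt)
weights = layer.find(0)      # or by index into the parameter list
layer.find('missing')        # raises KeyError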
[ "def", "find", "(", "self", ",", "key", ")", ":", "name", "=", "self", ".", "_fmt", "(", "str", "(", "key", ")", ")", "for", "i", ",", "p", "in", "enumerate", "(", "self", ".", "_params", ")", ":", "if", "key", "==", "i", "or", "name", "==", "p", ".", "name", ":", "return", "p", "raise", "KeyError", "(", "key", ")" ]
Get a shared variable for a parameter by name. Parameters ---------- key : str or int The name of the parameter to look up, or the index of the parameter in our parameter list. These are both dependent on the implementation of the layer. Returns ------- param : shared variable A shared variable containing values for the given parameter. Raises ------ KeyError If a param with the given name does not exist.
[ "Get", "a", "shared", "variable", "for", "a", "parameter", "by", "name", "." ]

language: python | partition: test | avg_line_len: 29.56

repo: vmonaco/pohmm
path: pohmm/pohmm.py
url: https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L989-L1049

def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """
    Determine the joint maximum likelihood estimate
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]

    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]

    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_prob but not both')

    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_prob but not both')

    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob

    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]

    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                 self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                              self.emission[feature]['sigma'].flatten()))
    elif hstate is None:
        # Marginalize hstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                           self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                        self.emission[feature]['sigma'][pstate_idx, :]))
    elif pstate is None:
        # Marginalize pstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                           self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                        self.emission[feature]['sigma'][:, hstate]))
    else:
        if self.emission_name_distr[feature] == 'lognormal':
            return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                      self.emission[feature]['logmu'][pstate_idx, hstate])
        elif self.emission_name_distr[feature] == 'normal':
            return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                   self.emission[feature]['sigma'][pstate_idx, hstate])
    return
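
A usage sketch, assuming a fitted model with an emission feature named 'tau' and a pstate label 'a' (all hypothetical). Leaving a state unspecified marginalizes it with the steady-state probabilities; `pstate` cannot be combined with `pstate_prob`, nor `hstate` with `hstate_prob`:

ev = model.expected_value('tau')                        # marginalize both states
ev = model.expected_value('tau', pstate='a')            # condition on pstate only
ev = model.expected_value('tau', pstate='a', hstate=0)  # fully conditioned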
[ "def", "expected_value", "(", "self", ",", "feature", "=", "None", ",", "pstate", "=", "None", ",", "hstate", "=", "None", ",", "pstate_prob", "=", "None", ",", "hstate_prob", "=", "None", ")", ":", "# Use the first feature by default", "if", "feature", "is", "None", ":", "feature", "=", "self", ".", "emission_name", "[", "0", "]", "# Will default to marginal pstate if pstate is unknown or None", "pstate_idx", "=", "self", ".", "e", "[", "pstate", "]", "if", "pstate", "is", "not", "None", "and", "pstate_prob", "is", "not", "None", ":", "raise", "Exception", "(", "'Must provide either pstate or pstate_proba but not both'", ")", "if", "hstate", "is", "not", "None", "and", "hstate_prob", "is", "not", "None", ":", "raise", "Exception", "(", "'Must provide either hstate or hstate_proba but not both'", ")", "# Marginalize pstate using the steady state probas", "if", "pstate_prob", "is", "None", ":", "pstate_prob", "=", "self", ".", "pstate_steadyprob", "# Marginalize hstate using the steady state probas", "if", "hstate_prob", "is", "None", ":", "hstate_prob", "=", "self", ".", "steadyprob", "[", "pstate_idx", "]", "if", "pstate", "is", "None", "and", "hstate", "is", "None", ":", "# Marginalize both pstate and hstate", "w", "=", "(", "pstate_prob", "[", ":", ",", "np", ".", "newaxis", "]", "*", "hstate_prob", ")", ".", "flatten", "(", ")", "if", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'lognormal'", ":", "return", "np", ".", "sum", "(", "w", "*", "expected_lognormal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'logsigma'", "]", ".", "flatten", "(", ")", ",", "self", ".", "emission", "[", "feature", "]", "[", "'logmu'", "]", ".", "flatten", "(", ")", ")", ")", "elif", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'normal'", ":", "return", "np", ".", "sum", "(", "w", "*", "expected_normal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'mu'", "]", ".", "flatten", "(", ")", ",", "self", ".", "emission", "[", "feature", "]", "[", "'sigma'", "]", ".", "flatten", "(", ")", ")", ")", "elif", "hstate", "is", "None", ":", "# Marginalize hstate", "if", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'lognormal'", ":", "return", "np", ".", "sum", "(", "hstate_prob", "*", "expected_lognormal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'logsigma'", "]", "[", "pstate_idx", ",", ":", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'logmu'", "]", "[", "pstate_idx", ",", ":", "]", ")", ")", "elif", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'normal'", ":", "return", "np", ".", "sum", "(", "hstate_prob", "*", "expected_normal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'mu'", "]", "[", "pstate_idx", ",", ":", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'sigma'", "]", "[", "pstate_idx", ",", ":", "]", ")", ")", "elif", "pstate", "is", "None", ":", "# Marginalize pstate", "if", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'lognormal'", ":", "return", "np", ".", "sum", "(", "pstate_prob", "*", "expected_lognormal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'logsigma'", "]", "[", ":", ",", "hstate", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'logmu'", "]", "[", ":", ",", "hstate", "]", ")", ")", "elif", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'normal'", ":", "return", "np", ".", "sum", "(", "pstate_prob", "*", "expected_normal", "(", "self", ".", "emission", "[", "feature", "]", 
"[", "'mu'", "]", "[", ":", ",", "hstate", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'sigma'", "]", "[", ":", ",", "hstate", "]", ")", ")", "else", ":", "if", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'lognormal'", ":", "return", "expected_lognormal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'logsigma'", "]", "[", "pstate_idx", ",", "hstate", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'logmu'", "]", "[", "pstate_idx", ",", "hstate", "]", ")", "elif", "self", ".", "emission_name_distr", "[", "feature", "]", "==", "'normal'", ":", "return", "expected_normal", "(", "self", ".", "emission", "[", "feature", "]", "[", "'mu'", "]", "[", "pstate_idx", ",", "hstate", "]", ",", "self", ".", "emission", "[", "feature", "]", "[", "'sigma'", "]", "[", "pstate_idx", ",", "hstate", "]", ")", "return" ]
Determine the joint maximum likelihood estimate
[ "Determine", "the", "joint", "maximum", "likelihood", "estimate" ]

language: python | partition: train | avg_line_len: 55.409836

repo: manns/pyspread
path: pyspread/src/interfaces/pys.py
url: https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/interfaces/pys.py#L280-L295

def _pys2col_widths(self, line):
    """Updates col_widths in code_array"""

    # Split with maxsplit 3
    split_line = self._split_tidy(line)
    key = col, tab = self._get_key(*split_line[:2])
    width = float(split_line[2])

    shape = self.code_array.shape

    try:
        if col < shape[1] and tab < shape[2]:
            self.code_array.col_widths[key] = width
    except ValueError:
        pass
[ "def", "_pys2col_widths", "(", "self", ",", "line", ")", ":", "# Split with maxsplit 3", "split_line", "=", "self", ".", "_split_tidy", "(", "line", ")", "key", "=", "col", ",", "tab", "=", "self", ".", "_get_key", "(", "*", "split_line", "[", ":", "2", "]", ")", "width", "=", "float", "(", "split_line", "[", "2", "]", ")", "shape", "=", "self", ".", "code_array", ".", "shape", "try", ":", "if", "col", "<", "shape", "[", "1", "]", "and", "tab", "<", "shape", "[", "2", "]", ":", "self", ".", "code_array", ".", "col_widths", "[", "key", "]", "=", "width", "except", "ValueError", ":", "pass" ]
Updates col_widths in code_array
[ "Updates", "col_widths", "in", "code_array" ]

language: python | partition: train | avg_line_len: 27.375

repo: chrisrink10/basilisp
path: src/basilisp/lang/compiler/generator.py
url: https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L812-L832

def _do_to_py_ast(ctx: GeneratorContext, node: Do) -> GeneratedPyAST:
    """Return a Python AST Node for a `do` expression."""
    assert node.op == NodeOp.DO
    assert not node.is_body

    body_ast = GeneratedPyAST.reduce(
        *map(partial(gen_py_ast, ctx), chain(node.statements, [node.ret]))
    )

    fn_body_ast: List[ast.AST] = []
    do_result_name = genname(_DO_PREFIX)
    fn_body_ast.extend(map(statementize, body_ast.dependencies))
    fn_body_ast.append(
        ast.Assign(
            targets=[ast.Name(id=do_result_name, ctx=ast.Store())], value=body_ast.node
        )
    )

    return GeneratedPyAST(
        node=ast.Name(id=do_result_name, ctx=ast.Load()), dependencies=fn_body_ast
    )
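
A sketch of the output shape, not the compiler's literal output (the result name is produced by genname(_DO_PREFIX)). For a form like (do stmt1 stmt2 ret), the dependencies are roughly:

# <statementized Python for stmt1>
# <statementized Python for stmt2>
# do_result_N = <Python expression for ret>

and the returned expression node is simply the name do_result_N loaded back.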
[ "def", "_do_to_py_ast", "(", "ctx", ":", "GeneratorContext", ",", "node", ":", "Do", ")", "->", "GeneratedPyAST", ":", "assert", "node", ".", "op", "==", "NodeOp", ".", "DO", "assert", "not", "node", ".", "is_body", "body_ast", "=", "GeneratedPyAST", ".", "reduce", "(", "*", "map", "(", "partial", "(", "gen_py_ast", ",", "ctx", ")", ",", "chain", "(", "node", ".", "statements", ",", "[", "node", ".", "ret", "]", ")", ")", ")", "fn_body_ast", ":", "List", "[", "ast", ".", "AST", "]", "=", "[", "]", "do_result_name", "=", "genname", "(", "_DO_PREFIX", ")", "fn_body_ast", ".", "extend", "(", "map", "(", "statementize", ",", "body_ast", ".", "dependencies", ")", ")", "fn_body_ast", ".", "append", "(", "ast", ".", "Assign", "(", "targets", "=", "[", "ast", ".", "Name", "(", "id", "=", "do_result_name", ",", "ctx", "=", "ast", ".", "Store", "(", ")", ")", "]", ",", "value", "=", "body_ast", ".", "node", ")", ")", "return", "GeneratedPyAST", "(", "node", "=", "ast", ".", "Name", "(", "id", "=", "do_result_name", ",", "ctx", "=", "ast", ".", "Load", "(", ")", ")", ",", "dependencies", "=", "fn_body_ast", ")" ]
Return a Python AST Node for a `do` expression.
[ "Return", "a", "Python", "AST", "Node", "for", "a", "do", "expression", "." ]

language: python | partition: test | avg_line_len: 33.095238

repo: phoebe-project/phoebe2
path: phoebe/parameters/parameters.py
url: https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1283-L1314

def filter(self, twig=None, check_visible=True, check_default=True, **kwargs):
    """
    Filter the ParameterSet based on the meta-tags of the Parameters
    and return another ParameterSet.

    Because another ParameterSet is returned, these filter calls are
    chainable.

    >>> b.filter(context='component').filter(component='starA')

    :parameter str twig: (optional) the search twig - essentially a single
        string with any delimiter (ie '@') that will be parsed into any of
        the meta-tags. Example: instead of
        b.filter(context='component', component='starA'), you could do
        b.filter('starA@component').
    :parameter bool check_visible: whether to hide invisible parameters.
        These are usually parameters that do not play a role unless the
        value of another parameter meets some condition.
    :parameter bool check_default: whether to exclude parameters which have
        a _default tag (these are parameters which solely exist to provide
        defaults for when new parameters or datasets are added and the
        parameter needs to be copied appropriately). Defaults to True.
    :parameter **kwargs: meta-tags to search (ie. 'context', 'component',
        'model', etc). See :func:`meta` for all possible options.
    :return: the resulting :class:`ParameterSet`
    """
    kwargs['check_visible'] = check_visible
    kwargs['check_default'] = check_default
    kwargs['force_ps'] = True
    return self.filter_or_get(twig=twig, **kwargs)
[ "def", "filter", "(", "self", ",", "twig", "=", "None", ",", "check_visible", "=", "True", ",", "check_default", "=", "True", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'check_visible'", "]", "=", "check_visible", "kwargs", "[", "'check_default'", "]", "=", "check_default", "kwargs", "[", "'force_ps'", "]", "=", "True", "return", "self", ".", "filter_or_get", "(", "twig", "=", "twig", ",", "*", "*", "kwargs", ")" ]
Filter the ParameterSet based on the meta-tags of the Parameters and return another ParameterSet. Because another ParameterSet is returned, these filter calls are chainable. >>> b.filter(context='component').filter(component='starA') :parameter str twig: (optional) the search twig - essentially a single string with any delimiter (ie '@') that will be parsed into any of the meta-tags. Example: instead of b.filter(context='component', component='starA'), you could do b.filter('starA@component'). :parameter bool check_visible: whether to hide invisible parameters. These are usually parameters that do not play a role unless the value of another parameter meets some condition. :parameter bool check_default: whether to exclude parameters which have a _default tag (these are parameters which solely exist to provide defaults for when new parameters or datasets are added and the parameter needs to be copied appropriately). Defaults to True. :parameter **kwargs: meta-tags to search (ie. 'context', 'component', 'model', etc). See :func:`meta` for all possible options. :return: the resulting :class:`ParameterSet`
[ "Filter", "the", "ParameterSet", "based", "on", "the", "meta", "-", "tags", "of", "the", "Parameters", "and", "return", "another", "ParameterSet", "." ]

language: python | partition: train | avg_line_len: 51.65625

repo: tensorflow/probability
path: tensorflow_probability/python/stats/quantiles.py
url: https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/quantiles.py#L292-L400

def histogram(x, edges, axis=None,
              extend_lower_interval=False, extend_upper_interval=False,
              dtype=None, name=None):
  """Count how often `x` falls in intervals defined by `edges`.

  Given `edges = [c0, ..., cK]`, defining intervals `I0 = [c0, c1)`,
  `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`,
  This function counts how often `x` falls into each interval.

  Values of `x` outside of the intervals cause errors. Consider using
  `extend_lower_interval`, `extend_upper_interval` to deal with this.

  Args:
    x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, must
      have statically known number of dimensions. The `axis` kwarg determines
      which dimensions index iid samples. Other dimensions of `x` index
      "events" for which we will compute different histograms.
    edges: `Tensor` of same `dtype` as `x`. The first dimension indexes
      edges of intervals. Must either be `1-D` or have `edges.shape[1:]` the
      same as the dimensions of `x` excluding `axis`. If `rank(edges) > 1`,
      `edges[k]` designates a shape `edges.shape[1:]` `Tensor` of interval
      edges for the corresponding dimensions of `x`.
    axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
      axis in `x` that index iid samples. `Default value:` `None` (treat
      every dimension as sample dimension).
    extend_lower_interval: Python `bool`. If `True`, extend the lowest
      interval `I0` to `(-inf, c1]`.
    extend_upper_interval: Python `bool`. If `True`, extend the upper
      interval `I_{K-1}` to `[c_{K-1}, +inf)`.
    dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`.
    name: A Python string name to prepend to created ops.
      `Default value:` 'histogram'

  Returns:
    counts: `Tensor` of type `dtype` and, with
      `~axis = [i for i in range(arr.ndim) if i not in axis]`,
      `counts.shape = [edges.shape[0]] + x.shape[~axis]`.
      With `I` a multi-index into `~axis`, `counts[k][I]` is the number of
      times event(s) fell into the `kth` interval of `edges`.

  #### Examples

  ```python
  # x.shape = [1000, 2]
  # x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2).
  x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])],
               axis=-1)

  # edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0].
  edges = [0., 0.5, 1.0, 1.5, 2.0]

  tfp.stats.histogram(x, edges)
  ==> approximately [500, 500, 500, 500]

  tfp.stats.histogram(x, edges, axis=0)
  ==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]]
  ```
  """
  with tf.compat.v1.name_scope(name, 'histogram', values=[x, edges, axis]):

    # Tensor conversions.
    in_dtype = dtype_util.common_dtype([x, edges], preferred_dtype=tf.float32)

    x = tf.convert_to_tensor(value=x, name='x', dtype=in_dtype)
    edges = tf.convert_to_tensor(value=edges, name='edges', dtype=in_dtype)

    # Move dims in axis to the left end as one flattened dim.
    # After this, x.shape = [n_samples] + E.
    if axis is None:
      x = tf.reshape(x, shape=[-1])
    else:
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative_list(axis, x_ndims)
      if not axis:
        raise ValueError('`axis` cannot be empty. Found: {}'.format(axis))
      x = _move_dims_to_flat_end(x, axis, x_ndims, right_end=False)

    # bins.shape = x.shape = [n_samples] + E,
    # and bins[i] is a shape E Tensor of the bins that sample `i` fell into.
    # E is the "event shape", which is [] if axis is None.
    bins = find_bins(
        x,
        edges=edges,
        # If not extending intervals, then values outside the edges will return
        # -1, which gives an error when fed to bincount.
        extend_lower_interval=extend_lower_interval,
        extend_upper_interval=extend_upper_interval,
        dtype=tf.int32)

    # TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.
    counts = count_integers(
        bins,
        # Ensure we get correct output, even if x did not fall into every bin
        minlength=tf.shape(input=edges)[0] - 1,
        maxlength=tf.shape(input=edges)[0] - 1,
        axis=0,
        dtype=dtype or in_dtype)
    n_edges = tf.compat.dimension_value(edges.shape[0])
    if n_edges is not None:
      counts.set_shape(
          tf.TensorShape([n_edges - 1]).concatenate(counts.shape[1:]))
    return counts
[ "def", "histogram", "(", "x", ",", "edges", ",", "axis", "=", "None", ",", "extend_lower_interval", "=", "False", ",", "extend_upper_interval", "=", "False", ",", "dtype", "=", "None", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'histogram'", ",", "values", "=", "[", "x", ",", "edges", ",", "axis", "]", ")", ":", "# Tensor conversions.", "in_dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "x", ",", "edges", "]", ",", "preferred_dtype", "=", "tf", ".", "float32", ")", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ",", "dtype", "=", "in_dtype", ")", "edges", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "edges", ",", "name", "=", "'edges'", ",", "dtype", "=", "in_dtype", ")", "# Move dims in axis to the left end as one flattened dim.", "# After this, x.shape = [n_samples] + E.", "if", "axis", "is", "None", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "shape", "=", "[", "-", "1", "]", ")", "else", ":", "x_ndims", "=", "_get_static_ndims", "(", "x", ",", "expect_static", "=", "True", ",", "expect_ndims_at_least", "=", "1", ")", "axis", "=", "_make_static_axis_non_negative_list", "(", "axis", ",", "x_ndims", ")", "if", "not", "axis", ":", "raise", "ValueError", "(", "'`axis` cannot be empty. Found: {}'", ".", "format", "(", "axis", ")", ")", "x", "=", "_move_dims_to_flat_end", "(", "x", ",", "axis", ",", "x_ndims", ",", "right_end", "=", "False", ")", "# bins.shape = x.shape = [n_samples] + E,", "# and bins[i] is a shape E Tensor of the bins that sample `i` fell into.", "# E is the \"event shape\", which is [] if axis is None.", "bins", "=", "find_bins", "(", "x", ",", "edges", "=", "edges", ",", "# If not extending intervals, then values outside the edges will return", "# -1, which gives an error when fed to bincount.", "extend_lower_interval", "=", "extend_lower_interval", ",", "extend_upper_interval", "=", "extend_upper_interval", ",", "dtype", "=", "tf", ".", "int32", ")", "# TODO(b/124015136) Use standard tf.math.bincount once it supports `axis`.", "counts", "=", "count_integers", "(", "bins", ",", "# Ensure we get correct output, even if x did not fall into every bin", "minlength", "=", "tf", ".", "shape", "(", "input", "=", "edges", ")", "[", "0", "]", "-", "1", ",", "maxlength", "=", "tf", ".", "shape", "(", "input", "=", "edges", ")", "[", "0", "]", "-", "1", ",", "axis", "=", "0", ",", "dtype", "=", "dtype", "or", "in_dtype", ")", "n_edges", "=", "tf", ".", "compat", ".", "dimension_value", "(", "edges", ".", "shape", "[", "0", "]", ")", "if", "n_edges", "is", "not", "None", ":", "counts", ".", "set_shape", "(", "tf", ".", "TensorShape", "(", "[", "n_edges", "-", "1", "]", ")", ".", "concatenate", "(", "counts", ".", "shape", "[", "1", ":", "]", ")", ")", "return", "counts" ]
Count how often `x` falls in intervals defined by `edges`. Given `edges = [c0, ..., cK]`, defining intervals `I0 = [c0, c1)`, `I1 = [c1, c2)`, ..., `I_{K-1} = [c_{K-1}, cK]`, This function counts how often `x` falls into each interval. Values of `x` outside of the intervals cause errors. Consider using `extend_lower_interval`, `extend_upper_interval` to deal with this. Args: x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, must have statically known number of dimensions. The `axis` kwarg determines which dimensions index iid samples. Other dimensions of `x` index "events" for which we will compute different histograms. edges: `Tensor` of same `dtype` as `x`. The first dimension indexes edges of intervals. Must either be `1-D` or have `edges.shape[1:]` the same as the dimensions of `x` excluding `axis`. If `rank(edges) > 1`, `edges[k]` designates a shape `edges.shape[1:]` `Tensor` of interval edges for the corresponding dimensions of `x`. axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The axis in `x` that index iid samples. `Default value:` `None` (treat every dimension as sample dimension). extend_lower_interval: Python `bool`. If `True`, extend the lowest interval `I0` to `(-inf, c1]`. extend_upper_interval: Python `bool`. If `True`, extend the upper interval `I_{K-1}` to `[c_{K-1}, +inf)`. dtype: The output type (`int32` or `int64`). `Default value:` `x.dtype`. name: A Python string name to prepend to created ops. `Default value:` 'histogram' Returns: counts: `Tensor` of type `dtype` and, with `~axis = [i for i in range(arr.ndim) if i not in axis]`, `counts.shape = [edges.shape[0]] + x.shape[~axis]`. With `I` a multi-index into `~axis`, `counts[k][I]` is the number of times event(s) fell into the `kth` interval of `edges`. #### Examples ```python # x.shape = [1000, 2] # x[:, 0] ~ Uniform(0, 1), x[:, 1] ~ Uniform(1, 2). x = tf.stack([tf.random_uniform([1000]), 1 + tf.random_uniform([1000])], axis=-1) # edges ==> bins [0, 0.5), [0.5, 1.0), [1.0, 1.5), [1.5, 2.0]. edges = [0., 0.5, 1.0, 1.5, 2.0] tfp.stats.histogram(x, edges) ==> approximately [500, 500, 500, 500] tfp.stats.histogram(x, edges, axis=0) ==> approximately [[500, 500, 0, 0], [0, 0, 500, 500]] ```
[ "Count", "how", "often", "x", "falls", "in", "intervals", "defined", "by", "edges", "." ]

language: python | partition: test | avg_line_len: 40.477064

repo: yyuu/botornado
path: boto/s3/bucket.py
url: https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L360-L397

def get_all_versions(self, headers=None, **params):
    """
    A lower-level, version-aware method for listing contents of a bucket.
    This closely models the actual S3 API and requires you to manually
    handle the paging of results. For a higher-level method that handles
    the details of paging for you, you can use the list method.

    :type max_keys: int
    :param max_keys: The maximum number of keys to retrieve

    :type prefix: string
    :param prefix: The prefix of the keys you want to retrieve

    :type key_marker: string
    :param key_marker: The "marker" of where you are in the result set
                       with respect to keys.

    :type version_id_marker: string
    :param version_id_marker: The "marker" of where you are in the result
                              set with respect to version-id's.

    :type delimiter: string
    :param delimiter: If this optional, Unicode string parameter
                      is included with your request, then keys that
                      contain the same string between the prefix and
                      the first occurrence of the delimiter will be
                      rolled up into a single result element in the
                      CommonPrefixes collection. These rolled-up keys
                      are not returned elsewhere in the response.

    :rtype: ResultSet
    :return: The result from S3 listing the keys requested
    """
    return self._get_all([('Version', self.key_class),
                          ('CommonPrefixes', Prefix),
                          ('DeleteMarker', DeleteMarker)],
                         'versions', headers, **params)
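
A manual-paging sketch with a hypothetical `bucket` object; the marker attribute names on the returned ResultSet (`is_truncated`, `next_key_marker`, `next_version_id_marker`) are assumptions based on boto's XML parsing, so verify them against your boto version:

rs = bucket.get_all_versions(max_keys=100)
while True:
    for version in rs:
        print(version.name)
    if not rs.is_truncated:
        break
    rs = bucket.get_all_versions(max_keys=100,
                                 key_marker=rs.next_key_marker,
                                 version_id_marker=rs.next_version_id_marker)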
[ "def", "get_all_versions", "(", "self", ",", "headers", "=", "None", ",", "*", "*", "params", ")", ":", "return", "self", ".", "_get_all", "(", "[", "(", "'Version'", ",", "self", ".", "key_class", ")", ",", "(", "'CommonPrefixes'", ",", "Prefix", ")", ",", "(", "'DeleteMarker'", ",", "DeleteMarker", ")", "]", ",", "'versions'", ",", "headers", ",", "*", "*", "params", ")" ]
A lower-level, version-aware method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type key_marker: string :param key_marker: The "marker" of where you are in the result set with respect to keys. :type version_id_marker: string :param version_id_marker: The "marker" of where you are in the result set with respect to version-id's. :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested
[ "A", "lower", "-", "level", "version", "-", "aware", "method", "for", "listing", "contents", "of", "a", "bucket", ".", "This", "closely", "models", "the", "actual", "S3", "API", "and", "requires", "you", "to", "manually", "handle", "the", "paging", "of", "results", ".", "For", "a", "higher", "-", "level", "method", "that", "handles", "the", "details", "of", "paging", "for", "you", "you", "can", "use", "the", "list", "method", ".", ":", "type", "max_keys", ":", "int", ":", "param", "max_keys", ":", "The", "maximum", "number", "of", "keys", "to", "retrieve", ":", "type", "prefix", ":", "string", ":", "param", "prefix", ":", "The", "prefix", "of", "the", "keys", "you", "want", "to", "retrieve", ":", "type", "key_marker", ":", "string", ":", "param", "key_marker", ":", "The", "marker", "of", "where", "you", "are", "in", "the", "result", "set", "with", "respect", "to", "keys", ".", ":", "type", "version_id_marker", ":", "string", ":", "param", "version_id_marker", ":", "The", "marker", "of", "where", "you", "are", "in", "the", "result", "set", "with", "respect", "to", "version", "-", "id", "s", ".", ":", "type", "delimiter", ":", "string", ":", "param", "delimiter", ":", "If", "this", "optional", "Unicode", "string", "parameter", "is", "included", "with", "your", "request", "then", "keys", "that", "contain", "the", "same", "string", "between", "the", "prefix", "and", "the", "first", "occurrence", "of", "the", "delimiter", "will", "be", "rolled", "up", "into", "a", "single", "result", "element", "in", "the", "CommonPrefixes", "collection", ".", "These", "rolled", "-", "up", "keys", "are", "not", "returned", "elsewhere", "in", "the", "response", "." ]

language: python | partition: train | avg_line_len: 47.026316

repo: has2k1/plotnine
path: plotnine/aes.py
url: https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/aes.py#L294-L311

def strip_dots(value):
    """
    Remove dots(if any) that mark calculated aesthetics

    Parameters
    ----------
    value : object
        Aesthetic value. In most cases this will be a string
        but other types will pass through unmodified.

    Return
    ------
    out : object
        Aesthetic value with the dots removed.
    """
    with suppress(TypeError):
        value = DOTS_RE.sub(r'\1', value)
    return value
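
For illustration, assuming DOTS_RE matches the '..name..' convention used for calculated aesthetics:

strip_dots('..count..')  # -> 'count'
strip_dots('price')      # -> 'price' (no dots to strip)
strip_dots(3.5)          # -> 3.5 (TypeError is suppressed; non-strings pass through)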
[ "def", "strip_dots", "(", "value", ")", ":", "with", "suppress", "(", "TypeError", ")", ":", "value", "=", "DOTS_RE", ".", "sub", "(", "r'\\1'", ",", "value", ")", "return", "value" ]
Remove dots(if any) that mark calculated aesthetics Parameters ---------- value : object Aesthetic value. In most cases this will be a string but other types will pass through unmodified. Return ------ out : object Aesthetic value with the dots removed.
[ "Remove", "dots", "(", "if", "any", ")", "that", "mark", "calculated", "aesthetics" ]

language: python | partition: train | avg_line_len: 23.222222

repo: gwastro/pycbc
path: pycbc/inference/sampler/base_mcmc.py
url: https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L385-L427

def set_p0(self, samples_file=None, prior=None):
    """Sets the initial position of the walkers.

    Parameters
    ----------
    samples_file : InferenceFile, optional
        If provided, use the last iteration in the given file for the
        starting positions.
    prior : JointDistribution, optional
        Use the given prior to set the initial positions rather than
        ``model``'s prior.

    Returns
    -------
    p0 : dict
        A dictionary mapping sampling params to the starting positions.
    """
    # if samples are given then use those as initial positions
    if samples_file is not None:
        with self.io(samples_file, 'r') as fp:
            samples = fp.read_samples(self.variable_params,
                                      iteration=-1, flatten=False)
            # remove the (length 1) niterations dimension
            samples = samples[..., 0]
        # make sure we have the same shape
        assert samples.shape == self.base_shape, (
            "samples in file {} have shape {}, but I have shape {}".
            format(samples_file, samples.shape, self.base_shape))
        # transform to sampling parameter space
        if self.model.sampling_transforms is not None:
            samples = self.model.sampling_transforms.apply(samples)
    # draw random samples if samples are not provided
    else:
        nsamples = numpy.prod(self.base_shape)
        samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape(
            self.base_shape)
    # store as ND array with shape [base_shape] x nparams
    ndim = len(self.variable_params)
    p0 = numpy.ones(list(self.base_shape)+[ndim])
    for i, param in enumerate(self.sampling_params):
        p0[..., i] = samples[param]
    self._p0 = p0
    return self.p0
[ "def", "set_p0", "(", "self", ",", "samples_file", "=", "None", ",", "prior", "=", "None", ")", ":", "# if samples are given then use those as initial positions", "if", "samples_file", "is", "not", "None", ":", "with", "self", ".", "io", "(", "samples_file", ",", "'r'", ")", "as", "fp", ":", "samples", "=", "fp", ".", "read_samples", "(", "self", ".", "variable_params", ",", "iteration", "=", "-", "1", ",", "flatten", "=", "False", ")", "# remove the (length 1) niterations dimension", "samples", "=", "samples", "[", "...", ",", "0", "]", "# make sure we have the same shape", "assert", "samples", ".", "shape", "==", "self", ".", "base_shape", ",", "(", "\"samples in file {} have shape {}, but I have shape {}\"", ".", "format", "(", "samples_file", ",", "samples", ".", "shape", ",", "self", ".", "base_shape", ")", ")", "# transform to sampling parameter space", "if", "self", ".", "model", ".", "sampling_transforms", "is", "not", "None", ":", "samples", "=", "self", ".", "model", ".", "sampling_transforms", ".", "apply", "(", "samples", ")", "# draw random samples if samples are not provided", "else", ":", "nsamples", "=", "numpy", ".", "prod", "(", "self", ".", "base_shape", ")", "samples", "=", "self", ".", "model", ".", "prior_rvs", "(", "size", "=", "nsamples", ",", "prior", "=", "prior", ")", ".", "reshape", "(", "self", ".", "base_shape", ")", "# store as ND array with shape [base_shape] x nparams", "ndim", "=", "len", "(", "self", ".", "variable_params", ")", "p0", "=", "numpy", ".", "ones", "(", "list", "(", "self", ".", "base_shape", ")", "+", "[", "ndim", "]", ")", "for", "i", ",", "param", "in", "enumerate", "(", "self", ".", "sampling_params", ")", ":", "p0", "[", "...", ",", "i", "]", "=", "samples", "[", "param", "]", "self", ".", "_p0", "=", "p0", "return", "self", ".", "p0" ]
Sets the initial position of the walkers. Parameters ---------- samples_file : InferenceFile, optional If provided, use the last iteration in the given file for the starting positions. prior : JointDistribution, optional Use the given prior to set the initial positions rather than ``model``'s prior. Returns ------- p0 : dict A dictionary maping sampling params to the starting positions.
[ "Sets", "the", "initial", "position", "of", "the", "walkers", "." ]

language: python | partition: train | avg_line_len: 44.55814

repo: manns/pyspread
path: pyspread/src/gui/_widgets.py
url: https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L686-L702

def toggle(self, event):
    """Toggles state to next bitmap"""

    if self.state < len(self.bitmap_list) - 1:
        self.state += 1
    else:
        self.state = 0

    self.SetBitmapLabel(self.bitmap_list[self.state])

    try:
        event.Skip()
    except AttributeError:
        pass

    """For compatibility with toggle buttons"""
    setattr(self, "GetToolState", lambda x: self.state)
[ "def", "toggle", "(", "self", ",", "event", ")", ":", "if", "self", ".", "state", "<", "len", "(", "self", ".", "bitmap_list", ")", "-", "1", ":", "self", ".", "state", "+=", "1", "else", ":", "self", ".", "state", "=", "0", "self", ".", "SetBitmapLabel", "(", "self", ".", "bitmap_list", "[", "self", ".", "state", "]", ")", "try", ":", "event", ".", "Skip", "(", ")", "except", "AttributeError", ":", "pass", "\"\"\"For compatibility with toggle buttons\"\"\"", "setattr", "(", "self", ",", "\"GetToolState\"", ",", "lambda", "x", ":", "self", ".", "state", ")" ]
Toggles state to next bitmap
[ "Toggles", "state", "to", "next", "bitmap" ]

language: python | partition: train | avg_line_len: 25.352941

repo: gabfl/password-generator-py
path: src/pwgenerator.py
url: https://github.com/gabfl/password-generator-py/blob/cd59078fd3e6ea85b7acd9bfcf6d04014c0f7220/src/pwgenerator.py#L99-L124

def main():
    """ Main method """

    # Options
    global args
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--min_word_length", type=int,
                        help="Minimum length for each word", default=3)
    parser.add_argument("-x", "--max_word_length", type=int,
                        help="Maximum length for each word", default=8)
    parser.add_argument("-i", "--max_int_value", type=int,
                        help="Maximum value for the integer", default=1000)
    parser.add_argument("-e", "--number_of_elements", type=int,
                        help="Number of elements in the password (ie. 4 = 3 words + 1 integer)",
                        default=4)
    parser.add_argument("-s", "--no_special_characters", action='store_true',
                        help="Do not use special characters")
    args = parser.parse_args()

    # Print a password
    print(pw(min_word_length=args.min_word_length,
             max_word_length=args.max_word_length,
             max_int_value=args.max_int_value,
             number_of_elements=args.number_of_elements,
             no_special_characters=args.no_special_characters))
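
A programmatic invocation sketch (the argv values are illustrative; this prints one generated password):

import sys
sys.argv = ['pwgenerator', '-n', '4', '-x', '6', '-e', '3', '-s']
main()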
[ "def", "main", "(", ")", ":", "# Options", "global", "args", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"-n\"", ",", "\"--min_word_length\"", ",", "type", "=", "int", ",", "help", "=", "\"Minimum length for each word\"", ",", "default", "=", "3", ")", "parser", ".", "add_argument", "(", "\"-x\"", ",", "\"--max_word_length\"", ",", "type", "=", "int", ",", "help", "=", "\"Maximum length for each word\"", ",", "default", "=", "8", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--max_int_value\"", ",", "type", "=", "int", ",", "help", "=", "\"Maximum value for the integer\"", ",", "default", "=", "1000", ")", "parser", ".", "add_argument", "(", "\"-e\"", ",", "\"--number_of_elements\"", ",", "type", "=", "int", ",", "help", "=", "\"Number of elements in the password (ie. 4 = 3 words + 1 integer)\"", ",", "default", "=", "4", ")", "parser", ".", "add_argument", "(", "\"-s\"", ",", "\"--no_special_characters\"", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Do not use special characters\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Print a password", "print", "(", "pw", "(", "min_word_length", "=", "args", ".", "min_word_length", ",", "max_word_length", "=", "args", ".", "max_word_length", ",", "max_int_value", "=", "args", ".", "max_int_value", ",", "number_of_elements", "=", "args", ".", "number_of_elements", ",", "no_special_characters", "=", "args", ".", "no_special_characters", ")", ")" ]
Main method
[ "Main", "method" ]

language: python | partition: train | avg_line_len: 43.461538

repo: adewes/blitzdb
path: blitzdb/backends/file/backend.py
url: https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/backend.py#L567-L589

def _canonicalize_query(self, query):
    """
    Transform the query dictionary to replace e.g. documents with __ref__ fields.
    """

    def transform_query(q):
        if isinstance(q, dict):
            nq = {}
            for key, value in q.items():
                nq[key] = transform_query(value)
            return nq
        elif isinstance(q, (list, QuerySet, tuple)):
            return [transform_query(x) for x in q]
        elif isinstance(q, Document):
            collection = self.get_collection_for_obj(q)
            ref = "%s:%s" % (collection, q.pk)
            return ref
        else:
            return q

    return transform_query(query)
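
A sketch of the transformation, with a hypothetical saved Document `author_doc` that lives in an 'authors' collection and has primary key 'abc123':

query = {'author': author_doc, 'year': {'$in': [2014, 2015]}}
backend._canonicalize_query(query)
# -> {'author': 'authors:abc123', 'year': {'$in': [2014, 2015]}}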
[ "def", "_canonicalize_query", "(", "self", ",", "query", ")", ":", "def", "transform_query", "(", "q", ")", ":", "if", "isinstance", "(", "q", ",", "dict", ")", ":", "nq", "=", "{", "}", "for", "key", ",", "value", "in", "q", ".", "items", "(", ")", ":", "nq", "[", "key", "]", "=", "transform_query", "(", "value", ")", "return", "nq", "elif", "isinstance", "(", "q", ",", "(", "list", ",", "QuerySet", ",", "tuple", ")", ")", ":", "return", "[", "transform_query", "(", "x", ")", "for", "x", "in", "q", "]", "elif", "isinstance", "(", "q", ",", "Document", ")", ":", "collection", "=", "self", ".", "get_collection_for_obj", "(", "q", ")", "ref", "=", "\"%s:%s\"", "%", "(", "collection", ",", "q", ".", "pk", ")", "return", "ref", "else", ":", "return", "q", "return", "transform_query", "(", "query", ")" ]
Transform the query dictionary to replace e.g. documents with __ref__ fields.
[ "Transform", "the", "query", "dictionary", "to", "replace", "e", ".", "g", ".", "documents", "with", "__ref__", "fields", "." ]

language: python | partition: train | avg_line_len: 31

repo: chaoss/grimoirelab-perceval
path: perceval/backends/core/gitlab.py
url: https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/gitlab.py#L554-L585

def fetch_items(self, path, payload):
    """Return the items from GitLab API using links pagination"""

    page = 0  # current page
    last_page = None  # last page

    url_next = urijoin(self.base_url, GitLabClient.PROJECTS,
                       self.owner + '%2F' + self.repository, path)

    logger.debug("Get GitLab paginated items from " + url_next)

    response = self.fetch(url_next, payload=payload)

    items = response.text
    page += 1

    if 'last' in response.links:
        last_url = response.links['last']['url']
        last_page = last_url.split('&page=')[1].split('&')[0]
        last_page = int(last_page)
        logger.debug("Page: %i/%i" % (page, last_page))

    while items:
        yield items

        items = None

        if 'next' in response.links:
            url_next = response.links['next']['url']  # Loving requests :)
            response = self.fetch(url_next, payload=payload)
            page += 1

            items = response.text
            logger.debug("Page: %i/%i" % (page, last_page))
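
A consumption sketch (client construction elided; the path and payload values are illustrative). Each yielded item is one page's raw response body:

import json

for raw_page in client.fetch_items('issues', {'state': 'all'}):
    issues = json.loads(raw_page)   # one page of results
    handle(issues)                  # hypothetical handler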
[ "def", "fetch_items", "(", "self", ",", "path", ",", "payload", ")", ":", "page", "=", "0", "# current page", "last_page", "=", "None", "# last page", "url_next", "=", "urijoin", "(", "self", ".", "base_url", ",", "GitLabClient", ".", "PROJECTS", ",", "self", ".", "owner", "+", "'%2F'", "+", "self", ".", "repository", ",", "path", ")", "logger", ".", "debug", "(", "\"Get GitLab paginated items from \"", "+", "url_next", ")", "response", "=", "self", ".", "fetch", "(", "url_next", ",", "payload", "=", "payload", ")", "items", "=", "response", ".", "text", "page", "+=", "1", "if", "'last'", "in", "response", ".", "links", ":", "last_url", "=", "response", ".", "links", "[", "'last'", "]", "[", "'url'", "]", "last_page", "=", "last_url", ".", "split", "(", "'&page='", ")", "[", "1", "]", ".", "split", "(", "'&'", ")", "[", "0", "]", "last_page", "=", "int", "(", "last_page", ")", "logger", ".", "debug", "(", "\"Page: %i/%i\"", "%", "(", "page", ",", "last_page", ")", ")", "while", "items", ":", "yield", "items", "items", "=", "None", "if", "'next'", "in", "response", ".", "links", ":", "url_next", "=", "response", ".", "links", "[", "'next'", "]", "[", "'url'", "]", "# Loving requests :)", "response", "=", "self", ".", "fetch", "(", "url_next", ",", "payload", "=", "payload", ")", "page", "+=", "1", "items", "=", "response", ".", "text", "logger", ".", "debug", "(", "\"Page: %i/%i\"", "%", "(", "page", ",", "last_page", ")", ")" ]
Return the items from GitLab API using links pagination
[ "Return", "the", "items", "from", "GitLab", "API", "using", "links", "pagination" ]

language: python | partition: test | avg_line_len: 33.625

repo: NatLibFi/Skosify
path: skosify/infer.py
url: https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L115-L133

def rdfs_properties(rdf):
    """Perform RDFS subproperty inference.

    Add superproperties where subproperties have been used."""

    # find out the subproperty mappings
    superprops = {}  # key: property val: set([superprop1, superprop2..])
    for s, o in rdf.subject_objects(RDFS.subPropertyOf):
        superprops.setdefault(s, set())
        for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
            if sp != s:
                superprops[s].add(sp)

    # add the superproperty relationships
    for p, sps in superprops.items():
        logging.debug("setting superproperties: %s -> %s", p, str(sps))
        for s, o in rdf.subject_objects(p):
            for sp in sps:
                rdf.add((s, sp, o))
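
A minimal, self-contained demonstration using rdflib (which this module builds on; the namespace and properties are made up):

from rdflib import Graph, Namespace, RDFS

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.hasAuthor, RDFS.subPropertyOf, EX.hasContributor))
g.add((EX.book1, EX.hasAuthor, EX.alice))

rdfs_properties(g)

# The superproperty statement has been materialized:
assert (EX.book1, EX.hasContributor, EX.alice) in g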
[ "def", "rdfs_properties", "(", "rdf", ")", ":", "# find out the subproperty mappings", "superprops", "=", "{", "}", "# key: property val: set([superprop1, superprop2..])", "for", "s", ",", "o", "in", "rdf", ".", "subject_objects", "(", "RDFS", ".", "subPropertyOf", ")", ":", "superprops", ".", "setdefault", "(", "s", ",", "set", "(", ")", ")", "for", "sp", "in", "rdf", ".", "transitive_objects", "(", "s", ",", "RDFS", ".", "subPropertyOf", ")", ":", "if", "sp", "!=", "s", ":", "superprops", "[", "s", "]", ".", "add", "(", "sp", ")", "# add the superproperty relationships", "for", "p", ",", "sps", "in", "superprops", ".", "items", "(", ")", ":", "logging", ".", "debug", "(", "\"setting superproperties: %s -> %s\"", ",", "p", ",", "str", "(", "sps", ")", ")", "for", "s", ",", "o", "in", "rdf", ".", "subject_objects", "(", "p", ")", ":", "for", "sp", "in", "sps", ":", "rdf", ".", "add", "(", "(", "s", ",", "sp", ",", "o", ")", ")" ]
Perform RDFS subproperty inference. Add superproperties where subproperties have been used.
[ "Perform", "RDFS", "subproperty", "inference", "." ]

language: python | partition: train | avg_line_len: 37.526316

repo: ricequant/rqalpha
path: rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py
url: https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py#L457-L469

def is_suspended(order_book_id, count=1):
    """
    Check whether a stock is suspended for the whole trading day.

    :param str order_book_id: the stock's code; a single stock's
        order_book_id or symbol may be passed in
    :param int count: the number of data points to look back.
        Defaults to the most recent data available
    :return: `bool` when count is 1; `pandas.DataFrame` when count > 1
    """
    dt = Environment.get_instance().calendar_dt.date()
    order_book_id = assure_stock_order_book_id(order_book_id)
    return Environment.get_instance().data_proxy.is_suspended(order_book_id, dt, count)
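
A usage sketch (the stock code is illustrative), matching the docstring's return types:

if not is_suspended('000001.XSHE'):             # bool when count=1
    pass  # e.g. place an order here
history = is_suspended('000001.XSHE', count=5)  # DataFrame when count > 1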
[ "def", "is_suspended", "(", "order_book_id", ",", "count", "=", "1", ")", ":", "dt", "=", "Environment", ".", "get_instance", "(", ")", ".", "calendar_dt", ".", "date", "(", ")", "order_book_id", "=", "assure_stock_order_book_id", "(", "order_book_id", ")", "return", "Environment", ".", "get_instance", "(", ")", ".", "data_proxy", ".", "is_suspended", "(", "order_book_id", ",", "dt", ",", "count", ")" ]
判断某只股票是否全天停牌。 :param str order_book_id: 某只股票的代码或股票代码,可传入单只股票的order_book_id, symbol :param int count: 回溯获取的数据个数。默认为当前能够获取到的最近的数据 :return: count为1时 `bool`; count>1时 `pandas.DataFrame`
[ "判断某只股票是否全天停牌。" ]

language: python | partition: train | avg_line_len: 34.692308

repo: quantumlib/Cirq
path: cirq/circuits/text_diagram_drawer.py
url: https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L124-L132

def vertical_line(self,
                  x: Union[int, float],
                  y1: Union[int, float],
                  y2: Union[int, float],
                  emphasize: bool = False
                  ) -> None:
    """Adds a line from (x, y1) to (x, y2)."""
    y1, y2 = sorted([y1, y2])
    self.vertical_lines.append(_VerticalLine(x, y1, y2, emphasize))
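
Because the endpoints are sorted, callers need not order y1 and y2. With a hypothetical drawer instance:

drawer.vertical_line(x=2, y1=5, y2=1)                  # equivalent to y1=1, y2=5
drawer.vertical_line(x=3, y1=0, y2=4, emphasize=True)  # emphasized line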
[ "def", "vertical_line", "(", "self", ",", "x", ":", "Union", "[", "int", ",", "float", "]", ",", "y1", ":", "Union", "[", "int", ",", "float", "]", ",", "y2", ":", "Union", "[", "int", ",", "float", "]", ",", "emphasize", ":", "bool", "=", "False", ")", "->", "None", ":", "y1", ",", "y2", "=", "sorted", "(", "[", "y1", ",", "y2", "]", ")", "self", ".", "vertical_lines", ".", "append", "(", "_VerticalLine", "(", "x", ",", "y1", ",", "y2", ",", "emphasize", ")", ")" ]
Adds a line from (x, y1) to (x, y2).
[ "Adds", "a", "line", "from", "(", "x", "y1", ")", "to", "(", "x", "y2", ")", "." ]

language: python | partition: train | avg_line_len: 42.777778

repo: numenta/nupic
path: examples/network/custom_region_demo.py
url: https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/network/custom_region_demo.py#L106-L120

def runNetwork(network, writer):
  """Run the network and write output to writer.

  :param network: a Network instance to run
  :param writer: a csv.writer instance to write output to
  """
  identityRegion = network.regions["identityRegion"]

  for i in xrange(_NUM_RECORDS):
    # Run the network for a single iteration
    network.run(1)

    # Write out the record number and encoding
    encoding = identityRegion.getOutputData("out")
    writer.writerow((i, encoding))
[ "def", "runNetwork", "(", "network", ",", "writer", ")", ":", "identityRegion", "=", "network", ".", "regions", "[", "\"identityRegion\"", "]", "for", "i", "in", "xrange", "(", "_NUM_RECORDS", ")", ":", "# Run the network for a single iteration", "network", ".", "run", "(", "1", ")", "# Write out the record number and encoding", "encoding", "=", "identityRegion", ".", "getOutputData", "(", "\"out\"", ")", "writer", ".", "writerow", "(", "(", "i", ",", "encoding", ")", ")" ]
Run the network and write output to writer. :param network: a Network instance to run :param writer: a csv.writer instance to write output to
[ "Run", "the", "network", "and", "write", "output", "to", "writer", "." ]

language: python | partition: valid | avg_line_len: 30.733333

repo: probcomp/crosscat
path: src/LocalEngine.py
url: https://github.com/probcomp/crosscat/blob/4a05bddb06a45f3b7b3e05e095720f16257d1535/src/LocalEngine.py#L177-L284

def analyze(self, M_c, T, X_L, X_D, seed, kernel_list=(), n_steps=1, c=(),
            r=(), max_iterations=-1, max_time=-1, do_diagnostics=False,
            diagnostics_every_N=1, ROW_CRP_ALPHA_GRID=(),
            COLUMN_CRP_ALPHA_GRID=(), S_GRID=(), MU_GRID=(), N_GRID=31,
            do_timing=False, CT_KERNEL=0, progress=None,
            ):
    """Evolve the latent state by running MCMC transition kernels.

    :param seed: The random seed
    :type seed: int
    :param M_c: The column metadata
    :type M_c: dict
    :param T: The data table in mapped representation (all floats, generated
        by data_utils.read_data_objects)
    :param X_L: the latent variables associated with the latent state
    :type X_L: dict
    :param X_D: the particular cluster assignments of each row in each view
    :type X_D: list of lists
    :param kernel_list: names of the MCMC transition kernels to run
    :type kernel_list: list of strings
    :param n_steps: the number of times to run each MCMC transition kernel
    :type n_steps: int
    :param c: the (global) column indices to run MCMC transition kernels on
    :type c: list of ints
    :param r: the (global) row indices to run MCMC transition kernels on
    :type r: list of ints
    :param max_iterations: the maximum number of times to run each MCMC
        transition kernel. Applicable only if max_time != -1.
    :type max_iterations: int
    :param max_time: the maximum amount of time (seconds) to run MCMC
        transition kernels for before stopping to return progress
    :type max_time: float
    :param progress: a function accepting
        (n_steps, max_time, step_idx, elapsed_secs, end=None) where `n_steps`
        is the total number of transition steps, `max_time` is the timeout in
        seconds, `step_idx` is number of transitions so far, `elapsed_secs` is
        the amount of time so far, and `end=None` is an optional kwarg for
        indicating the analysis has been completed. For example, `progress`
        may be used to print a progress bar to standard out.
    :type progress: function pointer.
    :returns: X_L, X_D -- the evolved latent state
    """
    if n_steps <= 0:
        raise ValueError("You must do at least one analyze step.")
    if CT_KERNEL not in [0, 1]:
        raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
    if do_timing:
        # Diagnostics and timing are exclusive.
        do_diagnostics = False
    diagnostic_func_dict, reprocess_diagnostics_func = \
        do_diagnostics_to_func_dict(do_diagnostics)
    X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
    arg_tuples = self.get_analyze_arg_tuples(
        M_c, T, X_L_list, X_D_list, kernel_list,
        n_steps, c, r, max_iterations, max_time, diagnostic_func_dict,
        diagnostics_every_N, ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
        S_GRID, MU_GRID, N_GRID, do_timing, CT_KERNEL, progress,
        make_get_next_seed(seed))
    chain_tuples = self.mapper(self.do_analyze, arg_tuples)
    X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
    if do_timing:
        timing_list = diagnostics_dict_list
    if not was_multistate:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    ret_tuple = X_L_list, X_D_list
    if diagnostic_func_dict is not None:
        diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
        if reprocess_diagnostics_func is not None:
            diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
        ret_tuple = ret_tuple + (diagnostics_dict, )
    if do_timing:
        ret_tuple = ret_tuple + (timing_list, )
    return ret_tuple
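
A call sketch under typical usage (engine construction and data setup elided; the names follow the docstring). With do_diagnostics=True the returned tuple gains a diagnostics dict, and with do_timing=True it gains a timing list instead, since the two are exclusive:

engine = LocalEngine()   # hypothetical construction
X_L_new, X_D_new = engine.analyze(M_c, T, X_L, X_D, seed=0, n_steps=10)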
[ "def", "analyze", "(", "self", ",", "M_c", ",", "T", ",", "X_L", ",", "X_D", ",", "seed", ",", "kernel_list", "=", "(", ")", ",", "n_steps", "=", "1", ",", "c", "=", "(", ")", ",", "r", "=", "(", ")", ",", "max_iterations", "=", "-", "1", ",", "max_time", "=", "-", "1", ",", "do_diagnostics", "=", "False", ",", "diagnostics_every_N", "=", "1", ",", "ROW_CRP_ALPHA_GRID", "=", "(", ")", ",", "COLUMN_CRP_ALPHA_GRID", "=", "(", ")", ",", "S_GRID", "=", "(", ")", ",", "MU_GRID", "=", "(", ")", ",", "N_GRID", "=", "31", ",", "do_timing", "=", "False", ",", "CT_KERNEL", "=", "0", ",", "progress", "=", "None", ",", ")", ":", "if", "n_steps", "<=", "0", ":", "raise", "ValueError", "(", "\"You must do at least one analyze step.\"", ")", "if", "CT_KERNEL", "not", "in", "[", "0", ",", "1", "]", ":", "raise", "ValueError", "(", "\"CT_KERNEL must be 0 (Gibbs) or 1 (MH)\"", ")", "if", "do_timing", ":", "# Diagnostics and timing are exclusive.", "do_diagnostics", "=", "False", "diagnostic_func_dict", ",", "reprocess_diagnostics_func", "=", "do_diagnostics_to_func_dict", "(", "do_diagnostics", ")", "X_L_list", ",", "X_D_list", ",", "was_multistate", "=", "su", ".", "ensure_multistate", "(", "X_L", ",", "X_D", ")", "arg_tuples", "=", "self", ".", "get_analyze_arg_tuples", "(", "M_c", ",", "T", ",", "X_L_list", ",", "X_D_list", ",", "kernel_list", ",", "n_steps", ",", "c", ",", "r", ",", "max_iterations", ",", "max_time", ",", "diagnostic_func_dict", ",", "diagnostics_every_N", ",", "ROW_CRP_ALPHA_GRID", ",", "COLUMN_CRP_ALPHA_GRID", ",", "S_GRID", ",", "MU_GRID", ",", "N_GRID", ",", "do_timing", ",", "CT_KERNEL", ",", "progress", ",", "make_get_next_seed", "(", "seed", ")", ")", "chain_tuples", "=", "self", ".", "mapper", "(", "self", ".", "do_analyze", ",", "arg_tuples", ")", "X_L_list", ",", "X_D_list", ",", "diagnostics_dict_list", "=", "zip", "(", "*", "chain_tuples", ")", "if", "do_timing", ":", "timing_list", "=", "diagnostics_dict_list", "if", "not", "was_multistate", ":", "X_L_list", ",", "X_D_list", "=", "X_L_list", "[", "0", "]", ",", "X_D_list", "[", "0", "]", "ret_tuple", "=", "X_L_list", ",", "X_D_list", "if", "diagnostic_func_dict", "is", "not", "None", ":", "diagnostics_dict", "=", "munge_diagnostics", "(", "diagnostics_dict_list", ")", "if", "reprocess_diagnostics_func", "is", "not", "None", ":", "diagnostics_dict", "=", "reprocess_diagnostics_func", "(", "diagnostics_dict", ")", "ret_tuple", "=", "ret_tuple", "+", "(", "diagnostics_dict", ",", ")", "if", "do_timing", ":", "ret_tuple", "=", "ret_tuple", "+", "(", "timing_list", ",", ")", "return", "ret_tuple" ]
Evolve the latent state by running MCMC transition kernels. :param seed: The random seed :type seed: int :param M_c: The column metadata :type M_c: dict :param T: The data table in mapped representation (all floats, generated by data_utils.read_data_objects) :param X_L: the latent variables associated with the latent state :type X_L: dict :param X_D: the particular cluster assignments of each row in each view :type X_D: list of lists :param kernel_list: names of the MCMC transition kernels to run :type kernel_list: list of strings :param n_steps: the number of times to run each MCMC transition kernel :type n_steps: int :param c: the (global) column indices to run MCMC transition kernels on :type c: list of ints :param r: the (global) row indices to run MCMC transition kernels on :type r: list of ints :param max_iterations: the maximum number of times ot run each MCMC transition kernel. Applicable only if max_time != -1. :type max_iterations: int :param max_time: the maximum amount of time (seconds) to run MCMC transition kernels for before stopping to return progress :type max_time: float :param progress: a function accepting (n_steps, max_time, step_idx, elapsed_secs, end=None) where `n_steps` is the total number of transition steps, `max_time` is the timeout in secods, `step_idx` is number of transitions so far, `elapsed_secs` is the amount of time so far, and `end=None` is an optional kwarg for indicating the analysis has been completed. For example, `progress` may be used to print a progress bar to standard out. :type progress: function pointer. :returns: X_L, X_D -- the evolved latent state
[ "Evolve", "the", "latent", "state", "by", "running", "MCMC", "transition", "kernels", "." ]

language: python | partition: train | avg_line_len: 38.203704

repo: peri-source/peri
path: peri/util.py
url: https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/util.py#L665-L670

def filtered_image(self, im):
    """Returns a filtered image after applying the Fourier-space filters"""
    q = np.fft.fftn(im)
    for k, v in self.filters:
        q[k] -= v
    return np.real(np.fft.ifftn(q))
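
The filters are (Fourier-index, value) pairs subtracted in frequency space. A self-contained numpy illustration of the same idea, independent of the peri class:

import numpy as np

im = np.random.rand(32, 32)
q = np.fft.fftn(im)

# One (k, v) filter pair; choosing v = q[k] zeroes out that frequency component.
k = (0, 1)
filters = [(k, q[k])]

for idx, val in filters:
    q[idx] -= val
filtered = np.real(np.fft.ifftn(q))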
[ "def", "filtered_image", "(", "self", ",", "im", ")", ":", "q", "=", "np", ".", "fft", ".", "fftn", "(", "im", ")", "for", "k", ",", "v", "in", "self", ".", "filters", ":", "q", "[", "k", "]", "-=", "v", "return", "np", ".", "real", "(", "np", ".", "fft", ".", "ifftn", "(", "q", ")", ")" ]
Returns a filtered image after applying the Fourier-space filters
[ "Returns", "a", "filtered", "image", "after", "applying", "the", "Fourier", "-", "space", "filters" ]
python
valid
37.833333
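A self-contained sketch of the same Fourier-space subtraction, with `filters` as an iterable of (index, value) pairs like the attribute used above; the DC-removal filter here is made up for illustration.

import numpy as np

def filtered_image(im, filters):
    # Subtract the stored filter values at their Fourier-space indices.
    q = np.fft.fftn(im)
    for k, v in filters:
        q[k] -= v
    return np.real(np.fft.ifftn(q))

im = np.random.rand(8, 8)
filters = [((0, 0), np.fft.fftn(im)[0, 0])]   # hypothetical filter: remove the DC term
out = filtered_image(im, filters)
print(np.allclose(out.mean(), 0.0))           # True -- the mean was filtered away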
tcalmant/ipopo
pelix/shell/core.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/core.py#L813-L819
def install(self, io_handler, module_name): """ Installs the bundle with the given module name """ bundle = self._context.install_bundle(module_name) io_handler.write_line("Bundle ID: {0}", bundle.get_bundle_id()) return bundle.get_bundle_id()
[ "def", "install", "(", "self", ",", "io_handler", ",", "module_name", ")", ":", "bundle", "=", "self", ".", "_context", ".", "install_bundle", "(", "module_name", ")", "io_handler", ".", "write_line", "(", "\"Bundle ID: {0}\"", ",", "bundle", ".", "get_bundle_id", "(", ")", ")", "return", "bundle", ".", "get_bundle_id", "(", ")" ]
Installs the bundle with the given module name
[ "Installs", "the", "bundle", "with", "the", "given", "module", "name" ]
python
train
40.714286
chrisspen/burlap
burlap/debug.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/debug.py#L262-L269
def tunnel(self, local_port, remote_port): """ Creates an SSH tunnel. """ r = self.local_renderer r.env.tunnel_local_port = local_port r.env.tunnel_remote_port = remote_port r.local(' ssh -i {key_filename} -L {tunnel_local_port}:localhost:{tunnel_remote_port} {user}@{host_string} -N')
[ "def", "tunnel", "(", "self", ",", "local_port", ",", "remote_port", ")", ":", "r", "=", "self", ".", "local_renderer", "r", ".", "env", ".", "tunnel_local_port", "=", "local_port", "r", ".", "env", ".", "tunnel_remote_port", "=", "remote_port", "r", ".", "local", "(", "' ssh -i {key_filename} -L {tunnel_local_port}:localhost:{tunnel_remote_port} {user}@{host_string} -N'", ")" ]
Creates an SSH tunnel.
[ "Creates", "an", "SSH", "tunnel", "." ]
python
valid
41.75
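The method formats a plain `ssh -L` local-forward command; a sketch of the string it renders, using hypothetical values for the env fields (these are not burlap defaults).

env = {
    'key_filename': '~/.ssh/id_rsa',   # hypothetical values
    'tunnel_local_port': 8080,
    'tunnel_remote_port': 80,
    'user': 'deploy',
    'host_string': 'example.com',
}
cmd = ('ssh -i {key_filename} '
       '-L {tunnel_local_port}:localhost:{tunnel_remote_port} '
       '{user}@{host_string} -N').format(**env)
print(cmd)
# ssh -i ~/.ssh/id_rsa -L 8080:localhost:80 deploy@example.com -N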
corydodt/Crosscap
crosscap/urltool.py
https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/urltool.py#L159-L188
def dumpRule(serviceCls, rule, prefix): """ Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s) """ rulePath = prefix + rule.rule rulePath = re.sub('/{2,}', '/', rulePath) cor = ConvertedRule( rulePath=rulePath, operationId=rule.endpoint ) # look for methods for meth in sorted(rule.methods or []): cor.methods.append(meth) # edit _branch operationId to provide the true method name origEP = cor.operationId if origEP.endswith('_branch'): origEP = origEP[:-7] cor.branch = True cor.operationId = '%s.%s' % (serviceCls.__name__, origEP) # get the actual method so we can inspect it for extension attributes meth = getattr(serviceCls, origEP) if hasattr(meth, '_subKleinQname'): cor.subKlein = meth._subKleinQname cor.doco = OpenAPIExtendedDocumentation.fromObject(meth, decode=True) return cor
[ "def", "dumpRule", "(", "serviceCls", ",", "rule", ",", "prefix", ")", ":", "rulePath", "=", "prefix", "+", "rule", ".", "rule", "rulePath", "=", "re", ".", "sub", "(", "'/{2,}'", ",", "'/'", ",", "rulePath", ")", "cor", "=", "ConvertedRule", "(", "rulePath", "=", "rulePath", ",", "operationId", "=", "rule", ".", "endpoint", ")", "# look for methods", "for", "meth", "in", "sorted", "(", "rule", ".", "methods", "or", "[", "]", ")", ":", "cor", ".", "methods", ".", "append", "(", "meth", ")", "# edit _branch operationId to provide the true method name", "origEP", "=", "cor", ".", "operationId", "if", "origEP", ".", "endswith", "(", "'_branch'", ")", ":", "origEP", "=", "origEP", "[", ":", "-", "7", "]", "cor", ".", "branch", "=", "True", "cor", ".", "operationId", "=", "'%s.%s'", "%", "(", "serviceCls", ".", "__name__", ",", "origEP", ")", "# get the actual method so we can inspect it for extension attributes", "meth", "=", "getattr", "(", "serviceCls", ",", "origEP", ")", "if", "hasattr", "(", "meth", ",", "'_subKleinQname'", ")", ":", "cor", ".", "subKlein", "=", "meth", ".", "_subKleinQname", "cor", ".", "doco", "=", "OpenAPIExtendedDocumentation", ".", "fromObject", "(", "meth", ",", "decode", "=", "True", ")", "return", "cor" ]
Create an in-between representation of the rule, so we can eventually convert it to OpenAPIPathItem with OpenAPIOperation(s)
[ "Create", "an", "in", "-", "between", "representation", "of", "the", "rule", "so", "we", "can", "eventually", "convert", "it", "to", "OpenAPIPathItem", "with", "OpenAPIOperation", "(", "s", ")" ]
python
train
32.566667
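Two transformations above are easy to check in isolation: collapsing duplicate slashes in the rule path and trimming klein's `_branch` suffix to recover the method name. A minimal sketch:

import re

def normalize_rule_path(prefix, rule):
    return re.sub('/{2,}', '/', prefix + rule)

def true_endpoint(operation_id):
    # '_branch' marks a branch route; strip it to get the real method name
    if operation_id.endswith('_branch'):
        return operation_id[:-7], True
    return operation_id, False

print(normalize_rule_path('/api//', '/users'))   # /api/users
print(true_endpoint('listUsers_branch'))         # ('listUsers', True)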
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L606-L628
def list_set(seq): """Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates In general list(set(L)) will not have the same order as the original list. This is a list(set(L)) function that will preserve the order of L. Arguments: seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()` Returns: iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq` Examples: >>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1]) [2.7, 3, 2, 1, 4, 42] >>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC']) ['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC'] """ new_list = [] for i in seq: if i not in new_list: new_list += [i] return type(seq)(new_list)
[ "def", "list_set", "(", "seq", ")", ":", "new_list", "=", "[", "]", "for", "i", "in", "seq", ":", "if", "i", "not", "in", "new_list", ":", "new_list", "+=", "[", "i", "]", "return", "type", "(", "seq", ")", "(", "new_list", ")" ]
Similar to `list(set(seq))`, but maintains the order of `seq` while eliminating duplicates In general list(set(L)) will not have the same order as the original list. This is a list(set(L)) function that will preserve the order of L. Arguments: seq (iterable): list, tuple, or other iterable to be used to produce an ordered `set()` Returns: iterable: A copy of `seq` but with duplicates removed, and distinct elements in the same order as in `seq` Examples: >>> list_set([2.7,3,2,2,2,1,1,2,3,4,3,2,42,1]) [2.7, 3, 2, 1, 4, 42] >>> list_set(['Zzz','abc', ('what.', 'ever.'), 0, 0.0, 'Zzz', 0.00, 'ABC']) ['Zzz', 'abc', ('what.', 'ever.'), 0, 'ABC']
[ "Similar", "to", "list", "(", "set", "(", "seq", "))", "but", "maintains", "the", "order", "of", "seq", "while", "eliminating", "duplicates" ]
python
train
36.695652
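Usage, assuming the package is installed; the result comes back as the input's own type because of the final `type(seq)(new_list)` call.

from pugnlp.util import list_set   # assumes pugnlp is installed

print(list_set([2.7, 3, 2, 2, 2, 1, 1, 2, 3, 4, 3, 2, 42, 1]))
# [2.7, 3, 2, 1, 4, 42]
print(list_set(('a', 'b', 'a', 'c')))
# ('a', 'b', 'c') -- tuples come back as tuples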
ontio/ontology-python-sdk
ontology/network/rpc.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/network/rpc.py#L447-L459
def send_raw_transaction(self, tx: Transaction, is_full: bool = False) -> str:
    """
    This interface is used to send the transaction into the network.

    :param tx: Transaction object in ontology Python SDK.
    :param is_full: whether to return the full JSON-RPC response instead of only its result field.
    :return: a hexadecimal transaction hash value.
    """
    tx_data = tx.serialize(is_hex=True)
    payload = self.generate_json_rpc_payload(RpcMethod.SEND_TRANSACTION, [tx_data])
    response = self.__post(self.__url, payload)
    if is_full:
        return response
    return response['result']
[ "def", "send_raw_transaction", "(", "self", ",", "tx", ":", "Transaction", ",", "is_full", ":", "bool", "=", "False", ")", "->", "str", ":", "tx_data", "=", "tx", ".", "serialize", "(", "is_hex", "=", "True", ")", "payload", "=", "self", ".", "generate_json_rpc_payload", "(", "RpcMethod", ".", "SEND_TRANSACTION", ",", "[", "tx_data", "]", ")", "response", "=", "self", ".", "__post", "(", "self", ".", "__url", ",", "payload", ")", "if", "is_full", ":", "return", "response", "return", "response", "[", "'result'", "]" ]
This interface is used to send the transaction into the network. :param tx: Transaction object in ontology Python SDK. :param is_full: whether to return the full JSON-RPC response instead of only its result field. :return: a hexadecimal transaction hash value.
[ "This", "interface", "is", "used", "to", "send", "the", "transaction", "into", "the", "network", ".", ":", "param", "tx", ":", "Transaction", "object", "in", "ontology", "Python", "SDK", ".", ":", "param", "is_full", ":", ":", "return", ":", "a", "hexadecimal", "transaction", "hash", "value", "." ]
python
train
43.846154
vertexproject/synapse
synapse/lib/provenance.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/provenance.py#L144-L152
def provStacks(self, offs, size): ''' Returns a stream of provenance stacks at the given offset ''' for _, iden in self.provseq.slice(offs, size): stack = self.getProvStack(iden) if stack is None: continue yield (iden, stack)
[ "def", "provStacks", "(", "self", ",", "offs", ",", "size", ")", ":", "for", "_", ",", "iden", "in", "self", ".", "provseq", ".", "slice", "(", "offs", ",", "size", ")", ":", "stack", "=", "self", ".", "getProvStack", "(", "iden", ")", "if", "stack", "is", "None", ":", "continue", "yield", "(", "iden", ",", "stack", ")" ]
Returns a stream of provenance stacks at the given offset
[ "Returns", "a", "stream", "of", "provenance", "stacks", "at", "the", "given", "offset" ]
python
train
33.444444
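The shape of the pattern -- slice an append-only sequence of idens, resolve each, and silently skip any that no longer resolve -- in a minimal in-memory sketch (the list/dict pair stands in for the real provenance store):

def prov_stacks(seq, stacks, offs, size):
    # seq: idens in insertion order; stacks: iden -> stack mapping
    for iden in seq[offs:offs + size]:
        stack = stacks.get(iden)
        if stack is None:
            continue            # stack was expired or deleted; skip it
        yield (iden, stack)

seq = ['a', 'b', 'c']
stacks = {'a': ('init',), 'c': ('trig', 'storm')}
print(list(prov_stacks(seq, stacks, 0, 3)))
# [('a', ('init',)), ('c', ('trig', 'storm'))]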
ChrisCummins/labm8
fs.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/fs.py#L304-L330
def rm(*components, **kwargs):
    """
    Remove a file or directory.

    If path is a directory, this recursively removes the directory and
    any contents. Non-existent paths are silently ignored.

    Supports Unix style globbing by default (disable using
    glob=False). For details on globbing pattern expansion, see:

    https://docs.python.org/2/library/glob.html

    Arguments:
        *components (string[]): path to the file or directory to remove. May be
            absolute or relative. May contain Unix glob patterns.
        **kwargs: if "glob" is True, perform Unix style pattern expansion of
            paths (default: True).
    """
    _path = path(*components)
    glob = kwargs.get("glob", True)
    paths = iglob(_path) if glob else [_path]

    for file in paths:
        if isfile(file):
            os.remove(file)
        elif exists(file):
            shutil.rmtree(file, ignore_errors=False)
[ "def", "rm", "(", "*", "components", ",", "*", "*", "kwargs", ")", ":", "_path", "=", "path", "(", "*", "components", ")", "glob", "=", "kwargs", ".", "get", "(", "\"glob\"", ",", "True", ")", "paths", "=", "iglob", "(", "_path", ")", "if", "glob", "else", "[", "_path", "]", "for", "file", "in", "paths", ":", "if", "isfile", "(", "file", ")", ":", "os", ".", "remove", "(", "file", ")", "elif", "exists", "(", "file", ")", ":", "shutil", ".", "rmtree", "(", "file", ",", "ignore_errors", "=", "False", ")" ]
Remove a file or directory. If path is a directory, this recursively removes the directory and any contents. Non-existent paths are silently ignored. Supports Unix style globbing by default (disable using glob=False). For details on globbing pattern expansion, see: https://docs.python.org/2/library/glob.html Arguments: *components (string[]): path to the file or directory to remove. May be absolute or relative. May contain Unix glob patterns. **kwargs: if "glob" is True, perform Unix style pattern expansion of paths (default: True).
[ "Remove", "a", "file", "or", "directory", "." ]
python
train
32.962963
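Exercising the glob behavior against a throwaway directory; the loop below restates the core of `rm` with only the standard library, so the sketch runs without labm8 installed.

import os, shutil, tempfile
from glob import iglob

def rm_glob(pattern):
    for p in iglob(pattern):
        if os.path.isfile(p):
            os.remove(p)
        elif os.path.exists(p):
            shutil.rmtree(p, ignore_errors=False)

d = tempfile.mkdtemp()
for name in ('a.log', 'b.log', 'keep.txt'):
    open(os.path.join(d, name), 'w').close()
rm_glob(os.path.join(d, '*.log'))
print(sorted(os.listdir(d)))   # ['keep.txt']
shutil.rmtree(d)               # clean up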
AustralianSynchrotron/lightflow
lightflow/workers.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/workers.py#L10-L42
def start_worker(queues, config, *, name=None, celery_args=None, check_datastore=True): """ Start a worker process. Args: queues (list): List of queue names this worker accepts jobs from. config (Config): Reference to the configuration object from which the settings for the worker are retrieved. name (string): Unique name for the worker. The hostname template variables from Celery can be used. If not given, a unique name is created. celery_args (list): List of additional Celery worker command line arguments. Please note that this depends on the version of Celery used and might change. Use with caution. check_datastore (bool): Set to True to check whether the data store is available prior to starting the worker. """ celery_app = create_app(config) if check_datastore: with DataStore(**config.data_store, auto_connect=True, handle_reconnect=False) as ds: celery_app.user_options['datastore_info'] = ds.server_info argv = [ 'worker', '-n={}'.format(uuid4() if name is None else name), '--queues={}'.format(','.join(queues)) ] argv.extend(celery_args or []) celery_app.steps['consumer'].add(WorkerLifecycle) celery_app.user_options['config'] = config celery_app.worker_main(argv)
[ "def", "start_worker", "(", "queues", ",", "config", ",", "*", ",", "name", "=", "None", ",", "celery_args", "=", "None", ",", "check_datastore", "=", "True", ")", ":", "celery_app", "=", "create_app", "(", "config", ")", "if", "check_datastore", ":", "with", "DataStore", "(", "*", "*", "config", ".", "data_store", ",", "auto_connect", "=", "True", ",", "handle_reconnect", "=", "False", ")", "as", "ds", ":", "celery_app", ".", "user_options", "[", "'datastore_info'", "]", "=", "ds", ".", "server_info", "argv", "=", "[", "'worker'", ",", "'-n={}'", ".", "format", "(", "uuid4", "(", ")", "if", "name", "is", "None", "else", "name", ")", ",", "'--queues={}'", ".", "format", "(", "','", ".", "join", "(", "queues", ")", ")", "]", "argv", ".", "extend", "(", "celery_args", "or", "[", "]", ")", "celery_app", ".", "steps", "[", "'consumer'", "]", ".", "add", "(", "WorkerLifecycle", ")", "celery_app", ".", "user_options", "[", "'config'", "]", "=", "config", "celery_app", ".", "worker_main", "(", "argv", ")" ]
Start a worker process. Args: queues (list): List of queue names this worker accepts jobs from. config (Config): Reference to the configuration object from which the settings for the worker are retrieved. name (string): Unique name for the worker. The hostname template variables from Celery can be used. If not given, a unique name is created. celery_args (list): List of additional Celery worker command line arguments. Please note that this depends on the version of Celery used and might change. Use with caution. check_datastore (bool): Set to True to check whether the data store is available prior to starting the worker.
[ "Start", "a", "worker", "process", "." ]
python
train
41.484848
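The argv handed to `celery_app.worker_main` is ordinary list building; a sketch of what it contains for two queues, no explicit name, and one hypothetical extra Celery flag (the uuid differs per run):

from uuid import uuid4

queues = ['dag', 'task']
name = None
celery_args = ['--concurrency=4']   # hypothetical extra argument

argv = [
    'worker',
    '-n={}'.format(uuid4() if name is None else name),
    '--queues={}'.format(','.join(queues)),
]
argv.extend(celery_args or [])
print(argv)
# ['worker', '-n=<uuid>', '--queues=dag,task', '--concurrency=4']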
synw/goerr
goerr/__init__.py
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/__init__.py#L225-L263
def _errmsg(self, error: "Err", tb: bool=False, i: int=None, msgformat: str="terminal") -> str: """ Get the error message """ if msgformat == "terminal": msg = self._headline(error, i) if error.ex is not None: msg += "\n" + "line " + colors.bold(str(error.line)) msg += ": " + colors.yellow(error.code) msg += "\n" + str(error.file) if self.errs_traceback is True or tb is True: if error.tb is not None: msg += "\n" + error.tb elif msgformat == "csv": sep = "," msg = error.msg + sep msg += str(error.line) + sep + error.code + sep msg += str(error.file) elif msgformat == "text": sep = "," msg = error.msg if error.ex is not None: msg += sep + str(error.line) + sep + error.code + sep msg += str(error.file) + sep if self.errs_traceback is True or tb is True: if error.tb is not None: msg += sep + error.tb elif msgformat == "dict": msg = {"date": datetime.now()} if error.ex is not None: msg["msg"] = error.msg msg["line"] = error.line msg["code"] = error.code msg["file"] = error.file if self.errs_traceback is True or tb is True: if error.tb is not None: msg["traceback"] = error.tb return msg
[ "def", "_errmsg", "(", "self", ",", "error", ":", "\"Err\"", ",", "tb", ":", "bool", "=", "False", ",", "i", ":", "int", "=", "None", ",", "msgformat", ":", "str", "=", "\"terminal\"", ")", "->", "str", ":", "if", "msgformat", "==", "\"terminal\"", ":", "msg", "=", "self", ".", "_headline", "(", "error", ",", "i", ")", "if", "error", ".", "ex", "is", "not", "None", ":", "msg", "+=", "\"\\n\"", "+", "\"line \"", "+", "colors", ".", "bold", "(", "str", "(", "error", ".", "line", ")", ")", "msg", "+=", "\": \"", "+", "colors", ".", "yellow", "(", "error", ".", "code", ")", "msg", "+=", "\"\\n\"", "+", "str", "(", "error", ".", "file", ")", "if", "self", ".", "errs_traceback", "is", "True", "or", "tb", "is", "True", ":", "if", "error", ".", "tb", "is", "not", "None", ":", "msg", "+=", "\"\\n\"", "+", "error", ".", "tb", "elif", "msgformat", "==", "\"csv\"", ":", "sep", "=", "\",\"", "msg", "=", "error", ".", "msg", "+", "sep", "msg", "+=", "str", "(", "error", ".", "line", ")", "+", "sep", "+", "error", ".", "code", "+", "sep", "msg", "+=", "str", "(", "error", ".", "file", ")", "elif", "msgformat", "==", "\"text\"", ":", "sep", "=", "\",\"", "msg", "=", "error", ".", "msg", "if", "error", ".", "ex", "is", "not", "None", ":", "msg", "+=", "sep", "+", "str", "(", "error", ".", "line", ")", "+", "sep", "+", "error", ".", "code", "+", "sep", "msg", "+=", "str", "(", "error", ".", "file", ")", "+", "sep", "if", "self", ".", "errs_traceback", "is", "True", "or", "tb", "is", "True", ":", "if", "error", ".", "tb", "is", "not", "None", ":", "msg", "+=", "sep", "+", "error", ".", "tb", "elif", "msgformat", "==", "\"dict\"", ":", "msg", "=", "{", "\"date\"", ":", "datetime", ".", "now", "(", ")", "}", "if", "error", ".", "ex", "is", "not", "None", ":", "msg", "[", "\"msg\"", "]", "=", "error", ".", "msg", "msg", "[", "\"line\"", "]", "=", "error", ".", "line", "msg", "[", "\"code\"", "]", "=", "error", ".", "code", "msg", "[", "\"file\"", "]", "=", "error", ".", "file", "if", "self", ".", "errs_traceback", "is", "True", "or", "tb", "is", "True", ":", "if", "error", ".", "tb", "is", "not", "None", ":", "msg", "[", "\"traceback\"", "]", "=", "error", ".", "tb", "return", "msg" ]
Get the error message
[ "Get", "the", "error", "message" ]
python
train
41.358974
Xython/Linq.py
linq/standard/dict.py
https://github.com/Xython/Linq.py/blob/ffb65f92f1df0d8161d5f835f5947554f6f33d72/linq/standard/dict.py#L143-L159
def TakeWhile(self: dict, f): """ [ { 'self': [1, 2, 3, 4, 5], 'f': lambda x: x < 4, 'assert': lambda ret: list(ret) == [1, 2, 3] } ] """ if is_to_destruct(f): f = destruct_func(f) for e in self.items(): if not f(e): break yield e
[ "def", "TakeWhile", "(", "self", ":", "dict", ",", "f", ")", ":", "if", "is_to_destruct", "(", "f", ")", ":", "f", "=", "destruct_func", "(", "f", ")", "for", "e", "in", "self", ".", "items", "(", ")", ":", "if", "not", "f", "(", "e", ")", ":", "break", "yield", "e" ]
[ { 'self': [1, 2, 3, 4, 5], 'f': lambda x: x < 4, 'assert': lambda ret: list(ret) == [1, 2, 3] } ]
[ "[", "{", "self", ":", "[", "1", "2", "3", "4", "5", "]", "f", ":", "lambda", "x", ":", "x", "<", "4", "assert", ":", "lambda", "ret", ":", "list", "(", "ret", ")", "==", "[", "1", "2", "3", "]", "}", "]" ]
python
train
19.352941
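For comparison, the same early-exit iteration over dict items with the standard library (no argument-destructuring support here):

from itertools import takewhile

d = {1: 'a', 2: 'b', 5: 'c', 3: 'd'}
print(list(takewhile(lambda kv: kv[0] < 4, d.items())))
# [(1, 'a'), (2, 'b')] -- stops at the first key >= 4, even though 3 follows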
thecynic/pylutron
pylutron/__init__.py
https://github.com/thecynic/pylutron/blob/4d9222c96ef7ac7ac458031c058ad93ec31cebbf/pylutron/__init__.py#L63-L72
def connect(self):
    """Connects to the Lutron controller."""
    if self._connected or self.is_alive():
        raise ConnectionExistsError("Already connected")
    # After starting the thread we wait for it to post us
    # an event signifying that connection is established. This
    # ensures that the caller only resumes when we are fully connected.
    self.start()
    with self._lock:
        self._connect_cond.wait_for(lambda: self._connected)
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "_connected", "or", "self", ".", "is_alive", "(", ")", ":", "raise", "ConnectionExistsError", "(", "\"Already connected\"", ")", "# After starting the thread we wait for it to post us", "# an event signifying that connection is established. This", "# ensures that the caller only resumes when we are fully connected.", "self", ".", "start", "(", ")", "with", "self", ".", "_lock", ":", "self", ".", "_connect_cond", ".", "wait_for", "(", "lambda", ":", "self", ".", "_connected", ")" ]
Connects to the Lutron controller.
[ "Connects", "to", "the", "lutron", "controller", "." ]
python
train
44.2
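The wait-until-connected handshake is plain `threading.Condition` usage; a standalone sketch of the same pattern with a faked dial-out:

import threading
import time

class Conn(threading.Thread):
    def __init__(self):
        super(Conn, self).__init__()
        self._lock = threading.Lock()
        self._connect_cond = threading.Condition(self._lock)
        self._connected = False

    def run(self):
        time.sleep(0.1)                      # pretend to establish a connection
        with self._lock:
            self._connected = True
            self._connect_cond.notify_all()  # wake the waiting caller

    def connect(self):
        self.start()
        with self._lock:
            self._connect_cond.wait_for(lambda: self._connected)

c = Conn()
c.connect()
print('connected')                           # only prints once the thread signals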
bapakode/OmMongo
ommongo/fields/document_field.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/document_field.py#L96-L100
def wrap(self, value): ''' Validate ``value`` and then use the document's class to wrap the value''' self.validate_wrap(value) return self.type.wrap(value)
[ "def", "wrap", "(", "self", ",", "value", ")", ":", "self", ".", "validate_wrap", "(", "value", ")", "return", "self", ".", "type", ".", "wrap", "(", "value", ")" ]
Validate ``value`` and then use the document's class to wrap the value
[ "Validate", "value", "and", "then", "use", "the", "document", "s", "class", "to", "wrap", "the", "value" ]
python
train
37.4
google/grr
grr/client/grr_response_client/client_actions/file_finder.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/file_finder.py#L28-L55
def FileFinderOSFromClient(args):
  """This function expands paths from the args and returns related stat entries.

  Args:
    args: An `rdf_file_finder.FileFinderArgs` object.

  Yields:
    Stat entries for the expanded paths that match all content conditions.
  """
  stat_cache = filesystem.StatCache()

  opts = args.action.stat

  for path in GetExpandedPaths(args):
    try:
      content_conditions = conditions.ContentCondition.Parse(args.conditions)
      for content_condition in content_conditions:
        with io.open(path, "rb") as fd:
          result = list(content_condition.Search(fd))
        if not result:
          raise _SkipFileException()
      # TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.
      stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
      stat_entry = client_utils.StatEntryFromStatPathSpec(
          stat, ext_attrs=opts.collect_ext_attrs)
      yield stat_entry
    except _SkipFileException:
      pass
[ "def", "FileFinderOSFromClient", "(", "args", ")", ":", "stat_cache", "=", "filesystem", ".", "StatCache", "(", ")", "opts", "=", "args", ".", "action", ".", "stat", "for", "path", "in", "GetExpandedPaths", "(", "args", ")", ":", "try", ":", "content_conditions", "=", "conditions", ".", "ContentCondition", ".", "Parse", "(", "args", ".", "conditions", ")", "for", "content_condition", "in", "content_conditions", ":", "with", "io", ".", "open", "(", "path", ",", "\"rb\"", ")", "as", "fd", ":", "result", "=", "list", "(", "content_condition", ".", "Search", "(", "fd", ")", ")", "if", "not", "result", ":", "raise", "_SkipFileException", "(", ")", "# TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.", "stat", "=", "stat_cache", ".", "Get", "(", "path", ",", "follow_symlink", "=", "bool", "(", "opts", ".", "resolve_links", ")", ")", "stat_entry", "=", "client_utils", ".", "StatEntryFromStatPathSpec", "(", "stat", ",", "ext_attrs", "=", "opts", ".", "collect_ext_attrs", ")", "yield", "stat_entry", "except", "_SkipFileException", ":", "pass" ]
This function expands paths from the args and returns related stat entries. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: Stat entries for the expanded paths that match all content conditions.
[ "This", "function", "expands", "paths", "from", "the", "args", "and", "returns", "related", "stat", "entries", "." ]
python
train
32.678571
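The control flow worth noting is the private exception used to abandon one path without aborting the whole scan; a minimal sketch that greps files for a byte pattern (the file layout is invented for illustration):

import io
import os
import shutil
import tempfile

class _SkipFileException(Exception):
    pass

def scan(paths, needle):
    for path in paths:
        try:
            with io.open(path, 'rb') as fd:
                if needle not in fd.read():
                    raise _SkipFileException()
            yield path
        except _SkipFileException:
            pass                     # this file failed a condition; move on

d = tempfile.mkdtemp()
a, b = os.path.join(d, 'a'), os.path.join(d, 'b')
with open(a, 'wb') as f:
    f.write(b'hello world')
with open(b, 'wb') as f:
    f.write(b'nothing here')
print(list(scan([a, b], b'world')) == [a])   # True
shutil.rmtree(d)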
pyQode/pyqode.core
pyqode/core/widgets/output_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L1141-L1159
def _append_message(self, text, char_format): """ Parses text and executes parsed operations. """ self._cursor = self._text_edit.textCursor() operations = self._parser.parse_text(FormattedText(text, char_format)) for i, operation in enumerate(operations): try: func = getattr(self, '_%s' % operation.command) except AttributeError: print('command not implemented: %r - %r' % ( operation.command, operation.data)) else: try: func(operation.data) except Exception: _logger().exception('exception while running %r', operation) # uncomment next line for debugging commands self._text_edit.repaint()
[ "def", "_append_message", "(", "self", ",", "text", ",", "char_format", ")", ":", "self", ".", "_cursor", "=", "self", ".", "_text_edit", ".", "textCursor", "(", ")", "operations", "=", "self", ".", "_parser", ".", "parse_text", "(", "FormattedText", "(", "text", ",", "char_format", ")", ")", "for", "i", ",", "operation", "in", "enumerate", "(", "operations", ")", ":", "try", ":", "func", "=", "getattr", "(", "self", ",", "'_%s'", "%", "operation", ".", "command", ")", "except", "AttributeError", ":", "print", "(", "'command not implemented: %r - %r'", "%", "(", "operation", ".", "command", ",", "operation", ".", "data", ")", ")", "else", ":", "try", ":", "func", "(", "operation", ".", "data", ")", "except", "Exception", ":", "_logger", "(", ")", ".", "exception", "(", "'exception while running %r'", ",", "operation", ")", "# uncomment next line for debugging commands", "self", ".", "_text_edit", ".", "repaint", "(", ")" ]
Parses text and executes parsed operations.
[ "Parses", "text", "and", "executes", "parsed", "operations", "." ]
python
train
43.368421
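The dispatch is a getattr lookup against underscore-prefixed handlers; a stripped-down sketch of the same pattern:

class Dispatcher:
    def run(self, operations):
        for command, data in operations:
            try:
                func = getattr(self, '_%s' % command)
            except AttributeError:
                print('command not implemented: %r - %r' % (command, data))
            else:
                func(data)

    def _draw(self, data):
        print('draw', data)

Dispatcher().run([('draw', 'abc'), ('erase', None)])
# draw abc
# command not implemented: 'erase' - None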
KxSystems/pyq
src/pyq/_n.py
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/_n.py#L148-L166
def array(self, dtype=None): """An implementation of __array__()""" t = self._t # timestamp (12) through last enum (76) if 11 <= t < 77: dtype = dtypeof(self) a = numpy.empty(len(self), dtype) k2a(a, self) return a # table (98) if t == 98: if dtype is None: dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value))) dtype = numpy.dtype(dtype) a = numpy.empty(int(self.count), dtype) for c in dtype.fields: k2a(a[c], self[c]) return a return numpy.array(list(self), dtype)
[ "def", "array", "(", "self", ",", "dtype", "=", "None", ")", ":", "t", "=", "self", ".", "_t", "# timestamp (12) through last enum (76)", "if", "11", "<=", "t", "<", "77", ":", "dtype", "=", "dtypeof", "(", "self", ")", "a", "=", "numpy", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", ")", "k2a", "(", "a", ",", "self", ")", "return", "a", "# table (98)", "if", "t", "==", "98", ":", "if", "dtype", "is", "None", ":", "dtype", "=", "list", "(", "zip", "(", "self", ".", "cols", ",", "(", "dtypeof", "(", "c", ")", "for", "c", "in", "self", ".", "flip", ".", "value", ")", ")", ")", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "a", "=", "numpy", ".", "empty", "(", "int", "(", "self", ".", "count", ")", ",", "dtype", ")", "for", "c", "in", "dtype", ".", "fields", ":", "k2a", "(", "a", "[", "c", "]", ",", "self", "[", "c", "]", ")", "return", "a", "return", "numpy", ".", "array", "(", "list", "(", "self", ")", ",", "dtype", ")" ]
An implementation of __array__()
[ "An", "implementation", "of", "__array__", "()" ]
python
train
30.894737
ekzhu/datasketch
datasketch/hyperloglog.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/hyperloglog.py#L184-L188
def clear(self): ''' Reset the current HyperLogLog to empty. ''' self.reg = np.zeros((self.m,), dtype=np.int8)
[ "def", "clear", "(", "self", ")", ":", "self", ".", "reg", "=", "np", ".", "zeros", "(", "(", "self", ".", "m", ",", ")", ",", "dtype", "=", "np", ".", "int8", ")" ]
Reset the current HyperLogLog to empty.
[ "Reset", "the", "current", "HyperLogLog", "to", "empty", "." ]
python
test
27.6
Robpol86/libnl
libnl/nl80211/iw_util.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/iw_util.py#L131-L159
def get_ht_mcs(mcs):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591.

    Positional arguments:
    mcs -- bytearray.

    Returns:
    Dict.
    """
    answers = dict()

    # 10-bit rate field: low byte in mcs[10], top two bits in mcs[11] (OR, as in the C source).
    max_rx_supp_data_rate = (mcs[10] | ((mcs[11] & 0x3) << 8))
    tx_mcs_set_defined = not not (mcs[12] & (1 << 0))
    tx_mcs_set_equal = not (mcs[12] & (1 << 1))
    tx_max_num_spatial_streams = ((mcs[12] >> 2) & 3) + 1
    tx_unequal_modulation = not not (mcs[12] & (1 << 4))

    if max_rx_supp_data_rate:
        answers['HT Max RX data rate (Mbps)'] = max_rx_supp_data_rate

    if tx_mcs_set_defined and tx_mcs_set_equal:
        answers['HT TX/RX MCS rate indexes supported'] = get_mcs_index(mcs)
    elif tx_mcs_set_defined:
        answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)
        answers['TX unequal modulation supported'] = bool(tx_unequal_modulation)
        answers['HT TX Max spatial streams'] = tx_max_num_spatial_streams
    else:
        answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs)

    return answers
[ "def", "get_ht_mcs", "(", "mcs", ")", ":", "answers", "=", "dict", "(", ")", "max_rx_supp_data_rate", "=", "(", "mcs", "[", "10", "]", "&", "(", "(", "mcs", "[", "11", "]", "&", "0x3", ")", "<<", "8", ")", ")", "tx_mcs_set_defined", "=", "not", "not", "(", "mcs", "[", "12", "]", "&", "(", "1", "<<", "0", ")", ")", "tx_mcs_set_equal", "=", "not", "(", "mcs", "[", "12", "]", "&", "(", "1", "<<", "1", ")", ")", "tx_max_num_spatial_streams", "=", "(", "(", "mcs", "[", "12", "]", ">>", "2", ")", "&", "3", ")", "+", "1", "tx_unequal_modulation", "=", "not", "not", "(", "mcs", "[", "12", "]", "&", "(", "1", "<<", "4", ")", ")", "if", "max_rx_supp_data_rate", ":", "answers", "[", "'HT Max RX data rate (Mbps)'", "]", "=", "max_rx_supp_data_rate", "if", "tx_mcs_set_defined", "and", "tx_mcs_set_equal", ":", "answers", "[", "'HT TX/RX MCS rate indexes supported'", "]", "=", "get_mcs_index", "(", "mcs", ")", "elif", "tx_mcs_set_defined", ":", "answers", "[", "'HT RX MCS rate indexes supported'", "]", "=", "get_mcs_index", "(", "mcs", ")", "answers", "[", "'TX unequal modulation supported'", "]", "=", "bool", "(", "tx_unequal_modulation", ")", "answers", "[", "'HT TX Max spatial streams'", "]", "=", "tx_max_num_spatial_streams", "else", ":", "answers", "[", "'HT RX MCS rate indexes supported'", "]", "=", "get_mcs_index", "(", "mcs", ")", "return", "answers" ]
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591. Positional arguments: mcs -- bytearray. Returns: Dict.
[ "http", ":", "//", "git", ".", "kernel", ".", "org", "/", "cgit", "/", "linux", "/", "kernel", "/", "git", "/", "jberg", "/", "iw", ".", "git", "/", "tree", "/", "util", ".", "c?id", "=", "v3", ".", "17#n591", "." ]
python
train
36.241379
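Feeding the parser a hand-built 16-byte MCS set shows the bit extraction; the byte values below are made up to exercise each field.

mcs = bytearray(16)
mcs[10] = 0x2c            # low byte of the 10-bit max RX data rate
mcs[11] = 0x01            # top two bits of that rate field
mcs[12] = 0b00010111      # TX set defined, unequal set, 2 streams, unequal modulation

max_rate = mcs[10] | ((mcs[11] & 0x3) << 8)
tx_defined = bool(mcs[12] & (1 << 0))
tx_equal = not (mcs[12] & (1 << 1))
streams = ((mcs[12] >> 2) & 3) + 1
print(max_rate, tx_defined, tx_equal, streams)   # 300 True False 2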
dagster-io/dagster
python_modules/dagster/dagster/core/execution.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L241-L273
def transformed_value(self, output_name=DEFAULT_OUTPUT): '''Returns transformed value either for DEFAULT_OUTPUT or for the output given as output_name. Returns None if execution result isn't a success. Reconstructs the pipeline context to materialize value. ''' check.str_param(output_name, 'output_name') if not self.solid.definition.has_output(output_name): raise DagsterInvariantViolationError( '{output_name} not defined in solid {solid}'.format( output_name=output_name, solid=self.solid.name ) ) if self.success: for result in self.transforms: if ( result.is_successful_output and result.step_output_data.output_name == output_name ): with self.reconstruct_context() as context: value = self._get_value(context, result.step_output_data) return value raise DagsterInvariantViolationError( ( 'Did not find result {output_name} in solid {self.solid.name} ' 'execution result' ).format(output_name=output_name, self=self) ) else: return None
[ "def", "transformed_value", "(", "self", ",", "output_name", "=", "DEFAULT_OUTPUT", ")", ":", "check", ".", "str_param", "(", "output_name", ",", "'output_name'", ")", "if", "not", "self", ".", "solid", ".", "definition", ".", "has_output", "(", "output_name", ")", ":", "raise", "DagsterInvariantViolationError", "(", "'{output_name} not defined in solid {solid}'", ".", "format", "(", "output_name", "=", "output_name", ",", "solid", "=", "self", ".", "solid", ".", "name", ")", ")", "if", "self", ".", "success", ":", "for", "result", "in", "self", ".", "transforms", ":", "if", "(", "result", ".", "is_successful_output", "and", "result", ".", "step_output_data", ".", "output_name", "==", "output_name", ")", ":", "with", "self", ".", "reconstruct_context", "(", ")", "as", "context", ":", "value", "=", "self", ".", "_get_value", "(", "context", ",", "result", ".", "step_output_data", ")", "return", "value", "raise", "DagsterInvariantViolationError", "(", "(", "'Did not find result {output_name} in solid {self.solid.name} '", "'execution result'", ")", ".", "format", "(", "output_name", "=", "output_name", ",", "self", "=", "self", ")", ")", "else", ":", "return", "None" ]
Returns transformed value either for DEFAULT_OUTPUT or for the output given as output_name. Returns None if execution result isn't a success. Reconstructs the pipeline context to materialize value.
[ "Returns", "transformed", "value", "either", "for", "DEFAULT_OUTPUT", "or", "for", "the", "output", "given", "as", "output_name", ".", "Returns", "None", "if", "execution", "result", "isn", "t", "a", "success", "." ]
python
test
39.727273
mulkieran/justbases
src/justbases/_nats.py
https://github.com/mulkieran/justbases/blob/dd52ff4b3d11609f54b2673599ee4eeb20f9734f/src/justbases/_nats.py#L124-L160
def carry_in(value, carry, base): """ Add a carry digit to a number represented by ``value``. :param value: the value :type value: list of int :param int carry: the carry digit (>= 0) :param int base: the base (>= 2) :returns: carry-out and result :rtype: tuple of int * (list of int) Complexity: O(len(value)) """ if base < 2: raise BasesValueError(base, "base", "must be at least 2") if any(x < 0 or x >= base for x in value): raise BasesValueError( value, "value", "elements must be at least 0 and less than %s" % base ) if carry < 0 or carry >= base: raise BasesValueError( carry, "carry", "carry must be less than %s" % base ) result = [] for val in reversed(value): (carry, new_val) = divmod(val + carry, base) result.append(new_val) return (carry, list(reversed(result)))
[ "def", "carry_in", "(", "value", ",", "carry", ",", "base", ")", ":", "if", "base", "<", "2", ":", "raise", "BasesValueError", "(", "base", ",", "\"base\"", ",", "\"must be at least 2\"", ")", "if", "any", "(", "x", "<", "0", "or", "x", ">=", "base", "for", "x", "in", "value", ")", ":", "raise", "BasesValueError", "(", "value", ",", "\"value\"", ",", "\"elements must be at least 0 and less than %s\"", "%", "base", ")", "if", "carry", "<", "0", "or", "carry", ">=", "base", ":", "raise", "BasesValueError", "(", "carry", ",", "\"carry\"", ",", "\"carry must be less than %s\"", "%", "base", ")", "result", "=", "[", "]", "for", "val", "in", "reversed", "(", "value", ")", ":", "(", "carry", ",", "new_val", ")", "=", "divmod", "(", "val", "+", "carry", ",", "base", ")", "result", ".", "append", "(", "new_val", ")", "return", "(", "carry", ",", "list", "(", "reversed", "(", "result", ")", ")", ")" ]
Add a carry digit to a number represented by ``value``. :param value: the value :type value: list of int :param int carry: the carry digit (>= 0) :param int base: the base (>= 2) :returns: carry-out and result :rtype: tuple of int * (list of int) Complexity: O(len(value))
[ "Add", "a", "carry", "digit", "to", "a", "number", "represented", "by", "value", "." ]
python
train
28.216216
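A worked example of the ripple: adding a carry of 1 to 99 in base 10 overflows both digits and carries out. The validation is dropped here to keep the sketch short.

def carry_in(value, carry, base):
    # minimal version of the arithmetic above, without the input checks
    result = []
    for val in reversed(value):
        carry, new_val = divmod(val + carry, base)
        result.append(new_val)
    return carry, list(reversed(result))

print(carry_in([9, 9], 1, 10))   # (1, [0, 0]) -- 99 + 1 = (1)00
print(carry_in([1, 0], 1, 2))    # (0, [1, 1]) -- 0b10 + 1 = 0b11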
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L619-L632
def screenshot(self, filename, scale=1.0, quality=100):
    '''Take a screenshot and save it to the given file.'''
    # Try the agent's own screenshot service first.
    result = self.server.screenshot(filename, scale, quality)
    if result:
        return result

    # Fall back to capturing on the device and pulling the file via adb.
    device_file = self.server.jsonrpc.takeScreenshot("screenshot.png",
                                                     scale, quality)
    if not device_file:
        return None
    p = self.server.adb.cmd("pull", device_file, filename)
    p.wait()
    self.server.adb.cmd("shell", "rm", device_file).wait()
    return filename if p.returncode == 0 else None
[ "def", "screenshot", "(", "self", ",", "filename", ",", "scale", "=", "1.0", ",", "quality", "=", "100", ")", ":", "result", "=", "self", ".", "server", ".", "screenshot", "(", "filename", ",", "scale", ",", "quality", ")", "if", "result", ":", "return", "result", "device_file", "=", "self", ".", "server", ".", "jsonrpc", ".", "takeScreenshot", "(", "\"screenshot.png\"", ",", "scale", ",", "quality", ")", "if", "not", "device_file", ":", "return", "None", "p", "=", "self", ".", "server", ".", "adb", ".", "cmd", "(", "\"pull\"", ",", "device_file", ",", "filename", ")", "p", ".", "wait", "(", ")", "self", ".", "server", ".", "adb", ".", "cmd", "(", "\"shell\"", ",", "\"rm\"", ",", "device_file", ")", ".", "wait", "(", ")", "return", "filename", "if", "p", ".", "returncode", "is", "0", "else", "None" ]
Take a screenshot and save it to the given file.
[ "take", "screenshot", "." ]
python
train
41.642857
jilljenn/tryalgo
tryalgo/eulerian_tour.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/eulerian_tour.py#L41-L63
def eulerian_tour_directed(graph): """Eulerian tour on a directed graph :param graph: directed graph in listlist format, cannot be listdict :assumes: graph is eulerian :returns: eulerian cycle as a vertex list :complexity: `O(|V|+|E|)` """ P = [] Q = [0] R = [] succ = [0] * len(graph) while Q: node = Q.pop() P.append(node) while succ[node] < len(graph[node]): neighbor = graph[node][succ[node]] succ[node] += 1 R.append(neighbor) node = neighbor while R: Q.append(R.pop()) return P
[ "def", "eulerian_tour_directed", "(", "graph", ")", ":", "P", "=", "[", "]", "Q", "=", "[", "0", "]", "R", "=", "[", "]", "succ", "=", "[", "0", "]", "*", "len", "(", "graph", ")", "while", "Q", ":", "node", "=", "Q", ".", "pop", "(", ")", "P", ".", "append", "(", "node", ")", "while", "succ", "[", "node", "]", "<", "len", "(", "graph", "[", "node", "]", ")", ":", "neighbor", "=", "graph", "[", "node", "]", "[", "succ", "[", "node", "]", "]", "succ", "[", "node", "]", "+=", "1", "R", ".", "append", "(", "neighbor", ")", "node", "=", "neighbor", "while", "R", ":", "Q", ".", "append", "(", "R", ".", "pop", "(", ")", ")", "return", "P" ]
Eulerian tour on a directed graph :param graph: directed graph in listlist format, cannot be listdict :assumes: graph is eulerian :returns: eulerian cycle as a vertex list :complexity: `O(|V|+|E|)`
[ "Eulerian", "tour", "on", "a", "directed", "graph" ]
python
train
26.695652
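Running it on a small directed Eulerian graph (every vertex has in-degree equal to out-degree), assuming tryalgo is installed:

from tryalgo.eulerian_tour import eulerian_tour_directed   # assumes tryalgo is installed

# two directed triangles over the same vertices: 0->1->2->0 and 0->2->1->0
graph = [[1, 2], [2, 0], [0, 1]]
print(eulerian_tour_directed(graph))
# [0, 1, 2, 0, 2, 1, 0] -- each of the 6 edges is used exactly once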
cgarciae/phi
phi/dsl.py
https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L930-L973
def With(self, context_manager, *body, **kwargs):
    """
**With**

    def With(context_manager, *body):

**Arguments**

* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.

As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement, in Phi you use `P.With` or `phi.With`

**Context**

Python's `with` statement returns a context object through the `as` keyword, in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.

### Examples

    from phi import P, Obj, Context, With, Pipe

    text = Pipe(
        "text.txt",
        With( open, Context,
            Obj.read()
        )
    )

The previous is equivalent to

    with open("text.txt") as f:
        text = f.read()

    """
    context_f = _parse(context_manager)._f
    body_f = E.Seq(*body)._f

    def g(x, state):
        context, state = context_f(x, state)
        with context as scope:
            with _WithContextManager(scope):
                return body_f(x, state)

    return self.__then__(g, **kwargs)
[ "def", "With", "(", "self", ",", "context_manager", ",", "*", "body", ",", "*", "*", "kwargs", ")", ":", "context_f", "=", "_parse", "(", "context_manager", ")", ".", "_f", "body_f", "=", "E", ".", "Seq", "(", "*", "body", ")", ".", "_f", "def", "g", "(", "x", ",", "state", ")", ":", "context", ",", "state", "=", "context_f", "(", "x", ",", "state", ")", "with", "context", "as", "scope", ":", "with", "_WithContextManager", "(", "scope", ")", ":", "return", "body_f", "(", "x", ",", "state", ")", "return", "self", ".", "__then__", "(", "g", ",", "*", "*", "kwargs", ")" ]
**With**

    def With(context_manager, *body):

**Arguments**

* **context_manager**: a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) object or valid expression from the DSL that returns a context manager.
* ***body**: any valid expression of the DSL to be evaluated inside the context. `*body` is interpreted as a tuple so all expressions contained are composed.

As with normal Python programs you sometimes might want to create a context for a block of code. You normally give a [context manager](https://docs.python.org/2/reference/datamodel.html#context-managers) to the [with](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement) statement, in Phi you use `P.With` or `phi.With`

**Context**

Python's `with` statement returns a context object through the `as` keyword, in the DSL this object can be obtained using the `P.Context` method or the `phi.Context` function.

### Examples

    from phi import P, Obj, Context, With, Pipe

    text = Pipe(
        "text.txt",
        With( open, Context,
            Obj.read()
        )
    )

The previous is equivalent to

    with open("text.txt") as f:
        text = f.read()
[ "**", "With", "**" ]
python
train
35.25
taskcluster/taskcluster-client.py
taskcluster/ec2manager.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/ec2manager.py#L272-L281
def allState(self, *args, **kwargs): """ List out the entire internal state This method is only for debugging the ec2-manager This method is ``experimental`` """ return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
[ "def", "allState", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"allState\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
List out the entire internal state This method is only for debugging the ec2-manager This method is ``experimental``
[ "List", "out", "the", "entire", "internal", "state" ]
python
train
27.2
sassoo/goldman
goldman/models/base.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/base.py#L157-L180
def to_exceptions(cls, errors): """ Convert the validation errors into ValidationFailure exc's Transform native schematics validation errors into a goldman ValidationFailure exception. :param errors: dict of errors in schematics format :return: list of ValidationFailure exception objects """ ret = [] for key, val in errors.items(): if key in cls.relationships: attr = '/data/relationships/%s' % key else: attr = '/data/attributes/%s' % key for error in val: ret.append(ValidationFailure(attr, detail=error)) return ret
[ "def", "to_exceptions", "(", "cls", ",", "errors", ")", ":", "ret", "=", "[", "]", "for", "key", ",", "val", "in", "errors", ".", "items", "(", ")", ":", "if", "key", "in", "cls", ".", "relationships", ":", "attr", "=", "'/data/relationships/%s'", "%", "key", "else", ":", "attr", "=", "'/data/attributes/%s'", "%", "key", "for", "error", "in", "val", ":", "ret", ".", "append", "(", "ValidationFailure", "(", "attr", ",", "detail", "=", "error", ")", ")", "return", "ret" ]
Convert the validation errors into ValidationFailure exc's Transform native schematics validation errors into a goldman ValidationFailure exception. :param errors: dict of errors in schematics format :return: list of ValidationFailure exception objects
[ "Convert", "the", "validation", "errors", "into", "ValidationFailure", "exc", "s" ]
python
train
28.458333
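The attribute-versus-relationship pointer split, sketched with plain tuples standing in for the goldman model and exception classes (the names here are placeholders, not the goldman API):

def error_pointers(errors, relationships):
    # map each failing field to its JSON API source pointer
    out = []
    for key, val in errors.items():
        if key in relationships:
            attr = '/data/relationships/%s' % key
        else:
            attr = '/data/attributes/%s' % key
        for error in val:
            out.append((attr, error))
    return out

errors = {'title': ['too short'], 'author': ['unknown resource']}
print(error_pointers(errors, relationships={'author'}))
# [('/data/attributes/title', 'too short'),
#  ('/data/relationships/author', 'unknown resource')]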
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L105-L305
def incomplete_alg(alg_str, input_color, position):
    """
    Converts a string written in short algebraic form into an incomplete move.
    These incomplete moves do not have the initial location specified and
    therefore cannot be used to update the board. In order to fully utilize
    an incomplete move, it must be run through ``make_legal()`` with
    the corresponding position. It is recommended to use
    ``short_alg()`` instead of this method because it returns a complete
    move.

    Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q

    :type alg_str: str
    :type input_color: Color
    """
    edge_rank = 0 \
        if input_color == color.white \
        else 7

    if alg_str is None or len(alg_str) <= 1:
        raise ValueError("algebraic string {} is invalid".format(alg_str))

    # King-side castle
    if alg_str in ["00", "oo", "OO", "0-0", "o-o", "O-O"]:
        return Move(end_loc=Location(edge_rank, 6),
                    piece=King(input_color, Location(edge_rank, 4)),
                    status=notation_const.KING_SIDE_CASTLE,
                    start_loc=Location(edge_rank, 4))

    # Queen-side castle
    if alg_str in ["000", "ooo", "OOO", "0-0-0", "o-o-o", "O-O-O"]:
        return Move(end_loc=Location(edge_rank, 2),
                    piece=King(input_color, Location(edge_rank, 4)),
                    status=notation_const.QUEEN_SIDE_CASTLE,
                    start_loc=Location(edge_rank, 4))

    try:
        end_location = Location.from_string(alg_str[-2:])
    except ValueError:
        end_location = Location.from_string(alg_str[-4:-2])

    # Pawn movement
    if len(alg_str) == 2:
        possible_pawn = position.piece_at_square(end_location.shift_back(input_color))
        if type(possible_pawn) is Pawn and \
                possible_pawn.color == input_color:
            start_location = end_location.shift_back(input_color)
        else:
            start_location = end_location.shift_back(input_color, times=2)

        return Move(end_loc=end_location,
                    piece=position.piece_at_square(start_location),
                    status=notation_const.MOVEMENT,
                    start_loc=start_location)

    # Non-pawn Piece movement
    if len(alg_str) == 3:
        possible_piece, start_location = _get_piece_start_location(end_location,
                                                                   input_color,
                                                                   _get_piece(alg_str, 0),
                                                                   position)
        return Move(end_loc=end_location,
                    piece=possible_piece,
                    status=notation_const.MOVEMENT,
                    start_loc=start_location)

    # Multiple options (Capture or Piece movement with file specified)
    if len(alg_str) == 4:

        # Capture
        if alg_str[1].upper() == "X":

            # Pawn capture
            if not alg_str[0].isupper():
                pawn_location = Location(end_location.rank, ord(alg_str[0]) - 97).shift_back(input_color)
                possible_pawn = position.piece_at_square(pawn_location)
                if type(possible_pawn) is Pawn and \
                        possible_pawn.color == input_color:
                    en_passant_pawn = position.piece_at_square(end_location.shift_back(input_color))
                    if type(en_passant_pawn) is Pawn and \
                            en_passant_pawn.color != input_color and \
                            position.is_square_empty(end_location):
                        return Move(end_loc=end_location,
                                    piece=position.piece_at_square(pawn_location),
                                    status=notation_const.EN_PASSANT,
                                    start_loc=pawn_location)
                    else:
                        return Move(end_loc=end_location,
                                    piece=position.piece_at_square(pawn_location),
                                    status=notation_const.CAPTURE,
                                    start_loc=pawn_location)

            # Piece capture
            elif alg_str[0].isupper():
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position)
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

        # Pawn Promotion
        elif alg_str[2] == "=":
            promote_end_loc = Location.from_string(alg_str[:2])
            if promote_end_loc.rank != 0 and promote_end_loc.rank != 7:
                raise ValueError("Promotion {} must be on the last rank".format(alg_str))
            return Move(end_loc=promote_end_loc,
                        piece=Pawn(input_color, promote_end_loc),
                        status=notation_const.PROMOTE,
                        promoted_to_piece=_get_piece(alg_str, 3),
                        start_loc=promote_end_loc.shift_back(input_color))

        # Non-pawn Piece movement with file specified (aRb7)
        elif alg_str[1].isupper() and not alg_str[0].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 1),
                                                                       position,
                                                                       start_file=alg_str[0])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

        # (alt) Non-pawn Piece movement with file specified (Rab7)
        elif alg_str[0].isupper() and not alg_str[1].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 0),
                                                                       position,
                                                                       start_file=alg_str[1])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

        # Non-pawn Piece movement with rank specified (R1b7)
        elif alg_str[0].isupper() and alg_str[1].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 0),
                                                                       position,
                                                                       start_rank=alg_str[1])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

    # Multiple options
    if len(alg_str) == 5:

        # Non-pawn Piece movement with rank and file specified (a2Ra1)
        if not alg_str[0].isdigit() and \
                alg_str[1].isdigit() and \
                alg_str[2].isupper() and \
                not alg_str[3].isdigit() and \
                alg_str[4].isdigit():
            start_loc = Location.from_string(alg_str[:2])
            return Move(end_loc=end_location,
                        piece=_get_piece(alg_str, 2)(input_color, end_location),
                        status=notation_const.MOVEMENT,
                        start_loc=start_loc)

        # Multiple Piece capture options
        if alg_str[2].upper() == "X":

            # Piece capture with rank specified (R1xa1)
            if alg_str[1].isdigit():
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position,
                                                                           start_rank=alg_str[1])
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

            # Piece capture with file specified (Rdxd7)
            else:
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position,
                                                                           start_file=alg_str[1])
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

    # Pawn promotion with capture
    if len(alg_str) == 6 and alg_str[4] == "=":
        start_file = ord(alg_str[0]) - 97
        promote_capture_end_loc = Location.from_string(alg_str[2:4])
        return Move(end_loc=promote_capture_end_loc,
                    piece=Pawn(input_color, promote_capture_end_loc),
                    status=notation_const.CAPTURE_AND_PROMOTE,
                    promoted_to_piece=_get_piece(alg_str, 5),
                    start_loc=Location(end_location.shift_back(input_color).rank, start_file))

    raise ValueError("algebraic string {} is invalid in \n{}".format(alg_str, position))
[ "def", "incomplete_alg", "(", "alg_str", ",", "input_color", ",", "position", ")", ":", "edge_rank", "=", "0", "if", "input_color", "==", "color", ".", "white", "else", "7", "if", "alg_str", "is", "None", "or", "len", "(", "alg_str", ")", "<=", "1", ":", "raise", "ValueError", "(", "\"algebraic string {} is invalid\"", ".", "format", "(", "alg_str", ")", ")", "# King-side castle", "if", "alg_str", "in", "[", "\"00\"", ",", "\"oo\"", ",", "\"OO\"", ",", "\"0-0\"", ",", "\"o-o\"", ",", "\"O-O\"", "]", ":", "return", "Move", "(", "end_loc", "=", "Location", "(", "edge_rank", ",", "6", ")", ",", "piece", "=", "King", "(", "input_color", ",", "Location", "(", "edge_rank", ",", "4", ")", ")", ",", "status", "=", "notation_const", ".", "KING_SIDE_CASTLE", ",", "start_loc", "=", "Location", "(", "edge_rank", ",", "4", ")", ")", "# Queen-side castle", "if", "alg_str", "in", "[", "\"000\"", ",", "\"ooo\"", ",", "\"OOO\"", ",", "\"0-0-0\"", ",", "\"o-o-o\"", ",", "\"O-O-O\"", "]", ":", "return", "Move", "(", "end_loc", "=", "Location", "(", "edge_rank", ",", "2", ")", ",", "piece", "=", "King", "(", "input_color", ",", "Location", "(", "edge_rank", ",", "4", ")", ")", ",", "status", "=", "notation_const", ".", "QUEEN_SIDE_CASTLE", ",", "start_loc", "=", "Location", "(", "edge_rank", ",", "4", ")", ")", "try", ":", "end_location", "=", "Location", ".", "from_string", "(", "alg_str", "[", "-", "2", ":", "]", ")", "except", "ValueError", ":", "end_location", "=", "Location", ".", "from_string", "(", "alg_str", "[", "-", "4", ":", "-", "2", "]", ")", "# Pawn movement", "if", "len", "(", "alg_str", ")", "==", "2", ":", "possible_pawn", "=", "position", ".", "piece_at_square", "(", "end_location", ".", "shift_back", "(", "input_color", ")", ")", "if", "type", "(", "possible_pawn", ")", "is", "Pawn", "and", "possible_pawn", ".", "color", "==", "input_color", ":", "start_location", "=", "end_location", ".", "shift_back", "(", "input_color", ")", "else", ":", "start_location", "=", "end_location", ".", "shift_back", "(", "input_color", ",", "times", "=", "2", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "start_location", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Non-pawn Piece movement", "if", "len", "(", "alg_str", ")", "==", "3", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Multiple options (Capture or Piece movement with file specified)", "if", "len", "(", "alg_str", ")", "==", "4", ":", "# Capture", "if", "alg_str", "[", "1", "]", ".", "upper", "(", ")", "==", "\"X\"", ":", "# Pawn capture", "if", "not", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", ":", "pawn_location", "=", "Location", "(", "end_location", ".", "rank", ",", "ord", "(", "alg_str", "[", "0", "]", ")", "-", "97", ")", ".", "shift_back", "(", "input_color", ")", "possible_pawn", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", "if", "type", "(", "possible_pawn", ")", "is", "Pawn", "and", "possible_pawn", ".", "color", "==", "input_color", ":", "en_passant_pawn", "=", "position", ".", "piece_at_square", "(", "end_location", ".", 
"shift_back", "(", "input_color", ")", ")", "if", "type", "(", "en_passant_pawn", ")", "is", "Pawn", "and", "en_passant_pawn", ".", "color", "!=", "input_color", "and", "position", ".", "is_square_empty", "(", "end_location", ")", ":", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", ",", "status", "=", "notation_const", ".", "EN_PASSANT", ",", "start_loc", "=", "pawn_location", ")", "else", ":", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "pawn_location", ")", "# Piece capture", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Pawn Promotion", "elif", "alg_str", "[", "2", "]", "==", "\"=\"", ":", "promote_end_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", ":", "2", "]", ")", "if", "promote_end_loc", ".", "rank", "!=", "0", "and", "promote_end_loc", ".", "rank", "!=", "7", ":", "raise", "ValueError", "(", "\"Promotion {} must be on the last rank\"", ".", "format", "(", "alg_str", ")", ")", "return", "Move", "(", "end_loc", "=", "promote_end_loc", ",", "piece", "=", "Pawn", "(", "input_color", ",", "promote_end_loc", ")", ",", "status", "=", "notation_const", ".", "PROMOTE", ",", "promoted_to_piece", "=", "_get_piece", "(", "alg_str", ",", "3", ")", ",", "start_loc", "=", "promote_end_loc", ".", "shift_back", "(", "input_color", ")", ")", "# Non-pawn Piece movement with file specified (aRb7)", "elif", "alg_str", "[", "1", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "0", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "1", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "0", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# (alt) Non-pawn Piece movement with file specified (Rab7)", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Non-pawn Piece movement with rank specified (R1b7)", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", "and", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", 
"start_rank", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Multiple options", "if", "len", "(", "alg_str", ")", "==", "5", ":", "# Non-pawn Piece movement with rank and file specified (a2Ra1", "if", "not", "alg_str", "[", "0", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "2", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "3", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "4", "]", ".", "isdigit", ":", "start_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", ":", "2", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "_get_piece", "(", "alg_str", ",", "2", ")", "(", "input_color", ",", "end_location", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_loc", ")", "# Multiple Piece capture options", "if", "alg_str", "[", "2", "]", ".", "upper", "(", ")", "==", "\"X\"", ":", "# Piece capture with rank specified (R1xa1)", "if", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_rank", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Piece capture with file specified (Rdxd7)", "else", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Pawn promotion with capture", "if", "len", "(", "alg_str", ")", "==", "6", "and", "alg_str", "[", "4", "]", "==", "\"=\"", ":", "start_file", "=", "ord", "(", "alg_str", "[", "0", "]", ")", "-", "97", "promote_capture_end_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", "2", ":", "4", "]", ")", "return", "Move", "(", "end_loc", "=", "promote_capture_end_loc", ",", "piece", "=", "Pawn", "(", "input_color", ",", "promote_capture_end_loc", ")", ",", "status", "=", "notation_const", ".", "CAPTURE_AND_PROMOTE", ",", "promoted_to_piece", "=", "_get_piece", "(", "alg_str", ",", "5", ")", ",", "start_loc", "=", "Location", "(", "end_location", ".", "shift_back", "(", "input_color", ")", ".", "rank", ",", "start_file", ")", ")", "raise", "ValueError", "(", "\"algebraic string {} is invalid in \\n{}\"", ".", "format", "(", "alg_str", ",", "position", ")", ")" ]
Converts a string written in short algebraic form into an incomplete move. These incomplete moves do not have the initial location specified and therefore cannot be used to update the board. In order to fully utilize an incomplete move, it must be run through ``make_legal()`` with the corresponding position. It is recommended to use ``short_alg()`` instead of this method because it returns a complete move.

Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q

:type alg_str: str
:type input_color: Color
[ "Converts", "a", "string", "written", "in", "short", "algebraic", "form", "into", "an", "incomplete", "move", ".", "These", "incomplete", "moves", "do", "not", "have", "the", "initial", "location", "specified", "and", "therefore", "cannot", "be", "used", "to", "update", "the", "board", ".", "IN", "order", "to", "fully", "utilize", "incomplete", "move", "it", "must", "be", "run", "through", "make_legal", "()", "with", "the", "corresponding", "position", ".", "It", "is", "recommended", "to", "use", "short_alg", "()", "instead", "of", "this", "method", "because", "it", "returns", "a", "complete", "move", "." ]
python
train
51.288557
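A hedged usage sketch for the parser above: apart from incomplete_alg, color.white, and make_legal(), every name below (the Board factory and the Move attributes printed) is an assumption reconstructed from the tokens and docstring rather than the project's documented API.

# All names except incomplete_alg, color.white, and make_legal() are assumed.
position = Board.init_default()                      # assumed default-position factory
move = incomplete_alg('Nf3', color.white, position)  # parse short algebraic notation
print(move.end_loc, move.status)                     # start square inferred, not yet validated
# Per the docstring, run the result through make_legal(position) before applying it.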
apache/airflow
airflow/contrib/hooks/gcp_transfer_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L246-L262
def get_transfer_operation(self, operation_name):
        """
        Gets a transfer operation in Google Storage Transfer Service.

        :param operation_name: (Required) Name of the transfer operation.
        :type operation_name: str
        :return: transfer operation
        See:
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
        :rtype: dict
        """
        return (
            self.get_conn()
            .transferOperations()
            .get(name=operation_name)
            .execute(num_retries=self.num_retries)
        )
[ "def", "get_transfer_operation", "(", "self", ",", "operation_name", ")", ":", "return", "(", "self", ".", "get_conn", "(", ")", ".", "transferOperations", "(", ")", ".", "get", "(", "name", "=", "operation_name", ")", ".", "execute", "(", "num_retries", "=", "self", ".", "num_retries", ")", ")" ]
Gets a transfer operation in Google Storage Transfer Service.

:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:return: transfer operation
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
:rtype: dict
[ "Gets", "an", "transfer", "operation", "in", "Google", "Storage", "Transfer", "Service", "." ]
python
test
33.882353
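A minimal sketch of driving this hook method from DAG code. The hook class name follows the record's file path; the connection id and operation name are placeholders, and the printed fields follow the linked Operation resource.

from airflow.contrib.hooks.gcp_transfer_hook import GCPTransferServiceHook

hook = GCPTransferServiceHook(gcp_conn_id='google_cloud_default')  # placeholder conn id
op = hook.get_transfer_operation(
    operation_name='transferOperations/transferJobs-123-456')      # placeholder name
print(op.get('done'), op.get('metadata', {}).get('status'))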
portfors-lab/sparkle
sparkle/acq/daq_tasks.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/daq_tasks.py#L211-L219
def write(self, output):
        """Writes the data to be output to the device buffer

        :param output: data to output
        :type output: numpy.ndarray
        """
        w = c_int32()
        self.WriteAnalogF64(self.npoints, 0, 10.0, DAQmx_Val_GroupByChannel,
                            output, w, None)
[ "def", "write", "(", "self", ",", "output", ")", ":", "w", "=", "c_int32", "(", ")", "self", ".", "WriteAnalogF64", "(", "self", ".", "npoints", ",", "0", ",", "10.0", ",", "DAQmx_Val_GroupByChannel", ",", "output", ",", "w", ",", "None", ")" ]
Writes the data to be output to the device buffer :param output: data to output :type output: numpy.ndarray
[ "Writes", "the", "data", "to", "be", "output", "to", "the", "device", "buffer", ":", "param", "output", ":", "data", "to", "output", ":", "type", "output", ":", "numpy", ".", "ndarray" ]
python
train
35.111111
ioam/parambokeh
parambokeh/__init__.py
https://github.com/ioam/parambokeh/blob/fb9744f216273c7b24e65d037b1d621c08d7fde6/parambokeh/__init__.py#L80-L94
def process_hv_plots(widgets, plots): """ Temporary fix to patch HoloViews plot comms """ bokeh_plots = [] for plot in plots: if hasattr(plot, '_update_callbacks'): for subplot in plot.traverse(lambda x: x): subplot.comm = widgets.server_comm for cb in subplot.callbacks: for c in cb.callbacks: c.code = c.code.replace(plot.id, widgets.plot_id) plot = plot.state bokeh_plots.append(plot) return bokeh_plots
[ "def", "process_hv_plots", "(", "widgets", ",", "plots", ")", ":", "bokeh_plots", "=", "[", "]", "for", "plot", "in", "plots", ":", "if", "hasattr", "(", "plot", ",", "'_update_callbacks'", ")", ":", "for", "subplot", "in", "plot", ".", "traverse", "(", "lambda", "x", ":", "x", ")", ":", "subplot", ".", "comm", "=", "widgets", ".", "server_comm", "for", "cb", "in", "subplot", ".", "callbacks", ":", "for", "c", "in", "cb", ".", "callbacks", ":", "c", ".", "code", "=", "c", ".", "code", ".", "replace", "(", "plot", ".", "id", ",", "widgets", ".", "plot_id", ")", "plot", "=", "plot", ".", "state", "bokeh_plots", ".", "append", "(", "plot", ")", "return", "bokeh_plots" ]
Temporary fix to patch HoloViews plot comms
[ "Temporary", "fix", "to", "patch", "HoloViews", "plot", "comms" ]
python
test
35.466667
lltk/lltk
lltk/fr/scrapers/verbix.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/fr/scrapers/verbix.py#L23-L30
def _normalize(self, string): ''' Returns a sanitized string. ''' string = super(VerbixFr, self)._normalize(string) string = string.replace('il; elle', 'il/elle') string = string.replace('ils; elles', 'ils/elles') string = string.strip() return string
[ "def", "_normalize", "(", "self", ",", "string", ")", ":", "string", "=", "super", "(", "VerbixFr", ",", "self", ")", ".", "_normalize", "(", "string", ")", "string", "=", "string", ".", "replace", "(", "'il; elle'", ",", "'il/elle'", ")", "string", "=", "string", ".", "replace", "(", "'ils; elles'", ",", "'ils/elles'", ")", "string", "=", "string", ".", "strip", "(", ")", "return", "string" ]
Returns a sanitized string.
[ "Returns", "a", "sanitized", "string", "." ]
python
train
32.125
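Since the scraper's constructor is not shown in this record, the sketch below re-implements the same substitutions standalone to make the effect concrete; it mirrors _normalize minus the superclass step.

def normalize_fr(string):
    # Join the split pronoun pairs and trim surrounding whitespace.
    string = string.replace('il; elle', 'il/elle')
    string = string.replace('ils; elles', 'ils/elles')
    return string.strip()

print(normalize_fr('  ils; elles parlent '))   # -> 'ils/elles parlent'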
unfoldingWord-dev/python-gogs-client
gogs_client/interface.py
https://github.com/unfoldingWord-dev/python-gogs-client/blob/b7f27f4995abf914c0db8a424760f5b27331939d/gogs_client/interface.py#L197-L213
def get_branch(self, auth, username, repo_name, branch_name): """ Returns the branch with name ``branch_name`` in the repository with name ``repo_name`` owned by the user with username ``username``. :param auth.Authentication auth: authentication object :param str username: username of owner of repository containing the branch :param str repo_name: name of the repository with the branch :param str branch_name: name of the branch to return :return: a branch :rtype: GogsBranch :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced """ path = "/repos/{u}/{r}/branches/{b}".format(u=username, r=repo_name, b=branch_name) response = self.get(path, auth=auth) return GogsBranch.from_json(response.json())
[ "def", "get_branch", "(", "self", ",", "auth", ",", "username", ",", "repo_name", ",", "branch_name", ")", ":", "path", "=", "\"/repos/{u}/{r}/branches/{b}\"", ".", "format", "(", "u", "=", "username", ",", "r", "=", "repo_name", ",", "b", "=", "branch_name", ")", "response", "=", "self", ".", "get", "(", "path", ",", "auth", "=", "auth", ")", "return", "GogsBranch", ".", "from_json", "(", "response", ".", "json", "(", ")", ")" ]
Returns the branch with name ``branch_name`` in the repository with name ``repo_name`` owned by the user with username ``username``. :param auth.Authentication auth: authentication object :param str username: username of owner of repository containing the branch :param str repo_name: name of the repository with the branch :param str branch_name: name of the branch to return :return: a branch :rtype: GogsBranch :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
[ "Returns", "the", "branch", "with", "name", "branch_name", "in", "the", "repository", "with", "name", "repo_name", "owned", "by", "the", "user", "with", "username", "username", "." ]
python
train
52
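A hedged call pattern: GogsApi and Token are the client's public entry points, while the server URL, token, repository names, and the printed GogsBranch attribute are placeholders or assumptions.

import gogs_client

api = gogs_client.GogsApi('https://try.gogs.io')    # placeholder server
auth = gogs_client.Token('REPLACE_WITH_TOKEN')      # placeholder token
branch = api.get_branch(auth, 'octocat', 'hello-world', 'master')
print(branch.name)                                  # attribute assumed from GogsBranch.from_json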
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L440-L444
def _drop_indices(self): """Drops the database indices relating to n-grams.""" self._logger.info('Dropping database indices') self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL) self._logger.info('Finished dropping database indices')
[ "def", "_drop_indices", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Dropping database indices'", ")", "self", ".", "_conn", ".", "execute", "(", "constants", ".", "DROP_TEXTNGRAM_INDEX_SQL", ")", "self", ".", "_logger", ".", "info", "(", "'Finished dropping database indices'", ")" ]
Drops the database indices relating to n-grams.
[ "Drops", "the", "database", "indices", "relating", "to", "n", "-", "grams", "." ]
python
train
52.8
ambitioninc/django-entity
entity/models.py
https://github.com/ambitioninc/django-entity/blob/ebc61f34313c52f4ef5819eb1da25b2ad837e80c/entity/models.py#L128-L145
def is_sub_to_any_kind(self, *super_entity_kinds): """ Find all entities that have super_entities of any of the specified kinds """ if super_entity_kinds: # get the pks of the desired subs from the relationships table if len(super_entity_kinds) == 1: entity_pks = EntityRelationship.objects.filter( super_entity__entity_kind=super_entity_kinds[0] ).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True) else: entity_pks = EntityRelationship.objects.filter( super_entity__entity_kind__in=super_entity_kinds ).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True) # return a queryset limited to only those pks return self.filter(pk__in=entity_pks) else: return self
[ "def", "is_sub_to_any_kind", "(", "self", ",", "*", "super_entity_kinds", ")", ":", "if", "super_entity_kinds", ":", "# get the pks of the desired subs from the relationships table", "if", "len", "(", "super_entity_kinds", ")", "==", "1", ":", "entity_pks", "=", "EntityRelationship", ".", "objects", ".", "filter", "(", "super_entity__entity_kind", "=", "super_entity_kinds", "[", "0", "]", ")", ".", "select_related", "(", "'entity_kind'", ",", "'sub_entity'", ")", ".", "values_list", "(", "'sub_entity'", ",", "flat", "=", "True", ")", "else", ":", "entity_pks", "=", "EntityRelationship", ".", "objects", ".", "filter", "(", "super_entity__entity_kind__in", "=", "super_entity_kinds", ")", ".", "select_related", "(", "'entity_kind'", ",", "'sub_entity'", ")", ".", "values_list", "(", "'sub_entity'", ",", "flat", "=", "True", ")", "# return a queryset limited to only those pks", "return", "self", ".", "filter", "(", "pk__in", "=", "entity_pks", ")", "else", ":", "return", "self" ]
Find all entities that have super_entities of any of the specified kinds
[ "Find", "all", "entities", "that", "have", "super_entities", "of", "any", "of", "the", "specified", "kinds" ]
python
train
50.888889
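A sketch of the queryset method in use, assuming Entity's default manager proxies it and that the named EntityKind rows exist in your database; both are assumptions.

from entity.models import Entity, EntityKind

team = EntityKind.objects.get(name='team')          # example kinds
account = EntityKind.objects.get(name='account')
subs = Entity.objects.is_sub_to_any_kind(team, account)
print(subs.count())    # entities under at least one team or account super-entity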
Ouranosinc/xclim
xclim/utils.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/utils.py#L593-L627
def percentile_doy(arr, window=5, per=.1): """Percentile value for each day of the year Return the climatological percentile over a moving window around each day of the year. Parameters ---------- arr : xarray.DataArray Input data. window : int Number of days around each day of the year to include in the calculation. per : float Percentile between [0,1] Returns ------- xarray.DataArray The percentiles indexed by the day of the year. """ # TODO: Support percentile array, store percentile in coordinates. # This is supported by DataArray.quantile, but not by groupby.reduce. rr = arr.rolling(min_periods=1, center=True, time=window).construct('window') # Create empty percentile array g = rr.groupby('time.dayofyear') p = g.reduce(np.nanpercentile, dim=('time', 'window'), q=per * 100) # The percentile for the 366th day has a sample size of 1/4 of the other days. # To have the same sample size, we interpolate the percentile from 1-365 doy range to 1-366 if p.dayofyear.max() == 366: p = adjust_doy_calendar(p.loc[p.dayofyear < 366], arr) p.attrs.update(arr.attrs.copy()) return p
[ "def", "percentile_doy", "(", "arr", ",", "window", "=", "5", ",", "per", "=", ".1", ")", ":", "# TODO: Support percentile array, store percentile in coordinates.", "# This is supported by DataArray.quantile, but not by groupby.reduce.", "rr", "=", "arr", ".", "rolling", "(", "min_periods", "=", "1", ",", "center", "=", "True", ",", "time", "=", "window", ")", ".", "construct", "(", "'window'", ")", "# Create empty percentile array", "g", "=", "rr", ".", "groupby", "(", "'time.dayofyear'", ")", "p", "=", "g", ".", "reduce", "(", "np", ".", "nanpercentile", ",", "dim", "=", "(", "'time'", ",", "'window'", ")", ",", "q", "=", "per", "*", "100", ")", "# The percentile for the 366th day has a sample size of 1/4 of the other days.", "# To have the same sample size, we interpolate the percentile from 1-365 doy range to 1-366", "if", "p", ".", "dayofyear", ".", "max", "(", ")", "==", "366", ":", "p", "=", "adjust_doy_calendar", "(", "p", ".", "loc", "[", "p", ".", "dayofyear", "<", "366", "]", ",", "arr", ")", "p", ".", "attrs", ".", "update", "(", "arr", ".", "attrs", ".", "copy", "(", ")", ")", "return", "p" ]
Percentile value for each day of the year Return the climatological percentile over a moving window around each day of the year. Parameters ---------- arr : xarray.DataArray Input data. window : int Number of days around each day of the year to include in the calculation. per : float Percentile between [0,1] Returns ------- xarray.DataArray The percentiles indexed by the day of the year.
[ "Percentile", "value", "for", "each", "day", "of", "the", "year" ]
python
train
33.685714
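A self-contained call pattern; the synthetic temperature series exists only to exercise the function, and the import path is taken from the record above.

import numpy as np
import pandas as pd
import xarray as xr
from xclim.utils import percentile_doy

# Ten years of daily values so every day of year has a usable sample.
times = pd.date_range('2000-01-01', '2009-12-31', freq='D')
tas = xr.DataArray(20 + 5 * np.random.randn(times.size),
                   coords={'time': times}, dims='time')
p90 = percentile_doy(tas, window=5, per=0.9)   # 90th percentile per day of year
print(float(p90.sel(dayofyear=180)))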
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L699-L705
def _getFieldsInDB(self, tablename): """get all the fields from a specific table""" SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename array_data = self.execQuery(SQL) return [x[0] for x in array_data]
[ "def", "_getFieldsInDB", "(", "self", ",", "tablename", ")", ":", "SQL", "=", "'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME=\"%s\"'", "%", "tablename", "array_data", "=", "self", ".", "execQuery", "(", "SQL", ")", "return", "[", "x", "[", "0", "]", "for", "x", "in", "array_data", "]" ]
get all the fields from a specific table
[ "get", "all", "the", "fields", "from", "a", "specific", "table" ]
python
train
35.714286
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L100-L168
def main(): """Script entrypoint.""" # Parse the arguments parser = argparse.ArgumentParser( description='Convert MSBuild XML to JSON format') parser.add_argument( '-t', '--toolchain', help='The name of the toolchain', required=True) parser.add_argument( '-o', '--output', help='The output directory', default='') parser.add_argument( '-r', '--overwrite', help='Whether previously output should be overwritten', dest='overwrite', action='store_true') parser.set_defaults(overwrite=False) parser.add_argument( '-d', '--debug', help="Debug tool output", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.WARNING) parser.add_argument( '-v', '--verbose', help="Verbose output", action="store_const", dest="loglevel", const=logging.INFO) parser.add_argument('input', help='The input files', nargs='+') args = parser.parse_args() toolchain = args.toolchain logging.basicConfig(level=args.loglevel) logging.info('Creating %s toolchain files', toolchain) values = {} # Iterate through the inputs for input in args.input: input = __get_path(input) read_msbuild_xml(input, values) # Determine if the output directory needs to be created output_dir = __get_path(args.output) if not os.path.exists(output_dir): os.mkdir(output_dir) logging.info('Created output directory %s', output_dir) for key, value in values.items(): output_path = __output_path(toolchain, key, output_dir) if os.path.exists(output_path) and not args.overwrite: logging.info('Comparing previous output to current') __merge_json_values(value, read_msbuild_json(output_path)) else: logging.info('Original output will be overwritten') logging.info('Writing MS Build JSON file at %s', output_path) __write_json_file(output_path, value)
[ "def", "main", "(", ")", ":", "# Parse the arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Convert MSBuild XML to JSON format'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--toolchain'", ",", "help", "=", "'The name of the toolchain'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "help", "=", "'The output directory'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--overwrite'", ",", "help", "=", "'Whether previously output should be overwritten'", ",", "dest", "=", "'overwrite'", ",", "action", "=", "'store_true'", ")", "parser", ".", "set_defaults", "(", "overwrite", "=", "False", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--debug'", ",", "help", "=", "\"Debug tool output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "DEBUG", ",", "default", "=", "logging", ".", "WARNING", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "help", "=", "\"Verbose output\"", ",", "action", "=", "\"store_const\"", ",", "dest", "=", "\"loglevel\"", ",", "const", "=", "logging", ".", "INFO", ")", "parser", ".", "add_argument", "(", "'input'", ",", "help", "=", "'The input files'", ",", "nargs", "=", "'+'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "toolchain", "=", "args", ".", "toolchain", "logging", ".", "basicConfig", "(", "level", "=", "args", ".", "loglevel", ")", "logging", ".", "info", "(", "'Creating %s toolchain files'", ",", "toolchain", ")", "values", "=", "{", "}", "# Iterate through the inputs", "for", "input", "in", "args", ".", "input", ":", "input", "=", "__get_path", "(", "input", ")", "read_msbuild_xml", "(", "input", ",", "values", ")", "# Determine if the output directory needs to be created", "output_dir", "=", "__get_path", "(", "args", ".", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "mkdir", "(", "output_dir", ")", "logging", ".", "info", "(", "'Created output directory %s'", ",", "output_dir", ")", "for", "key", ",", "value", "in", "values", ".", "items", "(", ")", ":", "output_path", "=", "__output_path", "(", "toolchain", ",", "key", ",", "output_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "output_path", ")", "and", "not", "args", ".", "overwrite", ":", "logging", ".", "info", "(", "'Comparing previous output to current'", ")", "__merge_json_values", "(", "value", ",", "read_msbuild_json", "(", "output_path", ")", ")", "else", ":", "logging", ".", "info", "(", "'Original output will be overwritten'", ")", "logging", ".", "info", "(", "'Writing MS Build JSON file at %s'", ",", "output_path", ")", "__write_json_file", "(", "output_path", ",", "value", ")" ]
Script entrypoint.
[ "Script", "entrypoint", "." ]
python
train
29.304348
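Driven from a shell, the argument parser above maps onto an invocation along these lines, where the toolchain name and XML file names are examples only (add -r to overwrite previous output instead of merging with it):

python cmConvertMSBuildXMLToJSON.py --toolchain v140 --output json/ cl.xml link.xml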
singingwolfboy/flask-dance
flask_dance/contrib/slack.py
https://github.com/singingwolfboy/flask-dance/blob/87d45328bbdaff833559a6d3da71461fe4579592/flask_dance/contrib/slack.py#L22-L88
def make_slack_blueprint( client_id=None, client_secret=None, scope=None, redirect_url=None, redirect_to=None, login_url=None, authorized_url=None, session_class=None, storage=None, ): """ Make a blueprint for authenticating with Slack using OAuth 2. This requires a client ID and client secret from Slack. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and :envvar:`SLACK_OAUTH_CLIENT_SECRET`. Args: client_id (str): The client ID for your application on Slack. client_secret (str): The client secret for your application on Slack scope (str, optional): comma-separated list of scopes for the OAuth token redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/slack`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/slack/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.consumer.requests.OAuth2Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. """ scope = scope or ["identify", "chat:write:bot"] slack_bp = SlackBlueprint( "slack", __name__, client_id=client_id, client_secret=client_secret, scope=scope, base_url="https://slack.com/api/", authorization_url="https://slack.com/oauth/authorize", token_url="https://slack.com/api/oauth.access", redirect_url=redirect_url, redirect_to=redirect_to, login_url=login_url, authorized_url=authorized_url, session_class=session_class, storage=storage, ) slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID" slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET" @slack_bp.before_app_request def set_applocal_session(): ctx = stack.top ctx.slack_oauth = slack_bp.session return slack_bp
[ "def", "make_slack_blueprint", "(", "client_id", "=", "None", ",", "client_secret", "=", "None", ",", "scope", "=", "None", ",", "redirect_url", "=", "None", ",", "redirect_to", "=", "None", ",", "login_url", "=", "None", ",", "authorized_url", "=", "None", ",", "session_class", "=", "None", ",", "storage", "=", "None", ",", ")", ":", "scope", "=", "scope", "or", "[", "\"identify\"", ",", "\"chat:write:bot\"", "]", "slack_bp", "=", "SlackBlueprint", "(", "\"slack\"", ",", "__name__", ",", "client_id", "=", "client_id", ",", "client_secret", "=", "client_secret", ",", "scope", "=", "scope", ",", "base_url", "=", "\"https://slack.com/api/\"", ",", "authorization_url", "=", "\"https://slack.com/oauth/authorize\"", ",", "token_url", "=", "\"https://slack.com/api/oauth.access\"", ",", "redirect_url", "=", "redirect_url", ",", "redirect_to", "=", "redirect_to", ",", "login_url", "=", "login_url", ",", "authorized_url", "=", "authorized_url", ",", "session_class", "=", "session_class", ",", "storage", "=", "storage", ",", ")", "slack_bp", ".", "from_config", "[", "\"client_id\"", "]", "=", "\"SLACK_OAUTH_CLIENT_ID\"", "slack_bp", ".", "from_config", "[", "\"client_secret\"", "]", "=", "\"SLACK_OAUTH_CLIENT_SECRET\"", "@", "slack_bp", ".", "before_app_request", "def", "set_applocal_session", "(", ")", ":", "ctx", "=", "stack", ".", "top", "ctx", ".", "slack_oauth", "=", "slack_bp", ".", "session", "return", "slack_bp" ]
Make a blueprint for authenticating with Slack using OAuth 2. This requires a client ID and client secret from Slack. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`SLACK_OAUTH_CLIENT_ID` and :envvar:`SLACK_OAUTH_CLIENT_SECRET`. Args: client_id (str): The client ID for your application on Slack. client_secret (str): The client secret for your application on Slack scope (str, optional): comma-separated list of scopes for the OAuth token redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/slack`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/slack/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.consumer.requests.OAuth2Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
[ "Make", "a", "blueprint", "for", "authenticating", "with", "Slack", "using", "OAuth", "2", ".", "This", "requires", "a", "client", "ID", "and", "client", "secret", "from", "Slack", ".", "You", "should", "either", "pass", "them", "to", "this", "constructor", "or", "make", "sure", "that", "your", "Flask", "application", "config", "defines", "them", "using", "the", "variables", ":", "envvar", ":", "SLACK_OAUTH_CLIENT_ID", "and", ":", "envvar", ":", "SLACK_OAUTH_CLIENT_SECRET", "." ]
python
train
40.701493
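The blueprint above follows the usual flask-dance wiring; a minimal app might look like the sketch below. The secret key and OAuth credentials are placeholders (normally supplied via the SLACK_OAUTH_* config variables), and the slack proxy is the session object the module exposes alongside the factory.

from flask import Flask
from flask_dance.contrib.slack import make_slack_blueprint, slack

app = Flask(__name__)
app.secret_key = 'dev-only-secret'                           # placeholder
blueprint = make_slack_blueprint(client_id='my-id',          # placeholders; prefer
                                 client_secret='my-secret')  # app config in production
app.register_blueprint(blueprint, url_prefix='/login')

@app.route('/')
def index():
    # Post through the authenticated OAuth session the blueprint maintains.
    resp = slack.post('chat.postMessage',
                      data={'channel': '#general', 'text': 'hello'})
    return resp.text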
gccxml/pygccxml
pygccxml/declarations/pointer_traits.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/pointer_traits.py#L68-L79
def value_type(type_): """returns reference to `boost::shared_ptr` \ or `std::shared_ptr` value type""" if not smart_pointer_traits.is_smart_pointer(type_): raise TypeError( 'Type "%s" is not an instantiation of \ boost::shared_ptr or std::shared_ptr' % type_.decl_string) try: return internal_type_traits.get_by_name(type_, "element_type") except runtime_errors.declaration_not_found_t: return _search_in_bases(type_)
[ "def", "value_type", "(", "type_", ")", ":", "if", "not", "smart_pointer_traits", ".", "is_smart_pointer", "(", "type_", ")", ":", "raise", "TypeError", "(", "'Type \"%s\" is not an instantiation of \\\n boost::shared_ptr or std::shared_ptr'", "%", "type_", ".", "decl_string", ")", "try", ":", "return", "internal_type_traits", ".", "get_by_name", "(", "type_", ",", "\"element_type\"", ")", "except", "runtime_errors", ".", "declaration_not_found_t", ":", "return", "_search_in_bases", "(", "type_", ")" ]
returns reference to `boost::shared_ptr` \ or `std::shared_ptr` value type
[ "returns", "reference", "to", "boost", "::", "shared_ptr", "\\", "or", "std", "::", "shared_ptr", "value", "type" ]
python
train
44.25
IvanMalison/okcupyd
okcupyd/util/fetchable.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/util/fetchable.py#L137-L160
def refresh(self, nice_repr=True, **kwargs): """ :param nice_repr: Append the repr of a list containing the items that have been fetched to this point by the fetcher. :type nice_repr: bool :param kwargs: kwargs that should be passed to the fetcher when its fetch method is called. These are merged with the values provided to the constructor, with the ones provided here taking precedence if there is a conflict. """ for key, value in self._kwargs.items(): kwargs.setdefault(key, value) # No real good reason to hold on to this. DONT TOUCH. self._original_iterable = self._fetcher.fetch(**kwargs) self.exhausted = False if nice_repr: self._accumulated = [] self._original_iterable = self._make_nice_repr_iterator( self._original_iterable, self._accumulated ) else: self._accumulated = None self._clonable, = itertools.tee(self._original_iterable, 1) return self
[ "def", "refresh", "(", "self", ",", "nice_repr", "=", "True", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "self", ".", "_kwargs", ".", "items", "(", ")", ":", "kwargs", ".", "setdefault", "(", "key", ",", "value", ")", "# No real good reason to hold on to this. DONT TOUCH.", "self", ".", "_original_iterable", "=", "self", ".", "_fetcher", ".", "fetch", "(", "*", "*", "kwargs", ")", "self", ".", "exhausted", "=", "False", "if", "nice_repr", ":", "self", ".", "_accumulated", "=", "[", "]", "self", ".", "_original_iterable", "=", "self", ".", "_make_nice_repr_iterator", "(", "self", ".", "_original_iterable", ",", "self", ".", "_accumulated", ")", "else", ":", "self", ".", "_accumulated", "=", "None", "self", ".", "_clonable", ",", "=", "itertools", ".", "tee", "(", "self", ".", "_original_iterable", ",", "1", ")", "return", "self" ]
:param nice_repr: Append the repr of a list containing the items that have been fetched to this point by the fetcher. :type nice_repr: bool :param kwargs: kwargs that should be passed to the fetcher when its fetch method is called. These are merged with the values provided to the constructor, with the ones provided here taking precedence if there is a conflict.
[ ":", "param", "nice_repr", ":", "Append", "the", "repr", "of", "a", "list", "containing", "the", "items", "that", "have", "been", "fetched", "to", "this", "point", "by", "the", "fetcher", ".", ":", "type", "nice_repr", ":", "bool", ":", "param", "kwargs", ":", "kwargs", "that", "should", "be", "passed", "to", "the", "fetcher", "when", "its", "fetch", "method", "is", "called", ".", "These", "are", "merged", "with", "the", "values", "provided", "to", "the", "constructor", "with", "the", "ones", "provided", "here", "taking", "precedence", "if", "there", "is", "a", "conflict", "." ]
python
train
46.375
bmcfee/pumpp
pumpp/feature/cqt.py
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L141-L160
def transform_audio(self, y): '''Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential ''' data = super(CQTPhaseDiff, self).transform_audio(y) data['dphase'] = self.phase_diff(data.pop('phase')) return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "CQTPhaseDiff", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", "[", "'dphase'", "]", "=", "self", ".", "phase_diff", "(", "data", ".", "pop", "(", "'phase'", ")", ")", "return", "data" ]
Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential
[ "Compute", "the", "CQT", "with", "unwrapped", "phase" ]
python
train
27.95
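A hedged sketch of computing the magnitude/phase-differential pair: the constructor arguments follow pumpp's CQT base class, the import path is assumed from the record, and 'clip.wav' is a placeholder file.

import librosa
from pumpp.feature import CQTPhaseDiff   # import path assumed

ext = CQTPhaseDiff(name='cqt', sr=22050, hop_length=512)   # args per the CQT base class
y, _ = librosa.load('clip.wav', sr=22050)
data = ext.transform_audio(y)
print(data['mag'].shape, data['dphase'].shape)             # (n_frames, n_bins) each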
etal/biofrills
biofrills/alnutils.py
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L86-L92
def col_counts(col, weights=None, gap_chars='-.'):
    """Absolute counts of each residue type in a single column."""
    if weights is None:
        # Default to unit weights so plain residue counts are returned.
        weights = [1.0] * len(col)
    cnt = defaultdict(float)
    for aa, wt in zip(col, weights):
        if aa not in gap_chars:
            cnt[aa] += wt
    return cnt
[ "def", "col_counts", "(", "col", ",", "weights", "=", "None", ",", "gap_chars", "=", "'-.'", ")", ":", "cnt", "=", "defaultdict", "(", "float", ")", "for", "aa", ",", "wt", "in", "zip", "(", "col", ",", "weights", ")", ":", "if", "aa", "not", "in", "gap_chars", ":", "cnt", "[", "aa", "]", "+=", "wt", "return", "cnt" ]
Absolute counts of each residue type in a single column.
[ "Absolute", "counts", "of", "each", "residue", "type", "in", "a", "single", "column", "." ]
python
train
35.714286
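With the default-weights fix above, a weighted column tallies like this (module path per the record):

from biofrills.alnutils import col_counts

col = list('AAG-C')
weights = [1.0, 1.0, 0.5, 1.0, 1.0]
print(dict(col_counts(col, weights)))   # {'A': 2.0, 'G': 0.5, 'C': 1.0}; gap '-' skipped
print(dict(col_counts(col)))            # unit weights: {'A': 2.0, 'G': 1.0, 'C': 1.0}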
beetbox/audioread
audioread/__init__.py
https://github.com/beetbox/audioread/blob/c8bedf7880f13a7b7488b108aaf245d648674818/audioread/__init__.py#L92-L116
def audio_open(path, backends=None): """Open an audio file using a library that is available on this system. The optional `backends` parameter can be a list of audio file classes to try opening the file with. If it is not provided, `audio_open` tries all available backends. If you call this function many times, you can avoid the cost of checking for available backends every time by calling `available_backends` once and passing the result to each `audio_open` call. If all backends fail to read the file, a NoBackendError exception is raised. """ if backends is None: backends = available_backends() for BackendClass in backends: try: return BackendClass(path) except DecodeError: pass # All backends failed! raise NoBackendError()
[ "def", "audio_open", "(", "path", ",", "backends", "=", "None", ")", ":", "if", "backends", "is", "None", ":", "backends", "=", "available_backends", "(", ")", "for", "BackendClass", "in", "backends", ":", "try", ":", "return", "BackendClass", "(", "path", ")", "except", "DecodeError", ":", "pass", "# All backends failed!", "raise", "NoBackendError", "(", ")" ]
Open an audio file using a library that is available on this system. The optional `backends` parameter can be a list of audio file classes to try opening the file with. If it is not provided, `audio_open` tries all available backends. If you call this function many times, you can avoid the cost of checking for available backends every time by calling `available_backends` once and passing the result to each `audio_open` call. If all backends fail to read the file, a NoBackendError exception is raised.
[ "Open", "an", "audio", "file", "using", "a", "library", "that", "is", "available", "on", "this", "system", "." ]
python
train
32.88
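The canonical call pattern for the opener above; 'song.mp3' is a placeholder path, and any file a backend can decode works.

import audioread

with audioread.audio_open('song.mp3') as f:
    print(f.channels, f.samplerate, f.duration)
    for buf in f:     # buffers of raw 16-bit little-endian PCM
        pass          # hand each buffer to your consumer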
visualfabriq/bquery
bquery/ctable.py
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L742-L819
def where_terms_factorization_check(self, term_list):
        """
        check for where terms if they are applicable
        Create a boolean array where `term_list` is true.
        A terms list has a [(col, operator, value), ..] construction.
        Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]

        :param term_list:
        :param outcols:
        :param limit:
        :param skip:
        :return: :raise ValueError:
        """

        if type(term_list) not in [list, set, tuple]:
            raise ValueError("Only term lists are supported")

        valid = True

        for term in term_list:
            # get terms
            filter_col = term[0]
            filter_operator = term[1].lower().strip(' ')
            filter_value = term[2]

            # check values
            if filter_col not in self.cols:
                raise KeyError(unicode(filter_col) + ' not in table')

            col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
            if not os.path.exists(col_values_rootdir):
                # no factorization available
                break
            col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
            col_values = set(col_carray)

            if filter_operator in ['in', 'not in', 'nin']:
                if type(filter_value) not in [list, set, tuple]:
                    raise ValueError("In selections need lists, sets or tuples")
                if len(filter_value) < 1:
                    raise ValueError("A value list needs to have values")

                # optimize lists of 1 value
                if len(filter_value) == 1:
                    filter_value = filter_value[0]
                    if filter_operator == 'in':
                        filter_operator = '=='
                    else:
                        filter_operator = '!='
                else:
                    filter_value = set(filter_value)

            if filter_operator in ['==', 'eq']:
                valid = filter_value in col_values
            elif filter_operator in ['!=', 'neq']:
                valid = any(val for val in col_values if val != filter_value)
            elif filter_operator in ['in']:
                valid = any(val for val in filter_value if val in col_values)
            elif filter_operator in ['nin', 'not in']:
                valid = any(val for val in col_values if val not in filter_value)
            elif filter_operator in ['>']:
                valid = any(val for val in col_values if val > filter_value)
            elif filter_operator in ['>=']:
                valid = any(val for val in col_values if val >= filter_value)
            elif filter_operator in ['<']:
                valid = any(val for val in col_values if val < filter_value)
            elif filter_operator in ['<=']:
                valid = any(val for val in col_values if val <= filter_value)
            else:
                raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')

            # if one of the filters is blocking, we can stop
            if not valid:
                break

        return valid
[ "def", "where_terms_factorization_check", "(", "self", ",", "term_list", ")", ":", "if", "type", "(", "term_list", ")", "not", "in", "[", "list", ",", "set", ",", "tuple", "]", ":", "raise", "ValueError", "(", "\"Only term lists are supported\"", ")", "valid", "=", "True", "for", "term", "in", "term_list", ":", "# get terms", "filter_col", "=", "term", "[", "0", "]", "filter_operator", "=", "term", "[", "1", "]", ".", "lower", "(", ")", ".", "strip", "(", "' '", ")", "filter_value", "=", "term", "[", "2", "]", "# check values", "if", "filter_col", "not", "in", "self", ".", "cols", ":", "raise", "KeyError", "(", "unicode", "(", "filter_col", ")", "+", "' not in table'", ")", "col_values_rootdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "rootdir", ",", "filter_col", "+", "'.values'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "col_values_rootdir", ")", ":", "# no factorization available", "break", "col_carray", "=", "bcolz", ".", "carray", "(", "rootdir", "=", "col_values_rootdir", ",", "mode", "=", "'r'", ")", "col_values", "=", "set", "(", "col_carray", ")", "if", "filter_operator", "in", "[", "'in'", ",", "'not in'", ",", "'nin'", "]", ":", "if", "type", "(", "filter_value", ")", "not", "in", "[", "list", ",", "set", ",", "tuple", "]", ":", "raise", "ValueError", "(", "\"In selections need lists, sets or tuples\"", ")", "if", "len", "(", "filter_value", ")", "<", "1", ":", "raise", "ValueError", "(", "\"A value list needs to have values\"", ")", "# optimize lists of 1 value", "if", "len", "(", "filter_value", ")", "==", "1", ":", "filter_value", "=", "filter_value", "[", "0", "]", "if", "filter_operator", "==", "'in'", ":", "filter_operator", "=", "'=='", "else", ":", "filter_operator", "=", "'!='", "else", ":", "filter_value", "=", "set", "(", "filter_value", ")", "if", "filter_operator", "in", "[", "'=='", ",", "'eq'", "]", ":", "valid", "=", "filter_value", "in", "col_values", "elif", "filter_operator", "in", "[", "'!='", ",", "'neq'", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", "!=", "filter_value", ")", "elif", "filter_operator", "in", "[", "'in'", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "filter_value", "if", "val", "in", "col_values", ")", "elif", "filter_operator", "in", "[", "'nin'", ",", "'not in'", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", "not", "in", "filter_value", ")", "elif", "filter_operator", "in", "[", "'>'", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", ">", "filter_value", ")", "elif", "filter_operator", "in", "[", "'>='", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", ">=", "filter_value", ")", "elif", "filter_operator", "in", "[", "'<'", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", "<", "filter_value", ")", "elif", "filter_operator", "in", "[", "'<='", "]", ":", "valid", "=", "any", "(", "val", "for", "val", "in", "col_values", "if", "val", ">=", "filter_value", ")", "else", ":", "raise", "KeyError", "(", "str", "(", "filter_operator", ")", "+", "' is not an accepted operator for filtering'", ")", "# if one of the filters is blocking, we can stop", "if", "not", "valid", ":", "break", "return", "valid" ]
check for where terms if they are applicable Create a boolean array where `term_list` is true. A terms list has a [(col, operator, value), ..] construction. Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])] :param term_list: :param outcols: :param limit: :param skip: :return: :raise ValueError:
[ "check", "for", "where", "terms", "if", "they", "are", "applicable", "Create", "a", "boolean", "array", "where", "term_list", "is", "true", ".", "A", "terms", "list", "has", "a", "[", "(", "col", "operator", "value", ")", "..", "]", "construction", ".", "Eg", ".", "[", "(", "sales", ">", "2", ")", "(", "state", "in", "[", "IL", "AR", "]", ")", "]" ]
python
train
39.589744
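Term lists mirror the docstring's shape; a hedged pre-check before an expensive filtered read might look like this, where ct stands in for an existing on-disk bquery ctable and the follow-up method name is assumed from the class's API.

terms = [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
if ct.where_terms_factorization_check(terms):
    result = list(ct.where_terms(terms))   # method name assumed
else:
    result = []   # no factorized value can match; skip the scan entirely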
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_slipmap_ui.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_slipmap_ui.py#L324-L360
def update_position(self): '''update position text''' state = self.state pos = self.mouse_pos newtext = '' alt = 0 if pos is not None: (lat,lon) = self.coordinates(pos.x, pos.y) newtext += 'Cursor: %f %f (%s)' % (lat, lon, mp_util.latlon_to_grid((lat, lon))) if state.elevation: alt = self.ElevationMap.GetElevation(lat, lon) if alt is not None: newtext += ' %.1fm' % alt state.mt.set_download(state.download) pending = 0 if state.download: pending = state.mt.tiles_pending() if pending: newtext += ' Map Downloading %u ' % pending if alt == -1: newtext += ' SRTM Downloading ' newtext += '\n' if self.click_pos is not None: newtext += 'Click: %f %f (%s %s) (%s)' % (self.click_pos[0], self.click_pos[1], mp_util.degrees_to_dms(self.click_pos[0]), mp_util.degrees_to_dms(self.click_pos[1]), mp_util.latlon_to_grid(self.click_pos)) if self.last_click_pos is not None: distance = mp_util.gps_distance(self.last_click_pos[0], self.last_click_pos[1], self.click_pos[0], self.click_pos[1]) bearing = mp_util.gps_bearing(self.last_click_pos[0], self.last_click_pos[1], self.click_pos[0], self.click_pos[1]) newtext += ' Distance: %.1fm Bearing %.1f' % (distance, bearing) if newtext != state.oldtext: self.position.Clear() self.position.WriteText(newtext) state.oldtext = newtext
[ "def", "update_position", "(", "self", ")", ":", "state", "=", "self", ".", "state", "pos", "=", "self", ".", "mouse_pos", "newtext", "=", "''", "alt", "=", "0", "if", "pos", "is", "not", "None", ":", "(", "lat", ",", "lon", ")", "=", "self", ".", "coordinates", "(", "pos", ".", "x", ",", "pos", ".", "y", ")", "newtext", "+=", "'Cursor: %f %f (%s)'", "%", "(", "lat", ",", "lon", ",", "mp_util", ".", "latlon_to_grid", "(", "(", "lat", ",", "lon", ")", ")", ")", "if", "state", ".", "elevation", ":", "alt", "=", "self", ".", "ElevationMap", ".", "GetElevation", "(", "lat", ",", "lon", ")", "if", "alt", "is", "not", "None", ":", "newtext", "+=", "' %.1fm'", "%", "alt", "state", ".", "mt", ".", "set_download", "(", "state", ".", "download", ")", "pending", "=", "0", "if", "state", ".", "download", ":", "pending", "=", "state", ".", "mt", ".", "tiles_pending", "(", ")", "if", "pending", ":", "newtext", "+=", "' Map Downloading %u '", "%", "pending", "if", "alt", "==", "-", "1", ":", "newtext", "+=", "' SRTM Downloading '", "newtext", "+=", "'\\n'", "if", "self", ".", "click_pos", "is", "not", "None", ":", "newtext", "+=", "'Click: %f %f (%s %s) (%s)'", "%", "(", "self", ".", "click_pos", "[", "0", "]", ",", "self", ".", "click_pos", "[", "1", "]", ",", "mp_util", ".", "degrees_to_dms", "(", "self", ".", "click_pos", "[", "0", "]", ")", ",", "mp_util", ".", "degrees_to_dms", "(", "self", ".", "click_pos", "[", "1", "]", ")", ",", "mp_util", ".", "latlon_to_grid", "(", "self", ".", "click_pos", ")", ")", "if", "self", ".", "last_click_pos", "is", "not", "None", ":", "distance", "=", "mp_util", ".", "gps_distance", "(", "self", ".", "last_click_pos", "[", "0", "]", ",", "self", ".", "last_click_pos", "[", "1", "]", ",", "self", ".", "click_pos", "[", "0", "]", ",", "self", ".", "click_pos", "[", "1", "]", ")", "bearing", "=", "mp_util", ".", "gps_bearing", "(", "self", ".", "last_click_pos", "[", "0", "]", ",", "self", ".", "last_click_pos", "[", "1", "]", ",", "self", ".", "click_pos", "[", "0", "]", ",", "self", ".", "click_pos", "[", "1", "]", ")", "newtext", "+=", "' Distance: %.1fm Bearing %.1f'", "%", "(", "distance", ",", "bearing", ")", "if", "newtext", "!=", "state", ".", "oldtext", ":", "self", ".", "position", ".", "Clear", "(", ")", "self", ".", "position", ".", "WriteText", "(", "newtext", ")", "state", ".", "oldtext", "=", "newtext" ]
update position text
[ "update", "position", "text" ]
python
train
49.081081
SciTools/biggus
biggus/_init.py
https://github.com/SciTools/biggus/blob/0a76fbe7806dd6295081cd399bcb76135d834d25/biggus/_init.py#L3198-L3232
def _sliced_shape(shape, keys): """ Returns the shape that results from slicing an array of the given shape by the given keys. >>> _sliced_shape(shape=(52350, 70, 90, 180), ... keys=(np.newaxis, slice(None, 10), 3, ... slice(None), slice(2, 3))) (1, 10, 90, 1) """ keys = _full_keys(keys, len(shape)) sliced_shape = [] shape_dim = -1 for key in keys: shape_dim += 1 if _is_scalar(key): continue elif isinstance(key, slice): size = len(range(*key.indices(shape[shape_dim]))) sliced_shape.append(size) elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'): # Numpy boolean indexing. sliced_shape.append(builtins.sum(key)) elif isinstance(key, (tuple, np.ndarray)): sliced_shape.append(len(key)) elif key is np.newaxis: shape_dim -= 1 sliced_shape.append(1) else: raise ValueError('Invalid indexing object "{}"'.format(key)) sliced_shape = tuple(sliced_shape) return sliced_shape
[ "def", "_sliced_shape", "(", "shape", ",", "keys", ")", ":", "keys", "=", "_full_keys", "(", "keys", ",", "len", "(", "shape", ")", ")", "sliced_shape", "=", "[", "]", "shape_dim", "=", "-", "1", "for", "key", "in", "keys", ":", "shape_dim", "+=", "1", "if", "_is_scalar", "(", "key", ")", ":", "continue", "elif", "isinstance", "(", "key", ",", "slice", ")", ":", "size", "=", "len", "(", "range", "(", "*", "key", ".", "indices", "(", "shape", "[", "shape_dim", "]", ")", ")", ")", "sliced_shape", ".", "append", "(", "size", ")", "elif", "isinstance", "(", "key", ",", "np", ".", "ndarray", ")", "and", "key", ".", "dtype", "==", "np", ".", "dtype", "(", "'bool'", ")", ":", "# Numpy boolean indexing.", "sliced_shape", ".", "append", "(", "builtins", ".", "sum", "(", "key", ")", ")", "elif", "isinstance", "(", "key", ",", "(", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "sliced_shape", ".", "append", "(", "len", "(", "key", ")", ")", "elif", "key", "is", "np", ".", "newaxis", ":", "shape_dim", "-=", "1", "sliced_shape", ".", "append", "(", "1", ")", "else", ":", "raise", "ValueError", "(", "'Invalid indexing object \"{}\"'", ".", "format", "(", "key", ")", ")", "sliced_shape", "=", "tuple", "(", "sliced_shape", ")", "return", "sliced_shape" ]
Returns the shape that results from slicing an array of the given shape by the given keys. >>> _sliced_shape(shape=(52350, 70, 90, 180), ... keys=(np.newaxis, slice(None, 10), 3, ... slice(None), slice(2, 3))) (1, 10, 90, 1)
[ "Returns", "the", "shape", "that", "results", "from", "slicing", "an", "array", "of", "the", "given", "shape", "by", "the", "given", "keys", "." ]
python
train
31.8
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py#L237-L250
def _tab_pressed(self): """ Called when the tab key is pressed. Returns whether to continue processing the event. """ # Perform tab completion if: # 1) The cursor is in the input buffer. # 2) There is a non-whitespace character before the cursor. text = self._get_input_buffer_cursor_line() if text is None: return False complete = bool(text[:self._get_input_buffer_cursor_column()].strip()) if complete: self._complete() return not complete
[ "def", "_tab_pressed", "(", "self", ")", ":", "# Perform tab completion if:", "# 1) The cursor is in the input buffer.", "# 2) There is a non-whitespace character before the cursor.", "text", "=", "self", ".", "_get_input_buffer_cursor_line", "(", ")", "if", "text", "is", "None", ":", "return", "False", "complete", "=", "bool", "(", "text", "[", ":", "self", ".", "_get_input_buffer_cursor_column", "(", ")", "]", ".", "strip", "(", ")", ")", "if", "complete", ":", "self", ".", "_complete", "(", ")", "return", "not", "complete" ]
Called when the tab key is pressed. Returns whether to continue processing the event.
[ "Called", "when", "the", "tab", "key", "is", "pressed", ".", "Returns", "whether", "to", "continue", "processing", "the", "event", "." ]
python
test
38.857143
sibirrer/lenstronomy
lenstronomy/Util/image_util.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/image_util.py#L165-L182
def coordInImage(x_coord, y_coord, numPix, deltapix):
    """
    checks whether image positions are within the pixel image in units of arcsec
    if not: remove it

    :param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
    :type imcoord: (n,4) numpy array
    :returns: image positions within the pixel image
    """
    idex=[]
    min = -deltapix*numPix/2
    max = deltapix*numPix/2
    for i in range(len(x_coord)): #sum over image positions
        if (x_coord[i] < min or x_coord[i] > max or y_coord[i] < min or y_coord[i] > max):
            idex.append(i)
    x_coord = np.delete(x_coord, idex, axis=0)
    y_coord = np.delete(y_coord, idex, axis=0)
    return x_coord, y_coord
[ "def", "coordInImage", "(", "x_coord", ",", "y_coord", ",", "numPix", ",", "deltapix", ")", ":", "idex", "=", "[", "]", "min", "=", "-", "deltapix", "*", "numPix", "/", "2", "max", "=", "deltapix", "*", "numPix", "/", "2", "for", "i", "in", "range", "(", "len", "(", "x_coord", ")", ")", ":", "#sum over image positions", "if", "(", "x_coord", "[", "i", "]", "<", "min", "or", "x_coord", "[", "i", "]", ">", "max", "or", "y_coord", "[", "i", "]", "<", "min", "or", "y_coord", "[", "i", "]", ">", "max", ")", ":", "idex", ".", "append", "(", "i", ")", "x_coord", "=", "np", ".", "delete", "(", "x_coord", ",", "idex", ",", "axis", "=", "0", ")", "y_coord", "=", "np", ".", "delete", "(", "y_coord", ",", "idex", ",", "axis", "=", "0", ")", "return", "x_coord", ",", "y_coord" ]
checks whether image positions are within the pixel image in units of arcsec
if not: remove it

:param imcoord: image coordinate (in units of angles) [[x,y,delta,magnification][...]]
:type imcoord: (n,4) numpy array
:returns: image positions within the pixel image
[ "checks", "whether", "image", "positions", "are", "within", "the", "pixel", "image", "in", "units", "of", "arcsec", "if", "not", ":", "remove", "it" ]
python
train
39.277778
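A small numeric illustration of the clipping behaviour, with arbitrarily chosen values and the import path taken from the record:

import numpy as np
from lenstronomy.Util.image_util import coordInImage

x = np.array([-1.2, 0.3, 5.0])
y = np.array([0.0, 0.1, 0.2])
# A 100-pixel grid at 0.05"/pixel spans [-2.5, 2.5] arcsec per axis,
# so the (5.0, 0.2) position falls outside and is dropped.
x_in, y_in = coordInImage(x, y, numPix=100, deltapix=0.05)
print(x_in, y_in)   # [-1.2  0.3] [0.  0.1]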
tensorflow/tensorboard
tensorboard/plugins/graph/keras_util.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/keras_util.py#L114-L177
def _update_dicts(name_scope,
                  model_layer,
                  input_to_in_layer,
                  model_name_to_output,
                  prev_node_name):
  """Updates input_to_in_layer, model_name_to_output, and prev_node_name
     based on the model_layer.

  Args:
    name_scope: a string representing a scope name, similar to that of tf.name_scope.
    model_layer: a dict representing a Keras model configuration.
    input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
    model_name_to_output: a dict mapping Keras Model name to output layer of the model.
    prev_node_name: a string representing a previous, in sequential model layout, node name.

  Returns:
    A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
    input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
    model_name_to_output: a dict mapping Keras Model name to output layer of the model.
    prev_node_name: a string representing a previous, in sequential model layout, node name.
  """
  layer_config = model_layer.get('config')
  if not layer_config.get('layers'):
    raise ValueError('layer is not a model.')

  node_name = _scoped_name(name_scope, layer_config.get('name'))

  input_layers = layer_config.get('input_layers')
  output_layers = layer_config.get('output_layers')
  inbound_nodes = model_layer.get('inbound_nodes')

  is_functional_model = bool(input_layers and output_layers)
  # In case of [1] and the parent model is functional, current layer
  # will have the 'inbound_nodes' property.
  is_parent_functional_model = bool(inbound_nodes)

  if is_parent_functional_model and is_functional_model:
    for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
      input_layer_name = _scoped_name(node_name, input_layer)
      inbound_node_name = _scoped_name(name_scope, inbound_node[0])
      input_to_in_layer[input_layer_name] = inbound_node_name
  elif is_parent_functional_model and not is_functional_model:
    # Sequential model can take only one input. Make sure inbound to the
    # model is linked to the first layer in the Sequential model.
    prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0][0])
  elif not is_parent_functional_model and prev_node_name and is_functional_model:
    assert len(input_layers) == 1, (
        'Cannot have multi-input Functional model when parent model '
        'is not Functional. Number of input layers: %d' % len(input_layers))
    input_layer = input_layers[0]
    input_layer_name = _scoped_name(node_name, input_layer)
    input_to_in_layer[input_layer_name] = prev_node_name

  if is_functional_model and output_layers:
    layers = _norm_to_list_of_layers(output_layers)
    layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
    model_name_to_output[node_name] = layer_names
  else:
    last_layer = layer_config.get('layers')[-1]
    last_layer_name = last_layer.get('config').get('name')
    output_node = _scoped_name(node_name, last_layer_name)
    model_name_to_output[node_name] = [output_node]
  return (input_to_in_layer, model_name_to_output, prev_node_name)
[ "def", "_update_dicts", "(", "name_scope", ",", "model_layer", ",", "input_to_in_layer", ",", "model_name_to_output", ",", "prev_node_name", ")", ":", "layer_config", "=", "model_layer", ".", "get", "(", "'config'", ")", "if", "not", "layer_config", ".", "get", "(", "'layers'", ")", ":", "raise", "ValueError", "(", "'layer is not a model.'", ")", "node_name", "=", "_scoped_name", "(", "name_scope", ",", "layer_config", ".", "get", "(", "'name'", ")", ")", "input_layers", "=", "layer_config", ".", "get", "(", "'input_layers'", ")", "output_layers", "=", "layer_config", ".", "get", "(", "'output_layers'", ")", "inbound_nodes", "=", "model_layer", ".", "get", "(", "'inbound_nodes'", ")", "is_functional_model", "=", "bool", "(", "input_layers", "and", "output_layers", ")", "# In case of [1] and the parent model is functional, current layer", "# will have the 'inbound_nodes' property.", "is_parent_functional_model", "=", "bool", "(", "inbound_nodes", ")", "if", "is_parent_functional_model", "and", "is_functional_model", ":", "for", "(", "input_layer", ",", "inbound_node", ")", "in", "zip", "(", "input_layers", ",", "inbound_nodes", ")", ":", "input_layer_name", "=", "_scoped_name", "(", "node_name", ",", "input_layer", ")", "inbound_node_name", "=", "_scoped_name", "(", "name_scope", ",", "inbound_node", "[", "0", "]", ")", "input_to_in_layer", "[", "input_layer_name", "]", "=", "inbound_node_name", "elif", "is_parent_functional_model", "and", "not", "is_functional_model", ":", "# Sequential model can take only one input. Make sure inbound to the", "# model is linked to the first layer in the Sequential model.", "prev_node_name", "=", "_scoped_name", "(", "name_scope", ",", "inbound_nodes", "[", "0", "]", "[", "0", "]", "[", "0", "]", ")", "elif", "not", "is_parent_functional_model", "and", "prev_node_name", "and", "is_functional_model", ":", "assert", "len", "(", "input_layers", ")", "==", "1", ",", "(", "'Cannot have multi-input Functional model when parent model '", "'is not Functional. Number of input layers: %d'", "%", "len", "(", "input_layer", ")", ")", "input_layer", "=", "input_layers", "[", "0", "]", "input_layer_name", "=", "_scoped_name", "(", "node_name", ",", "input_layer", ")", "input_to_in_layer", "[", "input_layer_name", "]", "=", "prev_node_name", "if", "is_functional_model", "and", "output_layers", ":", "layers", "=", "_norm_to_list_of_layers", "(", "output_layers", ")", "layer_names", "=", "[", "_scoped_name", "(", "node_name", ",", "layer", "[", "0", "]", ")", "for", "layer", "in", "layers", "]", "model_name_to_output", "[", "node_name", "]", "=", "layer_names", "else", ":", "last_layer", "=", "layer_config", ".", "get", "(", "'layers'", ")", "[", "-", "1", "]", "last_layer_name", "=", "last_layer", ".", "get", "(", "'config'", ")", ".", "get", "(", "'name'", ")", "output_node", "=", "_scoped_name", "(", "node_name", ",", "last_layer_name", ")", "model_name_to_output", "[", "node_name", "]", "=", "[", "output_node", "]", "return", "(", "input_to_in_layer", ",", "model_name_to_output", ",", "prev_node_name", ")" ]
Updates input_to_in_layer, model_name_to_output, and prev_node_name based on the model_layer. Args: name_scope: a string representing a scope name, similar to that of tf.name_scope. model_layer: a dict representing a Keras model configuration. input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer. model_name_to_output: a dict mapping Keras Model name to output layer of the model. prev_node_name: a string representing a previous, in sequential model layout, node name. Returns: A tuple of (input_to_in_layer, model_name_to_output, prev_node_name). input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer. model_name_to_output: a dict mapping Keras Model name to output layer of the model. prev_node_name: a string representing a previous, in sequential model layout, node name.
[ "Updates", "input_to_in_layer", "model_name_to_output", "and", "prev_node_name", "based", "on", "the", "model_layer", "." ]
python
train
48.609375
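A minimal sketch of driving _update_dicts on a toy Sequential-style config (not from the source record): the two helpers below are hypothetical stand-ins for TensorBoard's _scoped_name and _norm_to_list_of_layers, and the model config is invented.

# Hypothetical stand-ins for the module's helpers.
def _scoped_name(name_scope, node_name):
    # Prefix node_name with name_scope, mirroring tf.name_scope semantics.
    return '%s/%s' % (name_scope, node_name) if name_scope else node_name

def _norm_to_list_of_layers(maybe_layers):
    # Normalize output_layers to a list of [name, node_index, tensor_index] entries.
    return maybe_layers if isinstance(maybe_layers[0], list) else [maybe_layers]

# A toy Sequential-style model layer: no input_layers/output_layers keys.
model_layer = {'config': {'name': 'seq', 'layers': [{'config': {'name': 'dense_1'}}]}}
dicts = _update_dicts('root', model_layer, {}, {}, prev_node_name=None)
print(dicts)  # ({}, {'root/seq': ['root/seq/dense_1']}, None)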
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py#L69-L78
def update_resources(self, data, type_, names=None, languages=None): """ Update or add resource data. type_ = resource type to update names = a list of resource names to update (None = all) languages = a list of resource languages to update (None = all) """ UpdateResources(self.filename, data, type_, names, languages)
[ "def", "update_resources", "(", "self", ",", "data", ",", "type_", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "UpdateResources", "(", "self", ".", "filename", ",", "data", ",", "type_", ",", "names", ",", "languages", ")" ]
Update or add resource data. type_ = resource type to update names = a list of resource names to update (None = all) languages = a list of resource languages to update (None = all)
[ "Update", "or", "add", "resource", "data", ".", "type_", "=", "resource", "type", "to", "update", "names", "=", "a", "list", "of", "resource", "names", "to", "update", "(", "None", "=", "all", ")", "languages", "=", "a", "list", "of", "resource", "languages", "to", "update", "(", "None", "=", "all", ")" ]
python
train
38.4
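A hedged usage sketch (not from the source): the class name File and the file paths are assumptions; 16 is the numeric value of the Windows RT_VERSION resource type.

# Hypothetical usage, assuming the enclosing class wraps a PE file path
# in self.filename, as PyInstaller's winresource module does.
res = File('dist/app.exe')                      # assumed class name and path
with open('version.res', 'rb') as fh:
    version_blob = fh.read()
# Update resource type 16 (RT_VERSION) for every name and language.
res.update_resources(version_blob, 16, names=None, languages=None)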
aws/sagemaker-python-sdk
src/sagemaker/session.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/session.py#L692-L710
def wait_for_model_package(self, model_package_name, poll=5):
        """Wait for an Amazon SageMaker model package creation to complete.

        Args:
            model_package_name (str): Name of the ``ModelPackage`` to wait for.
            poll (int): Polling interval in seconds (default: 5).

        Returns:
            dict: Return value from the ``DescribeModelPackage`` API.
        """
        desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name), poll)
        status = desc['ModelPackageStatus']

        if status != 'Completed':
            reason = desc.get('FailureReason', None)
            raise ValueError('Error creating model package {}: {} Reason: {}'.format(
                model_package_name, status, reason))
        return desc
[ "def", "wait_for_model_package", "(", "self", ",", "model_package_name", ",", "poll", "=", "5", ")", ":", "desc", "=", "_wait_until", "(", "lambda", ":", "_create_model_package_status", "(", "self", ".", "sagemaker_client", ",", "model_package_name", ")", ",", "poll", ")", "status", "=", "desc", "[", "'ModelPackageStatus'", "]", "if", "status", "!=", "'Completed'", ":", "reason", "=", "desc", ".", "get", "(", "'FailureReason'", ",", "None", ")", "raise", "ValueError", "(", "'Error creating model package {}: {} Reason: {}'", ".", "format", "(", "model_package_name", ",", "status", ",", "reason", ")", ")", "return", "desc" ]
Wait for an Amazon SageMaker model package creation to complete.

        Args:
            model_package_name (str): Name of the ``ModelPackage`` to wait for.
            poll (int): Polling interval in seconds (default: 5).

        Returns:
            dict: Return value from the ``DescribeModelPackage`` API.
[ "Wait", "for", "an", "Amazon", "SageMaker", "endpoint", "deployment", "to", "complete", "." ]
python
train
41.631579
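A hedged usage sketch: the package name is invented, and constructing Session assumes AWS credentials are already configured.

import sagemaker

sess = sagemaker.Session()
desc = sess.wait_for_model_package('my-model-package', poll=10)  # invented name
print(desc['ModelPackageStatus'])   # 'Completed' once creation succeeds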
aio-libs/aiosparql
aiosparql/escape.py
https://github.com/aio-libs/aiosparql/blob/9aaf313fe71908c2cc4d7a7ab3b5af2dc9e0c99d/aiosparql/escape.py#L11-L35
def escape_any(value): """ Section 4.1.2 defines SPARQL shortened forms https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals Examples of literal syntax in SPARQL include: "chat" 'chat'@fr with language tag "fr" "xyz"^^<http://example.org/ns/userDatatype> "abc"^^appNS:appDataType '''The librarian said, "Perhaps you would enjoy 'War and Peace'."''' 1, which is the same as "1"^^xsd:integer 1.3, which is the same as "1.3"^^xsd:decimal 1.300, which is the same as "1.300"^^xsd:decimal 1.0e6, which is the same as "1.0e6"^^xsd:double true, which is the same as "true"^^xsd:boolean false, which is the same as "false"^^xsd:boolean """ if isinstance(value, type): raise TypeError("object %r is not an instance" % value) for type_, escape_method in escapers: if isinstance(value, type_): return escape_method(value) return escape_string(str(value))
[ "def", "escape_any", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "type", ")", ":", "raise", "TypeError", "(", "\"object %r is not an instance\"", "%", "value", ")", "for", "type_", ",", "escape_method", "in", "escapers", ":", "if", "isinstance", "(", "value", ",", "type_", ")", ":", "return", "escape_method", "(", "value", ")", "return", "escape_string", "(", "str", "(", "value", ")", ")" ]
Section 4.1.2 defines SPARQL shortened forms https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#QSynLiterals Examples of literal syntax in SPARQL include: "chat" 'chat'@fr with language tag "fr" "xyz"^^<http://example.org/ns/userDatatype> "abc"^^appNS:appDataType '''The librarian said, "Perhaps you would enjoy 'War and Peace'."''' 1, which is the same as "1"^^xsd:integer 1.3, which is the same as "1.3"^^xsd:decimal 1.300, which is the same as "1.300"^^xsd:decimal 1.0e6, which is the same as "1.0e6"^^xsd:double true, which is the same as "true"^^xsd:boolean false, which is the same as "false"^^xsd:boolean
[ "Section", "4", ".", "1", ".", "2", "defines", "SPARQL", "shortened", "forms", "https", ":", "//", "www", ".", "w3", ".", "org", "/", "TR", "/", "2013", "/", "REC", "-", "sparql11", "-", "query", "-", "20130321", "/", "#QSynLiterals" ]
python
train
39.4
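A sketch of the dispatch behavior, assuming the module's escapers table maps Python types to SPARQL literal forms as the docstring describes; the exact output strings depend on the registered escape functions.

from aiosparql.escape import escape_any

print(escape_any(1))        # expected: a "1"^^xsd:integer style literal, per the docstring
print(escape_any(True))     # expected: a "true"^^xsd:boolean style literal
print(escape_any("chat"))   # non-registered types fall through to escape_string
# escape_any(int) would raise TypeError: a class is not an instance.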
DLR-RM/RAFCON
source/rafcon/core/library_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L128-L143
def _clean_path(path):
    """Create a fully resolved absolute system path with no symbolic links or environment variables"""
    path = path.replace('"', '')
    path = path.replace("'", '')
    # Replace ~ with /home/user
    path = os.path.expanduser(path)
    # Replace environment variables
    path = os.path.expandvars(path)
    # If the path is relative, assume it is relative to the config file directory
    if not os.path.isabs(path):
        path = os.path.join(config.global_config.path, path)
    # Clean path, e.g. replace /./ with /
    path = os.path.abspath(path)
    # Eliminate symbolic links
    path = os.path.realpath(path)
    return path
[ "def", "_clean_path", "(", "path", ")", ":", "path", "=", "path", ".", "replace", "(", "'\"'", ",", "''", ")", "path", "=", "path", ".", "replace", "(", "\"'\"", ",", "''", ")", "# Replace ~ with /home/user", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "# Replace environment variables", "path", "=", "os", ".", "path", ".", "expandvars", "(", "path", ")", "# If the path is relative, assume it is relative to the config file directory", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "config", ".", "global_config", ".", "path", ",", "path", ")", "# Clean path, e.g. replace /./ with /", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "# Eliminate symbolic links", "path", "=", "os", ".", "path", ".", "realpath", "(", "path", ")", "return", "path" ]
Create a fully resolved absolute system path with no symbolic links or environment variables
[ "Create", "a", "fully", "fissile", "absolute", "system", "path", "with", "no", "symbolic", "links", "and", "environment", "variables" ]
python
train
44.1875
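A self-contained sketch of the same cleaning pipeline, minus the config-relative step (config.global_config.path is RAFCON-specific, so a plain base_dir parameter stands in for it):

import os

def clean_path_sketch(path, base_dir='.'):
    # Mirror _clean_path: strip quotes, expand ~ and $VARS, anchor relative
    # paths at base_dir, then normalize and resolve symlinks.
    path = path.replace('"', '').replace("'", '')
    path = os.path.expandvars(os.path.expanduser(path))
    if not os.path.isabs(path):
        path = os.path.join(base_dir, path)
    return os.path.realpath(os.path.abspath(path))

print(clean_path_sketch('~/projects/../"libs"'))  # e.g. /home/user/libs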
pkgw/pwkit
pwkit/astutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/astutil.py#L849-L908
def get_simbad_astrometry_info (ident, items=_simbaditems, debug=False): """Fetch astrometric information from the Simbad web service. Given the name of a source as known to the CDS Simbad service, this function looks up its positional information and returns it in a dictionary. In most cases you should use an :class:`AstrometryInfo` object and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this function. Arguments: ident The Simbad name of the source to look up. items An iterable of data items to look up. The default fetches position, proper motion, parallax, and radial velocity information. Each item name resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are defined `on this CDS page <http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_. debug If true, the response from the webserver will be printed. The return value is a dictionary with a key corresponding to the textual result returned for each requested item. """ import codecs try: from urllib.parse import quote except ImportError: from urllib import quote try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen s = '\\n'.join ('%s %%%s' % (i, i) for i in items) s = '''output console=off script=off format object "%s" query id %s''' % (s, ident) url = _simbadbase + quote (s) results = {} errtext = None for line in codecs.getreader('utf-8')(urlopen (url)): line = line.strip () if debug: print_ ('D: SA >>', line) if errtext is not None: errtext += line elif line.startswith ('::error'): errtext = '' elif len (line): k, v = line.split (' ', 1) results[k] = v if errtext is not None: raise Exception ('SIMBAD query error: ' + errtext) return results
[ "def", "get_simbad_astrometry_info", "(", "ident", ",", "items", "=", "_simbaditems", ",", "debug", "=", "False", ")", ":", "import", "codecs", "try", ":", "from", "urllib", ".", "parse", "import", "quote", "except", "ImportError", ":", "from", "urllib", "import", "quote", "try", ":", "from", "urllib", ".", "request", "import", "urlopen", "except", "ImportError", ":", "from", "urllib2", "import", "urlopen", "s", "=", "'\\\\n'", ".", "join", "(", "'%s %%%s'", "%", "(", "i", ",", "i", ")", "for", "i", "in", "items", ")", "s", "=", "'''output console=off script=off\nformat object \"%s\"\nquery id %s'''", "%", "(", "s", ",", "ident", ")", "url", "=", "_simbadbase", "+", "quote", "(", "s", ")", "results", "=", "{", "}", "errtext", "=", "None", "for", "line", "in", "codecs", ".", "getreader", "(", "'utf-8'", ")", "(", "urlopen", "(", "url", ")", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "debug", ":", "print_", "(", "'D: SA >>'", ",", "line", ")", "if", "errtext", "is", "not", "None", ":", "errtext", "+=", "line", "elif", "line", ".", "startswith", "(", "'::error'", ")", ":", "errtext", "=", "''", "elif", "len", "(", "line", ")", ":", "k", ",", "v", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "results", "[", "k", "]", "=", "v", "if", "errtext", "is", "not", "None", ":", "raise", "Exception", "(", "'SIMBAD query error: '", "+", "errtext", ")", "return", "results" ]
Fetch astrometric information from the Simbad web service. Given the name of a source as known to the CDS Simbad service, this function looks up its positional information and returns it in a dictionary. In most cases you should use an :class:`AstrometryInfo` object and its :meth:`~AstrometryInfo.fill_from_simbad` method instead of this function. Arguments: ident The Simbad name of the source to look up. items An iterable of data items to look up. The default fetches position, proper motion, parallax, and radial velocity information. Each item name resembles the string ``COO(d;A)`` or ``PLX(E)``. The allowed formats are defined `on this CDS page <http://simbad.u-strasbg.fr/Pages/guide/sim-fscript.htx>`_. debug If true, the response from the webserver will be printed. The return value is a dictionary with a key corresponding to the textual result returned for each requested item.
[ "Fetch", "astrometric", "information", "from", "the", "Simbad", "web", "service", "." ]
python
train
32.166667
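A hedged usage example; the identifier is arbitrary, the call needs network access to the CDS Simbad service, and the result keys echo whatever item strings were requested.

# Hypothetical query with the default item list.
info = get_simbad_astrometry_info('HD 189733')
# Keys mirror the requested items, e.g. 'COO(d;A)'; values are raw text.
for item, value in info.items():
    print(item, '->', value)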
standage/tag
tag/index.py
https://github.com/standage/tag/blob/94686adf57115cea1c5235e99299e691f80ba10b/tag/index.py#L81-L93
def consume(self, entrystream): """ Load a stream of entries into memory. Only Feature objects and sequence-region directives are loaded, all other entries are discarded. """ for entry in entrystream: if isinstance(entry, tag.directive.Directive) and \ entry.type == 'sequence-region': self.consume_seqreg(entry) elif isinstance(entry, tag.feature.Feature): self.consume_feature(entry)
[ "def", "consume", "(", "self", ",", "entrystream", ")", ":", "for", "entry", "in", "entrystream", ":", "if", "isinstance", "(", "entry", ",", "tag", ".", "directive", ".", "Directive", ")", "and", "entry", ".", "type", "==", "'sequence-region'", ":", "self", ".", "consume_seqreg", "(", "entry", ")", "elif", "isinstance", "(", "entry", ",", "tag", ".", "feature", ".", "Feature", ")", ":", "self", ".", "consume_feature", "(", "entry", ")" ]
Load a stream of entries into memory. Only Feature objects and sequence-region directives are loaded, all other entries are discarded.
[ "Load", "a", "stream", "of", "entries", "into", "memory", "." ]
python
train
38.307692
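A hedged sketch of loading a GFF3 stream into the index; the Index class name is assumed from tag/index.py, tag.GFF3Reader is assumed to be the package's reader, and the file name is invented.

import tag
from tag.index import Index   # assumed home of this method

index = Index()
entries = tag.GFF3Reader(infilename='annotations.gff3')  # invented file
index.consume(entries)  # keeps Features and sequence-region directives only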
TeamHG-Memex/eli5
eli5/utils.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/utils.py#L213-L232
def _get_value_indices(names1, names2, lookups): """ >>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'], ... ['bar', 'foo']) [1, 0] >>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'], ... ['bar', 'FOO']) [1, 0] >>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'], ... ['BAZ', 'foo']) [2, 0] >>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'], ... ['spam']) Traceback (most recent call last): ... KeyError: 'spam' """ positions = {name: idx for idx, name in enumerate(names2)} positions.update({name: idx for idx, name in enumerate(names1)}) return [positions[name] for name in lookups]
[ "def", "_get_value_indices", "(", "names1", ",", "names2", ",", "lookups", ")", ":", "positions", "=", "{", "name", ":", "idx", "for", "idx", ",", "name", "in", "enumerate", "(", "names2", ")", "}", "positions", ".", "update", "(", "{", "name", ":", "idx", "for", "idx", ",", "name", "in", "enumerate", "(", "names1", ")", "}", ")", "return", "[", "positions", "[", "name", "]", "for", "name", "in", "lookups", "]" ]
>>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'], ... ['bar', 'foo']) [1, 0] >>> _get_value_indices(['foo', 'bar', 'baz'], ['FOO', 'bar', 'baz'], ... ['bar', 'FOO']) [1, 0] >>> _get_value_indices(['foo', 'bar', 'BAZ'], ['foo', 'BAZ', 'baz'], ... ['BAZ', 'foo']) [2, 0] >>> _get_value_indices(['foo', 'bar', 'baz'], ['foo', 'bar', 'baz'], ... ['spam']) Traceback (most recent call last): ... KeyError: 'spam'
[ ">>>", "_get_value_indices", "(", "[", "foo", "bar", "baz", "]", "[", "foo", "bar", "baz", "]", "...", "[", "bar", "foo", "]", ")", "[", "1", "0", "]", ">>>", "_get_value_indices", "(", "[", "foo", "bar", "baz", "]", "[", "FOO", "bar", "baz", "]", "...", "[", "bar", "FOO", "]", ")", "[", "1", "0", "]", ">>>", "_get_value_indices", "(", "[", "foo", "bar", "BAZ", "]", "[", "foo", "BAZ", "baz", "]", "...", "[", "BAZ", "foo", "]", ")", "[", "2", "0", "]", ">>>", "_get_value_indices", "(", "[", "foo", "bar", "baz", "]", "[", "foo", "bar", "baz", "]", "...", "[", "spam", "]", ")", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "...", "KeyError", ":", "spam" ]
python
train
39.25
ui/django-post_office
post_office/mail.py
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/mail.py#L169-L178
def get_queued(): """ Returns a list of emails that should be sent: - Status is queued - Has scheduled_time lower than the current time or None """ return Email.objects.filter(status=STATUS.queued) \ .select_related('template') \ .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \ .order_by(*get_sending_order()).prefetch_related('attachments')[:get_batch_size()]
[ "def", "get_queued", "(", ")", ":", "return", "Email", ".", "objects", ".", "filter", "(", "status", "=", "STATUS", ".", "queued", ")", ".", "select_related", "(", "'template'", ")", ".", "filter", "(", "Q", "(", "scheduled_time__lte", "=", "now", "(", ")", ")", "|", "Q", "(", "scheduled_time", "=", "None", ")", ")", ".", "order_by", "(", "*", "get_sending_order", "(", ")", ")", ".", "prefetch_related", "(", "'attachments'", ")", "[", ":", "get_batch_size", "(", ")", "]" ]
Returns a list of emails that should be sent: - Status is queued - Has scheduled_time lower than the current time or None
[ "Returns", "a", "list", "of", "emails", "that", "should", "be", "sent", ":", "-", "Status", "is", "queued", "-", "Has", "scheduled_time", "lower", "than", "the", "current", "time", "or", "None" ]
python
train
41.8
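A hedged send-loop sketch; dispatch() is assumed to be the per-message sending method on django-post_office's Email model.

# Send one batch of due, queued emails.
for email in get_queued():
    email.dispatch()   # assumed sending method on the Email model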
bjmorgan/vasppy
vasppy/summary.py
https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/summary.py#L77-L90
def find_vasp_calculations(): """ Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories. """ dir_list = [ './' + re.sub( r'vasprun\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ] gz_dir_list = [ './' + re.sub( r'vasprun\.xml\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ] return dir_list + gz_dir_list
[ "def", "find_vasp_calculations", "(", ")", ":", "dir_list", "=", "[", "'./'", "+", "re", ".", "sub", "(", "r'vasprun\\.xml'", ",", "''", ",", "path", ")", "for", "path", "in", "glob", ".", "iglob", "(", "'**/vasprun.xml'", ",", "recursive", "=", "True", ")", "]", "gz_dir_list", "=", "[", "'./'", "+", "re", ".", "sub", "(", "r'vasprun\\.xml\\.gz'", ",", "''", ",", "path", ")", "for", "path", "in", "glob", ".", "iglob", "(", "'**/vasprun.xml.gz'", ",", "recursive", "=", "True", ")", "]", "return", "dir_list", "+", "gz_dir_list" ]
Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories.
[ "Returns", "a", "list", "of", "all", "subdirectories", "that", "contain", "either", "a", "vasprun", ".", "xml", "file", "or", "a", "compressed", "vasprun", ".", "xml", ".", "gz", "file", "." ]
python
train
38.428571
mlperf/training
image_classification/tensorflow/official/resnet/imagenet_main.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_main.py#L120-L145
def parse_record(raw_record, is_training, dtype): """Parses a record containing a training example of an image. The input record is parsed into a label and image, and the image is passed through preprocessing steps (cropping, flipping, and so on). Args: raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer. is_training: A boolean denoting whether the input is for training. dtype: data type to use for images/features. Returns: Tuple with processed image tensor and one-hot-encoded label tensor. """ image_buffer, label = _parse_example_proto(raw_record) image = imagenet_preprocessing.preprocess_image( image_buffer=image_buffer, output_height=_DEFAULT_IMAGE_SIZE, output_width=_DEFAULT_IMAGE_SIZE, num_channels=_NUM_CHANNELS, is_training=is_training) image = tf.cast(image, dtype) return image, label
[ "def", "parse_record", "(", "raw_record", ",", "is_training", ",", "dtype", ")", ":", "image_buffer", ",", "label", "=", "_parse_example_proto", "(", "raw_record", ")", "image", "=", "imagenet_preprocessing", ".", "preprocess_image", "(", "image_buffer", "=", "image_buffer", ",", "output_height", "=", "_DEFAULT_IMAGE_SIZE", ",", "output_width", "=", "_DEFAULT_IMAGE_SIZE", ",", "num_channels", "=", "_NUM_CHANNELS", ",", "is_training", "=", "is_training", ")", "image", "=", "tf", ".", "cast", "(", "image", ",", "dtype", ")", "return", "image", ",", "label" ]
Parses a record containing a training example of an image. The input record is parsed into a label and image, and the image is passed through preprocessing steps (cropping, flipping, and so on). Args: raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer. is_training: A boolean denoting whether the input is for training. dtype: data type to use for images/features. Returns: Tuple with processed image tensor and one-hot-encoded label tensor.
[ "Parses", "a", "record", "containing", "a", "training", "example", "of", "an", "image", "." ]
python
train
34.076923
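A hedged sketch wiring parse_record into a tf.data input pipeline; the shard file name is a placeholder.

import tensorflow as tf

filenames = ['train-00000-of-01024']  # placeholder TFRecord shard
dataset = tf.data.TFRecordDataset(filenames)
# Each serialized Example becomes an (image, label) pair; dtype controls the cast.
dataset = dataset.map(lambda raw: parse_record(raw, is_training=True, dtype=tf.float32))
dataset = dataset.batch(32)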
pandas-dev/pandas
pandas/core/reshape/pivot.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/pivot.py#L391-L528
def crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins=False, margins_name='All', dropna=True, normalize=False): """ Compute a simple cross tabulation of two (or more) factors. By default computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. .. versionadded:: 0.21.0 dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. .. versionadded:: 0.18.1 Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 """ index = com.maybe_make_list(index) columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') common_idx = _get_objs_combined_axis(index + columns, intersect=True, sort=False) data = {} data.update(zip(rownames, index)) data.update(zip(colnames, columns)) if values is None and aggfunc is not None: raise ValueError("aggfunc cannot be used without values.") if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") from pandas import DataFrame df = DataFrame(data, index=common_idx) if values is None: df['__dummy__'] = 0 kwargs = {'aggfunc': len, 'fill_value': 0} else: df['__dummy__'] = values kwargs = {'aggfunc': aggfunc} table = df.pivot_table('__dummy__', index=rownames, columns=colnames, margins=margins, margins_name=margins_name, dropna=dropna, **kwargs) # Post-process if normalize is not False: table = _normalize(table, normalize=normalize, margins=margins, margins_name=margins_name) return table
[ "def", "crosstab", "(", "index", ",", "columns", ",", "values", "=", "None", ",", "rownames", "=", "None", ",", "colnames", "=", "None", ",", "aggfunc", "=", "None", ",", "margins", "=", "False", ",", "margins_name", "=", "'All'", ",", "dropna", "=", "True", ",", "normalize", "=", "False", ")", ":", "index", "=", "com", ".", "maybe_make_list", "(", "index", ")", "columns", "=", "com", ".", "maybe_make_list", "(", "columns", ")", "rownames", "=", "_get_names", "(", "index", ",", "rownames", ",", "prefix", "=", "'row'", ")", "colnames", "=", "_get_names", "(", "columns", ",", "colnames", ",", "prefix", "=", "'col'", ")", "common_idx", "=", "_get_objs_combined_axis", "(", "index", "+", "columns", ",", "intersect", "=", "True", ",", "sort", "=", "False", ")", "data", "=", "{", "}", "data", ".", "update", "(", "zip", "(", "rownames", ",", "index", ")", ")", "data", ".", "update", "(", "zip", "(", "colnames", ",", "columns", ")", ")", "if", "values", "is", "None", "and", "aggfunc", "is", "not", "None", ":", "raise", "ValueError", "(", "\"aggfunc cannot be used without values.\"", ")", "if", "values", "is", "not", "None", "and", "aggfunc", "is", "None", ":", "raise", "ValueError", "(", "\"values cannot be used without an aggfunc.\"", ")", "from", "pandas", "import", "DataFrame", "df", "=", "DataFrame", "(", "data", ",", "index", "=", "common_idx", ")", "if", "values", "is", "None", ":", "df", "[", "'__dummy__'", "]", "=", "0", "kwargs", "=", "{", "'aggfunc'", ":", "len", ",", "'fill_value'", ":", "0", "}", "else", ":", "df", "[", "'__dummy__'", "]", "=", "values", "kwargs", "=", "{", "'aggfunc'", ":", "aggfunc", "}", "table", "=", "df", ".", "pivot_table", "(", "'__dummy__'", ",", "index", "=", "rownames", ",", "columns", "=", "colnames", ",", "margins", "=", "margins", ",", "margins_name", "=", "margins_name", ",", "dropna", "=", "dropna", ",", "*", "*", "kwargs", ")", "# Post-process", "if", "normalize", "is", "not", "False", ":", "table", "=", "_normalize", "(", "table", ",", "normalize", "=", "normalize", ",", "margins", "=", "margins", ",", "margins_name", "=", "margins_name", ")", "return", "table" ]
Compute a simple cross tabulation of two (or more) factors. By default computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. .. versionadded:: 0.21.0 dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. .. versionadded:: 0.18.1 Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0
[ "Compute", "a", "simple", "cross", "tabulation", "of", "two", "(", "or", "more", ")", "factors", ".", "By", "default", "computes", "a", "frequency", "table", "of", "the", "factors", "unless", "an", "array", "of", "values", "and", "an", "aggregation", "function", "are", "passed", "." ]
python
train
35.173913
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L2755-L2777
def alias_grade_entry(self, grade_entry_id, alias_id): """Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility. The primary ``Id`` of the ``GradeEntry`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another grade entry, it is reassigned to the given grade entry ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of a ``GradeEntry`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``grade_entry_id`` not found raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.alias_resources_template self._alias_id(primary_id=grade_entry_id, equivalent_id=alias_id)
[ "def", "alias_grade_entry", "(", "self", ",", "grade_entry_id", ",", "alias_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.alias_resources_template", "self", ".", "_alias_id", "(", "primary_id", "=", "grade_entry_id", ",", "equivalent_id", "=", "alias_id", ")" ]
Adds an ``Id`` to a ``GradeEntry`` for the purpose of creating compatibility. The primary ``Id`` of the ``GradeEntry`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another grade entry, it is reassigned to the given grade entry ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of a ``GradeEntry`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``grade_entry_id`` not found raise: NullArgument - ``grade_entry_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Adds", "an", "Id", "to", "a", "GradeEntry", "for", "the", "purpose", "of", "creating", "compatibility", "." ]
python
train
49.217391
ewels/MultiQC
multiqc/modules/samblaster/samblaster.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/samblaster/samblaster.py#L69-L109
def parse_samblaster(self, f):
    """ Go through the log file looking for samblaster output.
    If present, grab the sample name from the RG tag of the preceding bwa command """
    dups_regex = "samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
    input_file_regex = "samblaster: Opening (\S+) for read."
    rgtag_name_regex = "\\\\tID:(\S*?)\\\\t"
    data = {}
    s_name = None
    fh = f['f']
    for l in fh:
        # try to find name from RG-tag. If bwa mem is used upstream samblaster with pipes, then the bwa mem command
        # including the read group will be written in the log
        match = re.search(rgtag_name_regex, l)
        if match:
            s_name = self.clean_s_name( match.group(1), f['root'])

        # try to find name from the input file name, if used
        match = re.search(input_file_regex, l)
        if match:
            basefn = os.path.basename(match.group(1))
            fname, ext = os.path.splitext(basefn)
            # if it's stdin, then try bwa RG-tag instead
            if fname != 'stdin':
                s_name = self.clean_s_name( fname, f['root'])

        match = re.search(dups_regex, l)
        if match:
            data['n_dups'] = int(match.group(2))
            data['n_tot'] = int(match.group(3))
            data['n_nondups'] = data['n_tot'] - data['n_dups']
            data['pct_dups'] = float(match.group(4))

    if s_name is None:
        s_name = f['s_name']

    if len(data) > 0:
        if s_name in self.samblaster_data:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.samblaster_data[s_name] = data
[ "def", "parse_samblaster", "(", "self", ",", "f", ")", ":", "dups_regex", "=", "\"samblaster: (Removed|Marked) (\\d+) of (\\d+) \\((\\d+.\\d+)%\\) read ids as duplicates\"", "input_file_regex", "=", "\"samblaster: Opening (\\S+) for read.\"", "rgtag_name_regex", "=", "\"\\\\\\\\tID:(\\S*?)\\\\\\\\t\"", "data", "=", "{", "}", "s_name", "=", "None", "fh", "=", "f", "[", "'f'", "]", "for", "l", "in", "fh", ":", "# try to find name from RG-tag. If bwa mem is used upstream samblaster with pipes, then the bwa mem command", "# including the read group will be written in the log", "match", "=", "re", ".", "search", "(", "rgtag_name_regex", ",", "l", ")", "if", "match", ":", "s_name", "=", "self", ".", "clean_s_name", "(", "match", ".", "group", "(", "1", ")", ",", "f", "[", "'root'", "]", ")", "# try to find name from the input file name, if used", "match", "=", "re", ".", "search", "(", "input_file_regex", ",", "l", ")", "if", "match", ":", "basefn", "=", "os", ".", "path", ".", "basename", "(", "match", ".", "group", "(", "1", ")", ")", "fname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basefn", ")", "# if it's stdin, then try bwa RG-tag instead", "if", "fname", "!=", "'stdin'", ":", "s_name", "=", "self", ".", "clean_s_name", "(", "fname", ",", "f", "[", "'root'", "]", ")", "match", "=", "re", ".", "search", "(", "dups_regex", ",", "l", ")", "if", "match", ":", "data", "[", "'n_dups'", "]", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "data", "[", "'n_tot'", "]", "=", "int", "(", "match", ".", "group", "(", "3", ")", ")", "data", "[", "'n_nondups'", "]", "=", "data", "[", "'n_tot'", "]", "-", "data", "[", "'n_dups'", "]", "data", "[", "'pct_dups'", "]", "=", "float", "(", "match", ".", "group", "(", "4", ")", ")", "if", "s_name", "is", "None", ":", "s_name", "=", "f", "[", "'s_name'", "]", "if", "len", "(", "data", ")", ">", "0", ":", "if", "s_name", "in", "self", ".", "samblaster_data", ":", "log", ".", "debug", "(", "\"Duplicate sample name found in {}! Overwriting: {}\"", ".", "format", "(", "f", "[", "'fn'", "]", ",", "s_name", ")", ")", "self", ".", "add_data_source", "(", "f", ",", "s_name", ")", "self", ".", "samblaster_data", "[", "s_name", "]", "=", "data" ]
Go through the log file looking for samblaster output. If present, grab the sample name from the RG tag of the preceding bwa command
[ "Go", "through", "log", "file", "looking", "for", "samblaster", "output", ".", "If", "the", "Grab", "the", "name", "from", "the", "RG", "tag", "of", "the", "preceding", "bwa", "command" ]
python
train
44.195122
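To make the parsing concrete, a small self-contained check of dups_regex against a representative samblaster log line (the line itself is invented to match the documented format):

import re

dups_regex = r"samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
line = "samblaster: Marked 1234 of 56789 (2.17%) read ids as duplicates"  # invented example
m = re.search(dups_regex, line)
print(int(m.group(2)), int(m.group(3)), float(m.group(4)))  # 1234 56789 2.17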
iotile/coretools
iotileship/iotile/ship/recipe.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileship/iotile/ship/recipe.py#L378-L400
def _prepare_resources(self, variables, overrides=None): """Create and optionally open all shared resources.""" if overrides is None: overrides = {} res_map = {} own_map = {} for decl in self.resources.values(): resource = overrides.get(decl.name) if resource is None: args = _complete_parameters(decl.args, variables) resource = decl.type(args) own_map[decl.name] = resource if decl.autocreate: resource.open() res_map[decl.name] = resource return res_map, own_map
[ "def", "_prepare_resources", "(", "self", ",", "variables", ",", "overrides", "=", "None", ")", ":", "if", "overrides", "is", "None", ":", "overrides", "=", "{", "}", "res_map", "=", "{", "}", "own_map", "=", "{", "}", "for", "decl", "in", "self", ".", "resources", ".", "values", "(", ")", ":", "resource", "=", "overrides", ".", "get", "(", "decl", ".", "name", ")", "if", "resource", "is", "None", ":", "args", "=", "_complete_parameters", "(", "decl", ".", "args", ",", "variables", ")", "resource", "=", "decl", ".", "type", "(", "args", ")", "own_map", "[", "decl", ".", "name", "]", "=", "resource", "if", "decl", ".", "autocreate", ":", "resource", ".", "open", "(", ")", "res_map", "[", "decl", ".", "name", "]", "=", "resource", "return", "res_map", ",", "own_map" ]
Create and optionally open all shared resources.
[ "Create", "and", "optionally", "open", "all", "shared", "resources", "." ]
python
train
27.043478
edx/XBlock
xblock/runtime.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/runtime.py#L820-L831
def render_child(self, child, view_name=None, context=None): """A shortcut to render a child block. Use this method to render your children from your own view function. If `view_name` is not provided, it will default to the view name you're being rendered with. Returns the same value as :func:`render`. """ return child.render(view_name or self._view_name, context)
[ "def", "render_child", "(", "self", ",", "child", ",", "view_name", "=", "None", ",", "context", "=", "None", ")", ":", "return", "child", ".", "render", "(", "view_name", "or", "self", ".", "_view_name", ",", "context", ")" ]
A shortcut to render a child block. Use this method to render your children from your own view function. If `view_name` is not provided, it will default to the view name you're being rendered with. Returns the same value as :func:`render`.
[ "A", "shortcut", "to", "render", "a", "child", "block", "." ]
python
train
34.583333
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L596-L608
def add_public_note(self, public_note, source=None): """Add public note. :param public_note: public note for the current article. :type public_note: string :param source: source for the given notes. :type source: string """ self._append_to('public_notes', self._sourced_dict( source, value=public_note, ))
[ "def", "add_public_note", "(", "self", ",", "public_note", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'public_notes'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "public_note", ",", ")", ")" ]
Add public note. :param public_note: public note for the current article. :type public_note: string :param source: source for the given notes. :type source: string
[ "Add", "public", "note", "." ]
python
train
29.461538
honzajavorek/redis-collections
redis_collections/sets.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sets.py#L102-L129
def isdisjoint(self, other): """ Return ``True`` if the set has no elements in common with *other*. Sets are disjoint if and only if their intersection is the empty set. :param other: Any kind of iterable. :rtype: boolean """ def isdisjoint_trans_pure(pipe): return not pipe.sinter(self.key, other.key) def isdisjoint_trans_mixed(pipe): self_values = set(self.__iter__(pipe)) if use_redis: other_values = set(other.__iter__(pipe)) else: other_values = set(other) return self_values.isdisjoint(other_values) if self._same_redis(other): return self._transaction(isdisjoint_trans_pure, other.key) if self._same_redis(other, RedisCollection): use_redis = True return self._transaction(isdisjoint_trans_mixed, other.key) use_redis = False return self._transaction(isdisjoint_trans_mixed)
[ "def", "isdisjoint", "(", "self", ",", "other", ")", ":", "def", "isdisjoint_trans_pure", "(", "pipe", ")", ":", "return", "not", "pipe", ".", "sinter", "(", "self", ".", "key", ",", "other", ".", "key", ")", "def", "isdisjoint_trans_mixed", "(", "pipe", ")", ":", "self_values", "=", "set", "(", "self", ".", "__iter__", "(", "pipe", ")", ")", "if", "use_redis", ":", "other_values", "=", "set", "(", "other", ".", "__iter__", "(", "pipe", ")", ")", "else", ":", "other_values", "=", "set", "(", "other", ")", "return", "self_values", ".", "isdisjoint", "(", "other_values", ")", "if", "self", ".", "_same_redis", "(", "other", ")", ":", "return", "self", ".", "_transaction", "(", "isdisjoint_trans_pure", ",", "other", ".", "key", ")", "if", "self", ".", "_same_redis", "(", "other", ",", "RedisCollection", ")", ":", "use_redis", "=", "True", "return", "self", ".", "_transaction", "(", "isdisjoint_trans_mixed", ",", "other", ".", "key", ")", "use_redis", "=", "False", "return", "self", ".", "_transaction", "(", "isdisjoint_trans_mixed", ")" ]
Return ``True`` if the set has no elements in common with *other*. Sets are disjoint if and only if their intersection is the empty set. :param other: Any kind of iterable. :rtype: boolean
[ "Return", "True", "if", "the", "set", "has", "no", "elements", "in", "common", "with", "*", "other", "*", ".", "Sets", "are", "disjoint", "if", "and", "only", "if", "their", "intersection", "is", "the", "empty", "set", "." ]
python
train
35.142857
jilljenn/tryalgo
tryalgo/bellman_ford.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/bellman_ford.py#L8-L35
def bellman_ford(graph, weight, source=0): """ Single source shortest paths by Bellman-Ford :param graph: directed graph in listlist or listdict format :param weight: can be negative. in matrix format or same listdict graph :returns: distance table, precedence table, bool :explanation: bool is True if a negative circuit is reachable from the source, circuits can have length 2. :complexity: `O(|V|*|E|)` """ n = len(graph) dist = [float('inf')] * n prec = [None] * n dist[source] = 0 for nb_iterations in range(n): changed = False for node in range(n): for neighbor in graph[node]: alt = dist[node] + weight[node][neighbor] if alt < dist[neighbor]: dist[neighbor] = alt prec[neighbor] = node changed = True if not changed: # fixed point return dist, prec, False return dist, prec, True
[ "def", "bellman_ford", "(", "graph", ",", "weight", ",", "source", "=", "0", ")", ":", "n", "=", "len", "(", "graph", ")", "dist", "=", "[", "float", "(", "'inf'", ")", "]", "*", "n", "prec", "=", "[", "None", "]", "*", "n", "dist", "[", "source", "]", "=", "0", "for", "nb_iterations", "in", "range", "(", "n", ")", ":", "changed", "=", "False", "for", "node", "in", "range", "(", "n", ")", ":", "for", "neighbor", "in", "graph", "[", "node", "]", ":", "alt", "=", "dist", "[", "node", "]", "+", "weight", "[", "node", "]", "[", "neighbor", "]", "if", "alt", "<", "dist", "[", "neighbor", "]", ":", "dist", "[", "neighbor", "]", "=", "alt", "prec", "[", "neighbor", "]", "=", "node", "changed", "=", "True", "if", "not", "changed", ":", "# fixed point", "return", "dist", ",", "prec", ",", "False", "return", "dist", ",", "prec", ",", "True" ]
Single source shortest paths by Bellman-Ford :param graph: directed graph in listlist or listdict format :param weight: can be negative. in matrix format or same listdict graph :returns: distance table, precedence table, bool :explanation: bool is True if a negative circuit is reachable from the source, circuits can have length 2. :complexity: `O(|V|*|E|)`
[ "Single", "source", "shortest", "paths", "by", "Bellman", "-", "Ford" ]
python
train
36.642857
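A small runnable check on a three-node graph with one negative edge, using the listlist graph format and a matrix weight format as the docstring allows:

INF = float('inf')
graph = [[1, 2], [2], []]            # adjacency lists for nodes 0..2
weight = [[INF, 4, 5],
          [INF, INF, -2],
          [INF, INF, INF]]           # weight[u][v]; INF where no edge exists
dist, prec, neg_cycle = bellman_ford(graph, weight, source=0)
print(dist)       # [0, 4, 2] -- path 0 -> 1 -> 2 costs 4 + (-2)
print(prec)       # [None, 0, 1]
print(neg_cycle)  # False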
kmadac/bitstamp-python-client
bitstamp/client.py
https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L56-L64
def _construct_url(self, url, base, quote): """ Adds the orderbook to the url if base and quote are specified. """ if not base and not quote: return url else: url = url + base.lower() + quote.lower() + "/" return url
[ "def", "_construct_url", "(", "self", ",", "url", ",", "base", ",", "quote", ")", ":", "if", "not", "base", "and", "not", "quote", ":", "return", "url", "else", ":", "url", "=", "url", "+", "base", ".", "lower", "(", ")", "+", "quote", ".", "lower", "(", ")", "+", "\"/\"", "return", "url" ]
Adds the orderbook to the url if base and quote are specified.
[ "Adds", "the", "orderbook", "to", "the", "url", "if", "base", "and", "quote", "are", "specified", "." ]
python
train
31.555556
ph4r05/monero-serialize
monero_serialize/xmrboost.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrboost.py#L361-L389
async def container(self, container=None, container_type=None, params=None): """ Loads/dumps container :return: """ # Container versioning is a bit tricky, primitive type containers are not versioned. elem_type = x.container_elem_type(container_type, params) raw_container = container_is_raw(container_type, params) elem_elementary = TypeWrapper.is_elementary_type(elem_type) is_versioned = not elem_elementary and not raw_container version = None if is_versioned: version = await self.version(container_type, params, elem=container) if self.is_tracked(): return self.get_tracked() if hasattr(container_type, 'boost_serialize'): container = container_type() if container is None else container self.pop_track(is_versioned) return await container.boost_serialize(self, elem=container, elem_type=container_type, params=params, version=version) # Container entry version + container if self.writing: self.pop_track(is_versioned) return await self.container_dump(container, container_type, params) else: obj = await self.container_load(container_type, params=params, container=container) return self.track_obj(obj, is_versioned)
[ "async", "def", "container", "(", "self", ",", "container", "=", "None", ",", "container_type", "=", "None", ",", "params", "=", "None", ")", ":", "# Container versioning is a bit tricky, primitive type containers are not versioned.", "elem_type", "=", "x", ".", "container_elem_type", "(", "container_type", ",", "params", ")", "raw_container", "=", "container_is_raw", "(", "container_type", ",", "params", ")", "elem_elementary", "=", "TypeWrapper", ".", "is_elementary_type", "(", "elem_type", ")", "is_versioned", "=", "not", "elem_elementary", "and", "not", "raw_container", "version", "=", "None", "if", "is_versioned", ":", "version", "=", "await", "self", ".", "version", "(", "container_type", ",", "params", ",", "elem", "=", "container", ")", "if", "self", ".", "is_tracked", "(", ")", ":", "return", "self", ".", "get_tracked", "(", ")", "if", "hasattr", "(", "container_type", ",", "'boost_serialize'", ")", ":", "container", "=", "container_type", "(", ")", "if", "container", "is", "None", "else", "container", "self", ".", "pop_track", "(", "is_versioned", ")", "return", "await", "container", ".", "boost_serialize", "(", "self", ",", "elem", "=", "container", ",", "elem_type", "=", "container_type", ",", "params", "=", "params", ",", "version", "=", "version", ")", "# Container entry version + container", "if", "self", ".", "writing", ":", "self", ".", "pop_track", "(", "is_versioned", ")", "return", "await", "self", ".", "container_dump", "(", "container", ",", "container_type", ",", "params", ")", "else", ":", "obj", "=", "await", "self", ".", "container_load", "(", "container_type", ",", "params", "=", "params", ",", "container", "=", "container", ")", "return", "self", ".", "track_obj", "(", "obj", ",", "is_versioned", ")" ]
Loads/dumps container :return:
[ "Loads", "/", "dumps", "container", ":", "return", ":" ]
python
train
46.310345
googleads/googleads-python-lib
googleads/adwords.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/adwords.py#L969-L1008
def Load(cls, file_input, client=None):
    """Loads an IncrementalUploadHelper from the given file-like object.

    Args:
      file_input: a file-like object containing a serialized
        IncrementalUploadHelper.
      client: an AdWordsClient instance. If not specified, an AdWordsClient will
        be instantiated using the default configuration file.

    Returns:
      An IncrementalUploadHelper instance initialized using the contents of the
      serialized input file.

    Raises:
      GoogleAdsError: If there is an error reading the input file containing the
        serialized IncrementalUploadHelper.
      GoogleAdsValueError: If the contents of the input file can't be parsed to
        produce an IncrementalUploadHelper.
    """
    if client is None:
      client = AdWordsClient.LoadFromStorage()

    try:
      data = yaml.safe_load(file_input)
    except yaml.YAMLError as e:
      raise googleads.errors.GoogleAdsError(
          'Error loading IncrementalUploadHelper from file: %s' % str(e))

    try:
      request_builder = BatchJobHelper.GetRequestBuilder(
          client, version=data['version'], server=data['server']
      )

      return cls(request_builder, data['upload_url'],
                 current_content_length=data['current_content_length'],
                 is_last=data['is_last'])
    except KeyError as e:
      # KeyError has no .message attribute on Python 3; args[0] is the missing key.
      raise googleads.errors.GoogleAdsValueError(
          'Can\'t parse IncrementalUploadHelper from file. Required field '
          '"%s" is missing.' % e.args[0])
[ "def", "Load", "(", "cls", ",", "file_input", ",", "client", "=", "None", ")", ":", "if", "client", "is", "None", ":", "client", "=", "AdWordsClient", ".", "LoadFromStorage", "(", ")", "try", ":", "data", "=", "yaml", ".", "safe_load", "(", "file_input", ")", "except", "yaml", ".", "YAMLError", "as", "e", ":", "raise", "googleads", ".", "errors", ".", "GoogleAdsError", "(", "'Error loading IncrementalUploadHelper from file: %s'", "%", "str", "(", "e", ")", ")", "try", ":", "request_builder", "=", "BatchJobHelper", ".", "GetRequestBuilder", "(", "client", ",", "version", "=", "data", "[", "'version'", "]", ",", "server", "=", "data", "[", "'server'", "]", ")", "return", "cls", "(", "request_builder", ",", "data", "[", "'upload_url'", "]", ",", "current_content_length", "=", "data", "[", "'current_content_length'", "]", ",", "is_last", "=", "data", "[", "'is_last'", "]", ")", "except", "KeyError", "as", "e", ":", "raise", "googleads", ".", "errors", ".", "GoogleAdsValueError", "(", "'Can\\'t parse IncrementalUploadHelper from file. Required field '", "'\"%s\" is missing.'", "%", "e", ".", "message", ")" ]
Loads an IncrementalUploadHelper from the given file-like object. Args: file_input: a file-like object containing a serialized IncrementalUploadHelper. client: an AdWordsClient instance. If not specified, an AdWordsClient will be instantiated using the default configuration file. Returns: An IncrementalUploadHelper instance initialized using the contents of the serialized input file. Raises: GoogleAdsError: If there is an error reading the input file containing the serialized IncrementalUploadHelper. GoogleAdsValueError: If the contents of the input file can't be parsed to produce an IncrementalUploadHelper.
[ "Loads", "an", "IncrementalUploadHelper", "from", "the", "given", "file", "-", "like", "object", "." ]
python
train
37.225
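A hedged round-trip sketch: the YAML snippet shows the fields Load requires, but the values are placeholders, and letting client default means AdWordsClient.LoadFromStorage() must find a valid configuration file.

import io

# Minimal YAML with the keys Load expects (placeholder values).
serialized = io.StringIO(
    "version: v201809\n"
    "server: https://adwords.google.com\n"
    "upload_url: https://example.invalid/upload\n"
    "current_content_length: 0\n"
    "is_last: false\n"
)
helper = IncrementalUploadHelper.Load(serialized)  # uses default-config client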
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L547-L559
def uninstall(self): """ Uninstalls the bundle """ with self._lock: if self._state == Bundle.ACTIVE: self.stop() # Change the bundle state self._state = Bundle.UNINSTALLED # Call the framework self.__framework.uninstall_bundle(self)
[ "def", "uninstall", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_state", "==", "Bundle", ".", "ACTIVE", ":", "self", ".", "stop", "(", ")", "# Change the bundle state", "self", ".", "_state", "=", "Bundle", ".", "UNINSTALLED", "# Call the framework", "self", ".", "__framework", ".", "uninstall_bundle", "(", "self", ")" ]
Uninstalls the bundle
[ "Uninstalls", "the", "bundle" ]
python
train
25.384615
google/transitfeed
gtfsscheduleviewer/marey_graph.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/gtfsscheduleviewer/marey_graph.py#L298-L354
def _DrawTrips(self,triplist,colpar=""): """Generates svg polylines for each transit trip. Args: # Class Trip is defined in transitfeed.py [Trip, Trip, ...] Returns: # A string containing a polyline tag for each trip ' <polyline class="T" stroke="#336633" points="433,0 ...' """ stations = [] if not self._stations and triplist: self._stations = self._CalculateYLines(self._TravelTimes(triplist)) if not self._stations: self._AddWarning("Failed to use traveltimes for graph") self._stations = self._CalculateYLines(self._Uniform(triplist)) if not self._stations: self._AddWarning("Failed to calculate station distances") return stations = self._stations tmpstrs = [] servlist = [] for t in triplist: if not colpar: if t.service_id not in servlist: servlist.append(t.service_id) shade = int(servlist.index(t.service_id) * (200/len(servlist))+55) color = "#00%s00" % hex(shade)[2:4] else: color=colpar start_offsets = [0] first_stop = t.GetTimeStops()[0] for j,freq_offset in enumerate(start_offsets): if j>0 and not colpar: color="purple" scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (t.trip_id, t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime())) tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % \ (str(t.trip_id),color, scriptcall) tmpstrs.append(tmpstrhead) for i, s in enumerate(t.GetTimeStops()): arr_t = s[0] dep_t = s[1] if arr_t is None or dep_t is None: continue arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset tmpstrs.append("%s,%s " % (int(arr_x+20), int(stations[i]+20))) tmpstrs.append("%s,%s " % (int(dep_x+20), int(stations[i]+20))) tmpstrs.append('" />') return "".join(tmpstrs)
[ "def", "_DrawTrips", "(", "self", ",", "triplist", ",", "colpar", "=", "\"\"", ")", ":", "stations", "=", "[", "]", "if", "not", "self", ".", "_stations", "and", "triplist", ":", "self", ".", "_stations", "=", "self", ".", "_CalculateYLines", "(", "self", ".", "_TravelTimes", "(", "triplist", ")", ")", "if", "not", "self", ".", "_stations", ":", "self", ".", "_AddWarning", "(", "\"Failed to use traveltimes for graph\"", ")", "self", ".", "_stations", "=", "self", ".", "_CalculateYLines", "(", "self", ".", "_Uniform", "(", "triplist", ")", ")", "if", "not", "self", ".", "_stations", ":", "self", ".", "_AddWarning", "(", "\"Failed to calculate station distances\"", ")", "return", "stations", "=", "self", ".", "_stations", "tmpstrs", "=", "[", "]", "servlist", "=", "[", "]", "for", "t", "in", "triplist", ":", "if", "not", "colpar", ":", "if", "t", ".", "service_id", "not", "in", "servlist", ":", "servlist", ".", "append", "(", "t", ".", "service_id", ")", "shade", "=", "int", "(", "servlist", ".", "index", "(", "t", ".", "service_id", ")", "*", "(", "200", "/", "len", "(", "servlist", ")", ")", "+", "55", ")", "color", "=", "\"#00%s00\"", "%", "hex", "(", "shade", ")", "[", "2", ":", "4", "]", "else", ":", "color", "=", "colpar", "start_offsets", "=", "[", "0", "]", "first_stop", "=", "t", ".", "GetTimeStops", "(", ")", "[", "0", "]", "for", "j", ",", "freq_offset", "in", "enumerate", "(", "start_offsets", ")", ":", "if", "j", ">", "0", "and", "not", "colpar", ":", "color", "=", "\"purple\"", "scriptcall", "=", "'onmouseover=\"LineClick(\\'%s\\',\\'Trip %s starting %s\\')\"'", "%", "(", "t", ".", "trip_id", ",", "t", ".", "trip_id", ",", "transitfeed", ".", "FormatSecondsSinceMidnight", "(", "t", ".", "GetStartTime", "(", ")", ")", ")", "tmpstrhead", "=", "'<polyline class=\"T\" id=\"%s\" stroke=\"%s\" %s points=\"'", "%", "(", "str", "(", "t", ".", "trip_id", ")", ",", "color", ",", "scriptcall", ")", "tmpstrs", ".", "append", "(", "tmpstrhead", ")", "for", "i", ",", "s", "in", "enumerate", "(", "t", ".", "GetTimeStops", "(", ")", ")", ":", "arr_t", "=", "s", "[", "0", "]", "dep_t", "=", "s", "[", "1", "]", "if", "arr_t", "is", "None", "or", "dep_t", "is", "None", ":", "continue", "arr_x", "=", "int", "(", "arr_t", "/", "3600.0", "*", "self", ".", "_hour_grid", ")", "-", "self", ".", "_hour_grid", "*", "self", ".", "_offset", "dep_x", "=", "int", "(", "dep_t", "/", "3600.0", "*", "self", ".", "_hour_grid", ")", "-", "self", ".", "_hour_grid", "*", "self", ".", "_offset", "tmpstrs", ".", "append", "(", "\"%s,%s \"", "%", "(", "int", "(", "arr_x", "+", "20", ")", ",", "int", "(", "stations", "[", "i", "]", "+", "20", ")", ")", ")", "tmpstrs", ".", "append", "(", "\"%s,%s \"", "%", "(", "int", "(", "dep_x", "+", "20", ")", ",", "int", "(", "stations", "[", "i", "]", "+", "20", ")", ")", ")", "tmpstrs", ".", "append", "(", "'\" />'", ")", "return", "\"\"", ".", "join", "(", "tmpstrs", ")" ]
Generates svg polylines for each transit trip. Args: # Class Trip is defined in transitfeed.py [Trip, Trip, ...] Returns: # A string containing a polyline tag for each trip ' <polyline class="T" stroke="#336633" points="433,0 ...'
[ "Generates", "svg", "polylines", "for", "each", "transit", "trip", "." ]
python
train
36.385965
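Illustrative arithmetic for the time-to-x mapping in _DrawTrips above; the _hour_grid and _offset values here are assumed examples, not taken from the source:

# Illustrative only: seconds-since-midnight -> x coordinate, as in _DrawTrips.
hour_grid = 60                      # pixels per hour (assumed value)
offset = 5                          # graph starts at 05:00 (assumed value)
dep_t = 8 * 3600 + 30 * 60          # a departure at 08:30
dep_x = int(dep_t / 3600.0 * hour_grid) - hour_grid * offset
print(dep_x + 20)                   # 230 -- includes the 20px margin added in the loop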
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L708-L728
def append(self, word, lemma=None, type=None, chunk=None, role=None, relation=None, pnp=None, anchor=None, iob=None, custom={}): """ Appends the next word to the sentence / chunk / preposition. For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1) - word : the current word, - lemma : the canonical form of the word, - type : part-of-speech tag for the word (NN, JJ, ...), - chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...), - role : the chunk's grammatical role (SBJ, OBJ, ...), - relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1), - pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional), - iob : BEGIN if the word marks the start of a new chunk, INSIDE (optional) if the word is part of the previous chunk, - custom : a dictionary of (tag, value)-items for user-defined word tags. """ self._do_word(word, lemma, type) # Append Word object. self._do_chunk(chunk, role, relation, iob) # Append Chunk, or add last word to last chunk. self._do_conjunction() self._do_relation() self._do_pnp(pnp, anchor) self._do_anchor(anchor) self._do_custom(custom)
[ "def", "append", "(", "self", ",", "word", ",", "lemma", "=", "None", ",", "type", "=", "None", ",", "chunk", "=", "None", ",", "role", "=", "None", ",", "relation", "=", "None", ",", "pnp", "=", "None", ",", "anchor", "=", "None", ",", "iob", "=", "None", ",", "custom", "=", "{", "}", ")", ":", "self", ".", "_do_word", "(", "word", ",", "lemma", ",", "type", ")", "# Append Word object.", "self", ".", "_do_chunk", "(", "chunk", ",", "role", ",", "relation", ",", "iob", ")", "# Append Chunk, or add last word to last chunk.", "self", ".", "_do_conjunction", "(", ")", "self", ".", "_do_relation", "(", ")", "self", ".", "_do_pnp", "(", "pnp", ",", "anchor", ")", "self", ".", "_do_anchor", "(", "anchor", ")", "self", ".", "_do_custom", "(", "custom", ")" ]
Appends the next word to the sentence / chunk / preposition. For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1) - word : the current word, - lemma : the canonical form of the word, - type : part-of-speech tag for the word (NN, JJ, ...), - chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...), - role : the chunk's grammatical role (SBJ, OBJ, ...), - relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1), - pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional), - iob : BEGIN if the word marks the start of a new chunk, INSIDE (optional) if the word is part of the previous chunk, - custom : a dictionary of (tag, value)-items for user-defined word tags.
[ "Appends", "the", "next", "word", "to", "the", "sentence", "/", "chunk", "/", "preposition", ".", "For", "example", ":", "Sentence", ".", "append", "(", "clawed", "claw", "VB", "VP", "role", "=", "None", "relation", "=", "1", ")", "-", "word", ":", "the", "current", "word", "-", "lemma", ":", "the", "canonical", "form", "of", "the", "word", "-", "type", ":", "part", "-", "of", "-", "speech", "tag", "for", "the", "word", "(", "NN", "JJ", "...", ")", "-", "chunk", ":", "part", "-", "of", "-", "speech", "tag", "for", "the", "chunk", "this", "word", "is", "part", "of", "(", "NP", "VP", "...", ")", "-", "role", ":", "the", "chunk", "s", "grammatical", "role", "(", "SBJ", "OBJ", "...", ")", "-", "relation", ":", "an", "id", "shared", "by", "other", "related", "chunks", "(", "e", ".", "g", ".", "SBJ", "-", "1", "<", "=", ">", "VP", "-", "1", ")", "-", "pnp", ":", "PNP", "if", "this", "word", "is", "in", "a", "prepositional", "noun", "phrase", "(", "B", "-", "prefix", "optional", ")", "-", "iob", ":", "BEGIN", "if", "the", "word", "marks", "the", "start", "of", "a", "new", "chunk", "INSIDE", "(", "optional", ")", "if", "the", "word", "is", "part", "of", "the", "previous", "chunk", "-", "custom", ":", "a", "dictionary", "of", "(", "tag", "value", ")", "-", "items", "for", "user", "-", "defined", "word", "tags", "." ]
python
train
66.095238
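Hedged usage sketch for Sentence.append(); the import path and the no-argument Sentence() constructor are assumptions based on the upstream pattern library, not verified against this vendored copy:

from pattern.text.tree import Sentence   # assumed import path

s = Sentence()
s.append("the",    "the",  "DT",  "NP", role="SBJ", relation=1)
s.append("cat",    "cat",  "NN",  "NP", role="SBJ", relation=1)
s.append("clawed", "claw", "VBD", "VP", relation=1)
print(s.chunks)   # e.g. an NP-SBJ-1 chunk 'the cat' and a VP-1 chunk 'clawed'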
hyperledger/indy-sdk
wrappers/python/indy/wallet.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/wallet.py#L320-L351
async def generate_wallet_key(config: Optional[str]) -> str:
    """
    Generate wallet master key.
    Returned key is compatible with "RAW" key derivation method.
    It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.

    :param config: (optional) key configuration json.
     {
       "seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
                       Can be UTF-8, base64 or hex string.
     }
    :return: Generated wallet key.
    """
    logger = logging.getLogger(__name__)
    logger.debug("generate_wallet_key: >>> config: %r", config)

    if not hasattr(generate_wallet_key, "cb"):
        logger.debug("generate_wallet_key: Creating callback")
        generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_config = c_char_p(config.encode('utf-8')) if config is not None else None

    key = await do_call('indy_generate_wallet_key',
                        c_config,
                        generate_wallet_key.cb)

    res = key.decode()

    logger.debug("generate_wallet_key: <<< res: %r", res)
    return res
[ "async", "def", "generate_wallet_key", "(", "config", ":", "Optional", "[", "str", "]", ")", "->", "str", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"generate_wallet_key: >>> config: %r\"", ",", "config", ")", "if", "not", "hasattr", "(", "generate_wallet_key", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"generate_wallet_key: Creating callback\"", ")", "generate_wallet_key", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ")", ")", "c_config", "=", "c_char_p", "(", "config", ".", "encode", "(", "'utf-8'", ")", ")", "if", "config", "is", "not", "None", "else", "None", "key", "=", "await", "do_call", "(", "'indy_generate_wallet_key'", ",", "c_config", ",", "generate_wallet_key", ".", "cb", ")", "res", "=", "key", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"generate_wallet_key: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.

:param config: (optional) key configuration json.
 {
   "seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
                   Can be UTF-8, base64 or hex string.
 }
:return: Generated wallet key.
[ "Generate", "wallet", "master", "key", ".", "Returned", "key", "is", "compatible", "with", "RAW", "key", "derivation", "method", ".", "It", "allows", "to", "avoid", "expensive", "key", "derivation", "for", "use", "cases", "when", "wallet", "keys", "can", "be", "stored", "in", "a", "secure", "enclave", "." ]
python
train
36.75
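Minimal usage sketch for generate_wallet_key, assuming libindy is installed and loadable; the seed is an arbitrary placeholder:

import asyncio
import json
from indy import wallet

async def demo():
    # Deterministic key from a 32-character UTF-8 seed; pass None for a random key.
    key = await wallet.generate_wallet_key(json.dumps({"seed": "0" * 32}))
    print(key)   # usable with the "RAW" key derivation method when opening wallets

asyncio.get_event_loop().run_until_complete(demo())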
jasonbot/arcrest
arcrest/server.py
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L483-L487
def url(self):
        """The URL of the resource as a string."""
        if not self._url[2].endswith('/'):
            self._url[2] += '/'
        return RestURL.url.__get__(self)
[ "def", "url", "(", "self", ")", ":", "if", "not", "self", ".", "_url", "[", "2", "]", ".", "endswith", "(", "'/'", ")", ":", "self", ".", "_url", "[", "2", "]", "+=", "'/'", "return", "RestURL", ".", "url", ".", "__get__", "(", "self", ")" ]
The URL of the resource as a string.
[ "The", "URL", "as", "a", "string", "of", "the", "resource", "." ]
python
train
35.4
sebp/scikit-survival
sksurv/setup.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/setup.py#L39-L74
def maybe_cythonize_extensions(top_path, config): """Tweaks for building extensions between release and development mode.""" is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO')) if is_release: build_from_c_and_cpp_files(config.ext_modules) else: message = ('Please install cython with a version >= {0} in order ' 'to build a scikit-survival development version.').format( CYTHON_MIN_VERSION) try: import Cython if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION: message += ' Your version of Cython was {0}.'.format( Cython.__version__) raise ValueError(message) from Cython.Build import cythonize except ImportError as exc: exc.args += (message,) raise # http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments directives = {'language_level': '3'} cy_cov = os.environ.get('CYTHON_COVERAGE', False) if cy_cov: directives['linetrace'] = True macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] else: macros = [] config.ext_modules = cythonize( config.ext_modules, compiler_directives=directives) for e in config.ext_modules: e.define_macros.extend(macros)
[ "def", "maybe_cythonize_extensions", "(", "top_path", ",", "config", ")", ":", "is_release", "=", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "top_path", ",", "'PKG-INFO'", ")", ")", "if", "is_release", ":", "build_from_c_and_cpp_files", "(", "config", ".", "ext_modules", ")", "else", ":", "message", "=", "(", "'Please install cython with a version >= {0} in order '", "'to build a scikit-survival development version.'", ")", ".", "format", "(", "CYTHON_MIN_VERSION", ")", "try", ":", "import", "Cython", "if", "LooseVersion", "(", "Cython", ".", "__version__", ")", "<", "CYTHON_MIN_VERSION", ":", "message", "+=", "' Your version of Cython was {0}.'", ".", "format", "(", "Cython", ".", "__version__", ")", "raise", "ValueError", "(", "message", ")", "from", "Cython", ".", "Build", "import", "cythonize", "except", "ImportError", "as", "exc", ":", "exc", ".", "args", "+=", "(", "message", ",", ")", "raise", "# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments", "directives", "=", "{", "'language_level'", ":", "'3'", "}", "cy_cov", "=", "os", ".", "environ", ".", "get", "(", "'CYTHON_COVERAGE'", ",", "False", ")", "if", "cy_cov", ":", "directives", "[", "'linetrace'", "]", "=", "True", "macros", "=", "[", "(", "'CYTHON_TRACE'", ",", "'1'", ")", ",", "(", "'CYTHON_TRACE_NOGIL'", ",", "'1'", ")", "]", "else", ":", "macros", "=", "[", "]", "config", ".", "ext_modules", "=", "cythonize", "(", "config", ".", "ext_modules", ",", "compiler_directives", "=", "directives", ")", "for", "e", "in", "config", ".", "ext_modules", ":", "e", ".", "define_macros", ".", "extend", "(", "macros", ")" ]
Tweaks for building extensions between release and development mode.
[ "Tweaks", "for", "building", "extensions", "between", "release", "and", "development", "mode", "." ]
python
train
39.583333
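Hedged sketch of how maybe_cythonize_extensions might be called from a numpy.distutils-based setup.py; the package and extension names are placeholders, not the project's real layout:

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)    # placeholder name
    config.add_extension('_fast', sources=['_fast.pyx'])         # placeholder extension
    # Cythonize in development checkouts; reuse shipped C/C++ files in releases.
    maybe_cythonize_extensions(top_path, config)
    return config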
tanghaibao/goatools
goatools/rpt/nts_xfrm.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/nts_xfrm.py#L22-L33
def mknts(self, add_dct): """Add information from add_dct to a new copy of namedtuples stored in nts.""" nts = [] assert len(add_dct) == len(self.nts) flds = list(next(iter(self.nts))._fields) + list(next(iter(add_dct)).keys()) ntobj = cx.namedtuple("ntgoea", " ".join(flds)) for dct_new, ntgoea in zip(add_dct, self.nts): dct_curr = ntgoea._asdict() for key, val in dct_new.items(): dct_curr[key] = val nts.append(ntobj(**dct_curr)) return nts
[ "def", "mknts", "(", "self", ",", "add_dct", ")", ":", "nts", "=", "[", "]", "assert", "len", "(", "add_dct", ")", "==", "len", "(", "self", ".", "nts", ")", "flds", "=", "list", "(", "next", "(", "iter", "(", "self", ".", "nts", ")", ")", ".", "_fields", ")", "+", "list", "(", "next", "(", "iter", "(", "add_dct", ")", ")", ".", "keys", "(", ")", ")", "ntobj", "=", "cx", ".", "namedtuple", "(", "\"ntgoea\"", ",", "\" \"", ".", "join", "(", "flds", ")", ")", "for", "dct_new", ",", "ntgoea", "in", "zip", "(", "add_dct", ",", "self", ".", "nts", ")", ":", "dct_curr", "=", "ntgoea", ".", "_asdict", "(", ")", "for", "key", ",", "val", "in", "dct_new", ".", "items", "(", ")", ":", "dct_curr", "[", "key", "]", "=", "val", "nts", ".", "append", "(", "ntobj", "(", "*", "*", "dct_curr", ")", ")", "return", "nts" ]
Add information from add_dct to a new copy of namedtuples stored in nts.
[ "Add", "information", "from", "add_dct", "to", "a", "new", "copy", "of", "namedtuples", "stored", "in", "nts", "." ]
python
train
45.083333
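The same namedtuple-extension technique as mknts, demonstrated standalone with plain collections.namedtuple (the GO ids are arbitrary examples):

import collections as cx

Base = cx.namedtuple("Base", "GO name")
nts = [Base("GO:0008150", "biological_process"),
       Base("GO:0003674", "molecular_function")]
add_dct = [{"depth": 0}, {"depth": 0}]

# Widen the namedtuple with the new keys, then merge field values per record.
flds = list(nts[0]._fields) + list(add_dct[0].keys())
ntobj = cx.namedtuple("ntgoea", " ".join(flds))
merged = [ntobj(**dict(nt._asdict(), **dct)) for nt, dct in zip(nts, add_dct)]
print(merged[0])   # ntgoea(GO='GO:0008150', name='biological_process', depth=0)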
maxzheng/bumper-lib
bumper/cars.py
https://github.com/maxzheng/bumper-lib/blob/32a9dec5448673825bb2d7d92fa68882b597f794/bumper/cars.py#L91-L145
def add(self, requirements, required=None): """ Add requirements to be managed :param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement` :param bool required: Set required flag for each requirement if provided. """ if isinstance(requirements, RequirementsManager): requirements = list(requirements) elif not isinstance(requirements, list): requirements = [requirements] for req in requirements: name = req.project_name if not isinstance(req, BumpRequirement): req = BumpRequirement(req, required=required) elif required is not None: req.required = required add = True if name in self.requirements: for existing_req in self.requirements[name]: if req == existing_req: add = False break # Need to replace existing as the new req will be used to bump next, and req.required could be # updated. replace = False # Two pins: Use highest pinned version if (req.specs and req.specs[0][0] == '==' and existing_req.specs and existing_req.specs[0][0] == '=='): if pkg_resources.parse_version(req.specs[0][1]) < pkg_resources.parse_version( existing_req.specs[0][1]): req.requirement = existing_req.requirement replace = True # Replace Any if not (req.specs and existing_req.specs): if existing_req.specs: req.requirement = existing_req.requirement replace = True if replace: req.required |= existing_req.required if existing_req.required_by and not req.required_by: req.required_by = existing_req.required_by self.requirements[name].remove(existing_req) break if add: self.requirements[name].append(req)
[ "def", "add", "(", "self", ",", "requirements", ",", "required", "=", "None", ")", ":", "if", "isinstance", "(", "requirements", ",", "RequirementsManager", ")", ":", "requirements", "=", "list", "(", "requirements", ")", "elif", "not", "isinstance", "(", "requirements", ",", "list", ")", ":", "requirements", "=", "[", "requirements", "]", "for", "req", "in", "requirements", ":", "name", "=", "req", ".", "project_name", "if", "not", "isinstance", "(", "req", ",", "BumpRequirement", ")", ":", "req", "=", "BumpRequirement", "(", "req", ",", "required", "=", "required", ")", "elif", "required", "is", "not", "None", ":", "req", ".", "required", "=", "required", "add", "=", "True", "if", "name", "in", "self", ".", "requirements", ":", "for", "existing_req", "in", "self", ".", "requirements", "[", "name", "]", ":", "if", "req", "==", "existing_req", ":", "add", "=", "False", "break", "# Need to replace existing as the new req will be used to bump next, and req.required could be", "# updated.", "replace", "=", "False", "# Two pins: Use highest pinned version", "if", "(", "req", ".", "specs", "and", "req", ".", "specs", "[", "0", "]", "[", "0", "]", "==", "'=='", "and", "existing_req", ".", "specs", "and", "existing_req", ".", "specs", "[", "0", "]", "[", "0", "]", "==", "'=='", ")", ":", "if", "pkg_resources", ".", "parse_version", "(", "req", ".", "specs", "[", "0", "]", "[", "1", "]", ")", "<", "pkg_resources", ".", "parse_version", "(", "existing_req", ".", "specs", "[", "0", "]", "[", "1", "]", ")", ":", "req", ".", "requirement", "=", "existing_req", ".", "requirement", "replace", "=", "True", "# Replace Any", "if", "not", "(", "req", ".", "specs", "and", "existing_req", ".", "specs", ")", ":", "if", "existing_req", ".", "specs", ":", "req", ".", "requirement", "=", "existing_req", ".", "requirement", "replace", "=", "True", "if", "replace", ":", "req", ".", "required", "|=", "existing_req", ".", "required", "if", "existing_req", ".", "required_by", "and", "not", "req", ".", "required_by", ":", "req", ".", "required_by", "=", "existing_req", ".", "required_by", "self", ".", "requirements", "[", "name", "]", ".", "remove", "(", "existing_req", ")", "break", "if", "add", ":", "self", ".", "requirements", "[", "name", "]", ".", "append", "(", "req", ")" ]
Add requirements to be managed :param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement` :param bool required: Set required flag for each requirement if provided.
[ "Add", "requirements", "to", "be", "managed" ]
python
valid
41.581818
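The "two pins: use highest pinned version" rule from add(), sketched directly with pkg_resources (BumpRequirement wraps pkg_resources.Requirement, so .specs has the same shape):

import pkg_resources

old = pkg_resources.Requirement.parse("requests==2.19.0")
new = pkg_resources.Requirement.parse("requests==2.21.0")
# specs is a list of (operator, version) tuples, e.g. [('==', '2.21.0')].
keep = max(old, new,
           key=lambda r: pkg_resources.parse_version(r.specs[0][1]))
print(keep)   # requests==2.21.0 -- the higher pin wins, as in add()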
spotify/luigi
luigi/configuration/core.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/configuration/core.py#L61-L87
def add_config_path(path): """Select config parser by file extension and add path into parser. """ if not os.path.isfile(path): warnings.warn("Config file does not exist: {path}".format(path=path)) return False # select parser by file extension _base, ext = os.path.splitext(path) if ext and ext[1:] in PARSERS: parser = ext[1:] else: parser = PARSER parser_class = PARSERS[parser] _check_parser(parser_class, parser) if parser != PARSER: msg = ( "Config for {added} parser added, but used {used} parser. " "Set up right parser via env var: " "export LUIGI_CONFIG_PARSER={added}" ) warnings.warn(msg.format(added=parser, used=PARSER)) # add config path to parser parser_class.add_config_path(path) return True
[ "def", "add_config_path", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "warnings", ".", "warn", "(", "\"Config file does not exist: {path}\"", ".", "format", "(", "path", "=", "path", ")", ")", "return", "False", "# select parser by file extension", "_base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "if", "ext", "and", "ext", "[", "1", ":", "]", "in", "PARSERS", ":", "parser", "=", "ext", "[", "1", ":", "]", "else", ":", "parser", "=", "PARSER", "parser_class", "=", "PARSERS", "[", "parser", "]", "_check_parser", "(", "parser_class", ",", "parser", ")", "if", "parser", "!=", "PARSER", ":", "msg", "=", "(", "\"Config for {added} parser added, but used {used} parser. \"", "\"Set up right parser via env var: \"", "\"export LUIGI_CONFIG_PARSER={added}\"", ")", "warnings", ".", "warn", "(", "msg", ".", "format", "(", "added", "=", "parser", ",", "used", "=", "PARSER", ")", ")", "# add config path to parser", "parser_class", ".", "add_config_path", "(", "path", ")", "return", "True" ]
Select config parser by file extension and add path into parser.
[ "Select", "config", "parser", "by", "file", "extension", "and", "add", "path", "into", "parser", "." ]
python
train
30.740741
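Typical usage of add_config_path, which luigi re-exports from luigi.configuration; the path is a placeholder:

import luigi.configuration

# A .cfg path selects the default ConfigParser backend; a .toml path would pick
# the TOML parser by extension (with a warning if LUIGI_CONFIG_PARSER disagrees).
luigi.configuration.add_config_path("/etc/luigi/client.cfg")   # placeholder path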
PureStorage-OpenConnect/rest-client
purestorage/purestorage.py
https://github.com/PureStorage-OpenConnect/rest-client/blob/097d5f2bc6facf607d7e4a92567b09fb8cf5cb34/purestorage/purestorage.py#L3211-L3232
def get_certificate(self, **kwargs): """Get the attributes of the current array certificate. :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET cert** :type \*\*kwargs: optional :returns: A dictionary describing the configured array certificate. :rtype: ResponseDict .. note:: Requires use of REST API 1.3 or later. """ if self._rest_version >= LooseVersion("1.12"): return self._request("GET", "cert/{0}".format(kwargs.pop('name', 'management')), kwargs) else: return self._request("GET", "cert", kwargs)
[ "def", "get_certificate", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.12\"", ")", ":", "return", "self", ".", "_request", "(", "\"GET\"", ",", "\"cert/{0}\"", ".", "format", "(", "kwargs", ".", "pop", "(", "'name'", ",", "'management'", ")", ")", ",", "kwargs", ")", "else", ":", "return", "self", ".", "_request", "(", "\"GET\"", ",", "\"cert\"", ",", "kwargs", ")" ]
Get the attributes of the current array certificate. :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET cert** :type \*\*kwargs: optional :returns: A dictionary describing the configured array certificate. :rtype: ResponseDict .. note:: Requires use of REST API 1.3 or later.
[ "Get", "the", "attributes", "of", "the", "current", "array", "certificate", "." ]
python
train
32.590909
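Usage sketch for get_certificate; the array address and API token are placeholders:

import purestorage

array = purestorage.FlashArray("flasharray.example.com", api_token="<token>")
cert = array.get_certificate()   # the default 'management' certificate on REST >= 1.12
print(cert)                      # ResponseDict of certificate attributes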
tensorflow/probability
tensorflow_probability/python/distributions/hidden_markov_model.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L926-L933
def _extract_log_probs(num_states, dist): """Tabulate log probabilities from a batch of distributions.""" states = tf.reshape(tf.range(num_states), tf.concat([[num_states], tf.ones_like(dist.batch_shape_tensor())], axis=0)) return distribution_util.move_dimension(dist.log_prob(states), 0, -1)
[ "def", "_extract_log_probs", "(", "num_states", ",", "dist", ")", ":", "states", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "num_states", ")", ",", "tf", ".", "concat", "(", "[", "[", "num_states", "]", ",", "tf", ".", "ones_like", "(", "dist", ".", "batch_shape_tensor", "(", ")", ")", "]", ",", "axis", "=", "0", ")", ")", "return", "distribution_util", ".", "move_dimension", "(", "dist", ".", "log_prob", "(", "states", ")", ",", "0", ",", "-", "1", ")" ]
Tabulate log probabilities from a batch of distributions.
[ "Tabulate", "log", "probabilities", "from", "a", "batch", "of", "distributions", "." ]
python
test
47.5
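What _extract_log_probs computes, sketched with public TFP ops: a table of shape batch_shape + [num_states] of per-state log probabilities. The Normal observation model is an arbitrary stand-in for an HMM emission distribution:

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

num_states = 3
dist = tfd.Normal(loc=[0., 5., 10.], scale=1.)        # batch of 3 "states"
states = tf.cast(tf.range(num_states), tf.float32)    # values to tabulate
# log_prob broadcasts [num_states, 1] against the batch, giving [state, batch];
# the transpose mirrors distribution_util.move_dimension(..., 0, -1).
table = tf.transpose(dist.log_prob(states[:, None]))  # shape [3, 3]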
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5466-L5471
def renderModelHasComponent(self, pchRenderModelName, pchComponentName): """Returns true if the render model has a component with the specified name""" fn = self.function_table.renderModelHasComponent result = fn(pchRenderModelName, pchComponentName) return result
[ "def", "renderModelHasComponent", "(", "self", ",", "pchRenderModelName", ",", "pchComponentName", ")", ":", "fn", "=", "self", ".", "function_table", ".", "renderModelHasComponent", "result", "=", "fn", "(", "pchRenderModelName", ",", "pchComponentName", ")", "return", "result" ]
Returns true if the render model has a component with the specified name
[ "Returns", "true", "if", "the", "render", "model", "has", "a", "component", "with", "the", "specified", "name" ]
python
train
48.666667
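Hedged usage sketch for renderModelHasComponent; it needs a running SteamVR runtime, the names are illustrative, and depending on the wrapper version the string arguments may need to be byte strings:

import openvr

openvr.init(openvr.VRApplication_Utility)
models = openvr.VRRenderModels()
if models.renderModelHasComponent("vr_controller_vive_1_5", "trigger"):
    print("model exposes a 'trigger' component")
openvr.shutdown()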
pypa/pipenv
pipenv/patched/pipfile/api.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/pipfile/api.py#L176-L181
def lock(self): """Returns a JSON representation of the Pipfile.""" data = self.data data['_meta']['hash'] = {"sha256": self.hash} data['_meta']['pipfile-spec'] = 6 return json.dumps(data, indent=4, separators=(',', ': '))
[ "def", "lock", "(", "self", ")", ":", "data", "=", "self", ".", "data", "data", "[", "'_meta'", "]", "[", "'hash'", "]", "=", "{", "\"sha256\"", ":", "self", ".", "hash", "}", "data", "[", "'_meta'", "]", "[", "'pipfile-spec'", "]", "=", "6", "return", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")" ]
Returns a JSON representation of the Pipfile.
[ "Returns", "a", "JSON", "representation", "of", "the", "Pipfile", "." ]
python
train
42.833333
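Usage sketch for lock(), following the upstream pipfile API that this patched copy mirrors:

import pipfile

pf = pipfile.load("Pipfile")   # parse the Pipfile in the current directory
print(pf.lock())               # Pipfile.lock-style JSON including the sha256 hash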