Dataset columns:
  repo              string, 7 to 54 characters
  path              string, 4 to 192 characters
  url               string, 87 to 284 characters
  code              string, 78 to 104k characters
  code_tokens       list
  docstring         string, 1 to 46.9k characters
  docstring_tokens  list
  language          categorical string, 1 distinct value
  partition         categorical string, 3 distinct values
marazt/object-mapper
mapper/object_mapper.py
https://github.com/marazt/object-mapper/blob/b02c6d68c5bf86462aa8080aff3e93b133afd43e/mapper/object_mapper.py#L84-L108
def create_map(self, type_from, type_to, mapping=None): """Method for adding mapping definitions :param type_from: source type :param type_to: target type :param mapping: dictionary of mapping definitions in a form {'target_property_name', lambda function from rhe source} :return: None """ key_from = type_from.__name__ key_to = type_to.__name__ if mapping is None: mapping = {} if key_from in self.mappings: inner_map = self.mappings[key_from] if key_to in inner_map: raise ObjectMapperException("Mapping for {0} -> {1} already exists".format(key_from, key_to)) else: inner_map[key_to] = mapping else: self.mappings[key_from] = {} self.mappings[key_from][key_to] = mapping
[ "def", "create_map", "(", "self", ",", "type_from", ",", "type_to", ",", "mapping", "=", "None", ")", ":", "key_from", "=", "type_from", ".", "__name__", "key_to", "=", "type_to", ".", "__name__", "if", "mapping", "is", "None", ":", "mapping", "=", "{", "}", "if", "key_from", "in", "self", ".", "mappings", ":", "inner_map", "=", "self", ".", "mappings", "[", "key_from", "]", "if", "key_to", "in", "inner_map", ":", "raise", "ObjectMapperException", "(", "\"Mapping for {0} -> {1} already exists\"", ".", "format", "(", "key_from", ",", "key_to", ")", ")", "else", ":", "inner_map", "[", "key_to", "]", "=", "mapping", "else", ":", "self", ".", "mappings", "[", "key_from", "]", "=", "{", "}", "self", ".", "mappings", "[", "key_from", "]", "[", "key_to", "]", "=", "mapping" ]
Method for adding mapping definitions :param type_from: source type :param type_to: target type :param mapping: dictionary of mapping definitions in a form {'target_property_name', lambda function from the source} :return: None
[ "Method", "for", "adding", "mapping", "definitions", ":", "param", "type_from", ":", "source", "type", ":", "param", "type_to", ":", "target", "type", ":", "param", "mapping", ":", "dictionary", "of", "mapping", "definitions", "in", "a", "form", "{", "target_property_name", "lambda", "function", "from", "rhe", "source", "}", ":", "return", ":", "None" ]
python
valid
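For the create_map() entry above, a minimal usage sketch follows. It assumes that ObjectMapper.map() instantiates the target type and applies the per-property lambdas registered with create_map(), as described in the project README; the User/UserDto classes and their attributes are made up for illustration.

# Minimal sketch; classes and attribute names here are hypothetical.
from mapper.object_mapper import ObjectMapper

class User(object):
    def __init__(self, name, email):
        self.name = name
        self.email = email

class UserDto(object):
    def __init__(self):
        self.name = ''
        self.contact = ''

mapper = ObjectMapper()
# keys are target property names, values are lambdas over the source object
mapper.create_map(User, UserDto, {'contact': lambda u: u.email})
dto = mapper.map(User('alice', 'alice@example.com'), UserDto)
print(dto.contact)   # alice@example.com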
dpgaspar/Flask-AppBuilder
flask_appbuilder/forms.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/forms.py#L236-L277
def create_form(self, label_columns=None, inc_columns=None, description_columns=None, validators_columns=None, extra_fields=None, filter_rel_fields=None): """ Converts a model to a form given :param label_columns: A dictionary with the column's labels. :param inc_columns: A list with the columns to include :param description_columns: A dictionary with a description for cols. :param validators_columns: A dictionary with WTForms validators ex:: validators={'personal_email':EmailValidator} :param extra_fields: A dictionary containing column names and a WTForm Form fields to be added to the form, these fields do not exist on the model itself ex:: extra_fields={'some_col':BooleanField('Some Col', default=False)} :param filter_rel_fields: A filter to be applied on relationships """ label_columns = label_columns or {} inc_columns = inc_columns or [] description_columns = description_columns or {} validators_columns = validators_columns or {} extra_fields = extra_fields or {} form_props = {} for col_name in inc_columns: if col_name in extra_fields: form_props[col_name] = extra_fields.get(col_name) else: self._convert_col(col_name, self._get_label(col_name, label_columns), self._get_description(col_name, description_columns), self._get_validators(col_name, validators_columns), filter_rel_fields, form_props) return type('DynamicForm', (DynamicForm,), form_props)
[ "def", "create_form", "(", "self", ",", "label_columns", "=", "None", ",", "inc_columns", "=", "None", ",", "description_columns", "=", "None", ",", "validators_columns", "=", "None", ",", "extra_fields", "=", "None", ",", "filter_rel_fields", "=", "None", ")", ":", "label_columns", "=", "label_columns", "or", "{", "}", "inc_columns", "=", "inc_columns", "or", "[", "]", "description_columns", "=", "description_columns", "or", "{", "}", "validators_columns", "=", "validators_columns", "or", "{", "}", "extra_fields", "=", "extra_fields", "or", "{", "}", "form_props", "=", "{", "}", "for", "col_name", "in", "inc_columns", ":", "if", "col_name", "in", "extra_fields", ":", "form_props", "[", "col_name", "]", "=", "extra_fields", ".", "get", "(", "col_name", ")", "else", ":", "self", ".", "_convert_col", "(", "col_name", ",", "self", ".", "_get_label", "(", "col_name", ",", "label_columns", ")", ",", "self", ".", "_get_description", "(", "col_name", ",", "description_columns", ")", ",", "self", ".", "_get_validators", "(", "col_name", ",", "validators_columns", ")", ",", "filter_rel_fields", ",", "form_props", ")", "return", "type", "(", "'DynamicForm'", ",", "(", "DynamicForm", ",", ")", ",", "form_props", ")" ]
Converts a model to a form given :param label_columns: A dictionary with the column's labels. :param inc_columns: A list with the columns to include :param description_columns: A dictionary with a description for cols. :param validators_columns: A dictionary with WTForms validators ex:: validators={'personal_email':EmailValidator} :param extra_fields: A dictionary containing column names and a WTForm Form fields to be added to the form, these fields do not exist on the model itself ex:: extra_fields={'some_col':BooleanField('Some Col', default=False)} :param filter_rel_fields: A filter to be applied on relationships
[ "Converts", "a", "model", "to", "a", "form", "given" ]
python
train
PmagPy/PmagPy
pmagpy/contribution_builder.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1646-L1652
def delete_row(self, ind): """ remove self.df row at ind inplace """ self.df = pd.concat([self.df[:ind], self.df[ind+1:]], sort=True) return self.df
[ "def", "delete_row", "(", "self", ",", "ind", ")", ":", "self", ".", "df", "=", "pd", ".", "concat", "(", "[", "self", ".", "df", "[", ":", "ind", "]", ",", "self", ".", "df", "[", "ind", "+", "1", ":", "]", "]", ",", "sort", "=", "True", ")", "return", "self", ".", "df" ]
remove self.df row at ind inplace
[ "remove", "self", ".", "df", "row", "at", "ind", "inplace" ]
python
train
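The delete_row() entry above is a plain positional pandas slicing idiom; the standalone sketch below reproduces it outside PmagPy. Note that despite the "inplace" wording in the docstring, the frame is rebuilt and returned, and sort=True reorders the columns alphabetically.

# Standalone illustration of the row deletion used above: concatenate the
# slices before and after positional index `ind`.
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
ind = 1
df = pd.concat([df[:ind], df[ind + 1:]], sort=True)
print(df)
#    a  b
# 0  1  4
# 2  3  6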
opengridcc/opengrid
opengrid/library/regression.py
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L459-L473
def _modeldesc_to_dict(self, md): """Return a string representation of a patsy ModelDesc object""" d = {'lhs_termlist': [md.lhs_termlist[0].factors[0].name()]} rhs_termlist = [] # add other terms, if any for term in md.rhs_termlist[:]: if len(term.factors) == 0: # intercept, represent by empty string rhs_termlist.append('') else: rhs_termlist.append(term.factors[0].name()) d['rhs_termlist'] = rhs_termlist return d
[ "def", "_modeldesc_to_dict", "(", "self", ",", "md", ")", ":", "d", "=", "{", "'lhs_termlist'", ":", "[", "md", ".", "lhs_termlist", "[", "0", "]", ".", "factors", "[", "0", "]", ".", "name", "(", ")", "]", "}", "rhs_termlist", "=", "[", "]", "# add other terms, if any", "for", "term", "in", "md", ".", "rhs_termlist", "[", ":", "]", ":", "if", "len", "(", "term", ".", "factors", ")", "==", "0", ":", "# intercept, represent by empty string", "rhs_termlist", ".", "append", "(", "''", ")", "else", ":", "rhs_termlist", ".", "append", "(", "term", ".", "factors", "[", "0", "]", ".", "name", "(", ")", ")", "d", "[", "'rhs_termlist'", "]", "=", "rhs_termlist", "return", "d" ]
Return a string representation of a patsy ModelDesc object
[ "Return", "a", "string", "representation", "of", "a", "patsy", "ModelDesc", "object" ]
python
train
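The _modeldesc_to_dict() entry above walks a patsy ModelDesc term by term; the sketch below performs the same traversal on a formula built directly with patsy (an installed patsy is the only assumption) and produces the same dict layout.

# Same traversal as _modeldesc_to_dict(), written against a formula string.
from patsy import ModelDesc

md = ModelDesc.from_formula('y ~ x1 + x2')
d = {'lhs_termlist': [md.lhs_termlist[0].factors[0].name()]}
rhs_termlist = []
for term in md.rhs_termlist:
    # the intercept term has no factors and is represented by an empty string
    rhs_termlist.append('' if len(term.factors) == 0 else term.factors[0].name())
d['rhs_termlist'] = rhs_termlist
print(d)   # {'lhs_termlist': ['y'], 'rhs_termlist': ['', 'x1', 'x2']}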
apache/incubator-mxnet
example/ssd/symbol/legacy_vgg16_ssd_300.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/legacy_vgg16_ssd_300.py#L175-L207
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs): """ Single-shot multi-box detection with VGG 16 layers ConvNet This is a modified version, with fc6/fc7 layers replaced by conv layers And the network is slightly smaller than original VGG 16 network This is the detection network Parameters: ---------- num_classes: int number of object classes not including background nms_thresh : float threshold of overlap for non-maximum suppression force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns: ---------- mx.Symbol """ net = get_symbol_train(num_classes) cls_preds = net.get_internals()["multibox_cls_pred_output"] loc_preds = net.get_internals()["multibox_loc_pred_output"] anchor_boxes = net.get_internals()["multibox_anchors_output"] cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob') out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \ name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk) return out
[ "def", "get_symbol", "(", "num_classes", "=", "20", ",", "nms_thresh", "=", "0.5", ",", "force_suppress", "=", "False", ",", "nms_topk", "=", "400", ",", "*", "*", "kwargs", ")", ":", "net", "=", "get_symbol_train", "(", "num_classes", ")", "cls_preds", "=", "net", ".", "get_internals", "(", ")", "[", "\"multibox_cls_pred_output\"", "]", "loc_preds", "=", "net", ".", "get_internals", "(", ")", "[", "\"multibox_loc_pred_output\"", "]", "anchor_boxes", "=", "net", ".", "get_internals", "(", ")", "[", "\"multibox_anchors_output\"", "]", "cls_prob", "=", "mx", ".", "symbol", ".", "softmax", "(", "data", "=", "cls_preds", ",", "axis", "=", "1", ",", "name", "=", "'cls_prob'", ")", "out", "=", "mx", ".", "symbol", ".", "contrib", ".", "MultiBoxDetection", "(", "*", "[", "cls_prob", ",", "loc_preds", ",", "anchor_boxes", "]", ",", "name", "=", "\"detection\"", ",", "nms_threshold", "=", "nms_thresh", ",", "force_suppress", "=", "force_suppress", ",", "variances", "=", "(", "0.1", ",", "0.1", ",", "0.2", ",", "0.2", ")", ",", "nms_topk", "=", "nms_topk", ")", "return", "out" ]
Single-shot multi-box detection with VGG 16 layers ConvNet This is a modified version, with fc6/fc7 layers replaced by conv layers And the network is slightly smaller than original VGG 16 network This is the detection network Parameters: ---------- num_classes: int number of object classes not including background nms_thresh : float threshold of overlap for non-maximum suppression force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns: ---------- mx.Symbol
[ "Single", "-", "shot", "multi", "-", "box", "detection", "with", "VGG", "16", "layers", "ConvNet", "This", "is", "a", "modified", "version", "with", "fc6", "/", "fc7", "layers", "replaced", "by", "conv", "layers", "And", "the", "network", "is", "slightly", "smaller", "than", "original", "VGG", "16", "network", "This", "is", "the", "detection", "network" ]
python
train
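A hedged call sketch for the get_symbol() entry above: it assumes an MXNet 1.x installation with the contrib MultiBoxDetection operator and that the script is run from the example/ssd directory so the symbol package resolves.

# Hypothetical call of the detection-network builder above; requires MXNet 1.x
# and the example/ssd directory on the import path.
from symbol.legacy_vgg16_ssd_300 import get_symbol

net = get_symbol(num_classes=20, nms_thresh=0.45, force_suppress=False, nms_topk=400)
print(net.list_outputs())   # expect the final 'detection_output' of the symbol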
tensorflow/probability
tensorflow_probability/python/distributions/distribution.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/distribution.py#L1392-L1411
def _recursively_replace_dict_for_pretty_dict(x): """Recursively replace `dict`s with `_PrettyDict`.""" # We use "PrettyDict" because collections.OrderedDict repr/str has the word # "OrderedDict" in it. We only want to print "OrderedDict" if in fact the # input really is an OrderedDict. if isinstance(x, dict): return _PrettyDict({ k: _recursively_replace_dict_for_pretty_dict(v) for k, v in x.items()}) if (isinstance(x, collections.Sequence) and not isinstance(x, six.string_types)): args = (_recursively_replace_dict_for_pretty_dict(x_) for x_ in x) is_named_tuple = (isinstance(x, tuple) and hasattr(x, "_asdict") and hasattr(x, "_fields")) return type(x)(*args) if is_named_tuple else type(x)(args) if isinstance(x, collections.Mapping): return type(x)(**{k: _recursively_replace_dict_for_pretty_dict(v) for k, v in x.items()}) return x
[ "def", "_recursively_replace_dict_for_pretty_dict", "(", "x", ")", ":", "# We use \"PrettyDict\" because collections.OrderedDict repr/str has the word", "# \"OrderedDict\" in it. We only want to print \"OrderedDict\" if in fact the", "# input really is an OrderedDict.", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "_PrettyDict", "(", "{", "k", ":", "_recursively_replace_dict_for_pretty_dict", "(", "v", ")", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "}", ")", "if", "(", "isinstance", "(", "x", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ")", ":", "args", "=", "(", "_recursively_replace_dict_for_pretty_dict", "(", "x_", ")", "for", "x_", "in", "x", ")", "is_named_tuple", "=", "(", "isinstance", "(", "x", ",", "tuple", ")", "and", "hasattr", "(", "x", ",", "\"_asdict\"", ")", "and", "hasattr", "(", "x", ",", "\"_fields\"", ")", ")", "return", "type", "(", "x", ")", "(", "*", "args", ")", "if", "is_named_tuple", "else", "type", "(", "x", ")", "(", "args", ")", "if", "isinstance", "(", "x", ",", "collections", ".", "Mapping", ")", ":", "return", "type", "(", "x", ")", "(", "*", "*", "{", "k", ":", "_recursively_replace_dict_for_pretty_dict", "(", "v", ")", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "}", ")", "return", "x" ]
Recursively replace `dict`s with `_PrettyDict`.
[ "Recursively", "replace", "dict", "s", "with", "_PrettyDict", "." ]
python
test
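The helper above relies on the bare collections.Sequence/collections.Mapping aliases, which were removed in Python 3.10; the sketch below reproduces the core recursion with collections.abc and a local PrettyDict stand-in for TFP's private _PrettyDict.

# Standalone sketch of the recursion above, using collections.abc; PrettyDict
# is a stand-in for the private _PrettyDict class.
import collections.abc

class PrettyDict(dict):
    """Stand-in for TFP's private _PrettyDict."""

def replace_dicts(x):
    if isinstance(x, dict):
        return PrettyDict({k: replace_dicts(v) for k, v in x.items()})
    if isinstance(x, collections.abc.Sequence) and not isinstance(x, str):
        args = (replace_dicts(v) for v in x)
        is_named_tuple = isinstance(x, tuple) and hasattr(x, '_fields')
        return type(x)(*args) if is_named_tuple else type(x)(args)
    return x

print(type(replace_dicts({'a': [{'b': 1}]})['a'][0]).__name__)   # PrettyDict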
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L70-L76
def _prep_inputs(align_bams, ref_file, items): """Ensure inputs to calling are indexed as expected. """ broad_runner = broad.runner_from_path("picard", items[0]["config"]) broad_runner.run_fn("picard_index_ref", ref_file) for x in align_bams: bam.index(x, items[0]["config"])
[ "def", "_prep_inputs", "(", "align_bams", ",", "ref_file", ",", "items", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")", "broad_runner", ".", "run_fn", "(", "\"picard_index_ref\"", ",", "ref_file", ")", "for", "x", "in", "align_bams", ":", "bam", ".", "index", "(", "x", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")" ]
Ensure inputs to calling are indexed as expected.
[ "Ensure", "inputs", "to", "calling", "are", "indexed", "as", "expected", "." ]
python
train
bioidiap/gridtk
gridtk/tools.py
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/tools.py#L316-L336
def qdel(jobid, context='grid'): """Halts a given job. Keyword parameters: jobid The job identifier as returned by qsub() context The setshell context in which we should try a 'qsub'. Normally you don't need to change the default. This variable can also be set to a context dictionary in which case we just setup using that context instead of probing for a new one, what can be fast. """ scmd = ['qdel', '%d' % jobid] logger.debug("Qdel command '%s'", ' '.join(scmd)) from .setshell import sexec sexec(context, scmd, error_on_nonzero=False)
[ "def", "qdel", "(", "jobid", ",", "context", "=", "'grid'", ")", ":", "scmd", "=", "[", "'qdel'", ",", "'%d'", "%", "jobid", "]", "logger", ".", "debug", "(", "\"Qdel command '%s'\"", ",", "' '", ".", "join", "(", "scmd", ")", ")", "from", ".", "setshell", "import", "sexec", "sexec", "(", "context", ",", "scmd", ",", "error_on_nonzero", "=", "False", ")" ]
Halts a given job. Keyword parameters: jobid The job identifier as returned by qsub() context The setshell context in which we should try a 'qsub'. Normally you don't need to change the default. This variable can also be set to a context dictionary in which case we just setup using that context instead of probing for a new one, what can be fast.
[ "Halts", "a", "given", "job", "." ]
python
train
contains-io/rcli
rcli/dispatcher.py
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/dispatcher.py#L81-L104
def _run_command(argv): # type: (typing.List[str]) -> typing.Any """Run the command with the given CLI options and exit. Command functions are expected to have a __doc__ string that is parseable by docopt. Args: argv: The list of command line arguments supplied for a command. The first argument is expected to be the name of the command to be run. Note that this is different than the full arguments parsed by docopt for the entire program. Raises: ValueError: Raised if the user attempted to run an invalid command. """ command_name, argv = _get_command_and_argv(argv) _LOGGER.info('Running command "%s %s" with args: %s', settings.command, command_name, argv) subcommand = _get_subcommand(command_name) func = call.get_callable(subcommand) doc = usage.format_usage(subcommand.__doc__) args = _get_parsed_args(command_name, doc, argv) return call.call(func, args) or 0
[ "def", "_run_command", "(", "argv", ")", ":", "# type: (typing.List[str]) -> typing.Any", "command_name", ",", "argv", "=", "_get_command_and_argv", "(", "argv", ")", "_LOGGER", ".", "info", "(", "'Running command \"%s %s\" with args: %s'", ",", "settings", ".", "command", ",", "command_name", ",", "argv", ")", "subcommand", "=", "_get_subcommand", "(", "command_name", ")", "func", "=", "call", ".", "get_callable", "(", "subcommand", ")", "doc", "=", "usage", ".", "format_usage", "(", "subcommand", ".", "__doc__", ")", "args", "=", "_get_parsed_args", "(", "command_name", ",", "doc", ",", "argv", ")", "return", "call", ".", "call", "(", "func", ",", "args", ")", "or", "0" ]
Run the command with the given CLI options and exit. Command functions are expected to have a __doc__ string that is parseable by docopt. Args: argv: The list of command line arguments supplied for a command. The first argument is expected to be the name of the command to be run. Note that this is different than the full arguments parsed by docopt for the entire program. Raises: ValueError: Raised if the user attempted to run an invalid command.
[ "Run", "the", "command", "with", "the", "given", "CLI", "options", "and", "exit", "." ]
python
train
twisted/twistedchecker
twistedchecker/checkers/pycodestyleformat.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/checkers/pycodestyleformat.py#L218-L306
def modifiedBlankLines(logical_line, blank_lines, indent_level, line_number, blank_before, previous_logical, previous_indent_level): """ This function is copied from a modified pycodestyle checker for Twisted. See https://github.com/cyli/TwistySublime/blob/master/twisted_pycodestyle.py Twisted Coding Standard: Separate top-level function and class definitions with three blank lines. Method definitions inside a class are separated by two blank lines. Extra blank lines may be used (sparingly) to separate groups of related functions. Blank lines may be omitted between a bunch of related one-liners (e.g. a set of dummy implementations). Use blank lines in functions, sparingly, to indicate logical sections. Okay: def a():\n pass\n\n\n\ndef b():\n pass Okay: class A():\n pass\n\n\n\nclass B():\n pass Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass E301: class Foo:\n b = 0\n def bar():\n pass E302: def a():\n pass\n\ndef b(n):\n pass E303: def a():\n pass\n\n\n\ndef b(n):\n pass E303: def a():\n\n\n\n pass E304: @decorator\n\ndef a():\n pass E305: "comment"\n\n\ndef a():\n pass E306: variable="value"\ndef a(): pass @param logical_line: Supplied by PyCodeStyle. The content of the line it is dealing with. @param blank_lines: Supplied by PyCodeStyle. @param indent_level: Supplied by PyCodeStyle. The current indent level. @param line_number: Supplied by PyCodeStyle. The current line number. @param blank_before: Supplied by PyCodeStyle. The number of blank lines before this one. @param previous_logical: Supplied by PyCodeStyle. The previous logical line. @param previous_indent_level: Supplied by PyCodeStyle. The indent level of the previous line. """ def isClassDefDecorator(thing): return (thing.startswith('def ') or thing.startswith('class ') or thing.startswith('@')) # Don't expect blank lines before the first line if line_number == 1: return previous_is_comment = pycodestyle.DOCSTRING_REGEX.match(previous_logical) # Check blank lines after a decorator, if previous_logical.startswith('@'): if blank_before: yield 0, "E304 blank lines found after function decorator" if isClassDefDecorator(logical_line): if indent_level: # There should only be 1 line or less between docstrings and # the next function if previous_is_comment: # Previous is a comment so it has one extra indentation. at_same_indent = previous_indent_level - 4 == indent_level if ( at_same_indent and logical_line.startswith('def ') and blank_before == 2 ): # This look like a previous method with a docstring # and empty body. return if blank_before > 1: yield 0, ( "E305 too many blank lines after docstring " "(%d)" % (blank_before,)) # Between first level functions, there should be 2 blank lines. # any further indended functions can have one or zero lines else: if not (blank_before == 2 or indent_level > 4 or previous_indent_level <= indent_level): yield 0, ("E301 expected 2 blank lines, " "found %d" % (blank_before,)) # Top level, there should be 3 blank lines between class/function # definitions (but not necessarily after variable declarations) elif previous_indent_level and blank_before != 3: yield 0, ("E302 expected 3 blank lines, " "found %d" % (blank_before,)) elif blank_before > 1 and indent_level: yield 0, "E303 too many blank lines (%d)" % (blank_before,)
[ "def", "modifiedBlankLines", "(", "logical_line", ",", "blank_lines", ",", "indent_level", ",", "line_number", ",", "blank_before", ",", "previous_logical", ",", "previous_indent_level", ")", ":", "def", "isClassDefDecorator", "(", "thing", ")", ":", "return", "(", "thing", ".", "startswith", "(", "'def '", ")", "or", "thing", ".", "startswith", "(", "'class '", ")", "or", "thing", ".", "startswith", "(", "'@'", ")", ")", "# Don't expect blank lines before the first line", "if", "line_number", "==", "1", ":", "return", "previous_is_comment", "=", "pycodestyle", ".", "DOCSTRING_REGEX", ".", "match", "(", "previous_logical", ")", "# Check blank lines after a decorator,", "if", "previous_logical", ".", "startswith", "(", "'@'", ")", ":", "if", "blank_before", ":", "yield", "0", ",", "\"E304 blank lines found after function decorator\"", "if", "isClassDefDecorator", "(", "logical_line", ")", ":", "if", "indent_level", ":", "# There should only be 1 line or less between docstrings and", "# the next function", "if", "previous_is_comment", ":", "# Previous is a comment so it has one extra indentation.", "at_same_indent", "=", "previous_indent_level", "-", "4", "==", "indent_level", "if", "(", "at_same_indent", "and", "logical_line", ".", "startswith", "(", "'def '", ")", "and", "blank_before", "==", "2", ")", ":", "# This look like a previous method with a docstring", "# and empty body.", "return", "if", "blank_before", ">", "1", ":", "yield", "0", ",", "(", "\"E305 too many blank lines after docstring \"", "\"(%d)\"", "%", "(", "blank_before", ",", ")", ")", "# Between first level functions, there should be 2 blank lines.", "# any further indended functions can have one or zero lines", "else", ":", "if", "not", "(", "blank_before", "==", "2", "or", "indent_level", ">", "4", "or", "previous_indent_level", "<=", "indent_level", ")", ":", "yield", "0", ",", "(", "\"E301 expected 2 blank lines, \"", "\"found %d\"", "%", "(", "blank_before", ",", ")", ")", "# Top level, there should be 3 blank lines between class/function", "# definitions (but not necessarily after variable declarations)", "elif", "previous_indent_level", "and", "blank_before", "!=", "3", ":", "yield", "0", ",", "(", "\"E302 expected 3 blank lines, \"", "\"found %d\"", "%", "(", "blank_before", ",", ")", ")", "elif", "blank_before", ">", "1", "and", "indent_level", ":", "yield", "0", ",", "\"E303 too many blank lines (%d)\"", "%", "(", "blank_before", ",", ")" ]
This function is copied from a modified pycodestyle checker for Twisted. See https://github.com/cyli/TwistySublime/blob/master/twisted_pycodestyle.py Twisted Coding Standard: Separate top-level function and class definitions with three blank lines. Method definitions inside a class are separated by two blank lines. Extra blank lines may be used (sparingly) to separate groups of related functions. Blank lines may be omitted between a bunch of related one-liners (e.g. a set of dummy implementations). Use blank lines in functions, sparingly, to indicate logical sections. Okay: def a():\n pass\n\n\n\ndef b():\n pass Okay: class A():\n pass\n\n\n\nclass B():\n pass Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass E301: class Foo:\n b = 0\n def bar():\n pass E302: def a():\n pass\n\ndef b(n):\n pass E303: def a():\n pass\n\n\n\ndef b(n):\n pass E303: def a():\n\n\n\n pass E304: @decorator\n\ndef a():\n pass E305: "comment"\n\n\ndef a():\n pass E306: variable="value"\ndef a(): pass @param logical_line: Supplied by PyCodeStyle. The content of the line it is dealing with. @param blank_lines: Supplied by PyCodeStyle. @param indent_level: Supplied by PyCodeStyle. The current indent level. @param line_number: Supplied by PyCodeStyle. The current line number. @param blank_before: Supplied by PyCodeStyle. The number of blank lines before this one. @param previous_logical: Supplied by PyCodeStyle. The previous logical line. @param previous_indent_level: Supplied by PyCodeStyle. The indent level of the previous line.
[ "This", "function", "is", "copied", "from", "a", "modified", "pycodestyle", "checker", "for", "Twisted", ".", "See", "https", ":", "//", "github", ".", "com", "/", "cyli", "/", "TwistySublime", "/", "blob", "/", "master", "/", "twisted_pycodestyle", ".", "py", "Twisted", "Coding", "Standard", ":" ]
python
train
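Checks with this signature are normally wired into pycodestyle via register_check, which inspects the argument names to decide that the function is a logical-line check; below is a minimal hedged sketch with a placeholder check body and a hypothetical file path.

# Minimal sketch of registering a logical-line check with pycodestyle; the
# check body and the file path are placeholders.
import pycodestyle

def check_blank_lines(logical_line, blank_before):
    # a first argument named `logical_line` marks this as a logical-line check
    if blank_before > 4:
        yield 0, "W999 suspiciously many blank lines (%d)" % blank_before

pycodestyle.register_check(check_blank_lines)
style = pycodestyle.StyleGuide()
report = style.check_files(['some_module.py'])   # hypothetical path
print(report.total_errors)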
ynop/audiomate
audiomate/processing/pipeline/base.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/processing/pipeline/base.py#L372-L387
def _create_buffers(self): """ Create a buffer for every step in the pipeline. """ self.buffers = {} for step in self.graph.nodes(): num_buffers = 1 if isinstance(step, Reduction): num_buffers = len(step.parents) self.buffers[step] = Buffer(step.min_frames, step.left_context, step.right_context, num_buffers) return self.buffers
[ "def", "_create_buffers", "(", "self", ")", ":", "self", ".", "buffers", "=", "{", "}", "for", "step", "in", "self", ".", "graph", ".", "nodes", "(", ")", ":", "num_buffers", "=", "1", "if", "isinstance", "(", "step", ",", "Reduction", ")", ":", "num_buffers", "=", "len", "(", "step", ".", "parents", ")", "self", ".", "buffers", "[", "step", "]", "=", "Buffer", "(", "step", ".", "min_frames", ",", "step", ".", "left_context", ",", "step", ".", "right_context", ",", "num_buffers", ")", "return", "self", ".", "buffers" ]
Create a buffer for every step in the pipeline.
[ "Create", "a", "buffer", "for", "every", "step", "in", "the", "pipeline", "." ]
python
train
biolink/ontobio
ontobio/neo/scigraph_ontology.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/neo/scigraph_ontology.py#L89-L96
def _neighbors_graph(self, **params) -> Dict: """ Get neighbors of a node parameters are directly passed through to SciGraph: e.g. depth, relationshipType """ response = self._get_response("graph/neighbors", format="json", **params) return response.json()
[ "def", "_neighbors_graph", "(", "self", ",", "*", "*", "params", ")", "->", "Dict", ":", "response", "=", "self", ".", "_get_response", "(", "\"graph/neighbors\"", ",", "format", "=", "\"json\"", ",", "*", "*", "params", ")", "return", "response", ".", "json", "(", ")" ]
Get neighbors of a node parameters are directly passed through to SciGraph: e.g. depth, relationshipType
[ "Get", "neighbors", "of", "a", "node" ]
python
train
oceanprotocol/squid-py
squid_py/agreements/register_service_agreement.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/register_service_agreement.py#L174-L215
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn): """ Iterates over pending service agreements recorded in the local storage, fetches their service definitions, and subscribes to service agreement events. :param storage_path: storage path for the internal db, str :param account: :param actor_type: :param did_resolver_fn: :return: """ keeper = Keeper.get_instance() # service_agreement_id, did, service_definition_id, price, files, start_time, status for (agreement_id, did, _, price, files, start_time, _) in get_service_agreements(storage_path): ddo = did_resolver_fn(did) for service in ddo.services: if service.type != 'Access': continue consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data( agreement_id) if not consumer_provider_tuple: continue consumer, provider = consumer_provider_tuple did = ddo.did service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary()) condition_ids = service_agreement.generate_agreement_condition_ids( agreement_id, did, consumer, provider, keeper) if actor_type == 'consumer': assert account.address == consumer process_agreement_events_consumer( provider, agreement_id, did, service_agreement, price, account, condition_ids, None) else: assert account.address == provider process_agreement_events_publisher( account, agreement_id, did, service_agreement, price, consumer, condition_ids)
[ "def", "execute_pending_service_agreements", "(", "storage_path", ",", "account", ",", "actor_type", ",", "did_resolver_fn", ")", ":", "keeper", "=", "Keeper", ".", "get_instance", "(", ")", "# service_agreement_id, did, service_definition_id, price, files, start_time, status", "for", "(", "agreement_id", ",", "did", ",", "_", ",", "price", ",", "files", ",", "start_time", ",", "_", ")", "in", "get_service_agreements", "(", "storage_path", ")", ":", "ddo", "=", "did_resolver_fn", "(", "did", ")", "for", "service", "in", "ddo", ".", "services", ":", "if", "service", ".", "type", "!=", "'Access'", ":", "continue", "consumer_provider_tuple", "=", "keeper", ".", "escrow_access_secretstore_template", ".", "get_agreement_data", "(", "agreement_id", ")", "if", "not", "consumer_provider_tuple", ":", "continue", "consumer", ",", "provider", "=", "consumer_provider_tuple", "did", "=", "ddo", ".", "did", "service_agreement", "=", "ServiceAgreement", ".", "from_service_dict", "(", "service", ".", "as_dictionary", "(", ")", ")", "condition_ids", "=", "service_agreement", ".", "generate_agreement_condition_ids", "(", "agreement_id", ",", "did", ",", "consumer", ",", "provider", ",", "keeper", ")", "if", "actor_type", "==", "'consumer'", ":", "assert", "account", ".", "address", "==", "consumer", "process_agreement_events_consumer", "(", "provider", ",", "agreement_id", ",", "did", ",", "service_agreement", ",", "price", ",", "account", ",", "condition_ids", ",", "None", ")", "else", ":", "assert", "account", ".", "address", "==", "provider", "process_agreement_events_publisher", "(", "account", ",", "agreement_id", ",", "did", ",", "service_agreement", ",", "price", ",", "consumer", ",", "condition_ids", ")" ]
Iterates over pending service agreements recorded in the local storage, fetches their service definitions, and subscribes to service agreement events. :param storage_path: storage path for the internal db, str :param account: :param actor_type: :param did_resolver_fn: :return:
[ "Iterates", "over", "pending", "service", "agreements", "recorded", "in", "the", "local", "storage", "fetches", "their", "service", "definitions", "and", "subscribes", "to", "service", "agreement", "events", "." ]
python
train
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L715-L718
def list_domains_by_service(self, service_id): """List the domains within a service.""" content = self._fetch("/service/%s/domain" % service_id, method="GET") return map(lambda x: FastlyDomain(self, x), content)
[ "def", "list_domains_by_service", "(", "self", ",", "service_id", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/domain\"", "%", "service_id", ",", "method", "=", "\"GET\"", ")", "return", "map", "(", "lambda", "x", ":", "FastlyDomain", "(", "self", ",", "x", ")", ",", "content", ")" ]
List the domains within a service.
[ "List", "the", "domains", "within", "a", "service", "." ]
python
train
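A hedged usage sketch for the entry above; the authentication bootstrap follows the pattern in the upstream fastly-py README that this client derives from, and the API key and service id are placeholders. On Python 3 the map() result is a lazy iterator, so it is wrapped in list() before iterating.

# Hypothetical usage; API key and service id are placeholders, and the
# client bootstrap is assumed from the upstream fastly-py README.
import fastly

api = fastly.API()
api.authenticate_by_key('FASTLY_API_KEY')
domains = list(api.list_domains_by_service('SERVICE_ID'))   # map() is lazy on Python 3
for domain in domains:
    print(domain.name)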
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_netconf_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_netconf_ext.py#L53-L65
def get_netconf_client_capabilities_output_session_vendor(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities") config = get_netconf_client_capabilities output = ET.SubElement(get_netconf_client_capabilities, "output") session = ET.SubElement(output, "session") vendor = ET.SubElement(session, "vendor") vendor.text = kwargs.pop('vendor') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_netconf_client_capabilities_output_session_vendor", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_netconf_client_capabilities", "=", "ET", ".", "Element", "(", "\"get_netconf_client_capabilities\"", ")", "config", "=", "get_netconf_client_capabilities", "output", "=", "ET", ".", "SubElement", "(", "get_netconf_client_capabilities", ",", "\"output\"", ")", "session", "=", "ET", ".", "SubElement", "(", "output", ",", "\"session\"", ")", "vendor", "=", "ET", ".", "SubElement", "(", "session", ",", "\"vendor\"", ")", "vendor", ".", "text", "=", "kwargs", ".", "pop", "(", "'vendor'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
allianceauth/allianceauth
allianceauth/eveonline/autogroups/models.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/eveonline/autogroups/models.py#L32-L47
def update_groups_for_user(self, user: User, state: State = None): """ Update the Group memberships for the given users state :param user: User to update for :param state: State to update user for :return: """ if state is None: state = user.profile.state for config in self.filter(states=state): # grant user new groups for their state config.update_group_membership_for_user(user) for config in self.exclude(states=state): # ensure user does not have groups from previous state config.remove_user_from_alliance_groups(user) config.remove_user_from_corp_groups(user)
[ "def", "update_groups_for_user", "(", "self", ",", "user", ":", "User", ",", "state", ":", "State", "=", "None", ")", ":", "if", "state", "is", "None", ":", "state", "=", "user", ".", "profile", ".", "state", "for", "config", "in", "self", ".", "filter", "(", "states", "=", "state", ")", ":", "# grant user new groups for their state", "config", ".", "update_group_membership_for_user", "(", "user", ")", "for", "config", "in", "self", ".", "exclude", "(", "states", "=", "state", ")", ":", "# ensure user does not have groups from previous state", "config", ".", "remove_user_from_alliance_groups", "(", "user", ")", "config", ".", "remove_user_from_corp_groups", "(", "user", ")" ]
Update the Group memberships for the given users state :param user: User to update for :param state: State to update user for :return:
[ "Update", "the", "Group", "memberships", "for", "the", "given", "users", "state", ":", "param", "user", ":", "User", "to", "update", "for", ":", "param", "state", ":", "State", "to", "update", "user", "for", ":", "return", ":" ]
python
train
pycontribs/pyrax
pyrax/cloudnetworks.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudnetworks.py#L149-L168
def create(self, label=None, name=None, cidr=None): """ Wraps the basic create() call to handle specific failures. """ try: return super(CloudNetworkClient, self).create(label=label, name=name, cidr=cidr) except exc.BadRequest as e: msg = e.message if "too many networks" in msg: raise exc.NetworkCountExceeded("Cannot create network; the " "maximum number of isolated networks already exist.") elif "does not contain enough" in msg: raise exc.NetworkCIDRInvalid("Networks must contain two or " "more hosts; the CIDR '%s' is too restrictive." % cidr) elif "CIDR is malformed" in msg: raise exc.NetworkCIDRMalformed("The CIDR '%s' is not valid." % cidr) else: # Something unexpected raise
[ "def", "create", "(", "self", ",", "label", "=", "None", ",", "name", "=", "None", ",", "cidr", "=", "None", ")", ":", "try", ":", "return", "super", "(", "CloudNetworkClient", ",", "self", ")", ".", "create", "(", "label", "=", "label", ",", "name", "=", "name", ",", "cidr", "=", "cidr", ")", "except", "exc", ".", "BadRequest", "as", "e", ":", "msg", "=", "e", ".", "message", "if", "\"too many networks\"", "in", "msg", ":", "raise", "exc", ".", "NetworkCountExceeded", "(", "\"Cannot create network; the \"", "\"maximum number of isolated networks already exist.\"", ")", "elif", "\"does not contain enough\"", "in", "msg", ":", "raise", "exc", ".", "NetworkCIDRInvalid", "(", "\"Networks must contain two or \"", "\"more hosts; the CIDR '%s' is too restrictive.\"", "%", "cidr", ")", "elif", "\"CIDR is malformed\"", "in", "msg", ":", "raise", "exc", ".", "NetworkCIDRMalformed", "(", "\"The CIDR '%s' is not valid.\"", "%", "cidr", ")", "else", ":", "# Something unexpected", "raise" ]
Wraps the basic create() call to handle specific failures.
[ "Wraps", "the", "basic", "create", "()", "call", "to", "handle", "specific", "failures", "." ]
python
train
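A hedged usage sketch for the wrapped create() above, assuming pyrax has already been pointed at a credentials file; the credentials path, network label, and CIDR are placeholders.

# Hypothetical usage of the wrapped create(); credentials path, label and
# CIDR are placeholders.
import os
import pyrax

pyrax.set_credential_file(os.path.expanduser('~/.pyrax_credentials'))
networks = pyrax.cloud_networks
try:
    net = networks.create('my_private_net', cidr='192.168.10.0/24')
except pyrax.exceptions.NetworkCIDRMalformed as err:
    print('Bad CIDR:', err)
except pyrax.exceptions.NetworkCountExceeded as err:
    print('Isolated network quota reached:', err)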
pycampers/ampy
ampy/files.py
https://github.com/pycampers/ampy/blob/6851f8b177c334f5ff7bd43bf07307a437433ba2/ampy/files.py#L83-L174
def ls(self, directory="/", long_format=True, recursive=False): """List the contents of the specified directory (or root if none is specified). Returns a list of strings with the names of files in the specified directory. If long_format is True then a list of 2-tuples with the name and size (in bytes) of the item is returned. Note that it appears the size of directories is not supported by MicroPython and will always return 0 (i.e. no recursive size computation). """ # Disabling for now, see https://github.com/adafruit/ampy/issues/55. # # Make sure directory ends in a slash. # if not directory.endswith("/"): # directory += "/" # Make sure directory starts with slash, for consistency. if not directory.startswith("/"): directory = "/" + directory command = """\ try: import os except ImportError: import uos as os\n""" if recursive: command += """\ def listdir(directory): result = set() def _listdir(dir_or_file): try: # if its a directory, then it should provide some children. children = os.listdir(dir_or_file) except OSError: # probably a file. run stat() to confirm. os.stat(dir_or_file) result.add(dir_or_file) else: # probably a directory, add to result if empty. if children: # queue the children to be dealt with in next iteration. for child in children: # create the full path. if dir_or_file == '/': next = dir_or_file + child else: next = dir_or_file + '/' + child _listdir(next) else: result.add(dir_or_file) _listdir(directory) return sorted(result)\n""" else: command += """\ def listdir(directory): if directory == '/': return sorted([directory + f for f in os.listdir(directory)]) else: return sorted([directory + '/' + f for f in os.listdir(directory)])\n""" # Execute os.listdir() command on the board. if long_format: command += """ r = [] for f in listdir('{0}'): size = os.stat(f)[6] r.append('{{0}} - {{1}} bytes'.format(f, size)) print(r) """.format( directory ) else: command += """ print(listdir('{0}')) """.format( directory ) self._pyboard.enter_raw_repl() try: out = self._pyboard.exec_(textwrap.dedent(command)) except PyboardError as ex: # Check if this is an OSError #2, i.e. directory doesn't exist and # rethrow it as something more descriptive. if ex.args[2].decode("utf-8").find("OSError: [Errno 2] ENOENT") != -1: raise RuntimeError("No such directory: {0}".format(directory)) else: raise ex self._pyboard.exit_raw_repl() # Parse the result list and return it. return ast.literal_eval(out.decode("utf-8"))
[ "def", "ls", "(", "self", ",", "directory", "=", "\"/\"", ",", "long_format", "=", "True", ",", "recursive", "=", "False", ")", ":", "# Disabling for now, see https://github.com/adafruit/ampy/issues/55.", "# # Make sure directory ends in a slash.", "# if not directory.endswith(\"/\"):", "# directory += \"/\"", "# Make sure directory starts with slash, for consistency.", "if", "not", "directory", ".", "startswith", "(", "\"/\"", ")", ":", "directory", "=", "\"/\"", "+", "directory", "command", "=", "\"\"\"\\\n try: \n import os\n except ImportError:\n import uos as os\\n\"\"\"", "if", "recursive", ":", "command", "+=", "\"\"\"\\\n def listdir(directory):\n result = set()\n\n def _listdir(dir_or_file):\n try:\n # if its a directory, then it should provide some children.\n children = os.listdir(dir_or_file)\n except OSError: \n # probably a file. run stat() to confirm.\n os.stat(dir_or_file)\n result.add(dir_or_file) \n else:\n # probably a directory, add to result if empty.\n if children:\n # queue the children to be dealt with in next iteration.\n for child in children:\n # create the full path.\n if dir_or_file == '/':\n next = dir_or_file + child\n else:\n next = dir_or_file + '/' + child\n \n _listdir(next)\n else:\n result.add(dir_or_file) \n\n _listdir(directory)\n return sorted(result)\\n\"\"\"", "else", ":", "command", "+=", "\"\"\"\\\n def listdir(directory):\n if directory == '/': \n return sorted([directory + f for f in os.listdir(directory)])\n else:\n return sorted([directory + '/' + f for f in os.listdir(directory)])\\n\"\"\"", "# Execute os.listdir() command on the board.", "if", "long_format", ":", "command", "+=", "\"\"\"\n r = []\n for f in listdir('{0}'):\n size = os.stat(f)[6] \n r.append('{{0}} - {{1}} bytes'.format(f, size))\n print(r)\n \"\"\"", ".", "format", "(", "directory", ")", "else", ":", "command", "+=", "\"\"\"\n print(listdir('{0}'))\n \"\"\"", ".", "format", "(", "directory", ")", "self", ".", "_pyboard", ".", "enter_raw_repl", "(", ")", "try", ":", "out", "=", "self", ".", "_pyboard", ".", "exec_", "(", "textwrap", ".", "dedent", "(", "command", ")", ")", "except", "PyboardError", "as", "ex", ":", "# Check if this is an OSError #2, i.e. directory doesn't exist and", "# rethrow it as something more descriptive.", "if", "ex", ".", "args", "[", "2", "]", ".", "decode", "(", "\"utf-8\"", ")", ".", "find", "(", "\"OSError: [Errno 2] ENOENT\"", ")", "!=", "-", "1", ":", "raise", "RuntimeError", "(", "\"No such directory: {0}\"", ".", "format", "(", "directory", ")", ")", "else", ":", "raise", "ex", "self", ".", "_pyboard", ".", "exit_raw_repl", "(", ")", "# Parse the result list and return it.", "return", "ast", ".", "literal_eval", "(", "out", ".", "decode", "(", "\"utf-8\"", ")", ")" ]
List the contents of the specified directory (or root if none is specified). Returns a list of strings with the names of files in the specified directory. If long_format is True then a list of 2-tuples with the name and size (in bytes) of the item is returned. Note that it appears the size of directories is not supported by MicroPython and will always return 0 (i.e. no recursive size computation).
[ "List", "the", "contents", "of", "the", "specified", "directory", "(", "or", "root", "if", "none", "is", "specified", ")", ".", "Returns", "a", "list", "of", "strings", "with", "the", "names", "of", "files", "in", "the", "specified", "directory", ".", "If", "long_format", "is", "True", "then", "a", "list", "of", "2", "-", "tuples", "with", "the", "name", "and", "size", "(", "in", "bytes", ")", "of", "the", "item", "is", "returned", ".", "Note", "that", "it", "appears", "the", "size", "of", "directories", "is", "not", "supported", "by", "MicroPython", "and", "will", "always", "return", "0", "(", "i", ".", "e", ".", "no", "recursive", "size", "computation", ")", "." ]
python
train
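A hedged host-side sketch for the ls() entry above; the serial port path is environment-specific and the attached board is assumed to be running MicroPython.

# Hypothetical host-side usage; the serial port path depends on the machine
# and the connected board must be running MicroPython.
from ampy.pyboard import Pyboard
from ampy.files import Files

board = Pyboard('/dev/ttyUSB0', baudrate=115200)
files = Files(board)
print(files.ls('/', long_format=True, recursive=False))
board.close()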
usc-isi-i2/etk
etk/knowledge_graph_schema.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L80-L103
def iso_date(d) -> str: """ Return iso format of a date Args: d: Returns: str """ if isinstance(d, datetime): return d.isoformat() elif isinstance(d, date): return datetime.combine(d, datetime.min.time()).isoformat() else: try: datetime.strptime(d, '%Y-%m-%dT%H:%M:%S') return d except ValueError: try: datetime.strptime(d, '%Y-%m-%d') return d + "T00:00:00" except ValueError: pass raise ISODateError("Can not convert value to ISO format for kg")
[ "def", "iso_date", "(", "d", ")", "->", "str", ":", "if", "isinstance", "(", "d", ",", "datetime", ")", ":", "return", "d", ".", "isoformat", "(", ")", "elif", "isinstance", "(", "d", ",", "date", ")", ":", "return", "datetime", ".", "combine", "(", "d", ",", "datetime", ".", "min", ".", "time", "(", ")", ")", ".", "isoformat", "(", ")", "else", ":", "try", ":", "datetime", ".", "strptime", "(", "d", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "return", "d", "except", "ValueError", ":", "try", ":", "datetime", ".", "strptime", "(", "d", ",", "'%Y-%m-%d'", ")", "return", "d", "+", "\"T00:00:00\"", "except", "ValueError", ":", "pass", "raise", "ISODateError", "(", "\"Can not convert value to ISO format for kg\"", ")" ]
Return iso format of a date Args: d: Returns: str
[ "Return", "iso", "format", "of", "a", "date" ]
python
train
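The three input shapes accepted by iso_date() above can be illustrated with the standard library alone:

# What iso_date() returns for its three accepted input shapes, shown with
# the standard library only.
from datetime import date, datetime

print(datetime(2019, 5, 1, 12, 30).isoformat())              # 2019-05-01T12:30:00
print(datetime.combine(date(2019, 5, 1),
                       datetime.min.time()).isoformat())     # 2019-05-01T00:00:00
print('2019-05-01' + 'T00:00:00')                            # date-only strings get T00:00:00 appended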
gem/oq-engine
openquake/hazardlib/geo/point.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/point.py#L160-L186
def distance_to_mesh(self, mesh, with_depths=True): """ Compute distance (in km) between this point and each point of ``mesh``. :param mesh: :class:`~openquake.hazardlib.geo.mesh.Mesh` of points to calculate distance to. :param with_depths: If ``True`` (by default), distance is calculated between actual point and the mesh, geodetic distance of projections is combined with vertical distance (difference of depths). If this is set to ``False``, only geodetic distance between projections is calculated. :returns: Numpy array of floats of the same shape as ``mesh`` with distance values in km in respective indices. """ if with_depths: if mesh.depths is None: mesh_depths = numpy.zeros_like(mesh.lons) else: mesh_depths = mesh.depths return geodetic.distance(self.longitude, self.latitude, self.depth, mesh.lons, mesh.lats, mesh_depths) else: return geodetic.geodetic_distance(self.longitude, self.latitude, mesh.lons, mesh.lats)
[ "def", "distance_to_mesh", "(", "self", ",", "mesh", ",", "with_depths", "=", "True", ")", ":", "if", "with_depths", ":", "if", "mesh", ".", "depths", "is", "None", ":", "mesh_depths", "=", "numpy", ".", "zeros_like", "(", "mesh", ".", "lons", ")", "else", ":", "mesh_depths", "=", "mesh", ".", "depths", "return", "geodetic", ".", "distance", "(", "self", ".", "longitude", ",", "self", ".", "latitude", ",", "self", ".", "depth", ",", "mesh", ".", "lons", ",", "mesh", ".", "lats", ",", "mesh_depths", ")", "else", ":", "return", "geodetic", ".", "geodetic_distance", "(", "self", ".", "longitude", ",", "self", ".", "latitude", ",", "mesh", ".", "lons", ",", "mesh", ".", "lats", ")" ]
Compute distance (in km) between this point and each point of ``mesh``. :param mesh: :class:`~openquake.hazardlib.geo.mesh.Mesh` of points to calculate distance to. :param with_depths: If ``True`` (by default), distance is calculated between actual point and the mesh, geodetic distance of projections is combined with vertical distance (difference of depths). If this is set to ``False``, only geodetic distance between projections is calculated. :returns: Numpy array of floats of the same shape as ``mesh`` with distance values in km in respective indices.
[ "Compute", "distance", "(", "in", "km", ")", "between", "this", "point", "and", "each", "point", "of", "mesh", "." ]
python
train
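A hedged usage sketch for distance_to_mesh() above, assuming openquake.hazardlib is installed; the coordinates are arbitrary.

# Hypothetical usage; coordinates are arbitrary and openquake.hazardlib is
# assumed to be installed.
import numpy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.mesh import Mesh

point = Point(0.0, 0.0, depth=10.0)
mesh = Mesh(numpy.array([0.0, 0.1, 0.2]), numpy.array([0.0, 0.0, 0.0]), depths=None)
print(point.distance_to_mesh(mesh))                      # combines surface distance and the 10 km depth
print(point.distance_to_mesh(mesh, with_depths=False))   # surface projections only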
wavefrontHQ/python-client
wavefront_api_client/api/alert_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/alert_api.py#L911-L931
def hide_alert(self, id, **kwargs): # noqa: E501 """Hide a specific integration alert # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.hide_alert(id, async_req=True) >>> result = thread.get() :param async_req bool :param int id: (required) :return: ResponseContainerAlert If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.hide_alert_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.hide_alert_with_http_info(id, **kwargs) # noqa: E501 return data
[ "def", "hide_alert", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "hide_alert_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "hide_alert_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Hide a specific integration alert # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.hide_alert(id, async_req=True) >>> result = thread.get() :param async_req bool :param int id: (required) :return: ResponseContainerAlert If the method is called asynchronously, returns the request thread.
[ "Hide", "a", "specific", "integration", "alert", "#", "noqa", ":", "E501" ]
python
train
cox-labs/perseuspy
perseuspy/parameters.py
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/parameters.py#L42-L49
def boolParam(parameters, name): """ boolean parameter value. :param parameters: the parameters tree. :param name: the name of the parameter. """ value = _simple_string_value(parameters, 'BoolParam', name) if value not in {'true', 'false'}: raise ValueError('BoolParam Value has to be either "true" or "false", was {}.'.format(value)) return value == 'true'
[ "def", "boolParam", "(", "parameters", ",", "name", ")", ":", "value", "=", "_simple_string_value", "(", "parameters", ",", "'BoolParam'", ",", "name", ")", "if", "value", "not", "in", "{", "'true'", ",", "'false'", "}", ":", "raise", "ValueError", "(", "'BoolParam Value has to be either \"true\" or \"false\", was {}.'", ".", "format", "(", "value", ")", ")", "return", "value", "==", "'true'" ]
boolean parameter value. :param parameters: the parameters tree. :param name: the name of the parameter.
[ "boolean", "parameter", "value", ".", ":", "param", "parameters", ":", "the", "parameters", "tree", ".", ":", "param", "name", ":", "the", "name", "of", "the", "parameter", "." ]
python
train
gmr/tinman
tinman/handlers/base.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/base.py#L305-L310
def _set_session_cookie(self): """Set the session data cookie.""" LOGGER.debug('Setting session cookie for %s', self.session.id) self.set_secure_cookie(name=self._session_cookie_name, value=self.session.id, expires=self._cookie_expiration)
[ "def", "_set_session_cookie", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "'Setting session cookie for %s'", ",", "self", ".", "session", ".", "id", ")", "self", ".", "set_secure_cookie", "(", "name", "=", "self", ".", "_session_cookie_name", ",", "value", "=", "self", ".", "session", ".", "id", ",", "expires", "=", "self", ".", "_cookie_expiration", ")" ]
Set the session data cookie.
[ "Set", "the", "session", "data", "cookie", "." ]
python
train
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L495-L520
def extract_cluster(self, target_sites, **kwargs): """ Extracts a cluster of atoms based on bond lengths Args: target_sites ([Site]): List of initial sites to nucleate cluster. \\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded. Returns: [Site/PeriodicSite] Cluster of atoms. """ cluster = list(target_sites) others = [site for site in self if site not in cluster] size = 0 while len(cluster) > size: size = len(cluster) new_others = [] for site in others: for site2 in cluster: if CovalentBond.is_bonded(site, site2, **kwargs): cluster.append(site) break else: new_others.append(site) others = new_others return cluster
[ "def", "extract_cluster", "(", "self", ",", "target_sites", ",", "*", "*", "kwargs", ")", ":", "cluster", "=", "list", "(", "target_sites", ")", "others", "=", "[", "site", "for", "site", "in", "self", "if", "site", "not", "in", "cluster", "]", "size", "=", "0", "while", "len", "(", "cluster", ")", ">", "size", ":", "size", "=", "len", "(", "cluster", ")", "new_others", "=", "[", "]", "for", "site", "in", "others", ":", "for", "site2", "in", "cluster", ":", "if", "CovalentBond", ".", "is_bonded", "(", "site", ",", "site2", ",", "*", "*", "kwargs", ")", ":", "cluster", ".", "append", "(", "site", ")", "break", "else", ":", "new_others", ".", "append", "(", "site", ")", "others", "=", "new_others", "return", "cluster" ]
Extracts a cluster of atoms based on bond lengths Args: target_sites ([Site]): List of initial sites to nucleate cluster. \\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded. Returns: [Site/PeriodicSite] Cluster of atoms.
[ "Extracts", "a", "cluster", "of", "atoms", "based", "on", "bond", "lengths" ]
python
train
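A small usage sketch for extract_cluster() above, assuming a recent pymatgen where the method is reachable from Molecule; the water geometry is approximate.

# Hypothetical usage on a small water molecule; geometry is approximate and a
# recent pymatgen install is assumed.
from pymatgen.core import Molecule

water = Molecule(['O', 'H', 'H'],
                 [[0.00, 0.0, 0.00],
                  [0.00, 0.0, 0.96],
                  [0.93, 0.0, -0.24]])
cluster = water.extract_cluster([water[0]])   # grow the cluster outward from the oxygen site
print(len(cluster))                           # 3: both hydrogens are within O-H bond length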
astroduff/commah
commah/commah.py
https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L147-L172
def _delta_sigma(**cosmo): """ Perturb best-fit constant of proportionality Ascaling for rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c) Parameters ---------- cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- float The perturbed 'A' relation between rho_2 and rho_crit for the cosmology Raises ------ """ M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo) perturbed_A = (0.796/cosmo['sigma_8']) * \ (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6) return(perturbed_A)
[ "def", "_delta_sigma", "(", "*", "*", "cosmo", ")", ":", "M8_cosmo", "=", "cp", ".", "perturbation", ".", "radius_to_mass", "(", "8", ",", "*", "*", "cosmo", ")", "perturbed_A", "=", "(", "0.796", "/", "cosmo", "[", "'sigma_8'", "]", ")", "*", "(", "M8_cosmo", "/", "2.5e14", ")", "**", "(", "(", "cosmo", "[", "'n'", "]", "-", "0.963", ")", "/", "6", ")", "return", "(", "perturbed_A", ")" ]
Perturb best-fit constant of proportionality Ascaling for rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c) Parameters ---------- cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- float The perturbed 'A' relation between rho_2 and rho_crit for the cosmology Raises ------
[ "Perturb", "best", "-", "fit", "constant", "of", "proportionality", "Ascaling", "for", "rho_crit", "-", "rho_2", "relation", "for", "unknown", "cosmology", "(", "Correa", "et", "al", "2015c", ")" ]
python
train
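The perturbation above is a one-line formula once M8 is known; the sketch below evaluates it with an assumed M8 value instead of calling cosmolopy's radius_to_mass.

# Standalone arithmetic for the perturbed A scaling; M8 would normally come
# from cp.perturbation.radius_to_mass(8, **cosmo), here it is an assumed value.
sigma_8 = 0.829
n = 0.961
M8 = 2.4e14      # assumed mass inside an 8 Mpc/h sphere, in solar masses

perturbed_A = (0.796 / sigma_8) * (M8 / 2.5e14) ** ((n - 0.963) / 6)
print(perturbed_A)   # ~0.960 for these inputs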
Esri/ArcREST
src/arcrest/manageags/_usagereports.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_usagereports.py#L110-L140
def editUsageReportSettings(self, samplingInterval, enabled=True, maxHistory=0): """ The usage reports settings are applied to the entire site. A POST request updates the usage reports settings. Inputs: samplingInterval - Defines the duration (in minutes) for which the usage statistics are aggregated or sampled, in-memory, before being written out to the statistics database. enabled - default True - Can be true or false. When usage reports are enabled, service usage statistics are collected and persisted to a statistics database. When usage reports are disabled, the statistics are not collected. maxHistory - default 0 - Represents the number of days after which usage statistics are deleted after the statistics database. If the maxHistory parameter is set to 0, the statistics are persisted forever. """ params = { "f" : "json", "maxHistory" : maxHistory, "enabled" : enabled, "samplingInterval" : samplingInterval } url = self._url + "/settings/edit" return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "editUsageReportSettings", "(", "self", ",", "samplingInterval", ",", "enabled", "=", "True", ",", "maxHistory", "=", "0", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"maxHistory\"", ":", "maxHistory", ",", "\"enabled\"", ":", "enabled", ",", "\"samplingInterval\"", ":", "samplingInterval", "}", "url", "=", "self", ".", "_url", "+", "\"/settings/edit\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
The usage reports settings are applied to the entire site. A POST request updates the usage reports settings. Inputs: samplingInterval - Defines the duration (in minutes) for which the usage statistics are aggregated or sampled, in-memory, before being written out to the statistics database. enabled - default True - Can be true or false. When usage reports are enabled, service usage statistics are collected and persisted to a statistics database. When usage reports are disabled, the statistics are not collected. maxHistory - default 0 - Represents the number of days after which usage statistics are deleted after the statistics database. If the maxHistory parameter is set to 0, the statistics are persisted forever.
[ "The", "usage", "reports", "settings", "are", "applied", "to", "the", "entire", "site", ".", "A", "POST", "request", "updates", "the", "usage", "reports", "settings", "." ]
python
train
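A hedged example of calling the settings editor above; `usage_reports` stands in for an already-constructed usage-reports object from this ArcREST module.

# `usage_reports` is assumed to be an instance of the class this method belongs to.
result = usage_reports.editUsageReportSettings(
    samplingInterval=30,  # aggregate in-memory statistics every 30 minutes
    enabled=True,         # keep collecting usage statistics
    maxHistory=90)        # drop statistics older than 90 days
print(result)             # JSON response from the /settings/edit endpoint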
JdeRobot/base
src/drivers/drone/cmdvel.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/drone/cmdvel.py#L92-L102
def publish (self): ''' Function to publish cmdvel. ''' #print(self) #self.using_event.wait() self.lock.acquire() msg = cmdvel2PosTarget(self.vel) self.lock.release() self.pub.publish(msg)
[ "def", "publish", "(", "self", ")", ":", "#print(self)", "#self.using_event.wait()", "self", ".", "lock", ".", "acquire", "(", ")", "msg", "=", "cmdvel2PosTarget", "(", "self", ".", "vel", ")", "self", ".", "lock", ".", "release", "(", ")", "self", ".", "pub", ".", "publish", "(", "msg", ")" ]
Function to publish cmdvel.
[ "Function", "to", "publish", "cmdvel", "." ]
python
train
potash/drain
drain/data.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L299-L311
def expand_counts(df, column, values=None): """ expand a column containing value:count dictionaries """ d = counts_to_dicts(df, column) if len(d) > 0: if values is None: values = set(np.concatenate(d.apply(lambda c: c.keys()).values)) for value in values: name = values[value] if type(values) is dict else str(value) df[column + '_' + name.replace(' ', '_')] =\ d.apply(lambda c: c[value] if value in c else 0) df.drop(column, axis=1, inplace=True)
[ "def", "expand_counts", "(", "df", ",", "column", ",", "values", "=", "None", ")", ":", "d", "=", "counts_to_dicts", "(", "df", ",", "column", ")", "if", "len", "(", "d", ")", ">", "0", ":", "if", "values", "is", "None", ":", "values", "=", "set", "(", "np", ".", "concatenate", "(", "d", ".", "apply", "(", "lambda", "c", ":", "c", ".", "keys", "(", ")", ")", ".", "values", ")", ")", "for", "value", "in", "values", ":", "name", "=", "values", "[", "value", "]", "if", "type", "(", "values", ")", "is", "dict", "else", "str", "(", "value", ")", "df", "[", "column", "+", "'_'", "+", "name", ".", "replace", "(", "' '", ",", "'_'", ")", "]", "=", "d", ".", "apply", "(", "lambda", "c", ":", "c", "[", "value", "]", "if", "value", "in", "c", "else", "0", ")", "df", ".", "drop", "(", "column", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")" ]
expand a column containing value:count dictionaries
[ "expand", "a", "column", "containing", "value", ":", "count", "dictionaries" ]
python
train
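An illustrative call to the column expansion above; the frame contents are hypothetical and assume the module's counts_to_dicts helper accepts this value:count representation.

import pandas as pd

# Hypothetical input; 'tests' holds value:count data in whatever form counts_to_dicts parses.
df = pd.DataFrame({'id': [1, 2],
                   'tests': [{'lead': 2, 'copper': 1}, {'lead': 3}]})
expand_counts(df, 'tests')
# Afterwards df carries tests_lead / tests_copper count columns and 'tests' is dropped.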
tanghaibao/goatools
goatools/evidence_codes.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/evidence_codes.py#L156-L163
def get_grp2code2nt(self): """Return ordered dict for group to namedtuple""" grp2code2nt = cx.OrderedDict([(g, []) for g in self.grps]) for code, ntd in self.code2nt.items(): grp2code2nt[ntd.group].append((code, ntd)) for grp, nts in grp2code2nt.items(): grp2code2nt[grp] = cx.OrderedDict(nts) return grp2code2nt
[ "def", "get_grp2code2nt", "(", "self", ")", ":", "grp2code2nt", "=", "cx", ".", "OrderedDict", "(", "[", "(", "g", ",", "[", "]", ")", "for", "g", "in", "self", ".", "grps", "]", ")", "for", "code", ",", "ntd", "in", "self", ".", "code2nt", ".", "items", "(", ")", ":", "grp2code2nt", "[", "ntd", ".", "group", "]", ".", "append", "(", "(", "code", ",", "ntd", ")", ")", "for", "grp", ",", "nts", "in", "grp2code2nt", ".", "items", "(", ")", ":", "grp2code2nt", "[", "grp", "]", "=", "cx", ".", "OrderedDict", "(", "nts", ")", "return", "grp2code2nt" ]
Return ordered dict for group to namedtuple
[ "Return", "ordered", "dict", "for", "group", "to", "namedtuple" ]
python
train
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/visualise.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/visualise.py#L762-L799
def opensignals_kwargs(obj): """ ----- Brief ----- Function used to automatically apply the OpenSignals graphical style to the toolbar of Bokeh grid plots. ----------- Description ----------- Bokeh grid plots have numerous options in order to personalise the visual aspect and functionalities of plots. OpenSignals uses a specific graphical design that limits this options and unifies the aspect of its plots. This function applies the graphical aspect of the toolbar of OpenSignals to a given Bokeh figure object given as input. ---------- Parameters ---------- obj : str String that identifies if the kwargs will be the input of "figure" or "gridplot". Returns ------- out : dict Dictionary with toolbar parameters. """ out = None if obj == "figure": out = {} elif obj == "gridplot": out = {"toolbar_options": {"logo": None}, "sizing_mode": 'scale_width'} elif obj == "line": out = {"line_width": 2, "line_color": opensignals_color_pallet()} return out
[ "def", "opensignals_kwargs", "(", "obj", ")", ":", "out", "=", "None", "if", "obj", "==", "\"figure\"", ":", "out", "=", "{", "}", "elif", "obj", "==", "\"gridplot\"", ":", "out", "=", "{", "\"toolbar_options\"", ":", "{", "\"logo\"", ":", "None", "}", ",", "\"sizing_mode\"", ":", "'scale_width'", "}", "elif", "obj", "==", "\"line\"", ":", "out", "=", "{", "\"line_width\"", ":", "2", ",", "\"line_color\"", ":", "opensignals_color_pallet", "(", ")", "}", "return", "out" ]
----- Brief ----- Function used to automatically apply the OpenSignals graphical style to the toolbar of Bokeh grid plots. ----------- Description ----------- Bokeh grid plots have numerous options in order to personalise the visual aspect and functionalities of plots. OpenSignals uses a specific graphical design that limits this options and unifies the aspect of its plots. This function applies the graphical aspect of the toolbar of OpenSignals to a given Bokeh figure object given as input. ---------- Parameters ---------- obj : str String that identifies if the kwargs will be the input of "figure" or "gridplot". Returns ------- out : dict Dictionary with toolbar parameters.
[ "-----", "Brief", "-----", "Function", "used", "to", "automatically", "apply", "the", "OpenSignals", "graphical", "style", "to", "the", "toolbar", "of", "Bokeh", "grid", "plots", "." ]
python
train
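A short sketch of how the returned dictionaries are typically unpacked into Bokeh calls; the figure contents are illustrative.

from bokeh.plotting import figure

# The dicts returned above are meant to be splatted straight into Bokeh calls.
fig = figure(**opensignals_kwargs("figure"))
fig.line([0, 1, 2, 3], [0, 1, 4, 9], **opensignals_kwargs("line"))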
azraq27/neural
neural/dsets.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L287-L299
def ijk_to_xyz(dset,ijk): '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``''' i = nl.dset_info(dset) orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()] orient_is = [abs(x)-1 for x in orient_codes] rai = [] for rai_i in xrange(3): ijk_i = orient_is[rai_i] if orient_codes[rai_i] > 0: rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i]) else: rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i]) return rai
[ "def", "ijk_to_xyz", "(", "dset", ",", "ijk", ")", ":", "i", "=", "nl", ".", "dset_info", "(", "dset", ")", "orient_codes", "=", "[", "int", "(", "x", ")", "for", "x", "in", "nl", ".", "run", "(", "[", "'@AfniOrient2RAImap'", ",", "i", ".", "orient", "]", ")", ".", "output", ".", "split", "(", ")", "]", "orient_is", "=", "[", "abs", "(", "x", ")", "-", "1", "for", "x", "in", "orient_codes", "]", "rai", "=", "[", "]", "for", "rai_i", "in", "xrange", "(", "3", ")", ":", "ijk_i", "=", "orient_is", "[", "rai_i", "]", "if", "orient_codes", "[", "rai_i", "]", ">", "0", ":", "rai", ".", "append", "(", "ijk", "[", "ijk_i", "]", "*", "i", ".", "voxel_size", "[", "rai_i", "]", "+", "i", ".", "spatial_from", "[", "rai_i", "]", ")", "else", ":", "rai", ".", "append", "(", "i", ".", "spatial_to", "[", "rai_i", "]", "-", "ijk", "[", "ijk_i", "]", "*", "i", ".", "voxel_size", "[", "rai_i", "]", ")", "return", "rai" ]
convert the dset indices ``ijk`` to RAI coordinates ``xyz``
[ "convert", "the", "dset", "indices", "ijk", "to", "RAI", "coordinates", "xyz" ]
python
train
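The conversion above reduces to per-axis affine arithmetic once the orientation codes are known; a self-contained sketch of that arithmetic (no AFNI call) for an assumed RAI-oriented dataset:

# Assumed geometry, not taken from any dataset: 2 mm isotropic voxels,
# spatial extent starting at -90 mm on each axis, positive orientation codes.
voxel_size = [2.0, 2.0, 2.0]
spatial_from = [-90.0, -90.0, -90.0]
ijk = [10, 20, 30]
xyz = [ijk[a] * voxel_size[a] + spatial_from[a] for a in range(3)]
print(xyz)  # [-70.0, -50.0, -30.0]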
ewels/MultiQC
multiqc/modules/cutadapt/cutadapt.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/cutadapt/cutadapt.py#L176-L200
def cutadapt_length_trimmed_plot (self): """ Generate the trimming length plot """ description = 'This plot shows the number of reads with certain lengths of adapter trimmed. \n\ Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \n\ may be related to adapter length. See the \n\ <a href="http://cutadapt.readthedocs.org/en/latest/guide.html#how-to-read-the-report" target="_blank">cutadapt documentation</a> \n\ for more information on how these numbers are generated.' pconfig = { 'id': 'cutadapt_plot', 'title': 'Cutadapt: Lengths of Trimmed Sequences', 'ylab': 'Counts', 'xlab': 'Length Trimmed (bp)', 'xDecimals': False, 'ymin': 0, 'tt_label': '<b>{point.x} bp trimmed</b>: {point.y:.0f}', 'data_labels': [{'name': 'Counts', 'ylab': 'Count'}, {'name': 'Obs/Exp', 'ylab': 'Observed / Expected'}] } self.add_section( description = description, plot = linegraph.plot([self.cutadapt_length_counts, self.cutadapt_length_obsexp], pconfig) )
[ "def", "cutadapt_length_trimmed_plot", "(", "self", ")", ":", "description", "=", "'This plot shows the number of reads with certain lengths of adapter trimmed. \\n\\\n Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \\n\\\n may be related to adapter length. See the \\n\\\n <a href=\"http://cutadapt.readthedocs.org/en/latest/guide.html#how-to-read-the-report\" target=\"_blank\">cutadapt documentation</a> \\n\\\n for more information on how these numbers are generated.'", "pconfig", "=", "{", "'id'", ":", "'cutadapt_plot'", ",", "'title'", ":", "'Cutadapt: Lengths of Trimmed Sequences'", ",", "'ylab'", ":", "'Counts'", ",", "'xlab'", ":", "'Length Trimmed (bp)'", ",", "'xDecimals'", ":", "False", ",", "'ymin'", ":", "0", ",", "'tt_label'", ":", "'<b>{point.x} bp trimmed</b>: {point.y:.0f}'", ",", "'data_labels'", ":", "[", "{", "'name'", ":", "'Counts'", ",", "'ylab'", ":", "'Count'", "}", ",", "{", "'name'", ":", "'Obs/Exp'", ",", "'ylab'", ":", "'Observed / Expected'", "}", "]", "}", "self", ".", "add_section", "(", "description", "=", "description", ",", "plot", "=", "linegraph", ".", "plot", "(", "[", "self", ".", "cutadapt_length_counts", ",", "self", ".", "cutadapt_length_obsexp", "]", ",", "pconfig", ")", ")" ]
Generate the trimming length plot
[ "Generate", "the", "trimming", "length", "plot" ]
python
train
stan-dev/pystan
pystan/misc.py
https://github.com/stan-dev/pystan/blob/57bdccea11888157e7aaafba083003080a934805/pystan/misc.py#L159-L167
def _format_number_si(num, n_signif_figures): """Format a number using scientific notation to given significant figures""" if math.isnan(num) or math.isinf(num): return str(num) leading, exp = '{:E}'.format(num).split('E') leading = round(float(leading), n_signif_figures - 1) exp = exp[:1] + exp[2:] if exp[1] == '0' else exp formatted = '{}e{}'.format(leading, exp.lstrip('+')) return formatted
[ "def", "_format_number_si", "(", "num", ",", "n_signif_figures", ")", ":", "if", "math", ".", "isnan", "(", "num", ")", "or", "math", ".", "isinf", "(", "num", ")", ":", "return", "str", "(", "num", ")", "leading", ",", "exp", "=", "'{:E}'", ".", "format", "(", "num", ")", ".", "split", "(", "'E'", ")", "leading", "=", "round", "(", "float", "(", "leading", ")", ",", "n_signif_figures", "-", "1", ")", "exp", "=", "exp", "[", ":", "1", "]", "+", "exp", "[", "2", ":", "]", "if", "exp", "[", "1", "]", "==", "'0'", "else", "exp", "formatted", "=", "'{}e{}'", ".", "format", "(", "leading", ",", "exp", ".", "lstrip", "(", "'+'", ")", ")", "return", "formatted" ]
Format a number using scientific notation to given significant figures
[ "Format", "a", "number", "using", "scientific", "notation", "to", "given", "significant", "figures" ]
python
train
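Worked examples of the formatter above (a private helper, called directly here for illustration); the outputs follow from tracing the rounding and exponent-trimming logic.

print(_format_number_si(12345.678, 3))     # '1.23e4'  (leading digits rounded to 3 significant figures)
print(_format_number_si(0.000123456, 3))   # '1.23e-4' (the leading zero of the exponent is stripped)
print(_format_number_si(float('nan'), 3))  # 'nan'     (NaN and infinities pass through unformatted)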
secdev/scapy
scapy/arch/windows/structures.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/windows/structures.py#L166-L172
def GetIcmpStatistics(): """Return all Windows ICMP stats from iphlpapi""" statistics = MIB_ICMP() _GetIcmpStatistics(byref(statistics)) results = _struct_to_dict(statistics) del(statistics) return results
[ "def", "GetIcmpStatistics", "(", ")", ":", "statistics", "=", "MIB_ICMP", "(", ")", "_GetIcmpStatistics", "(", "byref", "(", "statistics", ")", ")", "results", "=", "_struct_to_dict", "(", "statistics", ")", "del", "(", "statistics", ")", "return", "results" ]
Return all Windows ICMP stats from iphlpapi
[ "Return", "all", "Windows", "ICMP", "stats", "from", "iphlpapi" ]
python
train
frigg/frigg-coverage
frigg_coverage/__init__.py
https://github.com/frigg/frigg-coverage/blob/ae7d29a8d94f3fe5405d5882cd4c4726ed638e97/frigg_coverage/__init__.py#L7-L19
def parse_coverage(coverage_report, parser): """ :param coverage_report: A string with the contents of a coverage file :type coverage_report: String :param parser: A string with name of the parser to use :type parser: String :return: Total coverage """ if parser in PARSERS: if coverage_report: return PARSERS[parser].parse_coverage_report(coverage_report) return None return NotImplemented
[ "def", "parse_coverage", "(", "coverage_report", ",", "parser", ")", ":", "if", "parser", "in", "PARSERS", ":", "if", "coverage_report", ":", "return", "PARSERS", "[", "parser", "]", ".", "parse_coverage_report", "(", "coverage_report", ")", "return", "None", "return", "NotImplemented" ]
:param coverage_report: A string with the contents of a coverage file :type coverage_report: String :param parser: A string with name of the parser to use :type parser: String :return: Total coverage
[ ":", "param", "coverage_report", ":", "A", "string", "with", "the", "contents", "of", "a", "coverage", "file", ":", "type", "coverage_report", ":", "String", ":", "param", "parser", ":", "A", "string", "with", "name", "of", "the", "parser", "to", "use", ":", "type", "parser", ":", "String", ":", "return", ":", "Total", "coverage" ]
python
train
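A hedged usage sketch for the dispatcher above; the parser key and file name are assumptions, check the module's PARSERS mapping for the real names.

# 'cobertura' and 'coverage.xml' are illustrative; PARSERS defines the supported keys.
with open('coverage.xml') as report:
    total = parse_coverage(report.read(), 'cobertura')
if total is NotImplemented:
    print('no parser registered under that name')
else:
    print('total coverage:', total)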
vtkiorg/vtki
vtki/ipy_tools.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/ipy_tools.py#L133-L151
def _get_scalar_names(self, limit=None): """Only give scalar options that have a varying range""" names = [] if limit == 'point': inpnames = list(self.input_dataset.point_arrays.keys()) elif limit == 'cell': inpnames = list(self.input_dataset.cell_arrays.keys()) else: inpnames = self.input_dataset.scalar_names for name in inpnames: arr = self.input_dataset.get_scalar(name) rng = self.input_dataset.get_data_range(name) if arr is not None and arr.size > 0 and (rng[1]-rng[0] > 0.0): names.append(name) try: self._last_scalars = names[0] except IndexError: pass return names
[ "def", "_get_scalar_names", "(", "self", ",", "limit", "=", "None", ")", ":", "names", "=", "[", "]", "if", "limit", "==", "'point'", ":", "inpnames", "=", "list", "(", "self", ".", "input_dataset", ".", "point_arrays", ".", "keys", "(", ")", ")", "elif", "limit", "==", "'cell'", ":", "inpnames", "=", "list", "(", "self", ".", "input_dataset", ".", "cell_arrays", ".", "keys", "(", ")", ")", "else", ":", "inpnames", "=", "self", ".", "input_dataset", ".", "scalar_names", "for", "name", "in", "inpnames", ":", "arr", "=", "self", ".", "input_dataset", ".", "get_scalar", "(", "name", ")", "rng", "=", "self", ".", "input_dataset", ".", "get_data_range", "(", "name", ")", "if", "arr", "is", "not", "None", "and", "arr", ".", "size", ">", "0", "and", "(", "rng", "[", "1", "]", "-", "rng", "[", "0", "]", ">", "0.0", ")", ":", "names", ".", "append", "(", "name", ")", "try", ":", "self", ".", "_last_scalars", "=", "names", "[", "0", "]", "except", "IndexError", ":", "pass", "return", "names" ]
Only give scalar options that have a varying range
[ "Only", "give", "scalar", "options", "that", "have", "a", "varying", "range" ]
python
train
tanghaibao/goatools
goatools/evidence_codes.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/evidence_codes.py#L173-L178
def get_grp2codes(self): """Get dict of group name to namedtuples.""" grp2codes = cx.defaultdict(set) for code, ntd in self.code2nt.items(): grp2codes[ntd.group].add(code) return dict(grp2codes)
[ "def", "get_grp2codes", "(", "self", ")", ":", "grp2codes", "=", "cx", ".", "defaultdict", "(", "set", ")", "for", "code", ",", "ntd", "in", "self", ".", "code2nt", ".", "items", "(", ")", ":", "grp2codes", "[", "ntd", ".", "group", "]", ".", "add", "(", "code", ")", "return", "dict", "(", "grp2codes", ")" ]
Get dict of group name to namedtuples.
[ "Get", "dict", "of", "group", "name", "to", "namedtuples", "." ]
python
train
raviparekh/webapp-error-handler
app_error_handler/application_error_handler.py
https://github.com/raviparekh/webapp-error-handler/blob/11e20bea464331e254034b02661c53fb19102d4a/app_error_handler/application_error_handler.py#L9-L39
def register_app_for_error_handling(wsgi_app, app_name, app_logger, custom_logging_service=None): """Wraps a WSGI app and handles uncaught exceptions and defined exception and outputs a the exception in a structured format. Parameters: - wsgi_app is the app.wsgi_app of flask, - app_name should in correct format e.g. APP_NAME_1, - app_logger is the logger object""" logging_service = LoggingService(app_logger) if custom_logging_service is None else custom_logging_service exception_manager = ExceptionHandler(app_name, logging_service) def wrapper(environ, start_response): try: return wsgi_app(environ, start_response) except RootException as e: app_request = Request(environ) stack_trace = traceback.format_exc().splitlines()[-1] exception_manager.update_with_exception_data(e, app_request, stack_trace) except Exception: app_request = Request(environ) stack_trace = traceback.format_exc() e = RootException("FATAL_000", {}, {}, {}, status_code=500) e.error_message = "Unknown System Error" exception_manager.update_with_exception_data(e, app_request, stack_trace) error_details = exception_manager.construct_error_details() http_status_code = exception_manager.get_http_status_code() response = Response(json.dumps(error_details), status=http_status_code, content_type='application/json') return response(environ, start_response) return wrapper
[ "def", "register_app_for_error_handling", "(", "wsgi_app", ",", "app_name", ",", "app_logger", ",", "custom_logging_service", "=", "None", ")", ":", "logging_service", "=", "LoggingService", "(", "app_logger", ")", "if", "custom_logging_service", "is", "None", "else", "custom_logging_service", "exception_manager", "=", "ExceptionHandler", "(", "app_name", ",", "logging_service", ")", "def", "wrapper", "(", "environ", ",", "start_response", ")", ":", "try", ":", "return", "wsgi_app", "(", "environ", ",", "start_response", ")", "except", "RootException", "as", "e", ":", "app_request", "=", "Request", "(", "environ", ")", "stack_trace", "=", "traceback", ".", "format_exc", "(", ")", ".", "splitlines", "(", ")", "[", "-", "1", "]", "exception_manager", ".", "update_with_exception_data", "(", "e", ",", "app_request", ",", "stack_trace", ")", "except", "Exception", ":", "app_request", "=", "Request", "(", "environ", ")", "stack_trace", "=", "traceback", ".", "format_exc", "(", ")", "e", "=", "RootException", "(", "\"FATAL_000\"", ",", "{", "}", ",", "{", "}", ",", "{", "}", ",", "status_code", "=", "500", ")", "e", ".", "error_message", "=", "\"Unknown System Error\"", "exception_manager", ".", "update_with_exception_data", "(", "e", ",", "app_request", ",", "stack_trace", ")", "error_details", "=", "exception_manager", ".", "construct_error_details", "(", ")", "http_status_code", "=", "exception_manager", ".", "get_http_status_code", "(", ")", "response", "=", "Response", "(", "json", ".", "dumps", "(", "error_details", ")", ",", "status", "=", "http_status_code", ",", "content_type", "=", "'application/json'", ")", "return", "response", "(", "environ", ",", "start_response", ")", "return", "wrapper" ]
Wraps a WSGI app and handles uncaught exceptions and defined exception and outputs a the exception in a structured format. Parameters: - wsgi_app is the app.wsgi_app of flask, - app_name should in correct format e.g. APP_NAME_1, - app_logger is the logger object
[ "Wraps", "a", "WSGI", "app", "and", "handles", "uncaught", "exceptions", "and", "defined", "exception", "and", "outputs", "a", "the", "exception", "in", "a", "structured", "format", ".", "Parameters", ":", "-", "wsgi_app", "is", "the", "app", ".", "wsgi_app", "of", "flask", "-", "app_name", "should", "in", "correct", "format", "e", ".", "g", ".", "APP_NAME_1", "-", "app_logger", "is", "the", "logger", "object" ]
python
test
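A minimal Flask wiring sketch matching the parameters described in the docstring above; the app name and logger are illustrative.

from flask import Flask
import logging

app = Flask(__name__)
# Wrap the WSGI callable so RootException subclasses and unexpected errors
# come back as structured JSON with the mapped HTTP status code.
app.wsgi_app = register_app_for_error_handling(
    app.wsgi_app, 'APP_NAME_1', logging.getLogger(__name__))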
dwavesystems/dwave_networkx
dwave_networkx/algorithms/elimination_ordering.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/elimination_ordering.py#L730-L747
def _theorem6p1(): """See Theorem 6.1 in paper.""" pruning_set = set() def _prune(x): if len(x) <= 2: return False # this is faster than tuple(x[-3:]) key = (tuple(x[:-2]), x[-2], x[-1]) return key in pruning_set def _explored(x): if len(x) >= 3: prunable = (tuple(x[:-2]), x[-1], x[-2]) pruning_set.add(prunable) return _prune, _explored
[ "def", "_theorem6p1", "(", ")", ":", "pruning_set", "=", "set", "(", ")", "def", "_prune", "(", "x", ")", ":", "if", "len", "(", "x", ")", "<=", "2", ":", "return", "False", "# this is faster than tuple(x[-3:])", "key", "=", "(", "tuple", "(", "x", "[", ":", "-", "2", "]", ")", ",", "x", "[", "-", "2", "]", ",", "x", "[", "-", "1", "]", ")", "return", "key", "in", "pruning_set", "def", "_explored", "(", "x", ")", ":", "if", "len", "(", "x", ")", ">=", "3", ":", "prunable", "=", "(", "tuple", "(", "x", "[", ":", "-", "2", "]", ")", ",", "x", "[", "-", "1", "]", ",", "x", "[", "-", "2", "]", ")", "pruning_set", ".", "add", "(", "prunable", ")", "return", "_prune", ",", "_explored" ]
See Theorem 6.1 in paper.
[ "See", "Theorem", "6", ".", "1", "in", "paper", "." ]
python
train
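A small trace of the prune/explored closures returned above, using toy vertex orderings; it shows that an ordering whose last two vertices are swapped relative to an explored ordering gets pruned.

prune, explored = _theorem6p1()
explored(['a', 'b', 'c'])      # records (('a',), 'c', 'b') as prunable
print(prune(['a', 'c', 'b']))  # True: suffix swap of an explored ordering
print(prune(['a', 'b', 'c']))  # False: not recorded as prunable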
blockstack/blockstack-core
blockstack/lib/nameset/db.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L929-L960
def namedb_op_sanity_check( opcode, op_data, record ): """ Sanity checks over operation and state graph data: * opcode and op_data must be consistent * record must have an opcode * the given opcode must be reachable from it. """ assert 'address' in record, "BUG: current record has no 'address' field" assert op_data.has_key('op'), "BUG: operation data is missing its 'op'" op_data_opcode = op_get_opcode_name( op_data['op'] ) assert record.has_key('op'), "BUG: current record is missing its 'op'" cur_opcode = op_get_opcode_name( record['op'] ) assert op_data_opcode is not None, "BUG: undefined operation '%s'" % op_data['op'] assert cur_opcode is not None, "BUG: undefined current operation '%s'" % record['op'] if op_data_opcode != opcode: # only allowed of the serialized opcode is the same # (i.e. as is the case for register/renew) assert NAME_OPCODES.get( op_data_opcode, None ) is not None, "BUG: unrecognized opcode '%s'" % op_data_opcode assert NAME_OPCODES.get( opcode, None ) is not None, "BUG: unrecognized opcode '%s'" % opcode assert NAME_OPCODES[op_data_opcode] == NAME_OPCODES[opcode], "BUG: %s != %s" % (opcode, op_data_opcode) assert opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to arrive at operation '%s'" % opcode assert cur_opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to have processed operation '%s'" % cur_opcode assert opcode in OPCODE_SEQUENCE_GRAPH[ cur_opcode ], "BUG: impossible sequence from '%s' to '%s'" % (cur_opcode, opcode) return True
[ "def", "namedb_op_sanity_check", "(", "opcode", ",", "op_data", ",", "record", ")", ":", "assert", "'address'", "in", "record", ",", "\"BUG: current record has no 'address' field\"", "assert", "op_data", ".", "has_key", "(", "'op'", ")", ",", "\"BUG: operation data is missing its 'op'\"", "op_data_opcode", "=", "op_get_opcode_name", "(", "op_data", "[", "'op'", "]", ")", "assert", "record", ".", "has_key", "(", "'op'", ")", ",", "\"BUG: current record is missing its 'op'\"", "cur_opcode", "=", "op_get_opcode_name", "(", "record", "[", "'op'", "]", ")", "assert", "op_data_opcode", "is", "not", "None", ",", "\"BUG: undefined operation '%s'\"", "%", "op_data", "[", "'op'", "]", "assert", "cur_opcode", "is", "not", "None", ",", "\"BUG: undefined current operation '%s'\"", "%", "record", "[", "'op'", "]", "if", "op_data_opcode", "!=", "opcode", ":", "# only allowed of the serialized opcode is the same", "# (i.e. as is the case for register/renew)", "assert", "NAME_OPCODES", ".", "get", "(", "op_data_opcode", ",", "None", ")", "is", "not", "None", ",", "\"BUG: unrecognized opcode '%s'\"", "%", "op_data_opcode", "assert", "NAME_OPCODES", ".", "get", "(", "opcode", ",", "None", ")", "is", "not", "None", ",", "\"BUG: unrecognized opcode '%s'\"", "%", "opcode", "assert", "NAME_OPCODES", "[", "op_data_opcode", "]", "==", "NAME_OPCODES", "[", "opcode", "]", ",", "\"BUG: %s != %s\"", "%", "(", "opcode", ",", "op_data_opcode", ")", "assert", "opcode", "in", "OPCODE_SEQUENCE_GRAPH", ",", "\"BUG: impossible to arrive at operation '%s'\"", "%", "opcode", "assert", "cur_opcode", "in", "OPCODE_SEQUENCE_GRAPH", ",", "\"BUG: impossible to have processed operation '%s'\"", "%", "cur_opcode", "assert", "opcode", "in", "OPCODE_SEQUENCE_GRAPH", "[", "cur_opcode", "]", ",", "\"BUG: impossible sequence from '%s' to '%s'\"", "%", "(", "cur_opcode", ",", "opcode", ")", "return", "True" ]
Sanity checks over operation and state graph data: * opcode and op_data must be consistent * record must have an opcode * the given opcode must be reachable from it.
[ "Sanity", "checks", "over", "operation", "and", "state", "graph", "data", ":", "*", "opcode", "and", "op_data", "must", "be", "consistent", "*", "record", "must", "have", "an", "opcode", "*", "the", "given", "opcode", "must", "be", "reachable", "from", "it", "." ]
python
train
awslabs/sockeye
sockeye/rnn_attention.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/rnn_attention.py#L91-L106
def get_attention(config: AttentionConfig, max_seq_len: int, prefix: str = C.ATTENTION_PREFIX) -> 'Attention': """ Returns an Attention instance based on attention_type. :param config: Attention configuration. :param max_seq_len: Maximum length of source sequences. :param prefix: Name prefix. :return: Instance of Attention. """ att_cls = Attention.get_attention_cls(config.type) params = config.__dict__.copy() params.pop('_frozen') params['max_seq_len'] = max_seq_len params['prefix'] = prefix return _instantiate(att_cls, params)
[ "def", "get_attention", "(", "config", ":", "AttentionConfig", ",", "max_seq_len", ":", "int", ",", "prefix", ":", "str", "=", "C", ".", "ATTENTION_PREFIX", ")", "->", "'Attention'", ":", "att_cls", "=", "Attention", ".", "get_attention_cls", "(", "config", ".", "type", ")", "params", "=", "config", ".", "__dict__", ".", "copy", "(", ")", "params", ".", "pop", "(", "'_frozen'", ")", "params", "[", "'max_seq_len'", "]", "=", "max_seq_len", "params", "[", "'prefix'", "]", "=", "prefix", "return", "_instantiate", "(", "att_cls", ",", "params", ")" ]
Returns an Attention instance based on attention_type. :param config: Attention configuration. :param max_seq_len: Maximum length of source sequences. :param prefix: Name prefix. :return: Instance of Attention.
[ "Returns", "an", "Attention", "instance", "based", "on", "attention_type", "." ]
python
train
Commonists/CommonsDownloader
commonsdownloader/commonsdownloader.py
https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L27-L31
def download_from_category(category_name, output_path, width): """Download files of a given category.""" file_names = get_category_files_from_api(category_name) files_to_download = izip_longest(file_names, [], fillvalue=width) download_files_if_not_in_manifest(files_to_download, output_path)
[ "def", "download_from_category", "(", "category_name", ",", "output_path", ",", "width", ")", ":", "file_names", "=", "get_category_files_from_api", "(", "category_name", ")", "files_to_download", "=", "izip_longest", "(", "file_names", ",", "[", "]", ",", "fillvalue", "=", "width", ")", "download_files_if_not_in_manifest", "(", "files_to_download", ",", "output_path", ")" ]
Download files of a given category.
[ "Download", "files", "of", "a", "given", "category", "." ]
python
train
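A one-line usage sketch; the category name, output directory and width are illustrative values.

# Hypothetical call: fetch every file in the category at 800 px width into ./downloads
download_from_category('Images from Wiki Loves Monuments 2013', './downloads', 800)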
neurosynth/neurosynth
neurosynth/base/imageutils.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/imageutils.py#L98-L152
def create_grid(image, scale=4, apply_mask=True, save_file=None): """ Creates an image containing labeled cells in a 3D grid. Args: image: String or nibabel image. The image used to define the grid dimensions. Also used to define the mask to apply to the grid. Only voxels with non-zero values in the mask will be retained; all other voxels will be zeroed out in the returned image. scale: The scaling factor which controls the grid size. Value reflects diameter of cube in voxels. apply_mask: Boolean indicating whether or not to zero out voxels not in image. save_file: Optional string giving the path to save image to. Image written out is a standard Nifti image. If save_file is None, no image is written. Returns: A nibabel image with the same dimensions as the input image. All voxels in each cell in the 3D grid are assigned the same non-zero label. """ if isinstance(image, string_types): image = nb.load(image) # create a list of cluster centers centers = [] x_length, y_length, z_length = image.shape for x in range(0, x_length, scale): for y in range(0, y_length, scale): for z in range(0, z_length, scale): centers.append((x, y, z)) # create a box around each center with the diameter equal to the scaling # factor grid = np.zeros(image.shape) for (i, (x, y, z)) in enumerate(centers): for mov_x in range((-scale + 1) // 2, (scale + 1) // 2): for mov_y in range((-scale + 1) // 2, (scale + 1) // 2): for mov_z in range((-scale + 1) // 2, (scale + 1) // 2): try: # Ignore voxels outside bounds of image grid[x + mov_x, y + mov_y, z + mov_z] = i + 1 except: pass if apply_mask: mask = image if isinstance(mask, string_types): mask = nb.load(mask) if type(mask).__module__ != np.__name__: mask = mask.get_data() grid[~mask.astype(bool)] = 0.0 grid = nb.Nifti1Image(grid, image.get_affine(), image.get_header()) if save_file is not None: nb.save(grid, save_file) return grid
[ "def", "create_grid", "(", "image", ",", "scale", "=", "4", ",", "apply_mask", "=", "True", ",", "save_file", "=", "None", ")", ":", "if", "isinstance", "(", "image", ",", "string_types", ")", ":", "image", "=", "nb", ".", "load", "(", "image", ")", "# create a list of cluster centers", "centers", "=", "[", "]", "x_length", ",", "y_length", ",", "z_length", "=", "image", ".", "shape", "for", "x", "in", "range", "(", "0", ",", "x_length", ",", "scale", ")", ":", "for", "y", "in", "range", "(", "0", ",", "y_length", ",", "scale", ")", ":", "for", "z", "in", "range", "(", "0", ",", "z_length", ",", "scale", ")", ":", "centers", ".", "append", "(", "(", "x", ",", "y", ",", "z", ")", ")", "# create a box around each center with the diameter equal to the scaling", "# factor", "grid", "=", "np", ".", "zeros", "(", "image", ".", "shape", ")", "for", "(", "i", ",", "(", "x", ",", "y", ",", "z", ")", ")", "in", "enumerate", "(", "centers", ")", ":", "for", "mov_x", "in", "range", "(", "(", "-", "scale", "+", "1", ")", "//", "2", ",", "(", "scale", "+", "1", ")", "//", "2", ")", ":", "for", "mov_y", "in", "range", "(", "(", "-", "scale", "+", "1", ")", "//", "2", ",", "(", "scale", "+", "1", ")", "//", "2", ")", ":", "for", "mov_z", "in", "range", "(", "(", "-", "scale", "+", "1", ")", "//", "2", ",", "(", "scale", "+", "1", ")", "//", "2", ")", ":", "try", ":", "# Ignore voxels outside bounds of image", "grid", "[", "x", "+", "mov_x", ",", "y", "+", "mov_y", ",", "z", "+", "mov_z", "]", "=", "i", "+", "1", "except", ":", "pass", "if", "apply_mask", ":", "mask", "=", "image", "if", "isinstance", "(", "mask", ",", "string_types", ")", ":", "mask", "=", "nb", ".", "load", "(", "mask", ")", "if", "type", "(", "mask", ")", ".", "__module__", "!=", "np", ".", "__name__", ":", "mask", "=", "mask", ".", "get_data", "(", ")", "grid", "[", "~", "mask", ".", "astype", "(", "bool", ")", "]", "=", "0.0", "grid", "=", "nb", ".", "Nifti1Image", "(", "grid", ",", "image", ".", "get_affine", "(", ")", ",", "image", ".", "get_header", "(", ")", ")", "if", "save_file", "is", "not", "None", ":", "nb", ".", "save", "(", "grid", ",", "save_file", ")", "return", "grid" ]
Creates an image containing labeled cells in a 3D grid. Args: image: String or nibabel image. The image used to define the grid dimensions. Also used to define the mask to apply to the grid. Only voxels with non-zero values in the mask will be retained; all other voxels will be zeroed out in the returned image. scale: The scaling factor which controls the grid size. Value reflects diameter of cube in voxels. apply_mask: Boolean indicating whether or not to zero out voxels not in image. save_file: Optional string giving the path to save image to. Image written out is a standard Nifti image. If save_file is None, no image is written. Returns: A nibabel image with the same dimensions as the input image. All voxels in each cell in the 3D grid are assigned the same non-zero label.
[ "Creates", "an", "image", "containing", "labeled", "cells", "in", "a", "3D", "grid", ".", "Args", ":", "image", ":", "String", "or", "nibabel", "image", ".", "The", "image", "used", "to", "define", "the", "grid", "dimensions", ".", "Also", "used", "to", "define", "the", "mask", "to", "apply", "to", "the", "grid", ".", "Only", "voxels", "with", "non", "-", "zero", "values", "in", "the", "mask", "will", "be", "retained", ";", "all", "other", "voxels", "will", "be", "zeroed", "out", "in", "the", "returned", "image", ".", "scale", ":", "The", "scaling", "factor", "which", "controls", "the", "grid", "size", ".", "Value", "reflects", "diameter", "of", "cube", "in", "voxels", ".", "apply_mask", ":", "Boolean", "indicating", "whether", "or", "not", "to", "zero", "out", "voxels", "not", "in", "image", ".", "save_file", ":", "Optional", "string", "giving", "the", "path", "to", "save", "image", "to", ".", "Image", "written", "out", "is", "a", "standard", "Nifti", "image", ".", "If", "save_file", "is", "None", "no", "image", "is", "written", ".", "Returns", ":", "A", "nibabel", "image", "with", "the", "same", "dimensions", "as", "the", "input", "image", ".", "All", "voxels", "in", "each", "cell", "in", "the", "3D", "grid", "are", "assigned", "the", "same", "non", "-", "zero", "label", "." ]
python
test
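A hedged usage sketch for the grid builder above; the mask file name, scale and output path are illustrative.

# 'mask.nii.gz' stands in for any Nifti image that defines the grid space and mask.
grid_img = create_grid('mask.nii.gz', scale=6, apply_mask=True, save_file='grid_6vox.nii.gz')
print(grid_img.shape)  # same dimensions as the input image; cells share integer labels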
IdentityPython/pysaml2
src/saml2/attribute_converter.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/attribute_converter.py#L236-L257
def from_dict(self, mapdict): """ Import the attribute map from a dictionary :param mapdict: The dictionary """ self.name_format = mapdict["identifier"] try: self._fro = dict( [(k.lower(), v) for k, v in mapdict["fro"].items()]) except KeyError: pass try: self._to = dict([(k.lower(), v) for k, v in mapdict["to"].items()]) except KeyError: pass if self._fro is None and self._to is None: raise ConverterError("Missing specifications") if self._fro is None or self._to is None: self.adjust()
[ "def", "from_dict", "(", "self", ",", "mapdict", ")", ":", "self", ".", "name_format", "=", "mapdict", "[", "\"identifier\"", "]", "try", ":", "self", ".", "_fro", "=", "dict", "(", "[", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "k", ",", "v", "in", "mapdict", "[", "\"fro\"", "]", ".", "items", "(", ")", "]", ")", "except", "KeyError", ":", "pass", "try", ":", "self", ".", "_to", "=", "dict", "(", "[", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "k", ",", "v", "in", "mapdict", "[", "\"to\"", "]", ".", "items", "(", ")", "]", ")", "except", "KeyError", ":", "pass", "if", "self", ".", "_fro", "is", "None", "and", "self", ".", "_to", "is", "None", ":", "raise", "ConverterError", "(", "\"Missing specifications\"", ")", "if", "self", ".", "_fro", "is", "None", "or", "self", ".", "_to", "is", "None", ":", "self", ".", "adjust", "(", ")" ]
Import the attribute map from a dictionary :param mapdict: The dictionary
[ "Import", "the", "attribute", "map", "from", "a", "dictionary" ]
python
train
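An illustrative map dictionary in the shape from_dict expects; the OID/name pairs are examples and the bare AttributeConverter construction is an assumption about this class.

mapdict = {
    "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
    "fro": {"urn:oid:2.5.4.4": "sn"},
    "to": {"sn": "urn:oid:2.5.4.4"},
}
acs = AttributeConverter()  # assumed default construction
acs.from_dict(mapdict)      # keys of 'fro'/'to' are lower-cased on import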
danielfrg/datasciencebox
datasciencebox/salt/_modules/conda.py
https://github.com/danielfrg/datasciencebox/blob/6b7aa642c6616a46547035fcb815acc1de605a6f/datasciencebox/salt/_modules/conda.py#L115-L124
def _create_conda_cmd(conda_cmd, args=None, env=None, user=None): """ Utility to create a valid conda command """ cmd = [_get_conda_path(user=user), conda_cmd] if env: cmd.extend(['-n', env]) if args is not None and isinstance(args, list) and args != []: cmd.extend(args) return cmd
[ "def", "_create_conda_cmd", "(", "conda_cmd", ",", "args", "=", "None", ",", "env", "=", "None", ",", "user", "=", "None", ")", ":", "cmd", "=", "[", "_get_conda_path", "(", "user", "=", "user", ")", ",", "conda_cmd", "]", "if", "env", ":", "cmd", ".", "extend", "(", "[", "'-n'", ",", "env", "]", ")", "if", "args", "is", "not", "None", "and", "isinstance", "(", "args", ",", "list", ")", "and", "args", "!=", "[", "]", ":", "cmd", ".", "extend", "(", "args", ")", "return", "cmd" ]
Utility to create a valid conda command
[ "Utility", "to", "create", "a", "valid", "conda", "command" ]
python
train
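The helper above just assembles an argv-style list; a sketch of a typical result, with the conda path hedged since it comes from _get_conda_path.

cmd = _create_conda_cmd('install', args=['numpy', '-y'], env='science', user='ubuntu')
# e.g. ['/home/ubuntu/anaconda/bin/conda', 'install', '-n', 'science', 'numpy', '-y']
# (the leading path is whatever _get_conda_path resolves for that user)
print(cmd)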
mayfield/shellish
shellish/layout/table.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L1018-L1041
def tabulate(data, header=True, headers=None, accessors=None, **table_options): """ Shortcut function to produce tabular output of data without the need to create and configure a Table instance directly. The function does however return a table instance when it's done for any further use by the user. """ if header and not headers: data = iter(data) try: headers = next(data) except StopIteration: pass if headers and hasattr(headers, 'items') and accessors is None: # Dict mode; Build accessors and headers from keys of data. data = itertools.chain([headers], data) accessors = list(headers) headers = [' '.join(map(str.capitalize, x.replace('_', ' ').split())) for x in accessors] t = Table(headers=headers, accessors=accessors, **table_options) try: t.print(data) except RowsNotFound: pass return t
[ "def", "tabulate", "(", "data", ",", "header", "=", "True", ",", "headers", "=", "None", ",", "accessors", "=", "None", ",", "*", "*", "table_options", ")", ":", "if", "header", "and", "not", "headers", ":", "data", "=", "iter", "(", "data", ")", "try", ":", "headers", "=", "next", "(", "data", ")", "except", "StopIteration", ":", "pass", "if", "headers", "and", "hasattr", "(", "headers", ",", "'items'", ")", "and", "accessors", "is", "None", ":", "# Dict mode; Build accessors and headers from keys of data.", "data", "=", "itertools", ".", "chain", "(", "[", "headers", "]", ",", "data", ")", "accessors", "=", "list", "(", "headers", ")", "headers", "=", "[", "' '", ".", "join", "(", "map", "(", "str", ".", "capitalize", ",", "x", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "split", "(", ")", ")", ")", "for", "x", "in", "accessors", "]", "t", "=", "Table", "(", "headers", "=", "headers", ",", "accessors", "=", "accessors", ",", "*", "*", "table_options", ")", "try", ":", "t", ".", "print", "(", "data", ")", "except", "RowsNotFound", ":", "pass", "return", "t" ]
Shortcut function to produce tabular output of data without the need to create and configure a Table instance directly. The function does however return a table instance when it's done for any further use by the user.
[ "Shortcut", "function", "to", "produce", "tabular", "output", "of", "data", "without", "the", "need", "to", "create", "and", "configure", "a", "Table", "instance", "directly", ".", "The", "function", "does", "however", "return", "a", "table", "instance", "when", "it", "s", "done", "for", "any", "further", "use", "by", "the", "user", "." ]
python
train
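A dict-mode usage sketch for the shortcut above; headers and accessors are derived from the keys of the first row.

rows = [
    {'first_name': 'Ada', 'last_name': 'Lovelace'},
    {'first_name': 'Alan', 'last_name': 'Turing'},
]
table = tabulate(rows)
# Headers render as 'First Name' / 'Last Name', capitalized from the dict keys.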
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L85-L91
def space_lock(args): """ Lock a workspace """ r = fapi.lock_workspace(args.project, args.workspace) fapi._check_response_code(r, 204) if fcconfig.verbosity: eprint('Locked workspace {0}/{1}'.format(args.project, args.workspace)) return 0
[ "def", "space_lock", "(", "args", ")", ":", "r", "=", "fapi", ".", "lock_workspace", "(", "args", ".", "project", ",", "args", ".", "workspace", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "204", ")", "if", "fcconfig", ".", "verbosity", ":", "eprint", "(", "'Locked workspace {0}/{1}'", ".", "format", "(", "args", ".", "project", ",", "args", ".", "workspace", ")", ")", "return", "0" ]
Lock a workspace
[ "Lock", "a", "workspace" ]
python
train
saltstack/salt
salt/pillar/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L463-L470
def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail
[ "def", "__gather_avail", "(", "self", ")", ":", "avail", "=", "{", "}", "for", "saltenv", "in", "self", ".", "_get_envs", "(", ")", ":", "avail", "[", "saltenv", "]", "=", "self", ".", "client", ".", "list_states", "(", "saltenv", ")", "return", "avail" ]
Gather the lists of available sls data from the master
[ "Gather", "the", "lists", "of", "available", "sls", "data", "from", "the", "master" ]
python
train
jfinkels/birkhoff
birkhoff.py
https://github.com/jfinkels/birkhoff/blob/86fff692c9cfb7217e51e25868230f4e0b53caa0/birkhoff.py#L123-L237
def birkhoff_von_neumann_decomposition(D): """Returns the Birkhoff--von Neumann decomposition of the doubly stochastic matrix `D`. The input `D` must be a square NumPy array representing a doubly stochastic matrix (that is, a matrix whose entries are nonnegative reals and whose row sums and column sums are all 1). Each doubly stochastic matrix is a convex combination of at most ``n ** 2`` permutation matrices, where ``n`` is the dimension of the input array. The returned value is a list of pairs whose length is at most ``n ** 2``. In each pair, the first element is a real number in the interval **(0, 1]** and the second element is a NumPy array representing a permutation matrix. This represents the doubly stochastic matrix as a convex combination of the permutation matrices. The input matrix may also be a scalar multiple of a doubly stochastic matrix, in which case the row sums and column sums must each be *c*, for some positive real number *c*. This may be useful in avoiding precision issues: given a doubly stochastic matrix that will have many entries close to one, multiply it by a large positive integer. The returned permutation matrices will be the same regardless of whether the given matrix is a doubly stochastic matrix or a scalar multiple of a doubly stochastic matrix, but in the latter case, the coefficients will all be scaled by the appropriate scalar multiple, and their sum will be that scalar instead of one. For example:: >>> import numpy as np >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp >>> D = np.ones((2, 2)) >>> zipped_pairs = decomp(D) >>> coefficients, permutations = zip(*zipped_pairs) >>> coefficients (1.0, 1.0) >>> permutations[0] array([[ 1., 0.], [ 0., 1.]]) >>> permutations[1] array([[ 0., 1.], [ 1., 0.]]) >>> zipped_pairs = decomp(D / 2) # halve each value in the matrix >>> coefficients, permutations = zip(*zipped_pairs) >>> coefficients # will be half as large as before (0.5, 0.5) >>> permutations[0] # will be the same as before array([[ 1., 0.], [ 0., 1.]]) >>> permutations[1] array([[ 0., 1.], [ 1., 0.]]) The returned list of pairs is given in the order computed by the algorithm (so in particular they are not sorted in any way). """ m, n = D.shape if m != n: raise ValueError('Input matrix must be square ({} x {})'.format(m, n)) indices = list(itertools.product(range(m), range(n))) # These two lists will store the result as we build it up each iteration. coefficients = [] permutations = [] # Create a copy of D so that we don't modify it directly. Cast the # entries of the matrix to floating point numbers, regardless of # whether they were integers. S = D.astype('float') while not np.all(S == 0): # Create an undirected graph whose adjacency matrix contains a 1 # exactly where the matrix S has a nonzero entry. W = to_pattern_matrix(S) # Construct the bipartite graph whose left and right vertices both # represent the vertex set of the pattern graph (whose adjacency matrix # is ``W``). X = to_bipartite_matrix(W) # Convert the matrix of a bipartite graph into a NetworkX graph object. G = from_numpy_matrix(X) # Compute a perfect matching for this graph. The dictionary `M` has one # entry for each matched vertex (in both the left and the right vertex # sets), and the corresponding value is its partner. # # The bipartite maximum matching algorithm requires specifying # the left set of nodes in the bipartite graph. By construction, # the left set of nodes is {0, ..., n - 1} and the right set is # {n, ..., 2n - 1}; see `to_bipartite_matrix()`. 
left_nodes = range(n) M = maximum_matching(G, left_nodes) # However, since we have both a left vertex set and a right vertex set, # each representing the original vertex set of the pattern graph # (``W``), we need to convert any vertex greater than ``n`` to its # original vertex number. To do this, # # - ignore any keys greater than ``n``, since they are already # covered by earlier key/value pairs, # - ensure that all values are less than ``n``. # M = {u: v % n for u, v in M.items() if u < n} # Convert that perfect matching to a permutation matrix. P = to_permutation_matrix(M) # Get the smallest entry of S corresponding to the 1 entries in the # permutation matrix. q = min(S[i, j] for (i, j) in indices if P[i, j] == 1) # Store the coefficient and the permutation matrix for later. coefficients.append(q) permutations.append(P) # Subtract P scaled by q. After this subtraction, S has a zero entry # where the value q used to live. S -= q * P # PRECISION ISSUE: There seems to be a problem with floating point # precision here, so we need to round down to 0 any entry that is very # small. S[np.abs(S) < TOLERANCE] = 0.0 return list(zip(coefficients, permutations))
[ "def", "birkhoff_von_neumann_decomposition", "(", "D", ")", ":", "m", ",", "n", "=", "D", ".", "shape", "if", "m", "!=", "n", ":", "raise", "ValueError", "(", "'Input matrix must be square ({} x {})'", ".", "format", "(", "m", ",", "n", ")", ")", "indices", "=", "list", "(", "itertools", ".", "product", "(", "range", "(", "m", ")", ",", "range", "(", "n", ")", ")", ")", "# These two lists will store the result as we build it up each iteration.", "coefficients", "=", "[", "]", "permutations", "=", "[", "]", "# Create a copy of D so that we don't modify it directly. Cast the", "# entries of the matrix to floating point numbers, regardless of", "# whether they were integers.", "S", "=", "D", ".", "astype", "(", "'float'", ")", "while", "not", "np", ".", "all", "(", "S", "==", "0", ")", ":", "# Create an undirected graph whose adjacency matrix contains a 1", "# exactly where the matrix S has a nonzero entry.", "W", "=", "to_pattern_matrix", "(", "S", ")", "# Construct the bipartite graph whose left and right vertices both", "# represent the vertex set of the pattern graph (whose adjacency matrix", "# is ``W``).", "X", "=", "to_bipartite_matrix", "(", "W", ")", "# Convert the matrix of a bipartite graph into a NetworkX graph object.", "G", "=", "from_numpy_matrix", "(", "X", ")", "# Compute a perfect matching for this graph. The dictionary `M` has one", "# entry for each matched vertex (in both the left and the right vertex", "# sets), and the corresponding value is its partner.", "#", "# The bipartite maximum matching algorithm requires specifying", "# the left set of nodes in the bipartite graph. By construction,", "# the left set of nodes is {0, ..., n - 1} and the right set is", "# {n, ..., 2n - 1}; see `to_bipartite_matrix()`.", "left_nodes", "=", "range", "(", "n", ")", "M", "=", "maximum_matching", "(", "G", ",", "left_nodes", ")", "# However, since we have both a left vertex set and a right vertex set,", "# each representing the original vertex set of the pattern graph", "# (``W``), we need to convert any vertex greater than ``n`` to its", "# original vertex number. To do this,", "#", "# - ignore any keys greater than ``n``, since they are already", "# covered by earlier key/value pairs,", "# - ensure that all values are less than ``n``.", "#", "M", "=", "{", "u", ":", "v", "%", "n", "for", "u", ",", "v", "in", "M", ".", "items", "(", ")", "if", "u", "<", "n", "}", "# Convert that perfect matching to a permutation matrix.", "P", "=", "to_permutation_matrix", "(", "M", ")", "# Get the smallest entry of S corresponding to the 1 entries in the", "# permutation matrix.", "q", "=", "min", "(", "S", "[", "i", ",", "j", "]", "for", "(", "i", ",", "j", ")", "in", "indices", "if", "P", "[", "i", ",", "j", "]", "==", "1", ")", "# Store the coefficient and the permutation matrix for later.", "coefficients", ".", "append", "(", "q", ")", "permutations", ".", "append", "(", "P", ")", "# Subtract P scaled by q. After this subtraction, S has a zero entry", "# where the value q used to live.", "S", "-=", "q", "*", "P", "# PRECISION ISSUE: There seems to be a problem with floating point", "# precision here, so we need to round down to 0 any entry that is very", "# small.", "S", "[", "np", ".", "abs", "(", "S", ")", "<", "TOLERANCE", "]", "=", "0.0", "return", "list", "(", "zip", "(", "coefficients", ",", "permutations", ")", ")" ]
Returns the Birkhoff--von Neumann decomposition of the doubly stochastic matrix `D`. The input `D` must be a square NumPy array representing a doubly stochastic matrix (that is, a matrix whose entries are nonnegative reals and whose row sums and column sums are all 1). Each doubly stochastic matrix is a convex combination of at most ``n ** 2`` permutation matrices, where ``n`` is the dimension of the input array. The returned value is a list of pairs whose length is at most ``n ** 2``. In each pair, the first element is a real number in the interval **(0, 1]** and the second element is a NumPy array representing a permutation matrix. This represents the doubly stochastic matrix as a convex combination of the permutation matrices. The input matrix may also be a scalar multiple of a doubly stochastic matrix, in which case the row sums and column sums must each be *c*, for some positive real number *c*. This may be useful in avoiding precision issues: given a doubly stochastic matrix that will have many entries close to one, multiply it by a large positive integer. The returned permutation matrices will be the same regardless of whether the given matrix is a doubly stochastic matrix or a scalar multiple of a doubly stochastic matrix, but in the latter case, the coefficients will all be scaled by the appropriate scalar multiple, and their sum will be that scalar instead of one. For example:: >>> import numpy as np >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp >>> D = np.ones((2, 2)) >>> zipped_pairs = decomp(D) >>> coefficients, permutations = zip(*zipped_pairs) >>> coefficients (1.0, 1.0) >>> permutations[0] array([[ 1., 0.], [ 0., 1.]]) >>> permutations[1] array([[ 0., 1.], [ 1., 0.]]) >>> zipped_pairs = decomp(D / 2) # halve each value in the matrix >>> coefficients, permutations = zip(*zipped_pairs) >>> coefficients # will be half as large as before (0.5, 0.5) >>> permutations[0] # will be the same as before array([[ 1., 0.], [ 0., 1.]]) >>> permutations[1] array([[ 0., 1.], [ 1., 0.]]) The returned list of pairs is given in the order computed by the algorithm (so in particular they are not sorted in any way).
[ "Returns", "the", "Birkhoff", "--", "von", "Neumann", "decomposition", "of", "the", "doubly", "stochastic", "matrix", "D", "." ]
python
valid
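A short sanity check that complements the doctest in the record above: the weighted sum of the returned permutation matrices reconstructs the input.

import numpy as np

D = np.ones((2, 2)) / 2
pairs = birkhoff_von_neumann_decomposition(D)
reconstructed = sum(coeff * perm for coeff, perm in pairs)
print(np.allclose(reconstructed, D))  # True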
stevearc/dynamo3
dynamo3/rate.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L27-L31
def add(self, now, num): """ Add a timestamp and date to the data """ if num == 0: return self.points.append((now, num))
[ "def", "add", "(", "self", ",", "now", ",", "num", ")", ":", "if", "num", "==", "0", ":", "return", "self", ".", "points", ".", "append", "(", "(", "now", ",", "num", ")", ")" ]
Add a timestamp and date to the data
[ "Add", "a", "timestamp", "and", "date", "to", "the", "data" ]
python
train
saltstack/salt
salt/modules/netbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbox.py#L224-L256
def create_device_type(model, manufacturer): ''' .. versionadded:: 2019.2.0 Create a device type. If the manufacturer doesn't exist, create a new manufacturer. model String of device model, e.g., ``MX480`` manufacturer String of device manufacturer, e.g., ``Juniper`` CLI Example: .. code-block:: bash salt myminion netbox.create_device_type MX480 Juniper ''' nb_type = get_('dcim', 'device-types', model=model) if nb_type: return False nb_man = get_('dcim', 'manufacturers', name=manufacturer) new_man = None if not nb_man: new_man = create_manufacturer(manufacturer) payload = {'model': model, 'manufacturer': nb_man['id'], 'slug': slugify(model)} typ = _add('dcim', 'device-types', payload) ret_dict = {'dcim': {'device-types': payload}} if new_man: ret_dict['dcim'].update(new_man['dcim']) if typ: return ret_dict else: return False
[ "def", "create_device_type", "(", "model", ",", "manufacturer", ")", ":", "nb_type", "=", "get_", "(", "'dcim'", ",", "'device-types'", ",", "model", "=", "model", ")", "if", "nb_type", ":", "return", "False", "nb_man", "=", "get_", "(", "'dcim'", ",", "'manufacturers'", ",", "name", "=", "manufacturer", ")", "new_man", "=", "None", "if", "not", "nb_man", ":", "new_man", "=", "create_manufacturer", "(", "manufacturer", ")", "payload", "=", "{", "'model'", ":", "model", ",", "'manufacturer'", ":", "nb_man", "[", "'id'", "]", ",", "'slug'", ":", "slugify", "(", "model", ")", "}", "typ", "=", "_add", "(", "'dcim'", ",", "'device-types'", ",", "payload", ")", "ret_dict", "=", "{", "'dcim'", ":", "{", "'device-types'", ":", "payload", "}", "}", "if", "new_man", ":", "ret_dict", "[", "'dcim'", "]", ".", "update", "(", "new_man", "[", "'dcim'", "]", ")", "if", "typ", ":", "return", "ret_dict", "else", ":", "return", "False" ]
.. versionadded:: 2019.2.0 Create a device type. If the manufacturer doesn't exist, create a new manufacturer. model String of device model, e.g., ``MX480`` manufacturer String of device manufacturer, e.g., ``Juniper`` CLI Example: .. code-block:: bash salt myminion netbox.create_device_type MX480 Juniper
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
googleapis/google-cloud-python
redis/google/cloud/redis_v1beta1/gapic/cloud_redis_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/redis/google/cloud/redis_v1beta1/gapic/cloud_redis_client.py#L310-L366
def get_instance( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets the details of a specific Redis instance. Example: >>> from google.cloud import redis_v1beta1 >>> >>> client = redis_v1beta1.CloudRedisClient() >>> >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]') >>> >>> response = client.get_instance(name) Args: name (str): Required. Redis instance resource name using the form: ``projects/{project_id}/locations/{location_id}/instances/{instance_id}`` where ``location_id`` refers to a GCP region retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.redis_v1beta1.types.Instance` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_instance" not in self._inner_api_calls: self._inner_api_calls[ "get_instance" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_instance, default_retry=self._method_configs["GetInstance"].retry, default_timeout=self._method_configs["GetInstance"].timeout, client_info=self._client_info, ) request = cloud_redis_pb2.GetInstanceRequest(name=name) return self._inner_api_calls["get_instance"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "get_instance", "(", "self", ",", "name", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"get_instance\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"get_instance\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "get_instance", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"GetInstance\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"GetInstance\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "cloud_redis_pb2", ".", "GetInstanceRequest", "(", "name", "=", "name", ")", "return", "self", ".", "_inner_api_calls", "[", "\"get_instance\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Gets the details of a specific Redis instance. Example: >>> from google.cloud import redis_v1beta1 >>> >>> client = redis_v1beta1.CloudRedisClient() >>> >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]') >>> >>> response = client.get_instance(name) Args: name (str): Required. Redis instance resource name using the form: ``projects/{project_id}/locations/{location_id}/instances/{instance_id}`` where ``location_id`` refers to a GCP region retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.redis_v1beta1.types.Instance` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Gets", "the", "details", "of", "a", "specific", "Redis", "instance", "." ]
python
train
noirbizarre/django-eztables
eztables/views.py
https://github.com/noirbizarre/django-eztables/blob/347e74dcc08121d20f4cf942181d873dbe33b995/eztables/views.py#L35-L48
def get_real_field(model, field_name): ''' Get the real field from a model given its name. Handle nested models recursively (aka. ``__`` lookups) ''' parts = field_name.split('__') field = model._meta.get_field(parts[0]) if len(parts) == 1: return model._meta.get_field(field_name) elif isinstance(field, models.ForeignKey): return get_real_field(field.rel.to, '__'.join(parts[1:])) else: raise Exception('Unhandled field: %s' % field_name)
[ "def", "get_real_field", "(", "model", ",", "field_name", ")", ":", "parts", "=", "field_name", ".", "split", "(", "'__'", ")", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "parts", "[", "0", "]", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "return", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "elif", "isinstance", "(", "field", ",", "models", ".", "ForeignKey", ")", ":", "return", "get_real_field", "(", "field", ".", "rel", ".", "to", ",", "'__'", ".", "join", "(", "parts", "[", "1", ":", "]", ")", ")", "else", ":", "raise", "Exception", "(", "'Unhandled field: %s'", "%", "field_name", ")" ]
Get the real field from a model given its name. Handle nested models recursively (aka. ``__`` lookups)
[ "Get", "the", "real", "field", "from", "a", "model", "given", "its", "name", "." ]
python
train
vxgmichel/aiostream
aiostream/stream/create.py
https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/create.py#L111-L118
def range(*args, interval=0): """Generate a given range of numbers. It supports the same arguments as the builtin function. An optional interval can be given to space the values out. """ agen = from_iterable.raw(builtins.range(*args)) return time.spaceout.raw(agen, interval) if interval else agen
[ "def", "range", "(", "*", "args", ",", "interval", "=", "0", ")", ":", "agen", "=", "from_iterable", ".", "raw", "(", "builtins", ".", "range", "(", "*", "args", ")", ")", "return", "time", ".", "spaceout", ".", "raw", "(", "agen", ",", "interval", ")", "if", "interval", "else", "agen" ]
Generate a given range of numbers. It supports the same arguments as the builtin function. An optional interval can be given to space the values out.
[ "Generate", "a", "given", "range", "of", "numbers", "." ]
python
train
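A short consumption sketch for the operator above, assuming ``aiostream`` is installed and that ``range`` is exposed as ``aiostream.stream.range`` (as the module path suggests); ``stream.list`` is used here to collect the spaced-out values::

    import asyncio
    from aiostream import stream

    async def main():
        # Same arguments as the builtin range, plus an optional spacing interval.
        values = await stream.list(stream.range(0, 10, 2, interval=0.1))
        print(values)  # expected: [0, 2, 4, 6, 8]

    asyncio.run(main())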
sepandhaghighi/pycm
pycm/pycm_overall_func.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L108-L133
def overall_MCC_calc(classes, table, TOP, P): """ Calculate Overall_MCC. :param classes: classes :type classes : list :param table: input matrix :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :return: Overall_MCC as float """ try: cov_x_y = 0 cov_x_x = 0 cov_y_y = 0 matrix_sum = sum(list(TOP.values())) for i in classes: cov_x_x += TOP[i] * (matrix_sum - TOP[i]) cov_y_y += P[i] * (matrix_sum - P[i]) cov_x_y += (table[i][i] * matrix_sum - P[i] * TOP[i]) return cov_x_y / (math.sqrt(cov_y_y * cov_x_x)) except Exception: return "None"
[ "def", "overall_MCC_calc", "(", "classes", ",", "table", ",", "TOP", ",", "P", ")", ":", "try", ":", "cov_x_y", "=", "0", "cov_x_x", "=", "0", "cov_y_y", "=", "0", "matrix_sum", "=", "sum", "(", "list", "(", "TOP", ".", "values", "(", ")", ")", ")", "for", "i", "in", "classes", ":", "cov_x_x", "+=", "TOP", "[", "i", "]", "*", "(", "matrix_sum", "-", "TOP", "[", "i", "]", ")", "cov_y_y", "+=", "P", "[", "i", "]", "*", "(", "matrix_sum", "-", "P", "[", "i", "]", ")", "cov_x_y", "+=", "(", "table", "[", "i", "]", "[", "i", "]", "*", "matrix_sum", "-", "P", "[", "i", "]", "*", "TOP", "[", "i", "]", ")", "return", "cov_x_y", "/", "(", "math", ".", "sqrt", "(", "cov_y_y", "*", "cov_x_x", ")", ")", "except", "Exception", ":", "return", "\"None\"" ]
Calculate Overall_MCC. :param classes: classes :type classes : list :param table: input matrix :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :return: Overall_MCC as float
[ "Calculate", "Overall_MCC", "." ]
python
train
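To make the covariance-style formula above concrete, here is a self-contained sketch that mirrors the same computation for a small two-class confusion matrix (class names and counts are made up; the table is assumed to be indexed as table[actual][predicted])::

    import math

    classes = ['cat', 'dog']
    table = {'cat': {'cat': 6, 'dog': 2},
             'dog': {'cat': 1, 'dog': 5}}
    P = {c: sum(table[c].values()) for c in classes}                 # condition positive (row sums)
    TOP = {c: sum(table[r][c] for r in classes) for c in classes}    # test outcome positive (column sums)

    n = sum(TOP.values())
    cov_x_y = sum(table[c][c] * n - P[c] * TOP[c] for c in classes)
    cov_x_x = sum(TOP[c] * (n - TOP[c]) for c in classes)
    cov_y_y = sum(P[c] * (n - P[c]) for c in classes)
    print(cov_x_y / math.sqrt(cov_x_x * cov_y_y))   # ~0.577 for this table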
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7370-L7374
def xpathRegisterNs(self, prefix, ns_uri): """Register a new namespace. If @ns_uri is None it unregisters the namespace """ ret = libxml2mod.xmlXPathRegisterNs(self._o, prefix, ns_uri) return ret
[ "def", "xpathRegisterNs", "(", "self", ",", "prefix", ",", "ns_uri", ")", ":", "ret", "=", "libxml2mod", ".", "xmlXPathRegisterNs", "(", "self", ".", "_o", ",", "prefix", ",", "ns_uri", ")", "return", "ret" ]
Register a new namespace. If @ns_uri is None it unregisters the namespace
[ "Register", "a", "new", "namespace", ".", "If" ]
python
train
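A short usage sketch for the method above with the standard libxml2 Python bindings: register a prefix on an XPath context, then use it in a query (the namespace URI and element names are made up)::

    import libxml2

    doc = libxml2.parseDoc(
        '<root xmlns="http://example.com/ns"><item>a</item><item>b</item></root>')
    ctxt = doc.xpathNewContext()
    ctxt.xpathRegisterNs('ex', 'http://example.com/ns')

    items = ctxt.xpathEval('//ex:item')
    print([item.content for item in items])   # ['a', 'b']

    ctxt.xpathFreeContext()
    doc.freeDoc()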
gitpython-developers/smmap
smmap/util.py
https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L32-L43
def align_to_mmap(num, round_up): """ Align the given integer number to the closest page offset, which usually is 4096 bytes. :param round_up: if True, the next higher multiple of page size is used, otherwise the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0) :return: num rounded to closest page""" res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY if round_up and (res != num): res += ALLOCATIONGRANULARITY # END handle size return res
[ "def", "align_to_mmap", "(", "num", ",", "round_up", ")", ":", "res", "=", "(", "num", "//", "ALLOCATIONGRANULARITY", ")", "*", "ALLOCATIONGRANULARITY", "if", "round_up", "and", "(", "res", "!=", "num", ")", ":", "res", "+=", "ALLOCATIONGRANULARITY", "# END handle size", "return", "res" ]
Align the given integer number to the closest page offset, which usually is 4096 bytes. :param round_up: if True, the next higher multiple of page size is used, otherwise the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0) :return: num rounded to closest page
[ "Align", "the", "given", "integer", "number", "to", "the", "closest", "page", "offset", "which", "usually", "is", "4096", "bytes", "." ]
python
train
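A quick illustration of the rounding behaviour above, assuming ``smmap`` is installed (the helper lives in ``smmap.util`` per the path shown); the concrete numbers depend on the platform's allocation granularity, which is typically 4096 bytes::

    from mmap import ALLOCATIONGRANULARITY
    from smmap.util import align_to_mmap

    print(ALLOCATIONGRANULARITY)                                  # e.g. 4096
    print(align_to_mmap(1, round_up=False))                       # 0
    print(align_to_mmap(1, round_up=True))                        # e.g. 4096
    print(align_to_mmap(ALLOCATIONGRANULARITY, round_up=True))    # unchanged: already aligned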
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L695-L714
def _setup_bonds(self): """Derive Bond objects from the record.""" self._bonds = {} if 'bonds' not in self.record: return # Create bonds aid1s = self.record['bonds']['aid1'] aid2s = self.record['bonds']['aid2'] orders = self.record['bonds']['order'] if not len(aid1s) == len(aid2s) == len(orders): raise ResponseParseError('Error parsing bonds') for aid1, aid2, order in zip(aid1s, aid2s, orders): self._bonds[frozenset((aid1, aid2))] = Bond(aid1=aid1, aid2=aid2, order=order) # Add styles if 'coords' in self.record and 'style' in self.record['coords'][0]['conformers'][0]: aid1s = self.record['coords'][0]['conformers'][0]['style']['aid1'] aid2s = self.record['coords'][0]['conformers'][0]['style']['aid2'] styles = self.record['coords'][0]['conformers'][0]['style']['annotation'] for aid1, aid2, style in zip(aid1s, aid2s, styles): self._bonds[frozenset((aid1, aid2))].style = style
[ "def", "_setup_bonds", "(", "self", ")", ":", "self", ".", "_bonds", "=", "{", "}", "if", "'bonds'", "not", "in", "self", ".", "record", ":", "return", "# Create bonds", "aid1s", "=", "self", ".", "record", "[", "'bonds'", "]", "[", "'aid1'", "]", "aid2s", "=", "self", ".", "record", "[", "'bonds'", "]", "[", "'aid2'", "]", "orders", "=", "self", ".", "record", "[", "'bonds'", "]", "[", "'order'", "]", "if", "not", "len", "(", "aid1s", ")", "==", "len", "(", "aid2s", ")", "==", "len", "(", "orders", ")", ":", "raise", "ResponseParseError", "(", "'Error parsing bonds'", ")", "for", "aid1", ",", "aid2", ",", "order", "in", "zip", "(", "aid1s", ",", "aid2s", ",", "orders", ")", ":", "self", ".", "_bonds", "[", "frozenset", "(", "(", "aid1", ",", "aid2", ")", ")", "]", "=", "Bond", "(", "aid1", "=", "aid1", ",", "aid2", "=", "aid2", ",", "order", "=", "order", ")", "# Add styles", "if", "'coords'", "in", "self", ".", "record", "and", "'style'", "in", "self", ".", "record", "[", "'coords'", "]", "[", "0", "]", "[", "'conformers'", "]", "[", "0", "]", ":", "aid1s", "=", "self", ".", "record", "[", "'coords'", "]", "[", "0", "]", "[", "'conformers'", "]", "[", "0", "]", "[", "'style'", "]", "[", "'aid1'", "]", "aid2s", "=", "self", ".", "record", "[", "'coords'", "]", "[", "0", "]", "[", "'conformers'", "]", "[", "0", "]", "[", "'style'", "]", "[", "'aid2'", "]", "styles", "=", "self", ".", "record", "[", "'coords'", "]", "[", "0", "]", "[", "'conformers'", "]", "[", "0", "]", "[", "'style'", "]", "[", "'annotation'", "]", "for", "aid1", ",", "aid2", ",", "style", "in", "zip", "(", "aid1s", ",", "aid2s", ",", "styles", ")", ":", "self", ".", "_bonds", "[", "frozenset", "(", "(", "aid1", ",", "aid2", ")", ")", "]", ".", "style", "=", "style" ]
Derive Bond objects from the record.
[ "Derive", "Bond", "objects", "from", "the", "record", "." ]
python
train
h2oai/h2o-3
scripts/extractGLRMRuntimeJavaLog.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/extractGLRMRuntimeJavaLog.py#L31-L112
def extractRunInto(javaLogText): """ This function will extract the various operation time for GLRM model building iterations. :param javaLogText: :return: """ global g_initialXY global g_reguarlize_Y global g_regularize_X_objective global g_updateX global g_updateY global g_objective global g_stepsize global g_history if os.path.isfile(javaLogText): run_result = dict() run_result["total time (ms)"] = [] run_result["initialXY (ms)"] = [] run_result["regularize Y (ms)"] = [] run_result["regularize X and objective (ms)"] = [] run_result["update X (ms)"] = [] run_result["update Y (ms)"] = [] run_result["objective (ms)"] = [] run_result["step size (ms)"] = [] run_result["update history (ms)"] = [] total_run_time = -1 val = 0.0 with open(javaLogText, 'r') as thefile: # go into tempfile and grab test run info for each_line in thefile: temp_string = each_line.split() if len(temp_string) > 0: val = temp_string[-1].replace('\\','') if g_initialXY in each_line: # start of a new file if total_run_time > 0: # update total run time run_result["total time (ms)"].append(total_run_time) total_run_time = 0.0 else: total_run_time = 0.0 run_result["initialXY (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_reguarlize_Y in each_line: run_result["regularize Y (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_regularize_X_objective in each_line: run_result["regularize X and objective (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_updateX in each_line: run_result["update X (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_updateY in each_line: run_result["update Y (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_objective in each_line: run_result["objective (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_stepsize in each_line: run_result["step size (ms)"].append(float(val)) total_run_time = total_run_time+float(val) if g_history in each_line: run_result["update history (ms)"].append(float(val)) total_run_time = total_run_time+float(val) run_result["total time (ms)"].append(total_run_time) # save the last one print("Run result summary: \n {0}".format(run_result)) else: print("Cannot find your java log file. Nothing is done.\n")
[ "def", "extractRunInto", "(", "javaLogText", ")", ":", "global", "g_initialXY", "global", "g_reguarlize_Y", "global", "g_regularize_X_objective", "global", "g_updateX", "global", "g_updateY", "global", "g_objective", "global", "g_stepsize", "global", "g_history", "if", "os", ".", "path", ".", "isfile", "(", "javaLogText", ")", ":", "run_result", "=", "dict", "(", ")", "run_result", "[", "\"total time (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"initialXY (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"regularize Y (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"regularize X and objective (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"update X (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"update Y (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"objective (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"step size (ms)\"", "]", "=", "[", "]", "run_result", "[", "\"update history (ms)\"", "]", "=", "[", "]", "total_run_time", "=", "-", "1", "val", "=", "0.0", "with", "open", "(", "javaLogText", ",", "'r'", ")", "as", "thefile", ":", "# go into tempfile and grab test run info", "for", "each_line", "in", "thefile", ":", "temp_string", "=", "each_line", ".", "split", "(", ")", "if", "len", "(", "temp_string", ")", ">", "0", ":", "val", "=", "temp_string", "[", "-", "1", "]", ".", "replace", "(", "'\\\\'", ",", "''", ")", "if", "g_initialXY", "in", "each_line", ":", "# start of a new file", "if", "total_run_time", ">", "0", ":", "# update total run time", "run_result", "[", "\"total time (ms)\"", "]", ".", "append", "(", "total_run_time", ")", "total_run_time", "=", "0.0", "else", ":", "total_run_time", "=", "0.0", "run_result", "[", "\"initialXY (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_reguarlize_Y", "in", "each_line", ":", "run_result", "[", "\"regularize Y (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_regularize_X_objective", "in", "each_line", ":", "run_result", "[", "\"regularize X and objective (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_updateX", "in", "each_line", ":", "run_result", "[", "\"update X (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_updateY", "in", "each_line", ":", "run_result", "[", "\"update Y (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_objective", "in", "each_line", ":", "run_result", "[", "\"objective (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_stepsize", "in", "each_line", ":", "run_result", "[", "\"step size (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "if", "g_history", "in", "each_line", ":", "run_result", "[", "\"update history (ms)\"", "]", ".", "append", "(", "float", "(", "val", ")", ")", "total_run_time", "=", "total_run_time", "+", "float", "(", "val", ")", "run_result", "[", "\"total time (ms)\"", "]", ".", "append", "(", "total_run_time", ")", "# save the last one", "print", "(", "\"Run result summary: \\n {0}\"", ".", "format", "(", 
"run_result", ")", ")", "else", ":", "print", "(", "\"Cannot find your java log file. Nothing is done.\\n\"", ")" ]
This function will extract the various operation time for GLRM model building iterations. :param javaLogText: :return:
[ "This", "function", "will", "extract", "the", "various", "operation", "time", "for", "GLRM", "model", "building", "iterations", "." ]
python
test
opennode/waldur-core
waldur_core/logging/elasticsearch_client.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/logging/elasticsearch_client.py#L89-L94
def _execute_if_not_empty(func): """ Execute function only if one of input parameters is not empty """ def wrapper(*args, **kwargs): if any(args[1:]) or any(kwargs.items()): return func(*args, **kwargs) return wrapper
[ "def", "_execute_if_not_empty", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "any", "(", "args", "[", "1", ":", "]", ")", "or", "any", "(", "kwargs", ".", "items", "(", ")", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Execute function only if one of input parameters is not empty
[ "Execute", "function", "only", "if", "one", "of", "input", "parameters", "is", "not", "empty" ]
python
train
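The decorator above simply skips the wrapped call (implicitly returning None) when every positional argument after ``self`` and every keyword argument is empty. A self-contained sketch of the same pattern with a made-up class::

    def _execute_if_not_empty(func):
        """ Execute function only if one of input parameters is not empty """
        def wrapper(*args, **kwargs):
            if any(args[1:]) or any(kwargs.items()):
                return func(*args, **kwargs)
        return wrapper

    class Pusher:
        @_execute_if_not_empty
        def push(self, *events):
            return 'pushed %d event(s)' % len(events)

    p = Pusher()
    print(p.push())            # None -- nothing to push, the call is skipped
    print(p.push('created'))   # pushed 1 event(s)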
python-fedex-devs/python-fedex
fedex/services/ship_service.py
https://github.com/python-fedex-devs/python-fedex/blob/7ea2ca80c362f5dbbc8d959ab47648c7a4ab24eb/fedex/services/ship_service.py#L191-L198
def _prepare_wsdl_objects(self): """ Preps the WSDL data structures for the user. """ self.DeletionControlType = self.client.factory.create('DeletionControlType') self.TrackingId = self.client.factory.create('TrackingId') self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')
[ "def", "_prepare_wsdl_objects", "(", "self", ")", ":", "self", ".", "DeletionControlType", "=", "self", ".", "client", ".", "factory", ".", "create", "(", "'DeletionControlType'", ")", "self", ".", "TrackingId", "=", "self", ".", "client", ".", "factory", ".", "create", "(", "'TrackingId'", ")", "self", ".", "TrackingId", ".", "TrackingIdType", "=", "self", ".", "client", ".", "factory", ".", "create", "(", "'TrackingIdType'", ")" ]
Preps the WSDL data structures for the user.
[ "Preps", "the", "WSDL", "data", "structures", "for", "the", "user", "." ]
python
train
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L3354-L3397
def snapshots_get(container, name, remote_addr=None, cert=None, key=None, verify_cert=True): ''' Get information about snapshot for a container container : The name of the container to get. name : The name of the snapshot. remote_addr : An URL to a remote server. The 'cert' and 'key' fields must also be provided if 'remote_addr' is defined. Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Verify the ssl certificate. Default: True CLI Examples: .. code-block:: bash $ salt '*' lxd.snapshots_get test-container test-snapshot ''' container = container_get( container, remote_addr, cert, key, verify_cert, _raw=True ) return container.snapshots.get(name)
[ "def", "snapshots_get", "(", "container", ",", "name", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "container", "=", "container_get", "(", "container", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "return", "container", ".", "snapshots", ".", "get", "(", "name", ")" ]
Get information about snapshot for a container container : The name of the container to get. name : The name of the snapshot. remote_addr : An URL to a remote server. The 'cert' and 'key' fields must also be provided if 'remote_addr' is defined. Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Verify the ssl certificate. Default: True CLI Examples: .. code-block:: bash $ salt '*' lxd.snapshots_get test-container test-snapshot
[ "Get", "information", "about", "snapshot", "for", "a", "container" ]
python
train
onelogin/python3-saml
src/onelogin/saml2/response.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L590-L657
def process_signed_elements(self): """ Verifies the signature nodes: - Checks that are Response or Assertion - Check that IDs and reference URI are unique and consistent. :returns: The signed elements tag names :rtype: list """ sign_nodes = self.__query('//ds:Signature') signed_elements = [] verified_seis = [] verified_ids = [] response_tag = '{%s}Response' % OneLogin_Saml2_Constants.NS_SAMLP assertion_tag = '{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML for sign_node in sign_nodes: signed_element = sign_node.getparent().tag if signed_element != response_tag and signed_element != assertion_tag: raise OneLogin_Saml2_ValidationError( 'Invalid Signature Element %s SAML Response rejected' % signed_element, OneLogin_Saml2_ValidationError.WRONG_SIGNED_ELEMENT ) if not sign_node.getparent().get('ID'): raise OneLogin_Saml2_ValidationError( 'Signed Element must contain an ID. SAML Response rejected', OneLogin_Saml2_ValidationError.ID_NOT_FOUND_IN_SIGNED_ELEMENT ) id_value = sign_node.getparent().get('ID') if id_value in verified_ids: raise OneLogin_Saml2_ValidationError( 'Duplicated ID. SAML Response rejected', OneLogin_Saml2_ValidationError.DUPLICATED_ID_IN_SIGNED_ELEMENTS ) verified_ids.append(id_value) # Check that reference URI matches the parent ID and no duplicate References or IDs ref = OneLogin_Saml2_XML.query(sign_node, './/ds:Reference') if ref: ref = ref[0] if ref.get('URI'): sei = ref.get('URI')[1:] if sei != id_value: raise OneLogin_Saml2_ValidationError( 'Found an invalid Signed Element. SAML Response rejected', OneLogin_Saml2_ValidationError.INVALID_SIGNED_ELEMENT ) if sei in verified_seis: raise OneLogin_Saml2_ValidationError( 'Duplicated Reference URI. SAML Response rejected', OneLogin_Saml2_ValidationError.DUPLICATED_REFERENCE_IN_SIGNED_ELEMENTS ) verified_seis.append(sei) signed_elements.append(signed_element) if signed_elements: if not self.validate_signed_elements(signed_elements, raise_exceptions=True): raise OneLogin_Saml2_ValidationError( 'Found an unexpected Signature Element. SAML Response rejected', OneLogin_Saml2_ValidationError.UNEXPECTED_SIGNED_ELEMENTS ) return signed_elements
[ "def", "process_signed_elements", "(", "self", ")", ":", "sign_nodes", "=", "self", ".", "__query", "(", "'//ds:Signature'", ")", "signed_elements", "=", "[", "]", "verified_seis", "=", "[", "]", "verified_ids", "=", "[", "]", "response_tag", "=", "'{%s}Response'", "%", "OneLogin_Saml2_Constants", ".", "NS_SAMLP", "assertion_tag", "=", "'{%s}Assertion'", "%", "OneLogin_Saml2_Constants", ".", "NS_SAML", "for", "sign_node", "in", "sign_nodes", ":", "signed_element", "=", "sign_node", ".", "getparent", "(", ")", ".", "tag", "if", "signed_element", "!=", "response_tag", "and", "signed_element", "!=", "assertion_tag", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Invalid Signature Element %s SAML Response rejected'", "%", "signed_element", ",", "OneLogin_Saml2_ValidationError", ".", "WRONG_SIGNED_ELEMENT", ")", "if", "not", "sign_node", ".", "getparent", "(", ")", ".", "get", "(", "'ID'", ")", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Signed Element must contain an ID. SAML Response rejected'", ",", "OneLogin_Saml2_ValidationError", ".", "ID_NOT_FOUND_IN_SIGNED_ELEMENT", ")", "id_value", "=", "sign_node", ".", "getparent", "(", ")", ".", "get", "(", "'ID'", ")", "if", "id_value", "in", "verified_ids", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Duplicated ID. SAML Response rejected'", ",", "OneLogin_Saml2_ValidationError", ".", "DUPLICATED_ID_IN_SIGNED_ELEMENTS", ")", "verified_ids", ".", "append", "(", "id_value", ")", "# Check that reference URI matches the parent ID and no duplicate References or IDs", "ref", "=", "OneLogin_Saml2_XML", ".", "query", "(", "sign_node", ",", "'.//ds:Reference'", ")", "if", "ref", ":", "ref", "=", "ref", "[", "0", "]", "if", "ref", ".", "get", "(", "'URI'", ")", ":", "sei", "=", "ref", ".", "get", "(", "'URI'", ")", "[", "1", ":", "]", "if", "sei", "!=", "id_value", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Found an invalid Signed Element. SAML Response rejected'", ",", "OneLogin_Saml2_ValidationError", ".", "INVALID_SIGNED_ELEMENT", ")", "if", "sei", "in", "verified_seis", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Duplicated Reference URI. SAML Response rejected'", ",", "OneLogin_Saml2_ValidationError", ".", "DUPLICATED_REFERENCE_IN_SIGNED_ELEMENTS", ")", "verified_seis", ".", "append", "(", "sei", ")", "signed_elements", ".", "append", "(", "signed_element", ")", "if", "signed_elements", ":", "if", "not", "self", ".", "validate_signed_elements", "(", "signed_elements", ",", "raise_exceptions", "=", "True", ")", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Found an unexpected Signature Element. SAML Response rejected'", ",", "OneLogin_Saml2_ValidationError", ".", "UNEXPECTED_SIGNED_ELEMENTS", ")", "return", "signed_elements" ]
Verifies the signature nodes: - Checks that are Response or Assertion - Check that IDs and reference URI are unique and consistent. :returns: The signed elements tag names :rtype: list
[ "Verifies", "the", "signature", "nodes", ":", "-", "Checks", "that", "are", "Response", "or", "Assertion", "-", "Check", "that", "IDs", "and", "reference", "URI", "are", "unique", "and", "consistent", "." ]
python
train
acutesoftware/virtual-AI-simulator
scripts/recipe_finder.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/scripts/recipe_finder.py#L32-L55
def main(): """ script to find a list of recipes for a group of people with specific likes and dislikes. Output of script best ingred = ['Tea', 'Tofu', 'Cheese', 'Cucumber', 'Salad', 'Chocolate'] worst ingred = ['Fish', 'Lamb', 'Pie', 'Asparagus', 'Chicken', 'Turnips'] Use this = Tofu Use this = Cheese """ s = rawdata.content.DataFiles() all_ingredients = list(s.get_collist_by_name(data_files[1]['file'], data_files[1]['col'])[0]) #find_best_ingredients(ingredients_on_hand, dinner_guests) best_ingred, worst_ingred = find_best_ingredients(all_ingredients, dinner_guests) print('best ingred = ', best_ingred) print('worst ingred = ', worst_ingred) for have in ingredients_on_hand: if have in best_ingred: print('Use this = ', have)
[ "def", "main", "(", ")", ":", "s", "=", "rawdata", ".", "content", ".", "DataFiles", "(", ")", "all_ingredients", "=", "list", "(", "s", ".", "get_collist_by_name", "(", "data_files", "[", "1", "]", "[", "'file'", "]", ",", "data_files", "[", "1", "]", "[", "'col'", "]", ")", "[", "0", "]", ")", "#find_best_ingredients(ingredients_on_hand, dinner_guests)", "best_ingred", ",", "worst_ingred", "=", "find_best_ingredients", "(", "all_ingredients", ",", "dinner_guests", ")", "print", "(", "'best ingred = '", ",", "best_ingred", ")", "print", "(", "'worst ingred = '", ",", "worst_ingred", ")", "for", "have", "in", "ingredients_on_hand", ":", "if", "have", "in", "best_ingred", ":", "print", "(", "'Use this = '", ",", "have", ")" ]
script to find a list of recipes for a group of people with specific likes and dislikes. Output of script best ingred = ['Tea', 'Tofu', 'Cheese', 'Cucumber', 'Salad', 'Chocolate'] worst ingred = ['Fish', 'Lamb', 'Pie', 'Asparagus', 'Chicken', 'Turnips'] Use this = Tofu Use this = Cheese
[ "script", "to", "find", "a", "list", "of", "recipes", "for", "a", "group", "of", "people", "with", "specific", "likes", "and", "dislikes", ".", "Output", "of", "script" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L929-L932
def community_topic_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic" api_path = "/api/v2/community/topics.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "community_topic_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/community/topics.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "help_center", "/", "topics#create", "-", "topic" ]
python
train
SBRG/ssbio
ssbio/databases/pdb.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L287-L346
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file): """Map a UniProt residue number to its corresponding PDB residue number. This function requires that the SIFTS file be downloaded, and also a chain ID (as different chains may have different mappings). Args: uniprot_resnum (int): integer of the residue number you'd like to map chain_id (str): string of the PDB chain to map to sifts_file (str): Path to the SIFTS XML file Returns: (tuple): tuple containing: mapped_resnum (int): Mapped residue number is_observed (bool): Indicates if the 3D structure actually shows the residue """ # Load the xml with lxml parser = etree.XMLParser(ns_clean=True) tree = etree.parse(sifts_file, parser) root = tree.getroot() my_pdb_resnum = None # TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that my_pdb_annotation = False # Find the right chain (entities in the xml doc) ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity' for chain in root.findall(ent): # TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order! if chain.attrib['entityId'] == chain_id: # Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here" # Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum" # Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue) ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum my_uniprot_residue = chain.findall(ures) if len(my_uniprot_residue) == 1: # Get crossRefDb dbSource="PDB" parent = my_uniprot_residue[0].getparent() pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]' my_pdb_residue = parent.findall(pres) my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum']) # Get <residueDetail dbSource="PDBe" property="Annotation"> # Will be Not_Observed if it is not seen in the PDB anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]' my_pdb_annotation = parent.findall(anno) if len(my_pdb_annotation) == 1: my_pdb_annotation = my_pdb_annotation[0].text if my_pdb_annotation == 'Not_Observed': my_pdb_annotation = False else: my_pdb_annotation = True else: return None, False return my_pdb_resnum, my_pdb_annotation
[ "def", "map_uniprot_resnum_to_pdb", "(", "uniprot_resnum", ",", "chain_id", ",", "sifts_file", ")", ":", "# Load the xml with lxml", "parser", "=", "etree", ".", "XMLParser", "(", "ns_clean", "=", "True", ")", "tree", "=", "etree", ".", "parse", "(", "sifts_file", ",", "parser", ")", "root", "=", "tree", ".", "getroot", "(", ")", "my_pdb_resnum", "=", "None", "# TODO: \"Engineered_Mutation is also a possible annotation, need to figure out what to do with that", "my_pdb_annotation", "=", "False", "# Find the right chain (entities in the xml doc)", "ent", "=", "'.//{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'", "for", "chain", "in", "root", ".", "findall", "(", "ent", ")", ":", "# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!", "if", "chain", ".", "attrib", "[", "'entityId'", "]", "==", "chain_id", ":", "# Find the \"crossRefDb\" tag that has the attributes dbSource=\"UniProt\" and dbResNum=\"your_resnum_here\"", "# Then match it to the crossRefDb dbResNum that has the attribute dbSource=\"PDBresnum\"", "# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)", "ures", "=", "'.//{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource=\"UniProt\"][@dbResNum=\"%s\"]'", "%", "uniprot_resnum", "my_uniprot_residue", "=", "chain", ".", "findall", "(", "ures", ")", "if", "len", "(", "my_uniprot_residue", ")", "==", "1", ":", "# Get crossRefDb dbSource=\"PDB\"", "parent", "=", "my_uniprot_residue", "[", "0", "]", ".", "getparent", "(", ")", "pres", "=", "'.//{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource=\"PDB\"]'", "my_pdb_residue", "=", "parent", ".", "findall", "(", "pres", ")", "my_pdb_resnum", "=", "int", "(", "my_pdb_residue", "[", "0", "]", ".", "attrib", "[", "'dbResNum'", "]", ")", "# Get <residueDetail dbSource=\"PDBe\" property=\"Annotation\">", "# Will be Not_Observed if it is not seen in the PDB", "anno", "=", "'.//{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource=\"PDBe\"][@property=\"Annotation\"]'", "my_pdb_annotation", "=", "parent", ".", "findall", "(", "anno", ")", "if", "len", "(", "my_pdb_annotation", ")", "==", "1", ":", "my_pdb_annotation", "=", "my_pdb_annotation", "[", "0", "]", ".", "text", "if", "my_pdb_annotation", "==", "'Not_Observed'", ":", "my_pdb_annotation", "=", "False", "else", ":", "my_pdb_annotation", "=", "True", "else", ":", "return", "None", ",", "False", "return", "my_pdb_resnum", ",", "my_pdb_annotation" ]
Map a UniProt residue number to its corresponding PDB residue number. This function requires that the SIFTS file be downloaded, and also a chain ID (as different chains may have different mappings). Args: uniprot_resnum (int): integer of the residue number you'd like to map chain_id (str): string of the PDB chain to map to sifts_file (str): Path to the SIFTS XML file Returns: (tuple): tuple containing: mapped_resnum (int): Mapped residue number is_observed (bool): Indicates if the 3D structure actually shows the residue
[ "Map", "a", "UniProt", "residue", "number", "to", "its", "corresponding", "PDB", "residue", "number", "." ]
python
train
zabertech/python-izaber
izaber/date.py
https://github.com/zabertech/python-izaber/blob/729bf9ef637e084c8ab3cc16c34cf659d3a79ee4/izaber/date.py#L340-L348
def daily_hours(self,local=False): """ This returns a number from 0 to 24 that describes the number of hours passed in a day. This is very useful for hr.attendances """ data = self.get(local) daily_hours = (data.hour + data.minute / 60.0 + data.second / 3600.0) return round(daily_hours,2)
[ "def", "daily_hours", "(", "self", ",", "local", "=", "False", ")", ":", "data", "=", "self", ".", "get", "(", "local", ")", "daily_hours", "=", "(", "data", ".", "hour", "+", "data", ".", "minute", "/", "60.0", "+", "data", ".", "second", "/", "3600.0", ")", "return", "round", "(", "daily_hours", ",", "2", ")" ]
This returns a number from 0 to 24 that describes the number of hours passed in a day. This is very useful for hr.attendances
[ "This", "returns", "a", "number", "from", "0", "to", "24", "that", "describes", "the", "number", "of", "hours", "passed", "in", "a", "day", ".", "This", "is", "very", "useful", "for", "hr", ".", "attendances" ]
python
train
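The hour fraction above is plain arithmetic; a self-contained sketch of the same conversion for an ordinary ``datetime``, without the izaber wrapper::

    import datetime

    def daily_hours(moment):
        # Hours elapsed since midnight, as a decimal rounded to two places.
        return round(moment.hour + moment.minute / 60.0 + moment.second / 3600.0, 2)

    print(daily_hours(datetime.datetime(2020, 1, 1, 9, 30, 0)))    # 9.5
    print(daily_hours(datetime.datetime(2020, 1, 1, 17, 45, 36)))  # 17.76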
tensorflow/tensor2tensor
tensor2tensor/models/research/moe_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L503-L512
def denoise_v1_m15(): """Denoising experiment.""" hparams = xmoe2_v1() # no local attention # TODO(noam): non-masked version of local-attention hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] hparams.decoder_type = "denoising" hparams.noising_spec_train = {"type": "mask", "prob": 0.15} return hparams
[ "def", "denoise_v1_m15", "(", ")", ":", "hparams", "=", "xmoe2_v1", "(", ")", "# no local attention", "# TODO(noam): non-masked version of local-attention", "hparams", ".", "decoder_layers", "=", "[", "\"att\"", "if", "l", "==", "\"local_att\"", "else", "l", "for", "l", "in", "hparams", ".", "decoder_layers", "]", "hparams", ".", "decoder_type", "=", "\"denoising\"", "hparams", ".", "noising_spec_train", "=", "{", "\"type\"", ":", "\"mask\"", ",", "\"prob\"", ":", "0.15", "}", "return", "hparams" ]
Denoising experiment.
[ "Denoising", "experiment", "." ]
python
train
fishtown-analytics/dbt
core/dbt/adapters/cache.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/adapters/cache.py#L188-L197
def remove_schema(self, database, schema): """Remove a schema from the set of known schemas (case-insensitive) If the schema does not exist, it will be ignored - it could just be a temporary table. :param str database: The database name to remove. :param str schema: The schema name to remove. """ self.schemas.discard((_lower(database), _lower(schema)))
[ "def", "remove_schema", "(", "self", ",", "database", ",", "schema", ")", ":", "self", ".", "schemas", ".", "discard", "(", "(", "_lower", "(", "database", ")", ",", "_lower", "(", "schema", ")", ")", ")" ]
Remove a schema from the set of known schemas (case-insensitive) If the schema does not exist, it will be ignored - it could just be a temporary table. :param str database: The database name to remove. :param str schema: The schema name to remove.
[ "Remove", "a", "schema", "from", "the", "set", "of", "known", "schemas", "(", "case", "-", "insensitive", ")" ]
python
train
log2timeline/plaso
plaso/lib/timelib.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/timelib.py#L356-L362
def RoundToSeconds(cls, timestamp): """Takes a timestamp value and rounds it to a second precision.""" leftovers = timestamp % definitions.MICROSECONDS_PER_SECOND scrubbed = timestamp - leftovers rounded = round(float(leftovers) / definitions.MICROSECONDS_PER_SECOND) return int(scrubbed + rounded * definitions.MICROSECONDS_PER_SECOND)
[ "def", "RoundToSeconds", "(", "cls", ",", "timestamp", ")", ":", "leftovers", "=", "timestamp", "%", "definitions", ".", "MICROSECONDS_PER_SECOND", "scrubbed", "=", "timestamp", "-", "leftovers", "rounded", "=", "round", "(", "float", "(", "leftovers", ")", "/", "definitions", ".", "MICROSECONDS_PER_SECOND", ")", "return", "int", "(", "scrubbed", "+", "rounded", "*", "definitions", ".", "MICROSECONDS_PER_SECOND", ")" ]
Takes a timestamp value and rounds it to a second precision.
[ "Takes", "a", "timestamp", "value", "and", "rounds", "it", "to", "a", "second", "precision", "." ]
python
train
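A self-contained sketch of the rounding above, with the constant written out explicitly since ``definitions`` is internal to plaso; timestamps are in microseconds and are rounded to whole seconds::

    MICROSECONDS_PER_SECOND = 1000000

    def round_to_seconds(timestamp):
        leftovers = timestamp % MICROSECONDS_PER_SECOND
        scrubbed = timestamp - leftovers
        rounded = round(float(leftovers) / MICROSECONDS_PER_SECOND)
        return int(scrubbed + rounded * MICROSECONDS_PER_SECOND)

    print(round_to_seconds(1400000))   # 1000000 -- 0.4 s of leftovers rounds down
    print(round_to_seconds(1600000))   # 2000000 -- 0.6 s of leftovers rounds up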
saltstack/salt
salt/states/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/keystone.py#L402-L429
def tenant_absent(name, profile=None, **connection_args): ''' Ensure that the keystone tenant is absent. name The name of the tenant that should not exist ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Tenant / project "{0}" is already absent'.format(name)} # Check if tenant is present tenant = __salt__['keystone.tenant_get'](name=name, profile=profile, **connection_args) if 'Error' not in tenant: if __opts__.get('test'): ret['result'] = None ret['comment'] = 'Tenant / project "{0}" will be deleted'.format(name) return ret # Delete tenant __salt__['keystone.tenant_delete'](name=name, profile=profile, **connection_args) ret['comment'] = 'Tenant / project "{0}" has been deleted'.format(name) ret['changes']['Tenant/Project'] = 'Deleted' return ret
[ "def", "tenant_absent", "(", "name", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "'Tenant / project \"{0}\" is already absent'", ".", "format", "(", "name", ")", "}", "# Check if tenant is present", "tenant", "=", "__salt__", "[", "'keystone.tenant_get'", "]", "(", "name", "=", "name", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "if", "'Error'", "not", "in", "tenant", ":", "if", "__opts__", ".", "get", "(", "'test'", ")", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Tenant / project \"{0}\" will be deleted'", ".", "format", "(", "name", ")", "return", "ret", "# Delete tenant", "__salt__", "[", "'keystone.tenant_delete'", "]", "(", "name", "=", "name", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "ret", "[", "'comment'", "]", "=", "'Tenant / project \"{0}\" has been deleted'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "'Tenant/Project'", "]", "=", "'Deleted'", "return", "ret" ]
Ensure that the keystone tenant is absent. name The name of the tenant that should not exist
[ "Ensure", "that", "the", "keystone", "tenant", "is", "absent", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L10621-L10639
def authenticate_external(self, auth_params): """Verify credentials using the external auth library. in auth_params of type str The auth parameters, credentials, etc. out result of type str The authentification result. """ if not isinstance(auth_params, list): raise TypeError("auth_params can only be an instance of type list") for a in auth_params[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") result = self._call("authenticateExternal", in_p=[auth_params]) return result
[ "def", "authenticate_external", "(", "self", ",", "auth_params", ")", ":", "if", "not", "isinstance", "(", "auth_params", ",", "list", ")", ":", "raise", "TypeError", "(", "\"auth_params can only be an instance of type list\"", ")", "for", "a", "in", "auth_params", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type basestring\"", ")", "result", "=", "self", ".", "_call", "(", "\"authenticateExternal\"", ",", "in_p", "=", "[", "auth_params", "]", ")", "return", "result" ]
Verify credentials using the external auth library. in auth_params of type str The auth parameters, credentials, etc. out result of type str The authentification result.
[ "Verify", "credentials", "using", "the", "external", "auth", "library", "." ]
python
train
junaruga/rpm-py-installer
install.py
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L28-L54
def run(self): """Run install process.""" try: self.linux.verify_system_status() except InstallSkipError: Log.info('Install skipped.') return work_dir = tempfile.mkdtemp(suffix='-rpm-py-installer') Log.info("Created working directory '{0}'".format(work_dir)) with Cmd.pushd(work_dir): self.rpm_py.download_and_install() if not self.python.is_python_binding_installed(): message = ( 'RPM Python binding failed to install ' 'with unknown reason.' ) raise InstallError(message) # TODO: Print installed module name and version as INFO. if self.is_work_dir_removed: shutil.rmtree(work_dir) Log.info("Removed working directory '{0}'".format(work_dir)) else: Log.info("Saved working directory '{0}'".format(work_dir))
[ "def", "run", "(", "self", ")", ":", "try", ":", "self", ".", "linux", ".", "verify_system_status", "(", ")", "except", "InstallSkipError", ":", "Log", ".", "info", "(", "'Install skipped.'", ")", "return", "work_dir", "=", "tempfile", ".", "mkdtemp", "(", "suffix", "=", "'-rpm-py-installer'", ")", "Log", ".", "info", "(", "\"Created working directory '{0}'\"", ".", "format", "(", "work_dir", ")", ")", "with", "Cmd", ".", "pushd", "(", "work_dir", ")", ":", "self", ".", "rpm_py", ".", "download_and_install", "(", ")", "if", "not", "self", ".", "python", ".", "is_python_binding_installed", "(", ")", ":", "message", "=", "(", "'RPM Python binding failed to install '", "'with unknown reason.'", ")", "raise", "InstallError", "(", "message", ")", "# TODO: Print installed module name and version as INFO.", "if", "self", ".", "is_work_dir_removed", ":", "shutil", ".", "rmtree", "(", "work_dir", ")", "Log", ".", "info", "(", "\"Removed working directory '{0}'\"", ".", "format", "(", "work_dir", ")", ")", "else", ":", "Log", ".", "info", "(", "\"Saved working directory '{0}'\"", ".", "format", "(", "work_dir", ")", ")" ]
Run install process.
[ "Run", "install", "process", "." ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/views.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/views.py#L244-L253
def pass_bucket(f): """Decorate to retrieve a bucket.""" @wraps(f) def decorate(*args, **kwargs): bucket_id = kwargs.pop('bucket_id') bucket = Bucket.get(as_uuid(bucket_id)) if not bucket: abort(404, 'Bucket does not exist.') return f(bucket=bucket, *args, **kwargs) return decorate
[ "def", "pass_bucket", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "decorate", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bucket_id", "=", "kwargs", ".", "pop", "(", "'bucket_id'", ")", "bucket", "=", "Bucket", ".", "get", "(", "as_uuid", "(", "bucket_id", ")", ")", "if", "not", "bucket", ":", "abort", "(", "404", ",", "'Bucket does not exist.'", ")", "return", "f", "(", "bucket", "=", "bucket", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorate" ]
Decorate to retrieve a bucket.
[ "Decorate", "to", "retrieve", "a", "bucket", "." ]
python
train
nfcpy/nfcpy
src/nfc/ndef/handover.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/ndef/handover.py#L739-L745
def type(self): """The alternative carrier type name, equivalent to :attr:`Carrier.record.type` or :attr:`Carrier.record.carrier_type` if the carrier is specified as a :class:`HandoverCarrierRecord`.""" return self.record.type if self.record.type != "urn:nfc:wkt:Hc" \ else self.record.carrier_type
[ "def", "type", "(", "self", ")", ":", "return", "self", ".", "record", ".", "type", "if", "self", ".", "record", ".", "type", "!=", "\"urn:nfc:wkt:Hc\"", "else", "self", ".", "record", ".", "carrier_type" ]
The alternative carrier type name, equivalent to :attr:`Carrier.record.type` or :attr:`Carrier.record.carrier_type` if the carrier is specified as a :class:`HandoverCarrierRecord`.
[ "The", "alternative", "carrier", "type", "name", "equivalent", "to", ":", "attr", ":", "Carrier", ".", "record", ".", "type", "or", ":", "attr", ":", "Carrier", ".", "record", ".", "carrier_type", "if", "the", "carrier", "is", "specified", "as", "a", ":", "class", ":", "HandoverCarrierRecord", "." ]
python
train
santosjorge/cufflinks
cufflinks/tools.py
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/tools.py#L579-L595
def get_base_layout(figs): """ Generates a layout with the union of all properties of multiple figures' layouts Parameters: ----------- fig : list(Figures) List of Plotly Figures """ layout={} for fig in figs: if not isinstance(fig,dict): fig=fig.to_dict() for k,v in list(fig['layout'].items()): layout[k]=v return layout
[ "def", "get_base_layout", "(", "figs", ")", ":", "layout", "=", "{", "}", "for", "fig", "in", "figs", ":", "if", "not", "isinstance", "(", "fig", ",", "dict", ")", ":", "fig", "=", "fig", ".", "to_dict", "(", ")", "for", "k", ",", "v", "in", "list", "(", "fig", "[", "'layout'", "]", ".", "items", "(", ")", ")", ":", "layout", "[", "k", "]", "=", "v", "return", "layout" ]
Generates a layout with the union of all properties of multiple figures' layouts Parameters: ----------- fig : list(Figures) List of Plotly Figures
[ "Generates", "a", "layout", "with", "the", "union", "of", "all", "properties", "of", "multiple", "figures", "layouts" ]
python
train
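Because the helper above accepts plain dictionaries as well as Figure objects, it can be exercised with minimal inputs. A sketch, assuming cufflinks is installed and the function is importable from ``cufflinks.tools`` as the path suggests::

    from cufflinks.tools import get_base_layout

    fig1 = {'data': [], 'layout': {'title': 'Prices', 'xaxis': {'title': 'date'}}}
    fig2 = {'data': [], 'layout': {'yaxis': {'title': 'USD'}, 'showlegend': False}}

    layout = get_base_layout([fig1, fig2])
    print(layout)   # union of both layouts; later figures override overlapping keys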
BDNYC/astrodbkit
astrodbkit/astrodb.py
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrodb.py#L36-L69
def create_database(dbpath, schema='', overwrite=True): """ Create a new database at the given dbpath Parameters ---------- dbpath: str The full path for the new database, including the filename and .db file extension. schema: str The path to the .sql schema for the database overwrite: bool Overwrite dbpath if it already exists """ if dbpath.endswith('.db'): if os.path.isfile(dbpath) and overwrite: os.system('rm {}'.format(dbpath)) # Load the schema if given if schema: os.system("cat {} | sqlite3 {}".format(schema,dbpath)) # Otherwise just make an empty SOURCES table else: sources_table = "CREATE TABLE sources (id INTEGER PRIMARY KEY, ra REAL, dec REAL, designation TEXT, " \ "publication_id INTEGER, shortname TEXT, names TEXT, comments TEXT)" os.system("sqlite3 {} '{}'".format(dbpath, sources_table)) if os.path.isfile(dbpath): print( "\nDatabase created! To load, run\n\ndb = astrodb.Database('{}')" "\n\nThen run db.modify_table() method to create tables.".format(dbpath)) else: print("Please provide a path and file name with a .db file extension, e.g. /Users/<username>/Desktop/test.db")
[ "def", "create_database", "(", "dbpath", ",", "schema", "=", "''", ",", "overwrite", "=", "True", ")", ":", "if", "dbpath", ".", "endswith", "(", "'.db'", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "dbpath", ")", "and", "overwrite", ":", "os", ".", "system", "(", "'rm {}'", ".", "format", "(", "dbpath", ")", ")", "# Load the schema if given", "if", "schema", ":", "os", ".", "system", "(", "\"cat {} | sqlite3 {}\"", ".", "format", "(", "schema", ",", "dbpath", ")", ")", "# Otherwise just make an empty SOURCES table", "else", ":", "sources_table", "=", "\"CREATE TABLE sources (id INTEGER PRIMARY KEY, ra REAL, dec REAL, designation TEXT, \"", "\"publication_id INTEGER, shortname TEXT, names TEXT, comments TEXT)\"", "os", ".", "system", "(", "\"sqlite3 {} '{}'\"", ".", "format", "(", "dbpath", ",", "sources_table", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "dbpath", ")", ":", "print", "(", "\"\\nDatabase created! To load, run\\n\\ndb = astrodb.Database('{}')\"", "\"\\n\\nThen run db.modify_table() method to create tables.\"", ".", "format", "(", "dbpath", ")", ")", "else", ":", "print", "(", "\"Please provide a path and file name with a .db file extension, e.g. /Users/<username>/Desktop/test.db\"", ")" ]
Create a new database at the given dbpath Parameters ---------- dbpath: str The full path for the new database, including the filename and .db file extension. schema: str The path to the .sql schema for the database overwrite: bool Overwrite dbpath if it already exists
[ "Create", "a", "new", "database", "at", "the", "given", "dbpath" ]
python
train
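Following the usage hint the function itself prints, a minimal sketch; note that the implementation shells out to the ``sqlite3`` command-line tool, so that binary has to be on the PATH, and the file path here is only an example::

    from astrodbkit import astrodb

    # Create an empty database containing just a SOURCES table ...
    astrodb.create_database('/tmp/test.db')

    # ... then load it, as suggested by the function's own output.
    db = astrodb.Database('/tmp/test.db')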
INM-6/hybridLFPy
hybridLFPy/gdf.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L318-L341
def neurons(self): """ Return list of neuron indices. Parameters ---------- None Returns ------- list list of neuron indices See also -------- sqlite3.connect.cursor """ self.cursor.execute('SELECT DISTINCT neuron FROM spikes ORDER BY neuron') sel = self.cursor.fetchall() return np.array(sel).flatten()
[ "def", "neurons", "(", "self", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'SELECT DISTINCT neuron FROM spikes ORDER BY neuron'", ")", "sel", "=", "self", ".", "cursor", ".", "fetchall", "(", ")", "return", "np", ".", "array", "(", "sel", ")", ".", "flatten", "(", ")" ]
Return list of neuron indices. Parameters ---------- None Returns ------- list list of neuron indices See also -------- sqlite3.connect.cursor
[ "Return", "list", "of", "neuron", "indices", "." ]
python
train
awslabs/aws-cfn-template-flip
cfn_flip/main.py
https://github.com/awslabs/aws-cfn-template-flip/blob/837576bea243e3f5efb0a20b84802371272e2d33/cfn_flip/main.py#L31-L65
def main(ctx, **kwargs): """ AWS CloudFormation Template Flip is a tool that converts AWS CloudFormation templates between JSON and YAML formats, making use of the YAML format's short function syntax where possible. """ in_format = kwargs.pop('in_format') out_format = kwargs.pop('out_format') or kwargs.pop('out_flag') no_flip = kwargs.pop('no_flip') clean = kwargs.pop('clean') long_form = kwargs.pop('long') input_file = kwargs.pop('input') output_file = kwargs.pop('output') if not in_format: if input_file.name.endswith(".json"): in_format = "json" elif input_file.name.endswith(".yaml") or input_file.name.endswith(".yml"): in_format = "yaml" if input_file.name == "<stdin>" and sys.stdin.isatty(): click.echo(ctx.get_help()) ctx.exit() try: output_file.write(flip( input_file.read(), in_format=in_format, out_format=out_format, clean_up=clean, no_flip=no_flip, long_form=long_form )) except Exception as e: raise click.ClickException("{}".format(e))
[ "def", "main", "(", "ctx", ",", "*", "*", "kwargs", ")", ":", "in_format", "=", "kwargs", ".", "pop", "(", "'in_format'", ")", "out_format", "=", "kwargs", ".", "pop", "(", "'out_format'", ")", "or", "kwargs", ".", "pop", "(", "'out_flag'", ")", "no_flip", "=", "kwargs", ".", "pop", "(", "'no_flip'", ")", "clean", "=", "kwargs", ".", "pop", "(", "'clean'", ")", "long_form", "=", "kwargs", ".", "pop", "(", "'long'", ")", "input_file", "=", "kwargs", ".", "pop", "(", "'input'", ")", "output_file", "=", "kwargs", ".", "pop", "(", "'output'", ")", "if", "not", "in_format", ":", "if", "input_file", ".", "name", ".", "endswith", "(", "\".json\"", ")", ":", "in_format", "=", "\"json\"", "elif", "input_file", ".", "name", ".", "endswith", "(", "\".yaml\"", ")", "or", "input_file", ".", "name", ".", "endswith", "(", "\".yml\"", ")", ":", "in_format", "=", "\"yaml\"", "if", "input_file", ".", "name", "==", "\"<stdin>\"", "and", "sys", ".", "stdin", ".", "isatty", "(", ")", ":", "click", ".", "echo", "(", "ctx", ".", "get_help", "(", ")", ")", "ctx", ".", "exit", "(", ")", "try", ":", "output_file", ".", "write", "(", "flip", "(", "input_file", ".", "read", "(", ")", ",", "in_format", "=", "in_format", ",", "out_format", "=", "out_format", ",", "clean_up", "=", "clean", ",", "no_flip", "=", "no_flip", ",", "long_form", "=", "long_form", ")", ")", "except", "Exception", "as", "e", ":", "raise", "click", ".", "ClickException", "(", "\"{}\"", ".", "format", "(", "e", ")", ")" ]
AWS CloudFormation Template Flip is a tool that converts AWS CloudFormation templates between JSON and YAML formats, making use of the YAML format's short function syntax where possible.
[ "AWS", "CloudFormation", "Template", "Flip", "is", "a", "tool", "that", "converts", "AWS", "CloudFormation", "templates", "between", "JSON", "and", "YAML", "formats", "making", "use", "of", "the", "YAML", "format", "s", "short", "function", "syntax", "where", "possible", "." ]
python
train
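The extension-based format inference inside the cfn-flip main command above is simple enough to lift out on its own; a stdlib-only sketch of that one step, with the helper name invented here:

def guess_template_format(filename, explicit=None):
    # Mirror the record's logic: an explicit format wins, otherwise fall back
    # to the file extension, otherwise leave the format undecided.
    if explicit:
        return explicit
    if filename.endswith('.json'):
        return 'json'
    if filename.endswith(('.yaml', '.yml')):
        return 'yaml'
    return None

print(guess_template_format('template.yml'))           # yaml
print(guess_template_format('template.json', 'yaml'))  # yaml (explicit wins)
print(guess_template_format('<stdin>'))                 # None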
chaoss/grimoirelab-cereslib
cereslib/enrich/enrich.py
https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/enrich/enrich.py#L452-L467
def __remove_surrogates(self, s, method='replace'): """ Remove surrogates in the specified string """ if type(s) == list and len(s) == 1: if self.__is_surrogate_escaped(s[0]): return s[0].encode('utf-8', method).decode('utf-8') else: return "" if type(s) == list: return "" if type(s) != str: return "" if self.__is_surrogate_escaped(s): return s.encode('utf-8', method).decode('utf-8') return s
[ "def", "__remove_surrogates", "(", "self", ",", "s", ",", "method", "=", "'replace'", ")", ":", "if", "type", "(", "s", ")", "==", "list", "and", "len", "(", "s", ")", "==", "1", ":", "if", "self", ".", "__is_surrogate_escaped", "(", "s", "[", "0", "]", ")", ":", "return", "s", "[", "0", "]", ".", "encode", "(", "'utf-8'", ",", "method", ")", ".", "decode", "(", "'utf-8'", ")", "else", ":", "return", "\"\"", "if", "type", "(", "s", ")", "==", "list", ":", "return", "\"\"", "if", "type", "(", "s", ")", "!=", "str", ":", "return", "\"\"", "if", "self", ".", "__is_surrogate_escaped", "(", "s", ")", ":", "return", "s", ".", "encode", "(", "'utf-8'", ",", "method", ")", ".", "decode", "(", "'utf-8'", ")", "return", "s" ]
Remove surrogates in the specified string
[ "Remove", "surrogates", "in", "the", "specified", "string" ]
python
train
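The encode/decode round trip used by __remove_surrogates above can be seen in isolation on a surrogate-escaped string; a small stdlib-only sketch with invented input bytes:

# Bytes that are not valid UTF-8 decode into lone surrogates under the
# 'surrogateescape' error handler; re-encoding with 'replace' (the default
# method in the record) substitutes them so the string is safe to serialise.
raw = b'caf\xe9 latte'                       # latin-1 bytes, not valid UTF-8
s = raw.decode('utf-8', 'surrogateescape')   # 'caf\udce9 latte'

cleaned = s.encode('utf-8', 'replace').decode('utf-8')
print(cleaned)  # 'caf? latte'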
greenape/mktheapidocs
mktheapidocs/mkapi.py
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L635-L666
def attributes_section(thing, doc, header_level): """ Generate an attributes section for classes. Prefers type annotations, if they are present. Parameters ---------- thing : class Class to document doc : dict Numpydoc output header_level : int Number of `#`s to use for header Returns ------- list of str Markdown formatted attribute list """ # Get Attributes if not inspect.isclass(thing): return [] props, class_doc = _split_props(thing, doc["Attributes"]) tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n") if len(tl) == 0 and len(props) > 0: tl.append("\n### Attributes\n\n") for prop in props: tl.append(f"- [`{prop}`](#{prop})\n\n") return tl
[ "def", "attributes_section", "(", "thing", ",", "doc", ",", "header_level", ")", ":", "# Get Attributes", "if", "not", "inspect", ".", "isclass", "(", "thing", ")", ":", "return", "[", "]", "props", ",", "class_doc", "=", "_split_props", "(", "thing", ",", "doc", "[", "\"Attributes\"", "]", ")", "tl", "=", "type_list", "(", "inspect", ".", "signature", "(", "thing", ")", ",", "class_doc", ",", "\"\\n### Attributes\\n\\n\"", ")", "if", "len", "(", "tl", ")", "==", "0", "and", "len", "(", "props", ")", ">", "0", ":", "tl", ".", "append", "(", "\"\\n### Attributes\\n\\n\"", ")", "for", "prop", "in", "props", ":", "tl", ".", "append", "(", "f\"- [`{prop}`](#{prop})\\n\\n\"", ")", "return", "tl" ]
Generate an attributes section for classes. Prefers type annotations, if they are present. Parameters ---------- thing : class Class to document doc : dict Numpydoc output header_level : int Number of `#`s to use for header Returns ------- list of str Markdown formatted attribute list
[ "Generate", "an", "attributes", "section", "for", "classes", "." ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/scene/cameras/turntable.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/cameras/turntable.py#L111-L123
def orbit(self, azim, elev): """ Orbits the camera around the center position. Parameters ---------- azim : float Angle in degrees to rotate horizontally around the center point. elev : float Angle in degrees to rotate vertically around the center point. """ self.azimuth += azim self.elevation = np.clip(self.elevation + elev, -90, 90) self.view_changed()
[ "def", "orbit", "(", "self", ",", "azim", ",", "elev", ")", ":", "self", ".", "azimuth", "+=", "azim", "self", ".", "elevation", "=", "np", ".", "clip", "(", "self", ".", "elevation", "+", "elev", ",", "-", "90", ",", "90", ")", "self", ".", "view_changed", "(", ")" ]
Orbits the camera around the center position. Parameters ---------- azim : float Angle in degrees to rotate horizontally around the center point. elev : float Angle in degrees to rotate vertically around the center point.
[ "Orbits", "the", "camera", "around", "the", "center", "position", "." ]
python
train
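The state update in orbit above is an accumulate-and-clamp; a tiny numpy sketch of the same arithmetic, detached from any camera object, with example angles chosen arbitrarily:

import numpy as np

azimuth, elevation = 30.0, 85.0

# orbit(azim=15, elev=20): azimuth accumulates freely,
# elevation is clamped to the [-90, 90] degree range.
azimuth += 15.0
elevation = float(np.clip(elevation + 20.0, -90, 90))

print(azimuth, elevation)  # 45.0 90.0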
Netflix-Skunkworks/cloudaux
cloudaux/gcp/auth.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/gcp/auth.py#L101-L107
def get_gcp_client(**kwargs): """Public GCP client builder.""" return _gcp_client(project=kwargs['project'], mod_name=kwargs['mod_name'], pkg_name=kwargs.get('pkg_name', 'google.cloud'), key_file=kwargs.get('key_file', None), http_auth=kwargs.get('http', None), user_agent=kwargs.get('user_agent', None))
[ "def", "get_gcp_client", "(", "*", "*", "kwargs", ")", ":", "return", "_gcp_client", "(", "project", "=", "kwargs", "[", "'project'", "]", ",", "mod_name", "=", "kwargs", "[", "'mod_name'", "]", ",", "pkg_name", "=", "kwargs", ".", "get", "(", "'pkg_name'", ",", "'google.cloud'", ")", ",", "key_file", "=", "kwargs", ".", "get", "(", "'key_file'", ",", "None", ")", ",", "http_auth", "=", "kwargs", ".", "get", "(", "'http'", ",", "None", ")", ",", "user_agent", "=", "kwargs", ".", "get", "(", "'user_agent'", ",", "None", ")", ")" ]
Public GCP client builder.
[ "Public", "GCP", "client", "builder", "." ]
python
valid
nuagenetworks/bambou
bambou/nurest_request.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_request.py#L120-L127
def set_header(self, header, value): """ Set header value """ # requests>=2.11 only accepts `str` or `bytes` header values # raising an exception here, instead of leaving it to `requests` makes # it easy to know where we passed a wrong header type in the code. if not isinstance(value, (str, bytes)): raise TypeError("header values must be str or bytes, but %s value has type %s" % (header, type(value))) self._headers[header] = value
[ "def", "set_header", "(", "self", ",", "header", ",", "value", ")", ":", "# requests>=2.11 only accepts `str` or `bytes` header values", "# raising an exception here, instead of leaving it to `requests` makes", "# it easy to know where we passed a wrong header type in the code.", "if", "not", "isinstance", "(", "value", ",", "(", "str", ",", "bytes", ")", ")", ":", "raise", "TypeError", "(", "\"header values must be str or bytes, but %s value has type %s\"", "%", "(", "header", ",", "type", "(", "value", ")", ")", ")", "self", ".", "_headers", "[", "header", "]", "=", "value" ]
Set header value
[ "Set", "header", "value" ]
python
train
worstcase/blockade
blockade/state.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/state.py#L108-L121
def load(self): '''Try to load a blockade state file in the current directory''' try: with open(self._state_file) as f: state = yaml.safe_load(f) self._containers = state['containers'] except (IOError, OSError) as err: if err.errno == errno.ENOENT: raise NotInitializedError("No blockade exists in this context") raise InconsistentStateError("Failed to load Blockade state: " + str(err)) except Exception as err: raise InconsistentStateError("Failed to load Blockade state: " + str(err))
[ "def", "load", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "_state_file", ")", "as", "f", ":", "state", "=", "yaml", ".", "safe_load", "(", "f", ")", "self", ".", "_containers", "=", "state", "[", "'containers'", "]", "except", "(", "IOError", ",", "OSError", ")", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "raise", "NotInitializedError", "(", "\"No blockade exists in this context\"", ")", "raise", "InconsistentStateError", "(", "\"Failed to load Blockade state: \"", "+", "str", "(", "err", ")", ")", "except", "Exception", "as", "err", ":", "raise", "InconsistentStateError", "(", "\"Failed to load Blockade state: \"", "+", "str", "(", "err", ")", ")" ]
Try to load a blockade state file in the current directory
[ "Try", "to", "load", "a", "blockade", "state", "file", "in", "the", "current", "directory" ]
python
valid
goose3/goose3
goose3/extractors/content.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L321-L332
def nodes_to_check(self, docs): """\ returns a list of nodes we want to search on like paragraphs and tables """ nodes_to_check = [] for doc in docs: for tag in ['p', 'pre', 'td']: items = self.parser.getElementsByTag(doc, tag=tag) nodes_to_check += items return nodes_to_check
[ "def", "nodes_to_check", "(", "self", ",", "docs", ")", ":", "nodes_to_check", "=", "[", "]", "for", "doc", "in", "docs", ":", "for", "tag", "in", "[", "'p'", ",", "'pre'", ",", "'td'", "]", ":", "items", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "doc", ",", "tag", "=", "tag", ")", "nodes_to_check", "+=", "items", "return", "nodes_to_check" ]
\ returns a list of nodes we want to search on like paragraphs and tables
[ "\\", "returns", "a", "list", "of", "nodes", "we", "want", "to", "search", "on", "like", "paragraphs", "and", "tables" ]
python
valid
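The nodes_to_check helper above just concatenates tag-based lookups over candidate documents; the same idea with the stdlib ElementTree API (goose3 uses its own parser wrapper, so this is only an approximation with a made-up snippet of HTML):

import xml.etree.ElementTree as ET

html = "<html><body><p>one</p><table><tr><td>two</td></tr></table><pre>three</pre></body></html>"
doc = ET.fromstring(html)

nodes_to_check = []
for tag in ['p', 'pre', 'td']:
    nodes_to_check += doc.findall('.//' + tag)

print([n.text for n in nodes_to_check])  # ['one', 'three', 'two']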
markovmodel/msmtools
msmtools/analysis/dense/fingerprints.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/fingerprints.py#L151-L168
def expectation(P, obs): r"""Equilibrium expectation of given observable. Parameters ---------- P : (M, M) ndarray Transition matrix obs : (M,) ndarray Observable, represented as vector on state space Returns ------- x : float Expectation value """ pi = statdist(P) return np.dot(pi, obs)
[ "def", "expectation", "(", "P", ",", "obs", ")", ":", "pi", "=", "statdist", "(", "P", ")", "return", "np", ".", "dot", "(", "pi", ",", "obs", ")" ]
r"""Equilibrium expectation of given observable. Parameters ---------- P : (M, M) ndarray Transition matrix obs : (M,) ndarray Observable, represented as vector on state space Returns ------- x : float Expectation value
[ "r", "Equilibrium", "expectation", "of", "given", "observable", "." ]
python
train
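The quantity computed by expectation above is the stationary average pi . obs for the stationary distribution pi of P; a self-contained numpy sketch for a two-state chain, where msmtools' statdist is replaced by a plain eigenvector computation and the matrix is invented for illustration:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])          # row-stochastic transition matrix
obs = np.array([0.0, 1.0])          # observable: indicator of state 1

# Stationary distribution: left eigenvector of P for eigenvalue 1.
evals, evecs = np.linalg.eig(P.T)
pi = np.real(evecs[:, np.argmax(np.real(evals))])
pi = pi / pi.sum()

expectation = np.dot(pi, obs)
print(pi, expectation)  # ~[0.6667 0.3333]  ~0.3333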
IRC-SPHERE/HyperStream
hyperstream/factor/factor.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L324-L388
def execute(self, time_interval): """ Execute the factor over the given time interval. Note that this is normally done by the workflow, but can also be done on the factor directly :param time_interval: The time interval :return: self (for chaining) """ logging.info('{} running from {} to {}'.format( self.tool.__class__.__name__, time_interval.start, time_interval.end)) input_plate_values = self.input_plate.values if self.input_plate else [None] output_plate_values = self.sink.plate_values meta_data_ids = [p.meta_data_id for p in self.sink.plates] def belongs(plate_value): return all(ii in plate_value for ii in input_plate_value) def meta_data_matches(plate_value): return filter(lambda x: x[0] in meta_data_ids, plate_value) for input_plate_value in input_plate_values: if input_plate_value: # Select only the valid output plate values based on the input plate value # filtered = filter(belongs, output_plate_values) filtered = filter(belongs, output_plate_values) else: filtered = output_plate_values sinks = [self.sink.streams[s] for s in filtered] sub_plate_values_only = map(meta_data_matches, filtered) if not self.source: source = None elif input_plate_value in self.source.streams: source = self.source.streams[input_plate_value] else: logging.warn("{} with value {} not valid for source {}".format( self.input_plate, input_plate_value, self.source)) continue if self.input_plate: if len(self.output_plates) == 1: if self.output_plates[0].parent.plate_id != self.input_plate.plate_id: raise IncompatiblePlatesError("Parent plate of output plate does not match input plate") else: if len(self.output_plates) != 1: raise ValueError("Should be a single output plate if there is no input plate") if len(self.output_plates) > 1: raise NotImplementedError splitting_stream = self.get_splitting_stream(input_plate_value) self.tool.execute( source=source, sinks=sinks, interval=time_interval, splitting_stream=splitting_stream, meta_data_id=self.output_plates[0].meta_data_id, output_plate_values=sub_plate_values_only) self.update_computed_intervals(sinks, time_interval) return self
[ "def", "execute", "(", "self", ",", "time_interval", ")", ":", "logging", ".", "info", "(", "'{} running from {} to {}'", ".", "format", "(", "self", ".", "tool", ".", "__class__", ".", "__name__", ",", "time_interval", ".", "start", ",", "time_interval", ".", "end", ")", ")", "input_plate_values", "=", "self", ".", "input_plate", ".", "values", "if", "self", ".", "input_plate", "else", "[", "None", "]", "output_plate_values", "=", "self", ".", "sink", ".", "plate_values", "meta_data_ids", "=", "[", "p", ".", "meta_data_id", "for", "p", "in", "self", ".", "sink", ".", "plates", "]", "def", "belongs", "(", "plate_value", ")", ":", "return", "all", "(", "ii", "in", "plate_value", "for", "ii", "in", "input_plate_value", ")", "def", "meta_data_matches", "(", "plate_value", ")", ":", "return", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "in", "meta_data_ids", ",", "plate_value", ")", "for", "input_plate_value", "in", "input_plate_values", ":", "if", "input_plate_value", ":", "# Select only the valid output plate values based on the input plate value", "# filtered = filter(belongs, output_plate_values)", "filtered", "=", "filter", "(", "belongs", ",", "output_plate_values", ")", "else", ":", "filtered", "=", "output_plate_values", "sinks", "=", "[", "self", ".", "sink", ".", "streams", "[", "s", "]", "for", "s", "in", "filtered", "]", "sub_plate_values_only", "=", "map", "(", "meta_data_matches", ",", "filtered", ")", "if", "not", "self", ".", "source", ":", "source", "=", "None", "elif", "input_plate_value", "in", "self", ".", "source", ".", "streams", ":", "source", "=", "self", ".", "source", ".", "streams", "[", "input_plate_value", "]", "else", ":", "logging", ".", "warn", "(", "\"{} with value {} not valid for source {}\"", ".", "format", "(", "self", ".", "input_plate", ",", "input_plate_value", ",", "self", ".", "source", ")", ")", "continue", "if", "self", ".", "input_plate", ":", "if", "len", "(", "self", ".", "output_plates", ")", "==", "1", ":", "if", "self", ".", "output_plates", "[", "0", "]", ".", "parent", ".", "plate_id", "!=", "self", ".", "input_plate", ".", "plate_id", ":", "raise", "IncompatiblePlatesError", "(", "\"Parent plate of output plate does not match input plate\"", ")", "else", ":", "if", "len", "(", "self", ".", "output_plates", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Should be a single output plate if there is no input plate\"", ")", "if", "len", "(", "self", ".", "output_plates", ")", ">", "1", ":", "raise", "NotImplementedError", "splitting_stream", "=", "self", ".", "get_splitting_stream", "(", "input_plate_value", ")", "self", ".", "tool", ".", "execute", "(", "source", "=", "source", ",", "sinks", "=", "sinks", ",", "interval", "=", "time_interval", ",", "splitting_stream", "=", "splitting_stream", ",", "meta_data_id", "=", "self", ".", "output_plates", "[", "0", "]", ".", "meta_data_id", ",", "output_plate_values", "=", "sub_plate_values_only", ")", "self", ".", "update_computed_intervals", "(", "sinks", ",", "time_interval", ")", "return", "self" ]
Execute the factor over the given time interval. Note that this is normally done by the workflow, but can also be done on the factor directly :param time_interval: The time interval :return: self (for chaining)
[ "Execute", "the", "factor", "over", "the", "given", "time", "interval", ".", "Note", "that", "this", "is", "normally", "done", "by", "the", "workflow", "but", "can", "also", "be", "done", "on", "the", "factor", "directly" ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L921-L929
def get_namespace_at( self, namespace_id, block_number ): """ Generate and return the sequence of states a namespace record was in at a particular block number. Includes expired namespaces by default. """ cur = self.db.cursor() return namedb_get_namespace_at(cur, namespace_id, block_number, include_expired=True)
[ "def", "get_namespace_at", "(", "self", ",", "namespace_id", ",", "block_number", ")", ":", "cur", "=", "self", ".", "db", ".", "cursor", "(", ")", "return", "namedb_get_namespace_at", "(", "cur", ",", "namespace_id", ",", "block_number", ",", "include_expired", "=", "True", ")" ]
Generate and return the sequence of states a namespace record was in at a particular block number. Includes expired namespaces by default.
[ "Generate", "and", "return", "the", "sequence", "of", "states", "a", "namespace", "record", "was", "in", "at", "a", "particular", "block", "number", "." ]
python
train
kshlm/gant
gant/utils/gant_docker.py
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L189-L201
def ip_cmd(self, name): """ Print ip of given container """ if not self.container_exists(name=name): exit('Unknown container {0}'.format(name)) ip = self.get_container_ip(name) if not ip: exit("Failed to get network address for" " container {0}".format(name)) else: echo(ip)
[ "def", "ip_cmd", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "container_exists", "(", "name", "=", "name", ")", ":", "exit", "(", "'Unknown container {0}'", ".", "format", "(", "name", ")", ")", "ip", "=", "self", ".", "get_container_ip", "(", "name", ")", "if", "not", "ip", ":", "exit", "(", "\"Failed to get network address for\"", "\" container {0}\"", ".", "format", "(", "name", ")", ")", "else", ":", "echo", "(", "ip", ")" ]
Print ip of given container
[ "Print", "ip", "of", "given", "container" ]
python
train
Min-ops/cruddy
cruddy/__init__.py
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L332-L348
def update(self, item, encrypt=True, **kwargs): """ Updates the item based on the current values of the dictionary passed in. """ response = self._new_response() if self._check_supported_op('update', response): if self._prototype_handler.check(item, 'update', response): if encrypt: self._encrypt(item) params = {'Item': item} self._call_ddb_method(self.table.put_item, params, response) if response.status == 'success': response.data = item response.prepare() return response
[ "def", "update", "(", "self", ",", "item", ",", "encrypt", "=", "True", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_new_response", "(", ")", "if", "self", ".", "_check_supported_op", "(", "'update'", ",", "response", ")", ":", "if", "self", ".", "_prototype_handler", ".", "check", "(", "item", ",", "'update'", ",", "response", ")", ":", "if", "encrypt", ":", "self", ".", "_encrypt", "(", "item", ")", "params", "=", "{", "'Item'", ":", "item", "}", "self", ".", "_call_ddb_method", "(", "self", ".", "table", ".", "put_item", ",", "params", ",", "response", ")", "if", "response", ".", "status", "==", "'success'", ":", "response", ".", "data", "=", "item", "response", ".", "prepare", "(", ")", "return", "response" ]
Updates the item based on the current values of the dictionary passed in.
[ "Updates", "the", "item", "based", "on", "the", "current", "values", "of", "the", "dictionary", "passed", "in", "." ]
python
train
theiviaxx/python-perforce
perforce/models.py
https://github.com/theiviaxx/python-perforce/blob/01a3b01fe5949126fa0097d9a8ad386887823b5a/perforce/models.py#L839-L862
def revert(self, unchanged=False): """Reverts any file changes :param unchanged: Only revert if the file is unchanged :type unchanged: bool """ cmd = ['revert'] if unchanged: cmd.append('-a') wasadd = self.action == 'add' cmd.append(self.depotFile) self._connection.run(cmd) if 'movedFile' in self._p4dict: self._p4dict['depotFile'] = self._p4dict['movedFile'] if not wasadd: self.query() if self._changelist: self._changelist.remove(self, permanent=True)
[ "def", "revert", "(", "self", ",", "unchanged", "=", "False", ")", ":", "cmd", "=", "[", "'revert'", "]", "if", "unchanged", ":", "cmd", ".", "append", "(", "'-a'", ")", "wasadd", "=", "self", ".", "action", "==", "'add'", "cmd", ".", "append", "(", "self", ".", "depotFile", ")", "self", ".", "_connection", ".", "run", "(", "cmd", ")", "if", "'movedFile'", "in", "self", ".", "_p4dict", ":", "self", ".", "_p4dict", "[", "'depotFile'", "]", "=", "self", ".", "_p4dict", "[", "'movedFile'", "]", "if", "not", "wasadd", ":", "self", ".", "query", "(", ")", "if", "self", ".", "_changelist", ":", "self", ".", "_changelist", ".", "remove", "(", "self", ",", "permanent", "=", "True", ")" ]
Reverts any file changes :param unchanged: Only revert if the file is unchanged :type unchanged: bool
[ "Reverts", "any", "file", "changes" ]
python
train
pebble/libpebble2
libpebble2/services/install.py
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/install.py#L54-L70
def install(self, force_install=False): """ Installs an app. Blocks until the installation is complete, or raises :exc:`AppInstallError` if it fails. While this method runs, "progress" events will be emitted regularly with the following signature: :: (sent_this_interval, sent_total, total_size) :param force_install: Install even if installing this pbw on this platform is usually forbidden. :type force_install: bool """ if not (force_install or self._bundle.should_permit_install()): raise AppInstallError("This pbw is not supported on this platform.") if self._pebble.firmware_version.major < 3: self._install_legacy2() else: self._install_modern()
[ "def", "install", "(", "self", ",", "force_install", "=", "False", ")", ":", "if", "not", "(", "force_install", "or", "self", ".", "_bundle", ".", "should_permit_install", "(", ")", ")", ":", "raise", "AppInstallError", "(", "\"This pbw is not supported on this platform.\"", ")", "if", "self", ".", "_pebble", ".", "firmware_version", ".", "major", "<", "3", ":", "self", ".", "_install_legacy2", "(", ")", "else", ":", "self", ".", "_install_modern", "(", ")" ]
Installs an app. Blocks until the installation is complete, or raises :exc:`AppInstallError` if it fails. While this method runs, "progress" events will be emitted regularly with the following signature: :: (sent_this_interval, sent_total, total_size) :param force_install: Install even if installing this pbw on this platform is usually forbidden. :type force_install: bool
[ "Installs", "an", "app", ".", "Blocks", "until", "the", "installation", "is", "complete", "or", "raises", ":", "exc", ":", "AppInstallError", "if", "it", "fails", "." ]
python
train
Josef-Friedrich/phrydy
phrydy/utils.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/utils.py#L61-L92
def syspath(path, prefix=True): """Convert a path for use by the operating system. In particular, paths on Windows must receive a magic prefix and must be converted to Unicode before they are sent to the OS. To disable the magic prefix on Windows, set `prefix` to False---but only do this if you *really* know what you're doing. """ # Don't do anything if we're not on windows if os.path.__name__ != 'ntpath': return path if not isinstance(path, six.text_type): # Beets currently represents Windows paths internally with UTF-8 # arbitrarily. But earlier versions used MBCS because it is # reported as the FS encoding by Windows. Try both. try: path = path.decode('utf8') except UnicodeError: # The encoding should always be MBCS, Windows' broken # Unicode representation. encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() path = path.decode(encoding, 'replace') # Add the magic prefix if it isn't already there. # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX): if path.startswith(u'\\\\'): # UNC path. Final path should look like \\?\UNC\... path = u'UNC' + path[1:] path = WINDOWS_MAGIC_PREFIX + path return path
[ "def", "syspath", "(", "path", ",", "prefix", "=", "True", ")", ":", "# Don't do anything if we're not on windows", "if", "os", ".", "path", ".", "__name__", "!=", "'ntpath'", ":", "return", "path", "if", "not", "isinstance", "(", "path", ",", "six", ".", "text_type", ")", ":", "# Beets currently represents Windows paths internally with UTF-8", "# arbitrarily. But earlier versions used MBCS because it is", "# reported as the FS encoding by Windows. Try both.", "try", ":", "path", "=", "path", ".", "decode", "(", "'utf8'", ")", "except", "UnicodeError", ":", "# The encoding should always be MBCS, Windows' broken", "# Unicode representation.", "encoding", "=", "sys", ".", "getfilesystemencoding", "(", ")", "or", "sys", ".", "getdefaultencoding", "(", ")", "path", "=", "path", ".", "decode", "(", "encoding", ",", "'replace'", ")", "# Add the magic prefix if it isn't already there.", "# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx", "if", "prefix", "and", "not", "path", ".", "startswith", "(", "WINDOWS_MAGIC_PREFIX", ")", ":", "if", "path", ".", "startswith", "(", "u'\\\\\\\\'", ")", ":", "# UNC path. Final path should look like \\\\?\\UNC\\...", "path", "=", "u'UNC'", "+", "path", "[", "1", ":", "]", "path", "=", "WINDOWS_MAGIC_PREFIX", "+", "path", "return", "path" ]
Convert a path for use by the operating system. In particular, paths on Windows must receive a magic prefix and must be converted to Unicode before they are sent to the OS. To disable the magic prefix on Windows, set `prefix` to False---but only do this if you *really* know what you're doing.
[ "Convert", "a", "path", "for", "use", "by", "the", "operating", "system", ".", "In", "particular", "paths", "on", "Windows", "must", "receive", "a", "magic", "prefix", "and", "must", "be", "converted", "to", "Unicode", "before", "they", "are", "sent", "to", "the", "OS", ".", "To", "disable", "the", "magic", "prefix", "on", "Windows", "set", "prefix", "to", "False", "---", "but", "only", "do", "this", "if", "you", "*", "really", "*", "know", "what", "you", "re", "doing", "." ]
python
train
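The Windows-specific prefixing at the end of syspath above is plain string handling and can be exercised on any platform; a sketch with the constant spelled out explicitly (its value is an assumption here, in the record it comes from WINDOWS_MAGIC_PREFIX):

WINDOWS_MAGIC_PREFIX = u'\\\\?\\'   # assumed value of the constant in the record

def add_magic_prefix(path):
    # Long-path prefix for the Windows API; UNC paths need the extra
    # 'UNC' marker (\\server\share -> \\?\UNC\server\share).
    if path.startswith(WINDOWS_MAGIC_PREFIX):
        return path
    if path.startswith(u'\\\\'):
        path = u'UNC' + path[1:]
    return WINDOWS_MAGIC_PREFIX + path

print(add_magic_prefix(u'C:\\Music\\song.flac'))      # \\?\C:\Music\song.flac
print(add_magic_prefix(u'\\\\server\\share\\x.mp3'))  # \\?\UNC\server\share\x.mp3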
dossier/dossier.web
dossier/web/routes.py
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/routes.py#L483-L495
def v1_folder_delete(request, response, kvlclient, fid, sfid=None, cid=None, subid=None): '''Deletes a folder, subfolder or item. The routes for this endpoint are: * ``DELETE /dossier/v1/folder/<fid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>/<subid>`` ''' new_folders(kvlclient, request).delete(make_path(fid, sfid, cid, subid)) response.status = 204
[ "def", "v1_folder_delete", "(", "request", ",", "response", ",", "kvlclient", ",", "fid", ",", "sfid", "=", "None", ",", "cid", "=", "None", ",", "subid", "=", "None", ")", ":", "new_folders", "(", "kvlclient", ",", "request", ")", ".", "delete", "(", "make_path", "(", "fid", ",", "sfid", ",", "cid", ",", "subid", ")", ")", "response", ".", "status", "=", "204" ]
Deletes a folder, subfolder or item. The routes for this endpoint are: * ``DELETE /dossier/v1/folder/<fid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>`` * ``DELETE /dossier/v1/folder/<fid>/subfolder/<sfid>/<cid>/<subid>``
[ "Deletes", "a", "folder", "subfolder", "or", "item", "." ]
python
train
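A client-side sketch of calling the routes listed in the v1_folder_delete docstring above with the requests library; the host, port, folder and content identifiers are placeholders only, not part of the record:

import requests

BASE = 'http://localhost:8080'   # placeholder host for a dossier.web deployment

# Delete a whole folder, then a single item inside a subfolder.
r = requests.delete(BASE + '/dossier/v1/folder/my_folder')
assert r.status_code == 204

r = requests.delete(BASE + '/dossier/v1/folder/my_folder/subfolder/my_subfolder/content_id')
assert r.status_code == 204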
gboeing/osmnx
osmnx/pois.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/pois.py#L135-L166
def parse_polygonal_poi(coords, response): """ Parse areal POI way polygons from OSM node coords. Parameters ---------- coords : dict dict of node IDs and their lat, lon coordinates Returns ------- dict of POIs containing each's nodes, polygon geometry, and osmid """ if 'type' in response and response['type'] == 'way': nodes = response['nodes'] try: polygon = Polygon([(coords[node]['lon'], coords[node]['lat']) for node in nodes]) poi = {'nodes': nodes, 'geometry': polygon, 'osmid': response['id']} if 'tags' in response: for tag in response['tags']: poi[tag] = response['tags'][tag] return poi except Exception: log('Polygon has invalid geometry: {}'.format(nodes)) return None
[ "def", "parse_polygonal_poi", "(", "coords", ",", "response", ")", ":", "if", "'type'", "in", "response", "and", "response", "[", "'type'", "]", "==", "'way'", ":", "nodes", "=", "response", "[", "'nodes'", "]", "try", ":", "polygon", "=", "Polygon", "(", "[", "(", "coords", "[", "node", "]", "[", "'lon'", "]", ",", "coords", "[", "node", "]", "[", "'lat'", "]", ")", "for", "node", "in", "nodes", "]", ")", "poi", "=", "{", "'nodes'", ":", "nodes", ",", "'geometry'", ":", "polygon", ",", "'osmid'", ":", "response", "[", "'id'", "]", "}", "if", "'tags'", "in", "response", ":", "for", "tag", "in", "response", "[", "'tags'", "]", ":", "poi", "[", "tag", "]", "=", "response", "[", "'tags'", "]", "[", "tag", "]", "return", "poi", "except", "Exception", ":", "log", "(", "'Polygon has invalid geometry: {}'", ".", "format", "(", "nodes", ")", ")", "return", "None" ]
Parse areal POI way polygons from OSM node coords. Parameters ---------- coords : dict dict of node IDs and their lat, lon coordinates Returns ------- dict of POIs containing each's nodes, polygon geometry, and osmid
[ "Parse", "areal", "POI", "way", "polygons", "from", "OSM", "node", "coords", "." ]
python
train
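The geometry step in parse_polygonal_poi above is building a shapely Polygon from (lon, lat) pairs keyed by node id; a minimal sketch with made-up coordinates and osmid, using the same Polygon class the record imports:

from shapely.geometry import Polygon

coords = {
    1: {'lon': 13.40, 'lat': 52.52},
    2: {'lon': 13.41, 'lat': 52.52},
    3: {'lon': 13.41, 'lat': 52.53},
    4: {'lon': 13.40, 'lat': 52.53},
}
nodes = [1, 2, 3, 4]

polygon = Polygon([(coords[n]['lon'], coords[n]['lat']) for n in nodes])
poi = {'nodes': nodes, 'geometry': polygon, 'osmid': 123456789}
print(poi['geometry'].is_valid)  # True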
ray-project/ray
python/ray/tune/automlboard/backend/collector.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/backend/collector.py#L117-L131
def _initialize(self): """Initialize collector worker thread, Log path will be checked first. Records in DB backend will be cleared. """ if not os.path.exists(self._logdir): raise CollectorError("Log directory %s not exists" % self._logdir) self.logger.info("Collector started, taking %s as parent directory" "for all job logs." % self._logdir) # clear old records JobRecord.objects.filter().delete() TrialRecord.objects.filter().delete() ResultRecord.objects.filter().delete()
[ "def", "_initialize", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_logdir", ")", ":", "raise", "CollectorError", "(", "\"Log directory %s not exists\"", "%", "self", ".", "_logdir", ")", "self", ".", "logger", ".", "info", "(", "\"Collector started, taking %s as parent directory\"", "\"for all job logs.\"", "%", "self", ".", "_logdir", ")", "# clear old records", "JobRecord", ".", "objects", ".", "filter", "(", ")", ".", "delete", "(", ")", "TrialRecord", ".", "objects", ".", "filter", "(", ")", ".", "delete", "(", ")", "ResultRecord", ".", "objects", ".", "filter", "(", ")", ".", "delete", "(", ")" ]
Initialize collector worker thread, Log path will be checked first. Records in DB backend will be cleared.
[ "Initialize", "collector", "worker", "thread", "Log", "path", "will", "be", "checked", "first", "." ]
python
train
numenta/nupic
src/nupic/encoders/adaptive_scalar.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/adaptive_scalar.py#L169-L182
def encodeIntoArray(self, input, output,learn=None): """ [overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray] """ self.recordNum +=1 if learn is None: learn = self._learningEnabled if input == SENTINEL_VALUE_FOR_MISSING_DATA: output[0:self.n] = 0 elif not math.isnan(input): self._setMinAndMax(input, learn) super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)
[ "def", "encodeIntoArray", "(", "self", ",", "input", ",", "output", ",", "learn", "=", "None", ")", ":", "self", ".", "recordNum", "+=", "1", "if", "learn", "is", "None", ":", "learn", "=", "self", ".", "_learningEnabled", "if", "input", "==", "SENTINEL_VALUE_FOR_MISSING_DATA", ":", "output", "[", "0", ":", "self", ".", "n", "]", "=", "0", "elif", "not", "math", ".", "isnan", "(", "input", ")", ":", "self", ".", "_setMinAndMax", "(", "input", ",", "learn", ")", "super", "(", "AdaptiveScalarEncoder", ",", "self", ")", ".", "encodeIntoArray", "(", "input", ",", "output", ")" ]
[overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray]
[ "[", "overrides", "nupic", ".", "encoders", ".", "scalar", ".", "ScalarEncoder", ".", "encodeIntoArray", "]" ]
python
valid
cbclab/MOT
mot/library_functions/__init__.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/library_functions/__init__.py#L605-L614
def get_kernel_data(self): """Get the kernel data needed for this optimization routine to work.""" return { 'scratch_mot_float_type': LocalMemory( 'mot_float_type', 8 + 2 * self._var_replace_dict['NMR_OBSERVATIONS'] + 5 * self._var_replace_dict['NMR_PARAMS'] + self._var_replace_dict['NMR_PARAMS'] * self._var_replace_dict['NMR_OBSERVATIONS']), 'scratch_int': LocalMemory('int', self._var_replace_dict['NMR_PARAMS']) }
[ "def", "get_kernel_data", "(", "self", ")", ":", "return", "{", "'scratch_mot_float_type'", ":", "LocalMemory", "(", "'mot_float_type'", ",", "8", "+", "2", "*", "self", ".", "_var_replace_dict", "[", "'NMR_OBSERVATIONS'", "]", "+", "5", "*", "self", ".", "_var_replace_dict", "[", "'NMR_PARAMS'", "]", "+", "self", ".", "_var_replace_dict", "[", "'NMR_PARAMS'", "]", "*", "self", ".", "_var_replace_dict", "[", "'NMR_OBSERVATIONS'", "]", ")", ",", "'scratch_int'", ":", "LocalMemory", "(", "'int'", ",", "self", ".", "_var_replace_dict", "[", "'NMR_PARAMS'", "]", ")", "}" ]
Get the kernel data needed for this optimization routine to work.
[ "Get", "the", "kernel", "data", "needed", "for", "this", "optimization", "routine", "to", "work", "." ]
python
train
derpferd/little-python
littlepython/parser.py
https://github.com/derpferd/little-python/blob/3f89c74cffb6532c12c5b40843bd8ff8605638ba/littlepython/parser.py#L118-L139
def loop(self): """ loop : 'for' init; ctrl; inc block """ self.eat(TokenTypes.FOR_LOOP) init = NoOp() if self.cur_token.type != TokenTypes.SEMI_COLON: init = self.assign_statement() else: self.eat(TokenTypes.SEMI_COLON) ctrl = NoOp() if self.cur_token.type != TokenTypes.SEMI_COLON: ctrl = self.expression() self.eat(TokenTypes.SEMI_COLON) inc = NoOp() if self.cur_token.type != TokenTypes.LBRACE: inc = self.assign_statement() block = self.block() return ForLoop(init, ctrl, inc, block)
[ "def", "loop", "(", "self", ")", ":", "self", ".", "eat", "(", "TokenTypes", ".", "FOR_LOOP", ")", "init", "=", "NoOp", "(", ")", "if", "self", ".", "cur_token", ".", "type", "!=", "TokenTypes", ".", "SEMI_COLON", ":", "init", "=", "self", ".", "assign_statement", "(", ")", "else", ":", "self", ".", "eat", "(", "TokenTypes", ".", "SEMI_COLON", ")", "ctrl", "=", "NoOp", "(", ")", "if", "self", ".", "cur_token", ".", "type", "!=", "TokenTypes", ".", "SEMI_COLON", ":", "ctrl", "=", "self", ".", "expression", "(", ")", "self", ".", "eat", "(", "TokenTypes", ".", "SEMI_COLON", ")", "inc", "=", "NoOp", "(", ")", "if", "self", ".", "cur_token", ".", "type", "!=", "TokenTypes", ".", "LBRACE", ":", "inc", "=", "self", ".", "assign_statement", "(", ")", "block", "=", "self", ".", "block", "(", ")", "return", "ForLoop", "(", "init", ",", "ctrl", ",", "inc", ",", "block", ")" ]
loop : 'for' init; ctrl; inc block
[ "loop", ":", "for", "init", ";", "ctrl", ";", "inc", "block" ]
python
train