Dataset columns (type and observed range):

  repo              stringlengths    7 – 55
  path              stringlengths    4 – 223
  url               stringlengths    87 – 315
  code              stringlengths    75 – 104k
  code_tokens       list
  docstring         stringlengths    1 – 46.9k
  docstring_tokens  list
  language          stringclasses    1 value
  partition         stringclasses    3 values
  avg_line_len      float64          7.91 – 980
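Each record below lists these ten fields in order, one field per line. As a reading aid, here is a minimal sketch of how one such record could be represented in Python; the CodeSearchRecord class and the abridged example values are illustrative assumptions, not part of the dataset itself.

from dataclasses import dataclass
from typing import List

@dataclass
class CodeSearchRecord:
    # Field names mirror the column schema above; the class is hypothetical.
    repo: str                    # e.g. "JukeboxPipeline/jukebox-core"
    path: str                    # file path inside the repository
    url: str                     # GitHub permalink with a line range
    code: str                    # full function source, docstring included
    code_tokens: List[str]       # tokenized source code
    docstring: str               # docstring text only
    docstring_tokens: List[str]  # tokenized docstring
    language: str                # single class: "python"
    partition: str               # one of the 3 classes: "train", "valid", "test"
    avg_line_len: float          # average line length of the source

# Hypothetical example built from the first record below (long values abridged).
example = CodeSearchRecord(
    repo="JukeboxPipeline/jukebox-core",
    path="src/jukeboxcore/addons/genesis/genesis.py",
    url="https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/genesis/genesis.py#L460-L472",
    code="def closeEvent(self, event): ...",                                # abridged
    code_tokens=["def", "closeEvent", "(", "self", ",", "event", ")", ":"],  # abridged
    docstring="Send last file signal on close event ...",                   # abridged
    docstring_tokens=["Send", "last", "file", "signal", "on", "close", "event"],
    language="python",
    partition="train",
    avg_line_len=26.923077,
)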
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/genesis/genesis.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/genesis/genesis.py#L460-L472
def closeEvent(self, event): """Send last file signal on close event :param event: The close event :type event: :returns: None :rtype: None :raises: None """ lf = self.browser.get_current_selection() if lf: self.last_file.emit(lf) return super(GenesisWin, self).close()
[ "def", "closeEvent", "(", "self", ",", "event", ")", ":", "lf", "=", "self", ".", "browser", ".", "get_current_selection", "(", ")", "if", "lf", ":", "self", ".", "last_file", ".", "emit", "(", "lf", ")", "return", "super", "(", "GenesisWin", ",", "self", ")", ".", "close", "(", ")" ]
Send last file signal on close event :param event: The close event :type event: :returns: None :rtype: None :raises: None
[ "Send", "last", "file", "signal", "on", "close", "event" ]
python
train
26.923077
swharden/SWHLab
swhlab/indexing/indexing.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/indexing/indexing.py#L218-L236
def html_single_plot(self,abfID,launch=False,overwrite=False): """create ID_plot.html of just intrinsic properties.""" if type(abfID) is str: abfID=[abfID] for thisABFid in cm.abfSort(abfID): parentID=cm.parent(self.groups,thisABFid) saveAs=os.path.abspath("%s/%s_plot.html"%(self.folder2,parentID)) if overwrite is False and os.path.basename(saveAs) in self.files2: continue filesByType=cm.filesByType(self.groupFiles[parentID]) html="" html+='<div style="background-color: #DDDDFF;">' html+='<span class="title">intrinsic properties for: %s</span></br>'%parentID html+='<code>%s</code>'%os.path.abspath(self.folder1+"/"+parentID+".abf") html+='</div>' for fname in filesByType['plot']: html+=self.htmlFor(fname) print("creating",saveAs,'...') style.save(html,saveAs,launch=launch)
[ "def", "html_single_plot", "(", "self", ",", "abfID", ",", "launch", "=", "False", ",", "overwrite", "=", "False", ")", ":", "if", "type", "(", "abfID", ")", "is", "str", ":", "abfID", "=", "[", "abfID", "]", "for", "thisABFid", "in", "cm", ".", "abfSort", "(", "abfID", ")", ":", "parentID", "=", "cm", ".", "parent", "(", "self", ".", "groups", ",", "thisABFid", ")", "saveAs", "=", "os", ".", "path", ".", "abspath", "(", "\"%s/%s_plot.html\"", "%", "(", "self", ".", "folder2", ",", "parentID", ")", ")", "if", "overwrite", "is", "False", "and", "os", ".", "path", ".", "basename", "(", "saveAs", ")", "in", "self", ".", "files2", ":", "continue", "filesByType", "=", "cm", ".", "filesByType", "(", "self", ".", "groupFiles", "[", "parentID", "]", ")", "html", "=", "\"\"", "html", "+=", "'<div style=\"background-color: #DDDDFF;\">'", "html", "+=", "'<span class=\"title\">intrinsic properties for: %s</span></br>'", "%", "parentID", "html", "+=", "'<code>%s</code>'", "%", "os", ".", "path", ".", "abspath", "(", "self", ".", "folder1", "+", "\"/\"", "+", "parentID", "+", "\".abf\"", ")", "html", "+=", "'</div>'", "for", "fname", "in", "filesByType", "[", "'plot'", "]", ":", "html", "+=", "self", ".", "htmlFor", "(", "fname", ")", "print", "(", "\"creating\"", ",", "saveAs", ",", "'...'", ")", "style", ".", "save", "(", "html", ",", "saveAs", ",", "launch", "=", "launch", ")" ]
create ID_plot.html of just intrinsic properties.
[ "create", "ID_plot", ".", "html", "of", "just", "intrinsic", "properties", "." ]
python
valid
51.368421
log2timeline/plaso
plaso/cli/helpers/hashers.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/hashers.py#L49-L79
def ParseOptions(cls, options, configuration_object): """Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation. """ if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') hashers = cls._ParseStringOption( options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING) hasher_file_size_limit = cls._ParseNumericOption( options, 'hasher_file_size_limit', default_value=0) # TODO: validate hasher names. if hasher_file_size_limit < 0: raise errors.BadConfigOption( 'Invalid hasher file size limit value cannot be negative.') setattr(configuration_object, '_hasher_names_string', hashers) setattr( configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "configuration_object", ")", ":", "if", "not", "isinstance", "(", "configuration_object", ",", "tools", ".", "CLITool", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "'Configuration object is not an instance of CLITool'", ")", "hashers", "=", "cls", ".", "_ParseStringOption", "(", "options", ",", "'hashers'", ",", "default_value", "=", "cls", ".", "_DEFAULT_HASHER_STRING", ")", "hasher_file_size_limit", "=", "cls", ".", "_ParseNumericOption", "(", "options", ",", "'hasher_file_size_limit'", ",", "default_value", "=", "0", ")", "# TODO: validate hasher names.", "if", "hasher_file_size_limit", "<", "0", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Invalid hasher file size limit value cannot be negative.'", ")", "setattr", "(", "configuration_object", ",", "'_hasher_names_string'", ",", "hashers", ")", "setattr", "(", "configuration_object", ",", "'_hasher_file_size_limit'", ",", "hasher_file_size_limit", ")" ]
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
[ "Parses", "and", "validates", "options", "." ]
python
train
35.83871
openego/eDisGo
edisgo/tools/geo.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/geo.py#L14-L33
def proj2equidistant(network): """Defines conformal (e.g. WGS84) to ETRS (equidistant) projection Source CRS is loaded from Network's config. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object Returns ------- :py:func:`functools.partial` """ srid = int(network.config['geo']['srid']) return partial(pyproj.transform, pyproj.Proj(init='epsg:{}' .format(str(srid))), # source coordinate system pyproj.Proj(init='epsg:3035') # destination coordinate system )
[ "def", "proj2equidistant", "(", "network", ")", ":", "srid", "=", "int", "(", "network", ".", "config", "[", "'geo'", "]", "[", "'srid'", "]", ")", "return", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:{}'", ".", "format", "(", "str", "(", "srid", ")", ")", ")", ",", "# source coordinate system", "pyproj", ".", "Proj", "(", "init", "=", "'epsg:3035'", ")", "# destination coordinate system", ")" ]
Defines conformal (e.g. WGS84) to ETRS (equidistant) projection Source CRS is loaded from Network's config. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object Returns ------- :py:func:`functools.partial`
[ "Defines", "conformal", "(", "e", ".", "g", ".", "WGS84", ")", "to", "ETRS", "(", "equidistant", ")", "projection", "Source", "CRS", "is", "loaded", "from", "Network", "s", "config", "." ]
python
train
31.1
merll/docker-map
dockermap/build/dockerfile.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/build/dockerfile.py#L83-L96
def format_expose(expose): """ Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple. :param: Port numbers, can be as integer, string, or a list/tuple of those. :type expose: int | unicode | str | list | tuple :return: A tuple, to be separated by spaces before inserting in a Dockerfile. :rtype: tuple """ if isinstance(expose, six.string_types): return expose, elif isinstance(expose, collections.Iterable): return map(six.text_type, expose) return six.text_type(expose),
[ "def", "format_expose", "(", "expose", ")", ":", "if", "isinstance", "(", "expose", ",", "six", ".", "string_types", ")", ":", "return", "expose", ",", "elif", "isinstance", "(", "expose", ",", "collections", ".", "Iterable", ")", ":", "return", "map", "(", "six", ".", "text_type", ",", "expose", ")", "return", "six", ".", "text_type", "(", "expose", ")", "," ]
Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple. :param: Port numbers, can be as integer, string, or a list/tuple of those. :type expose: int | unicode | str | list | tuple :return: A tuple, to be separated by spaces before inserting in a Dockerfile. :rtype: tuple
[ "Converts", "a", "port", "number", "or", "multiple", "port", "numbers", "as", "used", "in", "the", "Dockerfile", "EXPOSE", "command", "to", "a", "tuple", "." ]
python
train
40.571429
tensorflow/probability
tensorflow_probability/python/internal/distribution_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L398-L462
def maybe_check_scalar_distribution(distribution, expected_base_dtype, validate_args): """Helper which checks validity of a scalar `distribution` init arg. Valid here means: * `distribution` has scalar batch and event shapes. * `distribution` is `FULLY_REPARAMETERIZED` * `distribution` has expected dtype. Args: distribution: `Distribution`-like object. expected_base_dtype: `TensorFlow` `dtype`. validate_args: Python `bool`. Whether to do additional checks: (i) check that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add `tf.Assert` ops to the graph to enforce that distribution is scalar in the event that this cannot be determined statically. Returns: List of `tf.Assert` ops to run to enforce validity checks that could not be statically determined. Empty if `not validate_args`. Raises: ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED ValueError: If distribution is statically determined to not have both scalar batch and scalar event shapes. """ if distribution.dtype != expected_base_dtype: raise TypeError("dtype mismatch; " "distribution.dtype=\"{}\" is not \"{}\"".format( dtype_util.name(distribution.dtype), dtype_util.name(expected_base_dtype))) # Although `reparameterization_type` is a static property, we guard it by # `validate_args`. This allows users to use a `distribution` which is not # reparameterized itself. However, we tacitly assume that although the # distribution is not reparameterized, it only depends on non-trainable # variables. if validate_args and (distribution.reparameterization_type != reparameterization.FULLY_REPARAMETERIZED): raise ValueError("Base distribution should be reparameterized or be " "a function of non-trainable variables; " "distribution.reparameterization_type = \"{}\" " "!= \"FULLY_REPARAMETERIZED\".".format( distribution.reparameterization_type)) with tf.name_scope("check_distribution"): assertions = [] def check_is_scalar(is_scalar, name): is_scalar_ = tf.get_static_value(is_scalar) if is_scalar_ is not None: if not is_scalar_: raise ValueError("distribution must be scalar; " "distribution.{}=False is not True".format(name)) elif validate_args: assertions.append( assert_util.assert_equal( is_scalar, True, message=("distribution must be scalar; " "distribution.{}=False is not True".format(name)))) check_is_scalar(distribution.is_scalar_event(), "is_scalar_event") check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch") return assertions
[ "def", "maybe_check_scalar_distribution", "(", "distribution", ",", "expected_base_dtype", ",", "validate_args", ")", ":", "if", "distribution", ".", "dtype", "!=", "expected_base_dtype", ":", "raise", "TypeError", "(", "\"dtype mismatch; \"", "\"distribution.dtype=\\\"{}\\\" is not \\\"{}\\\"\"", ".", "format", "(", "dtype_util", ".", "name", "(", "distribution", ".", "dtype", ")", ",", "dtype_util", ".", "name", "(", "expected_base_dtype", ")", ")", ")", "# Although `reparameterization_type` is a static property, we guard it by", "# `validate_args`. This allows users to use a `distribution` which is not", "# reparameterized itself. However, we tacitly assume that although the", "# distribution is not reparameterized, it only depends on non-trainable", "# variables.", "if", "validate_args", "and", "(", "distribution", ".", "reparameterization_type", "!=", "reparameterization", ".", "FULLY_REPARAMETERIZED", ")", ":", "raise", "ValueError", "(", "\"Base distribution should be reparameterized or be \"", "\"a function of non-trainable variables; \"", "\"distribution.reparameterization_type = \\\"{}\\\" \"", "\"!= \\\"FULLY_REPARAMETERIZED\\\".\"", ".", "format", "(", "distribution", ".", "reparameterization_type", ")", ")", "with", "tf", ".", "name_scope", "(", "\"check_distribution\"", ")", ":", "assertions", "=", "[", "]", "def", "check_is_scalar", "(", "is_scalar", ",", "name", ")", ":", "is_scalar_", "=", "tf", ".", "get_static_value", "(", "is_scalar", ")", "if", "is_scalar_", "is", "not", "None", ":", "if", "not", "is_scalar_", ":", "raise", "ValueError", "(", "\"distribution must be scalar; \"", "\"distribution.{}=False is not True\"", ".", "format", "(", "name", ")", ")", "elif", "validate_args", ":", "assertions", ".", "append", "(", "assert_util", ".", "assert_equal", "(", "is_scalar", ",", "True", ",", "message", "=", "(", "\"distribution must be scalar; \"", "\"distribution.{}=False is not True\"", ".", "format", "(", "name", ")", ")", ")", ")", "check_is_scalar", "(", "distribution", ".", "is_scalar_event", "(", ")", ",", "\"is_scalar_event\"", ")", "check_is_scalar", "(", "distribution", ".", "is_scalar_batch", "(", ")", ",", "\"is_scalar_batch\"", ")", "return", "assertions" ]
Helper which checks validity of a scalar `distribution` init arg. Valid here means: * `distribution` has scalar batch and event shapes. * `distribution` is `FULLY_REPARAMETERIZED` * `distribution` has expected dtype. Args: distribution: `Distribution`-like object. expected_base_dtype: `TensorFlow` `dtype`. validate_args: Python `bool`. Whether to do additional checks: (i) check that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add `tf.Assert` ops to the graph to enforce that distribution is scalar in the event that this cannot be determined statically. Returns: List of `tf.Assert` ops to run to enforce validity checks that could not be statically determined. Empty if `not validate_args`. Raises: ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED ValueError: If distribution is statically determined to not have both scalar batch and scalar event shapes.
[ "Helper", "which", "checks", "validity", "of", "a", "scalar", "distribution", "init", "arg", "." ]
python
test
44.630769
tensorflow/probability
tensorflow_probability/python/layers/distribution_layer.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L65-L86
def _event_size(event_shape, name=None): """Computes the number of elements in a tensor with shape `event_shape`. Args: event_shape: A tensor shape. name: The name to use for the tensor op to compute the number of elements (if such an op needs to be created). Returns: event_size: The number of elements in `tensor_shape`. Returns a numpy int when the number of elements can be computed immediately. Otherwise, returns a scalar tensor. """ with tf.compat.v1.name_scope(name, 'event_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, dtype=tf.int32, name='event_shape') event_shape_const = tf.get_static_value(event_shape) if event_shape_const is not None: return np.prod(event_shape_const) else: return tf.reduce_prod(input_tensor=event_shape)
[ "def", "_event_size", "(", "event_shape", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'event_size'", ",", "[", "event_shape", "]", ")", ":", "event_shape", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "event_shape", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "'event_shape'", ")", "event_shape_const", "=", "tf", ".", "get_static_value", "(", "event_shape", ")", "if", "event_shape_const", "is", "not", "None", ":", "return", "np", ".", "prod", "(", "event_shape_const", ")", "else", ":", "return", "tf", ".", "reduce_prod", "(", "input_tensor", "=", "event_shape", ")" ]
Computes the number of elements in a tensor with shape `event_shape`. Args: event_shape: A tensor shape. name: The name to use for the tensor op to compute the number of elements (if such an op needs to be created). Returns: event_size: The number of elements in `tensor_shape`. Returns a numpy int when the number of elements can be computed immediately. Otherwise, returns a scalar tensor.
[ "Computes", "the", "number", "of", "elements", "in", "a", "tensor", "with", "shape", "event_shape", "." ]
python
test
37.545455
dddomodossola/remi
editor/editor_widgets.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/editor/editor_widgets.py#L183-L206
def update(self, widget, widget_tree): """ for the selected widget are listed the relative signals for each signal there is a dropdown containing all the widgets the user will select the widget that have to listen a specific event """ self.listeners_list = [] self.build_widget_list_from_tree(widget_tree) self.label.set_text('Signal connections: ' + widget.attributes['editor_varname']) #del self.container self.container = gui.VBox(width='100%', height='90%') self.container.style['justify-content'] = 'flex-start' self.container.style['overflow-y'] = 'scroll' self.append(self.container, 'container') ##for all the events of this widget #isclass instead of ismethod because event methods are replaced with ClassEventConnector for (setOnEventListenerFuncname,setOnEventListenerFunc) in inspect.getmembers(widget): #if the member is decorated by decorate_set_on_listener and the function is referred to this event if hasattr(setOnEventListenerFunc, '_event_info'): self.container.append( SignalConnection(widget, self.listeners_list, setOnEventListenerFuncname, setOnEventListenerFunc, width='100%') )
[ "def", "update", "(", "self", ",", "widget", ",", "widget_tree", ")", ":", "self", ".", "listeners_list", "=", "[", "]", "self", ".", "build_widget_list_from_tree", "(", "widget_tree", ")", "self", ".", "label", ".", "set_text", "(", "'Signal connections: '", "+", "widget", ".", "attributes", "[", "'editor_varname'", "]", ")", "#del self.container", "self", ".", "container", "=", "gui", ".", "VBox", "(", "width", "=", "'100%'", ",", "height", "=", "'90%'", ")", "self", ".", "container", ".", "style", "[", "'justify-content'", "]", "=", "'flex-start'", "self", ".", "container", ".", "style", "[", "'overflow-y'", "]", "=", "'scroll'", "self", ".", "append", "(", "self", ".", "container", ",", "'container'", ")", "##for all the events of this widget", "#isclass instead of ismethod because event methods are replaced with ClassEventConnector", "for", "(", "setOnEventListenerFuncname", ",", "setOnEventListenerFunc", ")", "in", "inspect", ".", "getmembers", "(", "widget", ")", ":", "#if the member is decorated by decorate_set_on_listener and the function is referred to this event", "if", "hasattr", "(", "setOnEventListenerFunc", ",", "'_event_info'", ")", ":", "self", ".", "container", ".", "append", "(", "SignalConnection", "(", "widget", ",", "self", ".", "listeners_list", ",", "setOnEventListenerFuncname", ",", "setOnEventListenerFunc", ",", "width", "=", "'100%'", ")", ")" ]
for the selected widget are listed the relative signals for each signal there is a dropdown containing all the widgets the user will select the widget that have to listen a specific event
[ "for", "the", "selected", "widget", "are", "listed", "the", "relative", "signals", "for", "each", "signal", "there", "is", "a", "dropdown", "containing", "all", "the", "widgets", "the", "user", "will", "select", "the", "widget", "that", "have", "to", "listen", "a", "specific", "event" ]
python
train
55.5
ratcave/wavefront_reader
wavefront_reader/reading.py
https://github.com/ratcave/wavefront_reader/blob/c515164a3952d6b85f8044f429406fddd862bfd0/wavefront_reader/reading.py#L7-L15
def parse_mixed_delim_str(line): """Turns .obj face index string line into [verts, texcoords, normals] numeric tuples.""" arrs = [[], [], []] for group in line.split(' '): for col, coord in enumerate(group.split('/')): if coord: arrs[col].append(int(coord)) return [tuple(arr) for arr in arrs]
[ "def", "parse_mixed_delim_str", "(", "line", ")", ":", "arrs", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "group", "in", "line", ".", "split", "(", "' '", ")", ":", "for", "col", ",", "coord", "in", "enumerate", "(", "group", ".", "split", "(", "'/'", ")", ")", ":", "if", "coord", ":", "arrs", "[", "col", "]", ".", "append", "(", "int", "(", "coord", ")", ")", "return", "[", "tuple", "(", "arr", ")", "for", "arr", "in", "arrs", "]" ]
Turns .obj face index string line into [verts, texcoords, normals] numeric tuples.
[ "Turns", ".", "obj", "face", "index", "string", "line", "into", "[", "verts", "texcoords", "normals", "]", "numeric", "tuples", "." ]
python
train
37.555556
danilobellini/audiolazy
audiolazy/lazy_text.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_text.py#L266-L296
def rst_table(data, schema=None): """ Creates a reStructuredText simple table (list of strings) from a list of lists. """ # Process multi-rows (replaced by rows with empty columns when needed) pdata = [] for row in data: prow = [el if isinstance(el, list) else [el] for el in row] pdata.extend(pr for pr in xzip_longest(*prow, fillvalue="")) # Find the columns sizes sizes = [max(len("{0}".format(el)) for el in column) for column in xzip(*pdata)] sizes = [max(size, len(sch)) for size, sch in xzip(sizes, schema)] # Creates the title and border rows if schema is None: schema = pdata[0] pdata = pdata[1:] border = " ".join("=" * size for size in sizes) titles = " ".join("{1:^{0}}".format(*pair) for pair in xzip(sizes, schema)) # Creates the full table and returns rows = [border, titles, border] rows.extend(" ".join("{1:<{0}}".format(*pair) for pair in xzip(sizes, row)) for row in pdata) rows.append(border) return rows
[ "def", "rst_table", "(", "data", ",", "schema", "=", "None", ")", ":", "# Process multi-rows (replaced by rows with empty columns when needed)", "pdata", "=", "[", "]", "for", "row", "in", "data", ":", "prow", "=", "[", "el", "if", "isinstance", "(", "el", ",", "list", ")", "else", "[", "el", "]", "for", "el", "in", "row", "]", "pdata", ".", "extend", "(", "pr", "for", "pr", "in", "xzip_longest", "(", "*", "prow", ",", "fillvalue", "=", "\"\"", ")", ")", "# Find the columns sizes", "sizes", "=", "[", "max", "(", "len", "(", "\"{0}\"", ".", "format", "(", "el", ")", ")", "for", "el", "in", "column", ")", "for", "column", "in", "xzip", "(", "*", "pdata", ")", "]", "sizes", "=", "[", "max", "(", "size", ",", "len", "(", "sch", ")", ")", "for", "size", ",", "sch", "in", "xzip", "(", "sizes", ",", "schema", ")", "]", "# Creates the title and border rows", "if", "schema", "is", "None", ":", "schema", "=", "pdata", "[", "0", "]", "pdata", "=", "pdata", "[", "1", ":", "]", "border", "=", "\" \"", ".", "join", "(", "\"=\"", "*", "size", "for", "size", "in", "sizes", ")", "titles", "=", "\" \"", ".", "join", "(", "\"{1:^{0}}\"", ".", "format", "(", "*", "pair", ")", "for", "pair", "in", "xzip", "(", "sizes", ",", "schema", ")", ")", "# Creates the full table and returns", "rows", "=", "[", "border", ",", "titles", ",", "border", "]", "rows", ".", "extend", "(", "\" \"", ".", "join", "(", "\"{1:<{0}}\"", ".", "format", "(", "*", "pair", ")", "for", "pair", "in", "xzip", "(", "sizes", ",", "row", ")", ")", "for", "row", "in", "pdata", ")", "rows", ".", "append", "(", "border", ")", "return", "rows" ]
Creates a reStructuredText simple table (list of strings) from a list of lists.
[ "Creates", "a", "reStructuredText", "simple", "table", "(", "list", "of", "strings", ")", "from", "a", "list", "of", "lists", "." ]
python
train
32.870968
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L13462-L13485
def _set_dscp_exp_state(self, v, load=False): """ Setter method for dscp_exp_state, mapped from YANG variable /dscp_exp_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_dscp_exp_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp_exp_state() directly. YANG Description: dscp_exp """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dscp_exp_state.dscp_exp_state, is_container='container', presence=False, yang_name="dscp-exp-state", rest_name="dscp-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-dscp-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """dscp_exp_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=dscp_exp_state.dscp_exp_state, is_container='container', presence=False, yang_name="dscp-exp-state", rest_name="dscp-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-dscp-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True)""", }) self.__dscp_exp_state = t if hasattr(self, '_set'): self._set()
[ "def", "_set_dscp_exp_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "dscp_exp_state", ".", "dscp_exp_state", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"dscp-exp-state\"", ",", "rest_name", "=", "\"dscp-exp-state\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'qos-dscp-exp'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos-operational'", ",", "defining_module", "=", "'brocade-qos-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"dscp_exp_state must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=dscp_exp_state.dscp_exp_state, is_container='container', presence=False, yang_name=\"dscp-exp-state\", rest_name=\"dscp-exp-state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-dscp-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__dscp_exp_state", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for dscp_exp_state, mapped from YANG variable /dscp_exp_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_dscp_exp_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp_exp_state() directly. YANG Description: dscp_exp
[ "Setter", "method", "for", "dscp_exp_state", "mapped", "from", "YANG", "variable", "/", "dscp_exp_state", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_dscp_exp_state", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_dscp_exp_state", "()", "directly", "." ]
python
train
73.333333
ToFuProject/tofu
tofu/data/_core.py
https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1026-L1052
def set_dtreat_interp_indch(self, indch=None): """ Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indch is None, type(indch) in [np.ndarray,list], type(indch) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nt'] for k in indch.keys()] assert all(lc) for k in indch.keys(): assert hasattr(indch[k],'__iter__') indch[k] = _format_ind(indch[k], n=self._ddataRef['nch']) elif lC[1]: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['interp-indch'] = indch self._ddata['uptodate'] = False
[ "def", "set_dtreat_interp_indch", "(", "self", ",", "indch", "=", "None", ")", ":", "lC", "=", "[", "indch", "is", "None", ",", "type", "(", "indch", ")", "in", "[", "np", ".", "ndarray", ",", "list", "]", ",", "type", "(", "indch", ")", "is", "dict", "]", "assert", "any", "(", "lC", ")", "if", "lC", "[", "2", "]", ":", "lc", "=", "[", "type", "(", "k", ")", "is", "int", "and", "k", "<", "self", ".", "_ddataRef", "[", "'nt'", "]", "for", "k", "in", "indch", ".", "keys", "(", ")", "]", "assert", "all", "(", "lc", ")", "for", "k", "in", "indch", ".", "keys", "(", ")", ":", "assert", "hasattr", "(", "indch", "[", "k", "]", ",", "'__iter__'", ")", "indch", "[", "k", "]", "=", "_format_ind", "(", "indch", "[", "k", "]", ",", "n", "=", "self", ".", "_ddataRef", "[", "'nch'", "]", ")", "elif", "lC", "[", "1", "]", ":", "indch", "=", "np", ".", "asarray", "(", "indch", ")", "assert", "indch", ".", "ndim", "==", "1", "indch", "=", "_format_ind", "(", "indch", ",", "n", "=", "self", ".", "_ddataRef", "[", "'nch'", "]", ")", "self", ".", "_dtreat", "[", "'interp-indch'", "]", "=", "indch", "self", ".", "_ddata", "[", "'uptodate'", "]", "=", "False" ]
Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
[ "Set", "the", "indices", "of", "the", "channels", "for", "which", "to", "interpolate", "data" ]
python
train
43.481481
swevm/scaleio-py
scaleiopy/api/scaleio/cluster/storagepool.py
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/api/scaleio/cluster/storagepool.py#L23-L34
def get_storage_pool_by_name(self, name): """ Get ScaleIO StoragePool object by its name :param name: Name of StoragePool :return: ScaleIO StoragePool object :raise KeyError: No StoragePool with specified name found :rtype: StoragePool object """ for storage_pool in self.conn.storage_pools: if storage_pool.name == name: return storage_pool raise KeyError("Storage pool of that name not found")
[ "def", "get_storage_pool_by_name", "(", "self", ",", "name", ")", ":", "for", "storage_pool", "in", "self", ".", "conn", ".", "storage_pools", ":", "if", "storage_pool", ".", "name", "==", "name", ":", "return", "storage_pool", "raise", "KeyError", "(", "\"Storage pool of that name not found\"", ")" ]
Get ScaleIO StoragePool object by its name :param name: Name of StoragePool :return: ScaleIO StoragePool object :raise KeyError: No StoragePool with specified name found :rtype: StoragePool object
[ "Get", "ScaleIO", "StoragePool", "object", "by", "its", "name", ":", "param", "name", ":", "Name", "of", "StoragePool", ":", "return", ":", "ScaleIO", "StoragePool", "object", ":", "raise", "KeyError", ":", "No", "StoragePool", "with", "specified", "name", "found", ":", "rtype", ":", "StoragePool", "object" ]
python
train
40.333333
aiidateam/aiida-codtools
aiida_codtools/workflows/cif_clean.py
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/workflows/cif_clean.py#L128-L135
def inspect_select_calculation(self): """Inspect the result of the CifSelectCalculation, verifying that it produced a CifData output node.""" try: node = self.ctx.cif_select self.ctx.cif = node.outputs.cif except exceptions.NotExistent: self.report('aborting: CifSelectCalculation<{}> did not return the required cif output'.format(node.uuid)) return self.exit_codes.ERROR_CIF_SELECT_FAILED
[ "def", "inspect_select_calculation", "(", "self", ")", ":", "try", ":", "node", "=", "self", ".", "ctx", ".", "cif_select", "self", ".", "ctx", ".", "cif", "=", "node", ".", "outputs", ".", "cif", "except", "exceptions", ".", "NotExistent", ":", "self", ".", "report", "(", "'aborting: CifSelectCalculation<{}> did not return the required cif output'", ".", "format", "(", "node", ".", "uuid", ")", ")", "return", "self", ".", "exit_codes", ".", "ERROR_CIF_SELECT_FAILED" ]
Inspect the result of the CifSelectCalculation, verifying that it produced a CifData output node.
[ "Inspect", "the", "result", "of", "the", "CifSelectCalculation", "verifying", "that", "it", "produced", "a", "CifData", "output", "node", "." ]
python
train
56.875
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L550-L584
def _route_message(self, message, data): """ Route message to any handlers on the message namespace """ # route message to handlers if message.namespace in self._handlers: # debug messages if message.namespace != NS_HEARTBEAT: self.logger.debug( "[%s:%s] Received: %s", self.fn or self.host, self.port, _message_to_string(message, data)) # message handlers try: handled = \ self._handlers[message.namespace].receive_message( message, data) if not handled: if data.get(REQUEST_ID) not in self._request_callbacks: self.logger.debug( "[%s:%s] Message unhandled: %s", self.fn or self.host, self.port, _message_to_string(message, data)) except Exception: # pylint: disable=broad-except self.logger.exception( ("[%s:%s] Exception caught while sending message to " "controller %s: %s"), self.fn or self.host, self.port, type(self._handlers[message.namespace]).__name__, _message_to_string(message, data)) else: self.logger.debug( "[%s:%s] Received unknown namespace: %s", self.fn or self.host, self.port, _message_to_string(message, data))
[ "def", "_route_message", "(", "self", ",", "message", ",", "data", ")", ":", "# route message to handlers", "if", "message", ".", "namespace", "in", "self", ".", "_handlers", ":", "# debug messages", "if", "message", ".", "namespace", "!=", "NS_HEARTBEAT", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Received: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "# message handlers", "try", ":", "handled", "=", "self", ".", "_handlers", "[", "message", ".", "namespace", "]", ".", "receive_message", "(", "message", ",", "data", ")", "if", "not", "handled", ":", "if", "data", ".", "get", "(", "REQUEST_ID", ")", "not", "in", "self", ".", "_request_callbacks", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Message unhandled: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "self", ".", "logger", ".", "exception", "(", "(", "\"[%s:%s] Exception caught while sending message to \"", "\"controller %s: %s\"", ")", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "type", "(", "self", ".", "_handlers", "[", "message", ".", "namespace", "]", ")", ".", "__name__", ",", "_message_to_string", "(", "message", ",", "data", ")", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"[%s:%s] Received unknown namespace: %s\"", ",", "self", ".", "fn", "or", "self", ".", "host", ",", "self", ".", "port", ",", "_message_to_string", "(", "message", ",", "data", ")", ")" ]
Route message to any handlers on the message namespace
[ "Route", "message", "to", "any", "handlers", "on", "the", "message", "namespace" ]
python
train
43.2
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L515-L521
def file_add_tags(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /file-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags """ return DXHTTPRequest('/%s/addTags' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "file_add_tags", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/addTags'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /file-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags
[ "Invokes", "the", "/", "file", "-", "xxxx", "/", "addTags", "API", "method", "." ]
python
train
50.571429
saltstack/salt
salt/cloud/libcloudfuncs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L335-L409
def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = get_node(conn, name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) mopts_ = salt.config.DEFAULT_MINION_OPTS conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1]) mopts_.update( salt.config.minion_config(os.path.join(conf_path, 'minion')) ) client = salt.client.get_local_client(mopts_) minions = client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.destroy_node(node) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__['delete_sshkeys'] is True: public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips')) if public_ips: salt.utils.cloud.remove_sshkey(public_ips[0]) private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips')) if private_ips: salt.utils.cloud.remove_sshkey(private_ips[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return True log.error('Failed to Destroy VM: %s', name) return False
[ "def", "destroy", "(", "name", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The destroy action must be called with -d, --destroy, '", "'-a or --action.'", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'destroying instance'", ",", "'salt/cloud/{0}/destroying'", ".", "format", "(", "name", ")", ",", "args", "=", "{", "'name'", ":", "name", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "# pylint: disable=E0602", "node", "=", "get_node", "(", "conn", ",", "name", ")", "profiles", "=", "get_configured_provider", "(", ")", "[", "'profiles'", "]", "# pylint: disable=E0602", "if", "node", "is", "None", ":", "log", ".", "error", "(", "'Unable to find the VM %s'", ",", "name", ")", "profile", "=", "None", "if", "'metadata'", "in", "node", ".", "extra", "and", "'profile'", "in", "node", ".", "extra", "[", "'metadata'", "]", ":", "profile", "=", "node", ".", "extra", "[", "'metadata'", "]", "[", "'profile'", "]", "flush_mine_on_destroy", "=", "False", "if", "profile", "and", "profile", "in", "profiles", "and", "'flush_mine_on_destroy'", "in", "profiles", "[", "profile", "]", ":", "flush_mine_on_destroy", "=", "profiles", "[", "profile", "]", "[", "'flush_mine_on_destroy'", "]", "if", "flush_mine_on_destroy", ":", "log", ".", "info", "(", "'Clearing Salt Mine: %s'", ",", "name", ")", "mopts_", "=", "salt", ".", "config", ".", "DEFAULT_MINION_OPTS", "conf_path", "=", "'/'", ".", "join", "(", "__opts__", "[", "'conf_file'", "]", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "mopts_", ".", "update", "(", "salt", ".", "config", ".", "minion_config", "(", "os", ".", "path", ".", "join", "(", "conf_path", ",", "'minion'", ")", ")", ")", "client", "=", "salt", ".", "client", ".", "get_local_client", "(", "mopts_", ")", "minions", "=", "client", ".", "cmd", "(", "name", ",", "'mine.flush'", ")", "log", ".", "info", "(", "'Clearing Salt Mine: %s, %s'", ",", "name", ",", "flush_mine_on_destroy", ")", "log", ".", "info", "(", "'Destroying VM: %s'", ",", "name", ")", "ret", "=", "conn", ".", "destroy_node", "(", "node", ")", "if", "ret", ":", "log", ".", "info", "(", "'Destroyed VM: %s'", ",", "name", ")", "# Fire destroy action", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'destroyed instance'", ",", "'salt/cloud/{0}/destroyed'", ".", "format", "(", "name", ")", ",", "args", "=", "{", "'name'", ":", "name", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "if", "__opts__", "[", "'delete_sshkeys'", "]", "is", "True", ":", "public_ips", "=", "getattr", "(", "node", ",", "__opts__", ".", "get", "(", "'ssh_interface'", ",", "'public_ips'", ")", ")", "if", "public_ips", ":", "salt", ".", "utils", ".", "cloud", ".", "remove_sshkey", "(", "public_ips", "[", "0", "]", ")", "private_ips", "=", "getattr", "(", "node", ",", "__opts__", ".", "get", "(", "'ssh_interface'", ",", "'private_ips'", ")", ")", "if", "private_ips", ":", "salt", ".", "utils", ".", "cloud", ".", "remove_sshkey", "(", "private_ips", "[", "0", "]", ")", "if", "__opts__", ".", "get", "(", "'update_cachedir'", ",", "False", ")", "is", "True", ":", "__utils__", "[", "'cloud.delete_minion_cachedir'", "]", "(", "name", ",", "__active_provider_name__", ".", "split", "(", "':'", ")", 
"[", "0", "]", ",", "__opts__", ")", "return", "True", "log", ".", "error", "(", "'Failed to Destroy VM: %s'", ",", "name", ")", "return", "False" ]
Delete a single VM
[ "Delete", "a", "single", "VM" ]
python
train
34.56
sirfoga/pyhal
hal/files/parsers.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/parsers.py#L62-L70
def get_dicts(self): """Gets dicts in file :return: (generator of) of dicts with data from .csv file """ reader = csv.DictReader(open(self.path, "r", encoding=self.encoding)) for row in reader: if row: yield row
[ "def", "get_dicts", "(", "self", ")", ":", "reader", "=", "csv", ".", "DictReader", "(", "open", "(", "self", ".", "path", ",", "\"r\"", ",", "encoding", "=", "self", ".", "encoding", ")", ")", "for", "row", "in", "reader", ":", "if", "row", ":", "yield", "row" ]
Gets dicts in file :return: (generator of) of dicts with data from .csv file
[ "Gets", "dicts", "in", "file" ]
python
train
30.222222
tensorforce/tensorforce
tensorforce/models/memory_model.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/models/memory_model.py#L117-L125
def as_local_model(self): """ Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL. """ super(MemoryModel, self).as_local_model() self.optimizer_spec = dict( type='global_optimizer', optimizer=self.optimizer_spec )
[ "def", "as_local_model", "(", "self", ")", ":", "super", "(", "MemoryModel", ",", "self", ")", ".", "as_local_model", "(", ")", "self", ".", "optimizer_spec", "=", "dict", "(", "type", "=", "'global_optimizer'", ",", "optimizer", "=", "self", ".", "optimizer_spec", ")" ]
Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL.
[ "Makes", "sure", "our", "optimizer", "is", "wrapped", "into", "the", "global_optimizer", "meta", ".", "This", "is", "only", "relevant", "for", "distributed", "RL", "." ]
python
valid
37.111111
diffeo/rejester
rejester/workers.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/workers.py#L258-L271
def _get_and_start_work(self): "return (async_result, work_unit) or (None, None)" worker_id = nice_identifier() work_unit = self.task_master.get_work(worker_id, available_gb=self.available_gb()) if work_unit is None: return None, None async_result = self.pool.apply_async( run_worker, (HeadlessWorker, self.task_master.registry.config, worker_id, work_unit.work_spec_name, work_unit.key), callback=self._finish_callback) return async_result, work_unit
[ "def", "_get_and_start_work", "(", "self", ")", ":", "worker_id", "=", "nice_identifier", "(", ")", "work_unit", "=", "self", ".", "task_master", ".", "get_work", "(", "worker_id", ",", "available_gb", "=", "self", ".", "available_gb", "(", ")", ")", "if", "work_unit", "is", "None", ":", "return", "None", ",", "None", "async_result", "=", "self", ".", "pool", ".", "apply_async", "(", "run_worker", ",", "(", "HeadlessWorker", ",", "self", ".", "task_master", ".", "registry", ".", "config", ",", "worker_id", ",", "work_unit", ".", "work_spec_name", ",", "work_unit", ".", "key", ")", ",", "callback", "=", "self", ".", "_finish_callback", ")", "return", "async_result", ",", "work_unit" ]
return (async_result, work_unit) or (None, None)
[ "return", "(", "async_result", "work_unit", ")", "or", "(", "None", "None", ")" ]
python
train
40.928571
fusionbox/django-argonauts
argonauts/views.py
https://github.com/fusionbox/django-argonauts/blob/0f64f9700199e8c70a1cb9a055b8e31f6843933d/argonauts/views.py#L19-L26
def render_to_response(self, obj, **response_kwargs): """ Returns an ``HttpResponse`` object instance with Content-Type: application/json. The response body will be the return value of ``self.serialize(obj)`` """ return HttpResponse(self.serialize(obj), content_type='application/json', **response_kwargs)
[ "def", "render_to_response", "(", "self", ",", "obj", ",", "*", "*", "response_kwargs", ")", ":", "return", "HttpResponse", "(", "self", ".", "serialize", "(", "obj", ")", ",", "content_type", "=", "'application/json'", ",", "*", "*", "response_kwargs", ")" ]
Returns an ``HttpResponse`` object instance with Content-Type: application/json. The response body will be the return value of ``self.serialize(obj)``
[ "Returns", "an", "HttpResponse", "object", "instance", "with", "Content", "-", "Type", ":", "application", "/", "json", "." ]
python
train
43.375
knipknap/SpiffWorkflow
SpiffWorkflow/task.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/task.py#L325-L340
def _assign_new_thread_id(self, recursive=True): """ Assigns a new thread id to the task. :type recursive: bool :param recursive: Whether to assign the id to children recursively. :rtype: bool :returns: The new thread id. """ self.__class__.thread_id_pool += 1 self.thread_id = self.__class__.thread_id_pool if not recursive: return self.thread_id for child in self: child.thread_id = self.thread_id return self.thread_id
[ "def", "_assign_new_thread_id", "(", "self", ",", "recursive", "=", "True", ")", ":", "self", ".", "__class__", ".", "thread_id_pool", "+=", "1", "self", ".", "thread_id", "=", "self", ".", "__class__", ".", "thread_id_pool", "if", "not", "recursive", ":", "return", "self", ".", "thread_id", "for", "child", "in", "self", ":", "child", ".", "thread_id", "=", "self", ".", "thread_id", "return", "self", ".", "thread_id" ]
Assigns a new thread id to the task. :type recursive: bool :param recursive: Whether to assign the id to children recursively. :rtype: bool :returns: The new thread id.
[ "Assigns", "a", "new", "thread", "id", "to", "the", "task", "." ]
python
valid
33.0625
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/connect/connect.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/connect/connect.py#L111-L138
def start_notifications(self): """Start the notifications thread. If an external callback is not set up (using `update_webhook`) then calling this function is mandatory to get or set resource. .. code-block:: python >>> api.start_notifications() >>> print(api.get_resource_value(device, path)) Some value >>> api.stop_notifications() :returns: void """ with self._notifications_lock: if self.has_active_notification_thread: return api = self._get_api(mds.NotificationsApi) self._notifications_thread = NotificationsThread( self._db, self._queues, b64decode=self.b64decode, notifications_api=api, subscription_manager=self.subscribe, ) self._notifications_thread.daemon = True self._notifications_thread.start()
[ "def", "start_notifications", "(", "self", ")", ":", "with", "self", ".", "_notifications_lock", ":", "if", "self", ".", "has_active_notification_thread", ":", "return", "api", "=", "self", ".", "_get_api", "(", "mds", ".", "NotificationsApi", ")", "self", ".", "_notifications_thread", "=", "NotificationsThread", "(", "self", ".", "_db", ",", "self", ".", "_queues", ",", "b64decode", "=", "self", ".", "b64decode", ",", "notifications_api", "=", "api", ",", "subscription_manager", "=", "self", ".", "subscribe", ",", ")", "self", ".", "_notifications_thread", ".", "daemon", "=", "True", "self", ".", "_notifications_thread", ".", "start", "(", ")" ]
Start the notifications thread. If an external callback is not set up (using `update_webhook`) then calling this function is mandatory to get or set resource. .. code-block:: python >>> api.start_notifications() >>> print(api.get_resource_value(device, path)) Some value >>> api.stop_notifications() :returns: void
[ "Start", "the", "notifications", "thread", "." ]
python
train
34.25
pantsbuild/pants
src/python/pants/backend/docgen/tasks/markdown_to_html_utils.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/docgen/tasks/markdown_to_html_utils.py#L59-L118
def choose_include_text(s, params, source_path): """Given the contents of a file and !inc[these params], return matching lines If there was a problem matching parameters, return empty list. :param s: file's text :param params: string like "start-at=foo&end-at=bar" :param source_path: path to source .md. Useful in error messages """ lines = s.splitlines() start_after = None start_at = None end_before = None end_at = None for term in params.split("&"): if '=' in term: param, value = [p.strip() for p in term.split('=', 1)] else: param, value = term.strip(), '' if not param: continue if param == "start-after": start_after = value elif param == "start-at": start_at = value elif param == "end-before": end_before = value elif param == "end-at": end_at = value else: raise TaskError('Invalid include directive "{0}"' ' in {1}'.format(params, source_path)) chosen_lines = [] # two loops, one waits to "start recording", one "records" for line_ix in range(0, len(lines)): line = lines[line_ix] if (not start_at) and (not start_after): # if we didn't set a start-* param, don't wait to start break if start_at is not None and start_at in line: break if start_after is not None and start_after in line: line_ix += 1 break else: # never started recording: return '' for line_ix in range(line_ix, len(lines)): line = lines[line_ix] if end_before is not None and end_before in line: break chosen_lines.append(line) if end_at is not None and end_at in line: break else: if (end_before or end_at): # we had an end- filter, but never encountered it. return '' return '\n'.join(chosen_lines)
[ "def", "choose_include_text", "(", "s", ",", "params", ",", "source_path", ")", ":", "lines", "=", "s", ".", "splitlines", "(", ")", "start_after", "=", "None", "start_at", "=", "None", "end_before", "=", "None", "end_at", "=", "None", "for", "term", "in", "params", ".", "split", "(", "\"&\"", ")", ":", "if", "'='", "in", "term", ":", "param", ",", "value", "=", "[", "p", ".", "strip", "(", ")", "for", "p", "in", "term", ".", "split", "(", "'='", ",", "1", ")", "]", "else", ":", "param", ",", "value", "=", "term", ".", "strip", "(", ")", ",", "''", "if", "not", "param", ":", "continue", "if", "param", "==", "\"start-after\"", ":", "start_after", "=", "value", "elif", "param", "==", "\"start-at\"", ":", "start_at", "=", "value", "elif", "param", "==", "\"end-before\"", ":", "end_before", "=", "value", "elif", "param", "==", "\"end-at\"", ":", "end_at", "=", "value", "else", ":", "raise", "TaskError", "(", "'Invalid include directive \"{0}\"'", "' in {1}'", ".", "format", "(", "params", ",", "source_path", ")", ")", "chosen_lines", "=", "[", "]", "# two loops, one waits to \"start recording\", one \"records\"", "for", "line_ix", "in", "range", "(", "0", ",", "len", "(", "lines", ")", ")", ":", "line", "=", "lines", "[", "line_ix", "]", "if", "(", "not", "start_at", ")", "and", "(", "not", "start_after", ")", ":", "# if we didn't set a start-* param, don't wait to start", "break", "if", "start_at", "is", "not", "None", "and", "start_at", "in", "line", ":", "break", "if", "start_after", "is", "not", "None", "and", "start_after", "in", "line", ":", "line_ix", "+=", "1", "break", "else", ":", "# never started recording:", "return", "''", "for", "line_ix", "in", "range", "(", "line_ix", ",", "len", "(", "lines", ")", ")", ":", "line", "=", "lines", "[", "line_ix", "]", "if", "end_before", "is", "not", "None", "and", "end_before", "in", "line", ":", "break", "chosen_lines", ".", "append", "(", "line", ")", "if", "end_at", "is", "not", "None", "and", "end_at", "in", "line", ":", "break", "else", ":", "if", "(", "end_before", "or", "end_at", ")", ":", "# we had an end- filter, but never encountered it.", "return", "''", "return", "'\\n'", ".", "join", "(", "chosen_lines", ")" ]
Given the contents of a file and !inc[these params], return matching lines If there was a problem matching parameters, return empty list. :param s: file's text :param params: string like "start-at=foo&end-at=bar" :param source_path: path to source .md. Useful in error messages
[ "Given", "the", "contents", "of", "a", "file", "and", "!inc", "[", "these", "params", "]", "return", "matching", "lines" ]
python
train
29.316667
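A minimal usage sketch for choose_include_text, assuming the function above has been copied into the current scope; the sample text, markers, and source path are made up:

sample_text = "\n".join([
    "setup line",
    "START MARKER",
    "first included line",
    "second included line",
    "END MARKER",
    "trailing line",
])

# Record everything after "START MARKER", up to and including "END MARKER".
chosen = choose_include_text(
    sample_text,
    "start-after=START MARKER&end-at=END MARKER",
    "docs/example.md",  # only used in error messages
)
print(chosen)
# first included line
# second included line
# END MARKER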
globality-corp/microcosm-flask
microcosm_flask/namespaces.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/namespaces.py#L159-L168
def url_for(self, operation, _external=True, **kwargs): """ Construct a URL for an operation against a resource. :param kwargs: additional arguments for URL path expansion, which are passed to flask.url_for. In particular, _external=True produces absolute url. """ return url_for(self.endpoint_for(operation), _external=_external, **kwargs)
[ "def", "url_for", "(", "self", ",", "operation", ",", "_external", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "url_for", "(", "self", ".", "endpoint_for", "(", "operation", ")", ",", "_external", "=", "_external", ",", "*", "*", "kwargs", ")" ]
Construct a URL for an operation against a resource. :param kwargs: additional arguments for URL path expansion, which are passed to flask.url_for. In particular, _external=True produces absolute url.
[ "Construct", "a", "URL", "for", "an", "operation", "against", "a", "resource", "." ]
python
train
39.7
brentp/toolshed
toolshed/files.py
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/files.py#L284-L299
def is_newer_b(a, bfiles): """ check that all b files have been modified more recently than a """ if isinstance(bfiles, basestring): bfiles = [bfiles] if not op.exists(a): return False if not all(op.exists(b) for b in bfiles): return False atime = os.stat(a).st_mtime # modification time for b in bfiles: # a has been modified since if atime > os.stat(b).st_mtime: return False return True
[ "def", "is_newer_b", "(", "a", ",", "bfiles", ")", ":", "if", "isinstance", "(", "bfiles", ",", "basestring", ")", ":", "bfiles", "=", "[", "bfiles", "]", "if", "not", "op", ".", "exists", "(", "a", ")", ":", "return", "False", "if", "not", "all", "(", "op", ".", "exists", "(", "b", ")", "for", "b", "in", "bfiles", ")", ":", "return", "False", "atime", "=", "os", ".", "stat", "(", "a", ")", ".", "st_mtime", "# modification time", "for", "b", "in", "bfiles", ":", "# a has been modified since", "if", "atime", ">", "os", ".", "stat", "(", "b", ")", ".", "st_mtime", ":", "return", "False", "return", "True" ]
check that all b files have been modified more recently than a
[ "check", "that", "all", "b", "files", "have", "been", "modified", "more", "recently", "than", "a" ]
python
train
28
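A short sketch of using is_newer_b to decide whether derived outputs need rebuilding; the file names are hypothetical, and the import path follows the module path listed above:

from toolshed.files import is_newer_b

# True only when every file in the second argument exists and was modified
# more recently than the first file.
if is_newer_b("input.vcf", ["out.bed", "out.bed.idx"]):
    print("outputs are newer than input.vcf; skipping regeneration")
else:
    print("outputs are missing or stale; regenerating...")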
llllllllll/codetransformer
codetransformer/instructions.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/instructions.py#L189-L205
def from_opcode(cls, opcode, arg=_no_arg): """ Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- intsr : Instruction An instance of the instruction named by ``opcode``. """ return type(cls)(opname[opcode], (cls,), {}, opcode=opcode)(arg)
[ "def", "from_opcode", "(", "cls", ",", "opcode", ",", "arg", "=", "_no_arg", ")", ":", "return", "type", "(", "cls", ")", "(", "opname", "[", "opcode", "]", ",", "(", "cls", ",", ")", ",", "{", "}", ",", "opcode", "=", "opcode", ")", "(", "arg", ")" ]
Create an instruction from an opcode and raw argument.

Parameters
----------
opcode : int
Opcode for the instruction to create.
arg : int, optional
The argument for the instruction.

Returns
-------
instr : Instruction
An instance of the instruction named by ``opcode``.
[ "Create", "an", "instruction", "from", "an", "opcode", "and", "raw", "argument", "." ]
python
train
29.117647
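from_opcode resolves the instruction name through the standard opcode tables (opname); a standalone look at that mapping using the dis module, noting that the numeric value varies across Python versions:

import dis

# dis.opmap maps names to opcodes; dis.opname is the reverse table that
# from_opcode relies on.
load_const = dis.opmap["LOAD_CONST"]
print(load_const, dis.opname[load_const])  # e.g. "100 LOAD_CONST" on some versions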
ktdreyer/txbugzilla
txbugzilla/__init__.py
https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/txbugzilla/__init__.py#L172-L199
def _parse_errback(self, error): """ Parse an error from an XML-RPC call. raises: ``IOError`` when the Twisted XML-RPC connection times out. raises: ``BugzillaNotFoundException`` raises: ``BugzillaNotAuthorizedException`` raises: ``BugzillaException`` if we got a response from the XML-RPC server but it is not one of the ``xmlrpc.Fault``s above that we know about. raises: ``Exception`` if it is not one of the above. """ if isinstance(error.value, IOError): raise error.value if hasattr(xmlrpc, 'Fault'): # Python 2: fault = xmlrpc.Fault else: fault = xmlrpc.client.Fault if isinstance(error.value, fault): if error.value.faultCode == 101: raise BugzillaNotFoundException(error.value.faultString) if error.value.faultCode == 102: raise BugzillaNotAuthorizedException(error.value.faultString) if error.value.faultCode == 32000: raise BugzillaTokenExpiredException(error.value.faultString) raise BugzillaException(error.value) # We don't know what this is, so just raise it. raise error
[ "def", "_parse_errback", "(", "self", ",", "error", ")", ":", "if", "isinstance", "(", "error", ".", "value", ",", "IOError", ")", ":", "raise", "error", ".", "value", "if", "hasattr", "(", "xmlrpc", ",", "'Fault'", ")", ":", "# Python 2:", "fault", "=", "xmlrpc", ".", "Fault", "else", ":", "fault", "=", "xmlrpc", ".", "client", ".", "Fault", "if", "isinstance", "(", "error", ".", "value", ",", "fault", ")", ":", "if", "error", ".", "value", ".", "faultCode", "==", "101", ":", "raise", "BugzillaNotFoundException", "(", "error", ".", "value", ".", "faultString", ")", "if", "error", ".", "value", ".", "faultCode", "==", "102", ":", "raise", "BugzillaNotAuthorizedException", "(", "error", ".", "value", ".", "faultString", ")", "if", "error", ".", "value", ".", "faultCode", "==", "32000", ":", "raise", "BugzillaTokenExpiredException", "(", "error", ".", "value", ".", "faultString", ")", "raise", "BugzillaException", "(", "error", ".", "value", ")", "# We don't know what this is, so just raise it.", "raise", "error" ]
Parse an error from an XML-RPC call. raises: ``IOError`` when the Twisted XML-RPC connection times out. raises: ``BugzillaNotFoundException`` raises: ``BugzillaNotAuthorizedException`` raises: ``BugzillaException`` if we got a response from the XML-RPC server but it is not one of the ``xmlrpc.Fault``s above that we know about. raises: ``Exception`` if it is not one of the above.
[ "Parse", "an", "error", "from", "an", "XML", "-", "RPC", "call", "." ]
python
train
44.178571
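A standalone sketch of the fault-code dispatch idea behind _parse_errback; the exception classes below mirror the codes in the snippet but are stand-ins, not the txbugzilla API:

class NotFound(Exception): pass
class NotAuthorized(Exception): pass
class TokenExpired(Exception): pass

FAULT_MAP = {101: NotFound, 102: NotAuthorized, 32000: TokenExpired}

def raise_for_fault(fault_code, fault_string):
    # Map known Bugzilla fault codes to specific exceptions, otherwise raise
    # a generic error carrying the original message.
    exc_class = FAULT_MAP.get(fault_code)
    if exc_class is None:
        raise RuntimeError("unhandled fault %s: %s" % (fault_code, fault_string))
    raise exc_class(fault_string)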
sdispater/eloquent
eloquent/orm/relations/morph_one_or_many.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L51-L59
def add_eager_constraints(self, models): """ Set the constraints for an eager load of the relation. :type models: list """ super(MorphOneOrMany, self).add_eager_constraints(models) self._query.where(self._morph_type, self._morph_class)
[ "def", "add_eager_constraints", "(", "self", ",", "models", ")", ":", "super", "(", "MorphOneOrMany", ",", "self", ")", ".", "add_eager_constraints", "(", "models", ")", "self", ".", "_query", ".", "where", "(", "self", ".", "_morph_type", ",", "self", ".", "_morph_class", ")" ]
Set the constraints for an eager load of the relation. :type models: list
[ "Set", "the", "constraints", "for", "an", "eager", "load", "of", "the", "relation", "." ]
python
train
30.777778
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L535-L541
def parse_description(s): """ Returns a dictionary based on the FASTA header, assuming JCVI data """ s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
[ "def", "parse_description", "(", "s", ")", ":", "s", "=", "\"\"", ".", "join", "(", "s", ".", "split", "(", ")", "[", "1", ":", "]", ")", ".", "replace", "(", "\"/\"", ",", "\";\"", ")", "a", "=", "parse_qs", "(", "s", ")", "return", "a" ]
Returns a dictionary based on the FASTA header, assuming JCVI data
[ "Returns", "a", "dictionary", "based", "on", "the", "FASTA", "header", "assuming", "JCVI", "data" ]
python
train
26.857143
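A standalone sketch of what parse_description extracts from a FASTA header; the header is made up, and the key/value split is done explicitly here because recent Python versions no longer treat ';' as a parse_qs separator by default:

header = ">C1 /clone=mth2-48c5 /len=34087"

payload = "".join(header.split()[1:]).replace("/", ";")
fields = {}
for chunk in filter(None, payload.split(";")):
    key, _, value = chunk.partition("=")
    fields.setdefault(key, []).append(value)

print(fields)  # {'clone': ['mth2-48c5'], 'len': ['34087']}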
rigetti/pyquil
pyquil/gates.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L213-L225
def PHASE(angle, qubit): """Produces the PHASE gate:: PHASE(phi) = [[1, 0], [0, exp(1j * phi)]] This is the same as the RZ gate. :param angle: The angle to rotate around the z-axis on the bloch sphere. :param qubit: The qubit apply the gate to. :returns: A Gate object. """ return Gate(name="PHASE", params=[angle], qubits=[unpack_qubit(qubit)])
[ "def", "PHASE", "(", "angle", ",", "qubit", ")", ":", "return", "Gate", "(", "name", "=", "\"PHASE\"", ",", "params", "=", "[", "angle", "]", ",", "qubits", "=", "[", "unpack_qubit", "(", "qubit", ")", "]", ")" ]
Produces the PHASE gate::

PHASE(phi) = [[1, 0],
[0, exp(1j * phi)]]

This is the same as the RZ gate.

:param angle: The angle to rotate around the z-axis on the bloch sphere.
:param qubit: The qubit to apply the gate to.
:returns: A Gate object.
[ "Produces", "the", "PHASE", "gate", "::" ]
python
train
30.307692
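A short pyquil sketch that applies the PHASE gate above inside a program; the qubit index and angle are arbitrary example values:

import numpy as np
from pyquil.quil import Program
from pyquil.gates import H, PHASE

program = Program(
    H(0),                 # put qubit 0 into superposition
    PHASE(np.pi / 2, 0),  # rotate its phase by pi/2
)
print(program)  # prints the Quil text, e.g. "H 0" then "PHASE(pi/2) 0"
                # (exact angle formatting varies by pyquil version)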
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L806-L830
def update_query(self, query_update, project, query, undelete_descendants=None): """UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if query is not None: route_values['query'] = self._serialize.url('query', query, 'str') query_parameters = {} if undelete_descendants is not None: query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool') content = self._serialize.body(query_update, 'QueryHierarchyItem') response = self._send(http_method='PATCH', location_id='a67d190c-c41f-424b-814d-0e906f659301', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('QueryHierarchyItem', response)
[ "def", "update_query", "(", "self", ",", "query_update", ",", "project", ",", "query", ",", "undelete_descendants", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "query", "is", "not", "None", ":", "route_values", "[", "'query'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'query'", ",", "query", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "undelete_descendants", "is", "not", "None", ":", "query_parameters", "[", "'$undeleteDescendants'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'undelete_descendants'", ",", "undelete_descendants", ",", "'bool'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "query_update", ",", "'QueryHierarchyItem'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'a67d190c-c41f-424b-814d-0e906f659301'", ",", "version", "=", "'5.1-preview.2'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'QueryHierarchyItem'", ",", "response", ")" ]
UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>`
[ "UpdateQuery", ".", "[", "Preview", "API", "]", "Update", "a", "query", "or", "a", "folder", ".", "This", "allows", "you", "to", "update", "rename", "and", "move", "queries", "and", "folders", ".", ":", "param", ":", "class", ":", "<QueryHierarchyItem", ">", "<azure", ".", "devops", ".", "v5_1", ".", "work_item_tracking", ".", "models", ".", "QueryHierarchyItem", ">", "query_update", ":", "The", "query", "to", "update", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "query", ":", "The", "ID", "or", "path", "for", "the", "query", "to", "update", ".", ":", "param", "bool", "undelete_descendants", ":", "Undelete", "the", "children", "of", "this", "folder", ".", "It", "is", "important", "to", "note", "that", "this", "will", "not", "bring", "back", "the", "permission", "changes", "that", "were", "previously", "applied", "to", "the", "descendants", ".", ":", "rtype", ":", ":", "class", ":", "<QueryHierarchyItem", ">", "<azure", ".", "devops", ".", "v5_1", ".", "work", "-", "item", "-", "tracking", ".", "models", ".", "QueryHierarchyItem", ">" ]
python
train
69.76
poppy-project/pypot
pypot/dynamixel/io/abstract_io.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L282-L288
def set_status_return_level(self, srl_for_id, **kwargs): """ Sets status return level to the specified motors. """ convert = kwargs['convert'] if 'convert' in kwargs else self._convert if convert: srl_for_id = dict(zip(srl_for_id.keys(), [('never', 'read', 'always').index(s) for s in srl_for_id.values()])) self._set_status_return_level(srl_for_id, convert=False)
[ "def", "set_status_return_level", "(", "self", ",", "srl_for_id", ",", "*", "*", "kwargs", ")", ":", "convert", "=", "kwargs", "[", "'convert'", "]", "if", "'convert'", "in", "kwargs", "else", "self", ".", "_convert", "if", "convert", ":", "srl_for_id", "=", "dict", "(", "zip", "(", "srl_for_id", ".", "keys", "(", ")", ",", "[", "(", "'never'", ",", "'read'", ",", "'always'", ")", ".", "index", "(", "s", ")", "for", "s", "in", "srl_for_id", ".", "values", "(", ")", "]", ")", ")", "self", ".", "_set_status_return_level", "(", "srl_for_id", ",", "convert", "=", "False", ")" ]
Sets status return level to the specified motors.
[ "Sets", "status", "return", "level", "to", "the", "specified", "motors", "." ]
python
train
62.285714
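The convert branch above turns symbolic status-return levels into the integers the protocol expects by indexing a fixed tuple; the same mapping in isolation, with made-up motor ids:

levels = {1: 'never', 2: 'read', 3: 'always'}  # motor id -> symbolic level

as_ints = dict(zip(levels.keys(),
                   [('never', 'read', 'always').index(s) for s in levels.values()]))
print(as_ints)  # {1: 0, 2: 1, 3: 2}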
Scoppio/RagnarokEngine3
Tutorials/Platforming Block - PyGame Release/Game/Code/TileHeroCharacter.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/Tutorials/Platforming Block - PyGame Release/Game/Code/TileHeroCharacter.py#L40-L46
def set_default_pos(self, defaultPos): """Set the default starting location of our character.""" self.coords = defaultPos self.velocity = r.Vector2() self.desired_position = defaultPos r.Ragnarok.get_world().Camera.pan = self.coords r.Ragnarok.get_world().Camera.desired_pan = self.coords
[ "def", "set_default_pos", "(", "self", ",", "defaultPos", ")", ":", "self", ".", "coords", "=", "defaultPos", "self", ".", "velocity", "=", "r", ".", "Vector2", "(", ")", "self", ".", "desired_position", "=", "defaultPos", "r", ".", "Ragnarok", ".", "get_world", "(", ")", ".", "Camera", ".", "pan", "=", "self", ".", "coords", "r", ".", "Ragnarok", ".", "get_world", "(", ")", ".", "Camera", ".", "desired_pan", "=", "self", ".", "coords" ]
Set the default starting location of our character.
[ "Set", "the", "default", "starting", "location", "of", "our", "character", "." ]
python
train
47.142857
scanny/python-pptx
pptx/oxml/text.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/text.py#L356-L362
def content_children(self): """ A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld). """ text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField} return tuple(elm for elm in self if type(elm) in text_types)
[ "def", "content_children", "(", "self", ")", ":", "text_types", "=", "{", "CT_RegularTextRun", ",", "CT_TextLineBreak", ",", "CT_TextField", "}", "return", "tuple", "(", "elm", "for", "elm", "in", "self", "if", "type", "(", "elm", ")", "in", "text_types", ")" ]
A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
[ "A", "sequence", "containing", "the", "text", "-", "container", "child", "elements", "of", "this", "<a", ":", "p", ">", "element", "i", ".", "e", ".", "(", "a", ":", "r|a", ":", "br|a", ":", "fld", ")", "." ]
python
train
44.142857
scottjbarr/bitfinex
bitfinex/client.py
https://github.com/scottjbarr/bitfinex/blob/03f7c71615fe38c2e28be0ebb761d3106ef0a51a/bitfinex/client.py#L469-L494
def order_book(self, symbol, parameters=None): """ curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]} """ data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters)) for type_ in data.keys(): for list_ in data[type_]: for key, value in list_.items(): list_[key] = float(value) return data
[ "def", "order_book", "(", "self", ",", "symbol", ",", "parameters", "=", "None", ")", ":", "data", "=", "self", ".", "_get", "(", "self", ".", "url_for", "(", "PATH_ORDERBOOK", ",", "path_arg", "=", "symbol", ",", "parameters", "=", "parameters", ")", ")", "for", "type_", "in", "data", ".", "keys", "(", ")", ":", "for", "list_", "in", "data", "[", "type_", "]", ":", "for", "key", ",", "value", "in", "list_", ".", "items", "(", ")", ":", "list_", "[", "key", "]", "=", "float", "(", "value", ")", "return", "data" ]
curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]}
[ "curl", "https", ":", "//", "api", ".", "bitfinex", ".", "com", "/", "v1", "/", "book", "/", "btcusd" ]
python
train
42.769231
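A standalone sketch of the string-to-float conversion that order_book applies to the exchange response; the payload is a made-up dict shaped like the curl output quoted in the docstring:

payload = {
    "bids": [{"price": "561.1101", "amount": "0.985", "timestamp": "1395557729.0"}],
    "asks": [{"price": "562.9999", "amount": "0.985", "timestamp": "1395557711.0"}],
}

for side in payload:
    for entry in payload[side]:
        for key, value in entry.items():
            entry[key] = float(value)

print(type(payload["bids"][0]["price"]))  # <class 'float'>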
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/tex/lsstdoc.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/tex/lsstdoc.py#L389-L423
def format_authors(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format. """ formatted_authors = [] for latex_author in self.authors: formatted_author = convert_lsstdoc_tex( latex_author, format, deparagraph=deparagraph, mathjax=mathjax, smart=smart, extra_args=extra_args) # removes Pandoc's terminal newlines formatted_author = formatted_author.strip() formatted_authors.append(formatted_author) return formatted_authors
[ "def", "format_authors", "(", "self", ",", "format", "=", "'html5'", ",", "deparagraph", "=", "True", ",", "mathjax", "=", "False", ",", "smart", "=", "True", ",", "extra_args", "=", "None", ")", ":", "formatted_authors", "=", "[", "]", "for", "latex_author", "in", "self", ".", "authors", ":", "formatted_author", "=", "convert_lsstdoc_tex", "(", "latex_author", ",", "format", ",", "deparagraph", "=", "deparagraph", ",", "mathjax", "=", "mathjax", ",", "smart", "=", "smart", ",", "extra_args", "=", "extra_args", ")", "# removes Pandoc's terminal newlines", "formatted_author", "=", "formatted_author", ".", "strip", "(", ")", "formatted_authors", ".", "append", "(", "formatted_author", ")", "return", "formatted_authors" ]
Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format.
[ "Get", "the", "document", "authors", "in", "the", "specified", "markup", "format", "." ]
python
valid
40.228571
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L9619-L9657
def delete_vm(name, datacenter, placement=None, power_off=False, service_instance=None): ''' Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter ''' results = {} schema = ESXVirtualMachineDeleteSchema.serialize() try: jsonschema.validate({'name': name, 'datacenter': datacenter, 'placement': placement}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) (results, vm_ref) = _remove_vm(name, datacenter, service_instance=service_instance, placement=placement, power_off=power_off) salt.utils.vmware.delete_vm(vm_ref) results['deleted_vm'] = True return results
[ "def", "delete_vm", "(", "name", ",", "datacenter", ",", "placement", "=", "None", ",", "power_off", "=", "False", ",", "service_instance", "=", "None", ")", ":", "results", "=", "{", "}", "schema", "=", "ESXVirtualMachineDeleteSchema", ".", "serialize", "(", ")", "try", ":", "jsonschema", ".", "validate", "(", "{", "'name'", ":", "name", ",", "'datacenter'", ":", "datacenter", ",", "'placement'", ":", "placement", "}", ",", "schema", ")", "except", "jsonschema", ".", "exceptions", ".", "ValidationError", "as", "exc", ":", "raise", "InvalidConfigError", "(", "exc", ")", "(", "results", ",", "vm_ref", ")", "=", "_remove_vm", "(", "name", ",", "datacenter", ",", "service_instance", "=", "service_instance", ",", "placement", "=", "placement", ",", "power_off", "=", "power_off", ")", "salt", ".", "utils", ".", "vmware", ".", "delete_vm", "(", "vm_ref", ")", "results", "[", "'deleted_vm'", "]", "=", "True", "return", "results" ]
Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter
[ "Deletes", "a", "virtual", "machine", "defined", "by", "name", "and", "placement" ]
python
train
31.410256
BreakingBytes/simkit
simkit/core/layers.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L203-L219
def edit(self, data_src, value): """ Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict """ # check if opening file if 'filename' in value: items = [k for k, v in self.reg.data_source.iteritems() if v == data_src] self.reg.unregister(items) # remove items from Registry # open file and register new data self.open(data_src, value['filename'], value.get('path')) self.layer[data_src].update(value)
[ "def", "edit", "(", "self", ",", "data_src", ",", "value", ")", ":", "# check if opening file", "if", "'filename'", "in", "value", ":", "items", "=", "[", "k", "for", "k", ",", "v", "in", "self", ".", "reg", ".", "data_source", ".", "iteritems", "(", ")", "if", "v", "==", "data_src", "]", "self", ".", "reg", ".", "unregister", "(", "items", ")", "# remove items from Registry", "# open file and register new data", "self", ".", "open", "(", "data_src", ",", "value", "[", "'filename'", "]", ",", "value", ".", "get", "(", "'path'", ")", ")", "self", ".", "layer", "[", "data_src", "]", ".", "update", "(", "value", ")" ]
Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict
[ "Edit", "data", "layer", "." ]
python
train
36.647059
Aluriak/bubble-tools
bubbletools/validator.py
https://github.com/Aluriak/bubble-tools/blob/f014f4a1986abefc80dc418feaa05ed258c2221a/bubbletools/validator.py#L58-L99
def inclusions_validation(tree:BubbleTree) -> iter: """Yield message about inclusions inconsistancies""" # search for powernode overlapping for one, two in it.combinations(tree.inclusions, 2): assert len(one) == len(one.strip()) assert len(two) == len(two.strip()) one_inc = set(included(one, tree.inclusions)) two_inc = set(included(two, tree.inclusions)) common_inc = one_inc & two_inc if len(common_inc) == one_inc: if not two in one_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(two, one)) if len(common_inc) == two_inc: if not one in two_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(one, two)) if len(common_inc) > 0: # one and two are not disjoint if len(common_inc) == len(one_inc) or len(common_inc) == len(two_inc): # one is included in the other pass else: # problem: some nodes are shared, but not all yield ("ERROR overlapping powernodes:" " {} nodes are shared by {} and {}," " which are not in inclusion." " Shared nodes are {}".format( len(common_inc), one, two, common_inc)) for pwn in tree.powernodes(): # search for empty powernodes if len(tree.inclusions[pwn]) == 0: yield ("WARNING empty powernode: {} is defined," " but contains nothing".format(pwn)) # search for singleton powernodes if len(tree.inclusions[pwn]) == 1: yield ("WARNING singleton powernode: {} is defined," " but contains only {}".format(pwn, tree.inclusions[pwn])) # search for cycles nodes_in_cycles = utils.have_cycle(tree.inclusions) if nodes_in_cycles: yield ("ERROR inclusion cycle: the following {}" " nodes are involved: {}".format( len(nodes_in_cycles), set(nodes_in_cycles)))
[ "def", "inclusions_validation", "(", "tree", ":", "BubbleTree", ")", "->", "iter", ":", "# search for powernode overlapping", "for", "one", ",", "two", "in", "it", ".", "combinations", "(", "tree", ".", "inclusions", ",", "2", ")", ":", "assert", "len", "(", "one", ")", "==", "len", "(", "one", ".", "strip", "(", ")", ")", "assert", "len", "(", "two", ")", "==", "len", "(", "two", ".", "strip", "(", ")", ")", "one_inc", "=", "set", "(", "included", "(", "one", ",", "tree", ".", "inclusions", ")", ")", "two_inc", "=", "set", "(", "included", "(", "two", ",", "tree", ".", "inclusions", ")", ")", "common_inc", "=", "one_inc", "&", "two_inc", "if", "len", "(", "common_inc", ")", "==", "one_inc", ":", "if", "not", "two", "in", "one_inc", ":", "yield", "(", "\"ERROR inconsistency in inclusions: {} is both\"", "\" included and not included in {}.\"", ".", "format", "(", "two", ",", "one", ")", ")", "if", "len", "(", "common_inc", ")", "==", "two_inc", ":", "if", "not", "one", "in", "two_inc", ":", "yield", "(", "\"ERROR inconsistency in inclusions: {} is both\"", "\" included and not included in {}.\"", ".", "format", "(", "one", ",", "two", ")", ")", "if", "len", "(", "common_inc", ")", ">", "0", ":", "# one and two are not disjoint", "if", "len", "(", "common_inc", ")", "==", "len", "(", "one_inc", ")", "or", "len", "(", "common_inc", ")", "==", "len", "(", "two_inc", ")", ":", "# one is included in the other", "pass", "else", ":", "# problem: some nodes are shared, but not all", "yield", "(", "\"ERROR overlapping powernodes:\"", "\" {} nodes are shared by {} and {},\"", "\" which are not in inclusion.\"", "\" Shared nodes are {}\"", ".", "format", "(", "len", "(", "common_inc", ")", ",", "one", ",", "two", ",", "common_inc", ")", ")", "for", "pwn", "in", "tree", ".", "powernodes", "(", ")", ":", "# search for empty powernodes", "if", "len", "(", "tree", ".", "inclusions", "[", "pwn", "]", ")", "==", "0", ":", "yield", "(", "\"WARNING empty powernode: {} is defined,\"", "\" but contains nothing\"", ".", "format", "(", "pwn", ")", ")", "# search for singleton powernodes", "if", "len", "(", "tree", ".", "inclusions", "[", "pwn", "]", ")", "==", "1", ":", "yield", "(", "\"WARNING singleton powernode: {} is defined,\"", "\" but contains only {}\"", ".", "format", "(", "pwn", ",", "tree", ".", "inclusions", "[", "pwn", "]", ")", ")", "# search for cycles", "nodes_in_cycles", "=", "utils", ".", "have_cycle", "(", "tree", ".", "inclusions", ")", "if", "nodes_in_cycles", ":", "yield", "(", "\"ERROR inclusion cycle: the following {}\"", "\" nodes are involved: {}\"", ".", "format", "(", "len", "(", "nodes_in_cycles", ")", ",", "set", "(", "nodes_in_cycles", ")", ")", ")" ]
Yield message about inclusions inconsistencies
[ "Yield", "message", "about", "inclusions", "inconsistencies" ]
python
train
51.095238
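The overlap test above reduces to set algebra over the nodes included in each pair of powernodes; the same idea on made-up groups:

from itertools import combinations

groups = {"p1": {"a", "b", "c"}, "p2": {"b", "c"}, "p3": {"c", "d"}}

for one, two in combinations(groups, 2):
    common = groups[one] & groups[two]
    # Shared nodes without one group containing the other indicate an overlap.
    if common and common not in (groups[one], groups[two]):
        print("%s and %s overlap without inclusion: %s" % (one, two, sorted(common)))
# p1 and p3 overlap without inclusion: ['c']
# p2 and p3 overlap without inclusion: ['c']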
samghelms/mathviz
mathviz_hopper/src/bottle.py
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L950-L952
def patch(self, path=None, method='PATCH', **options): """ Equals :meth:`route` with a ``PATCH`` method parameter. """ return self.route(path, method, **options)
[ "def", "patch", "(", "self", ",", "path", "=", "None", ",", "method", "=", "'PATCH'", ",", "*", "*", "options", ")", ":", "return", "self", ".", "route", "(", "path", ",", "method", ",", "*", "*", "options", ")" ]
Equals :meth:`route` with a ``PATCH`` method parameter.
[ "Equals", ":", "meth", ":", "route", "with", "a", "PATCH", "method", "parameter", "." ]
python
train
58.333333
tensorflow/probability
tensorflow_probability/python/math/numeric.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/numeric.py#L59-L149
def soft_threshold(x, threshold, name=None): """Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning """ # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]): x = tf.convert_to_tensor(value=x, name='x') threshold = tf.convert_to_tensor( value=threshold, dtype=x.dtype, name='threshold') return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)
[ "def", "soft_threshold", "(", "x", ",", "threshold", ",", "name", "=", "None", ")", ":", "# https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'soft_threshold'", ",", "[", "x", ",", "threshold", "]", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ")", "threshold", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "threshold", ",", "dtype", "=", "x", ".", "dtype", ",", "name", "=", "'threshold'", ")", "return", "tf", ".", "sign", "(", "x", ")", "*", "tf", ".", "maximum", "(", "tf", ".", "abs", "(", "x", ")", "-", "threshold", ",", "0.", ")" ]
Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning
[ "Soft", "Thresholding", "operator", "." ]
python
test
35
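A NumPy check of the soft-thresholding definition given in the docstring above, independent of TensorFlow; the test values are arbitrary:

import numpy as np

def soft_threshold_np(x, threshold):
    # sign(x) * max(|x| - threshold, 0), matching the piecewise definition.
    return np.sign(x) * np.maximum(np.abs(x) - threshold, 0.0)

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(soft_threshold_np(x, 1.0))  # [-2. -0.  0.  0.  2.]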
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curve_demo.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curve_demo.py#L51-L195
def start_runs( logdir, steps, run_name, thresholds, mask_every_other_prediction=False): """Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1. """ tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(42) # Create a normal distribution layer used to generate true color labels. distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.) # Sample the distribution to generate colors. Lets generate different numbers # of each color. The first dimension is the count of examples. # The calls to sample() are given fixed random seed values that are "magic" # in that they correspond to the default seeds for those ops when the PR # curve test (which depends on this code) was written. We've pinned these # instead of continuing to use the defaults since the defaults are based on # node IDs from the sequence of nodes added to the graph, which can silently # change when this code or any TF op implementations it uses are modified. # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds. # Generate reds. number_of_reds = 100 true_reds = tf.clip_by_value( tf.concat([ 255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)), tf.abs(distribution.sample([number_of_reds, 2], seed=34)) ], axis=1), 0, 255) # Generate greens. number_of_greens = 200 true_greens = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_greens, 1], seed=61)), 255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)), tf.abs(distribution.sample([number_of_greens, 1], seed=105)) ], axis=1), 0, 255) # Generate blues. number_of_blues = 150 true_blues = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_blues, 2], seed=132)), 255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153)) ], axis=1), 0, 255) # Assign each color a vector of 3 booleans based on its true label. labels = tf.concat([ tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)), tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)), tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)), ], axis=0) # We introduce 3 normal distributions. They are used to predict whether a # color falls under a certain class (based on distances from corners of the # color triangle). The distributions vary per color. We have the distributions # narrow over time. initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)] iteration = tf.compat.v1.placeholder(tf.int32, shape=[]) red_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[0] - iteration, dtype=tf.float32)) green_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[1] - iteration, dtype=tf.float32)) blue_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[2] - iteration, dtype=tf.float32)) # Make predictions (assign 3 probabilities to each color based on each color's # distance to each of the 3 corners). We seek double the area in the right # tail of the normal distribution. 
examples = tf.concat([true_reds, true_greens, true_blues], axis=0) probabilities_colors_are_red = (1 - red_predictor.cdf( tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2 probabilities_colors_are_green = (1 - green_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2 probabilities_colors_are_blue = (1 - blue_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2 predictions = ( probabilities_colors_are_red, probabilities_colors_are_green, probabilities_colors_are_blue ) # This is the crucial piece. We write data required for generating PR curves. # We create 1 summary per class because we create 1 PR curve per class. for i, color in enumerate(('red', 'green', 'blue')): description = ('The probabilities used to create this PR curve are ' 'generated from a normal distribution. Its standard ' 'deviation is initially %0.0f and decreases over time.' % initial_standard_deviations[i]) weights = None if mask_every_other_prediction: # Assign a weight of 0 to every even-indexed prediction. Odd-indexed # predictions are assigned a default weight of 1. consecutive_indices = tf.reshape( tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i])) weights = tf.cast(consecutive_indices % 2, dtype=tf.float32) summary.op( name=color, labels=labels[:, i], predictions=predictions[i], num_thresholds=thresholds, weights=weights, display_name='classifying %s' % color, description=description) merged_summary_op = tf.compat.v1.summary.merge_all() events_directory = os.path.join(logdir, run_name) sess = tf.compat.v1.Session() writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph) for step in xrange(steps): feed_dict = { iteration: step, } merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict) writer.add_summary(merged_summary, step) writer.close()
[ "def", "start_runs", "(", "logdir", ",", "steps", ",", "run_name", ",", "thresholds", ",", "mask_every_other_prediction", "=", "False", ")", ":", "tf", ".", "compat", ".", "v1", ".", "reset_default_graph", "(", ")", "tf", ".", "compat", ".", "v1", ".", "set_random_seed", "(", "42", ")", "# Create a normal distribution layer used to generate true color labels.", "distribution", "=", "tf", ".", "compat", ".", "v1", ".", "distributions", ".", "Normal", "(", "loc", "=", "0.", ",", "scale", "=", "142.", ")", "# Sample the distribution to generate colors. Lets generate different numbers", "# of each color. The first dimension is the count of examples.", "# The calls to sample() are given fixed random seed values that are \"magic\"", "# in that they correspond to the default seeds for those ops when the PR", "# curve test (which depends on this code) was written. We've pinned these", "# instead of continuing to use the defaults since the defaults are based on", "# node IDs from the sequence of nodes added to the graph, which can silently", "# change when this code or any TF op implementations it uses are modified.", "# TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.", "# Generate reds.", "number_of_reds", "=", "100", "true_reds", "=", "tf", ".", "clip_by_value", "(", "tf", ".", "concat", "(", "[", "255", "-", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_reds", ",", "1", "]", ",", "seed", "=", "11", ")", ")", ",", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_reds", ",", "2", "]", ",", "seed", "=", "34", ")", ")", "]", ",", "axis", "=", "1", ")", ",", "0", ",", "255", ")", "# Generate greens.", "number_of_greens", "=", "200", "true_greens", "=", "tf", ".", "clip_by_value", "(", "tf", ".", "concat", "(", "[", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_greens", ",", "1", "]", ",", "seed", "=", "61", ")", ")", ",", "255", "-", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_greens", ",", "1", "]", ",", "seed", "=", "82", ")", ")", ",", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_greens", ",", "1", "]", ",", "seed", "=", "105", ")", ")", "]", ",", "axis", "=", "1", ")", ",", "0", ",", "255", ")", "# Generate blues.", "number_of_blues", "=", "150", "true_blues", "=", "tf", ".", "clip_by_value", "(", "tf", ".", "concat", "(", "[", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_blues", ",", "2", "]", ",", "seed", "=", "132", ")", ")", ",", "255", "-", "tf", ".", "abs", "(", "distribution", ".", "sample", "(", "[", "number_of_blues", ",", "1", "]", ",", "seed", "=", "153", ")", ")", "]", ",", "axis", "=", "1", ")", ",", "0", ",", "255", ")", "# Assign each color a vector of 3 booleans based on its true label.", "labels", "=", "tf", ".", "concat", "(", "[", "tf", ".", "tile", "(", "tf", ".", "constant", "(", "[", "[", "True", ",", "False", ",", "False", "]", "]", ")", ",", "(", "number_of_reds", ",", "1", ")", ")", ",", "tf", ".", "tile", "(", "tf", ".", "constant", "(", "[", "[", "False", ",", "True", ",", "False", "]", "]", ")", ",", "(", "number_of_greens", ",", "1", ")", ")", ",", "tf", ".", "tile", "(", "tf", ".", "constant", "(", "[", "[", "False", ",", "False", ",", "True", "]", "]", ")", ",", "(", "number_of_blues", ",", "1", ")", ")", ",", "]", ",", "axis", "=", "0", ")", "# We introduce 3 normal distributions. They are used to predict whether a", "# color falls under a certain class (based on distances from corners of the", "# color triangle). The distributions vary per color. We have the distributions", "# narrow over time.", "initial_standard_deviations", "=", "[", "v", "+", "FLAGS", ".", "steps", "for", "v", "in", "(", "158", ",", "200", ",", "242", ")", "]", "iteration", "=", "tf", ".", "compat", ".", "v1", ".", "placeholder", "(", "tf", ".", "int32", ",", "shape", "=", "[", "]", ")", "red_predictor", "=", "tf", ".", "compat", ".", "v1", ".", "distributions", ".", "Normal", "(", "loc", "=", "0.", ",", "scale", "=", "tf", ".", "cast", "(", "initial_standard_deviations", "[", "0", "]", "-", "iteration", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "green_predictor", "=", "tf", ".", "compat", ".", "v1", ".", "distributions", ".", "Normal", "(", "loc", "=", "0.", ",", "scale", "=", "tf", ".", "cast", "(", "initial_standard_deviations", "[", "1", "]", "-", "iteration", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "blue_predictor", "=", "tf", ".", "compat", ".", "v1", ".", "distributions", ".", "Normal", "(", "loc", "=", "0.", ",", "scale", "=", "tf", ".", "cast", "(", "initial_standard_deviations", "[", "2", "]", "-", "iteration", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "# Make predictions (assign 3 probabilities to each color based on each color's", "# distance to each of the 3 corners). We seek double the area in the right", "# tail of the normal distribution.", "examples", "=", "tf", ".", "concat", "(", "[", "true_reds", ",", "true_greens", ",", "true_blues", "]", ",", "axis", "=", "0", ")", "probabilities_colors_are_red", "=", "(", "1", "-", "red_predictor", ".", "cdf", "(", "tf", ".", "norm", "(", "tensor", "=", "examples", "-", "tf", ".", "constant", "(", "[", "255.", ",", "0", ",", "0", "]", ")", ",", "axis", "=", "1", ")", ")", ")", "*", "2", "probabilities_colors_are_green", "=", "(", "1", "-", "green_predictor", ".", "cdf", "(", "tf", ".", "norm", "(", "tensor", "=", "examples", "-", "tf", ".", "constant", "(", "[", "0", ",", "255.", ",", "0", "]", ")", ",", "axis", "=", "1", ")", ")", ")", "*", "2", "probabilities_colors_are_blue", "=", "(", "1", "-", "blue_predictor", ".", "cdf", "(", "tf", ".", "norm", "(", "tensor", "=", "examples", "-", "tf", ".", "constant", "(", "[", "0", ",", "0", ",", "255.", "]", ")", ",", "axis", "=", "1", ")", ")", ")", "*", "2", "predictions", "=", "(", "probabilities_colors_are_red", ",", "probabilities_colors_are_green", ",", "probabilities_colors_are_blue", ")", "# This is the crucial piece. We write data required for generating PR curves.", "# We create 1 summary per class because we create 1 PR curve per class.", "for", "i", ",", "color", "in", "enumerate", "(", "(", "'red'", ",", "'green'", ",", "'blue'", ")", ")", ":", "description", "=", "(", "'The probabilities used to create this PR curve are '", "'generated from a normal distribution. Its standard '", "'deviation is initially %0.0f and decreases over time.'", "%", "initial_standard_deviations", "[", "i", "]", ")", "weights", "=", "None", "if", "mask_every_other_prediction", ":", "# Assign a weight of 0 to every even-indexed prediction. Odd-indexed", "# predictions are assigned a default weight of 1.", "consecutive_indices", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "tf", ".", "size", "(", "input", "=", "predictions", "[", "i", "]", ")", ")", ",", "tf", ".", "shape", "(", "input", "=", "predictions", "[", "i", "]", ")", ")", "weights", "=", "tf", ".", "cast", "(", "consecutive_indices", "%", "2", ",", "dtype", "=", "tf", ".", "float32", ")", "summary", ".", "op", "(", "name", "=", "color", ",", "labels", "=", "labels", "[", ":", ",", "i", "]", ",", "predictions", "=", "predictions", "[", "i", "]", ",", "num_thresholds", "=", "thresholds", ",", "weights", "=", "weights", ",", "display_name", "=", "'classifying %s'", "%", "color", ",", "description", "=", "description", ")", "merged_summary_op", "=", "tf", ".", "compat", ".", "v1", ".", "summary", ".", "merge_all", "(", ")", "events_directory", "=", "os", ".", "path", ".", "join", "(", "logdir", ",", "run_name", ")", "sess", "=", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "writer", "=", "tf", ".", "compat", ".", "v1", ".", "summary", ".", "FileWriter", "(", "events_directory", ",", "sess", ".", "graph", ")", "for", "step", "in", "xrange", "(", "steps", ")", ":", "feed_dict", "=", "{", "iteration", ":", "step", ",", "}", "merged_summary", "=", "sess", ".", "run", "(", "merged_summary_op", ",", "feed_dict", "=", "feed_dict", ")", "writer", ".", "add_summary", "(", "merged_summary", ",", "step", ")", "writer", ".", "close", "(", ")" ]
Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1.
[ "Generate", "a", "PR", "curve", "with", "precision", "and", "recall", "evenly", "weighted", "." ]
python
train
39.558621
edwards-lab/libGWAS
libgwas/snp_boundary_check.py
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/snp_boundary_check.py#L77-L92
def ReportConfiguration(self, f): """Report the boundary configuration details :param f: File (or standard out/err) :return: None """ if BoundaryCheck.chrom != -1: print >> f, BuildReportLine("CHROM", BoundaryCheck.chrom) if len(self.start_bounds) > 0: bounds = ",".join(["%s-%s" % (a[0], a[1]) for a in zip(self.start_bounds, self.end_bounds)]) print >> f, BuildReportLine("SNP BOUNDARY", bounds) if len(self.ignored_rs) > 0: print >> f, BuildReportLine("IGNORED RS", ",".join(self.ignored_rs)) if len(self.target_rs) > 0: print >> f, BuildReportLine("TARGET RS", ",".join(self.target_rs))
[ "def", "ReportConfiguration", "(", "self", ",", "f", ")", ":", "if", "BoundaryCheck", ".", "chrom", "!=", "-", "1", ":", "print", ">>", "f", ",", "BuildReportLine", "(", "\"CHROM\"", ",", "BoundaryCheck", ".", "chrom", ")", "if", "len", "(", "self", ".", "start_bounds", ")", ">", "0", ":", "bounds", "=", "\",\"", ".", "join", "(", "[", "\"%s-%s\"", "%", "(", "a", "[", "0", "]", ",", "a", "[", "1", "]", ")", "for", "a", "in", "zip", "(", "self", ".", "start_bounds", ",", "self", ".", "end_bounds", ")", "]", ")", "print", ">>", "f", ",", "BuildReportLine", "(", "\"SNP BOUNDARY\"", ",", "bounds", ")", "if", "len", "(", "self", ".", "ignored_rs", ")", ">", "0", ":", "print", ">>", "f", ",", "BuildReportLine", "(", "\"IGNORED RS\"", ",", "\",\"", ".", "join", "(", "self", ".", "ignored_rs", ")", ")", "if", "len", "(", "self", ".", "target_rs", ")", ">", "0", ":", "print", ">>", "f", ",", "BuildReportLine", "(", "\"TARGET RS\"", ",", "\",\"", ".", "join", "(", "self", ".", "target_rs", ")", ")" ]
Report the boundary configuration details :param f: File (or standard out/err) :return: None
[ "Report", "the", "boundary", "configuration", "details" ]
python
train
44.5625
inasafe/inasafe
scripts/create_api_docs.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/scripts/create_api_docs.py#L24-L49
def create_top_level_index_entry(title, max_depth, subtitles): """Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str """ return_text = title + '\n' dash = '-' * len(title) + '\n' return_text += dash + '\n' return_text += '.. toctree::' + '\n' return_text += ' :maxdepth: ' + str(max_depth) + '\n\n' for subtitle in subtitles: return_text += ' ' + subtitle + '\n\n' return return_text
[ "def", "create_top_level_index_entry", "(", "title", ",", "max_depth", ",", "subtitles", ")", ":", "return_text", "=", "title", "+", "'\\n'", "dash", "=", "'-'", "*", "len", "(", "title", ")", "+", "'\\n'", "return_text", "+=", "dash", "+", "'\\n'", "return_text", "+=", "'.. toctree::'", "+", "'\\n'", "return_text", "+=", "' :maxdepth: '", "+", "str", "(", "max_depth", ")", "+", "'\\n\\n'", "for", "subtitle", "in", "subtitles", ":", "return_text", "+=", "' '", "+", "subtitle", "+", "'\\n\\n'", "return", "return_text" ]
Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str
[ "Function", "for", "creating", "a", "text", "entry", "in", "index", ".", "rst", "for", "its", "content", "." ]
python
train
28.769231
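A quick usage sketch for create_top_level_index_entry, assuming the function above is in scope; the title and subtitles are made up:

text = create_top_level_index_entry('API Documentation', 2, ['gui', 'safe'])
print(text)
# Produces an RST block: the title, a dashed underline of the same length,
# a ".. toctree::" directive with ":maxdepth: 2", and one indented entry per
# subtitle ("gui", "safe"), each followed by a blank line.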
brbsix/subnuker
subnuker.py
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L491-L499
def pattern_logic_aeidon(): """Return patterns to be used for searching subtitles via aeidon.""" if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) elif Config.options.regex: return Config.REGEX else: return Config.TERMS
[ "def", "pattern_logic_aeidon", "(", ")", ":", "if", "Config", ".", "options", ".", "pattern_files", ":", "return", "prep_patterns", "(", "Config", ".", "options", ".", "pattern_files", ")", "elif", "Config", ".", "options", ".", "regex", ":", "return", "Config", ".", "REGEX", "else", ":", "return", "Config", ".", "TERMS" ]
Return patterns to be used for searching subtitles via aeidon.
[ "Return", "patterns", "to", "be", "used", "for", "searching", "subtitles", "via", "aeidon", "." ]
python
train
31.777778
mlperf/training
single_stage_detector/ssd/coco.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/single_stage_detector/ssd/coco.py#L129-L155
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids
[ "def", "getAnnIds", "(", "self", ",", "imgIds", "=", "[", "]", ",", "catIds", "=", "[", "]", ",", "areaRng", "=", "[", "]", ",", "iscrowd", "=", "None", ")", ":", "imgIds", "=", "imgIds", "if", "_isArrayLike", "(", "imgIds", ")", "else", "[", "imgIds", "]", "catIds", "=", "catIds", "if", "_isArrayLike", "(", "catIds", ")", "else", "[", "catIds", "]", "if", "len", "(", "imgIds", ")", "==", "len", "(", "catIds", ")", "==", "len", "(", "areaRng", ")", "==", "0", ":", "anns", "=", "self", ".", "dataset", "[", "'annotations'", "]", "else", ":", "if", "not", "len", "(", "imgIds", ")", "==", "0", ":", "lists", "=", "[", "self", ".", "imgToAnns", "[", "imgId", "]", "for", "imgId", "in", "imgIds", "if", "imgId", "in", "self", ".", "imgToAnns", "]", "anns", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "lists", ")", ")", "else", ":", "anns", "=", "self", ".", "dataset", "[", "'annotations'", "]", "anns", "=", "anns", "if", "len", "(", "catIds", ")", "==", "0", "else", "[", "ann", "for", "ann", "in", "anns", "if", "ann", "[", "'category_id'", "]", "in", "catIds", "]", "anns", "=", "anns", "if", "len", "(", "areaRng", ")", "==", "0", "else", "[", "ann", "for", "ann", "in", "anns", "if", "ann", "[", "'area'", "]", ">", "areaRng", "[", "0", "]", "and", "ann", "[", "'area'", "]", "<", "areaRng", "[", "1", "]", "]", "if", "not", "iscrowd", "==", "None", ":", "ids", "=", "[", "ann", "[", "'id'", "]", "for", "ann", "in", "anns", "if", "ann", "[", "'iscrowd'", "]", "==", "iscrowd", "]", "else", ":", "ids", "=", "[", "ann", "[", "'id'", "]", "for", "ann", "in", "anns", "]", "return", "ids" ]
Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids
[ "Get", "ann", "ids", "that", "satisfy", "given", "filter", "conditions", ".", "default", "skips", "that", "filter", ":", "param", "imgIds", "(", "int", "array", ")", ":", "get", "anns", "for", "given", "imgs", "catIds", "(", "int", "array", ")", ":", "get", "anns", "for", "given", "cats", "areaRng", "(", "float", "array", ")", ":", "get", "anns", "for", "given", "area", "range", "(", "e", ".", "g", ".", "[", "0", "inf", "]", ")", "iscrowd", "(", "boolean", ")", ":", "get", "anns", "for", "given", "crowd", "label", "(", "False", "or", "True", ")", ":", "return", ":", "ids", "(", "int", "array", ")", ":", "integer", "array", "of", "ann", "ids" ]
python
train
53.777778
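A hedged usage sketch for getAnnIds above; it assumes the module is importable as `coco` from the record's path and that the rest of the standard pycocotools-style COCO API this file mirrors (constructor, getImgIds, loadAnns) is available. The annotation file path and filter values are placeholders.

# Placeholder path and filters; the surrounding COCO API is assumed from pycocotools.
from coco import COCO

coco = COCO('annotations/instances_val2017.json')
img_ids = coco.getImgIds()
ann_ids = coco.getAnnIds(imgIds=img_ids[:10],           # only the first ten images
                         areaRng=[1000, float('inf')],  # drop tiny objects
                         iscrowd=False)                 # skip crowd regions
anns = coco.loadAnns(ann_ids)
print(len(anns), 'annotations selected')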
sendgrid/sendgrid-python
sendgrid/helpers/mail/ganalytics.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/ganalytics.py#L156-L176
def get(self): """ Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict """ keys = ["enable", "utm_source", "utm_medium", "utm_term", "utm_content", "utm_campaign"] ganalytics = {} for key in keys: value = getattr(self, key, None) if value is not None: if isinstance(value, bool): ganalytics[key] = value else: ganalytics[key] = value.get() return ganalytics
[ "def", "get", "(", "self", ")", ":", "keys", "=", "[", "\"enable\"", ",", "\"utm_source\"", ",", "\"utm_medium\"", ",", "\"utm_term\"", ",", "\"utm_content\"", ",", "\"utm_campaign\"", "]", "ganalytics", "=", "{", "}", "for", "key", "in", "keys", ":", "value", "=", "getattr", "(", "self", ",", "key", ",", "None", ")", "if", "value", "is", "not", "None", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "ganalytics", "[", "key", "]", "=", "value", "else", ":", "ganalytics", "[", "key", "]", "=", "value", ".", "get", "(", ")", "return", "ganalytics" ]
Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict
[ "Get", "a", "JSON", "-", "ready", "representation", "of", "this", "Ganalytics", "." ]
python
train
28.47619
apache/incubator-mxnet
example/distributed_training/cifar10_dist.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/distributed_training/cifar10_dist.py#L110-L142
def evaluate_accuracy(data_iterator, network): """ Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element """ acc = mx.metric.Accuracy() # Iterate through data and label for i, (data, label) in enumerate(data_iterator): # Get the data and label into the GPU data = data.as_in_context(ctx[0]) label = label.as_in_context(ctx[0]) # Get network's output which is a probability distribution # Apply argmax on the probability distribution to get network's classification. output = network(data) predictions = nd.argmax(output, axis=1) # Give network's prediction and the correct label to update the metric acc.update(preds=predictions, labels=label) # Return the accuracy return acc.get()[1]
[ "def", "evaluate_accuracy", "(", "data_iterator", ",", "network", ")", ":", "acc", "=", "mx", ".", "metric", ".", "Accuracy", "(", ")", "# Iterate through data and label", "for", "i", ",", "(", "data", ",", "label", ")", "in", "enumerate", "(", "data_iterator", ")", ":", "# Get the data and label into the GPU", "data", "=", "data", ".", "as_in_context", "(", "ctx", "[", "0", "]", ")", "label", "=", "label", ".", "as_in_context", "(", "ctx", "[", "0", "]", ")", "# Get network's output which is a probability distribution", "# Apply argmax on the probability distribution to get network's classification.", "output", "=", "network", "(", "data", ")", "predictions", "=", "nd", ".", "argmax", "(", "output", ",", "axis", "=", "1", ")", "# Give network's prediction and the correct label to update the metric", "acc", ".", "update", "(", "preds", "=", "predictions", ",", "labels", "=", "label", ")", "# Return the accuracy", "return", "acc", ".", "get", "(", ")", "[", "1", "]" ]
Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element
[ "Measure", "the", "accuracy", "of", "ResNet" ]
python
train
27.181818
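The function above depends on a module-level ctx and MXNet's metric API; as a framework-free illustration of the same bookkeeping (argmax over class scores, running accuracy across batches), here is a plain-Python sketch with made-up data. It is not part of the original file.

# Plain-Python sketch of the accuracy loop; scores and labels are made up.
def batch_accuracy(batches):
    correct, total = 0, 0
    for scores, labels in batches:
        for row, label in zip(scores, labels):
            prediction = max(range(len(row)), key=row.__getitem__)  # argmax
            correct += int(prediction == label)
            total += 1
    return correct / total

batches = [
    ([[0.1, 0.9], [0.8, 0.2]], [1, 0]),  # both predictions correct
    ([[0.3, 0.7], [0.6, 0.4]], [0, 0]),  # one wrong, one correct
]
print(batch_accuracy(batches))  # 0.75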
PyAr/fades
fades/pipmanager.py
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/pipmanager.py#L77-L89
def get_version(self, dependency): """Return the installed version parsing the output of 'pip show'.""" logger.debug("getting installed version for %s", dependency) stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)]) version = [line for line in stdout if line.startswith('Version:')] if len(version) == 1: version = version[0].strip().split()[1] logger.debug("Installed version of %s is: %s", dependency, version) return version else: logger.error('Fades is having problems getting the installed version. ' 'Run with -v or check the logs for details') return ''
[ "def", "get_version", "(", "self", ",", "dependency", ")", ":", "logger", ".", "debug", "(", "\"getting installed version for %s\"", ",", "dependency", ")", "stdout", "=", "helpers", ".", "logged_exec", "(", "[", "self", ".", "pip_exe", ",", "\"show\"", ",", "str", "(", "dependency", ")", "]", ")", "version", "=", "[", "line", "for", "line", "in", "stdout", "if", "line", ".", "startswith", "(", "'Version:'", ")", "]", "if", "len", "(", "version", ")", "==", "1", ":", "version", "=", "version", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "1", "]", "logger", ".", "debug", "(", "\"Installed version of %s is: %s\"", ",", "dependency", ",", "version", ")", "return", "version", "else", ":", "logger", ".", "error", "(", "'Fades is having problems getting the installed version. '", "'Run with -v or check the logs for details'", ")", "return", "''" ]
Return the installed version parsing the output of 'pip show'.
[ "Return", "the", "installed", "version", "parsing", "the", "output", "of", "pip", "show", "." ]
python
train
53.846154
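A small sketch of just the parsing step used above, run on canned `pip show` output instead of a live subprocess; the package name and version shown are hypothetical.

# Canned `pip show` output; only the 'Version:' parsing from the record is mirrored.
stdout = [
    'Name: requests',
    'Version: 2.31.0',
    'Summary: Python HTTP for Humans.',
]
version_lines = [line for line in stdout if line.startswith('Version:')]
if len(version_lines) == 1:
    version = version_lines[0].strip().split()[1]
    print(version)  # 2.31.0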
tensorlayer/tensorlayer
tensorlayer/nlp.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/nlp.py#L327-L332
def id_to_word(self, word_id): """Returns the word string of an integer word id.""" if word_id >= len(self.reverse_vocab): return self.reverse_vocab[self.unk_id] else: return self.reverse_vocab[word_id]
[ "def", "id_to_word", "(", "self", ",", "word_id", ")", ":", "if", "word_id", ">=", "len", "(", "self", ".", "reverse_vocab", ")", ":", "return", "self", ".", "reverse_vocab", "[", "self", ".", "unk_id", "]", "else", ":", "return", "self", ".", "reverse_vocab", "[", "word_id", "]" ]
Returns the word string of an integer word id.
[ "Returns", "the", "word", "string", "of", "an", "integer", "word", "id", "." ]
python
valid
40.833333
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pytree.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L112-L120
def set_prefix(self, prefix): """ Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("set_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) self.prefix = prefix
[ "def", "set_prefix", "(", "self", ",", "prefix", ")", ":", "warnings", ".", "warn", "(", "\"set_prefix() is deprecated; use the prefix property\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "prefix", "=", "prefix" ]
Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly.
[ "Set", "the", "prefix", "for", "the", "node", "(", "see", "Leaf", "class", ")", "." ]
python
train
35.111111
usc-isi-i2/etk
etk/document.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/document.py#L102-L194
def extract(self, extractor: Extractor, extractable: Extractable = None, tokenizer: Tokenizer = None, joiner: str = " ", **options) -> List[Extraction]: """ Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions. """ if not extractable: extractable = self if not tokenizer: tokenizer = self.etk.default_tokenizer extracted_results = list() if extractor.input_type == InputType.TOKENS: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string") elif isinstance(extractable.value, dict): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string") tokens = extractable.get_tokens(tokenizer) if tokens: extracted_results = extractor.extract(tokens, **options) else: raise ExtractorValueError( "Extractor needs string, tokenizer needs string to tokenize, got " + str(type(extractable.value))) elif extractor.input_type == InputType.TEXT: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log("Extractor needs string, got extractable value as list, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as list, converting to string") elif isinstance(extractable.value, dict): self.etk.log("Extractor needs string, got extractable value as dict, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as dict, converting to string") text = extractable.get_string(joiner) if text: extracted_results = extractor.extract(text, **options) else: # raise ExtractorValueError("Extractor needs string, got " + str(type(extractable.value))) # TODO: Yixiang - needs to be handled properly pass elif extractor.input_type == InputType.OBJECT: extracted_results = extractor.extract(extractable.value, **options) elif extractor.input_type == InputType.HTML: if bool(BeautifulSoup(extractable.value, "html.parser").find()): extracted_results = extractor.extract(extractable.value, **options) else: # raise ExtractorValueError("Extractor needs HTML, got non HTML string") # TODO: Yixiang - needs to be handled properly pass try: jsonPath = extractable.full_path except AttributeError: jsonPath = None for e in extracted_results: # for the purpose of provenance hierarrchy tracking, a parent's id for next generation. 
e.prov_id = self.provenance_id_index extraction_provenance_record: ExtractionProvenanceRecord = ExtractionProvenanceRecord( e.prov_id, jsonPath, e.provenance["extractor_name"], e.provenance["start_char"], e.provenance["end_char"], e.provenance["confidence"], self, extractable.prov_id) self._provenances[e.prov_id] = extraction_provenance_record # for the purpose of provenance hierarchy tracking self.provenance_id_index_incrementer() self.create_provenance(extraction_provenance_record) return extracted_results
[ "def", "extract", "(", "self", ",", "extractor", ":", "Extractor", ",", "extractable", ":", "Extractable", "=", "None", ",", "tokenizer", ":", "Tokenizer", "=", "None", ",", "joiner", ":", "str", "=", "\" \"", ",", "*", "*", "options", ")", "->", "List", "[", "Extraction", "]", ":", "if", "not", "extractable", ":", "extractable", "=", "self", "if", "not", "tokenizer", ":", "tokenizer", "=", "self", ".", "etk", ".", "default_tokenizer", "extracted_results", "=", "list", "(", ")", "if", "extractor", ".", "input_type", "==", "InputType", ".", "TOKENS", ":", "if", "self", ".", "etk", ".", "error_policy", "==", "ErrorPolicy", ".", "PROCESS", ":", "if", "isinstance", "(", "extractable", ".", "value", ",", "list", ")", ":", "self", ".", "etk", ".", "log", "(", "\"Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string\"", ",", "\"warning\"", ",", "self", ".", "doc_id", ",", "self", ".", "url", ")", "warnings", ".", "warn", "(", "\"Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string\"", ")", "elif", "isinstance", "(", "extractable", ".", "value", ",", "dict", ")", ":", "self", ".", "etk", ".", "log", "(", "\"Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string\"", ",", "\"warning\"", ",", "self", ".", "doc_id", ",", "self", ".", "url", ")", "warnings", ".", "warn", "(", "\"Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string\"", ")", "tokens", "=", "extractable", ".", "get_tokens", "(", "tokenizer", ")", "if", "tokens", ":", "extracted_results", "=", "extractor", ".", "extract", "(", "tokens", ",", "*", "*", "options", ")", "else", ":", "raise", "ExtractorValueError", "(", "\"Extractor needs string, tokenizer needs string to tokenize, got \"", "+", "str", "(", "type", "(", "extractable", ".", "value", ")", ")", ")", "elif", "extractor", ".", "input_type", "==", "InputType", ".", "TEXT", ":", "if", "self", ".", "etk", ".", "error_policy", "==", "ErrorPolicy", ".", "PROCESS", ":", "if", "isinstance", "(", "extractable", ".", "value", ",", "list", ")", ":", "self", ".", "etk", ".", "log", "(", "\"Extractor needs string, got extractable value as list, converting to string\"", ",", "\"warning\"", ",", "self", ".", "doc_id", ",", "self", ".", "url", ")", "warnings", ".", "warn", "(", "\"Extractor needs string, got extractable value as list, converting to string\"", ")", "elif", "isinstance", "(", "extractable", ".", "value", ",", "dict", ")", ":", "self", ".", "etk", ".", "log", "(", "\"Extractor needs string, got extractable value as dict, converting to string\"", ",", "\"warning\"", ",", "self", ".", "doc_id", ",", "self", ".", "url", ")", "warnings", ".", "warn", "(", "\"Extractor needs string, got extractable value as dict, converting to string\"", ")", "text", "=", "extractable", ".", "get_string", "(", "joiner", ")", "if", "text", ":", "extracted_results", "=", "extractor", ".", "extract", "(", "text", ",", "*", "*", "options", ")", "else", ":", "# raise ExtractorValueError(\"Extractor needs string, got \" + str(type(extractable.value)))", "# TODO: Yixiang - needs to be handled properly", "pass", "elif", "extractor", ".", "input_type", "==", "InputType", ".", "OBJECT", ":", "extracted_results", "=", "extractor", ".", "extract", "(", "extractable", ".", "value", ",", "*", "*", "options", ")", "elif", "extractor", ".", "input_type", "==", "InputType", ".", "HTML", ":", "if", "bool", "(", "BeautifulSoup", "(", "extractable", ".", "value", ",", 
"\"html.parser\"", ")", ".", "find", "(", ")", ")", ":", "extracted_results", "=", "extractor", ".", "extract", "(", "extractable", ".", "value", ",", "*", "*", "options", ")", "else", ":", "# raise ExtractorValueError(\"Extractor needs HTML, got non HTML string\")", "# TODO: Yixiang - needs to be handled properly", "pass", "try", ":", "jsonPath", "=", "extractable", ".", "full_path", "except", "AttributeError", ":", "jsonPath", "=", "None", "for", "e", "in", "extracted_results", ":", "# for the purpose of provenance hierarrchy tracking, a parent's id for next generation.", "e", ".", "prov_id", "=", "self", ".", "provenance_id_index", "extraction_provenance_record", ":", "ExtractionProvenanceRecord", "=", "ExtractionProvenanceRecord", "(", "e", ".", "prov_id", ",", "jsonPath", ",", "e", ".", "provenance", "[", "\"extractor_name\"", "]", ",", "e", ".", "provenance", "[", "\"start_char\"", "]", ",", "e", ".", "provenance", "[", "\"end_char\"", "]", ",", "e", ".", "provenance", "[", "\"confidence\"", "]", ",", "self", ",", "extractable", ".", "prov_id", ")", "self", ".", "_provenances", "[", "e", ".", "prov_id", "]", "=", "extraction_provenance_record", "# for the purpose of provenance hierarchy tracking", "self", ".", "provenance_id_index_incrementer", "(", ")", "self", ".", "create_provenance", "(", "extraction_provenance_record", ")", "return", "extracted_results" ]
Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions.
[ "Invoke", "the", "extractor", "on", "the", "given", "extractable", "accumulating", "all", "the", "extractions", "in", "a", "list", "." ]
python
train
50.516129
tchellomello/python-arlo
pyarlo/camera.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/camera.py#L128-L132
def unseen_videos_reset(self): """Reset the unseen videos counter.""" url = RESET_CAM_ENDPOINT.format(self.unique_id) ret = self._session.query(url).get('success') return ret
[ "def", "unseen_videos_reset", "(", "self", ")", ":", "url", "=", "RESET_CAM_ENDPOINT", ".", "format", "(", "self", ".", "unique_id", ")", "ret", "=", "self", ".", "_session", ".", "query", "(", "url", ")", ".", "get", "(", "'success'", ")", "return", "ret" ]
Reset the unseen videos counter.
[ "Reset", "the", "unseen", "videos", "counter", "." ]
python
train
40.4
sander76/aio-powerview-api
aiopvapi/hub.py
https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/hub.py#L88-L99
async def query_firmware(self): """Query the firmware versions.""" _version = await self.request.get(join_path(self._base_path, "/fwversion")) _fw = _version.get("firmware") if _fw: _main = _fw.get("mainProcessor") if _main: self._main_processor_version = self._make_version(_main) _radio = _fw.get("radio") if _radio: self._radio_version = self._make_version(_radio)
[ "async", "def", "query_firmware", "(", "self", ")", ":", "_version", "=", "await", "self", ".", "request", ".", "get", "(", "join_path", "(", "self", ".", "_base_path", ",", "\"/fwversion\"", ")", ")", "_fw", "=", "_version", ".", "get", "(", "\"firmware\"", ")", "if", "_fw", ":", "_main", "=", "_fw", ".", "get", "(", "\"mainProcessor\"", ")", "if", "_main", ":", "self", ".", "_main_processor_version", "=", "self", ".", "_make_version", "(", "_main", ")", "_radio", "=", "_fw", ".", "get", "(", "\"radio\"", ")", "if", "_radio", ":", "self", ".", "_radio_version", "=", "self", ".", "_make_version", "(", "_radio", ")" ]
Query the firmware versions.
[ "Query", "the", "firmware", "versions", "." ]
python
train
39.083333
pybel/pybel-tools
src/pybel_tools/summary/contradictions.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/contradictions.py#L17-L23
def pair_has_contradiction(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> bool: """Check if a pair of nodes has any contradictions in their causal relationships. Assumes both nodes are in the graph. """ relations = {data[RELATION] for data in graph[u][v].values()} return relation_set_has_contradictions(relations)
[ "def", "pair_has_contradiction", "(", "graph", ":", "BELGraph", ",", "u", ":", "BaseEntity", ",", "v", ":", "BaseEntity", ")", "->", "bool", ":", "relations", "=", "{", "data", "[", "RELATION", "]", "for", "data", "in", "graph", "[", "u", "]", "[", "v", "]", ".", "values", "(", ")", "}", "return", "relation_set_has_contradictions", "(", "relations", ")" ]
Check if a pair of nodes has any contradictions in their causal relationships. Assumes both nodes are in the graph.
[ "Check", "if", "a", "pair", "of", "nodes", "has", "any", "contradictions", "in", "their", "causal", "relationships", "." ]
python
valid
47.428571
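A sketch of the edge-data lookup above using a bare networkx MultiDiGraph (which BELGraph builds on); the node and relation names are placeholders, and whether a given relation set counts as contradictory is decided by relation_set_has_contradictions in the library, not here.

# Placeholder graph; shows how parallel-edge RELATION values are collected.
import networkx as nx

RELATION = 'relation'
graph = nx.MultiDiGraph()
graph.add_edge('A', 'B', **{RELATION: 'increases'})
graph.add_edge('A', 'B', **{RELATION: 'decreases'})

relations = {data[RELATION] for data in graph['A']['B'].values()}
print(relations)  # {'increases', 'decreases'}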
Sean1708/HipPy
hippy/parser.py
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L239-L263
def _parse_comma_list(self):
        """Parse a comma separated list."""
        if self._cur_token['type'] not in self._literals:
            raise Exception(
                "Parser failed, _parse_comma_list was called on non-literal"
                " {} on line {}.".format(
                    repr(self._cur_token['value']),
                    self._cur_token['line']
                )
            )

        array = []
        while self._cur_token['type'] in self._literals and not self._finished:
            array.append(self._cur_token['value'])
            self._increment()
            self._skip_whitespace()
            if self._cur_token['type'] is TT.comma:
                self._increment()
                self._skip_whitespace()
            elif (
                not self._finished and
                self._cur_token['type'] not in (TT.ws, TT.lbreak)
            ):
                raise ParseError('comma or newline', self._cur_token)

        return array
[ "def", "_parse_comma_list", "(", "self", ")", ":", "if", "self", ".", "_cur_token", "[", "'type'", "]", "not", "in", "self", ".", "_literals", ":", "raise", "Exception", "(", "\"Parser failed, _parse_comma_list was called on non-literal\"", "\" {} on line {}.\"", ".", "format", "(", "repr", "(", "self", ".", "_cur_token", "[", "'value'", "]", ")", ",", "self", ".", "_cur_token", "[", "'line'", "]", ")", ")", "array", "=", "[", "]", "while", "self", ".", "_cur_token", "[", "'type'", "]", "in", "self", ".", "_literals", "and", "not", "self", ".", "_finished", ":", "array", ".", "append", "(", "self", ".", "_cur_token", "[", "'value'", "]", ")", "self", ".", "_increment", "(", ")", "self", ".", "_skip_whitespace", "(", ")", "if", "self", ".", "_cur_token", "[", "'type'", "]", "is", "TT", ".", "comma", ":", "self", ".", "_increment", "(", ")", "self", ".", "_skip_whitespace", "(", ")", "elif", "(", "not", "self", ".", "_finished", "and", "self", ".", "_cur_token", "[", "'type'", "]", "not", "in", "(", "TT", ".", "ws", ",", "TT", ".", "lbreak", ")", ")", ":", "raise", "ParseError", "(", "'comma or newline'", ",", "self", ".", "_cur_token", ")", "return", "array" ]
Parse a comma separated list.
[ "Parse", "a", "comma", "separated", "list", "." ]
python
train
37.44
mlperf/training
image_classification/tensorflow/official/resnet/imagenet_preprocessing.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_preprocessing.py#L212-L231
def _aspect_preserving_resize(image, resize_min): """Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. """ mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING, value={"min": resize_min}) shape = tf.shape(image) height, width = shape[0], shape[1] new_height, new_width = _smallest_size_at_least(height, width, resize_min) return _resize_image(image, new_height, new_width)
[ "def", "_aspect_preserving_resize", "(", "image", ",", "resize_min", ")", ":", "mlperf_log", ".", "resnet_print", "(", "key", "=", "mlperf_log", ".", "INPUT_RESIZE_ASPECT_PRESERVING", ",", "value", "=", "{", "\"min\"", ":", "resize_min", "}", ")", "shape", "=", "tf", ".", "shape", "(", "image", ")", "height", ",", "width", "=", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", "new_height", ",", "new_width", "=", "_smallest_size_at_least", "(", "height", ",", "width", ",", "resize_min", ")", "return", "_resize_image", "(", "image", ",", "new_height", ",", "new_width", ")" ]
Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image.
[ "Resize", "images", "preserving", "the", "original", "aspect", "ratio", "." ]
python
train
32.2
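A pure-Python sketch of the size arithmetic behind the record above; _smallest_size_at_least is not included in the record, so this mirrors what it presumably computes (scale both sides so the smaller one equals resize_min), and the exact rounding in the TensorFlow helper may differ.

# Scale both sides so the smaller one lands on resize_min; rounding is illustrative.
def smallest_size_at_least(height, width, resize_min):
    scale = resize_min / min(height, width)
    return int(round(height * scale)), int(round(width * scale))

print(smallest_size_at_least(480, 640, 256))  # (256, 341)
print(smallest_size_at_least(800, 600, 256))  # (341, 256)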
jart/fabulous
fabulous/text.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/text.py#L143-L176
def resolve_font(name): """Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass """ if os.path.exists(name): return os.path.abspath(name) fonts = get_font_files() if name in fonts: return fonts[name] raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
[ "def", "resolve_font", "(", "name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "name", ")", "fonts", "=", "get_font_files", "(", ")", "if", "name", "in", "fonts", ":", "return", "fonts", "[", "name", "]", "raise", "FontNotFound", "(", "\"Can't find %r :'( Try adding it to ~/.fonts\"", "%", "name", ")" ]
Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass
[ "Turns", "font", "names", "into", "absolute", "filenames" ]
python
train
26.823529
roboogle/gtkmvc3
gtkmvco/examples/treeview/sorting.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/treeview/sorting.py#L52-L67
def get_sort_function(order): """ Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed. """ stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order) def sort_function(a, b): for name, direction in stable: v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b) if v != 0: return v * direction return 0 return sort_function
[ "def", "get_sort_function", "(", "order", ")", ":", "stable", "=", "tuple", "(", "(", "d", "[", "'key'", "]", ",", "-", "1", "if", "d", "[", "'reverse'", "]", "else", "1", ")", "for", "d", "in", "order", ")", "def", "sort_function", "(", "a", ",", "b", ")", ":", "for", "name", ",", "direction", "in", "stable", ":", "v", "=", "cmp", "(", "getattr", "(", "a", ",", "name", ")", "if", "a", "else", "a", ",", "getattr", "(", "b", ",", "name", ")", "if", "b", "else", "b", ")", "if", "v", "!=", "0", ":", "return", "v", "*", "direction", "return", "0", "return", "sort_function" ]
Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed.
[ "Returns", "a", "callable", "similar", "to", "the", "built", "-", "in", "cmp", "to", "be", "used", "on", "objects", "." ]
python
train
40.4375
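A usage sketch for get_sort_function above; the repository code targets Python 2's built-in cmp, so a small shim is defined here so the sketch runs on Python 3 when the function is pasted into the same module. The records and sort keys are made up.

# Python-3 stand-in for the Python-2 built-in cmp that the function relies on.
from functools import cmp_to_key
from types import SimpleNamespace

def cmp(a, b):
    return (a > b) - (a < b)

people = [SimpleNamespace(name='bob', age=42),
          SimpleNamespace(name='alice', age=42),
          SimpleNamespace(name='carol', age=30)]

order = [{'key': 'age', 'reverse': False},   # age ascending first
         {'key': 'name', 'reverse': True}]   # then name descending

people.sort(key=cmp_to_key(get_sort_function(order)))
print([p.name for p in people])  # ['carol', 'bob', 'alice']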
joshspeagle/dynesty
dynesty/sampler.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/sampler.py#L457-L480
def _remove_live_points(self): """Remove the final set of live points if they were previously added to the current set of dead points.""" if self.added_live: self.added_live = False if self.save_samples: del self.saved_id[-self.nlive:] del self.saved_u[-self.nlive:] del self.saved_v[-self.nlive:] del self.saved_logl[-self.nlive:] del self.saved_logvol[-self.nlive:] del self.saved_logwt[-self.nlive:] del self.saved_logz[-self.nlive:] del self.saved_logzvar[-self.nlive:] del self.saved_h[-self.nlive:] del self.saved_nc[-self.nlive:] del self.saved_boundidx[-self.nlive:] del self.saved_it[-self.nlive:] del self.saved_bounditer[-self.nlive:] del self.saved_scale[-self.nlive:] else: raise ValueError("No live points were added to the " "list of samples!")
[ "def", "_remove_live_points", "(", "self", ")", ":", "if", "self", ".", "added_live", ":", "self", ".", "added_live", "=", "False", "if", "self", ".", "save_samples", ":", "del", "self", ".", "saved_id", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_u", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_v", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_logl", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_logvol", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_logwt", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_logz", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_logzvar", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_h", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_nc", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_boundidx", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_it", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_bounditer", "[", "-", "self", ".", "nlive", ":", "]", "del", "self", ".", "saved_scale", "[", "-", "self", ".", "nlive", ":", "]", "else", ":", "raise", "ValueError", "(", "\"No live points were added to the \"", "\"list of samples!\"", ")" ]
Remove the final set of live points if they were previously added to the current set of dead points.
[ "Remove", "the", "final", "set", "of", "live", "points", "if", "they", "were", "previously", "added", "to", "the", "current", "set", "of", "dead", "points", "." ]
python
train
44.083333
Alignak-monitoring/alignak
alignak/objects/realm.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L337-L350
def get_all_subs_satellites_by_type(self, sat_type, realms): """Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list """ res = copy.copy(getattr(self, sat_type)) for member in self.all_sub_members: res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms)) return res
[ "def", "get_all_subs_satellites_by_type", "(", "self", ",", "sat_type", ",", "realms", ")", ":", "res", "=", "copy", ".", "copy", "(", "getattr", "(", "self", ",", "sat_type", ")", ")", "for", "member", "in", "self", ".", "all_sub_members", ":", "res", ".", "extend", "(", "realms", "[", "member", "]", ".", "get_all_subs_satellites_by_type", "(", "sat_type", ",", "realms", ")", ")", "return", "res" ]
Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list
[ "Get", "all", "satellites", "of", "the", "wanted", "type", "in", "this", "realm", "recursively" ]
python
train
41.214286
tanghaibao/goatools
goatools/wr_tbl.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl.py#L33-L40
def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws): """Print list of namedtuples into a table using prtfmt.""" lines = get_lines(data_nts, prtfmt, nt_fields, **kws) if lines: for line in lines: prt.write(line) else: sys.stdout.write(" 0 items. NOT WRITING\n")
[ "def", "prt_txt", "(", "prt", ",", "data_nts", ",", "prtfmt", "=", "None", ",", "nt_fields", "=", "None", ",", "*", "*", "kws", ")", ":", "lines", "=", "get_lines", "(", "data_nts", ",", "prtfmt", ",", "nt_fields", ",", "*", "*", "kws", ")", "if", "lines", ":", "for", "line", "in", "lines", ":", "prt", ".", "write", "(", "line", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "\" 0 items. NOT WRITING\\n\"", ")" ]
Print list of namedtuples into a table using prtfmt.
[ "Print", "list", "of", "namedtuples", "into", "a", "table", "using", "prtfmt", "." ]
python
train
39.125
voidpp/PCA9685-driver
pca9685_driver/device.py
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L87-L98
def get_led_register_from_name(self, name): """Parse the name for led number :param name: attribute name, like: led_1 """ res = re.match('^led_([0-9]{1,2})$', name) if res is None: raise AttributeError("Unknown attribute: '%s'" % name) led_num = int(res.group(1)) if led_num < 0 or led_num > 15: raise AttributeError("Unknown attribute: '%s'" % name) return self.calc_led_register(led_num)
[ "def", "get_led_register_from_name", "(", "self", ",", "name", ")", ":", "res", "=", "re", ".", "match", "(", "'^led_([0-9]{1,2})$'", ",", "name", ")", "if", "res", "is", "None", ":", "raise", "AttributeError", "(", "\"Unknown attribute: '%s'\"", "%", "name", ")", "led_num", "=", "int", "(", "res", ".", "group", "(", "1", ")", ")", "if", "led_num", "<", "0", "or", "led_num", ">", "15", ":", "raise", "AttributeError", "(", "\"Unknown attribute: '%s'\"", "%", "name", ")", "return", "self", ".", "calc_led_register", "(", "led_num", ")" ]
Parse the name for led number :param name: attribute name, like: led_1
[ "Parse", "the", "name", "for", "led", "number" ]
python
train
38.916667
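A standalone sketch of the led-name parsing above; the register arithmetic done by calc_led_register() is device-specific and not reproduced, so this only shows how the regex and range check behave. The attribute names are examples.

# Mirrors the name validation from the record.
import re

def parse_led_number(name):
    res = re.match('^led_([0-9]{1,2})$', name)
    if res is None:
        raise AttributeError("Unknown attribute: '%s'" % name)
    led_num = int(res.group(1))
    if led_num < 0 or led_num > 15:
        raise AttributeError("Unknown attribute: '%s'" % name)
    return led_num

print(parse_led_number('led_7'))   # 7
print(parse_led_number('led_15'))  # 15
# parse_led_number('led_16') or parse_led_number('brightness') raises AttributeError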
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L604-L625
def _to_ctfile(self): """Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`. """ output = io.StringIO() for key in self: if key == 'HeaderBlock': for line in self[key].values(): output.write(line) output.write('\n') elif key == 'Ctab': ctab_str = self[key]._to_ctfile() output.write(ctab_str) else: raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key)) return output.getvalue()
[ "def", "_to_ctfile", "(", "self", ")", ":", "output", "=", "io", ".", "StringIO", "(", ")", "for", "key", "in", "self", ":", "if", "key", "==", "'HeaderBlock'", ":", "for", "line", "in", "self", "[", "key", "]", ".", "values", "(", ")", ":", "output", ".", "write", "(", "line", ")", "output", ".", "write", "(", "'\\n'", ")", "elif", "key", "==", "'Ctab'", ":", "ctab_str", "=", "self", "[", "key", "]", ".", "_to_ctfile", "(", ")", "output", ".", "write", "(", "ctab_str", ")", "else", ":", "raise", "KeyError", "(", "'Molfile object does not supposed to have any other information: \"{}\".'", ".", "format", "(", "key", ")", ")", "return", "output", ".", "getvalue", "(", ")" ]
Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`.
[ "Convert", ":", "class", ":", "~ctfile", ".", "ctfile", ".", "CTfile", "into", "CTfile", "formatted", "string", "." ]
python
train
31.136364
quintusdias/glymur
glymur/jp2k.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2k.py#L1746-L1766
def _validate_channel_definition(self, jp2h, colr): """Validate the channel definition box.""" cdef_lst = [j for (j, box) in enumerate(jp2h.box) if box.box_id == 'cdef'] if len(cdef_lst) > 1: msg = ("Only one channel definition box is allowed in the " "JP2 header.") raise IOError(msg) elif len(cdef_lst) == 1: cdef = jp2h.box[cdef_lst[0]] if colr.colorspace == core.SRGB: if any([chan + 1 not in cdef.association or cdef.channel_type[chan] != 0 for chan in [0, 1, 2]]): msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg) elif colr.colorspace == core.GREYSCALE: if 0 not in cdef.channel_type: msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg)
[ "def", "_validate_channel_definition", "(", "self", ",", "jp2h", ",", "colr", ")", ":", "cdef_lst", "=", "[", "j", "for", "(", "j", ",", "box", ")", "in", "enumerate", "(", "jp2h", ".", "box", ")", "if", "box", ".", "box_id", "==", "'cdef'", "]", "if", "len", "(", "cdef_lst", ")", ">", "1", ":", "msg", "=", "(", "\"Only one channel definition box is allowed in the \"", "\"JP2 header.\"", ")", "raise", "IOError", "(", "msg", ")", "elif", "len", "(", "cdef_lst", ")", "==", "1", ":", "cdef", "=", "jp2h", ".", "box", "[", "cdef_lst", "[", "0", "]", "]", "if", "colr", ".", "colorspace", "==", "core", ".", "SRGB", ":", "if", "any", "(", "[", "chan", "+", "1", "not", "in", "cdef", ".", "association", "or", "cdef", ".", "channel_type", "[", "chan", "]", "!=", "0", "for", "chan", "in", "[", "0", ",", "1", ",", "2", "]", "]", ")", ":", "msg", "=", "(", "\"All color channels must be defined in the \"", "\"channel definition box.\"", ")", "raise", "IOError", "(", "msg", ")", "elif", "colr", ".", "colorspace", "==", "core", ".", "GREYSCALE", ":", "if", "0", "not", "in", "cdef", ".", "channel_type", ":", "msg", "=", "(", "\"All color channels must be defined in the \"", "\"channel definition box.\"", ")", "raise", "IOError", "(", "msg", ")" ]
Validate the channel definition box.
[ "Validate", "the", "channel", "definition", "box", "." ]
python
train
49.428571
saltstack/salt
salt/modules/boto_kinesis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L494-L543
def _execute_with_retries(conn, function, **kwargs): ''' Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs ''' r = {} max_attempts = 18 max_retry_delay = 10 for attempt in range(max_attempts): log.info("attempt: %s function: %s", attempt, function) try: fn = getattr(conn, function) r['result'] = fn(**kwargs) return r except botocore.exceptions.ClientError as e: error_code = e.response['Error']['Code'] if "LimitExceededException" in error_code or "ResourceInUseException" in error_code: # could be rate limited by AWS or another command is blocking, # retry with exponential backoff log.debug("Retrying due to AWS exception", exc_info=True) time.sleep(_jittered_backoff(attempt, max_retry_delay)) else: # ResourceNotFoundException or InvalidArgumentException r['error'] = e.response['Error'] log.error(r['error']) r['result'] = None return r r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts) log.error(r['error']) return r
[ "def", "_execute_with_retries", "(", "conn", ",", "function", ",", "*", "*", "kwargs", ")", ":", "r", "=", "{", "}", "max_attempts", "=", "18", "max_retry_delay", "=", "10", "for", "attempt", "in", "range", "(", "max_attempts", ")", ":", "log", ".", "info", "(", "\"attempt: %s function: %s\"", ",", "attempt", ",", "function", ")", "try", ":", "fn", "=", "getattr", "(", "conn", ",", "function", ")", "r", "[", "'result'", "]", "=", "fn", "(", "*", "*", "kwargs", ")", "return", "r", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "e", ":", "error_code", "=", "e", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "if", "\"LimitExceededException\"", "in", "error_code", "or", "\"ResourceInUseException\"", "in", "error_code", ":", "# could be rate limited by AWS or another command is blocking,", "# retry with exponential backoff", "log", ".", "debug", "(", "\"Retrying due to AWS exception\"", ",", "exc_info", "=", "True", ")", "time", ".", "sleep", "(", "_jittered_backoff", "(", "attempt", ",", "max_retry_delay", ")", ")", "else", ":", "# ResourceNotFoundException or InvalidArgumentException", "r", "[", "'error'", "]", "=", "e", ".", "response", "[", "'Error'", "]", "log", ".", "error", "(", "r", "[", "'error'", "]", ")", "r", "[", "'result'", "]", "=", "None", "return", "r", "r", "[", "'error'", "]", "=", "\"Tried to execute function {0} {1} times, but was unable\"", ".", "format", "(", "function", ",", "max_attempts", ")", "log", ".", "error", "(", "r", "[", "'error'", "]", ")", "return", "r" ]
Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
[ "Retry", "if", "we", "re", "rate", "limited", "by", "AWS", "or", "blocked", "by", "another", "call", ".", "Give", "up", "and", "return", "error", "message", "if", "resource", "not", "found", "or", "argument", "is", "invalid", "." ]
python
train
36.7
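_jittered_backoff() is referenced above but not included in this record; a common capped exponential backoff with full jitter looks like the sketch below, which is an assumption about its behaviour rather than Salt's actual implementation.

# Illustrative backoff: random delay in [0, min(cap, 2**attempt)).
import random

def jittered_backoff(attempt, max_retry_delay):
    return random.random() * min(max_retry_delay, 2 ** attempt)

for attempt in range(5):
    print(attempt, round(jittered_backoff(attempt, 10), 2))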
vingd/vingd-api-python
vingd/client.py
https://github.com/vingd/vingd-api-python/blob/7548a49973a472f7277c8ef847563faa7b6f3706/vingd/client.py#L853-L925
def revoke_vouchers(self, vid_encoded=None, uid_from=None, uid_to=None, gid=None, valid_after=None, valid_before=None, last=None, first=None): """ REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``) """ resource = self.kvpath( 'vouchers', ('ident', vid_encoded), **{ 'from': ('int', uid_from), 'to': ('int', uid_to), 'gid': ('ident', gid), 'valid_after': ('isobasic', absdatetime(valid_after)), 'valid_before': ('isobasic', absdatetime(valid_before)), 'first': ('int', first), 'last': ('int', last) } ) return self.request('delete', resource, json.dumps({'revoke': True}))
[ "def", "revoke_vouchers", "(", "self", ",", "vid_encoded", "=", "None", ",", "uid_from", "=", "None", ",", "uid_to", "=", "None", ",", "gid", "=", "None", ",", "valid_after", "=", "None", ",", "valid_before", "=", "None", ",", "last", "=", "None", ",", "first", "=", "None", ")", ":", "resource", "=", "self", ".", "kvpath", "(", "'vouchers'", ",", "(", "'ident'", ",", "vid_encoded", ")", ",", "*", "*", "{", "'from'", ":", "(", "'int'", ",", "uid_from", ")", ",", "'to'", ":", "(", "'int'", ",", "uid_to", ")", ",", "'gid'", ":", "(", "'ident'", ",", "gid", ")", ",", "'valid_after'", ":", "(", "'isobasic'", ",", "absdatetime", "(", "valid_after", ")", ")", ",", "'valid_before'", ":", "(", "'isobasic'", ",", "absdatetime", "(", "valid_before", ")", ")", ",", "'first'", ":", "(", "'int'", ",", "first", ")", ",", "'last'", ":", "(", "'int'", ",", "last", ")", "}", ")", "return", "self", ".", "request", "(", "'delete'", ",", "resource", ",", "json", ".", "dumps", "(", "{", "'revoke'", ":", "True", "}", ")", ")" ]
REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``)
[ "REVOKES", "/", "INVALIDATES", "a", "filtered", "list", "of", "vouchers", ".", ":", "type", "vid_encoded", ":", "alphanumeric", "(", "64", ")", ":", "param", "vid_encoded", ":", "Voucher", "ID", "as", "a", "string", "with", "CRC", ".", ":", "type", "uid_from", ":", "bigint", ":", "param", "uid_from", ":", "Filter", "by", "source", "account", "UID", ".", ":", "type", "uid_to", ":", "bigint", ":", "param", "uid_to", ":", "Filter", "by", "destination", "account", "UID", ".", ":", "type", "gid", ":", "alphanumeric", "(", "32", ")", ":", "param", "gid", ":", "Filter", "by", "voucher", "Group", "ID", ".", "GID", "is", "localized", "to", "uid_from", ".", ":", "type", "valid_after", ":", "datetime", "/", "dict", ":", "param", "valid_after", ":", "Voucher", "has", "to", "be", "valid", "after", "this", "timestamp", ".", "Absolute", "(", "datetime", ")", "or", "relative", "(", "dict", ")", "timestamps", "are", "accepted", ".", "Valid", "keys", "for", "relative", "timestamp", "dictionary", "are", "same", "as", "keyword", "arguments", "for", "datetime", ".", "timedelta", "(", "days", "seconds", "minutes", "hours", "weeks", ")", ".", ":", "type", "valid_before", ":", "datetime", "/", "dict", ":", "param", "valid_before", ":", "Voucher", "was", "valid", "until", "this", "timestamp", "(", "for", "format", "see", "the", "valid_after", "above", ")", ".", ":", "type", "last", ":", "bigint", ":", "param", "last", ":", "The", "number", "of", "newest", "vouchers", "(", "that", "satisfy", "all", "other", "criteria", ")", "to", "return", ".", ":", "type", "first", ":", "bigint", ":", "param", "first", ":", "The", "number", "of", "oldest", "vouchers", "(", "that", "satisfy", "all", "other", "criteria", ")", "to", "return", ".", ":", "note", ":", "As", "with", "get_vouchers", "filters", "are", "restrictive", "narrowing", "down", "the", "set", "of", "vouchers", "which", "initially", "includes", "complete", "voucher", "collection", ".", "That", "means", "in", "turn", "that", "a", "naive", "empty", "-", "handed", "revoke_vouchers", "()", "call", "shall", "revoke", "**", "all", "**", "un", "-", "used", "vouchers", "(", "both", "valid", "and", "expired", ")", "!", ":", "rtype", ":", "dict", ":", "returns", ":", "A", "dictionary", "of", "successfully", "revoked", "vouchers", "i", ".", "e", ".", "a", "map", "vid_encoded", ":", "refund_transfer_id", "for", "all", "successfully", "revoked", "vouchers", ".", ":", "raises", "GeneralException", ":", ":", "resource", ":", "vouchers", "[", "/", "<vid_encoded", ">", "]", "[", "/", "from", "=", "<uid_from", ">", "]", "[", "/", "to", "=", "<uid_to", ">", "]", "[", "/", "valid_after", "=", "<valid_after", ">", "]", "[", "/", "valid_before", "=", "<valid_before", ">", "]", "[", "/", "last", "=", "<last", ">", "]", "[", "/", "first", "=", "<first", ">", "]", ":", "access", ":", "authorized", "users", "(", "ACL", "flag", ":", "voucher", ".", "revoke", ")" ]
python
train
41.849315
codelv/enaml-native
src/enamlnative/core/eventloop/ioloop.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/eventloop/ioloop.py#L569-L583
def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs)
[ "def", "call_at", "(", "self", ",", "when", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "add_timeout", "(", "when", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0
[ "Runs", "the", "callback", "at", "the", "absolute", "time", "designated", "by", "when", "." ]
python
train
40.2
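A hedged usage sketch of the tornado-style API the docstring above describes; the import path follows this record's package layout, IOLoop.current() and time() are assumed from the Tornado API the module is derived from, and the callback is a placeholder.

# Schedule a callback at an absolute loop time; cancel later via remove_timeout.
from enamlnative.core.eventloop.ioloop import IOLoop

def remind(message):
    print('reminder:', message)

loop = IOLoop.current()
handle = loop.call_at(loop.time() + 5, remind, 'tea is ready')
# loop.remove_timeout(handle) would cancel it; loop.start() runs the loop.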
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L934-L954
def colRegex(self, colName): """ Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ """ if not isinstance(colName, basestring): raise ValueError("colName should be provided as string") jc = self._jdf.colRegex(colName) return Column(jc)
[ "def", "colRegex", "(", "self", ",", "colName", ")", ":", "if", "not", "isinstance", "(", "colName", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"colName should be provided as string\"", ")", "jc", "=", "self", ".", "_jdf", ".", "colRegex", "(", "colName", ")", "return", "Column", "(", "jc", ")" ]
Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+
[ "Selects", "column", "based", "on", "the", "column", "name", "specified", "as", "a", "regex", "and", "returns", "it", "as", ":", "class", ":", "Column", "." ]
python
train
30.904762
modin-project/modin
modin/pandas/base.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1193-L1207
def floordiv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. """ return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "floordiv", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"floordiv\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "fill_value", "=", "fill_value", ")" ]
Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied.
[ "Divides", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "divide", "against", "this", ".", "axis", ":", "The", "axis", "to", "divide", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "divide", "over", ".", "fill_value", ":", "The", "value", "to", "fill", "NaNs", "with", ".", "Returns", ":", "A", "new", "DataFrame", "with", "the", "Divide", "applied", "." ]
python
train
39.866667
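Because modin mirrors the pandas API, floordiv behaves like elementwise floor division; a small illustrative sketch (output shapes follow pandas semantics):

import modin.pandas as pd

df = pd.DataFrame({"a": [7, 8, 9], "b": [10, 20, 30]})
print(df.floordiv(4))                       # same as df // 4
print(df.floordiv([2, 5], axis="columns"))  # one divisor per column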
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L195-L212
def get_spider_list(self, project_name, version=None): """ Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} """ url, method = self.command_set['listspiders'][0], self.command_set['listspiders'][1] data = {} data['project'] = project_name if version is not None: data['_version'] = version response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = SpiderList().__dict__ return response
[ "def", "get_spider_list", "(", "self", ",", "project_name", ",", "version", "=", "None", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'listspiders'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'listspiders'", "]", "[", "1", "]", "data", "=", "{", "}", "data", "[", "'project'", "]", "=", "project_name", "if", "version", "is", "not", "None", ":", "data", "[", "'_version'", "]", "=", "version", "response", "=", "http_utils", ".", "request", "(", "url", ",", "method_type", "=", "method", ",", "data", "=", "data", ",", "return_type", "=", "http_utils", ".", "RETURN_JSON", ")", "if", "response", "is", "None", ":", "logging", ".", "warning", "(", "'%s failure: not found or connection fail'", "%", "sys", ".", "_getframe", "(", ")", ".", "f_code", ".", "co_name", ")", "response", "=", "SpiderList", "(", ")", ".", "__dict__", "return", "response" ]
Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]}
[ "Get", "the", "list", "of", "spiders", "available", "in", "the", "last", "(", "unless", "overridden", ")", "version", "of", "some", "project", ".", ":", "param", "project_name", ":", "the", "project", "name", ":", "param", "version", ":", "the", "version", "of", "the", "project", "to", "examine", ":", "return", ":", "a", "dictionary", "that", "spider", "name", "list", "example", ":", "{", "status", ":", "ok", "spiders", ":", "[", "spider1", "spider2", "spider3", "]", "}" ]
python
train
52.388889
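A hypothetical call shape for the method above; the agent construction is assumed from the surrounding fish_dashboard code and is not part of this record.

agent = ScrapydAgent('http://localhost:6800')   # assumed constructor
result = agent.get_spider_list('quotes_project', version='1527645793')
if result.get('status') == 'ok':
    for spider_name in result.get('spiders', []):
        print(spider_name)
else:
    print('scrapyd unreachable or project missing:', result)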
hammerlab/cohorts
cohorts/cohort.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L402-L416
def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """ logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
[ "def", "load_dataframe", "(", "self", ",", "df_loader_name", ")", ":", "logger", ".", "debug", "(", "\"loading dataframe: {}\"", ".", "format", "(", "df_loader_name", ")", ")", "# Get the DataFrameLoader object corresponding to this name.", "df_loaders", "=", "[", "df_loader", "for", "df_loader", "in", "self", ".", "df_loaders", "if", "df_loader", ".", "name", "==", "df_loader_name", "]", "if", "len", "(", "df_loaders", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No DataFrameLoader with name %s\"", "%", "df_loader_name", ")", "if", "len", "(", "df_loaders", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Multiple DataFrameLoaders with name %s\"", "%", "df_loader_name", ")", "return", "df_loaders", "[", "0", "]", ".", "load_dataframe", "(", ")" ]
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame.
[ "Instead", "of", "joining", "a", "DataFrameJoiner", "with", "the", "Cohort", "in", "as_dataframe", "sometimes", "we", "may", "want", "to", "just", "directly", "load", "a", "particular", "DataFrame", "." ]
python
train
48.6
pandas-dev/pandas
pandas/core/dtypes/concat.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L18-L56
def get_dtype_kinds(l): """ Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays """ typs = set() for arr in l: dtype = arr.dtype if is_categorical_dtype(dtype): typ = 'category' elif is_sparse(arr): typ = 'sparse' elif isinstance(arr, ABCRangeIndex): typ = 'range' elif is_datetime64tz_dtype(arr): # if to_concat contains different tz, # the result must be object dtype typ = str(arr.dtype) elif is_datetime64_dtype(dtype): typ = 'datetime' elif is_timedelta64_dtype(dtype): typ = 'timedelta' elif is_object_dtype(dtype): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' elif is_extension_array_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) return typs
[ "def", "get_dtype_kinds", "(", "l", ")", ":", "typs", "=", "set", "(", ")", "for", "arr", "in", "l", ":", "dtype", "=", "arr", ".", "dtype", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "typ", "=", "'category'", "elif", "is_sparse", "(", "arr", ")", ":", "typ", "=", "'sparse'", "elif", "isinstance", "(", "arr", ",", "ABCRangeIndex", ")", ":", "typ", "=", "'range'", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "# if to_concat contains different tz,", "# the result must be object dtype", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", ":", "typ", "=", "'datetime'", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "typ", "=", "'timedelta'", "elif", "is_object_dtype", "(", "dtype", ")", ":", "typ", "=", "'object'", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "typ", "=", "'bool'", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "else", ":", "typ", "=", "dtype", ".", "kind", "typs", ".", "add", "(", "typ", ")", "return", "typs" ]
Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays
[ "Parameters", "----------", "l", ":", "list", "of", "arrays" ]
python
train
25.307692
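A rough sketch of what the helper reports for a mixed list of arrays. It is a private pandas internal, so the import path below may shift between versions.

import numpy as np
import pandas as pd
from pandas.core.dtypes.concat import get_dtype_kinds

arrays = [
    np.array([1, 2, 3]),                    # numpy kind 'i'
    np.array([1.5, 2.5]),                   # numpy kind 'f'
    pd.Categorical(["a", "b", "a"]),        # 'category'
    pd.to_datetime(["2019-01-01"]).values,  # 'datetime'
]
print(get_dtype_kinds(arrays))  # e.g. {'i', 'f', 'category', 'datetime'}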
Microsoft/nni
examples/trials/kaggle-tgs-salt/lovasz_losses.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/kaggle-tgs-salt/lovasz_losses.py#L225-L229
def xloss(logits, labels, ignore=None): """ Cross entropy loss """ return F.cross_entropy(logits, Variable(labels), ignore_index=255)
[ "def", "xloss", "(", "logits", ",", "labels", ",", "ignore", "=", "None", ")", ":", "return", "F", ".", "cross_entropy", "(", "logits", ",", "Variable", "(", "labels", ")", ",", "ignore_index", "=", "255", ")" ]
Cross entropy loss
[ "Cross", "entropy", "loss" ]
python
train
29
ericsomdahl/python-bittrex
bittrex/bittrex.py
https://github.com/ericsomdahl/python-bittrex/blob/2dbc08e3221e07a9e618eaa025d98ed197d28e31/bittrex/bittrex.py#L708-L746
def trade_sell(self, market=None, order_type=None, quantity=None, rate=None, time_in_effect=None, condition_type=None, target=0.0): """ Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return: """ return self._api_query(path_dict={ API_V2_0: '/key/market/tradesell' }, options={ 'marketname': market, 'ordertype': order_type, 'quantity': quantity, 'rate': rate, 'timeInEffect': time_in_effect, 'conditiontype': condition_type, 'target': target }, protection=PROTECTION_PRV)
[ "def", "trade_sell", "(", "self", ",", "market", "=", "None", ",", "order_type", "=", "None", ",", "quantity", "=", "None", ",", "rate", "=", "None", ",", "time_in_effect", "=", "None", ",", "condition_type", "=", "None", ",", "target", "=", "0.0", ")", ":", "return", "self", ".", "_api_query", "(", "path_dict", "=", "{", "API_V2_0", ":", "'/key/market/tradesell'", "}", ",", "options", "=", "{", "'marketname'", ":", "market", ",", "'ordertype'", ":", "order_type", ",", "'quantity'", ":", "quantity", ",", "'rate'", ":", "rate", ",", "'timeInEffect'", ":", "time_in_effect", ",", "'conditiontype'", ":", "condition_type", ",", "'target'", ":", "target", "}", ",", "protection", "=", "PROTECTION_PRV", ")" ]
Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return:
[ "Enter", "a", "sell", "order", "into", "the", "book", "Endpoint", "1", ".", "1", "NO", "EQUIVALENT", "--", "see", "sell_market", "or", "sell_limit", "2", ".", "0", "/", "key", "/", "market", "/", "tradesell" ]
python
train
45.487179
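A hypothetical v2.0 sell order using the documented parameters; the client construction and the string constants are assumptions based on the docstring, not part of this record.

from bittrex.bittrex import Bittrex, API_V2_0

client = Bittrex('api_key', 'api_secret', api_version=API_V2_0)
result = client.trade_sell(
    market='BTC-LTC',
    order_type='LIMIT',                    # ORDERTYPE_LIMIT
    quantity=1.5,
    rate=0.0065,
    time_in_effect='GOOD_TIL_CANCELLED',
    condition_type='NONE',
    target=0.0,
)
print(result)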
soerenwolfers/swutil
swutil/config.py
https://github.com/soerenwolfers/swutil/blob/2d598f2deac8b7e20df95dbc68017e5ab5d6180c/swutil/config.py#L21-L29
def lower(option,value): ''' Enforces lower case options and option values where appropriate ''' if type(option) is str: option=option.lower() if type(value) is str: value=value.lower() return (option,value)
[ "def", "lower", "(", "option", ",", "value", ")", ":", "if", "type", "(", "option", ")", "is", "str", ":", "option", "=", "option", ".", "lower", "(", ")", "if", "type", "(", "value", ")", "is", "str", ":", "value", "=", "value", ".", "lower", "(", ")", "return", "(", "option", ",", "value", ")" ]
Enforces lower case options and option values where appropriate
[ "Enforces", "lower", "case", "options", "and", "option", "values", "where", "appropriate" ]
python
valid
26.555556
rapidpro/dash
dash/utils/__init__.py
https://github.com/rapidpro/dash/blob/e9dc05b31b86fe3fe72e956975d1ee0a275ac016/dash/utils/__init__.py#L63-L75
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False): """ Gets the result of a method call, using the given key and TTL as a cache """ if not recalculate: cached = cache.get(cache_key) if cached is not None: return json.loads(cached) calculated = calculate() cache.set(cache_key, json.dumps(calculated), cache_ttl) return calculated
[ "def", "get_cacheable", "(", "cache_key", ",", "cache_ttl", ",", "calculate", ",", "recalculate", "=", "False", ")", ":", "if", "not", "recalculate", ":", "cached", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "cached", "is", "not", "None", ":", "return", "json", ".", "loads", "(", "cached", ")", "calculated", "=", "calculate", "(", ")", "cache", ".", "set", "(", "cache_key", ",", "json", ".", "dumps", "(", "calculated", ")", ",", "cache_ttl", ")", "return", "calculated" ]
Gets the result of a method call, using the given key and TTL as a cache
[ "Gets", "the", "result", "of", "a", "method", "call", "using", "the", "given", "key", "and", "TTL", "as", "a", "cache" ]
python
train
30.384615
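Typical usage: wrap a slow computation so repeat calls within the TTL are served from the Django cache, with recalculate=True forcing a refresh.

def fetch_contact_counts():
    # stand-in for a slow ORM query or remote API call
    return {"open": 42, "closed": 7}

counts = get_cacheable("org:1:contact-counts", 60 * 5, fetch_contact_counts)
fresh = get_cacheable("org:1:contact-counts", 60 * 5, fetch_contact_counts,
                      recalculate=True)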
keon/algorithms
algorithms/tree/avl/avl.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/avl/avl.py#L72-L86
def update_balances(self, recursive=True): """ Calculate tree balance factor """ if self.node: if recursive: if self.node.left: self.node.left.update_balances() if self.node.right: self.node.right.update_balances() self.balance = self.node.left.height - self.node.right.height else: self.balance = 0
[ "def", "update_balances", "(", "self", ",", "recursive", "=", "True", ")", ":", "if", "self", ".", "node", ":", "if", "recursive", ":", "if", "self", ".", "node", ".", "left", ":", "self", ".", "node", ".", "left", ".", "update_balances", "(", ")", "if", "self", ".", "node", ".", "right", ":", "self", ".", "node", ".", "right", ".", "update_balances", "(", ")", "self", ".", "balance", "=", "self", ".", "node", ".", "left", ".", "height", "-", "self", ".", "node", ".", "right", ".", "height", "else", ":", "self", ".", "balance", "=", "0" ]
Calculate tree balance factor
[ "Calculate", "tree", "balance", "factor" ]
python
train
29
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L1368-L1378
def identifier(self): """ A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier """ if len(self.polygons_full) != 1: raise TypeError('Identifier only valid for single body') return polygons.polygon_hash(self.polygons_full[0])
[ "def", "identifier", "(", "self", ")", ":", "if", "len", "(", "self", ".", "polygons_full", ")", "!=", "1", ":", "raise", "TypeError", "(", "'Identifier only valid for single body'", ")", "return", "polygons", ".", "polygon_hash", "(", "self", ".", "polygons_full", "[", "0", "]", ")" ]
A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier
[ "A", "unique", "identifier", "for", "the", "path", "." ]
python
train
30.181818
nickmckay/LiPD-utilities
Python/lipd/doi_resolver.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_resolver.py#L125-L147
def illegal_doi(self, doi_string): """ DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None """ logger_doi_resolver.info("enter illegal_doi") # Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others) if len(doi_string) > 5: # NOAA string if 'noaa' in doi_string.lower(): self.noaa_citation(doi_string) # Paragraph citation / Manual citation elif doi_string.count(' ') > 3: self.root_dict['pub'][0]['citation'] = doi_string # Strange Links or Other, send to quarantine else: logger_doi_resolver.warn("illegal_doi: bad doi string: {}".format(doi_string)) logger_doi_resolver.info("exit illegal_doi") return
[ "def", "illegal_doi", "(", "self", ",", "doi_string", ")", ":", "logger_doi_resolver", ".", "info", "(", "\"enter illegal_doi\"", ")", "# Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others)", "if", "len", "(", "doi_string", ")", ">", "5", ":", "# NOAA string", "if", "'noaa'", "in", "doi_string", ".", "lower", "(", ")", ":", "self", ".", "noaa_citation", "(", "doi_string", ")", "# Paragraph citation / Manual citation", "elif", "doi_string", ".", "count", "(", "' '", ")", ">", "3", ":", "self", ".", "root_dict", "[", "'pub'", "]", "[", "0", "]", "[", "'citation'", "]", "=", "doi_string", "# Strange Links or Other, send to quarantine", "else", ":", "logger_doi_resolver", ".", "warn", "(", "\"illegal_doi: bad doi string: {}\"", ".", "format", "(", "doi_string", ")", ")", "logger_doi_resolver", ".", "info", "(", "\"exit illegal_doi\"", ")", "return" ]
DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None
[ "DOI", "string", "did", "not", "match", "the", "regex", ".", "Determine", "what", "the", "data", "is", ".", ":", "param", "doi_string", ":", "(", "str", ")", "Malformed", "DOI", "string", ":", "return", ":", "None" ]
python
train
37.913043
wummel/linkchecker
third_party/miniboa-r42/miniboa/telnet.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/miniboa/telnet.py#L702-L706
def _note_remote_option(self, option, state): """Record the status of local negotiated Telnet options.""" if not self.telnet_opt_dict.has_key(option): self.telnet_opt_dict[option] = TelnetOption() self.telnet_opt_dict[option].remote_option = state
[ "def", "_note_remote_option", "(", "self", ",", "option", ",", "state", ")", ":", "if", "not", "self", ".", "telnet_opt_dict", ".", "has_key", "(", "option", ")", ":", "self", ".", "telnet_opt_dict", "[", "option", "]", "=", "TelnetOption", "(", ")", "self", ".", "telnet_opt_dict", "[", "option", "]", ".", "remote_option", "=", "state" ]
Record the status of local negotiated Telnet options.
[ "Record", "the", "status", "of", "local", "negotiated", "Telnet", "options", "." ]
python
train
55.8
leovt/constructible
constructible.py
https://github.com/leovt/constructible/blob/16fb627c81d15ffd8373397633224f50f047f882/constructible.py#L69-L85
def fsqrt(q): ''' given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one. ''' if q == 0: return q, 1 if q < 0: raise ValueError('math domain error %s' % q) a, b = isqrt(q.numerator) c, d = isqrt(q.denominator) # q == (a/c)**2 * (b/d) == (a/(c*d))**2 * b*d return Fraction(a, c * d), b * d
[ "def", "fsqrt", "(", "q", ")", ":", "if", "q", "==", "0", ":", "return", "q", ",", "1", "if", "q", "<", "0", ":", "raise", "ValueError", "(", "'math domain error %s'", "%", "q", ")", "a", ",", "b", "=", "isqrt", "(", "q", ".", "numerator", ")", "c", ",", "d", "=", "isqrt", "(", "q", ".", "denominator", ")", "# q == (a/c)**2 * (b/d) == (a/(c*d))**2 * b*d", "return", "Fraction", "(", "a", ",", "c", "*", "d", ")", ",", "b", "*", "d" ]
given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one.
[ "given", "a", "non", "-", "negative", "fraction", "q", "return", "a", "pair", "(", "a", "b", ")", "such", "that", "q", "=", "a", "*", "a", "*", "b", "where", "b", "is", "a", "square", "-", "free", "integer", "." ]
python
train
26.823529
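A worked example of the (a, b) decomposition promised by the docstring, q == a*a*b with b square-free:

from fractions import Fraction

print(fsqrt(Fraction(8, 9)))    # (Fraction(2, 3), 2)  since 8/9 == (2/3)**2 * 2
print(fsqrt(Fraction(49, 4)))   # (Fraction(7, 2), 1)  a perfect square gives b == 1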
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1310-L1321
def gmtime_adj_notBefore(self, amount): """ Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None`` """ if not isinstance(amount, int): raise TypeError("amount must be an integer") notBefore = _lib.X509_get_notBefore(self._x509) _lib.X509_gmtime_adj(notBefore, amount)
[ "def", "gmtime_adj_notBefore", "(", "self", ",", "amount", ")", ":", "if", "not", "isinstance", "(", "amount", ",", "int", ")", ":", "raise", "TypeError", "(", "\"amount must be an integer\"", ")", "notBefore", "=", "_lib", ".", "X509_get_notBefore", "(", "self", ".", "_x509", ")", "_lib", ".", "X509_gmtime_adj", "(", "notBefore", ",", "amount", ")" ]
Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None``
[ "Adjust", "the", "timestamp", "on", "which", "the", "certificate", "starts", "being", "valid", "." ]
python
test
36.166667
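The method is normally paired with gmtime_adj_notAfter when building a self-signed certificate, e.g. valid from now and expiring in one year:

from OpenSSL import crypto

cert = crypto.X509()
cert.gmtime_adj_notBefore(0)                   # valid starting immediately
cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)   # offset in seconds: one year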
saltstack/salt
salt/returners/etcd_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/etcd_return.py#L97-L104
def _get_conn(opts, profile=None): ''' Establish a connection to etcd ''' if profile is None: profile = opts.get('etcd.returner') path = opts.get('etcd.returner_root', '/salt/return') return salt.utils.etcd_util.get_conn(opts, profile), path
[ "def", "_get_conn", "(", "opts", ",", "profile", "=", "None", ")", ":", "if", "profile", "is", "None", ":", "profile", "=", "opts", ".", "get", "(", "'etcd.returner'", ")", "path", "=", "opts", ".", "get", "(", "'etcd.returner_root'", ",", "'/salt/return'", ")", "return", "salt", ".", "utils", ".", "etcd_util", ".", "get_conn", "(", "opts", ",", "profile", ")", ",", "path" ]
Establish a connection to etcd
[ "Establish", "a", "connection", "to", "etcd" ]
python
train
33.25
unixfreak0037/officeparser
officeparser.py
https://github.com/unixfreak0037/officeparser/blob/42c2d40372fe271f2039ca1adc145d2aef8c9545/officeparser.py#L247-L261
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
        """Returns the entire contents of a chain starting at the given sector.""" sector = start check = [ sector ] # keep a list of sectors we've already read buffer = StringIO() while sector != ENDOFCHAIN: buffer.write(read_sector_f(sector)) next = read_fat_f(sector) if next in check: logging.error('infinite loop detected at {0} to {1} starting at {2}'.format( sector, next, start)) return buffer.getvalue() check.append(next) sector = next return buffer.getvalue()
[ "def", "__impl_read_chain", "(", "self", ",", "start", ",", "read_sector_f", ",", "read_fat_f", ")", ":", "sector", "=", "start", "check", "=", "[", "sector", "]", "# keep a list of sectors we've already read", "buffer", "=", "StringIO", "(", ")", "while", "sector", "!=", "ENDOFCHAIN", ":", "buffer", ".", "write", "(", "read_sector_f", "(", "sector", ")", ")", "next", "=", "read_fat_f", "(", "sector", ")", "if", "next", "in", "check", ":", "logging", ".", "error", "(", "'infinite loop detected at {0} to {1} starting at {2}'", ".", "format", "(", "sector", ",", "next", ",", "sector_start", ")", ")", "return", "buffer", ".", "getvalue", "(", ")", "check", ".", "append", "(", "next", ")", "sector", "=", "next", "return", "buffer", ".", "getvalue", "(", ")" ]
Returns the entire contents of a chain starting at the given sector.
[ "Returns", "the", "entire", "contents", "of", "a", "chain", "starting", "at", "the", "given", "sector", "." ]
python
train
45.2
secdev/scapy
scapy/fields.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/fields.py#L278-L291
def _iterate_fields_cond(self, pkt, val, use_val): """Internal function used by _find_fld_pkt & _find_fld_pkt_val""" # Iterate through the fields for fld, cond in self.flds: if isinstance(cond, tuple): if use_val: if cond[1](pkt, val): return fld continue else: cond = cond[0] if cond(pkt): return fld return self.dflt
[ "def", "_iterate_fields_cond", "(", "self", ",", "pkt", ",", "val", ",", "use_val", ")", ":", "# Iterate through the fields", "for", "fld", ",", "cond", "in", "self", ".", "flds", ":", "if", "isinstance", "(", "cond", ",", "tuple", ")", ":", "if", "use_val", ":", "if", "cond", "[", "1", "]", "(", "pkt", ",", "val", ")", ":", "return", "fld", "continue", "else", ":", "cond", "=", "cond", "[", "0", "]", "if", "cond", "(", "pkt", ")", ":", "return", "fld", "return", "self", ".", "dflt" ]
Internal function used by _find_fld_pkt & _find_fld_pkt_val
[ "Internal", "function", "used", "by", "_find_fld_pkt", "&", "_find_fld_pkt_val" ]
python
train
35.214286
aliyun/aliyun-odps-python-sdk
odps/models/instance.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/instance.py#L533-L553
def wait_for_success(self, interval=1): """ Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed """ self.wait_for_completion(interval=interval) if not self.is_successful(retry=True): for task_name, task in six.iteritems(self.get_task_statuses()): exc = None if task.status == Instance.Task.TaskStatus.FAILED: exc = errors.parse_instance_error(self.get_task_result(task_name)) elif task.status != Instance.Task.TaskStatus.SUCCESS: exc = errors.ODPSError('%s, status=%s' % (task_name, task.status.value)) if exc: exc.instance_id = self.id raise exc
[ "def", "wait_for_success", "(", "self", ",", "interval", "=", "1", ")", ":", "self", ".", "wait_for_completion", "(", "interval", "=", "interval", ")", "if", "not", "self", ".", "is_successful", "(", "retry", "=", "True", ")", ":", "for", "task_name", ",", "task", "in", "six", ".", "iteritems", "(", "self", ".", "get_task_statuses", "(", ")", ")", ":", "exc", "=", "None", "if", "task", ".", "status", "==", "Instance", ".", "Task", ".", "TaskStatus", ".", "FAILED", ":", "exc", "=", "errors", ".", "parse_instance_error", "(", "self", ".", "get_task_result", "(", "task_name", ")", ")", "elif", "task", ".", "status", "!=", "Instance", ".", "Task", ".", "TaskStatus", ".", "SUCCESS", ":", "exc", "=", "errors", ".", "ODPSError", "(", "'%s, status=%s'", "%", "(", "task_name", ",", "task", ".", "status", ".", "value", ")", ")", "if", "exc", ":", "exc", ".", "instance_id", "=", "self", ".", "id", "raise", "exc" ]
Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed
[ "Wait", "for", "instance", "to", "complete", "and", "check", "if", "the", "instance", "is", "successful", "." ]
python
train
42.142857
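A sketch of the usual call pattern: submit a job asynchronously, then block until it succeeds or the failed task's error is raised. Credentials and endpoint below are placeholders.

from odps import ODPS

o = ODPS('<access-id>', '<secret-key>', project='my_project',
         endpoint='https://service.odps.example.com/api')
instance = o.run_sql('select count(*) from dual')
instance.wait_for_success(interval=2)   # raises ODPSError if any task failed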
saltstack/salt
salt/modules/pyenv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pyenv.py#L281-L307
def do(cmdline=None, runas=None): ''' Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy ''' path = _pyenv_path(runas) cmd_split = cmdline.split() quoted_line = '' for cmd in cmd_split: quoted_line = quoted_line + ' ' + _cmd_quote(cmd) result = __salt__['cmd.run_all']( 'env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line), runas=runas, python_shell=True ) if result['retcode'] == 0: rehash(runas=runas) return result['stdout'] else: return False
[ "def", "do", "(", "cmdline", "=", "None", ",", "runas", "=", "None", ")", ":", "path", "=", "_pyenv_path", "(", "runas", ")", "cmd_split", "=", "cmdline", ".", "split", "(", ")", "quoted_line", "=", "''", "for", "cmd", "in", "cmd_split", ":", "quoted_line", "=", "quoted_line", "+", "' '", "+", "_cmd_quote", "(", "cmd", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'env PATH={0}/shims:$PATH {1}'", ".", "format", "(", "_cmd_quote", "(", "path", ")", ",", "quoted_line", ")", ",", "runas", "=", "runas", ",", "python_shell", "=", "True", ")", "if", "result", "[", "'retcode'", "]", "==", "0", ":", "rehash", "(", "runas", "=", "runas", ")", "return", "result", "[", "'stdout'", "]", "else", ":", "return", "False" ]
Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy
[ "Execute", "a", "python", "command", "with", "pyenv", "s", "shims", "from", "the", "user", "or", "the", "system", "." ]
python
train
25.962963
sammchardy/python-kucoin
kucoin/client.py
https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L161-L184
def _handle_response(response): """Internal helper for handling API responses from the Quoine server. Raises the appropriate exceptions when necessary; otherwise, returns the response. """ if not str(response.status_code).startswith('2'): raise KucoinAPIException(response) try: res = response.json() if 'code' in res and res['code'] != "200000": raise KucoinAPIException(response) if 'success' in res and not res['success']: raise KucoinAPIException(response) # by default return full response # if it's a normal response we have a data attribute, return that if 'data' in res: res = res['data'] return res except ValueError: raise KucoinRequestException('Invalid Response: %s' % response.text)
[ "def", "_handle_response", "(", "response", ")", ":", "if", "not", "str", "(", "response", ".", "status_code", ")", ".", "startswith", "(", "'2'", ")", ":", "raise", "KucoinAPIException", "(", "response", ")", "try", ":", "res", "=", "response", ".", "json", "(", ")", "if", "'code'", "in", "res", "and", "res", "[", "'code'", "]", "!=", "\"200000\"", ":", "raise", "KucoinAPIException", "(", "response", ")", "if", "'success'", "in", "res", "and", "not", "res", "[", "'success'", "]", ":", "raise", "KucoinAPIException", "(", "response", ")", "# by default return full response", "# if it's a normal response we have a data attribute, return that", "if", "'data'", "in", "res", ":", "res", "=", "res", "[", "'data'", "]", "return", "res", "except", "ValueError", ":", "raise", "KucoinRequestException", "(", "'Invalid Response: %s'", "%", "response", ".", "text", ")" ]
Internal helper for handling API responses from the Quoine server. Raises the appropriate exceptions when necessary; otherwise, returns the response.
[ "Internal", "helper", "for", "handling", "API", "responses", "from", "the", "Quoine", "server", ".", "Raises", "the", "appropriate", "exceptions", "when", "necessary", ";", "otherwise", "returns", "the", "response", "." ]
python
train
37
LuminosoInsight/luminoso-api-client-python
luminoso_api/v5_client.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L125-L153
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
[ "def", "connect_with_username_and_password", "(", "cls", ",", "url", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "from", ".", "v4_client", "import", "LuminosoClient", "as", "v4LC", "if", "username", "is", "None", ":", "username", "=", "input", "(", "'Username: '", ")", "v4client", "=", "v4LC", ".", "connect", "(", "url", "=", "url", ",", "username", "=", "username", ",", "password", "=", "password", ")", "if", "url", "is", "None", ":", "url", "=", "'/'", "if", "url", ".", "startswith", "(", "'http'", ")", ":", "root_url", "=", "get_root_url", "(", "url", ")", "else", ":", "url", "=", "URL_BASE", "+", "'/'", "+", "url", ".", "lstrip", "(", "'/'", ")", "root_url", "=", "URL_BASE", "return", "cls", "(", "v4client", ".", "session", ",", "root_url", ")" ]
Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file.
[ "Returns", "an", "object", "that", "makes", "requests", "to", "the", "API", "authenticated", "with", "a", "short", "-", "lived", "token", "retrieved", "from", "username", "and", "password", ".", "If", "username", "or", "password", "is", "not", "supplied", "the", "method", "will", "prompt", "for", "a", "username", "and", "/", "or", "password", "to", "be", "entered", "interactively", "." ]
python
test
40.275862
gitpython-developers/GitPython
git/index/fun.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/fun.py#L181-L226
def read_cache(stream): """Read a cache file from the given stream :return: tuple(version, entries_dict, extension_data, content_sha) * version is the integer version number * entries dict is a dictionary which maps IndexEntry instances to a path at a stage * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes * content_sha is a 20 byte sha on all cache file contents""" version, num_entries = read_header(stream) count = 0 entries = {} read = stream.read tell = stream.tell while count < num_entries: beginoffset = tell() ctime = unpack(">8s", read(8))[0] mtime = unpack(">8s", read(8))[0] (dev, ino, mode, uid, gid, size, sha, flags) = \ unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2)) path_size = flags & CE_NAMEMASK path = read(path_size).decode(defenc) real_size = ((tell() - beginoffset + 8) & ~7) read((beginoffset + real_size) - tell()) entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size)) # entry_key would be the method to use, but we safe the effort entries[(path, entry.stage)] = entry count += 1 # END for each entry # the footer contains extension data and a sha on the content so far # Keep the extension footer,and verify we have a sha in the end # Extension data format is: # 4 bytes ID # 4 bytes length of chunk # repeated 0 - N times extension_data = stream.read(~0) assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size"\ % len(extension_data) content_sha = extension_data[-20:] # truncate the sha in the end as we will dynamically create it anyway extension_data = extension_data[:-20] return (version, entries, extension_data, content_sha)
[ "def", "read_cache", "(", "stream", ")", ":", "version", ",", "num_entries", "=", "read_header", "(", "stream", ")", "count", "=", "0", "entries", "=", "{", "}", "read", "=", "stream", ".", "read", "tell", "=", "stream", ".", "tell", "while", "count", "<", "num_entries", ":", "beginoffset", "=", "tell", "(", ")", "ctime", "=", "unpack", "(", "\">8s\"", ",", "read", "(", "8", ")", ")", "[", "0", "]", "mtime", "=", "unpack", "(", "\">8s\"", ",", "read", "(", "8", ")", ")", "[", "0", "]", "(", "dev", ",", "ino", ",", "mode", ",", "uid", ",", "gid", ",", "size", ",", "sha", ",", "flags", ")", "=", "unpack", "(", "\">LLLLLL20sH\"", ",", "read", "(", "20", "+", "4", "*", "6", "+", "2", ")", ")", "path_size", "=", "flags", "&", "CE_NAMEMASK", "path", "=", "read", "(", "path_size", ")", ".", "decode", "(", "defenc", ")", "real_size", "=", "(", "(", "tell", "(", ")", "-", "beginoffset", "+", "8", ")", "&", "~", "7", ")", "read", "(", "(", "beginoffset", "+", "real_size", ")", "-", "tell", "(", ")", ")", "entry", "=", "IndexEntry", "(", "(", "mode", ",", "sha", ",", "flags", ",", "path", ",", "ctime", ",", "mtime", ",", "dev", ",", "ino", ",", "uid", ",", "gid", ",", "size", ")", ")", "# entry_key would be the method to use, but we safe the effort", "entries", "[", "(", "path", ",", "entry", ".", "stage", ")", "]", "=", "entry", "count", "+=", "1", "# END for each entry", "# the footer contains extension data and a sha on the content so far", "# Keep the extension footer,and verify we have a sha in the end", "# Extension data format is:", "# 4 bytes ID", "# 4 bytes length of chunk", "# repeated 0 - N times", "extension_data", "=", "stream", ".", "read", "(", "~", "0", ")", "assert", "len", "(", "extension_data", ")", ">", "19", ",", "\"Index Footer was not at least a sha on content as it was only %i bytes in size\"", "%", "len", "(", "extension_data", ")", "content_sha", "=", "extension_data", "[", "-", "20", ":", "]", "# truncate the sha in the end as we will dynamically create it anyway", "extension_data", "=", "extension_data", "[", ":", "-", "20", "]", "return", "(", "version", ",", "entries", ",", "extension_data", ",", "content_sha", ")" ]
Read a cache file from the given stream :return: tuple(version, entries_dict, extension_data, content_sha) * version is the integer version number * entries dict is a dictionary which maps IndexEntry instances to a path at a stage * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes * content_sha is a 20 byte sha on all cache file contents
[ "Read", "a", "cache", "file", "from", "the", "given", "stream", ":", "return", ":", "tuple", "(", "version", "entries_dict", "extension_data", "content_sha", ")", "*", "version", "is", "the", "integer", "version", "number", "*", "entries", "dict", "is", "a", "dictionary", "which", "maps", "IndexEntry", "instances", "to", "a", "path", "at", "a", "stage", "*", "extension_data", "is", "or", "4", "bytes", "of", "type", "+", "4", "bytes", "of", "size", "+", "size", "bytes", "*", "content_sha", "is", "a", "20", "byte", "sha", "on", "all", "cache", "file", "contents" ]
python
train
40.76087
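A low-level sketch that feeds a repository's index file straight into the helper; it assumes a plain (non-split) index and that the working directory is the repository root.

from git.index.fun import read_cache

with open('.git/index', 'rb') as stream:
    version, entries, extension_data, content_sha = read_cache(stream)

print(version, len(entries))
for (path, stage), entry in list(entries.items())[:5]:
    print(stage, entry.hexsha, path)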
sanger-pathogens/ariba
ariba/assembly_compare.py
https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L97-L111
def _nucmer_hits_to_assembly_coords(nucmer_hits): '''Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene''' coords = {} for l in nucmer_hits.values(): for hit in l: if hit.qry_name not in coords: coords[hit.qry_name] = [] coords[hit.qry_name].append(hit.qry_coords()) for scaff in coords: pyfastaq.intervals.merge_overlapping_in_list(coords[scaff]) return coords
[ "def", "_nucmer_hits_to_assembly_coords", "(", "nucmer_hits", ")", ":", "coords", "=", "{", "}", "for", "l", "in", "nucmer_hits", ".", "values", "(", ")", ":", "for", "hit", "in", "l", ":", "if", "hit", ".", "qry_name", "not", "in", "coords", ":", "coords", "[", "hit", ".", "qry_name", "]", "=", "[", "]", "coords", "[", "hit", ".", "qry_name", "]", ".", "append", "(", "hit", ".", "qry_coords", "(", ")", ")", "for", "scaff", "in", "coords", ":", "pyfastaq", ".", "intervals", ".", "merge_overlapping_in_list", "(", "coords", "[", "scaff", "]", ")", "return", "coords" ]
Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene
[ "Input", "is", "hits", "made", "by", "self", ".", "_parse_nucmer_coords_file", ".", "Returns", "dictionary", ".", "key", "=", "contig", "name", ".", "Value", "=", "list", "of", "coords", "that", "match", "to", "the", "reference", "gene" ]
python
train
39
chrislit/abydos
abydos/compression/_bwt.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/compression/_bwt.py#L45-L90
def encode(self, word, terminator='\0'): r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa' """ if word: if terminator in word: raise ValueError( 'Specified terminator, {}, already in word.'.format( terminator if terminator != '\0' else '\\0' ) ) else: word += terminator wordlist = sorted( word[i:] + word[:i] for i in range(len(word)) ) return ''.join([w[-1] for w in wordlist]) else: return terminator
[ "def", "encode", "(", "self", ",", "word", ",", "terminator", "=", "'\\0'", ")", ":", "if", "word", ":", "if", "terminator", "in", "word", ":", "raise", "ValueError", "(", "'Specified terminator, {}, already in word.'", ".", "format", "(", "terminator", "if", "terminator", "!=", "'\\0'", "else", "'\\\\0'", ")", ")", "else", ":", "word", "+=", "terminator", "wordlist", "=", "sorted", "(", "word", "[", "i", ":", "]", "+", "word", "[", ":", "i", "]", "for", "i", "in", "range", "(", "len", "(", "word", ")", ")", ")", "return", "''", ".", "join", "(", "[", "w", "[", "-", "1", "]", "for", "w", "in", "wordlist", "]", ")", "else", ":", "return", "terminator" ]
r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa'
[ "r", "Return", "the", "Burrows", "-", "Wheeler", "transformed", "form", "of", "a", "word", "." ]
python
valid
26.173913
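The docstring's 'banana' example can be reproduced by hand: build every rotation of the terminated word, sort them, and read off the last column.

word = 'banana' + '\0'
rotations = sorted(word[i:] + word[:i] for i in range(len(word)))
encoded = ''.join(r[-1] for r in rotations)
print(repr(encoded))   # 'annb\x00aa', matching BWT().encode('banana')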
has2k1/plotnine
plotnine/facets/layout.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/layout.py#L161-L183
def setup_panel_params(self, coord): """ Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate """ if not self.panel_scales_x: raise PlotnineError('Missing an x scale') if not self.panel_scales_y: raise PlotnineError('Missing a y scale') self.panel_params = [] cols = ['SCALE_X', 'SCALE_Y'] for i, j in self.layout[cols].itertuples(index=False): i, j = i-1, j-1 params = coord.setup_panel_params( self.panel_scales_x[i], self.panel_scales_y[j]) self.panel_params.append(params)
[ "def", "setup_panel_params", "(", "self", ",", "coord", ")", ":", "if", "not", "self", ".", "panel_scales_x", ":", "raise", "PlotnineError", "(", "'Missing an x scale'", ")", "if", "not", "self", ".", "panel_scales_y", ":", "raise", "PlotnineError", "(", "'Missing a y scale'", ")", "self", ".", "panel_params", "=", "[", "]", "cols", "=", "[", "'SCALE_X'", ",", "'SCALE_Y'", "]", "for", "i", ",", "j", "in", "self", ".", "layout", "[", "cols", "]", ".", "itertuples", "(", "index", "=", "False", ")", ":", "i", ",", "j", "=", "i", "-", "1", ",", "j", "-", "1", "params", "=", "coord", ".", "setup_panel_params", "(", "self", ".", "panel_scales_x", "[", "i", "]", ",", "self", ".", "panel_scales_y", "[", "j", "]", ")", "self", ".", "panel_params", ".", "append", "(", "params", ")" ]
Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate
[ "Calculate", "the", "x", "&", "y", "range", "&", "breaks", "information", "for", "each", "panel" ]
python
train
30.652174
woolfson-group/isambard
isambard/add_ons/filesystem.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/add_ons/filesystem.py#L131-L179
def fastas(self, download=False): """ Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file. """ fastas_dict = {} fasta_dir = os.path.join(self.parent_dir, 'fasta') if not os.path.exists(fasta_dir): os.makedirs(fasta_dir) for i, mmol_file in self.mmols.items(): mmol_name = os.path.basename(mmol_file) fasta_file_name = '{0}.fasta'.format(mmol_name) fasta_file = os.path.join(fasta_dir, fasta_file_name) if not os.path.exists(fasta_file): if download: pdb_url = "http://www.rcsb.org/pdb/files/fasta.txt?structureIdList={0}".format(self.code.upper()) r = requests.get(pdb_url) if r.status_code == 200: fasta_string = r.text else: fasta_string = None else: a = convert_pdb_to_ampal(mmol_file) # take first object if AmpalContainer (i.e. NMR structure). if type(a) == AmpalContainer: a = a[0] fasta_string = a.fasta with open(fasta_file, 'w') as foo: foo.write(fasta_string) fastas_dict[i] = fasta_file return fastas_dict
[ "def", "fastas", "(", "self", ",", "download", "=", "False", ")", ":", "fastas_dict", "=", "{", "}", "fasta_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "parent_dir", ",", "'fasta'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fasta_dir", ")", ":", "os", ".", "makedirs", "(", "fasta_dir", ")", "for", "i", ",", "mmol_file", "in", "self", ".", "mmols", ".", "items", "(", ")", ":", "mmol_name", "=", "os", ".", "path", ".", "basename", "(", "mmol_file", ")", "fasta_file_name", "=", "'{0}.fasta'", ".", "format", "(", "mmol_name", ")", "fasta_file", "=", "os", ".", "path", ".", "join", "(", "fasta_dir", ",", "fasta_file_name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fasta_file", ")", ":", "if", "download", ":", "pdb_url", "=", "\"http://www.rcsb.org/pdb/files/fasta.txt?structureIdList={0}\"", ".", "format", "(", "self", ".", "code", ".", "upper", "(", ")", ")", "r", "=", "requests", ".", "get", "(", "pdb_url", ")", "if", "r", ".", "status_code", "==", "200", ":", "fasta_string", "=", "r", ".", "text", "else", ":", "fasta_string", "=", "None", "else", ":", "a", "=", "convert_pdb_to_ampal", "(", "mmol_file", ")", "# take first object if AmpalContainer (i.e. NMR structure).", "if", "type", "(", "a", ")", "==", "AmpalContainer", ":", "a", "=", "a", "[", "0", "]", "fasta_string", "=", "a", ".", "fasta", "with", "open", "(", "fasta_file", ",", "'w'", ")", "as", "foo", ":", "foo", ".", "write", "(", "fasta_string", ")", "fastas_dict", "[", "i", "]", "=", "fasta_file", "return", "fastas_dict" ]
Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file.
[ "Dict", "of", "filepaths", "for", "all", "fasta", "files", "associated", "with", "code", "." ]
python
train
40.061224
bitesofcode/projexui
projexui/widgets/xnavigationedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnavigationedit.py#L446-L466
def keyPressEvent( self, event ): """ Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent> """ if ( self.scrollWidget().isHidden() ): if ( event.key() == Qt.Key_Escape ): self.cancelEdit() return elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ): self.acceptEdit() return elif ( event.key() == Qt.Key_A and event.modifiers() == Qt.ControlModifier ): self.startEdit() super(XNavigationEdit, self).keyPressEvent(event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "if", "(", "self", ".", "scrollWidget", "(", ")", ".", "isHidden", "(", ")", ")", ":", "if", "(", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Escape", ")", ":", "self", ".", "cancelEdit", "(", ")", "return", "elif", "(", "event", ".", "key", "(", ")", "in", "(", "Qt", ".", "Key_Return", ",", "Qt", ".", "Key_Enter", ")", ")", ":", "self", ".", "acceptEdit", "(", ")", "return", "elif", "(", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_A", "and", "event", ".", "modifiers", "(", ")", "==", "Qt", ".", "ControlModifier", ")", ":", "self", ".", "startEdit", "(", ")", "super", "(", "XNavigationEdit", ",", "self", ")", ".", "keyPressEvent", "(", "event", ")" ]
Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent>
[ "Overloads", "the", "key", "press", "event", "to", "listen", "for", "escape", "calls", "to", "cancel", "the", "parts", "editing", ".", ":", "param", "event", "|", "<QKeyPressEvent", ">" ]
python
train
33.238095
4degrees/clique
source/clique/collection.py
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L231-L283
def format(self, pattern='{head}{padding}{tail} [{ranges}]'): '''Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes. ''' data = {} data['head'] = self.head data['tail'] = self.tail if self.padding: data['padding'] = '%0{0}d'.format(self.padding) else: data['padding'] = '%d' if '{holes}' in pattern: data['holes'] = self.holes().format('{ranges}') if '{range}' in pattern or '{ranges}' in pattern: indexes = list(self.indexes) indexes_count = len(indexes) if indexes_count == 0: data['range'] = '' elif indexes_count == 1: data['range'] = '{0}'.format(indexes[0]) else: data['range'] = '{0}-{1}'.format( indexes[0], indexes[-1] ) if '{ranges}' in pattern: separated = self.separate() if len(separated) > 1: ranges = [collection.format('{range}') for collection in separated] else: ranges = [data['range']] data['ranges'] = ', '.join(ranges) return pattern.format(**data)
[ "def", "format", "(", "self", ",", "pattern", "=", "'{head}{padding}{tail} [{ranges}]'", ")", ":", "data", "=", "{", "}", "data", "[", "'head'", "]", "=", "self", ".", "head", "data", "[", "'tail'", "]", "=", "self", ".", "tail", "if", "self", ".", "padding", ":", "data", "[", "'padding'", "]", "=", "'%0{0}d'", ".", "format", "(", "self", ".", "padding", ")", "else", ":", "data", "[", "'padding'", "]", "=", "'%d'", "if", "'{holes}'", "in", "pattern", ":", "data", "[", "'holes'", "]", "=", "self", ".", "holes", "(", ")", ".", "format", "(", "'{ranges}'", ")", "if", "'{range}'", "in", "pattern", "or", "'{ranges}'", "in", "pattern", ":", "indexes", "=", "list", "(", "self", ".", "indexes", ")", "indexes_count", "=", "len", "(", "indexes", ")", "if", "indexes_count", "==", "0", ":", "data", "[", "'range'", "]", "=", "''", "elif", "indexes_count", "==", "1", ":", "data", "[", "'range'", "]", "=", "'{0}'", ".", "format", "(", "indexes", "[", "0", "]", ")", "else", ":", "data", "[", "'range'", "]", "=", "'{0}-{1}'", ".", "format", "(", "indexes", "[", "0", "]", ",", "indexes", "[", "-", "1", "]", ")", "if", "'{ranges}'", "in", "pattern", ":", "separated", "=", "self", ".", "separate", "(", ")", "if", "len", "(", "separated", ")", ">", "1", ":", "ranges", "=", "[", "collection", ".", "format", "(", "'{range}'", ")", "for", "collection", "in", "separated", "]", "else", ":", "ranges", "=", "[", "data", "[", "'range'", "]", "]", "data", "[", "'ranges'", "]", "=", "', '", ".", "join", "(", "ranges", ")", "return", "pattern", ".", "format", "(", "*", "*", "data", ")" ]
Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes.
[ "Return", "string", "representation", "as", "specified", "by", "*", "pattern", "*", "." ]
python
train
32.660377
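A hedged usage sketch: clique.assemble is the usual way to obtain Collection objects whose format() output uses the pattern keys documented above.

import clique

collections, _remainder = clique.assemble(
    ['frame.0001.exr', 'frame.0002.exr', 'frame.0004.exr']
)
for collection in collections:
    print(collection.format())   # e.g. 'frame.%04d.exr [1-2, 4]'
    print(collection.format('{head}{padding}{tail} missing: {holes}'))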
graphistry/pygraphistry
graphistry/plotter.py
https://github.com/graphistry/pygraphistry/blob/3dfc50e60232c6f5fedd6e5fa9d3048b606944b8/graphistry/plotter.py#L233-L246
def graph(self, ig): """Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter. """ res = copy.copy(self) res._edges = ig res._nodes = None return res
[ "def", "graph", "(", "self", ",", "ig", ")", ":", "res", "=", "copy", ".", "copy", "(", "self", ")", "res", ".", "_edges", "=", "ig", "res", ".", "_nodes", "=", "None", "return", "res" ]
Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter.
[ "Specify", "the", "node", "and", "edge", "data", "." ]
python
train
23.214286
batiste/django-page-cms
pages/templatetags/pages_tags.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/templatetags/pages_tags.py#L118-L130
def pages_siblings_menu(context, page, url='/'): """Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore. """ lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE) page = get_page_from_string_or_id(page, lang) if page: siblings = page.get_siblings() context.update({'children': siblings, 'page': page}) return context
[ "def", "pages_siblings_menu", "(", "context", ",", "page", ",", "url", "=", "'/'", ")", ":", "lang", "=", "context", ".", "get", "(", "'lang'", ",", "pages_settings", ".", "PAGE_DEFAULT_LANGUAGE", ")", "page", "=", "get_page_from_string_or_id", "(", "page", ",", "lang", ")", "if", "page", ":", "siblings", "=", "page", ".", "get_siblings", "(", ")", "context", ".", "update", "(", "{", "'children'", ":", "siblings", ",", "'page'", ":", "page", "}", ")", "return", "context" ]
Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore.
[ "Get", "the", "parent", "page", "of", "the", "given", "page", "and", "render", "a", "nested", "list", "of", "its", "child", "pages", ".", "Good", "for", "rendering", "a", "secondary", "menu", "." ]
python
train
39.769231