Dataset schema: repo (string, 7-54 chars), path (string, 4-192 chars), url (string, 87-284 chars), code (string, 78-104k chars), code_tokens (list), docstring (string, 1-46.9k chars), docstring_tokens (list), language (1 class), partition (3 values).
phodge/homely
homely/_cli.py
https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_cli.py#L146-L174
def forget(identifier):
    '''
    Tells homely to forget about a dotfiles repository that was previously
    added. You can then run `homely update` to have homely perform automatic
    cleanup of anything that was installed by that dotfiles repo.

    REPO
    This should be the path to a local dotfiles repository that has already
    been registered using `homely add`. You may specify multiple REPOs to
    remove at once.
    '''
    errors = False
    for one in identifier:
        cfg = RepoListConfig()
        info = cfg.find_by_any(one, "ilc")
        if not info:
            warn("No repos matching %r" % one)
            errors = True
            continue
        # update the config ...
        note("Removing record of repo [%s] at %s" % (
            info.shortid(), info.localrepo.repo_path))
        with saveconfig(RepoListConfig()) as cfg:
            cfg.remove_repo(info.repoid)
    # if there were errors, then don't try and do an update
    if errors:
        sys.exit(1)
[ "def", "forget", "(", "identifier", ")", ":", "errors", "=", "False", "for", "one", "in", "identifier", ":", "cfg", "=", "RepoListConfig", "(", ")", "info", "=", "cfg", ".", "find_by_any", "(", "one", ",", "\"ilc\"", ")", "if", "not", "info", ":", "warn", "(", "\"No repos matching %r\"", "%", "one", ")", "errors", "=", "True", "continue", "# update the config ...", "note", "(", "\"Removing record of repo [%s] at %s\"", "%", "(", "info", ".", "shortid", "(", ")", ",", "info", ".", "localrepo", ".", "repo_path", ")", ")", "with", "saveconfig", "(", "RepoListConfig", "(", ")", ")", "as", "cfg", ":", "cfg", ".", "remove_repo", "(", "info", ".", "repoid", ")", "# if there were errors, then don't try and do an update", "if", "errors", ":", "sys", ".", "exit", "(", "1", ")" ]
Tells homely to forget about a dotfiles repository that was previously
added. You can then run `homely update` to have homely perform automatic
cleanup of anything that was installed by that dotfiles repo.

REPO
This should be the path to a local dotfiles repository that has already been
registered using `homely add`. You may specify multiple REPOs to remove at
once.
[ "Tells", "homely", "to", "forget", "about", "a", "dotfiles", "repository", "that", "was", "previously", "added", ".", "You", "can", "then", "run", "homely", "update", "to", "have", "homely", "perform", "automatic", "cleanup", "of", "anything", "that", "was", "installed", "by", "that", "dotfiles", "repo", "." ]
python
train
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L755-L874
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView)
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list)
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None)
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False)
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents(
            [filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
[ "def", "get_content", "(", "service_instance", ",", "obj_type", ",", "property_list", "=", "None", ",", "container_ref", "=", "None", ",", "traversal_spec", "=", "None", ",", "local_properties", "=", "False", ")", ":", "# Start at the rootFolder if container starting point not specified", "if", "not", "container_ref", ":", "container_ref", "=", "get_root_folder", "(", "service_instance", ")", "# By default, the object reference used as the starting poing for the filter", "# is the container_ref passed in the function", "obj_ref", "=", "container_ref", "local_traversal_spec", "=", "False", "if", "not", "traversal_spec", "and", "not", "local_properties", ":", "local_traversal_spec", "=", "True", "# We don't have a specific traversal spec override so we are going to", "# get everything using a container view", "try", ":", "obj_ref", "=", "service_instance", ".", "content", ".", "viewManager", ".", "CreateContainerView", "(", "container_ref", ",", "[", "obj_type", "]", ",", "True", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "# Create 'Traverse All' traversal spec to determine the path for", "# collection", "traversal_spec", "=", "vmodl", ".", "query", ".", "PropertyCollector", ".", "TraversalSpec", "(", "name", "=", "'traverseEntities'", ",", "path", "=", "'view'", ",", "skip", "=", "False", ",", "type", "=", "vim", ".", "view", ".", "ContainerView", ")", "# Create property spec to determine properties to be retrieved", "property_spec", "=", "vmodl", ".", "query", ".", "PropertyCollector", ".", "PropertySpec", "(", "type", "=", "obj_type", ",", "all", "=", "True", "if", "not", "property_list", "else", "False", ",", "pathSet", "=", "property_list", ")", "# Create object spec to navigate content", "obj_spec", "=", "vmodl", ".", "query", ".", "PropertyCollector", ".", "ObjectSpec", "(", "obj", "=", "obj_ref", ",", "skip", "=", "True", "if", "not", "local_properties", "else", "False", ",", "selectSet", "=", "[", "traversal_spec", "]", "if", "not", "local_properties", "else", "None", ")", "# Create a filter spec and specify object, property spec in it", "filter_spec", "=", "vmodl", ".", "query", ".", "PropertyCollector", ".", "FilterSpec", "(", "objectSet", "=", "[", "obj_spec", "]", ",", "propSet", "=", "[", "property_spec", "]", ",", "reportMissingObjectsInResults", "=", "False", ")", "# Retrieve the contents", "try", ":", "content", "=", "service_instance", ".", "content", ".", "propertyCollector", ".", "RetrieveContents", "(", "[", "filter_spec", "]", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. 
Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "# Destroy the object view", "if", "local_traversal_spec", ":", "try", ":", "obj_ref", ".", "Destroy", "(", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "return", "content" ]
Returns the content of the specified type of object for a Service Instance.

For more information, please see:
http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

service_instance
    The Service Instance from which to obtain content.

obj_type
    The type of content to obtain.

property_list
    An optional list of object properties used to return even more filtered
    content results.

container_ref
    An optional reference to the managed object to search under. Can either
    be an object of type Folder, Datacenter, ComputeResource, Resource Pool
    or HostSystem. If not specified, default behaviour is to search under
    the inventory rootFolder.

traversal_spec
    An optional TraversalSpec to be used instead of the standard
    ``Traverse All`` spec.

local_properties
    Flag specifying whether the properties to be retrieved are local to the
    container. If that is the case, the traversal spec needs to be None.
[ "Returns", "the", "content", "of", "the", "specified", "type", "of", "object", "for", "a", "Service", "Instance", "." ]
python
train
sdcooke/django_bundles
django_bundles/core.py
https://github.com/sdcooke/django_bundles/blob/2810fc455ec7391283792c1f108f4e8340f5d12f/django_bundles/core.py#L221-L235
def get_bundle_versions():
    """
    Used to cache the bundle versions rather than loading them from the
    bundle versions file every time they're used
    """
    global _cached_versions
    if not bundles_settings.BUNDLES_VERSION_FILE:
        _cached_versions = {}
    if _cached_versions is None:
        locs = {}
        try:
            execfile(bundles_settings.BUNDLES_VERSION_FILE, locs)
            _cached_versions = locs['BUNDLES_VERSIONS']
        except IOError:
            _cached_versions = {}
    return _cached_versions
[ "def", "get_bundle_versions", "(", ")", ":", "global", "_cached_versions", "if", "not", "bundles_settings", ".", "BUNDLES_VERSION_FILE", ":", "_cached_versions", "=", "{", "}", "if", "_cached_versions", "is", "None", ":", "locs", "=", "{", "}", "try", ":", "execfile", "(", "bundles_settings", ".", "BUNDLES_VERSION_FILE", ",", "locs", ")", "_cached_versions", "=", "locs", "[", "'BUNDLES_VERSIONS'", "]", "except", "IOError", ":", "_cached_versions", "=", "{", "}", "return", "_cached_versions" ]
Used to cache the bundle versions rather than loading them from the bundle versions file every time they're used
[ "Used", "to", "cache", "the", "bundle", "versions", "rather", "than", "loading", "them", "from", "the", "bundle", "versions", "file", "every", "time", "they", "re", "used" ]
python
train
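The function above relies on Python 2's `execfile`, which no longer exists in Python 3. A minimal sketch of the same module-level caching pattern in Python 3; the file path and variable name here are hypothetical stand-ins, not the django_bundles API:

_cached_versions = None

def get_bundle_versions(version_file='bundle_versions.py'):  # hypothetical path
    """Load BUNDLES_VERSIONS once and reuse the cached dict afterwards."""
    global _cached_versions
    if _cached_versions is None:
        locs = {}
        try:
            # Python 3 replacement for execfile()
            with open(version_file) as fh:
                exec(fh.read(), locs)
            _cached_versions = locs['BUNDLES_VERSIONS']
        except IOError:
            _cached_versions = {}
    return _cached_versions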
emlazzarin/acrylic
acrylic/datatable.py
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L579-L623
def join(self, right_table, on=None, right_prefix='R.', outer=False):
    """
    Inner-joins another DataTable to this one using `on` (iterable of join
    keys). If two tables share columns other than the join keys, appends
    right_prefix to the right table's column name. If `on` is not provided,
    performs a 'natural join' using all columns of the same name.
    """
    if on is None:
        # if no 'on', perform natural join
        on = list(set(self.fields).intersection(set(right_table.fields)))
    if isinstance(on, basestring):
        on = [on]

    def get_join_key(row):
        return tuple(row[header] for header in on)

    keymap = defaultdict(list)
    for right_row in right_table:
        keymap[get_join_key(right_row)].append(right_row)

    new_table = []
    for left_row in self:
        left_key = get_join_key(left_row)
        if left_key in keymap:
            left_dict = dict(left_row.items())
            for right_row in keymap[left_key]:
                left_dict_copy = left_dict.copy()
                for field, val in right_row.items():
                    if field in on:
                        continue
                    elif field in left_row:
                        left_dict_copy[right_prefix + field] = val
                    else:
                        left_dict_copy[field] = val
                new_table.append(left_dict_copy)
        elif outer:
            left_dict_copy = dict(left_row.items()).copy()
            for field in right_table.fields:
                if field in on:
                    continue
                elif field in left_row:
                    left_dict_copy[right_prefix + field] = None
                else:
                    left_dict_copy[field] = None
            new_table.append(left_dict_copy)
    return DataTable(new_table)
[ "def", "join", "(", "self", ",", "right_table", ",", "on", "=", "None", ",", "right_prefix", "=", "'R.'", ",", "outer", "=", "False", ")", ":", "if", "on", "is", "None", ":", "# if no 'on', perform natural join", "on", "=", "list", "(", "set", "(", "self", ".", "fields", ")", ".", "intersection", "(", "set", "(", "right_table", ".", "fields", ")", ")", ")", "if", "isinstance", "(", "on", ",", "basestring", ")", ":", "on", "=", "[", "on", "]", "def", "get_join_key", "(", "row", ")", ":", "return", "tuple", "(", "row", "[", "header", "]", "for", "header", "in", "on", ")", "keymap", "=", "defaultdict", "(", "list", ")", "for", "right_row", "in", "right_table", ":", "keymap", "[", "get_join_key", "(", "right_row", ")", "]", ".", "append", "(", "right_row", ")", "new_table", "=", "[", "]", "for", "left_row", "in", "self", ":", "left_key", "=", "get_join_key", "(", "left_row", ")", "if", "left_key", "in", "keymap", ":", "left_dict", "=", "dict", "(", "left_row", ".", "items", "(", ")", ")", "for", "right_row", "in", "keymap", "[", "left_key", "]", ":", "left_dict_copy", "=", "left_dict", ".", "copy", "(", ")", "for", "field", ",", "val", "in", "right_row", ".", "items", "(", ")", ":", "if", "field", "in", "on", ":", "continue", "elif", "field", "in", "left_row", ":", "left_dict_copy", "[", "right_prefix", "+", "field", "]", "=", "val", "else", ":", "left_dict_copy", "[", "field", "]", "=", "val", "new_table", ".", "append", "(", "left_dict_copy", ")", "elif", "outer", ":", "left_dict_copy", "=", "dict", "(", "left_row", ".", "items", "(", ")", ")", ".", "copy", "(", ")", "for", "field", "in", "right_table", ".", "fields", ":", "if", "field", "in", "on", ":", "continue", "elif", "field", "in", "left_row", ":", "left_dict_copy", "[", "right_prefix", "+", "field", "]", "=", "None", "else", ":", "left_dict_copy", "[", "field", "]", "=", "None", "new_table", ".", "append", "(", "left_dict_copy", ")", "return", "DataTable", "(", "new_table", ")" ]
Inner-joins another DataTable to this one using `on` (iterable of join keys). If two tables share columns other than the join keys, appends right_prefix to the right table's column name. If `on` is not provided, performs a 'natural join' using all columns of the same name.
[ "Inner", "-", "joins", "another", "DataTable", "to", "this", "one", "using", "on", "(", "iterable", "of", "join", "keys", ")", ".", "If", "two", "tables", "share", "columns", "other", "than", "the", "join", "keys", "appends", "right_prefix", "to", "the", "right", "table", "s", "column", "name", ".", "If", "on", "is", "not", "provided", "performs", "a", "natural", "join", "using", "all", "columns", "of", "the", "same", "name", "." ]
python
train
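The hash-join pattern used above (build a key-to-rows map from the right table, then probe it with each left row) can be demonstrated with plain dicts. This standalone sketch is illustrative only and is not the acrylic API:

from collections import defaultdict

left = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
right = [{'id': 1, 'score': 10}, {'id': 1, 'score': 12}]

# Build phase: index right rows by join key
keymap = defaultdict(list)
for row in right:
    keymap[row['id']].append(row)

# Probe phase: each left row pairs with every matching right row
joined = [dict(l, **{k: v for k, v in r.items() if k != 'id'})
          for l in left for r in keymap.get(l['id'], [])]
print(joined)  # [{'id': 1, 'name': 'a', 'score': 10}, {'id': 1, 'name': 'a', 'score': 12}]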
crytic/slither
slither/printers/summary/require_calls.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/printers/summary/require_calls.py#L25-L42
def output(self, _filename):
    """
    _filename is not used

    Args:
        _filename(string)
    """
    for contract in self.slither.contracts_derived:
        txt = "\nContract %s" % contract.name
        table = PrettyTable(["Function", "require or assert"])
        for function in contract.functions:
            require = function.all_slithir_operations()
            require = [ir for ir in require if isinstance(ir, SolidityCall)
                       and ir.function in require_or_assert]
            require = [ir.node for ir in require]
            table.add_row([function.name,
                           self._convert([str(m.expression) for m in set(require)])])
        txt += "\n" + str(table)
        self.info(txt)
[ "def", "output", "(", "self", ",", "_filename", ")", ":", "for", "contract", "in", "self", ".", "slither", ".", "contracts_derived", ":", "txt", "=", "\"\\nContract %s\"", "%", "contract", ".", "name", "table", "=", "PrettyTable", "(", "[", "\"Function\"", ",", "\"require or assert\"", "]", ")", "for", "function", "in", "contract", ".", "functions", ":", "require", "=", "function", ".", "all_slithir_operations", "(", ")", "require", "=", "[", "ir", "for", "ir", "in", "require", "if", "isinstance", "(", "ir", ",", "SolidityCall", ")", "and", "ir", ".", "function", "in", "require_or_assert", "]", "require", "=", "[", "ir", ".", "node", "for", "ir", "in", "require", "]", "table", ".", "add_row", "(", "[", "function", ".", "name", ",", "self", ".", "_convert", "(", "[", "str", "(", "m", ".", "expression", ")", "for", "m", "in", "set", "(", "require", ")", "]", ")", "]", ")", "txt", "+=", "\"\\n\"", "+", "str", "(", "table", ")", "self", ".", "info", "(", "txt", ")" ]
_filename is not used

Args:
    _filename(string)
[ "_filename", "is", "not", "used", "Args", ":", "_filename", "(", "string", ")" ]
python
train
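For reference, the table construction in this printer follows PrettyTable's standard pattern; a minimal self-contained sketch with made-up row values:

from prettytable import PrettyTable  # pip install prettytable

table = PrettyTable(["Function", "require or assert"])
table.add_row(["transfer", "require(balance >= amount)"])
table.add_row(["mint", "assert(totalSupply + amount > totalSupply)"])
print(table)  # renders an ASCII table with the two columns above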
yougov/pmxbot
pmxbot/irc.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/irc.py#L79-L85
def _get_wrapper():
    """
    Get a socket wrapper based on SSL config.
    """
    if not pmxbot.config.get('use_ssl', False):
        return lambda x: x
    return importlib.import_module('ssl').wrap_socket
[ "def", "_get_wrapper", "(", ")", ":", "if", "not", "pmxbot", ".", "config", ".", "get", "(", "'use_ssl'", ",", "False", ")", ":", "return", "lambda", "x", ":", "x", "return", "importlib", ".", "import_module", "(", "'ssl'", ")", ".", "wrap_socket" ]
Get a socket wrapper based on SSL config.
[ "Get", "a", "socket", "wrapper", "based", "on", "SSL", "config", "." ]
python
train
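Note that module-level `ssl.wrap_socket` was deprecated in Python 3.7 and removed in 3.12. A sketch of the same pass-through-or-wrap pattern using `ssl.SSLContext`; the `config` dict here is a stand-in for `pmxbot.config`, not pmxbot's actual code:

import ssl

config = {'use_ssl': True}  # stand-in for pmxbot.config

def get_wrapper():
    """Return a callable that wraps a socket in TLS, or passes it through."""
    if not config.get('use_ssl', False):
        return lambda sock: sock
    context = ssl.create_default_context()
    # client-side TLS needs the server hostname for certificate checks
    return lambda sock, hostname: context.wrap_socket(sock, server_hostname=hostname)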
openego/ding0
ding0/core/network/grids.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/grids.py#L856-L863
def add_station(self, lv_station):
    """Adds a LV station to _station and grid graph if not already existing"""
    if not isinstance(lv_station, LVStationDing0):
        raise Exception('Given LV station is not a LVStationDing0 object.')
    if self._station is None:
        self._station = lv_station
        self.graph_add_node(lv_station)
        self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station)
[ "def", "add_station", "(", "self", ",", "lv_station", ")", ":", "if", "not", "isinstance", "(", "lv_station", ",", "LVStationDing0", ")", ":", "raise", "Exception", "(", "'Given LV station is not a LVStationDing0 object.'", ")", "if", "self", ".", "_station", "is", "None", ":", "self", ".", "_station", "=", "lv_station", "self", ".", "graph_add_node", "(", "lv_station", ")", "self", ".", "grid_district", ".", "lv_load_area", ".", "mv_grid_district", ".", "mv_grid", ".", "graph_add_node", "(", "lv_station", ")" ]
Adds a LV station to _station and grid graph if not already existing
[ "Adds", "a", "LV", "station", "to", "_station", "and", "grid", "graph", "if", "not", "already", "existing" ]
python
train
miniconfig/python-openevse-wifi
openevsewifi/__init__.py
https://github.com/miniconfig/python-openevse-wifi/blob/42fabeae052a9f82092fa9220201413732e38bb4/openevsewifi/__init__.py#L92-L97
def getServiceLevel(self):
    """Returns the service level"""
    command = '$GE'
    settings = self.sendCommand(command)
    flags = int(settings[2], 16)
    return (flags & 0x0001) + 1
[ "def", "getServiceLevel", "(", "self", ")", ":", "command", "=", "'$GE'", "settings", "=", "self", ".", "sendCommand", "(", "command", ")", "flags", "=", "int", "(", "settings", "[", "2", "]", ",", "16", ")", "return", "(", "flags", "&", "0x0001", ")", "+", "1" ]
Returns the service level
[ "Returns", "the", "service", "level" ]
python
train
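The service level is read from bit 0 of a hex flag word returned by the charger. A worked example of that bit arithmetic; the flag value is made up for illustration:

flags = int('0021', 16)       # 0x0021 -> binary 0000 0000 0010 0001
level = (flags & 0x0001) + 1  # bit 0 is set, so level = 1 + 1 = 2
print(level)                  # 2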
locationlabs/gusset
gusset/colortable.py
https://github.com/locationlabs/gusset/blob/c2e6a58e9eec5a00da2231c1e3e7247c65263edf/gusset/colortable.py#L59-L64
def separator(self):
    """
    Generate a separator row using current column widths.
    """
    cells = dict([(column, "-" * self.column_widths[column]) for column in self.columns])
    return ColorRow(self, **cells)
[ "def", "separator", "(", "self", ")", ":", "cells", "=", "dict", "(", "[", "(", "column", ",", "\"-\"", "*", "self", ".", "column_widths", "[", "column", "]", ")", "for", "column", "in", "self", ".", "columns", "]", ")", "return", "ColorRow", "(", "self", ",", "*", "*", "cells", ")" ]
Generate a separator row using current column widths.
[ "Generate", "a", "separator", "row", "using", "current", "column", "widths", "." ]
python
train
BoboTiG/python-mss
mss/darwin.py
https://github.com/BoboTiG/python-mss/blob/56347f781edb38a0e7a5104080bd683f49c6f074/mss/darwin.py#L175-L230
def grab(self, monitor):
    # type: (Monitor) -> ScreenShot
    """
    See :meth:`MSSMixin.grab <mss.base.MSSMixin.grab>` for full details.
    """
    # pylint: disable=too-many-locals

    # Convert PIL bbox style
    if isinstance(monitor, tuple):
        monitor = {
            "left": monitor[0],
            "top": monitor[1],
            "width": monitor[2] - monitor[0],
            "height": monitor[3] - monitor[1],
        }

    core = self.core
    rect = CGRect(
        (monitor["left"], monitor["top"]), (monitor["width"], monitor["height"])
    )

    image_ref = core.CGWindowListCreateImage(rect, 1, 0, 0)
    if not image_ref:
        raise ScreenShotError("CoreGraphics.CGWindowListCreateImage() failed.")

    width = int(core.CGImageGetWidth(image_ref))
    height = int(core.CGImageGetHeight(image_ref))
    prov = copy_data = None
    try:
        prov = core.CGImageGetDataProvider(image_ref)
        copy_data = core.CGDataProviderCopyData(prov)
        data_ref = core.CFDataGetBytePtr(copy_data)
        buf_len = core.CFDataGetLength(copy_data)
        raw = ctypes.cast(data_ref, ctypes.POINTER(ctypes.c_ubyte * buf_len))
        data = bytearray(raw.contents)

        # Remove padding per row
        bytes_per_row = int(core.CGImageGetBytesPerRow(image_ref))
        bytes_per_pixel = int(core.CGImageGetBitsPerPixel(image_ref))
        bytes_per_pixel = (bytes_per_pixel + 7) // 8

        if bytes_per_pixel * width != bytes_per_row:
            cropped = bytearray()
            for row in range(height):
                start = row * bytes_per_row
                end = start + width * bytes_per_pixel
                cropped.extend(data[start:end])
            data = cropped
    finally:
        if prov:
            core.CGDataProviderRelease(prov)
        if copy_data:
            core.CFRelease(copy_data)

    return self.cls_image(data, monitor, size=Size(width, height))
[ "def", "grab", "(", "self", ",", "monitor", ")", ":", "# type: (Monitor) -> ScreenShot", "# pylint: disable=too-many-locals", "# Convert PIL bbox style", "if", "isinstance", "(", "monitor", ",", "tuple", ")", ":", "monitor", "=", "{", "\"left\"", ":", "monitor", "[", "0", "]", ",", "\"top\"", ":", "monitor", "[", "1", "]", ",", "\"width\"", ":", "monitor", "[", "2", "]", "-", "monitor", "[", "0", "]", ",", "\"height\"", ":", "monitor", "[", "3", "]", "-", "monitor", "[", "1", "]", ",", "}", "core", "=", "self", ".", "core", "rect", "=", "CGRect", "(", "(", "monitor", "[", "\"left\"", "]", ",", "monitor", "[", "\"top\"", "]", ")", ",", "(", "monitor", "[", "\"width\"", "]", ",", "monitor", "[", "\"height\"", "]", ")", ")", "image_ref", "=", "core", ".", "CGWindowListCreateImage", "(", "rect", ",", "1", ",", "0", ",", "0", ")", "if", "not", "image_ref", ":", "raise", "ScreenShotError", "(", "\"CoreGraphics.CGWindowListCreateImage() failed.\"", ")", "width", "=", "int", "(", "core", ".", "CGImageGetWidth", "(", "image_ref", ")", ")", "height", "=", "int", "(", "core", ".", "CGImageGetHeight", "(", "image_ref", ")", ")", "prov", "=", "copy_data", "=", "None", "try", ":", "prov", "=", "core", ".", "CGImageGetDataProvider", "(", "image_ref", ")", "copy_data", "=", "core", ".", "CGDataProviderCopyData", "(", "prov", ")", "data_ref", "=", "core", ".", "CFDataGetBytePtr", "(", "copy_data", ")", "buf_len", "=", "core", ".", "CFDataGetLength", "(", "copy_data", ")", "raw", "=", "ctypes", ".", "cast", "(", "data_ref", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_ubyte", "*", "buf_len", ")", ")", "data", "=", "bytearray", "(", "raw", ".", "contents", ")", "# Remove padding per row", "bytes_per_row", "=", "int", "(", "core", ".", "CGImageGetBytesPerRow", "(", "image_ref", ")", ")", "bytes_per_pixel", "=", "int", "(", "core", ".", "CGImageGetBitsPerPixel", "(", "image_ref", ")", ")", "bytes_per_pixel", "=", "(", "bytes_per_pixel", "+", "7", ")", "//", "8", "if", "bytes_per_pixel", "*", "width", "!=", "bytes_per_row", ":", "cropped", "=", "bytearray", "(", ")", "for", "row", "in", "range", "(", "height", ")", ":", "start", "=", "row", "*", "bytes_per_row", "end", "=", "start", "+", "width", "*", "bytes_per_pixel", "cropped", ".", "extend", "(", "data", "[", "start", ":", "end", "]", ")", "data", "=", "cropped", "finally", ":", "if", "prov", ":", "core", ".", "CGDataProviderRelease", "(", "prov", ")", "if", "copy_data", ":", "core", ".", "CFRelease", "(", "copy_data", ")", "return", "self", ".", "cls_image", "(", "data", ",", "monitor", ",", "size", "=", "Size", "(", "width", ",", "height", ")", ")" ]
See :meth:`MSSMixin.grab <mss.base.MSSMixin.grab>` for full details.
[ "See", ":", "meth", ":", "MSSMixin", ".", "grab", "<mss", ".", "base", ".", "MSSMixin", ".", "grab", ">", "for", "full", "details", "." ]
python
train
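The row-padding removal in `grab` is a generic stride-cropping step: when the backing buffer's bytes-per-row exceeds width times bytes-per-pixel, the tail of each row is padding and must be sliced off. A standalone sketch on synthetic data:

width, height = 3, 2
bytes_per_pixel = 1
bytes_per_row = 4  # one padding byte per row

# Two rows of pixels a,b,c and d,e,f, each padded with b'\x00'
data = bytearray(b'abc\x00def\x00')

cropped = bytearray()
for row in range(height):
    start = row * bytes_per_row
    cropped.extend(data[start:start + width * bytes_per_pixel])
print(bytes(cropped))  # b'abcdef'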
nwilming/ocupy
ocupy/utils.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L130-L148
def dict_fun(data, function):
    """
    Apply a function to all values in a dictionary, return a dictionary with
    results.

    Parameters
    ----------
    data : dict
        a dictionary whose values are adequate input to the second argument
        of this function.
    function : function
        a function that takes one argument

    Returns
    -------
    a dictionary with the same keys as data, such that
    result[key] = function(data[key])
    """
    return dict((k, function(v)) for k, v in list(data.items()))
[ "def", "dict_fun", "(", "data", ",", "function", ")", ":", "return", "dict", "(", "(", "k", ",", "function", "(", "v", ")", ")", "for", "k", ",", "v", "in", "list", "(", "data", ".", "items", "(", ")", ")", ")" ]
Apply a function to all values in a dictionary, return a dictionary with
results.

Parameters
----------
data : dict
    a dictionary whose values are adequate input to the second argument of
    this function.
function : function
    a function that takes one argument

Returns
-------
a dictionary with the same keys as data, such that
result[key] = function(data[key])
[ "Apply", "a", "function", "to", "all", "values", "in", "a", "dictionary", "return", "a", "dictionary", "with", "results", "." ]
python
train
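A quick usage example of the mapping helper above, with the one-line body inlined so the snippet is self-contained; the sample data is made up:

def dict_fun(data, function):
    return dict((k, function(v)) for k, v in list(data.items()))

data = {'fix_durations': [200, 250, 180], 'saccade_lengths': [3, 4]}
print(dict_fun(data, len))  # {'fix_durations': 3, 'saccade_lengths': 2}
print(dict_fun(data, sum))  # {'fix_durations': 630, 'saccade_lengths': 7}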
openstack/python-saharaclient
saharaclient/api/clusters.py
https://github.com/openstack/python-saharaclient/blob/c53831d686d9e94187ce5dfdbfa43883b792280e/saharaclient/api/clusters.py#L157-L176
def create(self, name, plugin_name, plugin_version,
           cluster_template_id=None, default_image_id=None,
           is_transient=None, description=None, cluster_configs=None,
           node_groups=None, user_keypair_id=None,
           anti_affinity=None, net_id=None, count=None,
           use_autoconfig=None, shares=None,
           is_public=None, is_protected=None):
    """Launch a Cluster."""
    data = {
        'name': name,
        'plugin_name': plugin_name,
        'plugin_version': plugin_version,
    }

    return self._do_create(data, cluster_template_id, default_image_id,
                           is_transient, description, cluster_configs,
                           node_groups, user_keypair_id, anti_affinity,
                           net_id, count, use_autoconfig, shares,
                           is_public, is_protected, api_ver=2)
[ "def", "create", "(", "self", ",", "name", ",", "plugin_name", ",", "plugin_version", ",", "cluster_template_id", "=", "None", ",", "default_image_id", "=", "None", ",", "is_transient", "=", "None", ",", "description", "=", "None", ",", "cluster_configs", "=", "None", ",", "node_groups", "=", "None", ",", "user_keypair_id", "=", "None", ",", "anti_affinity", "=", "None", ",", "net_id", "=", "None", ",", "count", "=", "None", ",", "use_autoconfig", "=", "None", ",", "shares", "=", "None", ",", "is_public", "=", "None", ",", "is_protected", "=", "None", ")", ":", "data", "=", "{", "'name'", ":", "name", ",", "'plugin_name'", ":", "plugin_name", ",", "'plugin_version'", ":", "plugin_version", ",", "}", "return", "self", ".", "_do_create", "(", "data", ",", "cluster_template_id", ",", "default_image_id", ",", "is_transient", ",", "description", ",", "cluster_configs", ",", "node_groups", ",", "user_keypair_id", ",", "anti_affinity", ",", "net_id", ",", "count", ",", "use_autoconfig", ",", "shares", ",", "is_public", ",", "is_protected", ",", "api_ver", "=", "2", ")" ]
Launch a Cluster.
[ "Launch", "a", "Cluster", "." ]
python
train
Legobot/Legobot
Legobot/Connectors/Slack.py
https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Slack.py#L428-L487
def handle(self, message):
    '''Attempts to send a message to the specified destination in Slack.
    Extends Legobot.Lego.handle()

    Args:
        message (Legobot.Message): message w/ metadata to send.
    '''
    logger.debug(message)
    if Utilities.isNotEmpty(message['metadata']['opts']):
        target = message['metadata']['opts']['target']
        thread = message['metadata']['opts'].get('thread')
        # pattern = re.compile('@([a-zA-Z0-9._-]+)')
        pattern = re.compile('^@([a-zA-Z0-9._-]+)|\s@([a-zA-Z0-9._-]+)')
        matches = re.findall(pattern, message['text'])
        matches = set(matches)
        logger.debug('MATCHES!!!! {}'.format(matches))
        for match in matches:
            if isinstance(match, tuple):
                if match[0] != '':
                    match = match[0]
                else:
                    match = match[1]
            if not match.startswith('@'):
                match = '@' + match
            message['text'] = message['text'].replace(
                match, '<{}>'.format(match))
        pattern = re.compile('#([A-Za-z0-9-]+)')
        matches = re.findall(pattern, message['text'])
        matches = set(matches)
        for match in matches:
            channel_id = self.botThread.get_channel_id_by_name(match)
            if channel_id:
                message['text'] = message['text'].replace(
                    '#' + match, '<#{}|{}>'.format(channel_id, match))
        if (message['text'].find('<<@') != -1
                or message['text'].find('<<#') != -1):
            message['text'] = message['text'].replace('<<', '<')
            message['text'] = message['text'].replace('>>', '>')
        if target.startswith('U'):
            target = self.botThread.get_dm_channel(target)
        attachment = message['metadata']['opts'].get('attachment')
        if attachment:
            text = message['metadata']['opts'].get('fallback')
            attachment = self.build_attachment(
                text, target, attachment, thread)
            self.botThread.post_attachment(attachment)
        else:
            self.botThread.slack_client.rtm_send_message(
                target, message['text'], thread=thread)
[ "def", "handle", "(", "self", ",", "message", ")", ":", "logger", ".", "debug", "(", "message", ")", "if", "Utilities", ".", "isNotEmpty", "(", "message", "[", "'metadata'", "]", "[", "'opts'", "]", ")", ":", "target", "=", "message", "[", "'metadata'", "]", "[", "'opts'", "]", "[", "'target'", "]", "thread", "=", "message", "[", "'metadata'", "]", "[", "'opts'", "]", ".", "get", "(", "'thread'", ")", "# pattern = re.compile('@([a-zA-Z0-9._-]+)')", "pattern", "=", "re", ".", "compile", "(", "'^@([a-zA-Z0-9._-]+)|\\s@([a-zA-Z0-9._-]+)'", ")", "matches", "=", "re", ".", "findall", "(", "pattern", ",", "message", "[", "'text'", "]", ")", "matches", "=", "set", "(", "matches", ")", "logger", ".", "debug", "(", "'MATCHES!!!! {}'", ".", "format", "(", "matches", ")", ")", "for", "match", "in", "matches", ":", "if", "isinstance", "(", "match", ",", "tuple", ")", ":", "if", "match", "[", "0", "]", "!=", "''", ":", "match", "=", "match", "[", "0", "]", "else", ":", "match", "=", "match", "[", "1", "]", "if", "not", "match", ".", "startswith", "(", "'@'", ")", ":", "match", "=", "'@'", "+", "match", "message", "[", "'text'", "]", "=", "message", "[", "'text'", "]", ".", "replace", "(", "match", ",", "'<{}>'", ".", "format", "(", "match", ")", ")", "pattern", "=", "re", ".", "compile", "(", "'#([A-Za-z0-9-]+)'", ")", "matches", "=", "re", ".", "findall", "(", "pattern", ",", "message", "[", "'text'", "]", ")", "matches", "=", "set", "(", "matches", ")", "for", "match", "in", "matches", ":", "channel_id", "=", "self", ".", "botThread", ".", "get_channel_id_by_name", "(", "match", ")", "if", "channel_id", ":", "message", "[", "'text'", "]", "=", "message", "[", "'text'", "]", ".", "replace", "(", "'#'", "+", "match", ",", "'<#{}|{}>'", ".", "format", "(", "channel_id", ",", "match", ")", ")", "if", "(", "message", "[", "'text'", "]", ".", "find", "(", "'<<@'", ")", "!=", "-", "1", "or", "message", "[", "'text'", "]", ".", "find", "(", "'<<#'", ")", "!=", "-", "1", ")", ":", "message", "[", "'text'", "]", "=", "message", "[", "'text'", "]", ".", "replace", "(", "'<<'", ",", "'<'", ")", "message", "[", "'text'", "]", "=", "message", "[", "'text'", "]", ".", "replace", "(", "'>>'", ",", "'>'", ")", "if", "target", ".", "startswith", "(", "'U'", ")", ":", "target", "=", "self", ".", "botThread", ".", "get_dm_channel", "(", "target", ")", "attachment", "=", "message", "[", "'metadata'", "]", "[", "'opts'", "]", ".", "get", "(", "'attachment'", ")", "if", "attachment", ":", "text", "=", "message", "[", "'metadata'", "]", "[", "'opts'", "]", ".", "get", "(", "'fallback'", ")", "attachment", "=", "self", ".", "build_attachment", "(", "text", ",", "target", ",", "attachment", ",", "thread", ")", "self", ".", "botThread", ".", "post_attachment", "(", "attachment", ")", "else", ":", "self", ".", "botThread", ".", "slack_client", ".", "rtm_send_message", "(", "target", ",", "message", "[", "'text'", "]", ",", "thread", "=", "thread", ")" ]
Attempts to send a message to the specified destination in Slack.
Extends Legobot.Lego.handle()

Args:
    message (Legobot.Message): message w/ metadata to send.
[ "Attempts", "to", "send", "a", "message", "to", "the", "specified", "destination", "in", "Slack", ".", "Extends", "Legobot", ".", "Lego", ".", "handle", "()" ]
python
train
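Because the @-mention pattern above has two capture groups joined by `|`, `re.findall` returns 2-tuples with one empty slot, which is why `handle` unpacks tuples. A worked example with a made-up message:

import re

pattern = re.compile(r'^@([a-zA-Z0-9._-]+)|\s@([a-zA-Z0-9._-]+)')
matches = re.findall(pattern, '@alice ping @bob')
print(matches)  # [('alice', ''), ('', 'bob')]

# Pick whichever group actually matched
names = [a or b for a, b in matches]
print(names)  # ['alice', 'bob']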
lingthio/Flask-User
flask_user/db_manager.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/db_manager.py#L99-L105
def add_user(self, **kwargs):
    """Add a User object, with properties specified in ``**kwargs``."""
    user = self.UserClass(**kwargs)
    if hasattr(user, 'active'):
        user.active = True
    self.db_adapter.add_object(user)
    return user
[ "def", "add_user", "(", "self", ",", "*", "*", "kwargs", ")", ":", "user", "=", "self", ".", "UserClass", "(", "*", "*", "kwargs", ")", "if", "hasattr", "(", "user", ",", "'active'", ")", ":", "user", ".", "active", "=", "True", "self", ".", "db_adapter", ".", "add_object", "(", "user", ")", "return", "user" ]
Add a User object, with properties specified in ``**kwargs``.
[ "Add", "a", "User", "object", "with", "properties", "specified", "in", "**", "kwargs", "." ]
python
train
maxpumperla/elephas
elephas/worker.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/worker.py#L26-L49
def train(self, data_iterator):
    """Train a keras model on a worker
    """
    optimizer = get_optimizer(self.master_optimizer)
    self.model = model_from_yaml(self.yaml, self.custom_objects)
    self.model.compile(optimizer=optimizer,
                       loss=self.master_loss, metrics=self.master_metrics)
    self.model.set_weights(self.parameters.value)

    feature_iterator, label_iterator = tee(data_iterator, 2)
    x_train = np.asarray([x for x, y in feature_iterator])
    y_train = np.asarray([y for x, y in label_iterator])

    self.model.compile(optimizer=self.master_optimizer,
                       loss=self.master_loss,
                       metrics=self.master_metrics)

    weights_before_training = self.model.get_weights()
    if x_train.shape[0] > self.train_config.get('batch_size'):
        self.model.fit(x_train, y_train, **self.train_config)
    weights_after_training = self.model.get_weights()
    deltas = subtract_params(
        weights_before_training, weights_after_training)
    yield deltas
[ "def", "train", "(", "self", ",", "data_iterator", ")", ":", "optimizer", "=", "get_optimizer", "(", "self", ".", "master_optimizer", ")", "self", ".", "model", "=", "model_from_yaml", "(", "self", ".", "yaml", ",", "self", ".", "custom_objects", ")", "self", ".", "model", ".", "compile", "(", "optimizer", "=", "optimizer", ",", "loss", "=", "self", ".", "master_loss", ",", "metrics", "=", "self", ".", "master_metrics", ")", "self", ".", "model", ".", "set_weights", "(", "self", ".", "parameters", ".", "value", ")", "feature_iterator", ",", "label_iterator", "=", "tee", "(", "data_iterator", ",", "2", ")", "x_train", "=", "np", ".", "asarray", "(", "[", "x", "for", "x", ",", "y", "in", "feature_iterator", "]", ")", "y_train", "=", "np", ".", "asarray", "(", "[", "y", "for", "x", ",", "y", "in", "label_iterator", "]", ")", "self", ".", "model", ".", "compile", "(", "optimizer", "=", "self", ".", "master_optimizer", ",", "loss", "=", "self", ".", "master_loss", ",", "metrics", "=", "self", ".", "master_metrics", ")", "weights_before_training", "=", "self", ".", "model", ".", "get_weights", "(", ")", "if", "x_train", ".", "shape", "[", "0", "]", ">", "self", ".", "train_config", ".", "get", "(", "'batch_size'", ")", ":", "self", ".", "model", ".", "fit", "(", "x_train", ",", "y_train", ",", "*", "*", "self", ".", "train_config", ")", "weights_after_training", "=", "self", ".", "model", ".", "get_weights", "(", ")", "deltas", "=", "subtract_params", "(", "weights_before_training", ",", "weights_after_training", ")", "yield", "deltas" ]
Train a keras model on a worker
[ "Train", "a", "keras", "model", "on", "a", "worker" ]
python
train
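`train` splits one iterator of (x, y) pairs into two passes with `itertools.tee`, one for features and one for labels. A minimal demonstration of that idiom on made-up data:

from itertools import tee

pairs = iter([(1, 'a'), (2, 'b'), (3, 'c')])
features, labels = tee(pairs, 2)  # two independent iterators over the same stream

xs = [x for x, y in features]
ys = [y for x, y in labels]
print(xs, ys)  # [1, 2, 3] ['a', 'b', 'c']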
pygobject/pgi
pgi/util.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/util.py#L134-L139
def lookup_name_slow(self, name):
    """Returns a struct if one exists"""
    for index in xrange(self.__get_count_cached()):
        if self.__get_name_cached(index) == name:
            return self.__get_info_cached(index)
[ "def", "lookup_name_slow", "(", "self", ",", "name", ")", ":", "for", "index", "in", "xrange", "(", "self", ".", "__get_count_cached", "(", ")", ")", ":", "if", "self", ".", "__get_name_cached", "(", "index", ")", "==", "name", ":", "return", "self", ".", "__get_info_cached", "(", "index", ")" ]
Returns a struct if one exists
[ "Returns", "a", "struct", "if", "one", "exists" ]
python
train
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L945-L959
def parse_at_element(
        self,
        element,  # type: ET.Element
        state  # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse the provided element as a dictionary."""
    parsed_dict = {}

    for child in self._child_processors:
        state.push_location(child.element_path)
        parsed_dict[child.alias] = child.parse_from_parent(element, state)
        state.pop_location()

    return parsed_dict
[ "def", "parse_at_element", "(", "self", ",", "element", ",", "# type: ET.Element", "state", "# type: _ProcessorState", ")", ":", "# type: (...) -> Any", "parsed_dict", "=", "{", "}", "for", "child", "in", "self", ".", "_child_processors", ":", "state", ".", "push_location", "(", "child", ".", "element_path", ")", "parsed_dict", "[", "child", ".", "alias", "]", "=", "child", ".", "parse_from_parent", "(", "element", ",", "state", ")", "state", ".", "pop_location", "(", ")", "return", "parsed_dict" ]
Parse the provided element as a dictionary.
[ "Parse", "the", "provided", "element", "as", "a", "dictionary", "." ]
python
train
oceanprotocol/squid-py
squid_py/ddo/public_key_rsa.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_rsa.py#L27-L32
def set_encode_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64):
    """Set the value based on the type of encoding supported by RSA."""
    if store_type == PUBLIC_KEY_STORE_TYPE_PEM:
        PublicKeyBase.set_encode_key_value(self, value.exportKey('PEM').decode(), store_type)
    else:
        PublicKeyBase.set_encode_key_value(self, value.exportKey('DER'), store_type)
[ "def", "set_encode_key_value", "(", "self", ",", "value", ",", "store_type", "=", "PUBLIC_KEY_STORE_TYPE_BASE64", ")", ":", "if", "store_type", "==", "PUBLIC_KEY_STORE_TYPE_PEM", ":", "PublicKeyBase", ".", "set_encode_key_value", "(", "self", ",", "value", ".", "exportKey", "(", "'PEM'", ")", ".", "decode", "(", ")", ",", "store_type", ")", "else", ":", "PublicKeyBase", ".", "set_encode_key_value", "(", "self", ",", "value", ".", "exportKey", "(", "'DER'", ")", ",", "store_type", ")" ]
Set the value based on the type of encoding supported by RSA.
[ "Set", "the", "value", "based", "on", "the", "type", "of", "encoding", "supported", "by", "RSA", "." ]
python
train
psd-tools/psd-tools
src/psd_tools/api/shape.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/shape.py#L220-L223
def line_alignment(self):
    """Alignment, one of `inner`, `outer`, `center`."""
    key = self._data.get(b'strokeStyleLineAlignment').enum
    return self.STROKE_STYLE_LINE_ALIGNMENTS.get(key, str(key))
[ "def", "line_alignment", "(", "self", ")", ":", "key", "=", "self", ".", "_data", ".", "get", "(", "b'strokeStyleLineAlignment'", ")", ".", "enum", "return", "self", ".", "STROKE_STYLE_LINE_ALIGNMENTS", ".", "get", "(", "key", ",", "str", "(", "key", ")", ")" ]
Alignment, one of `inner`, `outer`, `center`.
[ "Alignment", "one", "of", "inner", "outer", "center", "." ]
python
train
bitesofcode/projexui
projexui/xsettings.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L508-L517
def childKeys(self):
    """
    Returns the list of child keys for this settings instance.

    :return     [<str>, ..]
    """
    if self._customFormat:
        return self._customFormat.childKeys()
    else:
        return super(XSettings, self).childKeys()
[ "def", "childKeys", "(", "self", ")", ":", "if", "self", ".", "_customFormat", ":", "return", "self", ".", "_customFormat", ".", "childKeys", "(", ")", "else", ":", "return", "super", "(", "XSettings", ",", "self", ")", ".", "childKeys", "(", ")" ]
Returns the list of child keys for this settings instance.

:return     [<str>, ..]
[ "Returns", "the", "list", "of", "child", "keys", "for", "this", "settings", "instance", ".", ":", "return", "[", "<str", ">", "..", "]" ]
python
train
PyPSA/PyPSA
pypsa/graph.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/graph.py#L92-L155
def adjacency_matrix(network, branch_components=None, busorder=None,
                     weights=None):
    """
    Construct a sparse adjacency matrix (directed)

    Parameters
    ----------
    branch_components : iterable sublist of `branch_components`
        Buses connected by any of the selected branches are adjacent
        (default: branch_components (network) or passive_branch_components
        (sub_network))
    busorder : pd.Index subset of network.buses.index
        Basis to use for the matrix representation of the adjacency matrix
        (default: buses.index (network) or buses_i() (sub_network))
    weights : pd.Series or None (default)
        If given must provide a weight for each branch, multi-indexed on
        branch_component name and branch name.

    Returns
    -------
    adjacency_matrix : sp.sparse.coo_matrix
        Directed adjacency matrix
    """
    from . import components

    if isinstance(network, components.Network):
        if branch_components is None:
            branch_components = network.branch_components
        if busorder is None:
            busorder = network.buses.index
    elif isinstance(network, components.SubNetwork):
        if branch_components is None:
            branch_components = network.network.passive_branch_components
        if busorder is None:
            busorder = network.buses_i()
    else:
        raise TypeError(" must be called with a Network or a SubNetwork")

    no_buses = len(busorder)
    no_branches = 0
    bus0_inds = []
    bus1_inds = []
    weight_vals = []
    for c in network.iterate_components(branch_components):
        if c.ind is None:
            sel = slice(None)
            no_branches = len(c.df)
        else:
            sel = c.ind
            no_branches = len(c.ind)
        bus0_inds.append(busorder.get_indexer(c.df.loc[sel, "bus0"]))
        bus1_inds.append(busorder.get_indexer(c.df.loc[sel, "bus1"]))
        weight_vals.append(np.ones(no_branches)
                           if weights is None
                           else weights[c.name][sel].values)

    if no_branches == 0:
        return sp.sparse.coo_matrix((no_buses, no_buses))

    bus0_inds = np.concatenate(bus0_inds)
    bus1_inds = np.concatenate(bus1_inds)
    weight_vals = np.concatenate(weight_vals)

    return sp.sparse.coo_matrix((weight_vals, (bus0_inds, bus1_inds)),
                                shape=(no_buses, no_buses))
[ "def", "adjacency_matrix", "(", "network", ",", "branch_components", "=", "None", ",", "busorder", "=", "None", ",", "weights", "=", "None", ")", ":", "from", ".", "import", "components", "if", "isinstance", "(", "network", ",", "components", ".", "Network", ")", ":", "if", "branch_components", "is", "None", ":", "branch_components", "=", "network", ".", "branch_components", "if", "busorder", "is", "None", ":", "busorder", "=", "network", ".", "buses", ".", "index", "elif", "isinstance", "(", "network", ",", "components", ".", "SubNetwork", ")", ":", "if", "branch_components", "is", "None", ":", "branch_components", "=", "network", ".", "network", ".", "passive_branch_components", "if", "busorder", "is", "None", ":", "busorder", "=", "network", ".", "buses_i", "(", ")", "else", ":", "raise", "TypeError", "(", "\" must be called with a Network or a SubNetwork\"", ")", "no_buses", "=", "len", "(", "busorder", ")", "no_branches", "=", "0", "bus0_inds", "=", "[", "]", "bus1_inds", "=", "[", "]", "weight_vals", "=", "[", "]", "for", "c", "in", "network", ".", "iterate_components", "(", "branch_components", ")", ":", "if", "c", ".", "ind", "is", "None", ":", "sel", "=", "slice", "(", "None", ")", "no_branches", "=", "len", "(", "c", ".", "df", ")", "else", ":", "sel", "=", "c", ".", "ind", "no_branches", "=", "len", "(", "c", ".", "ind", ")", "bus0_inds", ".", "append", "(", "busorder", ".", "get_indexer", "(", "c", ".", "df", ".", "loc", "[", "sel", ",", "\"bus0\"", "]", ")", ")", "bus1_inds", ".", "append", "(", "busorder", ".", "get_indexer", "(", "c", ".", "df", ".", "loc", "[", "sel", ",", "\"bus1\"", "]", ")", ")", "weight_vals", ".", "append", "(", "np", ".", "ones", "(", "no_branches", ")", "if", "weights", "is", "None", "else", "weights", "[", "c", ".", "name", "]", "[", "sel", "]", ".", "values", ")", "if", "no_branches", "==", "0", ":", "return", "sp", ".", "sparse", ".", "coo_matrix", "(", "(", "no_buses", ",", "no_buses", ")", ")", "bus0_inds", "=", "np", ".", "concatenate", "(", "bus0_inds", ")", "bus1_inds", "=", "np", ".", "concatenate", "(", "bus1_inds", ")", "weight_vals", "=", "np", ".", "concatenate", "(", "weight_vals", ")", "return", "sp", ".", "sparse", ".", "coo_matrix", "(", "(", "weight_vals", ",", "(", "bus0_inds", ",", "bus1_inds", ")", ")", ",", "shape", "=", "(", "no_buses", ",", "no_buses", ")", ")" ]
Construct a sparse adjacency matrix (directed)

Parameters
----------
branch_components : iterable sublist of `branch_components`
    Buses connected by any of the selected branches are adjacent
    (default: branch_components (network) or passive_branch_components
    (sub_network))
busorder : pd.Index subset of network.buses.index
    Basis to use for the matrix representation of the adjacency matrix
    (default: buses.index (network) or buses_i() (sub_network))
weights : pd.Series or None (default)
    If given must provide a weight for each branch, multi-indexed on
    branch_component name and branch name.

Returns
-------
adjacency_matrix : sp.sparse.coo_matrix
    Directed adjacency matrix
[ "Construct", "a", "sparse", "adjacency", "matrix", "(", "directed", ")" ]
python
train
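The final `coo_matrix((vals, (rows, cols)), shape=...)` call uses SciPy's coordinate-format constructor. A small standalone example of building a directed adjacency matrix this way, with made-up bus indices:

import numpy as np
import scipy.sparse as sp

# Three buses, two directed branches: 0 -> 1 and 2 -> 1
bus0_inds = np.array([0, 2])
bus1_inds = np.array([1, 1])
weights = np.ones(2)

adj = sp.coo_matrix((weights, (bus0_inds, bus1_inds)), shape=(3, 3))
print(adj.toarray())
# [[0. 1. 0.]
#  [0. 0. 0.]
#  [0. 1. 0.]]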
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L173-L198
def __merge_json_values(current, previous):
    """Merges the values between the current and previous run of the script."""
    for value in current:
        name = value['name']

        # Find the previous value
        previous_value = __find_and_remove_value(previous, value)

        if previous_value is not None:
            flags = value['flags']
            previous_flags = previous_value['flags']

            if flags != previous_flags:
                logging.warning(
                    'Flags for %s are different. Using previous value.', name)
                value['flags'] = previous_flags
        else:
            logging.warning('Value %s is a new value', name)

    for value in previous:
        name = value['name']
        logging.warning(
            'Value %s not present in current run. Appending value.', name)
        current.append(value)
[ "def", "__merge_json_values", "(", "current", ",", "previous", ")", ":", "for", "value", "in", "current", ":", "name", "=", "value", "[", "'name'", "]", "# Find the previous value", "previous_value", "=", "__find_and_remove_value", "(", "previous", ",", "value", ")", "if", "previous_value", "is", "not", "None", ":", "flags", "=", "value", "[", "'flags'", "]", "previous_flags", "=", "previous_value", "[", "'flags'", "]", "if", "flags", "!=", "previous_flags", ":", "logging", ".", "warning", "(", "'Flags for %s are different. Using previous value.'", ",", "name", ")", "value", "[", "'flags'", "]", "=", "previous_flags", "else", ":", "logging", ".", "warning", "(", "'Value %s is a new value'", ",", "name", ")", "for", "value", "in", "previous", ":", "name", "=", "value", "[", "'name'", "]", "logging", ".", "warning", "(", "'Value %s not present in current run. Appending value.'", ",", "name", ")", "current", ".", "append", "(", "value", ")" ]
Merges the values between the current and previous run of the script.
[ "Merges", "the", "values", "between", "the", "current", "and", "previous", "run", "of", "the", "script", "." ]
python
train
empymod/empymod
empymod/utils.py
https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/utils.py#L824-L897
def check_opt(opt, loop, ht, htarg, verb):
    r"""Check optimization parameters.

    This check-function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a detailed description
    of the input parameters.

    Parameters
    ----------
    opt : {None, 'parallel'}
        Optimization flag; use ``numexpr`` or not.
    loop : {None, 'freq', 'off'}
        Loop flag.
    ht : str
        Flag to choose the Hankel transform.
    htarg : array_like,
        Depends on the value for ``ht``.
    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    use_ne_eval : bool
        Boolean if to use ``numexpr``.
    loop_freq : bool
        Boolean if to loop over frequencies.
    loop_off : bool
        Boolean if to loop over offsets.

    """
    # Check optimization flag
    use_ne_eval = False
    if opt == 'parallel':
        if numexpr:
            use_ne_eval = numexpr.evaluate
        elif verb > 0:
            print(numexpr_msg)

    # Define if to loop over frequencies or over offsets
    lagged_splined_fht = False
    if ht == 'fht':
        if htarg[1] != 0:
            lagged_splined_fht = True
    if ht in ['hqwe', 'hquad'] or lagged_splined_fht:
        loop_freq = True
        loop_off = False
    else:
        loop_off = loop == 'off'
        loop_freq = loop == 'freq'

    # If verbose, print optimization information
    if verb > 2:
        if use_ne_eval:
            print(" Kernel Opt. : Use parallel")
        else:
            print(" Kernel Opt. : None")

        if loop_off:
            print(" Loop over : Offsets")
        elif loop_freq:
            print(" Loop over : Frequencies")
        else:
            print(" Loop over : None (all vectorized)")

    return use_ne_eval, loop_freq, loop_off
[ "def", "check_opt", "(", "opt", ",", "loop", ",", "ht", ",", "htarg", ",", "verb", ")", ":", "# Check optimization flag", "use_ne_eval", "=", "False", "if", "opt", "==", "'parallel'", ":", "if", "numexpr", ":", "use_ne_eval", "=", "numexpr", ".", "evaluate", "elif", "verb", ">", "0", ":", "print", "(", "numexpr_msg", ")", "# Define if to loop over frequencies or over offsets", "lagged_splined_fht", "=", "False", "if", "ht", "==", "'fht'", ":", "if", "htarg", "[", "1", "]", "!=", "0", ":", "lagged_splined_fht", "=", "True", "if", "ht", "in", "[", "'hqwe'", ",", "'hquad'", "]", "or", "lagged_splined_fht", ":", "loop_freq", "=", "True", "loop_off", "=", "False", "else", ":", "loop_off", "=", "loop", "==", "'off'", "loop_freq", "=", "loop", "==", "'freq'", "# If verbose, print optimization information", "if", "verb", ">", "2", ":", "if", "use_ne_eval", ":", "print", "(", "\" Kernel Opt. : Use parallel\"", ")", "else", ":", "print", "(", "\" Kernel Opt. : None\"", ")", "if", "loop_off", ":", "print", "(", "\" Loop over : Offsets\"", ")", "elif", "loop_freq", ":", "print", "(", "\" Loop over : Frequencies\"", ")", "else", ":", "print", "(", "\" Loop over : None (all vectorized)\"", ")", "return", "use_ne_eval", ",", "loop_freq", ",", "loop_off" ]
r"""Check optimization parameters. This check-function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- opt : {None, 'parallel'} Optimization flag; use ``numexpr`` or not. loop : {None, 'freq', 'off'} Loop flag. ht : str Flag to choose the Hankel transform. htarg : array_like, Depends on the value for ``ht``. verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- use_ne_eval : bool Boolean if to use ``numexpr``. loop_freq : bool Boolean if to loop over frequencies. loop_off : bool Boolean if to loop over offsets.
[ "r", "Check", "optimization", "parameters", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/actions/network.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/actions/network.py#L184-L198
def resolve_remove_symbols(self, r, target_group_ids, rgroups):
    """Resolve the resources security groups that need be modified.

    Specifically handles symbolic names that match annotations from policy
    filters for groups being removed.
    """
    if 'matched' in target_group_ids:
        return r.get('c7n:matched-security-groups', ())
    elif 'network-location' in target_group_ids:
        for reason in r.get('c7n:NetworkLocation', ()):
            if reason['reason'] == 'SecurityGroupMismatch':
                return list(reason['security-groups'])
    elif 'all' in target_group_ids:
        return rgroups
    return target_group_ids
[ "def", "resolve_remove_symbols", "(", "self", ",", "r", ",", "target_group_ids", ",", "rgroups", ")", ":", "if", "'matched'", "in", "target_group_ids", ":", "return", "r", ".", "get", "(", "'c7n:matched-security-groups'", ",", "(", ")", ")", "elif", "'network-location'", "in", "target_group_ids", ":", "for", "reason", "in", "r", ".", "get", "(", "'c7n:NetworkLocation'", ",", "(", ")", ")", ":", "if", "reason", "[", "'reason'", "]", "==", "'SecurityGroupMismatch'", ":", "return", "list", "(", "reason", "[", "'security-groups'", "]", ")", "elif", "'all'", "in", "target_group_ids", ":", "return", "rgroups", "return", "target_group_ids" ]
Resolve the resources security groups that need be modified. Specifically handles symbolic names that match annotations from policy filters for groups being removed.
[ "Resolve", "the", "resources", "security", "groups", "that", "need", "be", "modified", "." ]
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L958-L969
def _format_data(self, name=None): """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = not (self.inferred_type in ('string', 'unicode') or (self.inferred_type == 'categorical' and is_object_dtype(self.categories))) return format_object_summary(self, self._formatter_func, is_justify=is_justify, name=name)
[ "def", "_format_data", "(", "self", ",", "name", "=", "None", ")", ":", "# do we want to justify (only do so for non-objects)", "is_justify", "=", "not", "(", "self", ".", "inferred_type", "in", "(", "'string'", ",", "'unicode'", ")", "or", "(", "self", ".", "inferred_type", "==", "'categorical'", "and", "is_object_dtype", "(", "self", ".", "categories", ")", ")", ")", "return", "format_object_summary", "(", "self", ",", "self", ".", "_formatter_func", ",", "is_justify", "=", "is_justify", ",", "name", "=", "name", ")" ]
Return the formatted data as a unicode string.
[ "Return", "the", "formatted", "data", "as", "a", "unicode", "string", "." ]
python
train
chrislit/abydos
abydos/distance/_eudex.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_eudex.py#L202-L236
def dist(self, src, tar, weights='exponential', max_length=8): """Return normalized distance between the Eudex hashes of two terms. This is Eudex distance normalized to [0, 1]. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison weights : str, iterable, or generator function The weights or weights generator function max_length : int The number of characters to encode as a eudex hash Returns ------- int The normalized Eudex Hamming distance Examples -------- >>> cmp = Eudex() >>> round(cmp.dist('cat', 'hat'), 12) 0.062745098039 >>> round(cmp.dist('Niall', 'Neil'), 12) 0.000980392157 >>> round(cmp.dist('Colin', 'Cuilen'), 12) 0.004901960784 >>> round(cmp.dist('ATCG', 'TAGC'), 12) 0.197549019608 """ return self.dist_abs(src, tar, weights, max_length, True)
[ "def", "dist", "(", "self", ",", "src", ",", "tar", ",", "weights", "=", "'exponential'", ",", "max_length", "=", "8", ")", ":", "return", "self", ".", "dist_abs", "(", "src", ",", "tar", ",", "weights", ",", "max_length", ",", "True", ")" ]
Return normalized distance between the Eudex hashes of two terms. This is Eudex distance normalized to [0, 1]. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison weights : str, iterable, or generator function The weights or weights generator function max_length : int The number of characters to encode as a eudex hash Returns ------- int The normalized Eudex Hamming distance Examples -------- >>> cmp = Eudex() >>> round(cmp.dist('cat', 'hat'), 12) 0.062745098039 >>> round(cmp.dist('Niall', 'Neil'), 12) 0.000980392157 >>> round(cmp.dist('Colin', 'Cuilen'), 12) 0.004901960784 >>> round(cmp.dist('ATCG', 'TAGC'), 12) 0.197549019608
[ "Return", "normalized", "distance", "between", "the", "Eudex", "hashes", "of", "two", "terms", "." ]
python
valid
LionelR/pyair
pyair/stats.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/stats.py#L113-L116
def foex(a, b): """Returns the factor of exceedance """ return (np.sum(a > b, dtype=float) / len(a) - 0.5) * 100
[ "def", "foex", "(", "a", ",", "b", ")", ":", "return", "(", "np", ".", "sum", "(", "a", ">", "b", ",", "dtype", "=", "float", ")", "/", "len", "(", "a", ")", "-", "0.5", ")", "*", "100" ]
Returns the factor of exceedance
[ "Returns", "the", "factor", "of", "exceedance" ]
python
valid
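The factor of exceedance is easy to verify by hand; a minimal check:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 4.0])  # e.g. model predictions
    b = np.array([2.0, 2.0, 2.0, 2.0])  # e.g. observations

    # a > b in 2 of 4 cases, so the exceedance fraction is 0.5 and
    # foex = (0.5 - 0.5) * 100 = 0: no systematic over- or under-prediction.
    print(foex(a, b))  # 0.0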
SCIP-Interfaces/PySCIPOpt
examples/finished/flp.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/flp.py#L11-L46
def flp(I,J,d,M,f,c):
    """flp -- model for the capacitated facility location problem
    Parameters:
        - I: set of customers
        - J: set of facilities
        - d[i]: demand for customer i
        - M[j]: capacity of facility j
        - f[j]: fixed cost for using a facility in point j
        - c[i,j]: unit cost of servicing demand point i from facility j
    Returns a model, ready to be solved.
    """

    model = Model("flp")

    x,y = {},{}
    for j in J:
        y[j] = model.addVar(vtype="B", name="y(%s)"%j)
        for i in I:
            x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j))

    for i in I:
        model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)

    for j in J:
        model.addCons(quicksum(x[i,j] for i in I) <= M[j]*y[j], "Capacity(%s)"%j)

    for (i,j) in x:
        model.addCons(x[i,j] <= d[i]*y[j], "Strong(%s,%s)"%(i,j))

    model.setObjective(
        quicksum(f[j]*y[j] for j in J) +
        quicksum(c[i,j]*x[i,j] for i in I for j in J),
        "minimize")
    model.data = x,y

    return model
[ "def", "flp", "(", "I", ",", "J", ",", "d", ",", "M", ",", "f", ",", "c", ")", ":", "model", "=", "Model", "(", "\"flp\"", ")", "x", ",", "y", "=", "{", "}", ",", "{", "}", "for", "j", "in", "J", ":", "y", "[", "j", "]", "=", "model", ".", "addVar", "(", "vtype", "=", "\"B\"", ",", "name", "=", "\"y(%s)\"", "%", "j", ")", "for", "i", "in", "I", ":", "x", "[", "i", ",", "j", "]", "=", "model", ".", "addVar", "(", "vtype", "=", "\"C\"", ",", "name", "=", "\"x(%s,%s)\"", "%", "(", "i", ",", "j", ")", ")", "for", "i", "in", "I", ":", "model", ".", "addCons", "(", "quicksum", "(", "x", "[", "i", ",", "j", "]", "for", "j", "in", "J", ")", "==", "d", "[", "i", "]", ",", "\"Demand(%s)\"", "%", "i", ")", "for", "j", "in", "M", ":", "model", ".", "addCons", "(", "quicksum", "(", "x", "[", "i", ",", "j", "]", "for", "i", "in", "I", ")", "<=", "M", "[", "j", "]", "*", "y", "[", "j", "]", ",", "\"Capacity(%s)\"", "%", "i", ")", "for", "(", "i", ",", "j", ")", "in", "x", ":", "model", ".", "addCons", "(", "x", "[", "i", ",", "j", "]", "<=", "d", "[", "i", "]", "*", "y", "[", "j", "]", ",", "\"Strong(%s,%s)\"", "%", "(", "i", ",", "j", ")", ")", "model", ".", "setObjective", "(", "quicksum", "(", "f", "[", "j", "]", "*", "y", "[", "j", "]", "for", "j", "in", "J", ")", "+", "quicksum", "(", "c", "[", "i", ",", "j", "]", "*", "x", "[", "i", ",", "j", "]", "for", "i", "in", "I", "for", "j", "in", "J", ")", ",", "\"minimize\"", ")", "model", ".", "data", "=", "x", ",", "y", "return", "model" ]
flp -- model for the capacitated facility location problem Parameters: - I: set of customers - J: set of facilities - d[i]: demand for customer i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved.
[ "flp", "--", "model", "for", "the", "capacitated", "facility", "location", "problem", "Parameters", ":", "-", "I", ":", "set", "of", "customers", "-", "J", ":", "set", "of", "facilities", "-", "d", "[", "i", "]", ":", "demand", "for", "customer", "i", "-", "M", "[", "j", "]", ":", "capacity", "of", "facility", "j", "-", "f", "[", "j", "]", ":", "fixed", "cost", "for", "using", "a", "facility", "in", "point", "j", "-", "c", "[", "i", "j", "]", ":", "unit", "cost", "of", "servicing", "demand", "point", "i", "from", "facility", "j", "Returns", "a", "model", "ready", "to", "be", "solved", "." ]
python
train
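A minimal end-to-end run of the model builder above (a sketch using illustrative data in the spirit of the PySCIPOpt examples; requires a working SCIP installation and assumes `flp` is in scope):

    from pyscipopt import Model, quicksum

    I, d = {1, 2, 3, 4, 5}, {1: 80, 2: 270, 3: 250, 4: 160, 5: 180}  # customers, demands
    J, M = {1, 2, 3}, {1: 500, 2: 500, 3: 500}                       # facilities, capacities
    f = {1: 1000, 2: 1000, 3: 1000}                                  # fixed opening costs
    c = {(1, 1): 4, (1, 2): 6, (1, 3): 9,   # unit service cost per (customer, facility)
         (2, 1): 5, (2, 2): 4, (2, 3): 7,
         (3, 1): 6, (3, 2): 3, (3, 3): 4,
         (4, 1): 8, (4, 2): 5, (4, 3): 3,
         (5, 1): 10, (5, 2): 8, (5, 3): 4}

    model = flp(I, J, d, M, f, c)
    model.optimize()
    x, y = model.data
    print("Optimal cost:", model.getObjVal())
    print("Open facilities:", [j for j in y if model.getVal(y[j]) > 0.5])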
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L117-L130
def get_instance_type(self, port): """Determine the port type based on device owner and vnic type""" if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL: return a_const.BAREMETAL_RESOURCE owner_to_type = { n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE, n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE, trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE} if port['device_owner'] in owner_to_type.keys(): return owner_to_type[port['device_owner']] elif port['device_owner'].startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX): return a_const.VM_RESOURCE return None
[ "def", "get_instance_type", "(", "self", ",", "port", ")", ":", "if", "port", "[", "portbindings", ".", "VNIC_TYPE", "]", "==", "portbindings", ".", "VNIC_BAREMETAL", ":", "return", "a_const", ".", "BAREMETAL_RESOURCE", "owner_to_type", "=", "{", "n_const", ".", "DEVICE_OWNER_DHCP", ":", "a_const", ".", "DHCP_RESOURCE", ",", "n_const", ".", "DEVICE_OWNER_DVR_INTERFACE", ":", "a_const", ".", "ROUTER_RESOURCE", ",", "trunk_consts", ".", "TRUNK_SUBPORT_OWNER", ":", "a_const", ".", "VM_RESOURCE", "}", "if", "port", "[", "'device_owner'", "]", "in", "owner_to_type", ".", "keys", "(", ")", ":", "return", "owner_to_type", "[", "port", "[", "'device_owner'", "]", "]", "elif", "port", "[", "'device_owner'", "]", ".", "startswith", "(", "n_const", ".", "DEVICE_OWNER_COMPUTE_PREFIX", ")", ":", "return", "a_const", ".", "VM_RESOURCE", "return", "None" ]
Determine the port type based on device owner and vnic type
[ "Determine", "the", "port", "type", "based", "on", "device", "owner", "and", "vnic", "type" ]
python
train
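Illustrative inputs for the classification above (the dictionary keys and values shown are, to the best of my knowledge, the standard Neutron constants behind `portbindings` and `n_const`):

    # A Nova compute port with a normal VNIC classifies as a VM resource:
    port = {'binding:vnic_type': 'normal',     # portbindings.VNIC_TYPE
            'device_owner': 'compute:nova'}    # matches DEVICE_OWNER_COMPUTE_PREFIX
    # -> a_const.VM_RESOURCE

    # The VNIC type is checked first, so a baremetal VNIC wins regardless
    # of the device owner:
    port = {'binding:vnic_type': 'baremetal',
            'device_owner': 'compute:nova'}
    # -> a_const.BAREMETAL_RESOURCE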
KungAlex/pbkdf2helper
pbkdf2helper/helper.py
https://github.com/KungAlex/pbkdf2helper/blob/4719878d6f8cada486415b111ed72164b4ac6f57/pbkdf2helper/helper.py#L66-L73
def split(encoded): """ Split a PBKDF2 hashed Password into algorithm, iterations, salt and hash :param encoded: PBKDF2 hashed Password :return: algorithm, iterations, salt, hash """ algorithm, iterations, salt, h = encoded.split('$', 3) return algorithm, iterations, salt, h
[ "def", "split", "(", "encoded", ")", ":", "algorithm", ",", "iterations", ",", "salt", ",", "h", "=", "encoded", ".", "split", "(", "'$'", ",", "3", ")", "return", "algorithm", ",", "iterations", ",", "salt", ",", "h" ]
Split a PBKDF2 hashed Password into algorithm, iterations, salt and hash :param encoded: PBKDF2 hashed Password :return: algorithm, iterations, salt, hash
[ "Split", "a", "PBKDF2", "hashed", "Password", "into", "algorithm", "iterations", "salt", "and", "hash", ":", "param", "encoded", ":", "PBKDF2", "hashed", "Password", ":", "return", ":", "algorithm", "iterations", "salt", "hash" ]
python
train
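Applied to a Django-style PBKDF2 string (the hash below is illustrative, not a real digest), the three-way split yields four fields:

    encoded = 'pbkdf2_sha256$24000$seasalt$c2FtcGxlLWJhc2U2NC1oYXNoLXZhbHVl'
    algorithm, iterations, salt, h = split(encoded)
    # algorithm  -> 'pbkdf2_sha256'
    # iterations -> '24000'   (note: still a string, not an int)
    # salt       -> 'seasalt'
    # h          -> 'c2FtcGxlLWJhc2U2NC1oYXNoLXZhbHVl'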
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L352-L358
def create_equipamento(self): """Get an instance of equipamento services facade.""" return Equipamento( self.networkapi_url, self.user, self.password, self.user_ldap)
[ "def", "create_equipamento", "(", "self", ")", ":", "return", "Equipamento", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of equipamento services facade.
[ "Get", "an", "instance", "of", "equipamento", "services", "facade", "." ]
python
train
vtkiorg/vtki
vtki/renderer.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L61-L126
def add_actor(self, uinput, reset_camera=False, name=None, loc=None, culling=False): """ Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties. """ # Remove actor by that name if present rv = self.remove_actor(name, reset_camera=False) if isinstance(uinput, vtk.vtkMapper): actor = vtk.vtkActor() actor.SetMapper(uinput) else: actor = uinput self.AddActor(actor) actor.renderer = proxy(self) if name is None: name = str(hex(id(actor))) self._actors[name] = actor if reset_camera: self.reset_camera() elif not self.camera_set and reset_camera is None and not rv: self.reset_camera() else: self.parent._render() self.update_bounds_axes() if culling: try: actor.GetProperty().BackfaceCullingOn() except AttributeError: # pragma: no cover pass return actor, actor.GetProperty()
[ "def", "add_actor", "(", "self", ",", "uinput", ",", "reset_camera", "=", "False", ",", "name", "=", "None", ",", "loc", "=", "None", ",", "culling", "=", "False", ")", ":", "# Remove actor by that name if present", "rv", "=", "self", ".", "remove_actor", "(", "name", ",", "reset_camera", "=", "False", ")", "if", "isinstance", "(", "uinput", ",", "vtk", ".", "vtkMapper", ")", ":", "actor", "=", "vtk", ".", "vtkActor", "(", ")", "actor", ".", "SetMapper", "(", "uinput", ")", "else", ":", "actor", "=", "uinput", "self", ".", "AddActor", "(", "actor", ")", "actor", ".", "renderer", "=", "proxy", "(", "self", ")", "if", "name", "is", "None", ":", "name", "=", "str", "(", "hex", "(", "id", "(", "actor", ")", ")", ")", "self", ".", "_actors", "[", "name", "]", "=", "actor", "if", "reset_camera", ":", "self", ".", "reset_camera", "(", ")", "elif", "not", "self", ".", "camera_set", "and", "reset_camera", "is", "None", "and", "not", "rv", ":", "self", ".", "reset_camera", "(", ")", "else", ":", "self", ".", "parent", ".", "_render", "(", ")", "self", ".", "update_bounds_axes", "(", ")", "if", "culling", ":", "try", ":", "actor", ".", "GetProperty", "(", ")", ".", "BackfaceCullingOn", "(", ")", "except", "AttributeError", ":", "# pragma: no cover", "pass", "return", "actor", ",", "actor", ".", "GetProperty", "(", ")" ]
Adds an actor to render window. Creates an actor if input is a mapper. Parameters ---------- uinput : vtk.vtkMapper or vtk.vtkActor vtk mapper or vtk actor to be added. reset_camera : bool, optional Resets the camera when true. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. culling : bool optional Does not render faces that should not be visible to the plotter. This can be helpful for dense surface meshes, especially when edges are visible, but can cause flat meshes to be partially displayed. Default False. Returns ------- actor : vtk.vtkActor The actor. actor_properties : vtk.Properties Actor properties.
[ "Adds", "an", "actor", "to", "render", "window", ".", "Creates", "an", "actor", "if", "input", "is", "a", "mapper", "." ]
python
train
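A hedged sketch of typical use, passing a bare VTK mapper and letting the method wrap it in an actor (`renderer` is assumed to be an instance of the class above inside a live plotting session):

    import vtk

    sphere = vtk.vtkSphereSource()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(sphere.GetOutputPort())

    actor, prop = renderer.add_actor(mapper, reset_camera=True,
                                     name='sphere', culling=True)
    prop.SetColor(1.0, 0.0, 0.0)  # the returned vtkProperty is live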
suds-community/suds
suds/sax/enc.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sax/enc.py#L81-L94
def __needs_encoding(self, s): """ Get whether string I{s} contains special characters. @param s: A string to check. @type s: str @return: True if needs encoding. @rtype: boolean """ if isinstance(s, basestring): for c in self.special: if c in s: return True
[ "def", "__needs_encoding", "(", "self", ",", "s", ")", ":", "if", "isinstance", "(", "s", ",", "basestring", ")", ":", "for", "c", "in", "self", ".", "special", ":", "if", "c", "in", "s", ":", "return", "True" ]
Get whether string I{s} contains special characters. @param s: A string to check. @type s: str @return: True if needs encoding. @rtype: boolean
[ "Get", "whether", "string", "I", "{", "s", "}", "contains", "special", "characters", "." ]
python
train
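A standalone Python 3 rendition for quick experimentation (the original targets Python 2, hence `basestring`; the `special` tuple here mimics the usual XML-special characters):

    special = ('&', '<', '>', '"', "'")

    def needs_encoding(s):
        # same control flow as the method above
        if isinstance(s, str):
            for c in special:
                if c in s:
                    return True

    print(needs_encoding('a < b'))  # True
    print(needs_encoding('plain'))  # None, which is falsy but not False

As the last line shows, the method answers "no" by falling off the end and returning None rather than an explicit False.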
loli/medpy
medpy/graphcut/energy_label.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/energy_label.py#L111-L193
def boundary_stawiaski(graph, label_image, gradient_image): # label image is not required to hold continuous ids or to start from 1 r""" Boundary term based on the sum of border voxel pairs differences. An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. Determines for each two supplied regions the voxels forming their border assuming :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). From the gradient magnitude values of each end-point voxel the border-voxel pairs, the highest one is selected and passed to a strictly positive and decreasing function :math:`g(x)`, which is defined as: .. math:: g(x) = \left(\frac{1}{1+|x|}\right)^k ,where :math:`k=2`. The final weight :math:`w_{i,j}` between two regions :math:`r_i` and :math:`r_j` is then determined by the sum of all these neighbour values: .. math:: w_{i,j} = \sum_{e_{m,n}\in F_{(r_i,r_j)}}g(\max(|I(m)|,|I(n)|)) , where :math:`F_{(r_i,r_j)}` is the set of border voxel-pairs :math:`e_{m,n}` between the regions :math:`r_i` and :math:`r_j` and :math:`|I(p)|` the absolute of the gradient magnitude at the voxel :math:`p` This boundary_function works as an edge indicator in the original image. In simpler words the weight (and therefore the energy) is obtained by summing the local contrast along the boundaries between two regions. Parameters ---------- graph : GCGraph The graph to add the weights to. label_image : ndarray The label image. Must contain consecutively labelled regions starting from index 1. gradient_image : ndarray The gradient image. Notes ----- This function requires the gradient magnitude image of the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. with `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`. This function is tested on 2D and 3D images and theoretically works for all dimensionalities. References ---------- .. [1] Stawiaski J., Decenciere E., Bidlaut F. "Interactive Liver Tumor Segmentation Using Graph-cuts and watershed" MICCAI 2008 participation """ # convert to arrays if necessary label_image = scipy.asarray(label_image) gradient_image = scipy.asarray(gradient_image) if label_image.flags['F_CONTIGUOUS']: # strangely, this one is required to be ctype ordering label_image = scipy.ascontiguousarray(label_image) __check_label_image(label_image) for dim in range(label_image.ndim): # prepare slicer for all minus last and all minus first "row" slicer_from = [slice(None)] * label_image.ndim slicer_to = [slice(None)] * label_image.ndim slicer_from[dim] = slice(None, -1) slicer_to[dim] = slice(1, None) # slice views of keys keys_from = label_image[slicer_from] keys_to = label_image[slicer_to] # determine not equal keys valid_edges = keys_from != keys_to # determine largest gradient gradient_max = numpy.maximum(numpy.abs(gradient_image[slicer_from]), numpy.abs(gradient_image[slicer_to]))[valid_edges] # determine key order keys_max = numpy.maximum(keys_from, keys_to)[valid_edges] keys_min = numpy.minimum(keys_from, keys_to)[valid_edges] # set edges / nweights for k1, k2, val in zip(keys_min, keys_max, gradient_max): weight = math.pow(1./(1. + val), 2) # weight contribution of a single pixel weight = max(weight, sys.float_info.min) graph.set_nweight(k1 - 1 , k2 - 1, weight, weight)
[ "def", "boundary_stawiaski", "(", "graph", ",", "label_image", ",", "gradient_image", ")", ":", "# label image is not required to hold continuous ids or to start from 1", "# convert to arrays if necessary", "label_image", "=", "scipy", ".", "asarray", "(", "label_image", ")", "gradient_image", "=", "scipy", ".", "asarray", "(", "gradient_image", ")", "if", "label_image", ".", "flags", "[", "'F_CONTIGUOUS'", "]", ":", "# strangely, this one is required to be ctype ordering", "label_image", "=", "scipy", ".", "ascontiguousarray", "(", "label_image", ")", "__check_label_image", "(", "label_image", ")", "for", "dim", "in", "range", "(", "label_image", ".", "ndim", ")", ":", "# prepare slicer for all minus last and all minus first \"row\"", "slicer_from", "=", "[", "slice", "(", "None", ")", "]", "*", "label_image", ".", "ndim", "slicer_to", "=", "[", "slice", "(", "None", ")", "]", "*", "label_image", ".", "ndim", "slicer_from", "[", "dim", "]", "=", "slice", "(", "None", ",", "-", "1", ")", "slicer_to", "[", "dim", "]", "=", "slice", "(", "1", ",", "None", ")", "# slice views of keys", "keys_from", "=", "label_image", "[", "slicer_from", "]", "keys_to", "=", "label_image", "[", "slicer_to", "]", "# determine not equal keys", "valid_edges", "=", "keys_from", "!=", "keys_to", "# determine largest gradient", "gradient_max", "=", "numpy", ".", "maximum", "(", "numpy", ".", "abs", "(", "gradient_image", "[", "slicer_from", "]", ")", ",", "numpy", ".", "abs", "(", "gradient_image", "[", "slicer_to", "]", ")", ")", "[", "valid_edges", "]", "# determine key order", "keys_max", "=", "numpy", ".", "maximum", "(", "keys_from", ",", "keys_to", ")", "[", "valid_edges", "]", "keys_min", "=", "numpy", ".", "minimum", "(", "keys_from", ",", "keys_to", ")", "[", "valid_edges", "]", "# set edges / nweights", "for", "k1", ",", "k2", ",", "val", "in", "zip", "(", "keys_min", ",", "keys_max", ",", "gradient_max", ")", ":", "weight", "=", "math", ".", "pow", "(", "1.", "/", "(", "1.", "+", "val", ")", ",", "2", ")", "# weight contribution of a single pixel", "weight", "=", "max", "(", "weight", ",", "sys", ".", "float_info", ".", "min", ")", "graph", ".", "set_nweight", "(", "k1", "-", "1", ",", "k2", "-", "1", ",", "weight", ",", "weight", ")" ]
r""" Boundary term based on the sum of border voxel pairs differences. An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. Determines for each two supplied regions the voxels forming their border assuming :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). From the gradient magnitude values of each end-point voxel the border-voxel pairs, the highest one is selected and passed to a strictly positive and decreasing function :math:`g(x)`, which is defined as: .. math:: g(x) = \left(\frac{1}{1+|x|}\right)^k ,where :math:`k=2`. The final weight :math:`w_{i,j}` between two regions :math:`r_i` and :math:`r_j` is then determined by the sum of all these neighbour values: .. math:: w_{i,j} = \sum_{e_{m,n}\in F_{(r_i,r_j)}}g(\max(|I(m)|,|I(n)|)) , where :math:`F_{(r_i,r_j)}` is the set of border voxel-pairs :math:`e_{m,n}` between the regions :math:`r_i` and :math:`r_j` and :math:`|I(p)|` the absolute of the gradient magnitude at the voxel :math:`p` This boundary_function works as an edge indicator in the original image. In simpler words the weight (and therefore the energy) is obtained by summing the local contrast along the boundaries between two regions. Parameters ---------- graph : GCGraph The graph to add the weights to. label_image : ndarray The label image. Must contain consecutively labelled regions starting from index 1. gradient_image : ndarray The gradient image. Notes ----- This function requires the gradient magnitude image of the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. with `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`. This function is tested on 2D and 3D images and theoretically works for all dimensionalities. References ---------- .. [1] Stawiaski J., Decenciere E., Bidlaut F. "Interactive Liver Tumor Segmentation Using Graph-cuts and watershed" MICCAI 2008 participation
[ "r", "Boundary", "term", "based", "on", "the", "sum", "of", "border", "voxel", "pairs", "differences", ".", "An", "implementation", "of", "the", "boundary", "term", "in", "[", "1", "]", "_", "suitable", "to", "be", "used", "with", "the", "~medpy", ".", "graphcut", ".", "generate", ".", "graph_from_labels", "function", ".", "Determines", "for", "each", "two", "supplied", "regions", "the", "voxels", "forming", "their", "border", "assuming", ":", "math", ":", "ndim", "*", "2", "-", "connectedness", "(", "e", ".", "g", ".", ":", "math", ":", "3", "*", "2", "=", "6", "for", "3D", ")", ".", "From", "the", "gradient", "magnitude", "values", "of", "each", "end", "-", "point", "voxel", "the", "border", "-", "voxel", "pairs", "the", "highest", "one", "is", "selected", "and", "passed", "to", "a", "strictly", "positive", "and", "decreasing", "function", ":", "math", ":", "g", "(", "x", ")", "which", "is", "defined", "as", ":", "..", "math", "::", "g", "(", "x", ")", "=", "\\", "left", "(", "\\", "frac", "{", "1", "}", "{", "1", "+", "|x|", "}", "\\", "right", ")", "^k", "where", ":", "math", ":", "k", "=", "2", ".", "The", "final", "weight", ":", "math", ":", "w_", "{", "i", "j", "}", "between", "two", "regions", ":", "math", ":", "r_i", "and", ":", "math", ":", "r_j", "is", "then", "determined", "by", "the", "sum", "of", "all", "these", "neighbour", "values", ":", "..", "math", "::", "w_", "{", "i", "j", "}", "=", "\\", "sum_", "{", "e_", "{", "m", "n", "}", "\\", "in", "F_", "{", "(", "r_i", "r_j", ")", "}}", "g", "(", "\\", "max", "(", "|I", "(", "m", ")", "|", "|I", "(", "n", ")", "|", "))", "where", ":", "math", ":", "F_", "{", "(", "r_i", "r_j", ")", "}", "is", "the", "set", "of", "border", "voxel", "-", "pairs", ":", "math", ":", "e_", "{", "m", "n", "}", "between", "the", "regions", ":", "math", ":", "r_i", "and", ":", "math", ":", "r_j", "and", ":", "math", ":", "|I", "(", "p", ")", "|", "the", "absolute", "of", "the", "gradient", "magnitude", "at", "the", "voxel", ":", "math", ":", "p", "This", "boundary_function", "works", "as", "an", "edge", "indicator", "in", "the", "original", "image", ".", "In", "simpler", "words", "the", "weight", "(", "and", "therefore", "the", "energy", ")", "is", "obtained", "by", "summing", "the", "local", "contrast", "along", "the", "boundaries", "between", "two", "regions", ".", "Parameters", "----------", "graph", ":", "GCGraph", "The", "graph", "to", "add", "the", "weights", "to", ".", "label_image", ":", "ndarray", "The", "label", "image", ".", "Must", "contain", "consecutively", "labelled", "regions", "starting", "from", "index", "1", ".", "gradient_image", ":", "ndarray", "The", "gradient", "image", ".", "Notes", "-----", "This", "function", "requires", "the", "gradient", "magnitude", "image", "of", "the", "original", "image", "to", "be", "passed", "along", ".", "That", "means", "that", "~medpy", ".", "graphcut", ".", "generate", ".", "graph_from_labels", "has", "to", "be", "called", "with", "boundary_term_args", "set", "to", "the", "gradient", "image", ".", "This", "can", "be", "obtained", "e", ".", "g", ".", "with", "generic_gradient_magnitude", "and", "prewitt", "from", "scipy", ".", "ndimage", ".", "This", "function", "is", "tested", "on", "2D", "and", "3D", "images", "and", "theoretically", "works", "for", "all", "dimensionalities", ".", "References", "----------", "..", "[", "1", "]", "Stawiaski", "J", ".", "Decenciere", "E", ".", "Bidlaut", "F", ".", "Interactive", "Liver", "Tumor", "Segmentation", "Using", "Graph", "-", "cuts", "and", "watershed", 
"MICCAI", "2008", "participation" ]
python
train
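A sketch of wiring this boundary term into graph construction, following the note in the docstring (assumes medpy and scipy; `original_image`, `label_image`, `fg_markers` and `bg_markers` are placeholder arrays, and the keyword names follow medpy's documented interface):

    from scipy.ndimage import generic_gradient_magnitude, prewitt
    from medpy.graphcut import graph_from_labels
    from medpy.graphcut.energy_label import boundary_stawiaski

    # gradient magnitude of the original image, as required by the docstring
    gradient = generic_gradient_magnitude(original_image, prewitt)
    graph = graph_from_labels(label_image, fg_markers, bg_markers,
                              boundary_term=boundary_stawiaski,
                              boundary_term_args=gradient)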
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2409-L2462
def note(args): """ %prog note gffile > tabfile Extract certain attribute field for each feature. """ p = OptionParser(note.__doc__) p.add_option("--type", default=None, help="Only process certain types, multiple types allowed with comma") p.add_option("--attribute", default="Parent,Note", help="Attribute field to extract, multiple fields allowd with comma") p.add_option("--AED", type="float", help="Only extract lines with AED score <=") p.add_option("--exoncount", default=False, action="store_true", help="Get the exon count for each mRNA feat") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args type = opts.type if type: type = type.split(",") g = make_index(gffile) exoncounts = {} if opts.exoncount: for feat in g.features_of_type("mRNA"): nexons = 0 for c in g.children(feat.id, 1): if c.featuretype != "exon": continue nexons += 1 exoncounts[feat.id] = nexons attrib = opts.attribute.split(",") gff = Gff(gffile) seen = set() AED = opts.AED for g in gff: if type and g.type not in type: continue if AED is not None and float(g.attributes["_AED"][0]) > AED: continue keyval = [g.accn] + [",".join(g.attributes[x]) \ for x in attrib if x in g.attributes] if exoncounts: nexons = exoncounts.get(g.accn, 0) keyval.append(str(nexons)) keyval = tuple(keyval) if keyval not in seen: print("\t".join(keyval)) seen.add(keyval)
[ "def", "note", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "note", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--type\"", ",", "default", "=", "None", ",", "help", "=", "\"Only process certain types, multiple types allowed with comma\"", ")", "p", ".", "add_option", "(", "\"--attribute\"", ",", "default", "=", "\"Parent,Note\"", ",", "help", "=", "\"Attribute field to extract, multiple fields allowd with comma\"", ")", "p", ".", "add_option", "(", "\"--AED\"", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Only extract lines with AED score <=\"", ")", "p", ".", "add_option", "(", "\"--exoncount\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Get the exon count for each mRNA feat\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "gffile", ",", "=", "args", "type", "=", "opts", ".", "type", "if", "type", ":", "type", "=", "type", ".", "split", "(", "\",\"", ")", "g", "=", "make_index", "(", "gffile", ")", "exoncounts", "=", "{", "}", "if", "opts", ".", "exoncount", ":", "for", "feat", "in", "g", ".", "features_of_type", "(", "\"mRNA\"", ")", ":", "nexons", "=", "0", "for", "c", "in", "g", ".", "children", "(", "feat", ".", "id", ",", "1", ")", ":", "if", "c", ".", "featuretype", "!=", "\"exon\"", ":", "continue", "nexons", "+=", "1", "exoncounts", "[", "feat", ".", "id", "]", "=", "nexons", "attrib", "=", "opts", ".", "attribute", ".", "split", "(", "\",\"", ")", "gff", "=", "Gff", "(", "gffile", ")", "seen", "=", "set", "(", ")", "AED", "=", "opts", ".", "AED", "for", "g", "in", "gff", ":", "if", "type", "and", "g", ".", "type", "not", "in", "type", ":", "continue", "if", "AED", "is", "not", "None", "and", "float", "(", "g", ".", "attributes", "[", "\"_AED\"", "]", "[", "0", "]", ")", ">", "AED", ":", "continue", "keyval", "=", "[", "g", ".", "accn", "]", "+", "[", "\",\"", ".", "join", "(", "g", ".", "attributes", "[", "x", "]", ")", "for", "x", "in", "attrib", "if", "x", "in", "g", ".", "attributes", "]", "if", "exoncounts", ":", "nexons", "=", "exoncounts", ".", "get", "(", "g", ".", "accn", ",", "0", ")", "keyval", ".", "append", "(", "str", "(", "nexons", ")", ")", "keyval", "=", "tuple", "(", "keyval", ")", "if", "keyval", "not", "in", "seen", ":", "print", "(", "\"\\t\"", ".", "join", "(", "keyval", ")", ")", "seen", ".", "add", "(", "keyval", ")" ]
%prog note gffile > tabfile Extract certain attribute field for each feature.
[ "%prog", "note", "gffile", ">", "tabfile" ]
python
train
XuShaohua/bcloud
bcloud/UploadPage.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/UploadPage.py#L697-L711
def operate_selected_rows(self, operator):
    '''Perform an operation on each of the selected rows.

    operator - the function used to process each matching row
    '''
    model, tree_paths = self.selection.get_selected_rows()
    if not tree_paths:
        return
    fids = []
    for tree_path in tree_paths:
        fids.append(model[tree_path][FID_COL])
    for fid in fids:
        row = self.get_row_by_fid(fid)
        if row:
            operator(row)
[ "def", "operate_selected_rows", "(", "self", ",", "operator", ")", ":", "model", ",", "tree_paths", "=", "self", ".", "selection", ".", "get_selected_rows", "(", ")", "if", "not", "tree_paths", ":", "return", "fids", "=", "[", "]", "for", "tree_path", "in", "tree_paths", ":", "fids", ".", "append", "(", "model", "[", "tree_path", "]", "[", "FID_COL", "]", ")", "for", "fid", "in", "fids", ":", "row", "=", "self", ".", "get_row_by_fid", "(", "fid", ")", "if", "row", ":", "operator", "(", "row", ")" ]
Perform an operation on each of the selected rows.

operator - the function used to process each matching row
[ "对选中的条目进行操作", "." ]
python
train
ciena/afkak
afkak/client.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L803-L846
def _make_request_to_broker(self, broker, requestId, request, **kwArgs): """Send a request to the specified broker.""" def _timeout_request(broker, requestId): """The time we allotted for the request expired, cancel it.""" try: # FIXME: This should be done by calling .cancel() on the Deferred # returned by the broker client. broker.cancelRequest(requestId, reason=RequestTimedOutError( 'Request: {} cancelled due to timeout'.format(requestId))) except KeyError: # pragma: no cover This should never happen... log.exception('ERROR: Failed to find key for timed-out ' 'request. Broker: %r Req: %d', broker, requestId) raise if self._disconnect_on_timeout: broker.disconnect() def _alert_blocked_reactor(timeout, start): """Complain if this timer didn't fire before the timeout elapsed""" now = self.reactor.seconds() if now >= (start + timeout): log.warning('Reactor was starved for %r seconds', now - start) def _cancel_timeout(result, dc): """Request completed/cancelled, cancel the timeout delayedCall.""" if dc.active(): dc.cancel() return result # Make the request to the specified broker log.debug('_mrtb: sending request: %d to broker: %r', requestId, broker) d = broker.makeRequest(requestId, request, **kwArgs) # Set a delayedCall to fire if we don't get a reply in time dc = self.reactor.callLater( self.timeout, _timeout_request, broker, requestId) # Set a delayedCall to complain if the reactor has been blocked rc = self.reactor.callLater( (self.timeout * 0.9), _alert_blocked_reactor, self.timeout, self.reactor.seconds()) # Setup a callback on the request deferred to cancel both callLater d.addBoth(_cancel_timeout, dc) d.addBoth(_cancel_timeout, rc) return d
[ "def", "_make_request_to_broker", "(", "self", ",", "broker", ",", "requestId", ",", "request", ",", "*", "*", "kwArgs", ")", ":", "def", "_timeout_request", "(", "broker", ",", "requestId", ")", ":", "\"\"\"The time we allotted for the request expired, cancel it.\"\"\"", "try", ":", "# FIXME: This should be done by calling .cancel() on the Deferred", "# returned by the broker client.", "broker", ".", "cancelRequest", "(", "requestId", ",", "reason", "=", "RequestTimedOutError", "(", "'Request: {} cancelled due to timeout'", ".", "format", "(", "requestId", ")", ")", ")", "except", "KeyError", ":", "# pragma: no cover This should never happen...", "log", ".", "exception", "(", "'ERROR: Failed to find key for timed-out '", "'request. Broker: %r Req: %d'", ",", "broker", ",", "requestId", ")", "raise", "if", "self", ".", "_disconnect_on_timeout", ":", "broker", ".", "disconnect", "(", ")", "def", "_alert_blocked_reactor", "(", "timeout", ",", "start", ")", ":", "\"\"\"Complain if this timer didn't fire before the timeout elapsed\"\"\"", "now", "=", "self", ".", "reactor", ".", "seconds", "(", ")", "if", "now", ">=", "(", "start", "+", "timeout", ")", ":", "log", ".", "warning", "(", "'Reactor was starved for %r seconds'", ",", "now", "-", "start", ")", "def", "_cancel_timeout", "(", "result", ",", "dc", ")", ":", "\"\"\"Request completed/cancelled, cancel the timeout delayedCall.\"\"\"", "if", "dc", ".", "active", "(", ")", ":", "dc", ".", "cancel", "(", ")", "return", "result", "# Make the request to the specified broker", "log", ".", "debug", "(", "'_mrtb: sending request: %d to broker: %r'", ",", "requestId", ",", "broker", ")", "d", "=", "broker", ".", "makeRequest", "(", "requestId", ",", "request", ",", "*", "*", "kwArgs", ")", "# Set a delayedCall to fire if we don't get a reply in time", "dc", "=", "self", ".", "reactor", ".", "callLater", "(", "self", ".", "timeout", ",", "_timeout_request", ",", "broker", ",", "requestId", ")", "# Set a delayedCall to complain if the reactor has been blocked", "rc", "=", "self", ".", "reactor", ".", "callLater", "(", "(", "self", ".", "timeout", "*", "0.9", ")", ",", "_alert_blocked_reactor", ",", "self", ".", "timeout", ",", "self", ".", "reactor", ".", "seconds", "(", ")", ")", "# Setup a callback on the request deferred to cancel both callLater", "d", ".", "addBoth", "(", "_cancel_timeout", ",", "dc", ")", "d", ".", "addBoth", "(", "_cancel_timeout", ",", "rc", ")", "return", "d" ]
Send a request to the specified broker.
[ "Send", "a", "request", "to", "the", "specified", "broker", "." ]
python
train
pantsbuild/pants
src/python/pants/backend/jvm/tasks/classpath_util.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/classpath_util.py#L68-L78
def classpath(cls, targets, classpath_products, confs=('default',)): """Return the classpath as a list of paths covering all the passed targets. :param targets: Targets to build an aggregated classpath for. :param ClasspathProducts classpath_products: Product containing classpath elements. :param confs: The list of confs for use by this classpath. :returns: The classpath as a list of path elements. :rtype: list of string """ classpath_iter = cls._classpath_iter(classpath_products.get_for_targets(targets), confs=confs) return list(classpath_iter)
[ "def", "classpath", "(", "cls", ",", "targets", ",", "classpath_products", ",", "confs", "=", "(", "'default'", ",", ")", ")", ":", "classpath_iter", "=", "cls", ".", "_classpath_iter", "(", "classpath_products", ".", "get_for_targets", "(", "targets", ")", ",", "confs", "=", "confs", ")", "return", "list", "(", "classpath_iter", ")" ]
Return the classpath as a list of paths covering all the passed targets. :param targets: Targets to build an aggregated classpath for. :param ClasspathProducts classpath_products: Product containing classpath elements. :param confs: The list of confs for use by this classpath. :returns: The classpath as a list of path elements. :rtype: list of string
[ "Return", "the", "classpath", "as", "a", "list", "of", "paths", "covering", "all", "the", "passed", "targets", "." ]
python
train
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L597-L615
async def add_local_charm_dir(self, charm_dir, series): """Upload a local charm to the model. This will automatically generate an archive from the charm dir. :param charm_dir: Path to the charm directory :param series: Charm series """ fh = tempfile.NamedTemporaryFile() CharmArchiveGenerator(charm_dir).make_archive(fh.name) with fh: func = partial( self.add_local_charm, fh, series, os.stat(fh.name).st_size) charm_url = await self._connector.loop.run_in_executor(None, func) log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url) return charm_url
[ "async", "def", "add_local_charm_dir", "(", "self", ",", "charm_dir", ",", "series", ")", ":", "fh", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "CharmArchiveGenerator", "(", "charm_dir", ")", ".", "make_archive", "(", "fh", ".", "name", ")", "with", "fh", ":", "func", "=", "partial", "(", "self", ".", "add_local_charm", ",", "fh", ",", "series", ",", "os", ".", "stat", "(", "fh", ".", "name", ")", ".", "st_size", ")", "charm_url", "=", "await", "self", ".", "_connector", ".", "loop", ".", "run_in_executor", "(", "None", ",", "func", ")", "log", ".", "debug", "(", "'Uploaded local charm: %s -> %s'", ",", "charm_dir", ",", "charm_url", ")", "return", "charm_url" ]
Upload a local charm to the model. This will automatically generate an archive from the charm dir. :param charm_dir: Path to the charm directory :param series: Charm series
[ "Upload", "a", "local", "charm", "to", "the", "model", "." ]
python
train
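A minimal async sketch of uploading and then deploying a local charm (the path and series are placeholders; assumes credentials for a reachable controller are already configured):

    import asyncio
    from juju.model import Model

    async def main():
        model = Model()
        await model.connect()  # current controller/model
        charm_url = await model.add_local_charm_dir('/path/to/mycharm', 'focal')
        await model.deploy(charm_url)  # the returned local charm URL deploys directly
        await model.disconnect()

    asyncio.get_event_loop().run_until_complete(main())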
Accelize/pycosio
pycosio/_core/io_base_raw.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/io_base_raw.py#L246-L260
def _peek(self, size=-1): """ Return bytes from the stream without advancing the position. Args: size (int): Number of bytes to read. -1 to read the full stream. Returns: bytes: bytes read """ with self._seek_lock: seek = self._seek with handle_os_exceptions(): return self._read_range(seek, seek + size)
[ "def", "_peek", "(", "self", ",", "size", "=", "-", "1", ")", ":", "with", "self", ".", "_seek_lock", ":", "seek", "=", "self", ".", "_seek", "with", "handle_os_exceptions", "(", ")", ":", "return", "self", ".", "_read_range", "(", "seek", ",", "seek", "+", "size", ")" ]
Return bytes from the stream without advancing the position. Args: size (int): Number of bytes to read. -1 to read the full stream. Returns: bytes: bytes read
[ "Return", "bytes", "from", "the", "stream", "without", "advancing", "the", "position", "." ]
python
train
jorahn/icy
icy/icy.py
https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L543-L550
def _find_key_cols(df): """Identify columns in a DataFrame that could be a unique key""" keys = [] for col in df: if len(df[col].unique()) == len(df[col]): keys.append(col) return keys
[ "def", "_find_key_cols", "(", "df", ")", ":", "keys", "=", "[", "]", "for", "col", "in", "df", ":", "if", "len", "(", "df", "[", "col", "]", ".", "unique", "(", ")", ")", "==", "len", "(", "df", "[", "col", "]", ")", ":", "keys", ".", "append", "(", "col", ")", "return", "keys" ]
Identify columns in a DataFrame that could be a unique key
[ "Identify", "columns", "in", "a", "DataFrame", "that", "could", "be", "a", "unique", "key" ]
python
train
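A quick check with a small DataFrame:

    import pandas as pd

    df = pd.DataFrame({
        'id':    [1, 2, 3],          # all values unique -> key candidate
        'email': ['a', 'b', 'b'],    # duplicated value  -> not a key
    })
    print(_find_key_cols(df))  # ['id']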
peri-source/peri
peri/comp/comp.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L101-L111
def set_values(self, params, values): """ Directly set the values corresponding to certain parameters. This does not necessarily trigger and update of the calculation, See also -------- :func:`~peri.comp.comp.ParameterGroup.update` : full update func """ for p, v in zip(util.listify(params), util.listify(values)): self.param_dict[p] = v
[ "def", "set_values", "(", "self", ",", "params", ",", "values", ")", ":", "for", "p", ",", "v", "in", "zip", "(", "util", ".", "listify", "(", "params", ")", ",", "util", ".", "listify", "(", "values", ")", ")", ":", "self", ".", "param_dict", "[", "p", "]", "=", "v" ]
Directly set the values corresponding to certain parameters. This does not necessarily trigger and update of the calculation, See also -------- :func:`~peri.comp.comp.ParameterGroup.update` : full update func
[ "Directly", "set", "the", "values", "corresponding", "to", "certain", "parameters", ".", "This", "does", "not", "necessarily", "trigger", "and", "update", "of", "the", "calculation", "See", "also", "--------", ":", "func", ":", "~peri", ".", "comp", ".", "comp", ".", "ParameterGroup", ".", "update", ":", "full", "update", "func" ]
python
valid
rigetti/grove
grove/tomography/utils.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/utils.py#L244-L268
def plot_pauli_transfer_matrix(ptransfermatrix, ax, labels, title): """ Visualize the Pauli Transfer Matrix of a process. :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix :param ax: The matplotlib axes. :param labels: The labels for the operator basis states. :param title: The title for the plot :return: The modified axis object. :rtype: AxesSubplot """ im = ax.imshow(ptransfermatrix, interpolation="nearest", cmap=rigetti_3_color_cm, vmin=-1, vmax=1) dim = len(labels) plt.colorbar(im, ax=ax) ax.set_xticks(range(dim)) ax.set_xlabel("Input Pauli Operator", fontsize=20) ax.set_yticks(range(dim)) ax.set_ylabel("Output Pauli Operator", fontsize=20) ax.set_title(title, fontsize=25) ax.set_xticklabels(labels, rotation=45) ax.set_yticklabels(labels) ax.grid(False) return ax
[ "def", "plot_pauli_transfer_matrix", "(", "ptransfermatrix", ",", "ax", ",", "labels", ",", "title", ")", ":", "im", "=", "ax", ".", "imshow", "(", "ptransfermatrix", ",", "interpolation", "=", "\"nearest\"", ",", "cmap", "=", "rigetti_3_color_cm", ",", "vmin", "=", "-", "1", ",", "vmax", "=", "1", ")", "dim", "=", "len", "(", "labels", ")", "plt", ".", "colorbar", "(", "im", ",", "ax", "=", "ax", ")", "ax", ".", "set_xticks", "(", "range", "(", "dim", ")", ")", "ax", ".", "set_xlabel", "(", "\"Input Pauli Operator\"", ",", "fontsize", "=", "20", ")", "ax", ".", "set_yticks", "(", "range", "(", "dim", ")", ")", "ax", ".", "set_ylabel", "(", "\"Output Pauli Operator\"", ",", "fontsize", "=", "20", ")", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "25", ")", "ax", ".", "set_xticklabels", "(", "labels", ",", "rotation", "=", "45", ")", "ax", ".", "set_yticklabels", "(", "labels", ")", "ax", ".", "grid", "(", "False", ")", "return", "ax" ]
Visualize the Pauli Transfer Matrix of a process. :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix :param ax: The matplotlib axes. :param labels: The labels for the operator basis states. :param title: The title for the plot :return: The modified axis object. :rtype: AxesSubplot
[ "Visualize", "the", "Pauli", "Transfer", "Matrix", "of", "a", "process", "." ]
python
train
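As a worked example, the identity channel on one qubit has the 4x4 identity as its Pauli transfer matrix, which renders as a purely diagonal plot:

    import numpy as np
    import matplotlib.pyplot as plt
    from grove.tomography.utils import plot_pauli_transfer_matrix

    ptm = np.eye(4)  # PTM of the single-qubit identity channel
    fig, ax = plt.subplots(figsize=(6, 5))
    plot_pauli_transfer_matrix(ptm, ax, labels=['I', 'X', 'Y', 'Z'],
                               title='Identity channel')
    plt.show()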
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L975-L1016
def _get_one_pending_job(self): """ Retrieve a pending job. :return: A CFGJob instance or None """ pending_job_key, pending_job = self._pending_jobs.popitem() pending_job_state = pending_job.state pending_job_call_stack = pending_job.call_stack pending_job_src_block_id = pending_job.src_block_id pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx self._deregister_analysis_job(pending_job.caller_func_addr, pending_job) # Let's check whether this address has been traced before. if pending_job_key in self._nodes: node = self._nodes[pending_job_key] if node in self.graph: pending_exit_addr = self._block_id_addr(pending_job_key) # That block has been traced before. Let's forget about it l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr) # However, we should still create the FakeRet edge self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet", stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr) return None pending_job_state.history.jumpkind = 'Ijk_FakeRet' job = CFGJob(pending_job_state.addr, pending_job_state, self._context_sensitivity_level, src_block_id=pending_job_src_block_id, src_exit_stmt_idx=pending_job_src_exit_stmt_idx, src_ins_addr=pending_job.src_exit_ins_addr, call_stack=pending_job_call_stack, ) l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key)) return job
[ "def", "_get_one_pending_job", "(", "self", ")", ":", "pending_job_key", ",", "pending_job", "=", "self", ".", "_pending_jobs", ".", "popitem", "(", ")", "pending_job_state", "=", "pending_job", ".", "state", "pending_job_call_stack", "=", "pending_job", ".", "call_stack", "pending_job_src_block_id", "=", "pending_job", ".", "src_block_id", "pending_job_src_exit_stmt_idx", "=", "pending_job", ".", "src_exit_stmt_idx", "self", ".", "_deregister_analysis_job", "(", "pending_job", ".", "caller_func_addr", ",", "pending_job", ")", "# Let's check whether this address has been traced before.", "if", "pending_job_key", "in", "self", ".", "_nodes", ":", "node", "=", "self", ".", "_nodes", "[", "pending_job_key", "]", "if", "node", "in", "self", ".", "graph", ":", "pending_exit_addr", "=", "self", ".", "_block_id_addr", "(", "pending_job_key", ")", "# That block has been traced before. Let's forget about it", "l", ".", "debug", "(", "\"Target 0x%08x has been traced before. Trying the next one...\"", ",", "pending_exit_addr", ")", "# However, we should still create the FakeRet edge", "self", ".", "_graph_add_edge", "(", "pending_job_src_block_id", ",", "pending_job_key", ",", "jumpkind", "=", "\"Ijk_FakeRet\"", ",", "stmt_idx", "=", "pending_job_src_exit_stmt_idx", ",", "ins_addr", "=", "pending_job", ".", "src_exit_ins_addr", ")", "return", "None", "pending_job_state", ".", "history", ".", "jumpkind", "=", "'Ijk_FakeRet'", "job", "=", "CFGJob", "(", "pending_job_state", ".", "addr", ",", "pending_job_state", ",", "self", ".", "_context_sensitivity_level", ",", "src_block_id", "=", "pending_job_src_block_id", ",", "src_exit_stmt_idx", "=", "pending_job_src_exit_stmt_idx", ",", "src_ins_addr", "=", "pending_job", ".", "src_exit_ins_addr", ",", "call_stack", "=", "pending_job_call_stack", ",", ")", "l", ".", "debug", "(", "\"Tracing a missing return exit %s\"", ",", "self", ".", "_block_id_repr", "(", "pending_job_key", ")", ")", "return", "job" ]
Retrieve a pending job. :return: A CFGJob instance or None
[ "Retrieve", "a", "pending", "job", "." ]
python
train
tensorpack/tensorpack
tensorpack/tfutils/common.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L113-L125
def get_tensors_by_names(names): """ Get a list of tensors in the default graph by a list of names. Args: names (list): """ ret = [] G = tfv1.get_default_graph() for n in names: opn, varn = get_op_tensor_name(n) ret.append(G.get_tensor_by_name(varn)) return ret
[ "def", "get_tensors_by_names", "(", "names", ")", ":", "ret", "=", "[", "]", "G", "=", "tfv1", ".", "get_default_graph", "(", ")", "for", "n", "in", "names", ":", "opn", ",", "varn", "=", "get_op_tensor_name", "(", "n", ")", "ret", ".", "append", "(", "G", ".", "get_tensor_by_name", "(", "varn", ")", ")", "return", "ret" ]
Get a list of tensors in the default graph by a list of names. Args: names (list):
[ "Get", "a", "list", "of", "tensors", "in", "the", "default", "graph", "by", "a", "list", "of", "names", "." ]
python
train
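A sketch in TF1-style graph mode (`get_op_tensor_name` appends ':0' to a bare op name, so both spellings below resolve to the same tensors in the default graph):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    x = tf.placeholder(tf.float32, [None, 3], name='input')
    y = tf.identity(x, name='output')

    ins, outs = get_tensors_by_names(['input', 'output:0'])
    assert ins is x and outs is y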
saltstack/salt
salt/modules/consul.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L471-L504
def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret
[ "def", "agent_members", "(", "consul_url", "=", "None", ",", "token", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "query_params", "=", "{", "}", "if", "not", "consul_url", ":", "consul_url", "=", "_get_config", "(", ")", "if", "not", "consul_url", ":", "log", ".", "error", "(", "'No Consul URL found.'", ")", "ret", "[", "'message'", "]", "=", "'No Consul URL found.'", "ret", "[", "'res'", "]", "=", "False", "return", "ret", "if", "'wan'", "in", "kwargs", ":", "query_params", "[", "'wan'", "]", "=", "kwargs", "[", "'wan'", "]", "function", "=", "'agent/members'", "ret", "=", "_query", "(", "consul_url", "=", "consul_url", ",", "function", "=", "function", ",", "token", "=", "token", ",", "method", "=", "'GET'", ",", "query_params", "=", "query_params", ")", "return", "ret" ]
Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members
[ "Returns", "the", "members", "as", "seen", "by", "the", "local", "serf", "agent" ]
python
train
GoogleCloudPlatform/datastore-ndb-python
demo/app/fibo.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/demo/app/fibo.py#L32-L37
def fibonacci(n): """A recursive Fibonacci to exercise task switching.""" if n <= 1: raise ndb.Return(n) a, b = yield fibonacci(n - 1), fibonacci(n - 2) raise ndb.Return(a + b)
[ "def", "fibonacci", "(", "n", ")", ":", "if", "n", "<=", "1", ":", "raise", "ndb", ".", "Return", "(", "n", ")", "a", ",", "b", "=", "yield", "fibonacci", "(", "n", "-", "1", ")", ",", "fibonacci", "(", "n", "-", "2", ")", "raise", "ndb", ".", "Return", "(", "a", "+", "b", ")" ]
A recursive Fibonacci to exercise task switching.
[ "A", "recursive", "Fibonacci", "to", "exercise", "task", "switching", "." ]
python
train
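In the demo this function carries the `@ndb.tasklet` decorator, so calling it returns an NDB Future whose recursive sub-calls are scheduled as concurrent tasklets:

    fut = fibonacci(10)      # returns immediately with a Future
    print(fut.get_result())  # 55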
ransford/sllurp
sllurp/llrp.py
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L408-L699
def handleMessage(self, lmsg): """Implements the LLRP client state machine.""" logger.debug('LLRPMessage received in state %s: %s', self.state, lmsg) msgName = lmsg.getName() lmsg.proto = self lmsg.peername = self.peername # call per-message callbacks logger.debug('starting message callbacks for %s', msgName) for fn in self._message_callbacks[msgName]: fn(lmsg) logger.debug('done with message callbacks for %s', msgName) # keepalives can occur at any time if msgName == 'KEEPALIVE': self.send_KEEPALIVE_ACK() return if msgName == 'RO_ACCESS_REPORT' and \ self.state != LLRPClient.STATE_INVENTORYING: logger.debug('ignoring RO_ACCESS_REPORT because not inventorying') return if msgName == 'READER_EVENT_NOTIFICATION' and \ self.state >= LLRPClient.STATE_CONNECTED: logger.debug('Got reader event notification') return logger.debug('in handleMessage(%s), there are %d Deferreds', msgName, len(self._deferreds[msgName])) ####### # LLRP client state machine follows. Beware: gets thorny. Note the # order of the LLRPClient.STATE_* fields. ####### # in DISCONNECTED, CONNECTING, and CONNECTED states, expect only # READER_EVENT_NOTIFICATION messages. if self.state in (LLRPClient.STATE_DISCONNECTED, LLRPClient.STATE_CONNECTING, LLRPClient.STATE_CONNECTED): if msgName != 'READER_EVENT_NOTIFICATION': logger.error('unexpected message %s while connecting', msgName) return if not lmsg.isSuccess(): rend = lmsg.msgdict[msgName]['ReaderEventNotificationData'] try: status = rend['ConnectionAttemptEvent']['Status'] except KeyError: status = '(unknown status)' logger.fatal('Could not start session on reader: %s', status) return self.processDeferreds(msgName, lmsg.isSuccess()) # a Deferred to call when we get GET_READER_CAPABILITIES_RESPONSE d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_CONNECTED) d.addErrback(self.panic, 'GET_READER_CAPABILITIES failed') if (self.impinj_search_mode or self.impinj_tag_content_selector or self.impinj_extended_configuration or self.impinj_fixed_frequency_param): caps = defer.Deferred() caps.addCallback(self.send_GET_READER_CAPABILITIES, onCompletion=d) caps.addErrback(self.panic, 'ENABLE_IMPINJ_EXTENSIONS failed') self.send_ENABLE_IMPINJ_EXTENSIONS(onCompletion=caps) else: self.send_GET_READER_CAPABILITIES(self, onCompletion=d) elif self.state == LLRPClient.STATE_SENT_ENABLE_IMPINJ_EXTENSIONS: logger.debug(lmsg) if msgName != 'CUSTOM_MESSAGE': logger.error('unexpected response %s while enabling Impinj' 'extensions', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s enabling Impinj extensions: %s', status, err) return logger.debug('Successfully enabled Impinj extensions') self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_GET_CAPABILITIES, expect GET_CAPABILITIES_RESPONSE; # respond to this message by advancing to state CONNECTED. 
elif self.state == LLRPClient.STATE_SENT_GET_CAPABILITIES: if msgName != 'GET_READER_CAPABILITIES_RESPONSE': logger.error('unexpected response %s getting capabilities', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s getting capabilities: %s', status, err) return self.capabilities = \ lmsg.msgdict['GET_READER_CAPABILITIES_RESPONSE'] logger.debug('Capabilities: %s', pprint.pformat(self.capabilities)) try: self.parseCapabilities(self.capabilities) except LLRPError as err: logger.exception('Capabilities mismatch') raise err self.processDeferreds(msgName, lmsg.isSuccess()) d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_SENT_GET_CONFIG) d.addErrback(self.panic, 'GET_READER_CONFIG failed') self.send_GET_READER_CONFIG(onCompletion=d) elif self.state == LLRPClient.STATE_SENT_GET_CONFIG: if msgName not in ('GET_READER_CONFIG_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE', 'DELETE_ROSPEC_RESPONSE'): logger.error('unexpected response %s getting config', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s getting reader config: %s', status, err) return if msgName == 'GET_READER_CONFIG_RESPONSE': config = lmsg.msgdict['GET_READER_CONFIG_RESPONSE'] self.configuration = self.parseReaderConfig(config) logger.debug('Reader configuration: %s', self.configuration) self.processDeferreds(msgName, lmsg.isSuccess()) d = defer.Deferred() d.addCallback(self._setState_wrapper, LLRPClient.STATE_SENT_SET_CONFIG) d.addErrback(self.panic, 'SET_READER_CONFIG failed') self.send_ENABLE_EVENTS_AND_REPORTS() self.send_SET_READER_CONFIG(onCompletion=d) elif self.state == LLRPClient.STATE_SENT_SET_CONFIG: if msgName not in ('SET_READER_CONFIG_RESPONSE', 'GET_READER_CONFIG_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE'): logger.error('unexpected response %s setting config', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s setting reader config: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) if self.reset_on_connect: d = self.stopPolitely(disconnect=False) if self.start_inventory: d.addCallback(self.startInventory) elif self.start_inventory: self.startInventory() # in state SENT_ADD_ROSPEC, expect only ADD_ROSPEC_RESPONSE; respond to # favorable ADD_ROSPEC_RESPONSE by enabling the added ROSpec and # advancing to state SENT_ENABLE_ROSPEC. elif self.state == LLRPClient.STATE_SENT_ADD_ROSPEC: if msgName != 'ADD_ROSPEC_RESPONSE': logger.error('unexpected response %s when adding ROSpec', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s adding ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_ENABLE_ROSPEC, expect only ENABLE_ROSPEC_RESPONSE; # respond to favorable ENABLE_ROSPEC_RESPONSE by starting the enabled # ROSpec and advancing to state SENT_START_ROSPEC. 
elif self.state == LLRPClient.STATE_SENT_ENABLE_ROSPEC: if msgName != 'ENABLE_ROSPEC_RESPONSE': logger.error('unexpected response %s when enabling ROSpec', msgName) return if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.fatal('Error %s enabling ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) # in state PAUSING, we have sent a DISABLE_ROSPEC, so expect only # DISABLE_ROSPEC_RESPONSE. advance to state PAUSED. elif self.state == LLRPClient.STATE_PAUSING: if msgName != 'DISABLE_ROSPEC_RESPONSE': logger.error('unexpected response %s ' ' when disabling ROSpec', msgName) if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('DISABLE_ROSPEC failed with status %s: %s', status, err) logger.warn('Error %s disabling ROSpec: %s', status, err) self.processDeferreds(msgName, lmsg.isSuccess()) # in state SENT_START_ROSPEC, expect only START_ROSPEC_RESPONSE; # respond to favorable START_ROSPEC_RESPONSE by advancing to state # INVENTORYING. elif self.state == LLRPClient.STATE_SENT_START_ROSPEC: if msgName == 'RO_ACCESS_REPORT': return if msgName == 'READER_EVENT_NOTIFICATION': return if msgName != 'START_ROSPEC_RESPONSE': logger.error('unexpected response %s when starting ROSpec', msgName) if not lmsg.isSuccess(): status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('START_ROSPEC failed with status %s: %s', status, err) logger.fatal('Error %s starting ROSpec: %s', status, err) return self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_INVENTORYING: if msgName not in ('RO_ACCESS_REPORT', 'READER_EVENT_NOTIFICATION', 'ADD_ACCESSSPEC_RESPONSE', 'ENABLE_ACCESSSPEC_RESPONSE', 'DISABLE_ACCESSSPEC_RESPONSE', 'DELETE_ACCESSSPEC_RESPONSE'): logger.error('unexpected message %s while inventorying', msgName) return self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_SENT_DELETE_ACCESSSPEC: if msgName != 'DELETE_ACCESSSPEC_RESPONSE': logger.error('unexpected response %s when deleting AccessSpec', msgName) self.processDeferreds(msgName, lmsg.isSuccess()) elif self.state == LLRPClient.STATE_SENT_DELETE_ROSPEC: if msgName != 'DELETE_ROSPEC_RESPONSE': logger.error('unexpected response %s when deleting ROSpec', msgName) if lmsg.isSuccess(): if self.disconnecting: self.setState(LLRPClient.STATE_DISCONNECTED) else: self.setState(LLRPClient.STATE_CONNECTED) else: status = lmsg.msgdict[msgName]['LLRPStatus']['StatusCode'] err = lmsg.msgdict[msgName]['LLRPStatus']['ErrorDescription'] logger.error('DELETE_ROSPEC failed with status %s: %s', status, err) self.processDeferreds(msgName, lmsg.isSuccess()) if self.disconnecting: logger.info('disconnecting') self.transport.loseConnection() else: logger.warn('message %s received in unknown state!', msgName) if self._deferreds[msgName]: logger.error('there should NOT be Deferreds left for %s,' ' but there are!', msgName)
[ "def", "handleMessage", "(", "self", ",", "lmsg", ")", ":", "logger", ".", "debug", "(", "'LLRPMessage received in state %s: %s'", ",", "self", ".", "state", ",", "lmsg", ")", "msgName", "=", "lmsg", ".", "getName", "(", ")", "lmsg", ".", "proto", "=", "self", "lmsg", ".", "peername", "=", "self", ".", "peername", "# call per-message callbacks", "logger", ".", "debug", "(", "'starting message callbacks for %s'", ",", "msgName", ")", "for", "fn", "in", "self", ".", "_message_callbacks", "[", "msgName", "]", ":", "fn", "(", "lmsg", ")", "logger", ".", "debug", "(", "'done with message callbacks for %s'", ",", "msgName", ")", "# keepalives can occur at any time", "if", "msgName", "==", "'KEEPALIVE'", ":", "self", ".", "send_KEEPALIVE_ACK", "(", ")", "return", "if", "msgName", "==", "'RO_ACCESS_REPORT'", "and", "self", ".", "state", "!=", "LLRPClient", ".", "STATE_INVENTORYING", ":", "logger", ".", "debug", "(", "'ignoring RO_ACCESS_REPORT because not inventorying'", ")", "return", "if", "msgName", "==", "'READER_EVENT_NOTIFICATION'", "and", "self", ".", "state", ">=", "LLRPClient", ".", "STATE_CONNECTED", ":", "logger", ".", "debug", "(", "'Got reader event notification'", ")", "return", "logger", ".", "debug", "(", "'in handleMessage(%s), there are %d Deferreds'", ",", "msgName", ",", "len", "(", "self", ".", "_deferreds", "[", "msgName", "]", ")", ")", "#######", "# LLRP client state machine follows. Beware: gets thorny. Note the", "# order of the LLRPClient.STATE_* fields.", "#######", "# in DISCONNECTED, CONNECTING, and CONNECTED states, expect only", "# READER_EVENT_NOTIFICATION messages.", "if", "self", ".", "state", "in", "(", "LLRPClient", ".", "STATE_DISCONNECTED", ",", "LLRPClient", ".", "STATE_CONNECTING", ",", "LLRPClient", ".", "STATE_CONNECTED", ")", ":", "if", "msgName", "!=", "'READER_EVENT_NOTIFICATION'", ":", "logger", ".", "error", "(", "'unexpected message %s while connecting'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "rend", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'ReaderEventNotificationData'", "]", "try", ":", "status", "=", "rend", "[", "'ConnectionAttemptEvent'", "]", "[", "'Status'", "]", "except", "KeyError", ":", "status", "=", "'(unknown status)'", "logger", ".", "fatal", "(", "'Could not start session on reader: %s'", ",", "status", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "# a Deferred to call when we get GET_READER_CAPABILITIES_RESPONSE", "d", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallback", "(", "self", ".", "_setState_wrapper", ",", "LLRPClient", ".", "STATE_CONNECTED", ")", "d", ".", "addErrback", "(", "self", ".", "panic", ",", "'GET_READER_CAPABILITIES failed'", ")", "if", "(", "self", ".", "impinj_search_mode", "or", "self", ".", "impinj_tag_content_selector", "or", "self", ".", "impinj_extended_configuration", "or", "self", ".", "impinj_fixed_frequency_param", ")", ":", "caps", "=", "defer", ".", "Deferred", "(", ")", "caps", ".", "addCallback", "(", "self", ".", "send_GET_READER_CAPABILITIES", ",", "onCompletion", "=", "d", ")", "caps", ".", "addErrback", "(", "self", ".", "panic", ",", "'ENABLE_IMPINJ_EXTENSIONS failed'", ")", "self", ".", "send_ENABLE_IMPINJ_EXTENSIONS", "(", "onCompletion", "=", "caps", ")", "else", ":", "self", ".", "send_GET_READER_CAPABILITIES", "(", "self", ",", "onCompletion", "=", "d", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", 
"STATE_SENT_ENABLE_IMPINJ_EXTENSIONS", ":", "logger", ".", "debug", "(", "lmsg", ")", "if", "msgName", "!=", "'CUSTOM_MESSAGE'", ":", "logger", ".", "error", "(", "'unexpected response %s while enabling Impinj'", "'extensions'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s enabling Impinj extensions: %s'", ",", "status", ",", "err", ")", "return", "logger", ".", "debug", "(", "'Successfully enabled Impinj extensions'", ")", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "# in state SENT_GET_CAPABILITIES, expect GET_CAPABILITIES_RESPONSE;", "# respond to this message by advancing to state CONNECTED.", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_GET_CAPABILITIES", ":", "if", "msgName", "!=", "'GET_READER_CAPABILITIES_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s getting capabilities'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s getting capabilities: %s'", ",", "status", ",", "err", ")", "return", "self", ".", "capabilities", "=", "lmsg", ".", "msgdict", "[", "'GET_READER_CAPABILITIES_RESPONSE'", "]", "logger", ".", "debug", "(", "'Capabilities: %s'", ",", "pprint", ".", "pformat", "(", "self", ".", "capabilities", ")", ")", "try", ":", "self", ".", "parseCapabilities", "(", "self", ".", "capabilities", ")", "except", "LLRPError", "as", "err", ":", "logger", ".", "exception", "(", "'Capabilities mismatch'", ")", "raise", "err", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "d", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallback", "(", "self", ".", "_setState_wrapper", ",", "LLRPClient", ".", "STATE_SENT_GET_CONFIG", ")", "d", ".", "addErrback", "(", "self", ".", "panic", ",", "'GET_READER_CONFIG failed'", ")", "self", ".", "send_GET_READER_CONFIG", "(", "onCompletion", "=", "d", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_GET_CONFIG", ":", "if", "msgName", "not", "in", "(", "'GET_READER_CONFIG_RESPONSE'", ",", "'DELETE_ACCESSSPEC_RESPONSE'", ",", "'DELETE_ROSPEC_RESPONSE'", ")", ":", "logger", ".", "error", "(", "'unexpected response %s getting config'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s getting reader config: %s'", ",", "status", ",", "err", ")", "return", "if", "msgName", "==", "'GET_READER_CONFIG_RESPONSE'", ":", "config", "=", "lmsg", ".", "msgdict", "[", "'GET_READER_CONFIG_RESPONSE'", "]", "self", ".", "configuration", "=", "self", ".", "parseReaderConfig", "(", "config", ")", "logger", ".", "debug", "(", "'Reader configuration: %s'", ",", "self", ".", "configuration", ")", "self", ".", 
"processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "d", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallback", "(", "self", ".", "_setState_wrapper", ",", "LLRPClient", ".", "STATE_SENT_SET_CONFIG", ")", "d", ".", "addErrback", "(", "self", ".", "panic", ",", "'SET_READER_CONFIG failed'", ")", "self", ".", "send_ENABLE_EVENTS_AND_REPORTS", "(", ")", "self", ".", "send_SET_READER_CONFIG", "(", "onCompletion", "=", "d", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_SET_CONFIG", ":", "if", "msgName", "not", "in", "(", "'SET_READER_CONFIG_RESPONSE'", ",", "'GET_READER_CONFIG_RESPONSE'", ",", "'DELETE_ACCESSSPEC_RESPONSE'", ")", ":", "logger", ".", "error", "(", "'unexpected response %s setting config'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s setting reader config: %s'", ",", "status", ",", "err", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "if", "self", ".", "reset_on_connect", ":", "d", "=", "self", ".", "stopPolitely", "(", "disconnect", "=", "False", ")", "if", "self", ".", "start_inventory", ":", "d", ".", "addCallback", "(", "self", ".", "startInventory", ")", "elif", "self", ".", "start_inventory", ":", "self", ".", "startInventory", "(", ")", "# in state SENT_ADD_ROSPEC, expect only ADD_ROSPEC_RESPONSE; respond to", "# favorable ADD_ROSPEC_RESPONSE by enabling the added ROSpec and", "# advancing to state SENT_ENABLE_ROSPEC.", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_ADD_ROSPEC", ":", "if", "msgName", "!=", "'ADD_ROSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s when adding ROSpec'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s adding ROSpec: %s'", ",", "status", ",", "err", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "# in state SENT_ENABLE_ROSPEC, expect only ENABLE_ROSPEC_RESPONSE;", "# respond to favorable ENABLE_ROSPEC_RESPONSE by starting the enabled", "# ROSpec and advancing to state SENT_START_ROSPEC.", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_ENABLE_ROSPEC", ":", "if", "msgName", "!=", "'ENABLE_ROSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s when enabling ROSpec'", ",", "msgName", ")", "return", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "fatal", "(", "'Error %s enabling ROSpec: %s'", ",", "status", ",", "err", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "# in state PAUSING, we have sent a DISABLE_ROSPEC, so expect only", "# DISABLE_ROSPEC_RESPONSE. 
advance to state PAUSED.", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_PAUSING", ":", "if", "msgName", "!=", "'DISABLE_ROSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s '", "' when disabling ROSpec'", ",", "msgName", ")", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "error", "(", "'DISABLE_ROSPEC failed with status %s: %s'", ",", "status", ",", "err", ")", "logger", ".", "warn", "(", "'Error %s disabling ROSpec: %s'", ",", "status", ",", "err", ")", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "# in state SENT_START_ROSPEC, expect only START_ROSPEC_RESPONSE;", "# respond to favorable START_ROSPEC_RESPONSE by advancing to state", "# INVENTORYING.", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_START_ROSPEC", ":", "if", "msgName", "==", "'RO_ACCESS_REPORT'", ":", "return", "if", "msgName", "==", "'READER_EVENT_NOTIFICATION'", ":", "return", "if", "msgName", "!=", "'START_ROSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s when starting ROSpec'", ",", "msgName", ")", "if", "not", "lmsg", ".", "isSuccess", "(", ")", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'ErrorDescription'", "]", "logger", ".", "error", "(", "'START_ROSPEC failed with status %s: %s'", ",", "status", ",", "err", ")", "logger", ".", "fatal", "(", "'Error %s starting ROSpec: %s'", ",", "status", ",", "err", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_INVENTORYING", ":", "if", "msgName", "not", "in", "(", "'RO_ACCESS_REPORT'", ",", "'READER_EVENT_NOTIFICATION'", ",", "'ADD_ACCESSSPEC_RESPONSE'", ",", "'ENABLE_ACCESSSPEC_RESPONSE'", ",", "'DISABLE_ACCESSSPEC_RESPONSE'", ",", "'DELETE_ACCESSSPEC_RESPONSE'", ")", ":", "logger", ".", "error", "(", "'unexpected message %s while inventorying'", ",", "msgName", ")", "return", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_DELETE_ACCESSSPEC", ":", "if", "msgName", "!=", "'DELETE_ACCESSSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s when deleting AccessSpec'", ",", "msgName", ")", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "elif", "self", ".", "state", "==", "LLRPClient", ".", "STATE_SENT_DELETE_ROSPEC", ":", "if", "msgName", "!=", "'DELETE_ROSPEC_RESPONSE'", ":", "logger", ".", "error", "(", "'unexpected response %s when deleting ROSpec'", ",", "msgName", ")", "if", "lmsg", ".", "isSuccess", "(", ")", ":", "if", "self", ".", "disconnecting", ":", "self", ".", "setState", "(", "LLRPClient", ".", "STATE_DISCONNECTED", ")", "else", ":", "self", ".", "setState", "(", "LLRPClient", ".", "STATE_CONNECTED", ")", "else", ":", "status", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", "'StatusCode'", "]", "err", "=", "lmsg", ".", "msgdict", "[", "msgName", "]", "[", "'LLRPStatus'", "]", "[", 
"'ErrorDescription'", "]", "logger", ".", "error", "(", "'DELETE_ROSPEC failed with status %s: %s'", ",", "status", ",", "err", ")", "self", ".", "processDeferreds", "(", "msgName", ",", "lmsg", ".", "isSuccess", "(", ")", ")", "if", "self", ".", "disconnecting", ":", "logger", ".", "info", "(", "'disconnecting'", ")", "self", ".", "transport", ".", "loseConnection", "(", ")", "else", ":", "logger", ".", "warn", "(", "'message %s received in unknown state!'", ",", "msgName", ")", "if", "self", ".", "_deferreds", "[", "msgName", "]", ":", "logger", ".", "error", "(", "'there should NOT be Deferreds left for %s,'", "' but there are!'", ",", "msgName", ")" ]
Implements the LLRP client state machine.
[ "Implements", "the", "LLRP", "client", "state", "machine", "." ]
python
train
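The record above is a long state-machine handler, but its core pattern is small: fire the per-message callbacks first, then gate any further action on the current state. A minimal, self-contained sketch of that pattern follows; all names are illustrative, not sllurp's API.

from collections import defaultdict

class TinyStateMachine:
    STATE_CONNECTED, STATE_INVENTORYING = range(2)

    def __init__(self):
        self.state = self.STATE_CONNECTED
        self._message_callbacks = defaultdict(list)

    def add_message_callback(self, msg_name, fn):
        self._message_callbacks[msg_name].append(fn)

    def handle(self, msg_name, msg):
        # fire per-message callbacks first, exactly like handleMessage does
        for fn in self._message_callbacks[msg_name]:
            fn(msg)
        # then gate on state: tag reports only matter while inventorying
        if msg_name == 'RO_ACCESS_REPORT' and self.state != self.STATE_INVENTORYING:
            return

sm = TinyStateMachine()
sm.add_message_callback('KEEPALIVE', lambda m: print('keepalive', m))
sm.handle('KEEPALIVE', {})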
Komnomnomnom/swigibpy
swigibpy.py
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1250-L1252
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions): """reqScannerSubscription(EClient self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)""" return _swigibpy.EClient_reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions)
[ "def", "reqScannerSubscription", "(", "self", ",", "tickerId", ",", "subscription", ",", "scannerSubscriptionOptions", ")", ":", "return", "_swigibpy", ".", "EClient_reqScannerSubscription", "(", "self", ",", "tickerId", ",", "subscription", ",", "scannerSubscriptionOptions", ")" ]
reqScannerSubscription(EClient self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)
[ "reqScannerSubscription", "(", "EClient", "self", "int", "tickerId", "ScannerSubscription", "subscription", "TagValueListSPtr", "const", "&", "scannerSubscriptionOptions", ")" ]
python
train
Murali-group/halp
halp/undirected_hypergraph.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/undirected_hypergraph.py#L489-L522
def remove_hyperedge(self, hyperedge_id): """Removes a hyperedge and its attributes from the hypergraph. :param hyperedge_id: ID of the hyperedge to be removed. :raises: ValueError -- No such hyperedge exists. Examples: :: >>> H = UndirectedHypergraph() >>> hyperedge_list = (["A", "B", "C"], ("A", "D"), set(["B", "D"])) >>> hyperedge_ids = H.add_hyperedges(hyperedge_list) >>> H.remove_hyperedge(hyperedge_ids[0]) >>> BD_id = H.get_hyperedge_id(set(["B", "D"])) >>> H.remove_hyperedge(BD_id) """ if not self.has_hyperedge_id(hyperedge_id): raise ValueError("No such hyperedge exists.") frozen_nodes = \ self._hyperedge_attributes[hyperedge_id]["__frozen_nodes"] # Remove this hyperedge from the star of every node in the hyperedge for node in frozen_nodes: self._star[node].remove(hyperedge_id) # Remove this set as the composer of the hyperedge del self._node_set_to_hyperedge[frozen_nodes] # Remove hyperedge's attributes dictionary del self._hyperedge_attributes[hyperedge_id]
[ "def", "remove_hyperedge", "(", "self", ",", "hyperedge_id", ")", ":", "if", "not", "self", ".", "has_hyperedge_id", "(", "hyperedge_id", ")", ":", "raise", "ValueError", "(", "\"No such hyperedge exists.\"", ")", "frozen_nodes", "=", "self", ".", "_hyperedge_attributes", "[", "hyperedge_id", "]", "[", "\"__frozen_nodes\"", "]", "# Remove this hyperedge from the star of every node in the hyperedge", "for", "node", "in", "frozen_nodes", ":", "self", ".", "_star", "[", "node", "]", ".", "remove", "(", "hyperedge_id", ")", "# Remove this set as the composer of the hyperedge", "del", "self", ".", "_node_set_to_hyperedge", "[", "frozen_nodes", "]", "# Remove hyperedge's attributes dictionary", "del", "self", ".", "_hyperedge_attributes", "[", "hyperedge_id", "]" ]
Removes a hyperedge and its attributes from the hypergraph. :param hyperedge_id: ID of the hyperedge to be removed. :raises: ValueError -- No such hyperedge exists. Examples: :: >>> H = UndirectedHypergraph() >>> hyperedge_list = (["A", "B", "C"], ("A", "D"), set(["B", "D"])) >>> hyperedge_ids = H.add_hyperedges(hyperedge_list) >>> H.remove_hyperedge(hyperedge_ids[0]) >>> BD_id = H.get_hyperedge_id(set(["B", "D"])) >>> H.remove_hyperedge(BD_id)
[ "Removes", "a", "hyperedge", "and", "its", "attributes", "from", "the", "hypergraph", "." ]
python
train
bastikr/boolean.py
boolean/boolean.py
https://github.com/bastikr/boolean.py/blob/e984df480afc60605e9501a0d3d54d667e8f7dbf/boolean/boolean.py#L1031-L1038
def literalize(self): """ Return an expression where NOTs are only occurring as literals. """ expr = self.demorgan() if isinstance(expr, self.__class__): return expr return expr.literalize()
[ "def", "literalize", "(", "self", ")", ":", "expr", "=", "self", ".", "demorgan", "(", ")", "if", "isinstance", "(", "expr", ",", "self", ".", "__class__", ")", ":", "return", "expr", "return", "expr", ".", "literalize", "(", ")" ]
Return an expression where NOTs are only occurring as literals.
[ "Return", "an", "expression", "where", "NOTs", "are", "only", "occurring", "as", "literals", "." ]
python
train
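A minimal usage sketch for the record above, assuming boolean.py's documented BooleanAlgebra front end; literalize() is the method shown in the record and pushes each NOT down onto the literals.

import boolean

algebra = boolean.BooleanAlgebra()
expr = algebra.parse('~(a&b)')   # NOT wraps a whole subexpression
print(expr.literalize())         # De Morgan applied: ~a|~b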
inasafe/inasafe
safe/common/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/utilities.py#L466-L477
def create_label(label_tuple, extra_label=None):
    """Return a label based on label_tuple (a, b) and an extra label.

    a and b are strings.

    The output will be something like:
                [a - b] extra_label
    """
    if extra_label is not None:
        return '[' + ' - '.join(label_tuple) + '] ' + str(extra_label)
    else:
        return '[' + ' - '.join(label_tuple) + ']'
[ "def", "create_label", "(", "label_tuple", ",", "extra_label", "=", "None", ")", ":", "if", "extra_label", "is", "not", "None", ":", "return", "'['", "+", "' - '", ".", "join", "(", "label_tuple", ")", "+", "'] '", "+", "str", "(", "extra_label", ")", "else", ":", "return", "'['", "+", "' - '", ".", "join", "(", "label_tuple", ")", "+", "']'" ]
Return a label based on label_tuple (a, b) and an extra label. a and b are strings. The output will be something like: [a - b] extra_label
[ "Return", "a", "label", "based", "on", "my_tuple", "(", "a", "b", ")", "and", "extra", "label", "." ]
python
train
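Worked calls for create_label above; it simply joins the tuple with ' - ' inside brackets and appends the optional extra label.

print(create_label(('0', '10')))            # [0 - 10]
print(create_label(('0', '10'), 'people'))  # [0 - 10] people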
Azure/blobxfer
blobxfer/models/download.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/download.py#L132-L149
def ensure_path_exists(self): # type: (LocalDestinationPath) -> None """Ensure path exists :param LocalDestinationPath self: this """ if self._is_dir is None: raise RuntimeError('is_dir not set') if self._is_dir: self._path.mkdir(mode=0o750, parents=True, exist_ok=True) else: if self._path.exists() and self._path.is_dir(): raise RuntimeError( ('destination path {} already exists and is a ' 'directory').format(self._path)) else: # ensure parent path exists and is created self._path.parent.mkdir( mode=0o750, parents=True, exist_ok=True)
[ "def", "ensure_path_exists", "(", "self", ")", ":", "# type: (LocalDestinationPath) -> None", "if", "self", ".", "_is_dir", "is", "None", ":", "raise", "RuntimeError", "(", "'is_dir not set'", ")", "if", "self", ".", "_is_dir", ":", "self", ".", "_path", ".", "mkdir", "(", "mode", "=", "0o750", ",", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "else", ":", "if", "self", ".", "_path", ".", "exists", "(", ")", "and", "self", ".", "_path", ".", "is_dir", "(", ")", ":", "raise", "RuntimeError", "(", "(", "'destination path {} already exists and is a '", "'directory'", ")", ".", "format", "(", "self", ".", "_path", ")", ")", "else", ":", "# ensure parent path exists and is created", "self", ".", "_path", ".", "parent", ".", "mkdir", "(", "mode", "=", "0o750", ",", "parents", "=", "True", ",", "exist_ok", "=", "True", ")" ]
Ensure path exists :param LocalDestinationPath self: this
[ "Ensure", "path", "exists", ":", "param", "LocalDestinationPath", "self", ":", "this" ]
python
train
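The same parent-versus-self mkdir logic as ensure_path_exists above, reduced to a self-contained pathlib sketch; the paths are placeholders.

from pathlib import Path

def ensure(dest, is_dir):
    if is_dir:
        dest.mkdir(mode=0o750, parents=True, exist_ok=True)
    elif dest.exists() and dest.is_dir():
        raise RuntimeError('destination {} is a directory'.format(dest))
    else:
        # file destination: only the parent directory must exist
        dest.parent.mkdir(mode=0o750, parents=True, exist_ok=True)

ensure(Path('/tmp/demo/out.bin'), is_dir=False)  # creates /tmp/demo only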
espressif/esptool
esptool.py
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/esptool.py#L888-L904
def read_status(self, num_bytes=2): """Read up to 24 bits (num_bytes) of SPI flash status register contents via RDSR, RDSR2, RDSR3 commands Not all SPI flash supports all three commands. The upper 1 or 2 bytes may be 0xFF. """ SPIFLASH_RDSR = 0x05 SPIFLASH_RDSR2 = 0x35 SPIFLASH_RDSR3 = 0x15 status = 0 shift = 0 for cmd in [SPIFLASH_RDSR, SPIFLASH_RDSR2, SPIFLASH_RDSR3][0:num_bytes]: status += self.run_spiflash_command(cmd, read_bits=8) << shift shift += 8 return status
[ "def", "read_status", "(", "self", ",", "num_bytes", "=", "2", ")", ":", "SPIFLASH_RDSR", "=", "0x05", "SPIFLASH_RDSR2", "=", "0x35", "SPIFLASH_RDSR3", "=", "0x15", "status", "=", "0", "shift", "=", "0", "for", "cmd", "in", "[", "SPIFLASH_RDSR", ",", "SPIFLASH_RDSR2", ",", "SPIFLASH_RDSR3", "]", "[", "0", ":", "num_bytes", "]", ":", "status", "+=", "self", ".", "run_spiflash_command", "(", "cmd", ",", "read_bits", "=", "8", ")", "<<", "shift", "shift", "+=", "8", "return", "status" ]
Read up to 24 bits (num_bytes) of SPI flash status register contents via RDSR, RDSR2, RDSR3 commands Not all SPI flash supports all three commands. The upper 1 or 2 bytes may be 0xFF.
[ "Read", "up", "to", "24", "bits", "(", "num_bytes", ")", "of", "SPI", "flash", "status", "register", "contents", "via", "RDSR", "RDSR2", "RDSR3", "commands" ]
python
train
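How read_status composes its result: each successive RDSR* reply is shifted 8 bits further left. A standalone rerun of the arithmetic with made-up register values:

reads = [0x1C, 0x02, 0x60]   # pretend RDSR, RDSR2, RDSR3 replies
status = 0
shift = 0
for value in reads[0:2]:     # num_bytes=2 uses only RDSR and RDSR2
    status += value << shift
    shift += 8
print(hex(status))           # 0x21c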
thiezn/iperf3-python
iperf3/iperf3.py
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L552-L555
def num_streams(self): """The number of streams to use.""" self._num_streams = self.lib.iperf_get_test_num_streams(self._test) return self._num_streams
[ "def", "num_streams", "(", "self", ")", ":", "self", ".", "_num_streams", "=", "self", ".", "lib", ".", "iperf_get_test_num_streams", "(", "self", ".", "_test", ")", "return", "self", ".", "_num_streams" ]
The number of streams to use.
[ "The", "number", "of", "streams", "to", "use", "." ]
python
train
drj11/pypng
code/iccp.py
https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/iccp.py#L159-L204
def writeHeader(self, out, size=999): """Add default values to the instance's `d` dictionary, then write a header out onto the file stream. The size of the profile must be specified using the `size` argument. """ def defaultkey(d, key, value): """Add ``[key]==value`` to the dictionary `d`, but only if it does not have that key already. """ if key in d: return d[key] = value z = '\x00' * 4 defaults = dict(preferredCMM=z, version='02000000', profileclass=z, colourspace=z, pcs='XYZ ', created=writeICCdatetime(), acsp='acsp', platform=z, flag=0, manufacturer=z, model=0, deviceattributes=0, intent=0, pcsilluminant=encodefuns()['XYZ'](*D50()), creator=z, ) for k, v in defaults.items(): defaultkey(self.d, k, v) hl = map(self.d.__getitem__, ['preferredCMM', 'version', 'profileclass', 'colourspace', 'pcs', 'created', 'acsp', 'platform', 'flag', 'manufacturer', 'model', 'deviceattributes', 'intent', 'pcsilluminant', 'creator']) # Convert to struct.pack input hl[1] = int(hl[1], 16) out.write(struct.pack('>L4sL4s4s4s12s4s4sL4sLQL12s4s', size, *hl)) out.write('\x00' * 44) return self
[ "def", "writeHeader", "(", "self", ",", "out", ",", "size", "=", "999", ")", ":", "def", "defaultkey", "(", "d", ",", "key", ",", "value", ")", ":", "\"\"\"Add ``[key]==value`` to the dictionary `d`, but only if\n it does not have that key already.\n \"\"\"", "if", "key", "in", "d", ":", "return", "d", "[", "key", "]", "=", "value", "z", "=", "'\\x00'", "*", "4", "defaults", "=", "dict", "(", "preferredCMM", "=", "z", ",", "version", "=", "'02000000'", ",", "profileclass", "=", "z", ",", "colourspace", "=", "z", ",", "pcs", "=", "'XYZ '", ",", "created", "=", "writeICCdatetime", "(", ")", ",", "acsp", "=", "'acsp'", ",", "platform", "=", "z", ",", "flag", "=", "0", ",", "manufacturer", "=", "z", ",", "model", "=", "0", ",", "deviceattributes", "=", "0", ",", "intent", "=", "0", ",", "pcsilluminant", "=", "encodefuns", "(", ")", "[", "'XYZ'", "]", "(", "*", "D50", "(", ")", ")", ",", "creator", "=", "z", ",", ")", "for", "k", ",", "v", "in", "defaults", ".", "items", "(", ")", ":", "defaultkey", "(", "self", ".", "d", ",", "k", ",", "v", ")", "hl", "=", "map", "(", "self", ".", "d", ".", "__getitem__", ",", "[", "'preferredCMM'", ",", "'version'", ",", "'profileclass'", ",", "'colourspace'", ",", "'pcs'", ",", "'created'", ",", "'acsp'", ",", "'platform'", ",", "'flag'", ",", "'manufacturer'", ",", "'model'", ",", "'deviceattributes'", ",", "'intent'", ",", "'pcsilluminant'", ",", "'creator'", "]", ")", "# Convert to struct.pack input", "hl", "[", "1", "]", "=", "int", "(", "hl", "[", "1", "]", ",", "16", ")", "out", ".", "write", "(", "struct", ".", "pack", "(", "'>L4sL4s4s4s12s4s4sL4sLQL12s4s'", ",", "size", ",", "*", "hl", ")", ")", "out", ".", "write", "(", "'\\x00'", "*", "44", ")", "return", "self" ]
Add default values to the instance's `d` dictionary, then write a header out onto the file stream. The size of the profile must be specified using the `size` argument.
[ "Add", "default", "values", "to", "the", "instance", "s", "d", "dictionary", "then", "write", "a", "header", "out", "onto", "the", "file", "stream", ".", "The", "size", "of", "the", "profile", "must", "be", "specified", "using", "the", "size", "argument", "." ]
python
train
rGunti/CarPi-OBDDaemon
obddaemon/custom/Obd2DataParser.py
https://github.com/rGunti/CarPi-OBDDaemon/blob/6831c477b2a00617a0d2ea98b28f3bc5c1ba8e5f/obddaemon/custom/Obd2DataParser.py#L266-L280
def parse_0134_013b(v): """ Parses the O2 Sensor Value (0134 - 013B) and returns two values parsed from it: 1. Fuel-Air Equivalence [Ratio] as a float from 0 - 2 2. Current in [mA] as a float from -128 - 128 :param str v: :return tuple of float, float: """ try: trim_val = trim_obd_value(v) val_ab = int(trim_val[0:2], 16) val_cd = int(trim_val[2:4], 16) return (2 / 65536) * val_ab, val_cd - 128 except ValueError: return None, None
[ "def", "parse_0134_013b", "(", "v", ")", ":", "try", ":", "trim_val", "=", "trim_obd_value", "(", "v", ")", "val_ab", "=", "int", "(", "trim_val", "[", "0", ":", "2", "]", ",", "16", ")", "val_cd", "=", "int", "(", "trim_val", "[", "2", ":", "4", "]", ",", "16", ")", "return", "(", "2", "/", "65536", ")", "*", "val_ab", ",", "val_cd", "-", "128", "except", "ValueError", ":", "return", "None", ",", "None" ]
Parses the O2 Sensor Value (0134 - 013B) and returns two values parsed from it: 1. Fuel-Air Equivalence [Ratio] as a float from 0 - 2 2. Current in [mA] as a float from -128 - 128 :param str v: :return tuple of float, float:
[ "Parses", "the", "O2", "Sensor", "Value", "(", "0134", "-", "013B", ")", "and", "returns", "two", "values", "parsed", "from", "it", ":", "1", ".", "Fuel", "-", "Air", "Equivalence", "[", "Ratio", "]", "as", "a", "float", "from", "0", "-", "2", "2", ".", "Current", "in", "[", "mA", "]", "as", "a", "float", "from", "-", "128", "-", "128", ":", "param", "str", "v", ":", ":", "return", "tuple", "of", "float", "float", ":" ]
python
train
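Worked numbers (Python 3) for the arithmetic in parse_0134_013b, using a made-up payload of bytes AB and CD with trim_obd_value assumed already applied:

trim_val = '80FF'
val_ab = int(trim_val[0:2], 16)            # 0x80 = 128
val_cd = int(trim_val[2:4], 16)            # 0xFF = 255
print((2 / 65536) * val_ab, val_cd - 128)  # 0.00390625 127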
GiulioRossetti/ndlib
ndlib/models/DynamicCompostiteModel.py
https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/DynamicCompostiteModel.py#L30-L70
def iteration(self, node_status=True): """ Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status) """ self.clean_initial_status(self.available_statuses.values()) actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)} if self.actual_iteration == 0: self.actual_iteration += 1 delta, node_count, status_delta = self.status_delta(actual_status) if node_status: return {"iteration": 0, "status": actual_status.copy(), "node_count": node_count.copy(), "status_delta": status_delta.copy()} else: return {"iteration": 0, "status": {}, "node_count": node_count.copy(), "status_delta": status_delta.copy()} for u in self.graph.nodes(): u_status = self.status[u] for i in range(0, self.compartment_progressive): if u_status == self.available_statuses[self.compartment[i][0]]: rule = self.compartment[i][2] test = rule.execute(node=u, graph=self.graph, status=self.status, status_map=self.available_statuses, params=self.params) if test: actual_status[u] = self.available_statuses[self.compartment[i][1]] break delta, node_count, status_delta = self.status_delta(actual_status) self.status = actual_status self.actual_iteration += 1 if node_status: return {"iteration": self.actual_iteration - 1, "status": delta.copy(), "node_count": node_count.copy(), "status_delta": status_delta.copy()} else: return {"iteration": self.actual_iteration - 1, "status": {}, "node_count": node_count.copy(), "status_delta": status_delta.copy()}
[ "def", "iteration", "(", "self", ",", "node_status", "=", "True", ")", ":", "self", ".", "clean_initial_status", "(", "self", ".", "available_statuses", ".", "values", "(", ")", ")", "actual_status", "=", "{", "node", ":", "nstatus", "for", "node", ",", "nstatus", "in", "future", ".", "utils", ".", "iteritems", "(", "self", ".", "status", ")", "}", "if", "self", ".", "actual_iteration", "==", "0", ":", "self", ".", "actual_iteration", "+=", "1", "delta", ",", "node_count", ",", "status_delta", "=", "self", ".", "status_delta", "(", "actual_status", ")", "if", "node_status", ":", "return", "{", "\"iteration\"", ":", "0", ",", "\"status\"", ":", "actual_status", ".", "copy", "(", ")", ",", "\"node_count\"", ":", "node_count", ".", "copy", "(", ")", ",", "\"status_delta\"", ":", "status_delta", ".", "copy", "(", ")", "}", "else", ":", "return", "{", "\"iteration\"", ":", "0", ",", "\"status\"", ":", "{", "}", ",", "\"node_count\"", ":", "node_count", ".", "copy", "(", ")", ",", "\"status_delta\"", ":", "status_delta", ".", "copy", "(", ")", "}", "for", "u", "in", "self", ".", "graph", ".", "nodes", "(", ")", ":", "u_status", "=", "self", ".", "status", "[", "u", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "compartment_progressive", ")", ":", "if", "u_status", "==", "self", ".", "available_statuses", "[", "self", ".", "compartment", "[", "i", "]", "[", "0", "]", "]", ":", "rule", "=", "self", ".", "compartment", "[", "i", "]", "[", "2", "]", "test", "=", "rule", ".", "execute", "(", "node", "=", "u", ",", "graph", "=", "self", ".", "graph", ",", "status", "=", "self", ".", "status", ",", "status_map", "=", "self", ".", "available_statuses", ",", "params", "=", "self", ".", "params", ")", "if", "test", ":", "actual_status", "[", "u", "]", "=", "self", ".", "available_statuses", "[", "self", ".", "compartment", "[", "i", "]", "[", "1", "]", "]", "break", "delta", ",", "node_count", ",", "status_delta", "=", "self", ".", "status_delta", "(", "actual_status", ")", "self", ".", "status", "=", "actual_status", "self", ".", "actual_iteration", "+=", "1", "if", "node_status", ":", "return", "{", "\"iteration\"", ":", "self", ".", "actual_iteration", "-", "1", ",", "\"status\"", ":", "delta", ".", "copy", "(", ")", ",", "\"node_count\"", ":", "node_count", ".", "copy", "(", ")", ",", "\"status_delta\"", ":", "status_delta", ".", "copy", "(", ")", "}", "else", ":", "return", "{", "\"iteration\"", ":", "self", ".", "actual_iteration", "-", "1", ",", "\"status\"", ":", "{", "}", ",", "\"node_count\"", ":", "node_count", ".", "copy", "(", ")", ",", "\"status_delta\"", ":", "status_delta", ".", "copy", "(", ")", "}" ]
Execute a single model iteration :return: Iteration_id, Incremental node status (dictionary node->status)
[ "Execute", "a", "single", "model", "iteration" ]
python
train
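A hedged usage sketch of the composite-model front end this iteration() serves, following ndlib's documented CompositeModel example; the dynamic variant in the record shares the same iteration contract. The module paths and the NodeStochastic compartment are assumptions taken from ndlib's docs.

import networkx as nx
import ndlib.models.ModelConfig as mc
import ndlib.models.CompositeModel as gc
import ndlib.models.compartments.NodeStochastic as ns

g = nx.erdos_renyi_graph(500, 0.1)
model = gc.CompositeModel(g)
model.add_status("Susceptible")
model.add_status("Infected")
model.add_rule("Susceptible", "Infected",
               ns.NodeStochastic(0.02, triggering_status="Infected"))

config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
it = model.iteration()          # iteration id, status delta, node counts
print(it['iteration'], it['node_count'])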
FutunnOpen/futuquant
futuquant/trade/trade_query.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/trade/trade_query.py#L143-L158
def pack_req(cls, code, pl_ratio_min, pl_ratio_max, trd_env, acc_id, trd_mkt, conn_id):
    """Convert from user request for position list to PLS request"""
    from futuquant.common.pb.Trd_GetPositionList_pb2 import Request
    req = Request()
    req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
    req.c2s.header.accID = acc_id
    req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]
    if code:
        req.c2s.filterConditions.codeList.append(code)
    if pl_ratio_min is not None:
        req.c2s.filterPLRatioMin = float(pl_ratio_min) / 100.0
    if pl_ratio_max is not None:
        req.c2s.filterPLRatioMax = float(pl_ratio_max) / 100.0

    return pack_pb_req(req, ProtoId.Trd_GetPositionList, conn_id)
[ "def", "pack_req", "(", "cls", ",", "code", ",", "pl_ratio_min", ",", "pl_ratio_max", ",", "trd_env", ",", "acc_id", ",", "trd_mkt", ",", "conn_id", ")", ":", "from", "futuquant", ".", "common", ".", "pb", ".", "Trd_GetPositionList_pb2", "import", "Request", "req", "=", "Request", "(", ")", "req", ".", "c2s", ".", "header", ".", "trdEnv", "=", "TRD_ENV_MAP", "[", "trd_env", "]", "req", ".", "c2s", ".", "header", ".", "accID", "=", "acc_id", "req", ".", "c2s", ".", "header", ".", "trdMarket", "=", "TRD_MKT_MAP", "[", "trd_mkt", "]", "if", "code", ":", "req", ".", "c2s", ".", "filterConditions", ".", "codeList", ".", "append", "(", "code", ")", "if", "pl_ratio_min", "is", "not", "None", ":", "req", ".", "c2s", ".", "filterPLRatioMin", "=", "float", "(", "pl_ratio_min", ")", "/", "100.0", "if", "pl_ratio_max", "is", "not", "None", ":", "req", ".", "c2s", ".", "filterPLRatioMax", "=", "float", "(", "pl_ratio_max", ")", "/", "100.0", "return", "pack_pb_req", "(", "req", ",", "ProtoId", ".", "Trd_GetPositionList", ",", "conn_id", ")" ]
Convert from user request for position list to PLS request
[ "Convert", "from", "user", "request", "for", "trading", "days", "to", "PLS", "request" ]
python
train
theislab/scanpy
scanpy/_exporting.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/_exporting.py#L231-L250
def write_hdf5_genes(E, gene_list, filename): '''SPRING standard: filename = main_spring_dir + "counts_norm_sparse_genes.hdf5"''' E = E.tocsc() hf = h5py.File(filename, 'w') counts_group = hf.create_group('counts') cix_group = hf.create_group('cell_ix') hf.attrs['ncells'] = E.shape[0] hf.attrs['ngenes'] = E.shape[1] for iG, g in enumerate(gene_list): counts = E[:,iG].A.squeeze() cell_ix = np.nonzero(counts)[0] counts = counts[cell_ix] counts_group.create_dataset(g, data = counts) cix_group.create_dataset(g, data = cell_ix) hf.close()
[ "def", "write_hdf5_genes", "(", "E", ",", "gene_list", ",", "filename", ")", ":", "E", "=", "E", ".", "tocsc", "(", ")", "hf", "=", "h5py", ".", "File", "(", "filename", ",", "'w'", ")", "counts_group", "=", "hf", ".", "create_group", "(", "'counts'", ")", "cix_group", "=", "hf", ".", "create_group", "(", "'cell_ix'", ")", "hf", ".", "attrs", "[", "'ncells'", "]", "=", "E", ".", "shape", "[", "0", "]", "hf", ".", "attrs", "[", "'ngenes'", "]", "=", "E", ".", "shape", "[", "1", "]", "for", "iG", ",", "g", "in", "enumerate", "(", "gene_list", ")", ":", "counts", "=", "E", "[", ":", ",", "iG", "]", ".", "A", ".", "squeeze", "(", ")", "cell_ix", "=", "np", ".", "nonzero", "(", "counts", ")", "[", "0", "]", "counts", "=", "counts", "[", "cell_ix", "]", "counts_group", ".", "create_dataset", "(", "g", ",", "data", "=", "counts", ")", "cix_group", ".", "create_dataset", "(", "g", ",", "data", "=", "cell_ix", ")", "hf", ".", "close", "(", ")" ]
SPRING standard: filename = main_spring_dir + "counts_norm_sparse_genes.hdf5"
[ "SPRING", "standard", ":", "filename", "=", "main_spring_dir", "+", "counts_norm_sparse_genes", ".", "hdf5" ]
python
train
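A small end-to-end call of write_hdf5_genes above on a 3-cell x 2-gene matrix; numpy, scipy and h5py are assumed importable as in the surrounding module.

import numpy as np
import scipy.sparse as ssp

E = ssp.csr_matrix(np.array([[0, 5],
                             [2, 0],
                             [0, 1]]))
write_hdf5_genes(E, ['geneA', 'geneB'], '/tmp/counts_norm_sparse_genes.hdf5')
# /counts/geneA == [2]    at /cell_ix/geneA == [1]
# /counts/geneB == [5, 1] at /cell_ix/geneB == [0, 2]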
Unidata/MetPy
metpy/plots/_util.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/plots/_util.py#L103-L128
def add_metpy_logo(fig, x=10, y=25, zorder=100, size='small', **kwargs): """Add the MetPy logo to a figure. Adds an image of the MetPy logo to the figure. Parameters ---------- fig : `matplotlib.figure` The `figure` instance used for plotting x : int x position padding in pixels y : float y position padding in pixels zorder : int The zorder of the logo size : str Size of logo to be used. Can be 'small' for 75 px square or 'large' for 150 px square. Returns ------- `matplotlib.image.FigureImage` The `matplotlib.image.FigureImage` instance created """ return _add_logo(fig, x=x, y=y, zorder=zorder, which='metpy', size=size, **kwargs)
[ "def", "add_metpy_logo", "(", "fig", ",", "x", "=", "10", ",", "y", "=", "25", ",", "zorder", "=", "100", ",", "size", "=", "'small'", ",", "*", "*", "kwargs", ")", ":", "return", "_add_logo", "(", "fig", ",", "x", "=", "x", ",", "y", "=", "y", ",", "zorder", "=", "zorder", ",", "which", "=", "'metpy'", ",", "size", "=", "size", ",", "*", "*", "kwargs", ")" ]
Add the MetPy logo to a figure. Adds an image of the MetPy logo to the figure. Parameters ---------- fig : `matplotlib.figure` The `figure` instance used for plotting x : int x position padding in pixels y : float y position padding in pixels zorder : int The zorder of the logo size : str Size of logo to be used. Can be 'small' for 75 px square or 'large' for 150 px square. Returns ------- `matplotlib.image.FigureImage` The `matplotlib.image.FigureImage` instance created
[ "Add", "the", "MetPy", "logo", "to", "a", "figure", "." ]
python
train
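Typical call for the record above, per its docstring; metpy.plots exposes add_metpy_logo directly.

import matplotlib.pyplot as plt
from metpy.plots import add_metpy_logo

fig = plt.figure(figsize=(8, 6))
add_metpy_logo(fig, x=15, y=15, size='small')
fig.savefig('with_logo.png')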
estnltk/estnltk
estnltk/wordnet/eurown.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/eurown.py#L2037-L2045
def addInternalLink(self, link): '''Appends InternalLink ''' if isinstance(link, InternalLink): self.internalLinks.append(link) else: raise InternalLinkError( 'link Type should be InternalLink, not %s' % type(link))
[ "def", "addInternalLink", "(", "self", ",", "link", ")", ":", "if", "isinstance", "(", "link", ",", "InternalLink", ")", ":", "self", ".", "internalLinks", ".", "append", "(", "link", ")", "else", ":", "raise", "InternalLinkError", "(", "'link Type should be InternalLink, not %s'", "%", "type", "(", "link", ")", ")" ]
Appends InternalLink
[ "Appends", "InternalLink" ]
python
train
brainiak/brainiak
brainiak/fcma/voxelselector.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L255-L282
def _worker(self, clf): """Worker node's operation. Receiving tasks from the master to process and sending the result back Parameters ---------- clf: classification function the classifier to be used in cross validation Returns ------- None """ logger.debug( 'worker %d is running, waiting for tasks from master at rank %d' % (MPI.COMM_WORLD.Get_rank(), self.master_rank) ) comm = MPI.COMM_WORLD status = MPI.Status() while 1: task = comm.recv(source=self.master_rank, tag=MPI.ANY_TAG, status=status) if status.Get_tag(): break comm.send(self._voxel_scoring(task, clf), dest=self.master_rank)
[ "def", "_worker", "(", "self", ",", "clf", ")", ":", "logger", ".", "debug", "(", "'worker %d is running, waiting for tasks from master at rank %d'", "%", "(", "MPI", ".", "COMM_WORLD", ".", "Get_rank", "(", ")", ",", "self", ".", "master_rank", ")", ")", "comm", "=", "MPI", ".", "COMM_WORLD", "status", "=", "MPI", ".", "Status", "(", ")", "while", "1", ":", "task", "=", "comm", ".", "recv", "(", "source", "=", "self", ".", "master_rank", ",", "tag", "=", "MPI", ".", "ANY_TAG", ",", "status", "=", "status", ")", "if", "status", ".", "Get_tag", "(", ")", ":", "break", "comm", ".", "send", "(", "self", ".", "_voxel_scoring", "(", "task", ",", "clf", ")", ",", "dest", "=", "self", ".", "master_rank", ")" ]
Worker node's operation. Receiving tasks from the master to process and sending the result back Parameters ---------- clf: classification function the classifier to be used in cross validation Returns ------- None
[ "Worker", "node", "s", "operation", "." ]
python
train
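The tag-terminated receive loop in _worker above, stripped to a runnable mpi4py sketch (run with mpiexec -n 2): rank 0 plays master and stops the worker by sending with a nonzero tag.

from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:                      # master
    for task in [1, 2, 3]:
        comm.send(task, dest=1, tag=0)
        print('result:', comm.recv(source=1))
    comm.send(None, dest=1, tag=1)            # nonzero tag ends the loop
else:                                         # worker, mirrors _worker()
    status = MPI.Status()
    while 1:
        task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag():
            break
        comm.send(task * task, dest=0)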
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L127-L131
def async_aldb_loaded_callback(self): """Unlock the ALDB load lock when loading is complete.""" if self.aldb_load_lock.locked(): self.aldb_load_lock.release() _LOGGING.info('ALDB Loaded')
[ "def", "async_aldb_loaded_callback", "(", "self", ")", ":", "if", "self", ".", "aldb_load_lock", ".", "locked", "(", ")", ":", "self", ".", "aldb_load_lock", ".", "release", "(", ")", "_LOGGING", ".", "info", "(", "'ALDB Loaded'", ")" ]
Unlock the ALDB load lock when loading is complete.
[ "Unlock", "the", "ALDB", "load", "lock", "when", "loading", "is", "complete", "." ]
python
train
pytorch/text
torchtext/vocab.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/vocab.py#L167-L189
def set_vectors(self, stoi, vectors, dim, unk_init=torch.Tensor.zero_): """ Set the vectors for the Vocab instance from a collection of Tensors. Arguments: stoi: A dictionary of string to the index of the associated vector in the `vectors` input argument. vectors: An indexed iterable (or other structure supporting __getitem__) that given an input index, returns a FloatTensor representing the vector for the token associated with the index. For example, vector[stoi["string"]] should return the vector for "string". dim: The dimensionality of the vectors. unk_init (callback): by default, initialize out-of-vocabulary word vectors to zero vectors; can be any function that takes in a Tensor and returns a Tensor of the same size. Default: torch.Tensor.zero_ """ self.vectors = torch.Tensor(len(self), dim) for i, token in enumerate(self.itos): wv_index = stoi.get(token, None) if wv_index is not None: self.vectors[i] = vectors[wv_index] else: self.vectors[i] = unk_init(self.vectors[i])
[ "def", "set_vectors", "(", "self", ",", "stoi", ",", "vectors", ",", "dim", ",", "unk_init", "=", "torch", ".", "Tensor", ".", "zero_", ")", ":", "self", ".", "vectors", "=", "torch", ".", "Tensor", "(", "len", "(", "self", ")", ",", "dim", ")", "for", "i", ",", "token", "in", "enumerate", "(", "self", ".", "itos", ")", ":", "wv_index", "=", "stoi", ".", "get", "(", "token", ",", "None", ")", "if", "wv_index", "is", "not", "None", ":", "self", ".", "vectors", "[", "i", "]", "=", "vectors", "[", "wv_index", "]", "else", ":", "self", ".", "vectors", "[", "i", "]", "=", "unk_init", "(", "self", ".", "vectors", "[", "i", "]", ")" ]
Set the vectors for the Vocab instance from a collection of Tensors. Arguments: stoi: A dictionary of string to the index of the associated vector in the `vectors` input argument. vectors: An indexed iterable (or other structure supporting __getitem__) that given an input index, returns a FloatTensor representing the vector for the token associated with the index. For example, vector[stoi["string"]] should return the vector for "string". dim: The dimensionality of the vectors. unk_init (callback): by default, initialize out-of-vocabulary word vectors to zero vectors; can be any function that takes in a Tensor and returns a Tensor of the same size. Default: torch.Tensor.zero_
[ "Set", "the", "vectors", "for", "the", "Vocab", "instance", "from", "a", "collection", "of", "Tensors", "." ]
python
train
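What set_vectors above produces, in miniature: rows of the pretrained matrix are copied for known tokens, and unk_init (zeroing here, the default) fills the rest.

import torch

itos = ['the', 'cat', 'zyzzyva']                 # vocab order
stoi = {'the': 0, 'cat': 1}                      # pretrained lookup
pretrained = torch.tensor([[1., 1.], [2., 2.]])  # two known vectors, dim=2

out = torch.Tensor(len(itos), 2)
for i, token in enumerate(itos):
    wv_index = stoi.get(token, None)
    if wv_index is not None:
        out[i] = pretrained[wv_index]
    else:
        out[i] = torch.Tensor.zero_(out[i])      # OOV row zeroed in place
print(out)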
core/uricore
uricore/wkz_wsgi.py
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_wsgi.py#L119-L134
def readline(self, size=None): """Reads one line from the stream.""" if self._pos >= self.limit: return self.on_exhausted() if size is None: size = self.limit - self._pos else: size = min(size, self.limit - self._pos) try: line = self._readline(size) except (ValueError, IOError): return self.on_disconnect() if size and not line: return self.on_disconnect() self._pos += len(line) return line
[ "def", "readline", "(", "self", ",", "size", "=", "None", ")", ":", "if", "self", ".", "_pos", ">=", "self", ".", "limit", ":", "return", "self", ".", "on_exhausted", "(", ")", "if", "size", "is", "None", ":", "size", "=", "self", ".", "limit", "-", "self", ".", "_pos", "else", ":", "size", "=", "min", "(", "size", ",", "self", ".", "limit", "-", "self", ".", "_pos", ")", "try", ":", "line", "=", "self", ".", "_readline", "(", "size", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "return", "self", ".", "on_disconnect", "(", ")", "if", "size", "and", "not", "line", ":", "return", "self", ".", "on_disconnect", "(", ")", "self", ".", "_pos", "+=", "len", "(", "line", ")", "return", "line" ]
Reads one line from the stream.
[ "Reads", "one", "line", "from", "the", "stream", "." ]
python
train
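The limit bookkeeping in readline above, reduced to a self-contained core: never hand back more than limit - pos bytes, and advance pos by what was actually read.

import io

class LimitedReader:
    def __init__(self, stream, limit):
        self._stream, self.limit, self._pos = stream, limit, 0

    def readline(self, size=None):
        if self._pos >= self.limit:
            return b''                           # exhausted
        budget = self.limit - self._pos
        size = budget if size is None else min(size, budget)
        line = self._stream.readline(size)
        self._pos += len(line)
        return line

r = LimitedReader(io.BytesIO(b'abc\ndef\n'), limit=5)
print(r.readline(), r.readline(), r.readline())  # b'abc\n' b'd' b''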
ladybug-tools/ladybug
ladybug/datacollection.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L540-L551
def from_json(cls, data): """Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, } """ assert 'header' in data, 'Required keyword "header" is missing!' assert 'values' in data, 'Required keyword "values" is missing!' return cls(Header.from_json(data['header']), data['values'])
[ "def", "from_json", "(", "cls", ",", "data", ")", ":", "assert", "'header'", "in", "data", ",", "'Required keyword \"header\" is missing!'", "assert", "'values'", "in", "data", ",", "'Required keyword \"values\" is missing!'", "return", "cls", "(", "Header", ".", "from_json", "(", "data", "[", "'header'", "]", ")", ",", "data", "[", "'values'", "]", ")" ]
Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, }
[ "Create", "a", "Data", "Collection", "from", "a", "dictionary", "." ]
python
train
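The dict shape from_json above expects; the header payload is a stand-in because its schema belongs to Header.from_json, and the calling class name is hypothetical.

data = {
    "header": {},                    # stand-in for a serialized ladybug Header
    "values": [20.5, 21.0, 21.3],
}
# collection = DataCollection.from_json(data)   # hypothetical caller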
mikedh/trimesh
trimesh/base.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L1084-L1121
def update_faces(self, mask):
    """
    In many cases, we will want to remove specific faces.
    However, there is additional bookkeeping to do this cleanly.
    This function updates the set of faces with a validity mask,
    as well as keeping track of normals and colors.

    Parameters
    ---------
    mask : (m) int or (len(self.faces)) bool
      Mask to remove faces
    """

    # if the mesh is already empty we can't remove anything
    if self.is_empty:
        return

    mask = np.asanyarray(mask)
    if mask.dtype.name == 'bool' and mask.all():
        # mask removes no faces so exit early
        return

    # try to save face normals before dumping cache
    cached_normals = self._cache['face_normals']

    faces = self._data['faces']
    # if Trimesh has been subclassed and faces have been moved from data
    # to cache, get faces from cache.
    if not util.is_shape(faces, (-1, 3)):
        faces = self._cache['faces']

    # actually apply the mask
    self.faces = faces[mask]

    # apply the mask to the visual object
    self.visual.update_faces(mask)

    # if our normals were the correct shape apply them
    if util.is_shape(cached_normals, (-1, 3)):
        self.face_normals = cached_normals[mask]
[ "def", "update_faces", "(", "self", ",", "mask", ")", ":", "# if the mesh is already empty we can't remove anything", "if", "self", ".", "is_empty", ":", "return", "mask", "=", "np", ".", "asanyarray", "(", "mask", ")", "if", "mask", ".", "dtype", ".", "name", "==", "'bool'", "and", "mask", ".", "all", "(", ")", ":", "# mask removes no faces so exit early", "return", "# try to save face normals before dumping cache", "cached_normals", "=", "self", ".", "_cache", "[", "'face_normals'", "]", "faces", "=", "self", ".", "_data", "[", "'faces'", "]", "# if Trimesh has been subclassed and faces have been moved from data", "# to cache, get faces from cache.", "if", "not", "util", ".", "is_shape", "(", "faces", ",", "(", "-", "1", ",", "3", ")", ")", ":", "faces", "=", "self", ".", "_cache", "[", "'faces'", "]", "# actually apply the mask", "self", ".", "faces", "=", "faces", "[", "mask", "]", "# apply the mask to the visual object", "self", ".", "visual", ".", "update_faces", "(", "mask", ")", "# if our normals were the correct shape apply them", "if", "util", ".", "is_shape", "(", "cached_normals", ",", "(", "-", "1", ",", "3", ")", ")", ":", "self", ".", "face_normals", "=", "cached_normals", "[", "mask", "]" ]
In many cases, we will want to remove specific faces. However, there is additional bookkeeping to do this cleanly. This function updates the set of faces with a validity mask, as well as keeping track of normals and colors.

Parameters
---------
mask : (m) int or (len(self.faces)) bool
  Mask to remove faces
[ "In", "many", "cases", "we", "will", "want", "to", "remove", "specific", "faces", ".", "However", "there", "is", "additional", "bookkeeping", "to", "do", "this", "cleanly", ".", "This", "function", "updates", "the", "set", "of", "faces", "with", "a", "validity", "mask", "as", "well", "as", "keeping", "track", "of", "normals", "and", "colors", "." ]
python
train
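Typical use of update_faces above, assuming trimesh's documented mesh attributes: drop degenerate faces by masking on per-face area.

import trimesh

mesh = trimesh.creation.box()
mask = mesh.area_faces > 1e-8        # boolean mask, len == len(mesh.faces)
mesh.update_faces(mask)
mesh.remove_unreferenced_vertices()  # optional tidy-up after masking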
robotools/fontParts
Lib/fontParts/base/font.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/font.py#L490-L531
def _getFlatKerning(self): """ This is the environment implementation of :meth:`BaseFont.getFlatKerning`. Subclasses may override this method. """ kernOrder = { (True, True): 0, # group group (True, False): 1, # group glyph (False, True): 2, # glyph group (False, False): 3, # glyph glyph } def kerningSortKeyFunc(pair): g1, g2 = pair g1grp = g1.startswith("public.kern1.") g2grp = g2.startswith("public.kern2.") return (kernOrder[g1grp, g2grp], pair) flatKerning = dict() kerning = self.kerning groups = self.groups for pair in sorted(self.kerning.keys(), key=kerningSortKeyFunc): kern = kerning[pair] (left, right) = pair if left.startswith("public.kern1."): left = groups.get(left, []) else: left = [left] if right.startswith("public.kern2."): right = groups.get(right, []) else: right = [right] for r in right: for l in left: flatKerning[(l, r)] = kern return flatKerning
[ "def", "_getFlatKerning", "(", "self", ")", ":", "kernOrder", "=", "{", "(", "True", ",", "True", ")", ":", "0", ",", "# group group", "(", "True", ",", "False", ")", ":", "1", ",", "# group glyph", "(", "False", ",", "True", ")", ":", "2", ",", "# glyph group", "(", "False", ",", "False", ")", ":", "3", ",", "# glyph glyph", "}", "def", "kerningSortKeyFunc", "(", "pair", ")", ":", "g1", ",", "g2", "=", "pair", "g1grp", "=", "g1", ".", "startswith", "(", "\"public.kern1.\"", ")", "g2grp", "=", "g2", ".", "startswith", "(", "\"public.kern2.\"", ")", "return", "(", "kernOrder", "[", "g1grp", ",", "g2grp", "]", ",", "pair", ")", "flatKerning", "=", "dict", "(", ")", "kerning", "=", "self", ".", "kerning", "groups", "=", "self", ".", "groups", "for", "pair", "in", "sorted", "(", "self", ".", "kerning", ".", "keys", "(", ")", ",", "key", "=", "kerningSortKeyFunc", ")", ":", "kern", "=", "kerning", "[", "pair", "]", "(", "left", ",", "right", ")", "=", "pair", "if", "left", ".", "startswith", "(", "\"public.kern1.\"", ")", ":", "left", "=", "groups", ".", "get", "(", "left", ",", "[", "]", ")", "else", ":", "left", "=", "[", "left", "]", "if", "right", ".", "startswith", "(", "\"public.kern2.\"", ")", ":", "right", "=", "groups", ".", "get", "(", "right", ",", "[", "]", ")", "else", ":", "right", "=", "[", "right", "]", "for", "r", "in", "right", ":", "for", "l", "in", "left", ":", "flatKerning", "[", "(", "l", ",", "r", ")", "]", "=", "kern", "return", "flatKerning" ]
This is the environment implementation of :meth:`BaseFont.getFlatKerning`. Subclasses may override this method.
[ "This", "is", "the", "environment", "implementation", "of", ":", "meth", ":", "BaseFont", ".", "getFlatKerning", "." ]
python
train
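A self-contained rerun of the expansion order in _getFlatKerning above: group pairs sort first, so a later glyph-glyph exception wins for the same flat pair.

kerning = {("public.kern1.O", "public.kern2.H"): -10, ("O", "H"): -25}
groups = {"public.kern1.O": ["O", "Q"], "public.kern2.H": ["H", "I"]}
kern_order = {(True, True): 0, (True, False): 1, (False, True): 2, (False, False): 3}

def sort_key(pair):
    g1, g2 = pair
    return (kern_order[g1.startswith("public.kern1."),
                       g2.startswith("public.kern2.")], pair)

flat = {}
for left, right in sorted(kerning, key=sort_key):
    value = kerning[(left, right)]
    lefts = groups[left] if left.startswith("public.kern1.") else [left]
    rights = groups[right] if right.startswith("public.kern2.") else [right]
    for l in lefts:
        for r in rights:
            flat[(l, r)] = value
print(flat[("O", "H")], flat[("Q", "I")])   # -25 (exception), -10 (group value)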
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L222-L295
def browse(self, endpoint="hot", category_path="", seed="", q="", timerange="24hr", tag="", offset=0, limit=10): """Fetch deviations from public endpoints :param endpoint: The endpoint from which the deviations will be fetched (hot/morelikethis/newest/undiscovered/popular/tags) :param category_path: category path to fetch from :param q: Search query term :param timerange: The timerange :param tag: The tag to browse :param offset: the pagination offset :param limit: the pagination limit """ if endpoint == "hot": response = self._req('/browse/hot', { "category_path":category_path, "offset":offset, "limit":limit }) elif endpoint == "morelikethis": if seed: response = self._req('/browse/morelikethis', { "seed":seed, "category_path":category_path, "offset":offset, "limit":limit }) else: raise DeviantartError("No seed defined.") elif endpoint == "newest": response = self._req('/browse/newest', { "category_path":category_path, "q":q, "offset":offset, "limit":limit }) elif endpoint == "undiscovered": response = self._req('/browse/undiscovered', { "category_path":category_path, "offset":offset, "limit":limit }) elif endpoint == "popular": response = self._req('/browse/popular', { "category_path":category_path, "q":q, "timerange":timerange, "offset":offset, "limit":limit }) elif endpoint == "tags": if tag: response = self._req('/browse/tags', { "tag":tag, "offset":offset, "limit":limit }) else: raise DeviantartError("No tag defined.") else: raise DeviantartError("Unknown endpoint.") deviations = [] for item in response['results']: d = Deviation() d.from_dict(item) deviations.append(d) return { "results" : deviations, "has_more" : response['has_more'], "next_offset" : response['next_offset'] }
[ "def", "browse", "(", "self", ",", "endpoint", "=", "\"hot\"", ",", "category_path", "=", "\"\"", ",", "seed", "=", "\"\"", ",", "q", "=", "\"\"", ",", "timerange", "=", "\"24hr\"", ",", "tag", "=", "\"\"", ",", "offset", "=", "0", ",", "limit", "=", "10", ")", ":", "if", "endpoint", "==", "\"hot\"", ":", "response", "=", "self", ".", "_req", "(", "'/browse/hot'", ",", "{", "\"category_path\"", ":", "category_path", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "elif", "endpoint", "==", "\"morelikethis\"", ":", "if", "seed", ":", "response", "=", "self", ".", "_req", "(", "'/browse/morelikethis'", ",", "{", "\"seed\"", ":", "seed", ",", "\"category_path\"", ":", "category_path", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "else", ":", "raise", "DeviantartError", "(", "\"No seed defined.\"", ")", "elif", "endpoint", "==", "\"newest\"", ":", "response", "=", "self", ".", "_req", "(", "'/browse/newest'", ",", "{", "\"category_path\"", ":", "category_path", ",", "\"q\"", ":", "q", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "elif", "endpoint", "==", "\"undiscovered\"", ":", "response", "=", "self", ".", "_req", "(", "'/browse/undiscovered'", ",", "{", "\"category_path\"", ":", "category_path", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "elif", "endpoint", "==", "\"popular\"", ":", "response", "=", "self", ".", "_req", "(", "'/browse/popular'", ",", "{", "\"category_path\"", ":", "category_path", ",", "\"q\"", ":", "q", ",", "\"timerange\"", ":", "timerange", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "elif", "endpoint", "==", "\"tags\"", ":", "if", "tag", ":", "response", "=", "self", ".", "_req", "(", "'/browse/tags'", ",", "{", "\"tag\"", ":", "tag", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "else", ":", "raise", "DeviantartError", "(", "\"No tag defined.\"", ")", "else", ":", "raise", "DeviantartError", "(", "\"Unknown endpoint.\"", ")", "deviations", "=", "[", "]", "for", "item", "in", "response", "[", "'results'", "]", ":", "d", "=", "Deviation", "(", ")", "d", ".", "from_dict", "(", "item", ")", "deviations", ".", "append", "(", "d", ")", "return", "{", "\"results\"", ":", "deviations", ",", "\"has_more\"", ":", "response", "[", "'has_more'", "]", ",", "\"next_offset\"", ":", "response", "[", "'next_offset'", "]", "}" ]
Fetch deviations from public endpoints :param endpoint: The endpoint from which the deviations will be fetched (hot/morelikethis/newest/undiscovered/popular/tags) :param category_path: category path to fetch from :param seed: The id of the seed deviation (required for the morelikethis endpoint) :param q: Search query term :param timerange: The timerange :param tag: The tag to browse :param offset: the pagination offset :param limit: the pagination limit
[ "Fetch", "deviations", "from", "public", "endpoints" ]
python
train
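A hedged usage sketch for this method (client construction and credentials are illustrative placeholders, not verified API guarantees):

    import deviantart

    da = deviantart.Api("client_id", "client_secret")  # placeholder credentials
    page = da.browse(endpoint="popular", q="landscape", timerange="24hr", limit=5)
    for deviation in page["results"]:
        print(deviation)
    if page["has_more"]:  # paginate using the offset returned by the API
        page = da.browse(endpoint="popular", q="landscape", offset=page["next_offset"])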
SeleniumHQ/selenium
py/selenium/webdriver/common/action_chains.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/common/action_chains.py#L310-L325
def release(self, on_element=None): """ Releasing a held mouse button on an element. :Args: - on_element: The element to mouse up. If None, releases on current mouse position. """ if on_element: self.move_to_element(on_element) if self._driver.w3c: self.w3c_actions.pointer_action.release() self.w3c_actions.key_action.pause() else: self._actions.append(lambda: self._driver.execute(Command.MOUSE_UP, {})) return self
[ "def", "release", "(", "self", ",", "on_element", "=", "None", ")", ":", "if", "on_element", ":", "self", ".", "move_to_element", "(", "on_element", ")", "if", "self", ".", "_driver", ".", "w3c", ":", "self", ".", "w3c_actions", ".", "pointer_action", ".", "release", "(", ")", "self", ".", "w3c_actions", ".", "key_action", ".", "pause", "(", ")", "else", ":", "self", ".", "_actions", ".", "append", "(", "lambda", ":", "self", ".", "_driver", ".", "execute", "(", "Command", ".", "MOUSE_UP", ",", "{", "}", ")", ")", "return", "self" ]
Releasing a held mouse button on an element. :Args: - on_element: The element to mouse up. If None, releases on current mouse position.
[ "Releasing", "a", "held", "mouse", "button", "on", "an", "element", "." ]
python
train
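A drag-and-drop sketch built on release() (element ids are hypothetical; the find_element_by_id locators match the pre-Selenium-4 API this code belongs to):

    from selenium import webdriver
    from selenium.webdriver.common.action_chains import ActionChains

    driver = webdriver.Firefox()
    source = driver.find_element_by_id("draggable")  # hypothetical ids
    target = driver.find_element_by_id("droppable")
    ActionChains(driver).click_and_hold(source).move_to_element(target).release().perform()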
adrianliaw/PyCuber
pycuber/solver/cfop/f2l.py
https://github.com/adrianliaw/PyCuber/blob/e44b5ba48c831b964ce73d046fb813222771853f/pycuber/solver/cfop/f2l.py#L44-L53
def estimated_position(self): """ Get the estimated cubie of solved pair. """ corner = {"D":self.cube["D"]["D"]} edge = {} for cubie in (corner, edge): for face in self.pair: cubie.update({face:self.cube[face][face]}) return (Corner(**corner), Edge(**edge))
[ "def", "estimated_position", "(", "self", ")", ":", "corner", "=", "{", "\"D\"", ":", "self", ".", "cube", "[", "\"D\"", "]", "[", "\"D\"", "]", "}", "edge", "=", "{", "}", "for", "cubie", "in", "(", "corner", ",", "edge", ")", ":", "for", "face", "in", "self", ".", "pair", ":", "cubie", ".", "update", "(", "{", "face", ":", "self", ".", "cube", "[", "face", "]", "[", "face", "]", "}", ")", "return", "(", "Corner", "(", "*", "*", "corner", ")", ",", "Edge", "(", "*", "*", "edge", ")", ")" ]
Get the estimated cubie of solved pair.
[ "Get", "the", "estimated", "cubie", "of", "solved", "pair", "." ]
python
train
umutbozkurt/django-rest-framework-mongoengine
rest_framework_mongoengine/generics.py
https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/generics.py#L8-L13
def get_object_or_404(queryset, *args, **kwargs): """ replacement of rest_framework.generics and django.shrtcuts analogues """ try: return queryset.get(*args, **kwargs) except (ValueError, TypeError, DoesNotExist, ValidationError): raise Http404()
[ "def", "get_object_or_404", "(", "queryset", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "queryset", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "ValueError", ",", "TypeError", ",", "DoesNotExist", ",", "ValidationError", ")", ":", "raise", "Http404", "(", ")" ]
replacement of rest_framework.generics and django.shortcuts analogues
[ "replacement", "of", "rest_framework", ".", "generics", "and", "django", ".", "shortcuts", "analogues" ]
python
train
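A usage sketch (Article is a hypothetical mongoengine Document):

    from rest_framework_mongoengine.generics import get_object_or_404

    def retrieve_article(slug):
        # Unlike a bare queryset.get(), malformed lookups (ValueError,
        # ValidationError) also become Http404 rather than a 500 error.
        return get_object_or_404(Article.objects, slug=slug)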
scikit-tda/kepler-mapper
kmapper/visuals.py
https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/visuals.py#L212-L236
def build_histogram(data, colorscale=None, nbins=10): """ Build histogram of data based on values of color_function """ if colorscale is None: colorscale = colorscale_default # TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions colorscale = _colors_to_rgb(colorscale) h_min, h_max = 0, 1 hist, bin_edges = np.histogram(data, range=(h_min, h_max), bins=nbins) bin_mids = np.mean(np.array(list(zip(bin_edges, bin_edges[1:]))), axis=1) histogram = [] max_bucket_value = max(hist) sum_bucket_value = sum(hist) for bar, mid in zip(hist, bin_mids): height = np.floor(((bar / max_bucket_value) * 100) + 0.5) perc = round((bar / sum_bucket_value) * 100.0, 1) color = _map_val2color(mid, 0.0, 1.0, colorscale) histogram.append({"height": height, "perc": perc, "color": color}) return histogram
[ "def", "build_histogram", "(", "data", ",", "colorscale", "=", "None", ",", "nbins", "=", "10", ")", ":", "if", "colorscale", "is", "None", ":", "colorscale", "=", "colorscale_default", "# TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions", "colorscale", "=", "_colors_to_rgb", "(", "colorscale", ")", "h_min", ",", "h_max", "=", "0", ",", "1", "hist", ",", "bin_edges", "=", "np", ".", "histogram", "(", "data", ",", "range", "=", "(", "h_min", ",", "h_max", ")", ",", "bins", "=", "nbins", ")", "bin_mids", "=", "np", ".", "mean", "(", "np", ".", "array", "(", "list", "(", "zip", "(", "bin_edges", ",", "bin_edges", "[", "1", ":", "]", ")", ")", ")", ",", "axis", "=", "1", ")", "histogram", "=", "[", "]", "max_bucket_value", "=", "max", "(", "hist", ")", "sum_bucket_value", "=", "sum", "(", "hist", ")", "for", "bar", ",", "mid", "in", "zip", "(", "hist", ",", "bin_mids", ")", ":", "height", "=", "np", ".", "floor", "(", "(", "(", "bar", "/", "max_bucket_value", ")", "*", "100", ")", "+", "0.5", ")", "perc", "=", "round", "(", "(", "bar", "/", "sum_bucket_value", ")", "*", "100.0", ",", "1", ")", "color", "=", "_map_val2color", "(", "mid", ",", "0.0", ",", "1.0", ",", "colorscale", ")", "histogram", ".", "append", "(", "{", "\"height\"", ":", "height", ",", "\"perc\"", ":", "perc", ",", "\"color\"", ":", "color", "}", ")", "return", "histogram" ]
Build histogram of data based on values of color_function
[ "Build", "histogram", "of", "data", "based", "on", "values", "of", "color_function" ]
python
train
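A direct-call sketch (import path taken from this record; note the histogram range is hard-coded to (0, 1), so color values should be pre-normalised):

    import numpy as np
    from kmapper.visuals import build_histogram

    color_values = np.random.uniform(0, 1, size=200)
    bars = build_histogram(color_values, nbins=10)
    print(bars[0])  # e.g. {'height': 93.0, 'perc': 9.5, 'color': 'rgb(...)'}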
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py#L338-L353
def do_indent(s, width=4, indentfirst=False): """Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too. """ indention = u' ' * width rv = (u'\n' + indention).join(s.splitlines()) if indentfirst: rv = indention + rv return rv
[ "def", "do_indent", "(", "s", ",", "width", "=", "4", ",", "indentfirst", "=", "False", ")", ":", "indention", "=", "u' '", "*", "width", "rv", "=", "(", "u'\\n'", "+", "indention", ")", ".", "join", "(", "s", ".", "splitlines", "(", ")", ")", "if", "indentfirst", ":", "rv", "=", "indention", "+", "rv", "return", "rv" ]
Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too.
[ "Return", "a", "copy", "of", "the", "passed", "string", "each", "line", "indented", "by", "4", "spaces", ".", "The", "first", "line", "is", "not", "indented", ".", "If", "you", "want", "to", "change", "the", "number", "of", "spaces", "or", "indent", "the", "first", "line", "too", "you", "can", "pass", "additional", "parameters", "to", "the", "filter", ":" ]
python
train
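A behaviour sketch, calling the filter implementation directly (the import matches older Jinja2 releases like this vendored copy; newer Jinja2 renamed the parameters):

    from jinja2.filters import do_indent

    print(do_indent("alpha\nbeta", width=2, indentfirst=True))
    #   alpha
    #   beta   <- both lines indented; with indentfirst=False the first stays flush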
senaite/senaite.core
bika/lims/browser/batchfolder.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/batchfolder.py#L114-L120
def before_render(self): """Before template render hook """ super(BatchFolderContentsView, self).before_render() if self.context.portal_type == "BatchFolder": self.request.set("disable_border", 1)
[ "def", "before_render", "(", "self", ")", ":", "super", "(", "BatchFolderContentsView", ",", "self", ")", ".", "before_render", "(", ")", "if", "self", ".", "context", ".", "portal_type", "==", "\"BatchFolder\"", ":", "self", ".", "request", ".", "set", "(", "\"disable_border\"", ",", "1", ")" ]
Before template render hook
[ "Before", "template", "render", "hook" ]
python
train
inspirehep/refextract
refextract/documents/text.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/text.py#L139-L164
def remove_and_record_multiple_spaces_in_line(line): """For a given string, locate all ocurrences of multiple spaces together in the line, record the number of spaces found at each position, and replace them with a single space. @param line: (string) the text line to be processed for multiple spaces. @return: (tuple) countaining a dictionary and a string. The dictionary contains information about the number of spaces removed at given positions in the line. For example, if 3 spaces were removed from the line at index '22', the dictionary would be set as follows: { 22 : 3 } The string that is also returned in this tuple is the line after multiple-space ocurrences have replaced with single spaces. """ removed_spaces = {} # get a collection of match objects for all instances of # multiple-spaces found in the line: multispace_matches = re_group_captured_multiple_space.finditer(line) # record the number of spaces found at each match position: for multispace in multispace_matches: removed_spaces[multispace.start()] = \ (multispace.end() - multispace.start() - 1) # now remove the multiple-spaces from the line, replacing with a # single space at each position: line = re_group_captured_multiple_space.sub(u' ', line) return (removed_spaces, line)
[ "def", "remove_and_record_multiple_spaces_in_line", "(", "line", ")", ":", "removed_spaces", "=", "{", "}", "# get a collection of match objects for all instances of", "# multiple-spaces found in the line:", "multispace_matches", "=", "re_group_captured_multiple_space", ".", "finditer", "(", "line", ")", "# record the number of spaces found at each match position:", "for", "multispace", "in", "multispace_matches", ":", "removed_spaces", "[", "multispace", ".", "start", "(", ")", "]", "=", "(", "multispace", ".", "end", "(", ")", "-", "multispace", ".", "start", "(", ")", "-", "1", ")", "# now remove the multiple-spaces from the line, replacing with a", "# single space at each position:", "line", "=", "re_group_captured_multiple_space", ".", "sub", "(", "u' '", ",", "line", ")", "return", "(", "removed_spaces", ",", "line", ")" ]
For a given string, locate all occurrences of multiple spaces together in the line, record the number of spaces found at each position, and replace them with a single space. @param line: (string) the text line to be processed for multiple spaces. @return: (tuple) containing a dictionary and a string. The dictionary contains information about the number of spaces removed at given positions in the line. For example, if 3 spaces were removed from the line at index '22', the dictionary would be set as follows: { 22 : 3 } The string that is also returned in this tuple is the line after multiple-space occurrences have been replaced with single spaces.
[ "For", "a", "given", "string", "locate", "all", "occurrences", "of", "multiple", "spaces", "together", "in", "the", "line", "record", "the", "number", "of", "spaces", "found", "at", "each", "position", "and", "replace", "them", "with", "a", "single", "space", "." ]
python
train
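A worked example of the return value (the recorded keys are indexes into the original line, the values are the number of extra spaces removed):

    from refextract.documents.text import remove_and_record_multiple_spaces_in_line

    removed, cleaned = remove_and_record_multiple_spaces_in_line(u"a  b    c")
    # removed == {1: 1, 4: 3}   (1 extra space at index 1, 3 extra at index 4)
    # cleaned == u"a b c"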
cloudera/cm_api
python/src/cm_api/http_client.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/http_client.py#L150-L183
def execute(self, http_method, path, params=None, data=None, headers=None): """ Submit an HTTP request. @param http_method: GET, POST, PUT, DELETE @param path: The path of the resource. @param params: Key-value parameter data. @param data: The data to attach to the body of the request. @param headers: The headers to set for this request. @return: The result of urllib2.urlopen() """ # Prepare URL and params url = self._make_url(path, params) if http_method in ("GET", "DELETE"): if data is not None: self.logger.warn( "GET method does not pass any data. Path '%s'" % (path,)) data = None # Setup the request request = urllib2.Request(url, data) # Hack/workaround because urllib2 only does GET and POST request.get_method = lambda: http_method headers = self._get_headers(headers) for k, v in headers.items(): request.add_header(k, v) # Call it self.logger.debug("%s %s" % (http_method, url)) try: return self._opener.open(request) except urllib2.HTTPError, ex: raise self._exc_class(ex)
[ "def", "execute", "(", "self", ",", "http_method", ",", "path", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ")", ":", "# Prepare URL and params", "url", "=", "self", ".", "_make_url", "(", "path", ",", "params", ")", "if", "http_method", "in", "(", "\"GET\"", ",", "\"DELETE\"", ")", ":", "if", "data", "is", "not", "None", ":", "self", ".", "logger", ".", "warn", "(", "\"GET method does not pass any data. Path '%s'\"", "%", "(", "path", ",", ")", ")", "data", "=", "None", "# Setup the request", "request", "=", "urllib2", ".", "Request", "(", "url", ",", "data", ")", "# Hack/workaround because urllib2 only does GET and POST", "request", ".", "get_method", "=", "lambda", ":", "http_method", "headers", "=", "self", ".", "_get_headers", "(", "headers", ")", "for", "k", ",", "v", "in", "headers", ".", "items", "(", ")", ":", "request", ".", "add_header", "(", "k", ",", "v", ")", "# Call it", "self", ".", "logger", ".", "debug", "(", "\"%s %s\"", "%", "(", "http_method", ",", "url", ")", ")", "try", ":", "return", "self", ".", "_opener", ".", "open", "(", "request", ")", "except", "urllib2", ".", "HTTPError", ",", "ex", ":", "raise", "self", ".", "_exc_class", "(", "ex", ")" ]
Submit an HTTP request. @param http_method: GET, POST, PUT, DELETE @param path: The path of the resource. @param params: Key-value parameter data. @param data: The data to attach to the body of the request. @param headers: The headers to set for this request. @return: The result of urllib2.urlopen()
[ "Submit", "an", "HTTP", "request", ".", "@param", "http_method", ":", "GET", "POST", "PUT", "DELETE", "@param", "path", ":", "The", "path", "of", "the", "resource", ".", "@param", "params", ":", "Key", "-", "value", "parameter", "data", ".", "@param", "data", ":", "The", "data", "to", "attach", "to", "the", "body", "of", "the", "request", ".", "@param", "headers", ":", "The", "headers", "to", "set", "for", "this", "request", "." ]
python
train
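A Python 2 usage sketch; the constructor arguments below are assumptions inferred from this class, not verified API:

    from cm_api.http_client import HttpClient  # import path per this record

    client = HttpClient("http://cm-host:7180/api/v12")  # hypothetical base URL
    resp = client.execute("GET", "/clusters", params={"view": "summary"})
    print resp.read()  # execute() returns the urllib2 response object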
pydata/xarray
xarray/plot/utils.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/plot/utils.py#L624-L649
def _infer_interval_breaks(coord, axis=0, check_monotonic=False): """ >>> _infer_interval_breaks(np.arange(5)) array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]) >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1) array([[-0.5, 0.5, 1.5], [ 2.5, 3.5, 4.5]]) """ coord = np.asarray(coord) if check_monotonic and not _is_monotonic(coord, axis=axis): raise ValueError("The input coordinate is not sorted in increasing " "order along axis %d. This can lead to unexpected " "results. Consider calling the `sortby` method on " "the input DataArray. To plot data with categorical " "axes, consider using the `heatmap` function from " "the `seaborn` statistical plotting library." % axis) deltas = 0.5 * np.diff(coord, axis=axis) if deltas.size == 0: deltas = np.array(0.0) first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis) last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis) trim_last = tuple(slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)) return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
[ "def", "_infer_interval_breaks", "(", "coord", ",", "axis", "=", "0", ",", "check_monotonic", "=", "False", ")", ":", "coord", "=", "np", ".", "asarray", "(", "coord", ")", "if", "check_monotonic", "and", "not", "_is_monotonic", "(", "coord", ",", "axis", "=", "axis", ")", ":", "raise", "ValueError", "(", "\"The input coordinate is not sorted in increasing \"", "\"order along axis %d. This can lead to unexpected \"", "\"results. Consider calling the `sortby` method on \"", "\"the input DataArray. To plot data with categorical \"", "\"axes, consider using the `heatmap` function from \"", "\"the `seaborn` statistical plotting library.\"", "%", "axis", ")", "deltas", "=", "0.5", "*", "np", ".", "diff", "(", "coord", ",", "axis", "=", "axis", ")", "if", "deltas", ".", "size", "==", "0", ":", "deltas", "=", "np", ".", "array", "(", "0.0", ")", "first", "=", "np", ".", "take", "(", "coord", ",", "[", "0", "]", ",", "axis", "=", "axis", ")", "-", "np", ".", "take", "(", "deltas", ",", "[", "0", "]", ",", "axis", "=", "axis", ")", "last", "=", "np", ".", "take", "(", "coord", ",", "[", "-", "1", "]", ",", "axis", "=", "axis", ")", "+", "np", ".", "take", "(", "deltas", ",", "[", "-", "1", "]", ",", "axis", "=", "axis", ")", "trim_last", "=", "tuple", "(", "slice", "(", "None", ",", "-", "1", ")", "if", "n", "==", "axis", "else", "slice", "(", "None", ")", "for", "n", "in", "range", "(", "coord", ".", "ndim", ")", ")", "return", "np", ".", "concatenate", "(", "[", "first", ",", "coord", "[", "trim_last", "]", "+", "deltas", ",", "last", "]", ",", "axis", "=", "axis", ")" ]
>>> _infer_interval_breaks(np.arange(5)) array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]) >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1) array([[-0.5, 0.5, 1.5], [ 2.5, 3.5, 4.5]])
[ ">>>", "_infer_interval_breaks", "(", "np", ".", "arange", "(", "5", "))", "array", "(", "[", "-", "0", ".", "5", "0", ".", "5", "1", ".", "5", "2", ".", "5", "3", ".", "5", "4", ".", "5", "]", ")", ">>>", "_infer_interval_breaks", "(", "[[", "0", "1", "]", "[", "3", "4", "]]", "axis", "=", "1", ")", "array", "(", "[[", "-", "0", ".", "5", "0", ".", "5", "1", ".", "5", "]", "[", "2", ".", "5", "3", ".", "5", "4", ".", "5", "]]", ")" ]
python
train
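The axis-0 arithmetic written out for an uneven 1-D coordinate, mirroring the function body:

    import numpy as np

    coord = np.array([0.0, 1.0, 2.0, 4.0])
    deltas = 0.5 * np.diff(coord)                        # [0.5, 0.5, 1.0]
    first = coord[:1] - deltas[:1]                       # [-0.5]
    last = coord[-1:] + deltas[-1:]                      # [5.0]
    breaks = np.concatenate([first, coord[:-1] + deltas, last])
    # breaks == [-0.5, 0.5, 1.5, 3.0, 5.0] -> n+1 cell edges for n points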
pri22296/beautifultable
beautifultable/beautifultable.py
https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1188-L1205
def get_table_width(self): """Get the width of the table as number of characters. Column width should be set prior to calling this method. Returns ------- int Width of the table as number of characters. """ if self.column_count == 0: return 0 width = sum(self._column_widths) width += ((self._column_count - 1) * termwidth(self.column_separator_char)) width += termwidth(self.left_border_char) width += termwidth(self.right_border_char) return width
[ "def", "get_table_width", "(", "self", ")", ":", "if", "self", ".", "column_count", "==", "0", ":", "return", "0", "width", "=", "sum", "(", "self", ".", "_column_widths", ")", "width", "+=", "(", "(", "self", ".", "_column_count", "-", "1", ")", "*", "termwidth", "(", "self", ".", "column_separator_char", ")", ")", "width", "+=", "termwidth", "(", "self", ".", "left_border_char", ")", "width", "+=", "termwidth", "(", "self", ".", "right_border_char", ")", "return", "width" ]
Get the width of the table as number of characters. Column width should be set prior to calling this method. Returns ------- int Width of the table as number of characters.
[ "Get", "the", "width", "of", "the", "table", "as", "number", "of", "characters", "." ]
python
train
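A usage sketch (attribute names follow the 0.x BeautifulTable API this method belongs to):

    from beautifultable import BeautifulTable

    table = BeautifulTable()
    table.column_headers = ["name", "rank"]
    table.append_row(["Jacob", 1])
    # column widths are computed as rows/headers are set, so this is valid now:
    print(table.get_table_width())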
projectshift/shift-boiler
boiler/user/views_social.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_social.py#L60-L66
def next(self): """ Where to redirect after authorization """ next = request.args.get('next') if next is None: params = self.default_redirect_params next = url_for(self.default_redirect_endpoint, **params) return next
[ "def", "next", "(", "self", ")", ":", "next", "=", "request", ".", "args", ".", "get", "(", "'next'", ")", "if", "next", "is", "None", ":", "params", "=", "self", ".", "default_redirect_params", "next", "=", "url_for", "(", "self", ".", "default_redirect_endpoint", ",", "*", "*", "params", ")", "return", "next" ]
Where to redirect after authorization
[ "Where", "to", "redirect", "after", "authorization" ]
python
train
tanghaibao/jcvi
jcvi/apps/bwa.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/bwa.py#L208-L233
def sampe(args, opts): """ %prog sampe database.fasta read1.fq read2.fq Wrapper for `bwa sampe`. Output will be read1.sam. """ dbfile, read1file, read2file = args dbfile = check_index(dbfile) sai1file = check_aln(dbfile, read1file, cpus=opts.cpus) sai2file = check_aln(dbfile, read2file, cpus=opts.cpus) samfile, _, unmapped = get_samfile(read1file, dbfile, bam=opts.bam, unmapped=opts.unmapped) if not need_update((dbfile, sai1file, sai2file), samfile): logging.error("`{0}` exists. `bwa samse` already run.".format(samfile)) return "", samfile cmd = "bwa sampe " + " ".join((dbfile, sai1file, sai2file, read1file, read2file)) cmd += " " + opts.extra if opts.cutoff: cmd += " -a {0}".format(opts.cutoff) if opts.uniq: cmd += " -n 1" return cmd, samfile
[ "def", "sampe", "(", "args", ",", "opts", ")", ":", "dbfile", ",", "read1file", ",", "read2file", "=", "args", "dbfile", "=", "check_index", "(", "dbfile", ")", "sai1file", "=", "check_aln", "(", "dbfile", ",", "read1file", ",", "cpus", "=", "opts", ".", "cpus", ")", "sai2file", "=", "check_aln", "(", "dbfile", ",", "read2file", ",", "cpus", "=", "opts", ".", "cpus", ")", "samfile", ",", "_", ",", "unmapped", "=", "get_samfile", "(", "read1file", ",", "dbfile", ",", "bam", "=", "opts", ".", "bam", ",", "unmapped", "=", "opts", ".", "unmapped", ")", "if", "not", "need_update", "(", "(", "dbfile", ",", "sai1file", ",", "sai2file", ")", ",", "samfile", ")", ":", "logging", ".", "error", "(", "\"`{0}` exists. `bwa samse` already run.\"", ".", "format", "(", "samfile", ")", ")", "return", "\"\"", ",", "samfile", "cmd", "=", "\"bwa sampe \"", "+", "\" \"", ".", "join", "(", "(", "dbfile", ",", "sai1file", ",", "sai2file", ",", "read1file", ",", "read2file", ")", ")", "cmd", "+=", "\" \"", "+", "opts", ".", "extra", "if", "opts", ".", "cutoff", ":", "cmd", "+=", "\" -a {0}\"", ".", "format", "(", "opts", ".", "cutoff", ")", "if", "opts", ".", "uniq", ":", "cmd", "+=", "\" -n 1\"", "return", "cmd", ",", "samfile" ]
%prog sampe database.fasta read1.fq read2.fq Wrapper for `bwa sampe`. Output will be read1.sam.
[ "%prog", "sampe", "database", ".", "fasta", "read1", ".", "fq", "read2", ".", "fq" ]
python
train
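A sketch of what the wrapper returns (opts comes from the module's option parser; the exact .sai names depend on check_aln, so they are shown loosely):

    cmd, samfile = sampe(["ref.fasta", "read1.fq", "read2.fq"], opts)
    # cmd     -> "bwa sampe ref.fasta <read1>.sai <read2>.sai read1.fq read2.fq"
    #            plus " -a <cutoff>" and/or " -n 1" when opts.cutoff / opts.uniq are set
    # samfile -> "read1.sam" (a .bam target when opts.bam is set)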
acutesoftware/virtual-AI-simulator
vais/battle.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/battle.py#L129-L145
def show_message(self, c_attack, c_defend, result, dmg, print_console='Yes'): """ function to wrap the display of the battle messages """ perc_health_att = '[' + str(round((c_attack.stats['Health']*100) / c_attack.stats['max_health'] )) + '%]' perc_health_def = '[' + str(round((c_defend.stats['Health']*100) / c_defend.stats['max_health'] )) + '%]' if result == 'Miss': txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' miss ' + c_defend.name + ' ' + perc_health_def.rjust(6) elif result == 'Crit': txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' CRIT ' + c_defend.name + ' ' + perc_health_def.rjust(6) txt += ' for ' + str(dmg) else: txt = c_attack.name + ' ' + perc_health_att.rjust(6) + ' hits ' + c_defend.name + ' ' + perc_health_def.rjust(6) txt += ' for ' + str(dmg) if print_console == 'Yes': print(txt)
[ "def", "show_message", "(", "self", ",", "c_attack", ",", "c_defend", ",", "result", ",", "dmg", ",", "print_console", "=", "'Yes'", ")", ":", "perc_health_att", "=", "'['", "+", "str", "(", "round", "(", "(", "c_attack", ".", "stats", "[", "'Health'", "]", "*", "100", ")", "/", "c_attack", ".", "stats", "[", "'max_health'", "]", ")", ")", "+", "'%]'", "perc_health_def", "=", "'['", "+", "str", "(", "round", "(", "(", "c_defend", ".", "stats", "[", "'Health'", "]", "*", "100", ")", "/", "c_defend", ".", "stats", "[", "'max_health'", "]", ")", ")", "+", "'%]'", "if", "result", "==", "'Miss'", ":", "txt", "=", "c_attack", ".", "name", "+", "' '", "+", "perc_health_att", ".", "rjust", "(", "6", ")", "+", "' miss '", "+", "c_defend", ".", "name", "+", "' '", "+", "perc_health_def", ".", "rjust", "(", "6", ")", "elif", "result", "==", "'Crit'", ":", "txt", "=", "c_attack", ".", "name", "+", "' '", "+", "perc_health_att", ".", "rjust", "(", "6", ")", "+", "' CRIT '", "+", "c_defend", ".", "name", "+", "' '", "+", "perc_health_def", ".", "rjust", "(", "6", ")", "txt", "+=", "' for '", "+", "str", "(", "dmg", ")", "else", ":", "txt", "=", "c_attack", ".", "name", "+", "' '", "+", "perc_health_att", ".", "rjust", "(", "6", ")", "+", "' hits '", "+", "c_defend", ".", "name", "+", "' '", "+", "perc_health_def", ".", "rjust", "(", "6", ")", "txt", "+=", "' for '", "+", "str", "(", "dmg", ")", "if", "print_console", "==", "'Yes'", ":", "print", "(", "txt", ")" ]
function to wrap the display of the battle messages
[ "function", "to", "wrap", "the", "display", "of", "the", "battle", "messages" ]
python
train
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L1143-L1171
def get_wallet_addresses(wallet_name, api_key, is_hd_wallet=False, zero_balance=None, used=None, omit_addresses=False, coin_symbol='btc'): ''' Returns a list of wallet addresses as well as some meta-data ''' assert is_valid_coin_symbol(coin_symbol) assert api_key assert len(wallet_name) <= 25, wallet_name assert zero_balance in (None, True, False) assert used in (None, True, False) assert isinstance(omit_addresses, bool), omit_addresses params = {'token': api_key} kwargs = {'hd/' if is_hd_wallet else '': wallet_name} # hack! url = make_url(coin_symbol, 'wallets', **kwargs) if zero_balance is True: params['zerobalance'] = 'true' elif zero_balance is False: params['zerobalance'] = 'false' if used is True: params['used'] = 'true' elif used is False: params['used'] = 'false' if omit_addresses: params['omitWalletAddresses'] = 'true' r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
[ "def", "get_wallet_addresses", "(", "wallet_name", ",", "api_key", ",", "is_hd_wallet", "=", "False", ",", "zero_balance", "=", "None", ",", "used", "=", "None", ",", "omit_addresses", "=", "False", ",", "coin_symbol", "=", "'btc'", ")", ":", "assert", "is_valid_coin_symbol", "(", "coin_symbol", ")", "assert", "api_key", "assert", "len", "(", "wallet_name", ")", "<=", "25", ",", "wallet_name", "assert", "zero_balance", "in", "(", "None", ",", "True", ",", "False", ")", "assert", "used", "in", "(", "None", ",", "True", ",", "False", ")", "assert", "isinstance", "(", "omit_addresses", ",", "bool", ")", ",", "omit_addresses", "params", "=", "{", "'token'", ":", "api_key", "}", "kwargs", "=", "{", "'hd/'", "if", "is_hd_wallet", "else", "''", ":", "wallet_name", "}", "# hack!", "url", "=", "make_url", "(", "coin_symbol", ",", "'wallets'", ",", "*", "*", "kwargs", ")", "if", "zero_balance", "is", "True", ":", "params", "[", "'zerobalance'", "]", "=", "'true'", "elif", "zero_balance", "is", "False", ":", "params", "[", "'zerobalance'", "]", "=", "'false'", "if", "used", "is", "True", ":", "params", "[", "'used'", "]", "=", "'true'", "elif", "used", "is", "False", ":", "params", "[", "'used'", "]", "=", "'false'", "if", "omit_addresses", ":", "params", "[", "'omitWalletAddresses'", "]", "=", "'true'", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "verify", "=", "True", ",", "timeout", "=", "TIMEOUT_IN_SECONDS", ")", "return", "get_valid_json", "(", "r", ")" ]
Returns a list of wallet addresses as well as some meta-data
[ "Returns", "a", "list", "of", "wallet", "addresses", "as", "well", "as", "some", "meta", "-", "data" ]
python
train
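A hedged call sketch (the token is a placeholder; the returned dict mirrors BlockCypher's wallet JSON, which typically carries an 'addresses' list):

    from blockcypher import get_wallet_addresses

    wallet = get_wallet_addresses(
        wallet_name="mywallet",   # must be <= 25 characters
        api_key="YOUR_TOKEN",     # placeholder
        zero_balance=False,       # only addresses that have held funds
        coin_symbol="btc",
    )
    print(wallet.get("addresses", []))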
ynop/audiomate
audiomate/corpus/io/voxforge.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/voxforge.py#L188-L234
def parse_speaker_info(readme_path): """ Parse speaker info and return tuple (idx, gender). """ idx = None gender = issuers.Gender.UNKNOWN age_group = issuers.AgeGroup.UNKNOWN native_lang = None with open(readme_path, 'r', errors='ignore') as f: for raw_line in f: line = raw_line.strip() if line is not None and line is not '': line = line.rstrip(';.') parts = line.split(':', maxsplit=1) if len(parts) > 1: key = parts[0].strip().lower() value = parts[1].strip() if key == 'user name': idx = value value = value.lower() if key == 'gender': if value in ['männlich', 'male', 'mnnlich']: gender = issuers.Gender.MALE elif value in ['weiblich', 'female', '[female]']: gender = issuers.Gender.FEMALE if key == 'age range': if value in ['erwachsener', 'adult', '[adult]', '[erwachsener]']: age_group = issuers.AgeGroup.ADULT elif value in ['senior', '[senior']: age_group = issuers.AgeGroup.SENIOR elif value in ['youth', 'jugendlicher', '[youth]', '[jugendlicher]']: age_group = issuers.AgeGroup.YOUTH elif value in ['kind', 'child']: age_group = issuers.AgeGroup.CHILD if key == 'language': if value in ['de', 'ger', 'deu', '[de]']: native_lang = 'deu' elif value in ['en', 'eng', '[en]']: native_lang = 'eng' return issuers.Speaker(idx, gender=gender, age_group=age_group, native_language=native_lang)
[ "def", "parse_speaker_info", "(", "readme_path", ")", ":", "idx", "=", "None", "gender", "=", "issuers", ".", "Gender", ".", "UNKNOWN", "age_group", "=", "issuers", ".", "AgeGroup", ".", "UNKNOWN", "native_lang", "=", "None", "with", "open", "(", "readme_path", ",", "'r'", ",", "errors", "=", "'ignore'", ")", "as", "f", ":", "for", "raw_line", "in", "f", ":", "line", "=", "raw_line", ".", "strip", "(", ")", "if", "line", "is", "not", "None", "and", "line", "is", "not", "''", ":", "line", "=", "line", ".", "rstrip", "(", "';.'", ")", "parts", "=", "line", ".", "split", "(", "':'", ",", "maxsplit", "=", "1", ")", "if", "len", "(", "parts", ")", ">", "1", ":", "key", "=", "parts", "[", "0", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "value", "=", "parts", "[", "1", "]", ".", "strip", "(", ")", "if", "key", "==", "'user name'", ":", "idx", "=", "value", "value", "=", "value", ".", "lower", "(", ")", "if", "key", "==", "'gender'", ":", "if", "value", "in", "[", "'männlich',", " ", "male',", " ", "mnnlich']", ":", "", "gender", "=", "issuers", ".", "Gender", ".", "MALE", "elif", "value", "in", "[", "'weiblich'", ",", "'female'", ",", "'[female]'", "]", ":", "gender", "=", "issuers", ".", "Gender", ".", "FEMALE", "if", "key", "==", "'age range'", ":", "if", "value", "in", "[", "'erwachsener'", ",", "'adult'", ",", "'[adult]'", ",", "'[erwachsener]'", "]", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "ADULT", "elif", "value", "in", "[", "'senior'", ",", "'[senior'", "]", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "SENIOR", "elif", "value", "in", "[", "'youth'", ",", "'jugendlicher'", ",", "'[youth]'", ",", "'[jugendlicher]'", "]", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "YOUTH", "elif", "value", "in", "[", "'kind'", ",", "'child'", "]", ":", "age_group", "=", "issuers", ".", "AgeGroup", ".", "CHILD", "if", "key", "==", "'language'", ":", "if", "value", "in", "[", "'de'", ",", "'ger'", ",", "'deu'", ",", "'[de]'", "]", ":", "native_lang", "=", "'deu'", "elif", "value", "in", "[", "'en'", ",", "'eng'", ",", "'[en]'", "]", ":", "native_lang", "=", "'eng'", "return", "issuers", ".", "Speaker", "(", "idx", ",", "gender", "=", "gender", ",", "age_group", "=", "age_group", ",", "native_language", "=", "native_lang", ")" ]
Parse speaker info from the README file and return an ``issuers.Speaker`` with idx, gender, age group and native language set.
[ "Parse", "speaker", "info", "from", "the", "README", "file", "and", "return", "an", "issuers", ".", "Speaker", "." ]
python
train
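A self-contained sketch with a synthetic README; the parser is line-oriented "key: value" text, and the expected Speaker fields follow the constructor call in the code above:

    import os, tempfile
    from audiomate.corpus.io.voxforge import parse_speaker_info

    content = u"User Name: anon123;\nGender: male;\nAge Range: adult;\nLanguage: EN;\n"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(content)
    speaker = parse_speaker_info(f.name)
    # expected: idx 'anon123', Gender.MALE, AgeGroup.ADULT, native language 'eng'
    os.remove(f.name)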
digidotcom/python-devicecloud
devicecloud/monitor_tcp.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/monitor_tcp.py#L320-L345
def _consume_queue(self): """ Continually blocks until data is on the internal queue, then calls the session's registered callback and sends a PublishMessageReceived if callback returned True. """ while True: session, block_id, raw_data = self._queue.get() data = json.loads(raw_data.decode('utf-8')) # decode as JSON try: result = session.callback(data) if result is None: self.log.warn("Callback %r returned None, expected boolean. Messages " "are not marked as received unless True is returned", session.callback) elif result: # Send a Successful PublishMessageReceived with the # block id sent in request if self._write_queue is not None: response_message = struct.pack('!HHH', PUBLISH_MESSAGE_RECEIVED, block_id, 200) self._write_queue.put((session.socket, response_message)) except Exception as exception: self.log.exception(exception) self._queue.task_done()
[ "def", "_consume_queue", "(", "self", ")", ":", "while", "True", ":", "session", ",", "block_id", ",", "raw_data", "=", "self", ".", "_queue", ".", "get", "(", ")", "data", "=", "json", ".", "loads", "(", "raw_data", ".", "decode", "(", "'utf-8'", ")", ")", "# decode as JSON", "try", ":", "result", "=", "session", ".", "callback", "(", "data", ")", "if", "result", "is", "None", ":", "self", ".", "log", ".", "warn", "(", "\"Callback %r returned None, expected boolean. Messages \"", "\"are not marked as received unless True is returned\"", ",", "session", ".", "callback", ")", "elif", "result", ":", "# Send a Successful PublishMessageReceived with the", "# block id sent in request", "if", "self", ".", "_write_queue", "is", "not", "None", ":", "response_message", "=", "struct", ".", "pack", "(", "'!HHH'", ",", "PUBLISH_MESSAGE_RECEIVED", ",", "block_id", ",", "200", ")", "self", ".", "_write_queue", ".", "put", "(", "(", "session", ".", "socket", ",", "response_message", ")", ")", "except", "Exception", "as", "exception", ":", "self", ".", "log", ".", "exception", "(", "exception", ")", "self", ".", "_queue", ".", "task_done", "(", ")" ]
Continually blocks until data is on the internal queue, then calls the session's registered callback and sends a PublishMessageReceived if callback returned True.
[ "Continually", "blocks", "until", "data", "is", "on", "the", "internal", "queue", "then", "calls", "the", "session", "s", "registered", "callback", "and", "sends", "a", "PublishMessageReceived", "if", "callback", "returned", "True", "." ]
python
train
kelproject/pykube
pykube/config.py
https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L184-L188
def user(self): """ Returns the current user set by current context """ return self.users.get(self.contexts[self.current_context].get("user", ""), {})
[ "def", "user", "(", "self", ")", ":", "return", "self", ".", "users", ".", "get", "(", "self", ".", "contexts", "[", "self", ".", "current_context", "]", ".", "get", "(", "\"user\"", ",", "\"\"", ")", ",", "{", "}", ")" ]
Returns the current user set by current context
[ "Returns", "the", "current", "user", "set", "by", "current", "context" ]
python
train
lacava/few
few/variation.py
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L174-L176
def mutate(self,p_i,func_set,term_set): #, max_depth=2 """point mutation, addition, removal""" self.point_mutate(p_i,func_set,term_set)
[ "def", "mutate", "(", "self", ",", "p_i", ",", "func_set", ",", "term_set", ")", ":", "#, max_depth=2", "self", ".", "point_mutate", "(", "p_i", ",", "func_set", ",", "term_set", ")" ]
point mutation, addition, removal
[ "point", "mutation", "addition", "removal" ]
python
train
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L289-L399
def make_xeditable(instance=None, extra_attrs=[], *args, **kwargs): """ Converts the contents of the column into an ``<a>`` tag with the required DOM attributes to power the X-Editable UI. The following keyword arguments are all optional, but may be provided when pre-calling the helper, to customize the output of the helper once it is run per object record: * ``type`` - Defaults to the basic type of the HTML input ("text", "number", "datetime") * ``title`` - Defaults to an empty string, controls the HTML "title" attribute. * ``placeholder`` - Defaults to whatever "title" is, controls the HTML "placeholder" attribute. * ``url`` - Defaults to the ``request.path`` of the view, which will automatically serve the X-Editable interface as long as it inherits from ``XEditableDatatableView``. * ``source`` - Defaults to the ``request.path`` of the view, which will automatically serve X-Editable requests for ``choices`` data about a field. Supplying a list of names via ``extra_attrs`` will enable arbitrary other keyword arguments to be rendered in the HTML as attributes as well. ``extra_attrs`` serves as a whitelist of extra names so that unintended kwargs don't get rendered without your permission. """ if instance is None: # Preloading kwargs into the helper for deferred execution helper = partial(make_xeditable, extra_attrs=extra_attrs, *args, **kwargs) return helper # Immediate finalization, return the xeditable structure data = kwargs.get('default_value', instance) rich_data = kwargs.get('rich_value', data) # Compile values to appear as "data-*" attributes on the anchor tag default_attr_names = ['pk', 'type', 'url', 'source', 'title', 'placeholder'] valid_attr_names = set(default_attr_names + list(extra_attrs)) attrs = {} for k, v in kwargs.items(): if k in valid_attr_names: if k.startswith('data_'): k = k[5:] attrs['data-{0}'.format(k)] = v attrs['data-xeditable'] = "xeditable" # Assign default values where they are not provided field_name = kwargs['field_name'] # sent as a default kwarg to helpers if isinstance(field_name, (tuple, list)): # Legacy syntax field_name = field_name[1] if isinstance(field_name, (tuple, list)): raise ValueError("'make_xeditable' helper needs a single-field data column," " not {0!r}".format(field_name)) attrs['data-name'] = field_name if isinstance(rich_data, Model): attrs['data-value'] = rich_data.pk else: attrs['data-value'] = rich_data if 'data-pk' not in attrs: attrs['data-pk'] = instance.pk if 'data-url' not in attrs: # Look for a backup data-url provider_name = 'get_update_url' url_provider = getattr(kwargs.get('view'), provider_name, None) if not url_provider: url_provider = getattr(instance, provider_name, None) if not url_provider and 'view' in kwargs: url_provider = lambda field_name: kwargs['view'].request.path else: raise ValueError("'make_xeditable' cannot determine a value for 'url'.") if url_provider: attrs['data-url'] = url_provider(field_name=field_name) if 'data-placeholder' not in attrs: attrs['data-placeholder'] = attrs.get('data-title', "") if 'data-type' not in attrs: if hasattr(instance, '_meta'): # Try to fetch a reasonable type from the field's class if field_name == 'pk': # special field name not in Model._meta.fields field = instance._meta.pk else: field = resolve_orm_path(instance, field_name) if field.choices: field_type = 'select' else: field_type = XEDITABLE_FIELD_TYPES.get(field.get_internal_type(), 'text') else: field_type = 'text' attrs['data-type'] = field_type # type=select elements need to fetch their valid choice options from an AJAX endpoint. # Register the view for this lookup. if attrs['data-type'] in ('select', 'select2'): if 'data-source' not in attrs: if 'view' in kwargs: attrs['data-source'] = "{url}?{field_param}={fieldname}".format(**{ 'url': kwargs['view'].request.path, 'field_param': kwargs['view'].xeditable_fieldname_param, 'fieldname': field_name, }) if attrs['data-type'] == 'select2': attrs['data-source'] += '&select2=true' else: raise ValueError("'make_xeditable' cannot determine a value for 'source'.") # Choice fields will want to display their readable label instead of db data data = getattr(instance, 'get_{0}_display'.format(field_name), lambda: data)() data = u"""<a href="#"{attrs}>{data}</a>""".format(attrs=flatatt(attrs), data=data) return data
[ "def", "make_xeditable", "(", "instance", "=", "None", ",", "extra_attrs", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "instance", "is", "None", ":", "# Preloading kwargs into the helper for deferred execution", "helper", "=", "partial", "(", "make_xeditable", ",", "extra_attrs", "=", "extra_attrs", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "helper", "# Immediate finalization, return the xeditable structure", "data", "=", "kwargs", ".", "get", "(", "'default_value'", ",", "instance", ")", "rich_data", "=", "kwargs", ".", "get", "(", "'rich_value'", ",", "data", ")", "# Compile values to appear as \"data-*\" attributes on the anchor tag", "default_attr_names", "=", "[", "'pk'", ",", "'type'", ",", "'url'", ",", "'source'", ",", "'title'", ",", "'placeholder'", "]", "valid_attr_names", "=", "set", "(", "default_attr_names", "+", "list", "(", "extra_attrs", ")", ")", "attrs", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "in", "valid_attr_names", ":", "if", "k", ".", "startswith", "(", "'data_'", ")", ":", "k", "=", "k", "[", "5", ":", "]", "attrs", "[", "'data-{0}'", ".", "format", "(", "k", ")", "]", "=", "v", "attrs", "[", "'data-xeditable'", "]", "=", "\"xeditable\"", "# Assign default values where they are not provided", "field_name", "=", "kwargs", "[", "'field_name'", "]", "# sent as a default kwarg to helpers", "if", "isinstance", "(", "field_name", ",", "(", "tuple", ",", "list", ")", ")", ":", "# Legacy syntax", "field_name", "=", "field_name", "[", "1", "]", "if", "isinstance", "(", "field_name", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "ValueError", "(", "\"'make_xeditable' helper needs a single-field data column,\"", "\" not {0!r}\"", ".", "format", "(", "field_name", ")", ")", "attrs", "[", "'data-name'", "]", "=", "field_name", "if", "isinstance", "(", "rich_data", ",", "Model", ")", ":", "attrs", "[", "'data-value'", "]", "=", "rich_data", ".", "pk", "else", ":", "attrs", "[", "'data-value'", "]", "=", "rich_data", "if", "'data-pk'", "not", "in", "attrs", ":", "attrs", "[", "'data-pk'", "]", "=", "instance", ".", "pk", "if", "'data-url'", "not", "in", "attrs", ":", "# Look for a backup data-url", "provider_name", "=", "'get_update_url'", "url_provider", "=", "getattr", "(", "kwargs", ".", "get", "(", "'view'", ")", ",", "provider_name", ",", "None", ")", "if", "not", "url_provider", ":", "url_provider", "=", "getattr", "(", "instance", ",", "provider_name", ",", "None", ")", "if", "not", "url_provider", "and", "'view'", "in", "kwargs", ":", "url_provider", "=", "lambda", "field_name", ":", "kwargs", "[", "'view'", "]", ".", "request", ".", "path", "else", ":", "raise", "ValueError", "(", "\"'make_xeditable' cannot determine a value for 'url'.\"", ")", "if", "url_provider", ":", "attrs", "[", "'data-url'", "]", "=", "url_provider", "(", "field_name", "=", "field_name", ")", "if", "'data-placeholder'", "not", "in", "attrs", ":", "attrs", "[", "'data-placeholder'", "]", "=", "attrs", ".", "get", "(", "'data-title'", ",", "\"\"", ")", "if", "'data-type'", "not", "in", "attrs", ":", "if", "hasattr", "(", "instance", ",", "'_meta'", ")", ":", "# Try to fetch a reasonable type from the field's class", "if", "field_name", "==", "'pk'", ":", "# special field name not in Model._meta.fields", "field", "=", "instance", ".", "_meta", ".", "pk", "else", ":", "field", "=", "resolve_orm_path", "(", "instance", ",", "field_name", ")", "if", "field", ".", "choices", ":", "field_type", 
"=", "'select'", "else", ":", "field_type", "=", "XEDITABLE_FIELD_TYPES", ".", "get", "(", "field", ".", "get_internal_type", "(", ")", ",", "'text'", ")", "else", ":", "field_type", "=", "'text'", "attrs", "[", "'data-type'", "]", "=", "field_type", "# type=select elements need to fetch their valid choice options from an AJAX endpoint.", "# Register the view for this lookup.", "if", "attrs", "[", "'data-type'", "]", "in", "(", "'select'", ",", "'select2'", ")", ":", "if", "'data-source'", "not", "in", "attrs", ":", "if", "'view'", "in", "kwargs", ":", "attrs", "[", "'data-source'", "]", "=", "\"{url}?{field_param}={fieldname}\"", ".", "format", "(", "*", "*", "{", "'url'", ":", "kwargs", "[", "'view'", "]", ".", "request", ".", "path", ",", "'field_param'", ":", "kwargs", "[", "'view'", "]", ".", "xeditable_fieldname_param", ",", "'fieldname'", ":", "field_name", ",", "}", ")", "if", "attrs", "[", "'data-type'", "]", "==", "'select2'", ":", "attrs", "[", "'data-source'", "]", "+=", "'&select2=true'", "else", ":", "raise", "ValueError", "(", "\"'make_xeditable' cannot determine a value for 'source'.\"", ")", "# Choice fields will want to display their readable label instead of db data", "data", "=", "getattr", "(", "instance", ",", "'get_{0}_display'", ".", "format", "(", "field_name", ")", ",", "lambda", ":", "data", ")", "(", ")", "data", "=", "u\"\"\"<a href=\"#\"{attrs}>{data}</a>\"\"\"", ".", "format", "(", "attrs", "=", "flatatt", "(", "attrs", ")", ",", "data", "=", "data", ")", "return", "data" ]
Converts the contents of the column into an ``<a>`` tag with the required DOM attributes to power the X-Editable UI. The following keyword arguments are all optional, but may be provided when pre-calling the helper, to customize the output of the helper once it is run per object record: * ``type`` - Defaults to the basic type of the HTML input ("text", "number", "datetime") * ``title`` - Defaults to an empty string, controls the HTML "title" attribute. * ``placeholder`` - Defaults to whatever "title" is, controls the HTML "placeholder" attribute. * ``url`` - Defaults to the ``request.path`` of the view, which will automatically serve the X-Editable interface as long as it inherits from ``XEditableDatatableView``. * ``source`` - Defaults to the ``request.path`` of the view, which will automatically serve X-Editable requests for ``choices`` data about a field. Supplying a list of names via ``extra_attrs`` will enable arbitrary other keyword arguments to be rendered in the HTML as attributes as well. ``extra_attrs`` serves as a whitelist of extra names so that unintended kwargs don't get rendered without your permission.
[ "Converts", "the", "contents", "of", "the", "column", "into", "an", "<a", ">", "tag", "with", "the", "required", "DOM", "attributes", "to", "power", "the", "X", "-", "Editable", "UI", "." ]
python
train
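A hedged wiring sketch for the legacy column syntax this helper targets (column and field names are illustrative):

    from datatableview import helpers

    datatable_options = {
        'columns': [
            ('Name', 'name', helpers.make_xeditable),                      # run per record
            ('Status', 'status', helpers.make_xeditable(type='select')),   # pre-called kwargs
        ],
    }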
xolox/python-vcs-repo-mgr
vcs_repo_mgr/backends/git.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/git.py#L55-L58
def get_vcs_directory(context, directory): """Get the pathname of the directory containing the version control metadata files.""" nested = os.path.join(directory, '.git') return nested if context.is_directory(nested) else directory
[ "def", "get_vcs_directory", "(", "context", ",", "directory", ")", ":", "nested", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'.git'", ")", "return", "nested", "if", "context", ".", "is_directory", "(", "nested", ")", "else", "directory" ]
Get the pathname of the directory containing the version control metadata files.
[ "Get", "the", "pathname", "of", "the", "directory", "containing", "the", "version", "control", "metadata", "files", "." ]
python
train
Cologler/fsoopify-python
fsoopify/nodes.py
https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L299-L303
def has_file(self, name: str): ''' check whether this directory contains the file. ''' return os.path.isfile(self._path / name)
[ "def", "has_file", "(", "self", ",", "name", ":", "str", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "self", ".", "_path", "/", "name", ")" ]
check whether this directory contains the file.
[ "check", "whether", "this", "directory", "contains", "the", "file", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py#L79-L92
def show_position(self): '''show map position click information''' pos = self.click_position dms = (mp_util.degrees_to_dms(pos[0]), mp_util.degrees_to_dms(pos[1])) msg = "Coordinates in WGS84\n" msg += "Decimal: %.6f %.6f\n" % (pos[0], pos[1]) msg += "DMS: %s %s\n" % (dms[0], dms[1]) msg += "Grid: %s\n" % mp_util.latlon_to_grid(pos) if self.logdir: logf = open(os.path.join(self.logdir, "positions.txt"), "a") logf.write("Position: %.6f %.6f at %s\n" % (pos[0], pos[1], time.ctime())) logf.close() posbox = MPMenuChildMessageDialog('Position', msg, font_size=32) posbox.show()
[ "def", "show_position", "(", "self", ")", ":", "pos", "=", "self", ".", "click_position", "dms", "=", "(", "mp_util", ".", "degrees_to_dms", "(", "pos", "[", "0", "]", ")", ",", "mp_util", ".", "degrees_to_dms", "(", "pos", "[", "1", "]", ")", ")", "msg", "=", "\"Coordinates in WGS84\\n\"", "msg", "+=", "\"Decimal: %.6f %.6f\\n\"", "%", "(", "pos", "[", "0", "]", ",", "pos", "[", "1", "]", ")", "msg", "+=", "\"DMS: %s %s\\n\"", "%", "(", "dms", "[", "0", "]", ",", "dms", "[", "1", "]", ")", "msg", "+=", "\"Grid: %s\\n\"", "%", "mp_util", ".", "latlon_to_grid", "(", "pos", ")", "if", "self", ".", "logdir", ":", "logf", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\"positions.txt\"", ")", ",", "\"a\"", ")", "logf", ".", "write", "(", "\"Position: %.6f %.6f at %s\\n\"", "%", "(", "pos", "[", "0", "]", ",", "pos", "[", "1", "]", ",", "time", ".", "ctime", "(", ")", ")", ")", "logf", ".", "close", "(", ")", "posbox", "=", "MPMenuChildMessageDialog", "(", "'Position'", ",", "msg", ",", "font_size", "=", "32", ")", "posbox", ".", "show", "(", ")" ]
show map position click information
[ "show", "map", "position", "click", "information" ]
python
train
vxgmichel/aiostream
aiostream/stream/select.py
https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/select.py#L82-L93
async def filterindex(source, func): """Filter an asynchronous sequence using the index of the elements. The given function is synchronous, takes the index as an argument, and returns ``True`` if the corresponding should be forwarded, ``False`` otherwise. """ source = transform.enumerate.raw(source) async with streamcontext(source) as streamer: async for i, item in streamer: if func(i): yield item
[ "async", "def", "filterindex", "(", "source", ",", "func", ")", ":", "source", "=", "transform", ".", "enumerate", ".", "raw", "(", "source", ")", "async", "with", "streamcontext", "(", "source", ")", "as", "streamer", ":", "async", "for", "i", ",", "item", "in", "streamer", ":", "if", "func", "(", "i", ")", ":", "yield", "item" ]
Filter an asynchronous sequence using the index of the elements. The given function is synchronous, takes the index as an argument, and returns ``True`` if the corresponding should be forwarded, ``False`` otherwise.
[ "Filter", "an", "asynchronous", "sequence", "using", "the", "index", "of", "the", "elements", "." ]
python
train
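A usage sketch, assuming filterindex is exposed as an operator on aiostream.stream like its siblings in select.py:

    import asyncio
    from aiostream import stream

    async def main():
        evens = stream.filterindex(stream.range(10), lambda i: i % 2 == 0)
        print(await stream.list(evens))  # [0, 2, 4, 6, 8]

    asyncio.run(main())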
Rapptz/discord.py
discord/client.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/client.py#L740-L767
def event(self, coro): """A decorator that registers an event to listen to. You can find more info about the events on the :ref:`documentation below <discord-api-events>`. The events must be a |corourl|_, if not, :exc:`TypeError` is raised. Example --------- .. code-block:: python3 @client.event async def on_ready(): print('Ready!') Raises -------- TypeError The coroutine passed is not actually a coroutine. """ if not asyncio.iscoroutinefunction(coro): raise TypeError('event registered must be a coroutine function') setattr(self, coro.__name__, coro) log.debug('%s has successfully been registered as an event', coro.__name__) return coro
[ "def", "event", "(", "self", ",", "coro", ")", ":", "if", "not", "asyncio", ".", "iscoroutinefunction", "(", "coro", ")", ":", "raise", "TypeError", "(", "'event registered must be a coroutine function'", ")", "setattr", "(", "self", ",", "coro", ".", "__name__", ",", "coro", ")", "log", ".", "debug", "(", "'%s has successfully been registered as an event'", ",", "coro", ".", "__name__", ")", "return", "coro" ]
A decorator that registers an event to listen to. You can find more info about the events on the :ref:`documentation below <discord-api-events>`. The events must be a |corourl|_, if not, :exc:`TypeError` is raised. Example --------- .. code-block:: python3 @client.event async def on_ready(): print('Ready!') Raises -------- TypeError The coroutine passed is not actually a coroutine.
[ "A", "decorator", "that", "registers", "an", "event", "to", "listen", "to", "." ]
python
train
sundarnagarajan/cffi_utils
cffi_utils/py2to3.py
https://github.com/sundarnagarajan/cffi_utils/blob/1d5ab2d2fcb962372228033106bc23f1d73d31fa/cffi_utils/py2to3.py#L306-L336
def get_rand_int(encoding='latin1', avoid=[]): ''' encoding-->str: one of ENCODINGS avoid-->list of int: to void (unprintable chars etc) Returns-->int that can be converted to requested encoding which is NOT in avoid ''' UNICODE_LIMIT = 0x10ffff # See: https://en.wikipedia.org/wiki/UTF-8#Invalid_code_points SURROGATE_RANGE = (0xD800, 0xDFFF) if encoding not in ENCODINGS: raise ValueError('Unsupported encoding: ' + str(encoding)) if encoding == 'ascii': maxord = 2 ** 7 elif encoding == 'latin1': maxord = 2 ** 8 elif encoding == 'utf16': maxord = 2 ** 16 elif encoding == 'utf8': maxord = 2 ** 32 elif encoding == 'utf32': maxord = 2 ** 32 rndint = random.randrange(0, min(maxord, UNICODE_LIMIT)) while ( (rndint in avoid) or (SURROGATE_RANGE[0] <= rndint <= SURROGATE_RANGE[1]) ): rndint = random.randrange(0, min(maxord, UNICODE_LIMIT)) return rndint
[ "def", "get_rand_int", "(", "encoding", "=", "'latin1'", ",", "avoid", "=", "[", "]", ")", ":", "UNICODE_LIMIT", "=", "0x10ffff", "# See: https://en.wikipedia.org/wiki/UTF-8#Invalid_code_points", "SURROGATE_RANGE", "=", "(", "0xD800", ",", "0xDFFF", ")", "if", "encoding", "not", "in", "ENCODINGS", ":", "raise", "ValueError", "(", "'Unsupported encoding: '", "+", "str", "(", "encoding", ")", ")", "if", "encoding", "==", "'ascii'", ":", "maxord", "=", "2", "**", "7", "elif", "encoding", "==", "'latin1'", ":", "maxord", "=", "2", "**", "8", "elif", "encoding", "==", "'utf16'", ":", "maxord", "=", "2", "**", "16", "elif", "encoding", "==", "'utf8'", ":", "maxord", "=", "2", "**", "32", "elif", "encoding", "==", "'utf32'", ":", "maxord", "=", "2", "**", "32", "rndint", "=", "random", ".", "randrange", "(", "0", ",", "min", "(", "maxord", ",", "UNICODE_LIMIT", ")", ")", "while", "(", "(", "rndint", "in", "avoid", ")", "or", "(", "SURROGATE_RANGE", "[", "0", "]", "<=", "rndint", "<=", "SURROGATE_RANGE", "[", "1", "]", ")", ")", ":", "rndint", "=", "random", ".", "randrange", "(", "0", ",", "min", "(", "maxord", ",", "UNICODE_LIMIT", ")", ")", "return", "rndint" ]
encoding-->str: one of ENCODINGS
avoid-->list of int: to avoid (unprintable chars etc)
Returns-->int that can be converted to requested encoding
    which is NOT in avoid
[ "encoding", "--", ">", "str", ":", "one", "of", "ENCODINGS", "avoid", "--", ">", "list", "of", "int", ":", "to", "void", "(", "unprintable", "chars", "etc", ")", "Returns", "--", ">", "int", "that", "can", "be", "converted", "to", "requested", "encoding", "which", "is", "NOT", "in", "avoid" ]
python
test
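The loop above is rejection sampling: redraw until the value is neither in avoid nor a UTF-16 surrogate. A standalone sketch of the same idea, using local names rather than the library's:

import random

def rand_codepoint(ceiling, avoid=()):
    # Redraw until the value misses both the banned list and the
    # surrogate range 0xD800-0xDFFF, which no codec will encode.
    UNICODE_LIMIT = 0x10ffff
    while True:
        n = random.randrange(0, min(ceiling, UNICODE_LIMIT))
        if n not in avoid and not (0xD800 <= n <= 0xDFFF):
            return n

# Draw a latin1-representable code point, skipping NUL and newline.
print(chr(rand_codepoint(2 ** 8, avoid=[0x00, 0x0A])))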
cmutel/constructive_geometries
constructive_geometries/cg.py
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L22-L27
def has_gis(wrapped, instance, args, kwargs):
    """Skip function execution if there is no GIS support"""
    if gis:
        return wrapped(*args, **kwargs)
    else:
        warn(MISSING_GIS)
[ "def", "has_gis", "(", "wrapped", ",", "instance", ",", "args", ",", "kwargs", ")", ":", "if", "gis", ":", "return", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "warn", "(", "MISSING_GIS", ")" ]
Skip function execution if there is no GIS support
[ "Skip", "function", "execution", "if", "there", "are", "no", "presamples" ]
python
train
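The (wrapped, instance, args, kwargs) signature above follows the wrapt decorator convention. A plain-decorator sketch of the same guard; the gis probe and the MISSING_GIS text are stubs, since this record does not show how the library sets them:

import functools
import warnings

try:
    import fiona  # stand-in probe for the optional GIS stack
    gis = True
except ImportError:
    gis = False

MISSING_GIS = "GIS libraries are not installed; skipping."  # placeholder message

def has_gis(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if gis:
            return func(*args, **kwargs)
        warnings.warn(MISSING_GIS)  # falls through, returning None
    return wrapper

@has_gis
def load_shapes(path):
    with fiona.open(path) as src:
        return list(src)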
lemieuxl/pyGenClean
pyGenClean/SexCheck/sex_check.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SexCheck/sex_check.py#L127-L154
def createGenderPlot(bfile, intensities, problematic_samples, format, out_prefix): """Creates the gender plot. :param bfile: the prefix of the input binary file. :param intensities: the file containing the intensities. :param problematic_samples: the file containing the problematic samples. :param format: the format of the output plot. :param out_prefix: the prefix of the output file. :type bfile: str :type intensities: str :type problematic_samples: str :type format: str :type out_prefix: str Creates the gender plot of the samples using the :py:mod:`pyGenClean.SexCheck.gender_plot` module. """ gender_plot_options = ["--bfile", bfile, "--intensities", intensities, "--sex-problems", problematic_samples, "--format", format, "--out", out_prefix] try: gender_plot.main(gender_plot_options) except gender_plot.ProgramError as e: msg = "gender plot: {}".format(e) raise ProgramError(msg)
[ "def", "createGenderPlot", "(", "bfile", ",", "intensities", ",", "problematic_samples", ",", "format", ",", "out_prefix", ")", ":", "gender_plot_options", "=", "[", "\"--bfile\"", ",", "bfile", ",", "\"--intensities\"", ",", "intensities", ",", "\"--sex-problems\"", ",", "problematic_samples", ",", "\"--format\"", ",", "format", ",", "\"--out\"", ",", "out_prefix", "]", "try", ":", "gender_plot", ".", "main", "(", "gender_plot_options", ")", "except", "gender_plot", ".", "ProgramError", "as", "e", ":", "msg", "=", "\"gender plot: {}\"", ".", "format", "(", "e", ")", "raise", "ProgramError", "(", "msg", ")" ]
Creates the gender plot. :param bfile: the prefix of the input binary file. :param intensities: the file containing the intensities. :param problematic_samples: the file containing the problematic samples. :param format: the format of the output plot. :param out_prefix: the prefix of the output file. :type bfile: str :type intensities: str :type problematic_samples: str :type format: str :type out_prefix: str Creates the gender plot of the samples using the :py:mod:`pyGenClean.SexCheck.gender_plot` module.
[ "Creates", "the", "gender", "plot", "." ]
python
train
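A hypothetical invocation of the wrapper above; every file name is a placeholder, and ProgramError is the exception the wrapper itself raises on failure:

try:
    createGenderPlot(
        bfile="study",                         # prefix of the PLINK binary file set
        intensities="study.intensities.tsv",   # per-sample intensity file
        problematic_samples="study.problems",  # samples flagged by the sex check
        format="png",
        out_prefix="study.gender_plot",
    )
except ProgramError as e:
    print("gender plot failed:", e)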
Neurita/boyle
boyle/dicom/sets.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/sets.py#L151-L171
def get_dcm_reader(store_metadata=True, header_fields=None):
    """
    Creates a lambda function to read DICOM files.
    If store_metadata is False, will only return the file path.
    Else if you give header_fields, will return only the set of
    header_fields within a DicomFile object or the whole DICOM file if
    None.

    :return: function
    This function has only one parameter: file_path
    """
    if not store_metadata:
        return lambda fpath: fpath

    if header_fields is None:
        build_dcm = lambda fpath: DicomFile(fpath)
    else:
        dicom_header = namedtuple('DicomHeader', header_fields)
        build_dcm = lambda fpath: dicom_header._make(DicomFile(fpath).get_attributes(header_fields))

    return build_dcm
[ "def", "get_dcm_reader", "(", "store_metadata", "=", "True", ",", "header_fields", "=", "None", ")", ":", "if", "not", "store_metadata", ":", "return", "lambda", "fpath", ":", "fpath", "if", "header_fields", "is", "None", ":", "build_dcm", "=", "lambda", "fpath", ":", "DicomFile", "(", "fpath", ")", "else", ":", "dicom_header", "=", "namedtuple", "(", "'DicomHeader'", ",", "header_fields", ")", "build_dcm", "=", "lambda", "fpath", ":", "dicom_header", ".", "_make", "(", "DicomFile", "(", "fpath", ")", ".", "get_attributes", "(", "header_fields", ")", ")", "return", "build_dcm" ]
Creates a lambda function to read DICOM files.
If store_metadata is False, will only return the file path.
Else if you give header_fields, will return only the set of
header_fields within a DicomFile object or the whole DICOM file if
None.

:return: function
This function has only one parameter: file_path
[ "Creates", "a", "lambda", "function", "to", "read", "DICOM", "files", ".", "If", "store_store_metadata", "is", "False", "will", "only", "return", "the", "file", "path", ".", "Else", "if", "you", "give", "header_fields", "will", "return", "only", "the", "set", "of", "of", "header_fields", "within", "a", "DicomFile", "object", "or", "the", "whole", "DICOM", "file", "if", "None", "." ]
python
valid
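A usage sketch for the reader factory above; the DICOM attribute names are standard, but the file path is a placeholder and a real DICOM file is needed for the metadata branch:

# Reader returning a namedtuple of two header fields per file.
read_dcm = get_dcm_reader(store_metadata=True,
                          header_fields=['PatientID', 'Modality'])
hdr = read_dcm('/data/scans/slice_001.dcm')  # placeholder path
print(hdr.PatientID, hdr.Modality)

# With store_metadata=False the reader is the identity on paths.
read_path = get_dcm_reader(store_metadata=False)
assert read_path('slice_001.dcm') == 'slice_001.dcm'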
robhowley/nhlscrapi
nhlscrapi/games/playbyplay.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/playbyplay.py#L84-L102
def compute_stats(self): """ Compute the stats defined in ``self.cum_stats``. :returns: collection of all computed :py:class:`.AccumulateStats` :rtype: dict """ if not self.__have_stats: if self.init_cs_teams and self.cum_stats: self.__init_cs_teams() for play in self._rep_reader.parse_plays_stream(): p = Play(**play) self.__wrapped_plays.append(p) if self.cum_stats: self.__process(p, self.cum_stats, 'update') self.__have_stats = True return self.cum_stats
[ "def", "compute_stats", "(", "self", ")", ":", "if", "not", "self", ".", "__have_stats", ":", "if", "self", ".", "init_cs_teams", "and", "self", ".", "cum_stats", ":", "self", ".", "__init_cs_teams", "(", ")", "for", "play", "in", "self", ".", "_rep_reader", ".", "parse_plays_stream", "(", ")", ":", "p", "=", "Play", "(", "*", "*", "play", ")", "self", ".", "__wrapped_plays", ".", "append", "(", "p", ")", "if", "self", ".", "cum_stats", ":", "self", ".", "__process", "(", "p", ",", "self", ".", "cum_stats", ",", "'update'", ")", "self", ".", "__have_stats", "=", "True", "return", "self", ".", "cum_stats" ]
Compute the stats defined in ``self.cum_stats``. :returns: collection of all computed :py:class:`.AccumulateStats` :rtype: dict
[ "Compute", "the", "stats", "defined", "in", "self", ".", "cum_stats", ".", ":", "returns", ":", "collection", "of", "all", "computed", ":", "py", ":", "class", ":", ".", "AccumulateStats", ":", "rtype", ":", "dict" ]
python
train
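A hedged driver for compute_stats, following nhlscrapi's README entry points; the GameKey values are placeholders, and the play_by_play attribute on Game is an assumption here:

from nhlscrapi.games.game import Game, GameKey, GameType
from nhlscrapi.games.cumstats import Score, Corsi

game_key = GameKey(2016, GameType.Regular, 30)  # season, game type, game number (placeholders)
game = Game(game_key, cum_stats={'Score': Score(), 'Corsi': Corsi()})

# Trigger the accumulation loop shown above: each play is parsed,
# wrapped in Play, and folded into every AccumulateStats instance.
results = game.play_by_play.compute_stats()  # assumed attribute path
print(sorted(results))  # ['Corsi', 'Score']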
Kortemme-Lab/klab
klab/bio/uniprot.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/uniprot.py#L52-L122
def uniprot_map(from_scheme, to_scheme, list_of_from_ids, cache_dir = None, silent = True):
    '''Maps from one ID scheme to another using the UniProt service.
    list_of_from_ids should be a list of strings.
    This function was adapted from http://www.uniprot.org/faq/28#id_mapping_examples which also gives examples of valid values for from_scheme and to_scheme.
    Note that some conversions are not directly possible e.g. PDB_ID (PDB) to UPARC (UniParc). They need to go through an intermediary format like ACC (UniProtKB AC) or ID (UniProtKB ID).
    This function returns a dict mapping the IDs in from_scheme to a list of sorted IDs in to_scheme.
    '''
    try:
        assert(hasattr(list_of_from_ids, '__iter__'))
    except:
        raise Exception('The list_of_from_ids argument should be an iterable type (e.g. list).')

    full_mapping = {}
    cached_mapping_file = None
    if cache_dir:
        cached_mapping_file = os.path.join(cache_dir, '%s.%s' % (from_scheme, to_scheme))
        if os.path.exists(cached_mapping_file):
            full_mapping = simplejson.loads(read_file(cached_mapping_file))

    list_of_from_ids = set(list_of_from_ids)
    requested_mapping = {}
    remaining_ids = []
    for id in list_of_from_ids:
        if full_mapping.get(id):
            requested_mapping[id] = full_mapping[id]
        else:
            remaining_ids.append(id)
    assert(set(remaining_ids + requested_mapping.keys()) == set(list_of_from_ids))

    if remaining_ids:
        if not silent:
            print("Getting %s->%s mapping" % (from_scheme, to_scheme))
        url = 'http://www.uniprot.org/mapping/'
        params = {
            'from' : from_scheme,
            'to' : to_scheme,
            'format' : 'tab',
            'query' : ' '.join(sorted(list(list_of_from_ids))),
        }
        data = urllib.urlencode(params)
        request = urllib2.Request(url, data)
        contact = "" # Please set your email address here to help us debug in case of problems.
        request.add_header('User-Agent', 'Python %s' % contact)
        response = urllib2.urlopen(request)
        page = response.read(200000)
        lines = page.split("\n")
        assert(lines[-1] == '')
        assert(lines[0].split("\t") == ['From', 'To'])
        for line in lines[1:-1]:
            tokens = line.split("\t")
            assert(len(tokens) == 2)
            assert(tokens[0] in list_of_from_ids)
            full_mapping[tokens[0]] = full_mapping.get(tokens[0], [])
            full_mapping[tokens[0]].append(tokens[1])
            requested_mapping[tokens[0]] = requested_mapping.get(tokens[0], [])
            requested_mapping[tokens[0]].append(tokens[1])

    # Sort the IDs
    for k, v in requested_mapping.iteritems():
        #assert(len(v) == len(set(v)))
        requested_mapping[k] = sorted(set(v))
    for k, v in full_mapping.iteritems():
        #assert(len(v) == len(set(v)))
        full_mapping[k] = sorted(set(v))

    if remaining_ids and cached_mapping_file:
        write_file(cached_mapping_file, simplejson.dumps(full_mapping))

    return requested_mapping
[ "def", "uniprot_map", "(", "from_scheme", ",", "to_scheme", ",", "list_of_from_ids", ",", "cache_dir", "=", "None", ",", "silent", "=", "True", ")", ":", "try", ":", "assert", "(", "hasattr", "(", "list_of_from_ids", ",", "'__iter__'", ")", ")", "except", ":", "raise", "Exception", "(", "'The list_of_from_ids argument should be an iterable type (e.g. list).'", ")", "full_mapping", "=", "{", "}", "cached_mapping_file", "=", "None", "if", "cache_dir", ":", "cached_mapping_file", "=", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "'%s.%s'", "%", "(", "from_scheme", ",", "to_scheme", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "cached_mapping_file", ")", ":", "full_mapping", "=", "simplejson", ".", "loads", "(", "read_file", "(", "cached_mapping_file", ")", ")", "list_of_from_ids", "=", "set", "(", "list_of_from_ids", ")", "requested_mapping", "=", "{", "}", "remaining_ids", "=", "[", "]", "for", "id", "in", "list_of_from_ids", ":", "if", "full_mapping", ".", "get", "(", "id", ")", ":", "requested_mapping", "[", "id", "]", "=", "full_mapping", "[", "id", "]", "else", ":", "remaining_ids", ".", "append", "(", "id", ")", "assert", "(", "set", "(", "remaining_ids", "+", "requested_mapping", ".", "keys", "(", ")", ")", "==", "set", "(", "list_of_from_ids", ")", ")", "if", "remaining_ids", ":", "if", "not", "silent", ":", "print", "(", "\"Getting %s->%s mapping\"", "%", "(", "from_scheme", ",", "to_scheme", ")", ")", "url", "=", "'http://www.uniprot.org/mapping/'", "params", "=", "{", "'from'", ":", "from_scheme", ",", "'to'", ":", "to_scheme", ",", "'format'", ":", "'tab'", ",", "'query'", ":", "' '", ".", "join", "(", "sorted", "(", "list", "(", "list_of_from_ids", ")", ")", ")", ",", "}", "data", "=", "urllib", ".", "urlencode", "(", "params", ")", "request", "=", "urllib2", ".", "Request", "(", "url", ",", "data", ")", "contact", "=", "\"\"", "# Please set your email address here to help us debug in case of problems.", "request", ".", "add_header", "(", "'User-Agent'", ",", "'Python %s'", "%", "contact", ")", "response", "=", "urllib2", ".", "urlopen", "(", "request", ")", "page", "=", "response", ".", "read", "(", "200000", ")", "lines", "=", "page", ".", "split", "(", "\"\\n\"", ")", "assert", "(", "lines", "[", "-", "1", "]", "==", "''", ")", "assert", "(", "lines", "[", "0", "]", ".", "split", "(", "\"\\t\"", ")", "==", "[", "'From'", ",", "'To'", "]", ")", "for", "line", "in", "lines", "[", "1", ":", "-", "1", "]", ":", "tokens", "=", "line", ".", "split", "(", "\"\\t\"", ")", "assert", "(", "len", "(", "tokens", ")", "==", "2", ")", "assert", "(", "tokens", "[", "0", "]", "in", "list_of_from_ids", ")", "full_mapping", "[", "tokens", "[", "0", "]", "]", "=", "full_mapping", ".", "get", "(", "tokens", "[", "0", "]", ",", "[", "]", ")", "full_mapping", "[", "tokens", "[", "0", "]", "]", ".", "append", "(", "tokens", "[", "1", "]", ")", "requested_mapping", "[", "tokens", "[", "0", "]", "]", "=", "requested_mapping", ".", "get", "(", "tokens", "[", "0", "]", ",", "[", "]", ")", "requested_mapping", "[", "tokens", "[", "0", "]", "]", ".", "append", "(", "tokens", "[", "1", "]", ")", "# Sort the IDs", "for", "k", ",", "v", "in", "requested_mapping", ".", "iteritems", "(", ")", ":", "#assert(len(v) == len(set(v)))", "requested_mapping", "[", "k", "]", "=", "sorted", "(", "set", "(", "v", ")", ")", "for", "k", ",", "v", "in", "full_mapping", ".", "iteritems", "(", ")", ":", "#assert(len(v) == len(set(v)))", "full_mapping", "[", "k", "]", "=", "sorted", "(", "set", "(", "v", 
")", ")", "if", "remaining_ids", "and", "cached_mapping_file", ":", "write_file", "(", "cached_mapping_file", ",", "simplejson", ".", "dumps", "(", "full_mapping", ")", ")", "return", "requested_mapping" ]
Maps from one ID scheme to another using the UniProt service. list_of_from_ids should be a list of strings.
This function was adapted from http://www.uniprot.org/faq/28#id_mapping_examples which also gives examples of valid values for from_scheme and to_scheme.
Note that some conversions are not directly possible e.g. PDB_ID (PDB) to UPARC (UniParc). They need to go through an intermediary format like ACC (UniProtKB AC) or ID (UniProtKB ID).
This function returns a dict mapping the IDs in from_scheme to a list of sorted IDs in to_scheme.
[ "Maps", "from", "one", "ID", "scheme", "to", "another", "using", "the", "UniProt", "service", ".", "list_of_ids", "should", "be", "a", "list", "of", "strings", ".", "This", "function", "was", "adapted", "from", "http", ":", "//", "www", ".", "uniprot", ".", "org", "/", "faq", "/", "28#id_mapping_examples", "which", "also", "gives", "examples", "of", "valid", "values", "for", "from_scheme", "and", "to_scheme", ".", "Note", "that", "some", "conversions", "are", "not", "directly", "possible", "e", ".", "g", ".", "PDB_ID", "(", "PDB", ")", "to", "UPARC", "(", "UniParc", ")", ".", "They", "need", "to", "go", "through", "an", "intermediary", "format", "like", "ACC", "(", "UniProtKB", "AC", ")", "or", "ID", "(", "UniProtKB", "ID", ")", ".", "This", "function", "returns", "a", "dict", "mapping", "the", "IDs", "in", "from_scheme", "to", "a", "list", "of", "sorted", "IDs", "in", "to_scheme", "." ]
python
train
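An example call for the mapper above (note the function body is Python 2: urllib2, iteritems); the IDs are real PDB codes, but the result shown is illustrative, not fetched:

mapping = uniprot_map(
    from_scheme='PDB_ID',               # abbreviations per uniprot.org/faq/28
    to_scheme='ACC',
    list_of_from_ids=['1A2P', '4HHB'],
    cache_dir='/tmp/uniprot_cache',     # placeholder; must exist to be reused
    silent=False,
)
# Illustrative shape of the result (sorted accession lists per query ID):
# {'1A2P': ['P00648'], '4HHB': ['P68871', 'P69905']}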