Dataset columns:

    repo              string  (length 7 to 54)
    path              string  (length 4 to 192)
    url               string  (length 87 to 284)
    code              string  (length 78 to 104k)
    code_tokens       list
    docstring         string  (length 1 to 46.9k)
    docstring_tokens  list
    language          string  (1 distinct value)
    partition         string  (3 distinct values)
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L2071-L2124
def propagateFrom(self, startLayer, **args):
    """
    Propagates activation through the network. Optionally, takes input
    layer names as keywords, and their associated activations. If input
    layer(s) are given, then propagate() will return the output layer's
    activation. If there is more than one output layer, then a dictionary
    is returned.

    Examples:
        >>> net = Network() # doctest: +ELLIPSIS
        Conx using seed: ...
        >>> net.addLayers(2, 5, 1)
        >>> len(net.propagate(input = [1, .5]))
        1
    """
    for layerName in args:
        self[layerName].copyActivations(args[layerName])
    # initialize netinput:
    started = 0
    for layer in self.layers:
        if layer.name == startLayer:
            started = 1
            continue  # don't set this one
        if not started:
            continue
        if layer.type != 'Input' and layer.active:
            layer.netinput = (layer.weight).copy()
    # for each connection, in order:
    started = 0
    for layer in self.layers:
        if layer.name == startLayer:
            started = 1
            continue  # don't get inputs into this one
        if not started:
            continue
        if layer.active:
            for connection in self.connections:
                if connection.active and connection.toLayer.name == layer.name and connection.fromLayer.active:
                    connection.toLayer.netinput = connection.toLayer.netinput + \
                        Numeric.matrixmultiply(connection.fromLayer.activation,
                                               connection.weight)
            # propagate!
            if layer.type != 'Input':
                layer.activation = self.activationFunction(layer.netinput)
    for layer in self.layers:
        if layer.log and layer.active:
            layer.writeLog(self)
    self.count += 1  # counts number of times propagate() is called
    if len(args) != 0:
        dict = {}
        for layer in self.layers:
            if layer.type == "Output":
                dict[layer.name] = layer.activation.copy()
        if len(dict) == 1:
            return dict[list(dict.keys())[0]]
        else:
            return dict
Propagates activation through the network. Optionally, takes input
layer names as keywords, and their associated activations. If input
layer(s) are given, then propagate() will return the output layer's
activation. If there is more than one output layer, then a dictionary
is returned.

Examples:
    >>> net = Network() # doctest: +ELLIPSIS
    Conx using seed: ...
    >>> net.addLayers(2, 5, 1)
    >>> len(net.propagate(input = [1, .5]))
    1
python
train
tylucaskelley/licenser
licenser/licenser.py
https://github.com/tylucaskelley/licenser/blob/6b7394fdaab7707c4c33201c4d023097452b46bc/licenser/licenser.py#L230-L250
def generate_license(args):
    '''
    Creates a LICENSE or LICENSE.txt file in the current directory.
    Reads from the 'assets' folder and looks for placeholders enclosed
    in curly braces.

    Arguments:
        - (tuple) Name, email, license, project, ext, year
    '''
    with open(cwd + licenses_loc + args[2]) as f:
        license = f.read()

    license = license.format(name=args[0],
                             email=args[1],
                             license=args[2],
                             project=args[3],
                             year=args[5])

    with open('LICENSE' + args[4], 'w') as f:
        f.write(license)

    print('licenser: license file added to current directory')
Creates a LICENSE or LICENSE.txt file in the current directory.
Reads from the 'assets' folder and looks for placeholders enclosed in
curly braces.

Arguments:
    - (tuple) Name, email, license, project, ext, year
python
train
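A minimal usage sketch for the generate_license record above. The tuple order follows its docstring; the name, email, and template values are illustrative, and it assumes the module-level cwd and licenses_loc point at the bundled license templates:

args = ('Jane Doe',          # name
        'jane@example.com',  # email
        'mit',               # license template file to read (hypothetical)
        'my-project',        # project
        '.txt',              # ext -> writes LICENSE.txt
        '2019')              # year
generate_license(args)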
shoebot/shoebot
shoebot/gui/gtk_window.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/gui/gtk_window.py#L171-L186
def schedule_snapshot(self, format):
    """
    Tell the canvas to perform a snapshot when it's finished rendering

    :param format:
    :return:
    """
    bot = self.bot
    canvas = self.bot.canvas
    script = bot._namespace['__file__']
    if script:
        filename = os.path.splitext(script)[0] + '.' + format
    else:
        filename = 'output.' + format

    f = canvas.output_closure(filename, self.bot._frame)
    self.scheduled_snapshots.append(f)
Tell the canvas to perform a snapshot when it's finished rendering

:param format:
:return:
python
valid
lambdalisue/django-permission
src/permission/handlers.py
https://github.com/lambdalisue/django-permission/blob/580f7a1f857701d06ccf41163f188ac04fbc4fac/src/permission/handlers.py#L88-L113
def get_supported_permissions(self):
    """
    Get permissions which this handler can treat.
    Specified with :attr:`includes` and :attr:`excludes` of this instance.

    Returns
    -------
    set
        A set instance of `app_label.codename` formatted permission strings
    """
    if not hasattr(self, '_perms_cache'):
        if (self.includes and
                isinstance(self.includes, collections.Callable)):
            includes = self.includes(self)
        else:
            includes = self.includes or []
        if (self.excludes and
                isinstance(self.excludes, collections.Callable)):
            excludes = self.excludes(self)
        else:
            excludes = self.excludes or []
        includes = set(includes)
        excludes = set(excludes)
        includes = includes.difference(excludes)
        self._perms_cache = includes
    return self._perms_cache
Get permissions which this handler can treat.
Specified with :attr:`includes` and :attr:`excludes` of this instance.

Returns
-------
set
    A set instance of `app_label.codename` formatted permission strings
python
train
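A sketch of how includes and excludes from the record above combine. The handler class and permission names are hypothetical; includes/excludes may also be callables, in which case they are invoked to produce the lists:

from permission.handlers import PermissionHandler

class ArticlePermissionHandler(PermissionHandler):
    includes = ['articles.add_article',
                'articles.change_article',
                'articles.delete_article']
    excludes = ['articles.delete_article']

# get_supported_permissions() would return includes minus excludes:
# {'articles.add_article', 'articles.change_article'}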
senaite/senaite.core
bika/lims/browser/worksheet/views/printview.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/printview.py#L207-L233
def _lab_data(self):
    """ Returns a dictionary that represents the lab object
        Keys: obj, title, url, address, confidence, accredited,
              accreditation_body, accreditation_logo, logo
    """
    portal = self.context.portal_url.getPortalObject()
    lab = self.context.bika_setup.laboratory
    lab_address = lab.getPostalAddress() \
        or lab.getBillingAddress() \
        or lab.getPhysicalAddress()
    if lab_address:
        _keys = ['address', 'city', 'state', 'zip', 'country']
        _list = ["<div>%s</div>" % lab_address.get(v) for v in _keys
                 if lab_address.get(v)]
        lab_address = "".join(_list)
    else:
        lab_address = ''

    return {'obj': lab,
            'title': to_utf8(lab.Title()),
            'url': to_utf8(lab.getLabURL()),
            'address': to_utf8(lab_address),
            'confidence': lab.getConfidence(),
            'accredited': lab.getLaboratoryAccredited(),
            'accreditation_body': to_utf8(lab.getAccreditationBody()),
            'accreditation_logo': lab.getAccreditationBodyLogo(),
            'logo': "%s/logo_print.png" % portal.absolute_url()}
Returns a dictionary that represents the lab object
Keys: obj, title, url, address, confidence, accredited,
      accreditation_body, accreditation_logo, logo
python
train
mar10/wsgidav
wsgidav/fs_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/fs_dav_provider.py#L252-L264
def create_empty_resource(self, name):
    """Create an empty (length-0) resource.

    See DAVResource.create_empty_resource()
    """
    assert "/" not in name
    if self.provider.readonly:
        raise DAVError(HTTP_FORBIDDEN)
    path = util.join_uri(self.path, name)
    fp = self.provider._loc_to_file_path(path, self.environ)
    f = open(fp, "wb")
    f.close()
    return self.provider.get_resource_inst(path, self.environ)
Create an empty (length-0) resource. See DAVResource.create_empty_resource()
python
valid
openid/python-openid
openid/yadis/etxrd.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/yadis/etxrd.py#L159-L194
def getCanonicalID(iname, xrd_tree):
    """Return the CanonicalID from this XRDS document.

    @param iname: the XRI being resolved.
    @type iname: unicode

    @param xrd_tree: The XRDS output from the resolver.
    @type xrd_tree: ElementTree

    @returns: The XRI CanonicalID or None.
    @returntype: unicode or None
    """
    xrd_list = xrd_tree.findall(xrd_tag)
    xrd_list.reverse()

    try:
        canonicalID = xri.XRI(xrd_list[0].findall(canonicalID_tag)[0].text)
    except IndexError:
        return None

    childID = canonicalID.lower()

    for xrd in xrd_list[1:]:
        # XXX: can't use rsplit until we require python >= 2.4.
        parent_sought = childID[:childID.rindex('!')]
        parent = xri.XRI(xrd.findtext(canonicalID_tag))
        if parent_sought != parent.lower():
            raise XRDSFraud("%r can not come from %s" % (childID, parent))
        childID = parent_sought

    root = xri.rootAuthority(iname)
    if not xri.providerIsAuthoritative(root, childID):
        raise XRDSFraud("%r can not come from root %r" % (childID, root))

    return canonicalID
Return the CanonicalID from this XRDS document.

@param iname: the XRI being resolved.
@type iname: unicode

@param xrd_tree: The XRDS output from the resolver.
@type xrd_tree: ElementTree

@returns: The XRI CanonicalID or None.
@returntype: unicode or None
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/visual.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/visual.py#L486-L502
def detach(self, filt, view=None):
    """Detach a filter.

    Parameters
    ----------
    filt : object
        The filter to detach.
    view : instance of VisualView | None
        The view to use.
    """
    if view is None:
        self._vshare.filters.remove(filt)
        for view in self._vshare.views.keys():
            filt._detach(view)
    else:
        view._filters.remove(filt)
        filt._detach(view)
Detach a filter.

Parameters
----------
filt : object
    The filter to detach.
view : instance of VisualView | None
    The view to use.
python
train
guaix-ucm/numina
numina/user/baserun.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/baserun.py#L145-L151
def create_recipe_file_logger(logger, logfile, logformat):
    """Redirect Recipe log messages to a file."""
    recipe_formatter = logging.Formatter(logformat)
    fh = logging.FileHandler(logfile, mode='w')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(recipe_formatter)
    return fh
Redirect Recipe log messages to a file.
python
train
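A usage sketch for create_recipe_file_logger above. Note that the excerpt never uses its logger argument, so the caller attaches the returned handler itself; the logger name and format string are illustrative:

import logging

logger = logging.getLogger('numina.recipes')
logger.setLevel(logging.DEBUG)
fh = create_recipe_file_logger(logger, 'recipe.log',
                               '%(asctime)s %(levelname)s %(message)s')
logger.addHandler(fh)
logger.debug('this line is written to recipe.log')
logger.removeHandler(fh)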
NerdWalletOSS/savage
src/savage/utils.py
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/utils.py#L51-L80
def get_column_attribute(row, col_name, use_dirty=True, dialect=None):
    """
    :param row: the row object
    :param col_name: the column name
    :param use_dirty: whether to return the dirty value of the column
    :param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`.
        If specified, this function will process the column attribute into the
        dialect type before returning it; useful if one is using user defined
        column types in their mappers.
    :return: if :any:`use_dirty`, this will return getattr(row, col_name), the
        current (possibly unflushed) value; else this will return the value of
        col_name before it was changed
    """
    def identity(x):
        return x

    bind_processor = None
    if dialect:
        column_type = getattr(type(row), col_name).type
        bind_processor = get_bind_processor(column_type, dialect)
    bind_processor = bind_processor or identity

    current_value = bind_processor(getattr(row, col_name))
    if use_dirty:
        return current_value

    hist = getattr(inspect(row).attrs, col_name).history
    if not hist.has_changes():
        return current_value
    elif hist.deleted:
        return bind_processor(hist.deleted[0])
    return None
:param row: the row object
:param col_name: the column name
:param use_dirty: whether to return the dirty value of the column
:param dialect: if not None, should be a :py:class:`~sqlalchemy.engine.interfaces.Dialect`.
    If specified, this function will process the column attribute into the
    dialect type before returning it; useful if one is using user defined
    column types in their mappers.
:return: if :any:`use_dirty`, this will return getattr(row, col_name), the
    current (possibly unflushed) value; else this will return the value of
    col_name before it was changed
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/dispatch.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/dispatch.py#L269-L273
def AsHandler(request=None, modules=None, **kw):
    '''Dispatch from within ModPython.'''
    ps = ParsedSoap(request)
    kw['request'] = request
    _Dispatch(ps, modules, _ModPythonSendXML, _ModPythonSendFault, **kw)
Dispatch from within ModPython.
python
train
jelmer/python-fastimport
fastimport/commands.py
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/commands.py#L83-L112
def dump_str(self, names=None, child_lists=None, verbose=False):
    """Dump fields as a string.

    For debugging.

    :param names: the list of fields to include or
        None for all public fields
    :param child_lists: dictionary of child command names to
        fields for that child command to include
    :param verbose: if True, prefix each line with the command class and
        display fields as a dictionary; if False, dump just the field
        values with tabs between them
    """
    interesting = {}
    if names is None:
        fields = [
            k for k in list(self.__dict__.keys()) if not k.startswith(b'_')
        ]
    else:
        fields = names
    for field in fields:
        value = self.__dict__.get(field)
        if field in self._binary and value is not None:
            value = b'(...)'
        interesting[field] = value
    if verbose:
        return "%s: %s" % (self.__class__.__name__, interesting)
    else:
        return "\t".join([repr(interesting[k]) for k in fields])
Dump fields as a string.

For debugging.

:param names: the list of fields to include or
    None for all public fields
:param child_lists: dictionary of child command names to
    fields for that child command to include
:param verbose: if True, prefix each line with the command class and
    display fields as a dictionary; if False, dump just the field
    values with tabs between them
python
train
stevearc/pyramid_webpack
pyramid_webpack/__init__.py
https://github.com/stevearc/pyramid_webpack/blob/4fcad26271fd6e8c270e19c7943240fea6d8c484/pyramid_webpack/__init__.py#L212-L223
def get_webpack(request, name='DEFAULT'):
    """
    Get the Webpack object for a given webpack config.

    Called at most once per request per config name.
    """
    if not hasattr(request, '_webpack_map'):
        request._webpack_map = {}
    wp = request._webpack_map.get(name)
    if wp is None:
        wp = request._webpack_map[name] = Webpack(request, name)
    return wp
Get the Webpack object for a given webpack config. Called at most once per request per config name.
python
train
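A sketch of calling get_webpack above directly from a Pyramid view. The per-request cache means repeated calls with the same config name return the same object; the 'admin' config name is hypothetical:

def my_view(request):
    wp = get_webpack(request)                 # uses the 'DEFAULT' config
    same = get_webpack(request)               # cached: same object as wp
    admin_wp = get_webpack(request, 'admin')  # hypothetical named config
    return {'webpack': wp, 'admin_webpack': admin_wp}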
PBR/MQ2
MQ2/mq2.py
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/mq2.py#L58-L92
def _get_arguments():  # pragma: no cover
    """ Handle the command line arguments given to this program """
    LOG.debug('Parse command line argument')
    parser = argparse.ArgumentParser(
        description='Command line interface for the MQ² program')
    parser.add_argument(
        '-z', '--zipfile', dest='inputzip', default=None,
        help='Zip file containing the input files.')
    parser.add_argument(
        '-d', '--dir', dest='inputdir', default=None,
        help='Path to a local folder containing the input files.')
    parser.add_argument(
        '-f', '--file', dest='inputfile', default=None,
        help='Path to a local input file.')
    parser.add_argument(
        '--lod', default=3,
        help='LOD threshold to use to assess the significance of a LOD \
            value for a QTL.')
    parser.add_argument(
        '--session', default=None,
        help='Session to analyze if required.')
    parser.add_argument(
        '--verbose', action='store_true',
        help="Gives more info about what's going on")
    parser.add_argument(
        '--debug', action='store_true',
        help="Outputs debugging information")
    parser.add_argument(
        '--version', action='version',
        version='MQ² version: %s' % __version__)
    return parser.parse_args()
Handle the command line arguments given to this program
python
train
aichaos/rivescript-python
rivescript/rivescript.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L663-L676
def set_substitution(self, what, rep):
    """Set a substitution.

    Equivalent to ``! sub`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable.
        if what in self._subs:
            del self._subs[what]
        # Return early so ``None`` is not stored as a replacement.
        return
    self._subs[what] = rep
Set a substitution.

Equivalent to ``! sub`` in RiveScript code.

:param str what: The original text to replace.
:param str rep: The text to replace it with.
    Set this to ``None`` to delete the substitution.
python
train
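A usage sketch for set_substitution above, assuming a RiveScript bot instance; the substitution pair is illustrative:

from rivescript import RiveScript

bot = RiveScript()
bot.set_substitution("don't", "do not")  # normalize "don't" in user input
bot.set_substitution("don't", None)      # delete the substitution again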
sbarham/dsrt
build/lib/dsrt/data/transform/Padder.py
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/data/transform/Padder.py#L18-L40
def pad_dialogues(self, dialogues):
    """
    Pad the entire dataset.
    This involves adding padding at the end of each sentence, and in the
    case of a hierarchical model, it also involves adding padding at the
    end of each dialogue, so that every training sample (dialogue) has
    the same dimension.
    """
    self.log('info', 'Padding the dialogues ...')

    empty_turn = [self.config['pad-d']] * (self.properties['max-utterance-length'] + 1)

    for i, d in enumerate(dialogues):
        for j, u in enumerate(d):
            dif = self.properties['max-utterance-length'] - len(u) + 1
            dialogues[i][j] += [self.config['pad-u']] * dif

        # only pad the dialogue if we're training a hierarchical model
        if self.config['hierarchical']:
            dif = self.properties['max-dialogue-length'] - len(d)
            dialogues[i] += [empty_turn] * dif

    return dialogues
Pad the entire dataset. This involves adding padding at the end of each sentence, and in the case of a hierarchical model, it also involves adding padding at the end of each dialogue, so that every training sample (dialogue) has the same dimension.
python
train
GNS3/gns3-server
gns3server/compute/docker/docker_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/docker/docker_vm.py#L857-L886
def start_capture(self, adapter_number, output_file):
    """
    Starts a packet capture.

    :param adapter_number: adapter number
    :param output_file: PCAP destination file for the capture
    """
    try:
        adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise DockerError("Adapter {adapter_number} doesn't exist on Docker VM '{name}'".format(
            name=self.name, adapter_number=adapter_number))
    nio = adapter.get_nio(0)
    if not nio:
        raise DockerError("Adapter {} is not connected".format(adapter_number))
    if nio.capturing:
        raise DockerError("Packet capture is already activated on adapter {adapter_number}".format(
            adapter_number=adapter_number))
    nio.startPacketCapture(output_file)
    if self.status == "started" and self.ubridge:
        yield from self._start_ubridge_capture(adapter_number, output_file)
    log.info("Docker VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(
        name=self.name, id=self.id, adapter_number=adapter_number))
Starts a packet capture.

:param adapter_number: adapter number
:param output_file: PCAP destination file for the capture
python
train
kstaniek/condoor
condoor/connection.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/connection.py#L794-L835
def description_record(self):
    """Return dict describing :class:`condoor.Connection` object.

    Example::

        {'connections': [{'chain': [{'driver_name': 'eXR',
                                     'family': 'ASR9K',
                                     'hostname': 'vkg3',
                                     'is_console': True,
                                     'is_target': True,
                                     'mode': 'global',
                                     'os_type': 'eXR',
                                     'os_version': '6.1.2.06I',
                                     'platform': 'ASR-9904',
                                     'prompt': 'RP/0/RSP0/CPU0:vkg3#',
                                     'udi': {'description': 'ASR-9904 AC Chassis',
                                             'name': 'Rack 0',
                                             'pid': 'ASR-9904-AC',
                                             'sn': 'FOX2024GKDE ',
                                             'vid': 'V01'}}]},
                         {'chain': [{'driver_name': 'generic',
                                     'family': None,
                                     'hostname': '172.27.41.52:2045',
                                     'is_console': None,
                                     'is_target': True,
                                     'mode': None,
                                     'os_type': None,
                                     'os_version': None,
                                     'platform': None,
                                     'prompt': None,
                                     'udi': None}]}],
         'last_chain': 0}

    """
    if self.connection_chains:
        return {
            'connections': [{'chain': [device.device_info for device in chain.devices]}
                            for chain in self.connection_chains],
            'last_chain': self._last_chain_index,
        }
    else:
        raise ConnectionError("Device not connected")
Return dict describing :class:`condoor.Connection` object.

Example::

    {'connections': [{'chain': [{'driver_name': 'eXR',
                                 'family': 'ASR9K',
                                 'hostname': 'vkg3',
                                 'is_console': True,
                                 'is_target': True,
                                 'mode': 'global',
                                 'os_type': 'eXR',
                                 'os_version': '6.1.2.06I',
                                 'platform': 'ASR-9904',
                                 'prompt': 'RP/0/RSP0/CPU0:vkg3#',
                                 'udi': {'description': 'ASR-9904 AC Chassis',
                                         'name': 'Rack 0',
                                         'pid': 'ASR-9904-AC',
                                         'sn': 'FOX2024GKDE ',
                                         'vid': 'V01'}}]},
                     {'chain': [{'driver_name': 'generic',
                                 'family': None,
                                 'hostname': '172.27.41.52:2045',
                                 'is_console': None,
                                 'is_target': True,
                                 'mode': None,
                                 'os_type': None,
                                 'os_version': None,
                                 'platform': None,
                                 'prompt': None,
                                 'udi': None}]}],
     'last_chain': 0}
python
train
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/resource.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/resource.py#L289-L299
def delete(self, *args, **kwargs):
    """Delete an object"""
    self.before_delete(args, kwargs)

    self.delete_object(kwargs)

    result = {'meta': {'message': 'Object successfully deleted'}}

    final_result = self.after_delete(result)

    return final_result
Delete an object
python
train
openstack/networking-cisco
networking_cisco/neutronclient/hostingdevicescheduler.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/neutronclient/hostingdevicescheduler.py#L109-L114
def disassociate_hosting_device_with_config_agent(
        self, client, config_agent_id, hosting_device_id):
    """Disassociates a hosting_device with a config agent."""
    return client.delete((ConfigAgentHandlingHostingDevice.resource_path +
                          CFG_AGENT_HOSTING_DEVICES + "/%s") % (
                         config_agent_id, hosting_device_id))
Disassociates a hosting_device with a config agent.
python
train
ynop/audiomate
audiomate/utils/textfile.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/textfile.py#L54-L76
def read_key_value_lines(path, separator=' ', default_value=''):
    """
    Reads lines of a text file with two columns as key/value dictionary.

    Parameters:
        path (str): Path to the file.
        separator (str): Separator that is used to split key and value.
        default_value (str): If no value is given this value is used.

    Returns:
        dict: A dictionary with first column as key and second as value.
    """
    gen = read_separated_lines_generator(path, separator, 2)

    dic = {}

    for record in gen:
        if len(record) > 1:
            dic[record[0]] = record[1]
        elif len(record) > 0:
            dic[record[0]] = default_value

    return dic
Reads lines of a text file with two columns as key/value dictionary.

Parameters:
    path (str): Path to the file.
    separator (str): Separator that is used to split key and value.
    default_value (str): If no value is given this value is used.

Returns:
    dict: A dictionary with first column as key and second as value.
python
train
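An illustrative input/output pair for read_key_value_lines above; the file name and contents are hypothetical:

# speakers.txt:
#   utt-1 spk-a
#   utt-2 spk-b
#   utt-3
result = read_key_value_lines('speakers.txt', separator=' ',
                              default_value='unknown')
# -> {'utt-1': 'spk-a', 'utt-2': 'spk-b', 'utt-3': 'unknown'}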
arviz-devs/arviz
arviz/stats/diagnostics.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/diagnostics.py#L220-L229
def _rhat_ufunc(ary):
    """Ufunc for computing split R-hat.

    This can be used on an xarray Dataset, using
    `xr.apply_ufunc(_rhat_ufunc, ..., input_core_dims=(('chain', 'draw'),))
    """
    target = np.empty(ary.shape[:-2])
    for idx in np.ndindex(target.shape):
        target[idx] = _get_split_rhat(ary[idx])
    return target
Ufunc for computing split R-hat.

This can be used on an xarray Dataset, using
`xr.apply_ufunc(_rhat_ufunc, ..., input_core_dims=(('chain', 'draw'),))
python
train
chaoss/grimoirelab-manuscripts
manuscripts2/report.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/report.py#L134-L148
def get_metric_index(self, data_source):
    """
    This function will return the elasticsearch index for a corresponding
    data source. It chooses between the default and the user-provided ES
    indices and returns the user-provided one if it is available.

    :param data_source: the data source for which the index has to be returned
    :returns: an elasticsearch index name
    """
    if data_source in self.index_dict:
        index = self.index_dict[data_source]
    else:
        index = self.class2index[self.ds2class[data_source]]
    return Index(index_name=index)
This function will return the elasticsearch index for a corresponding
data source. It chooses between the default and the user-provided ES
indices and returns the user-provided one if it is available.

:param data_source: the data source for which the index has to be returned
:returns: an elasticsearch index name
python
train
Genida/archan
src/archan/config.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/config.py#L290-L296
def checker_from_dict(self, dct):
    """Return a checker instance from a dict object."""
    checker_identifier = list(dct.keys())[0]
    checker_class = self.get_checker(checker_identifier)
    if checker_class:
        return checker_class(**dct[checker_identifier])
    return None
Return a checker instance from a dict object.
python
train
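A sketch of the dict shape checker_from_dict above expects: a single key naming a registered checker, mapping to its keyword arguments. The identifier and keyword below are hypothetical:

dct = {'archan.CompleteMediation': {'allow_failure': True}}
checker = config.checker_from_dict(dct)  # an instance, or None if unknown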
Microsoft/knack
knack/parser.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/parser.py#L249-L256
def parse_args(self, args=None, namespace=None):
    """ Overrides argparse.ArgumentParser.parse_args

    Enables '@'-prefixed files to be expanded before arguments are
    processed by ArgumentParser.parse_args as usual
    """
    self._expand_prefixed_files(args)
    return super(CLICommandParser, self).parse_args(args)
Overrides argparse.ArgumentParser.parse_args

Enables '@'-prefixed files to be expanded before arguments are
processed by ArgumentParser.parse_args as usual
python
train
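An illustrative expansion for parse_args above; the file name and flags are hypothetical:

# Given a file 'extra_args.txt' containing the lines:
#   --name
#   my-resource
# the '@'-prefixed argument is expanded before normal parsing, so
parser.parse_args(['create', '@extra_args.txt'])
# behaves like
parser.parse_args(['create', '--name', 'my-resource'])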
closeio/quotequail
quotequail/_html.py
https://github.com/closeio/quotequail/blob/8a3960c033d595b25a8bbc2c340be898e3065b5f/quotequail/_html.py#L389-L400
def get_line_info(tree, max_lines=None):
    """
    Shortcut for indented_tree_line_generator() that returns an array
    of start references, an array of corresponding end references (see
    tree_line_generator() docs), and an array of corresponding lines.
    """
    line_gen = indented_tree_line_generator(tree, max_lines=max_lines)
    line_gen_result = list(zip(*line_gen))
    if line_gen_result:
        return line_gen_result
    else:
        return [], [], []
Shortcut for indented_tree_line_generator() that returns an array of start references, an array of corresponding end references (see tree_line_generator() docs), and an array of corresponding lines.
python
train
numenta/nupic
src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py#L365-L391
def _deleteRangeFromKNN(self, start=0, end=None):
    """
    This method will remove any stored records within the range from start to
    end. Noninclusive of end.

    parameters
    ------------
    start - integer representing the ROWID of the start of the deletion range,
    end - integer representing the ROWID of the end of the deletion range;
          if None, it defaults to one past the highest stored ROWID.
    """
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn

    prototype_idx = numpy.array(
        classifier.getSelf().getParameter('categoryRecencyList'))

    if end is None:
        end = prototype_idx.max() + 1

    idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
                                       prototype_idx < end)
    idsToDelete = prototype_idx[idsIdxToDelete]

    nProtos = knn._numPatterns
    knn.removeIds(idsToDelete.tolist())
    assert knn._numPatterns == nProtos - len(idsToDelete)
This method will remove any stored records within the range from start to
end. Noninclusive of end.

parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range;
      if None, it defaults to one past the highest stored ROWID.
python
valid
sveetch/boussole
boussole/parser.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/parser.py#L94-L118
def flatten_rules(self, declarations):
    """
    Flatten returned import rules from regex.

    Because import rules can contain multiple items in the same rule
    (called multiline import rule), the regex ``REGEX_IMPORT_RULE``
    returns a list of unquoted items for each rule.

    Args:
        declarations (list): Declarations captured from a SCSS source,
            as (protocole, paths) tuples.

    Returns:
        list: Flat list of unquoted import paths, filtered through
        ``filter_rules``.
    """
    rules = []

    for protocole, paths in declarations:
        # If there is a protocole (like 'url'), drop it
        if protocole:
            continue
        # Unquote and possibly split multiple rule in the same declaration
        rules.extend([self.strip_quotes(v.strip())
                      for v in paths.split(',')])

    return list(filter(self.filter_rules, rules))
Flatten returned import rules from regex.

Because import rules can contain multiple items in the same rule (called
multiline import rule), the regex ``REGEX_IMPORT_RULE`` returns a list of
unquoted items for each rule.

Args:
    declarations (list): Declarations captured from a SCSS source,
        as (protocole, paths) tuples.

Returns:
    list: Flat list of unquoted import paths, filtered through
    ``filter_rules``.
python
train
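An illustrative input/output pair for flatten_rules above, assuming declarations are (protocole, paths) tuples as the loop implies; the paths are hypothetical:

declarations = [
    ('', '"foo/bar", "baz"'),               # multiline rule: two items
    ('url', '"http://example.com/x.css"'),  # protocol rule: skipped
]
# parser.flatten_rules(declarations)
# -> ['foo/bar', 'baz']   (subject to filter_rules)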
tensorflow/probability
tensorflow_probability/python/distributions/vector_exponential_linear_operator.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_exponential_linear_operator.py#L278-L287
def _mode_mean_shape(self):
    """Shape for the mode/mean Tensors."""
    shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
    has_static_shape = tensorshape_util.is_fully_defined(shape)
    if not has_static_shape:
        shape = tf.concat([
            self.batch_shape_tensor(),
            self.event_shape_tensor(),
        ], 0)
    return shape
Shape for the mode/mean Tensors.
python
test
jcrist/skein
skein/ui.py
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/skein/ui.py#L115-L126
def remove_page(self, route):
    """Remove a proxied page from the Web UI.

    Parameters
    ----------
    route : str
        The route for the proxied page. Must be a valid path *segment*
        in a url (e.g. ``foo`` in ``/foo/bar/baz``). Routes must be
        unique across the application.
    """
    req = proto.RemoveProxyRequest(route=route)
    self._client._call('RemoveProxy', req)
Remove a proxied page from the Web UI.

Parameters
----------
route : str
    The route for the proxied page. Must be a valid path *segment* in a
    url (e.g. ``foo`` in ``/foo/bar/baz``). Routes must be unique across
    the application.
python
train
KelSolaar/Umbra
umbra/ui/themes.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/themes.py#L41-L67
def get_format(**kwargs):
    """
    Returns a `QTextCharFormat <http://doc.qt.nokia.com/qtextcharformat.html>`_ format.

    :param \*\*kwargs: Format settings.
    :type \*\*kwargs: dict
    :return: Format.
    :rtype: QTextCharFormat
    """
    settings = foundations.data_structures.Structure(**{"format": QTextCharFormat(),
                                                        "background_color": None,
                                                        "color": None,
                                                        "font_weight": None,
                                                        "font_point_size": None,
                                                        "italic": False})
    settings.update(kwargs)

    format = QTextCharFormat(settings.format)

    settings.background_color and format.setBackground(settings.background_color)
    settings.color and format.setForeground(settings.color)
    settings.font_weight and format.setFontWeight(settings.font_weight)
    settings.font_point_size and format.setFontPointSize(settings.font_point_size)
    settings.italic and format.setFontItalic(True)

    return format
Returns a `QTextCharFormat <http://doc.qt.nokia.com/qtextcharformat.html>`_ format. :param \*\*kwargs: Format settings. :type \*\*kwargs: dict :return: Format. :rtype: QTextCharFormat
[ "Returns", "a", "QTextCharFormat", "<http", ":", "//", "doc", ".", "qt", ".", "nokia", ".", "com", "/", "qtextcharformat", ".", "html", ">", "_", "format", "." ]
python
train
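A minimal usage sketch for the get_format helper above. PyQt4 availability and the illustrative color/weight values are assumptions; settings left unset (background_color, font_point_size) are simply skipped by the helper.

from PyQt4.QtGui import QColor, QFont
from umbra.ui.themes import get_format

# Bold, italic format with a custom foreground color.
keyword_format = get_format(color=QColor(92, 131, 162),
                            font_weight=QFont.Bold,
                            italic=True)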
project-rig/rig
rig/machine_control/common.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/common.py#L20-L62
def unpack_sver_response_version(packet): """For internal use. Unpack the version-related parts of an sver (aka CMD_VERSION) response. Parameters ---------- packet : :py:class:`~rig.machine_control.packets.SCPPacket` The packet received in response to the version command. Returns ------- software_name : string The name of the software running on the remote machine. (major, minor, patch) : (int, int, int) The numerical part of the semantic version number. labels : string Any labels in the version number (e.g. '-dev'). May be an empty string. """ software_name = packet.data.decode("utf-8") legacy_version_field = packet.arg2 >> 16 if legacy_version_field != 0xFFFF: # Legacy version encoding: just encoded in decimal fixed-point in the # integer. major = legacy_version_field // 100 minor = legacy_version_field % 100 patch = 0 labels = "" else: # Semantic Version encoding: packed after the null-terminator of the # software name in the version string. software_name, _, version_number = software_name.partition("\0") match = VERSION_NUMBER_REGEX.match(version_number.rstrip("\0")) assert match, "Malformed version number: {}".format(version_number) major = int(match.group(1)) minor = int(match.group(2)) patch = int(match.group(3)) labels = match.group(4) or "" return (software_name.rstrip("\0"), (major, minor, patch), labels)
[ "def", "unpack_sver_response_version", "(", "packet", ")", ":", "software_name", "=", "packet", ".", "data", ".", "decode", "(", "\"utf-8\"", ")", "legacy_version_field", "=", "packet", ".", "arg2", ">>", "16", "if", "legacy_version_field", "!=", "0xFFFF", ":", "# Legacy version encoding: just encoded in decimal fixed-point in the", "# integer.", "major", "=", "legacy_version_field", "//", "100", "minor", "=", "legacy_version_field", "%", "100", "patch", "=", "0", "labels", "=", "\"\"", "else", ":", "# Semantic Version encoding: packed after the null-terminator of the", "# software name in the version string.", "software_name", ",", "_", ",", "version_number", "=", "software_name", ".", "partition", "(", "\"\\0\"", ")", "match", "=", "VERSION_NUMBER_REGEX", ".", "match", "(", "version_number", ".", "rstrip", "(", "\"\\0\"", ")", ")", "assert", "match", ",", "\"Malformed version number: {}\"", ".", "format", "(", "version_number", ")", "major", "=", "int", "(", "match", ".", "group", "(", "1", ")", ")", "minor", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "patch", "=", "int", "(", "match", ".", "group", "(", "3", ")", ")", "labels", "=", "match", ".", "group", "(", "4", ")", "or", "\"\"", "return", "(", "software_name", ".", "rstrip", "(", "\"\\0\"", ")", ",", "(", "major", ",", "minor", ",", "patch", ")", ",", "labels", ")" ]
For internal use. Unpack the version-related parts of an sver (aka CMD_VERSION) response. Parameters ---------- packet : :py:class:`~rig.machine_control.packets.SCPPacket` The packet received in response to the version command. Returns ------- software_name : string The name of the software running on the remote machine. (major, minor, patch) : (int, int, int) The numerical part of the semantic version number. labels : string Any labels in the version number (e.g. '-dev'). May be an empty string.
[ "For", "internal", "use", ".", "Unpack", "the", "version", "-", "related", "parts", "of", "an", "sver", "(", "aka", "CMD_VERSION", ")", "response", "." ]
python
train
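A small worked example of the legacy branch above. SCPPacket itself is not constructed here; a namedtuple carrying the two fields the function reads (arg2 and data) stands in for it.

from collections import namedtuple

FakePacket = namedtuple("FakePacket", "arg2 data")

# Legacy encoding: version 1.33 packed as decimal fixed-point in the top
# 16 bits of arg2 (133 != 0xFFFF, so the semantic-version branch is skipped).
pkt = FakePacket(arg2=133 << 16, data=b"SC&MP\x00")
# unpack_sver_response_version(pkt) -> ("SC&MP", (1, 33, 0), "")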
assemblerflow/flowcraft
flowcraft/generator/inspect.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L642-L713
def _update_trace_info(self, fields, hm): """Parses a trace line and updates the :attr:`status_info` attribute. Parameters ---------- fields : list List of the tab-separated elements of the trace line hm : dict Maps the column IDs to their position in the fields argument. This dictionary object is retrieved from :func:`_header_mapping`. """ process = fields[hm["process"]] if process not in self.processes: return # Get information from a single line of trace file info = dict((column, fields[pos]) for column, pos in hm.items()) # The headers that will be used to populate the process process_tag_headers = ["realtime", "rss", "rchar", "wchar"] for h in process_tag_headers: # In the rare occasion the tag is parsed first in the trace # file than the log file, add the new tag. if info["tag"] not in self.process_tags[process]: # If the 'start' tag is present in the trace, use that # information. If not, it will be parsed in the log file. try: timestart = info["start"].split()[1] except KeyError: timestart = None self.process_tags[process][info["tag"]] = { "workdir": self._expand_path(info["hash"]), "start": timestart } if h in info and info["tag"] != "-": if h != "realtime" and info[h] != "-": self.process_tags[process][info["tag"]][h] = \ round(self._size_coverter(info[h]), 2) else: self.process_tags[process][info["tag"]][h] = info[h] # Set allocated cpu and memory information to process if "cpus" in info and not self.processes[process]["cpus"]: self.processes[process]["cpus"] = info["cpus"] if "memory" in info and not self.processes[process]["memory"]: try: self.processes[process]["memory"] = self._size_coverter( info["memory"]) except ValueError: self.processes[process]["memory"] = None if info["hash"] in self.stored_ids: return # If the task hash code is provided, expand it to the work directory # and add a new entry if "hash" in info: hs = info["hash"] info["work_dir"] = self._expand_path(hs) if "tag" in info: tag = info["tag"] if tag != "-" and tag not in self.samples and \ tag.split()[0] not in self.samples: self.samples.append(tag) self.trace_info[process].append(info) self.stored_ids.append(info["hash"])
[ "def", "_update_trace_info", "(", "self", ",", "fields", ",", "hm", ")", ":", "process", "=", "fields", "[", "hm", "[", "\"process\"", "]", "]", "if", "process", "not", "in", "self", ".", "processes", ":", "return", "# Get information from a single line of trace file", "info", "=", "dict", "(", "(", "column", ",", "fields", "[", "pos", "]", ")", "for", "column", ",", "pos", "in", "hm", ".", "items", "(", ")", ")", "# The headers that will be used to populate the process", "process_tag_headers", "=", "[", "\"realtime\"", ",", "\"rss\"", ",", "\"rchar\"", ",", "\"wchar\"", "]", "for", "h", "in", "process_tag_headers", ":", "# In the rare occasion the tag is parsed first in the trace", "# file than the log file, add the new tag.", "if", "info", "[", "\"tag\"", "]", "not", "in", "self", ".", "process_tags", "[", "process", "]", ":", "# If the 'start' tag is present in the trace, use that", "# information. If not, it will be parsed in the log file.", "try", ":", "timestart", "=", "info", "[", "\"start\"", "]", ".", "split", "(", ")", "[", "1", "]", "except", "KeyError", ":", "timestart", "=", "None", "self", ".", "process_tags", "[", "process", "]", "[", "info", "[", "\"tag\"", "]", "]", "=", "{", "\"workdir\"", ":", "self", ".", "_expand_path", "(", "info", "[", "\"hash\"", "]", ")", ",", "\"start\"", ":", "timestart", "}", "if", "h", "in", "info", "and", "info", "[", "\"tag\"", "]", "!=", "\"-\"", ":", "if", "h", "!=", "\"realtime\"", "and", "info", "[", "h", "]", "!=", "\"-\"", ":", "self", ".", "process_tags", "[", "process", "]", "[", "info", "[", "\"tag\"", "]", "]", "[", "h", "]", "=", "round", "(", "self", ".", "_size_coverter", "(", "info", "[", "h", "]", ")", ",", "2", ")", "else", ":", "self", ".", "process_tags", "[", "process", "]", "[", "info", "[", "\"tag\"", "]", "]", "[", "h", "]", "=", "info", "[", "h", "]", "# Set allocated cpu and memory information to process", "if", "\"cpus\"", "in", "info", "and", "not", "self", ".", "processes", "[", "process", "]", "[", "\"cpus\"", "]", ":", "self", ".", "processes", "[", "process", "]", "[", "\"cpus\"", "]", "=", "info", "[", "\"cpus\"", "]", "if", "\"memory\"", "in", "info", "and", "not", "self", ".", "processes", "[", "process", "]", "[", "\"memory\"", "]", ":", "try", ":", "self", ".", "processes", "[", "process", "]", "[", "\"memory\"", "]", "=", "self", ".", "_size_coverter", "(", "info", "[", "\"memory\"", "]", ")", "except", "ValueError", ":", "self", ".", "processes", "[", "process", "]", "[", "\"memory\"", "]", "=", "None", "if", "info", "[", "\"hash\"", "]", "in", "self", ".", "stored_ids", ":", "return", "# If the task hash code is provided, expand it to the work directory", "# and add a new entry", "if", "\"hash\"", "in", "info", ":", "hs", "=", "info", "[", "\"hash\"", "]", "info", "[", "\"work_dir\"", "]", "=", "self", ".", "_expand_path", "(", "hs", ")", "if", "\"tag\"", "in", "info", ":", "tag", "=", "info", "[", "\"tag\"", "]", "if", "tag", "!=", "\"-\"", "and", "tag", "not", "in", "self", ".", "samples", "and", "tag", ".", "split", "(", ")", "[", "0", "]", "not", "in", "self", ".", "samples", ":", "self", ".", "samples", ".", "append", "(", "tag", ")", "self", ".", "trace_info", "[", "process", "]", ".", "append", "(", "info", ")", "self", ".", "stored_ids", ".", "append", "(", "info", "[", "\"hash\"", "]", ")" ]
Parses a trace line and updates the :attr:`status_info` attribute. Parameters ---------- fields : list List of the tab-separated elements of the trace line hm : dict Maps the column IDs to their position in the fields argument. This dictionary object is retrieved from :func:`_header_mapping`.
[ "Parses", "a", "trace", "line", "and", "updates", "the", ":", "attr", ":", "status_info", "attribute", "." ]
python
test
facetoe/zenpy
zenpy/lib/api_objects/__init__.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api_objects/__init__.py#L4678-L4683
def restricted_brands(self): """ | Comment: ids of all brands that this ticket form is restricted to """ if self.api and self.restricted_brand_ids: return self.api._get_restricted_brands(self.restricted_brand_ids)
[ "def", "restricted_brands", "(", "self", ")", ":", "if", "self", ".", "api", "and", "self", ".", "restricted_brand_ids", ":", "return", "self", ".", "api", ".", "_get_restricted_brands", "(", "self", ".", "restricted_brand_ids", ")" ]
| Comment: ids of all brands that this ticket form is restricted to
[ "|", "Comment", ":", "ids", "of", "all", "brands", "that", "this", "ticket", "form", "is", "restricted", "to" ]
python
train
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L1473-L1506
def process_pgp(self, data, name): """ PGP key processing :param data: :param name: :return: """ ret = [] try: data = to_string(data) parts = re.split(r'-{5,}BEGIN', data) if len(parts) == 0: return if len(parts[0]) == 0: parts.pop(0) crt_arr = ['-----BEGIN' + x for x in parts] for idx, pem_rec in enumerate(crt_arr): try: pem_rec = pem_rec.strip() if len(pem_rec) == 0: continue ret.append(self.process_pgp_raw(pem_rec.encode(), name, idx)) except Exception as e: logger.error('Exception in processing PGP rec file %s: %s' % (name, e)) self.trace_logger.log(e) except Exception as e: logger.error('Exception in processing PGP file %s: %s' % (name, e)) self.trace_logger.log(e) return ret
[ "def", "process_pgp", "(", "self", ",", "data", ",", "name", ")", ":", "ret", "=", "[", "]", "try", ":", "data", "=", "to_string", "(", "data", ")", "parts", "=", "re", ".", "split", "(", "r'-{5,}BEGIN'", ",", "data", ")", "if", "len", "(", "parts", ")", "==", "0", ":", "return", "if", "len", "(", "parts", "[", "0", "]", ")", "==", "0", ":", "parts", ".", "pop", "(", "0", ")", "crt_arr", "=", "[", "'-----BEGIN'", "+", "x", "for", "x", "in", "parts", "]", "for", "idx", ",", "pem_rec", "in", "enumerate", "(", "crt_arr", ")", ":", "try", ":", "pem_rec", "=", "pem_rec", ".", "strip", "(", ")", "if", "len", "(", "pem_rec", ")", "==", "0", ":", "continue", "ret", ".", "append", "(", "self", ".", "process_pgp_raw", "(", "pem_rec", ".", "encode", "(", ")", ",", "name", ",", "idx", ")", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Exception in processing PGP rec file %s: %s'", "%", "(", "name", ",", "e", ")", ")", "self", ".", "trace_logger", ".", "log", "(", "e", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Exception in processing PGP file %s: %s'", "%", "(", "name", ",", "e", ")", ")", "self", ".", "trace_logger", ".", "log", "(", "e", ")", "return", "ret" ]
PGP key processing :param data: :param name: :return:
[ "PGP", "key", "processing", ":", "param", "data", ":", ":", "param", "name", ":", ":", "return", ":" ]
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L106-L153
def _h13_s(s): """Define the boundary between Region 1 and 3, h=f(s) Parameters ---------- s : float Specific entropy, [kJ/kgK] Returns ------- h : float Specific enthalpy, [kJ/kg] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * s(100MPa,623.15K) ≤ s ≤ s'(623.15K) References ---------- IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for Region 3, Equations as a Function of h and s for the Region Boundaries, and an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 7 Examples -------- >>> _h13_s(3.7) 1632.525047 >>> _h13_s(3.5) 1566.104611 """ # Check input parameters if s < 3.397782955 or s > 3.77828134: raise NotImplementedError("Incoming out of bound") sigma = s/3.8 I = [0, 1, 1, 3, 5, 6] J = [0, -2, 2, -12, -4, -3] n = [0.913965547600543, -0.430944856041991e-4, 0.603235694765419e2, 0.117518273082168e-17, 0.220000904781292, -0.690815545851641e2] suma = 0 for i, j, ni in zip(I, J, n): suma += ni * (sigma-0.884)**i * (sigma-0.864)**j return 1700 * suma
[ "def", "_h13_s", "(", "s", ")", ":", "# Check input parameters", "if", "s", "<", "3.397782955", "or", "s", ">", "3.77828134", ":", "raise", "NotImplementedError", "(", "\"Incoming out of bound\"", ")", "sigma", "=", "s", "/", "3.8", "I", "=", "[", "0", ",", "1", ",", "1", ",", "3", ",", "5", ",", "6", "]", "J", "=", "[", "0", ",", "-", "2", ",", "2", ",", "-", "12", ",", "-", "4", ",", "-", "3", "]", "n", "=", "[", "0.913965547600543", ",", "-", "0.430944856041991e-4", ",", "0.603235694765419e2", ",", "0.117518273082168e-17", ",", "0.220000904781292", ",", "-", "0.690815545851641e2", "]", "suma", "=", "0", "for", "i", ",", "j", ",", "ni", "in", "zip", "(", "I", ",", "J", ",", "n", ")", ":", "suma", "+=", "ni", "*", "(", "sigma", "-", "0.884", ")", "**", "i", "*", "(", "sigma", "-", "0.864", ")", "**", "j", "return", "1700", "*", "suma" ]
Define the boundary between Region 1 and 3, h=f(s) Parameters ---------- s : float Specific entropy, [kJ/kgK] Returns ------- h : float Specific enthalpy, [kJ/kg] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * s(100MPa,623.15K) ≤ s ≤ s'(623.15K) References ---------- IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for Region 3, Equations as a Function of h and s for the Region Boundaries, and an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 7 Examples -------- >>> _h13_s(3.7) 1632.525047 >>> _h13_s(3.5) 1566.104611
[ "Define", "the", "boundary", "between", "Region", "1", "and", "3", "h", "=", "f", "(", "s", ")" ]
python
train
Duke-GCB/DukeDSClient
ddsc/core/fileuploader.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/fileuploader.py#L215-L221
def _show_retry_warning(host): """ Displays a message on stderr that we lost connection to a host and will retry. :param host: str: name of the host we are trying to communicate with """ sys.stderr.write("\nConnection to {} failed. Retrying.\n".format(host)) sys.stderr.flush()
[ "def", "_show_retry_warning", "(", "host", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nConnection to {} failed. Retrying.\\n\"", ".", "format", "(", "host", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")" ]
Displays a message on stderr that we lost connection to a host and will retry. :param host: str: name of the host we are trying to communicate with
[ "Displays", "a", "message", "on", "stderr", "that", "we", "lost", "connection", "to", "a", "host", "and", "will", "retry", ".", ":", "param", "host", ":", "str", ":", "name", "of", "the", "host", "we", "are", "trying", "to", "communicate", "with" ]
python
train
wummel/patool
patoolib/programs/py_tarfile.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/py_tarfile.py#L46-L56
def create_tar (archive, compression, cmd, verbosity, interactive, filenames): """Create a TAR archive with the tarfile Python module.""" mode = get_tar_mode(compression) try: with tarfile.open(archive, mode) as tfile: for filename in filenames: tfile.add(filename) except Exception as err: msg = "error creating %s: %s" % (archive, err) raise util.PatoolError(msg) return None
[ "def", "create_tar", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ",", "filenames", ")", ":", "mode", "=", "get_tar_mode", "(", "compression", ")", "try", ":", "with", "tarfile", ".", "open", "(", "archive", ",", "mode", ")", "as", "tfile", ":", "for", "filename", "in", "filenames", ":", "tfile", ".", "add", "(", "filename", ")", "except", "Exception", "as", "err", ":", "msg", "=", "\"error creating %s: %s\"", "%", "(", "archive", ",", "err", ")", "raise", "util", ".", "PatoolError", "(", "msg", ")", "return", "None" ]
Create a TAR archive with the tarfile Python module.
[ "Create", "a", "TAR", "archive", "with", "the", "tarfile", "Python", "module", "." ]
python
train
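A standalone sketch of what the gzip path boils down to, assuming get_tar_mode maps "gzip" to the tarfile mode "w:gz"; patool's error wrapping is omitted and the named files are placeholders.

import tarfile

# Core of create_tar(archive, "gzip", ...): open in write-gzip mode and
# add each file or directory in turn.
with tarfile.open("backup.tar.gz", "w:gz") as tfile:
    for filename in ("notes.txt", "data"):
        tfile.add(filename)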
twisted/txacme
src/txacme/util.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/util.py#L20-L29
def generate_private_key(key_type): """ Generate a random private key using sensible parameters. :param str key_type: The type of key to generate. One of: ``rsa``. """ if key_type == u'rsa': return rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend()) raise ValueError(key_type)
[ "def", "generate_private_key", "(", "key_type", ")", ":", "if", "key_type", "==", "u'rsa'", ":", "return", "rsa", ".", "generate_private_key", "(", "public_exponent", "=", "65537", ",", "key_size", "=", "2048", ",", "backend", "=", "default_backend", "(", ")", ")", "raise", "ValueError", "(", "key_type", ")" ]
Generate a random private key using sensible parameters. :param str key_type: The type of key to generate. One of: ``rsa``.
[ "Generate", "a", "random", "private", "key", "using", "sensible", "parameters", "." ]
python
train
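The helper wraps a single cryptography call; the equivalent direct invocation, using the same parameters as the source, looks like this.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa

# 2048-bit RSA with the standard public exponent, as in generate_private_key.
key = rsa.generate_private_key(public_exponent=65537,
                               key_size=2048,
                               backend=default_backend())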
Clinical-Genomics/scout
scout/adapter/mongo/hgnc.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hgnc.py#L302-L320
def gene_by_alias(self, symbol, build='37'): """Return an iterable with hgnc_genes. If the gene symbol is listed as primary the iterable will only have one result. If not, the iterable will include all hgnc genes that have the symbol as an alias. Args: symbol(str) build(str) Returns: res(pymongo.Cursor(dict)) """ res = self.hgnc_collection.find({'hgnc_symbol': symbol, 'build':build}) if res.count() == 0: res = self.hgnc_collection.find({'aliases': symbol, 'build':build}) return res
[ "def", "gene_by_alias", "(", "self", ",", "symbol", ",", "build", "=", "'37'", ")", ":", "res", "=", "self", ".", "hgnc_collection", ".", "find", "(", "{", "'hgnc_symbol'", ":", "symbol", ",", "'build'", ":", "build", "}", ")", "if", "res", ".", "count", "(", ")", "==", "0", ":", "res", "=", "self", ".", "hgnc_collection", ".", "find", "(", "{", "'aliases'", ":", "symbol", ",", "'build'", ":", "build", "}", ")", "return", "res" ]
Return an iterable with hgnc_genes. If the gene symbol is listed as primary the iterable will only have one result. If not, the iterable will include all hgnc genes that have the symbol as an alias. Args: symbol(str) build(str) Returns: res(pymongo.Cursor(dict))
[ "Return", "a", "iterable", "with", "hgnc_genes", "." ]
python
test
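A hypothetical usage sketch: adapter stands for an initialized scout Mongo adapter with a populated hgnc_collection, and the symbol is illustrative.

# Primary symbols yield a single-document cursor; otherwise every gene
# listing the symbol among its aliases comes back.
for gene in adapter.gene_by_alias('POT1', build='37'):
    print(gene['hgnc_symbol'])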
katerina7479/pypdflite
pypdflite/pdfdocument.py
https://github.com/katerina7479/pypdflite/blob/ac2501f30d6619eae9dea5644717575ca9263d0a/pypdflite/pdfdocument.py#L703-L712
def _get_orientation_changes(self): """ Returns a list of the pages that have orientation changes.""" self.orientation_changes = [] for page in self.pages: if page.orientation_change is True: self.orientation_changes.append(page.index) else: pass return self.orientation_changes
[ "def", "_get_orientation_changes", "(", "self", ")", ":", "self", ".", "orientation_changes", "=", "[", "]", "for", "page", "in", "self", ".", "pages", ":", "if", "page", ".", "orientation_change", "is", "True", ":", "self", ".", "orientation_changes", ".", "append", "(", "page", ".", "index", ")", "else", ":", "pass", "return", "self", ".", "orientation_changes" ]
Returns a list of the pages that have orientation changes.
[ "Returns", "a", "list", "of", "the", "pages", "that", "have", "orientation", "changes", "." ]
python
test
Kronuz/pyScss
scss/ast.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/ast.py#L471-L493
def evaluate_call_args(self, calculator): """Interpreting this literal as a function call, return a 2-tuple of ``(args, kwargs)``. """ args = [] kwargs = OrderedDict() # Sass kwargs preserve order for var_node, value_node in self.argpairs: value = value_node.evaluate(calculator, divide=True) if var_node is None: # Positional args.append(value) else: # Named if not isinstance(var_node, Variable): raise TypeError( "Expected variable name, got {0!r}".format(var_node)) kwargs[var_node.name] = value # Slurpy arguments go on the end of the args if self.slurp: args.extend(self.slurp.evaluate(calculator, divide=True)) return args, kwargs
[ "def", "evaluate_call_args", "(", "self", ",", "calculator", ")", ":", "args", "=", "[", "]", "kwargs", "=", "OrderedDict", "(", ")", "# Sass kwargs preserve order", "for", "var_node", ",", "value_node", "in", "self", ".", "argpairs", ":", "value", "=", "value_node", ".", "evaluate", "(", "calculator", ",", "divide", "=", "True", ")", "if", "var_node", "is", "None", ":", "# Positional", "args", ".", "append", "(", "value", ")", "else", ":", "# Named", "if", "not", "isinstance", "(", "var_node", ",", "Variable", ")", ":", "raise", "TypeError", "(", "\"Expected variable name, got {0!r}\"", ".", "format", "(", "var_node", ")", ")", "kwargs", "[", "var_node", ".", "name", "]", "=", "value", "# Slurpy arguments go on the end of the args", "if", "self", ".", "slurp", ":", "args", ".", "extend", "(", "self", ".", "slurp", ".", "evaluate", "(", "calculator", ",", "divide", "=", "True", ")", ")", "return", "args", ",", "kwargs" ]
Interpreting this literal as a function call, return a 2-tuple of ``(args, kwargs)``.
[ "Interpreting", "this", "literal", "as", "a", "function", "call", "return", "a", "2", "-", "tuple", "of", "(", "args", "kwargs", ")", "." ]
python
train
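The positional/named/slurpy split in isolation, with plain 2-tuples standing in for pyScss AST nodes; this is a sketch of the logic, not the library's API.

from collections import OrderedDict

def split_call_args(argpairs, slurp=()):
    args, kwargs = [], OrderedDict()  # named args keep their order, as in Sass
    for name, value in argpairs:
        if name is None:
            args.append(value)    # positional argument
        else:
            kwargs[name] = value  # named argument
    args.extend(slurp)            # slurpy arguments go on the end
    return args, kwargs

print(split_call_args([(None, 1), ("$x", 2), (None, 3)], slurp=(4,)))
# -> ([1, 3, 4], OrderedDict([('$x', 2)]))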
Erotemic/utool
utool/util_inspect.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_inspect.py#L3030-L3069
def argparse_funckw(func, defaults={}, **kwargs): """ allows kwargs to be specified on the commandline from testfuncs Args: func (function): Kwargs: lbl, verbose, only_specified, force_keys, type_hint, alias_dict Returns: dict: funckw CommandLine: python -m utool.util_inspect argparse_funckw SeeAlso: exec_funckw recursive_parse_kwargs parse_kwarg_keys Example: >>> # ENABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> func = get_instance_attrnames >>> funckw = argparse_funckw(func) >>> result = ('funckw = %s' % (ut.repr3(funckw),)) >>> print(result) funckw = { 'default': True, 'with_methods': True, 'with_properties': True, } """ import utool as ut funckw_ = ut.get_funckw(func, recursive=True) funckw_.update(defaults) funckw = ut.argparse_dict(funckw_, **kwargs) return funckw
[ "def", "argparse_funckw", "(", "func", ",", "defaults", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "import", "utool", "as", "ut", "funckw_", "=", "ut", ".", "get_funckw", "(", "func", ",", "recursive", "=", "True", ")", "funckw_", ".", "update", "(", "defaults", ")", "funckw", "=", "ut", ".", "argparse_dict", "(", "funckw_", ",", "*", "*", "kwargs", ")", "return", "funckw" ]
allows kwargs to be specified on the commandline from testfuncs Args: func (function): Kwargs: lbl, verbose, only_specified, force_keys, type_hint, alias_dict Returns: dict: funckw CommandLine: python -m utool.util_inspect argparse_funckw SeeAlso: exec_funckw recursive_parse_kwargs parse_kwarg_keys Example: >>> # ENABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> func = get_instance_attrnames >>> funckw = argparse_funckw(func) >>> result = ('funckw = %s' % (ut.repr3(funckw),)) >>> print(result) funckw = { 'default': True, 'with_methods': True, 'with_properties': True, }
[ "allows", "kwargs", "to", "be", "specified", "on", "the", "commandline", "from", "testfuncs" ]
python
train
qiniu/python-sdk
qiniu/services/compute/qcos_api.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/compute/qcos_api.py#L671-L685
def get_web_proxy(self, backend): """Get a one-time proxy address. Obtains a one-time, externally accessible proxy address for an internal network address. Args: - backend: backend address, e.g. "10.128.0.1:8080" Returns: a tuple of the form (<result>, <ResponseInfo>) - result: proxy address info on success, or {"error": "<errMsg string>"} on failure - ResponseInfo: the Response information of the request """ url = '{0}/v3/webproxy'.format(self.host) return self.__post(url, {'backend': backend})
[ "def", "get_web_proxy", "(", "self", ",", "backend", ")", ":", "url", "=", "'{0}/v3/webproxy'", ".", "format", "(", "self", ".", "host", ")", "return", "self", ".", "__post", "(", "url", ",", "{", "'backend'", ":", "backend", "}", ")" ]
Get a one-time proxy address. Obtains a one-time, externally accessible proxy address for an internal network address. Args: - backend: backend address, e.g. "10.128.0.1:8080" Returns: a tuple of the form (<result>, <ResponseInfo>) - result: proxy address info on success, or {"error": "<errMsg string>"} on failure - ResponseInfo: the Response information of the request
[ "获取一次性代理地址" ]
python
train
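A hypothetical call; client stands for an initialized qiniu QCOS client with host set.

# Per the docstring, a (result, ResponseInfo) tuple comes back: result holds
# the proxy address on success or {"error": "<errMsg string>"} on failure.
ret, info = client.get_web_proxy("10.128.0.1:8080")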
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4760-L4768
def setOverlayAutoCurveDistanceRangeInMeters(self, ulOverlayHandle, fMinDistanceInMeters, fMaxDistanceInMeters): """ For high-quality curved overlays only, sets the distance range in meters from the overlay used to automatically curve the surface around the viewer. Min distance is when the surface will be most curved. Max is when least curved. """ fn = self.function_table.setOverlayAutoCurveDistanceRangeInMeters result = fn(ulOverlayHandle, fMinDistanceInMeters, fMaxDistanceInMeters) return result
[ "def", "setOverlayAutoCurveDistanceRangeInMeters", "(", "self", ",", "ulOverlayHandle", ",", "fMinDistanceInMeters", ",", "fMaxDistanceInMeters", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayAutoCurveDistanceRangeInMeters", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fMinDistanceInMeters", ",", "fMaxDistanceInMeters", ")", "return", "result" ]
For high-quality curved overlays only, sets the distance range in meters from the overlay used to automatically curve the surface around the viewer. Min distance is when the surface will be most curved. Max is when least curved.
[ "For", "high", "-", "quality", "curved", "overlays", "only", "sets", "the", "distance", "range", "in", "meters", "from", "the", "overlay", "used", "to", "automatically", "curve", "the", "surface", "around", "the", "viewer", ".", "Min", "is", "distance", "is", "when", "the", "surface", "will", "be", "most", "curved", ".", "Max", "is", "when", "least", "curved", "." ]
python
train
croscon/fleaker
fleaker/peewee/fields/arrow.py
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/peewee/fields/arrow.py#L68-L80
def python_value(self, value): """Return the value in the database as an arrow object. Returns: arrow.Arrow: An instance of arrow with the field filled in. """ value = super(ArrowDateTimeField, self).python_value(value) if (isinstance(value, (datetime.datetime, datetime.date, string_types))): return arrow.get(value) return value
[ "def", "python_value", "(", "self", ",", "value", ")", ":", "value", "=", "super", "(", "ArrowDateTimeField", ",", "self", ")", ".", "python_value", "(", "value", ")", "if", "(", "isinstance", "(", "value", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "string_types", ")", ")", ")", ":", "return", "arrow", ".", "get", "(", "value", ")", "return", "value" ]
Return the value in the database as an arrow object. Returns: arrow.Arrow: An instance of arrow with the field filled in.
[ "Return", "the", "value", "in", "the", "data", "base", "as", "an", "arrow", "object", "." ]
python
train
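What the conversion step does, independent of peewee: arrow.get (real arrow API) accepts datetimes, dates, and ISO strings alike, so both calls below return arrow.Arrow instances.

import datetime
import arrow

print(arrow.get(datetime.datetime(2020, 1, 1)))   # 2020-01-01T00:00:00+00:00
print(arrow.get("2020-01-01T00:00:00+00:00"))     # same instant, parsed from a string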
apache/incubator-heron
heron/statemgrs/src/python/zkstatemanager.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/statemgrs/src/python/zkstatemanager.py#L140-L159
def get_topology(self, topologyName, callback=None): """ get topologies """ isWatching = False # Temp dict used to return result # if callback is not provided. ret = { "result": None } if callback: isWatching = True else: def callback(data): """Custom callback to get the topologies right now.""" ret["result"] = data self._get_topology_with_watch(topologyName, callback, isWatching) # The topologies are now populated with the data. return ret["result"]
[ "def", "get_topology", "(", "self", ",", "topologyName", ",", "callback", "=", "None", ")", ":", "isWatching", "=", "False", "# Temp dict used to return result", "# if callback is not provided.", "ret", "=", "{", "\"result\"", ":", "None", "}", "if", "callback", ":", "isWatching", "=", "True", "else", ":", "def", "callback", "(", "data", ")", ":", "\"\"\"Custom callback to get the topologies right now.\"\"\"", "ret", "[", "\"result\"", "]", "=", "data", "self", ".", "_get_topology_with_watch", "(", "topologyName", ",", "callback", ",", "isWatching", ")", "# The topologies are now populated with the data.", "return", "ret", "[", "\"result\"", "]" ]
get topologies
[ "get", "topologies" ]
python
valid
daethnir/authprogs
authprogs/authprogs.py
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L138-L152
def get_client_ip(self): """Return the client IP from the environment.""" if self.client_ip: return self.client_ip try: client = os.environ.get('SSH_CONNECTION', os.environ.get('SSH_CLIENT')) self.client_ip = client.split()[0] self.logdebug('client_ip: %s\n' % self.client_ip) return self.client_ip except: raise SSHEnvironmentError('cannot identify the ssh client ' 'IP address')
[ "def", "get_client_ip", "(", "self", ")", ":", "if", "self", ".", "client_ip", ":", "return", "self", ".", "client_ip", "try", ":", "client", "=", "os", ".", "environ", ".", "get", "(", "'SSH_CONNECTION'", ",", "os", ".", "environ", ".", "get", "(", "'SSH_CLIENT'", ")", ")", "self", ".", "client_ip", "=", "client", ".", "split", "(", ")", "[", "0", "]", "self", ".", "logdebug", "(", "'client_ip: %s\\n'", "%", "self", ".", "client_ip", ")", "return", "self", ".", "client_ip", "except", ":", "raise", "SSHEnvironmentError", "(", "'cannot identify the ssh client '", "'IP address'", ")" ]
Return the client IP from the environment.
[ "Return", "the", "client", "IP", "from", "the", "environment", "." ]
python
train
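The environment format being parsed: sshd sets SSH_CONNECTION to "client_ip client_port server_ip server_port", so the first whitespace-separated field is the client address (the value below is illustrative).

import os

os.environ["SSH_CONNECTION"] = "203.0.113.5 52414 192.0.2.10 22"
client = os.environ.get("SSH_CONNECTION", os.environ.get("SSH_CLIENT"))
print(client.split()[0])  # 203.0.113.5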
FocusLab/Albertson
albertson/base.py
https://github.com/FocusLab/Albertson/blob/a42f9873559df9188c40c34fdffb079d78eaa3fe/albertson/base.py#L55-L63
def get_conn(self, aws_access_key=None, aws_secret_key=None): ''' Hook point for overriding how the CounterPool gets its connection to AWS. ''' return boto.connect_dynamodb( aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, )
[ "def", "get_conn", "(", "self", ",", "aws_access_key", "=", "None", ",", "aws_secret_key", "=", "None", ")", ":", "return", "boto", ".", "connect_dynamodb", "(", "aws_access_key_id", "=", "aws_access_key", ",", "aws_secret_access_key", "=", "aws_secret_key", ",", ")" ]
Hook point for overriding how the CounterPool gets its connection to AWS.
[ "Hook", "point", "for", "overriding", "how", "the", "CounterPool", "gets", "its", "connection", "to", "AWS", "." ]
python
valid
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L1168-L1222
def datagram_received(self, data, addr): """Method run when data is received from the devices This method will unpack the data according to the LIFX protocol. If a new device is found, the Light device will be created and started as a DatagramProtocol and will be registered with the parent. :param data: raw data :type data: bytestring :param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6 :type addr: tuple """ response = unpack_lifx_message(data) response.ip_addr = addr[0] mac_addr = response.target_addr if mac_addr == BROADCAST_MAC: return if type(response) == StateService and response.service == 1: # only look for UDP services # discovered remote_port = response.port elif type(response) == LightState: # looks like the lights are volunteering LigthState after booting remote_port = UDP_BROADCAST_PORT else: return if self.ipv6prefix: family = socket.AF_INET6 remote_ip = mac_to_ipv6_linklocal(mac_addr, self.ipv6prefix) else: family = socket.AF_INET remote_ip = response.ip_addr if mac_addr in self.lights: # rediscovered light = self.lights[mac_addr] # nothing to do if light.registered: return light.cleanup() light.ip_addr = remote_ip light.port = remote_port else: # newly discovered light = Light(self.loop, mac_addr, remote_ip, remote_port, parent=self) self.lights[mac_addr] = light coro = self.loop.create_datagram_endpoint( lambda: light, family=family, remote_addr=(remote_ip, remote_port)) light.task = self.loop.create_task(coro)
[ "def", "datagram_received", "(", "self", ",", "data", ",", "addr", ")", ":", "response", "=", "unpack_lifx_message", "(", "data", ")", "response", ".", "ip_addr", "=", "addr", "[", "0", "]", "mac_addr", "=", "response", ".", "target_addr", "if", "mac_addr", "==", "BROADCAST_MAC", ":", "return", "if", "type", "(", "response", ")", "==", "StateService", "and", "response", ".", "service", "==", "1", ":", "# only look for UDP services", "# discovered", "remote_port", "=", "response", ".", "port", "elif", "type", "(", "response", ")", "==", "LightState", ":", "# looks like the lights are volunteering LigthState after booting", "remote_port", "=", "UDP_BROADCAST_PORT", "else", ":", "return", "if", "self", ".", "ipv6prefix", ":", "family", "=", "socket", ".", "AF_INET6", "remote_ip", "=", "mac_to_ipv6_linklocal", "(", "mac_addr", ",", "self", ".", "ipv6prefix", ")", "else", ":", "family", "=", "socket", ".", "AF_INET", "remote_ip", "=", "response", ".", "ip_addr", "if", "mac_addr", "in", "self", ".", "lights", ":", "# rediscovered", "light", "=", "self", ".", "lights", "[", "mac_addr", "]", "# nothing to do", "if", "light", ".", "registered", ":", "return", "light", ".", "cleanup", "(", ")", "light", ".", "ip_addr", "=", "remote_ip", "light", ".", "port", "=", "remote_port", "else", ":", "# newly discovered", "light", "=", "Light", "(", "self", ".", "loop", ",", "mac_addr", ",", "remote_ip", ",", "remote_port", ",", "parent", "=", "self", ")", "self", ".", "lights", "[", "mac_addr", "]", "=", "light", "coro", "=", "self", ".", "loop", ".", "create_datagram_endpoint", "(", "lambda", ":", "light", ",", "family", "=", "family", ",", "remote_addr", "=", "(", "remote_ip", ",", "remote_port", ")", ")", "light", ".", "task", "=", "self", ".", "loop", ".", "create_task", "(", "coro", ")" ]
Method run when data is received from the devices This method will unpack the data according to the LIFX protocol. If a new device is found, the Light device will be created and started as a DatagramProtocol and will be registered with the parent. :param data: raw data :type data: bytestring :param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6 :type addr: tuple
[ "Method", "run", "when", "data", "is", "received", "from", "the", "devices" ]
python
train
angr/angr
angr/state_plugins/abstract_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/abstract_memory.py#L168-L177
def dbg_print(self, indent=0): """ Print out debugging information """ print("%sA-locs:" % (" " * indent)) for aloc_id, aloc in self._alocs.items(): print("%s<0x%x> %s" % (" " * (indent + 2), aloc_id, aloc)) print("%sMemory:" % (" " * indent)) self.memory.dbg_print(indent=indent + 2)
[ "def", "dbg_print", "(", "self", ",", "indent", "=", "0", ")", ":", "print", "(", "\"%sA-locs:\"", "%", "(", "\" \"", "*", "indent", ")", ")", "for", "aloc_id", ",", "aloc", "in", "self", ".", "_alocs", ".", "items", "(", ")", ":", "print", "(", "\"%s<0x%x> %s\"", "%", "(", "\" \"", "*", "(", "indent", "+", "2", ")", ",", "aloc_id", ",", "aloc", ")", ")", "print", "(", "\"%sMemory:\"", "%", "(", "\" \"", "*", "indent", ")", ")", "self", ".", "memory", ".", "dbg_print", "(", "indent", "=", "indent", "+", "2", ")" ]
Print out debugging information
[ "Print", "out", "debugging", "information" ]
python
train
facelessuser/pyspelling
pyspelling/__init__.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/__init__.py#L260-L268
def _to_flags(self, text): """Convert text representation of flags to actual flags.""" flags = 0 for x in text.split('|'): value = x.strip().upper() if value: flags |= self.GLOB_FLAG_MAP.get(value, 0) return flags
[ "def", "_to_flags", "(", "self", ",", "text", ")", ":", "flags", "=", "0", "for", "x", "in", "text", ".", "split", "(", "'|'", ")", ":", "value", "=", "x", ".", "strip", "(", ")", ".", "upper", "(", ")", "if", "value", ":", "flags", "|=", "self", ".", "GLOB_FLAG_MAP", ".", "get", "(", "value", ",", "0", ")", "return", "flags" ]
Convert text representation of flags to actual flags.
[ "Convert", "text", "representation", "of", "flags", "to", "actual", "flags", "." ]
python
train
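The same pipe-separated flag parsing in isolation; the flag values here are stand-ins (pyspelling's real GLOB_FLAG_MAP holds wcmatch glob flags).

GLOB_FLAG_MAP = {"CASE": 1, "DOTGLOB": 2, "EXTGLOB": 4}  # stand-in values

def to_flags(text):
    flags = 0
    for x in text.split("|"):
        value = x.strip().upper()
        if value:  # skip empty segments such as "a||b"
            flags |= GLOB_FLAG_MAP.get(value, 0)
    return flags

print(to_flags("case | extglob"))  # 5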
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py#L628-L642
def print_cmd_line(self, s, target, source, env): """ In python 3, and in some of our tests, sys.stdout is a String io object, and it takes unicode strings only. In other cases it's a regular Python 2.x file object which takes strings (bytes), and if you pass those a unicode object they try to decode with 'ascii' codec which fails if the cmd line has any hi-bit-set chars. This code assumes s is a regular string, but should work if it's unicode too. """ try: sys.stdout.write(s + u"\n") except UnicodeDecodeError: sys.stdout.write(s + "\n")
[ "def", "print_cmd_line", "(", "self", ",", "s", ",", "target", ",", "source", ",", "env", ")", ":", "try", ":", "sys", ".", "stdout", ".", "write", "(", "s", "+", "u\"\\n\"", ")", "except", "UnicodeDecodeError", ":", "sys", ".", "stdout", ".", "write", "(", "s", "+", "\"\\n\"", ")" ]
In python 3, and in some of our tests, sys.stdout is a String io object, and it takes unicode strings only. In other cases it's a regular Python 2.x file object which takes strings (bytes), and if you pass those a unicode object they try to decode with 'ascii' codec which fails if the cmd line has any hi-bit-set chars. This code assumes s is a regular string, but should work if it's unicode too.
[ "In", "python", "3", "and", "in", "some", "of", "our", "tests", "sys", ".", "stdout", "is", "a", "String", "io", "object", "and", "it", "takes", "unicode", "strings", "only", "In", "other", "cases", "it", "s", "a", "regular", "Python", "2", ".", "x", "file", "object", "which", "takes", "strings", "(", "bytes", ")", "and", "if", "you", "pass", "those", "a", "unicode", "object", "they", "try", "to", "decode", "with", "ascii", "codec", "which", "fails", "if", "the", "cmd", "line", "has", "any", "hi", "-", "bit", "-", "set", "chars", ".", "This", "code", "assumes", "s", "is", "a", "regular", "string", "but", "should", "work", "if", "it", "s", "unicode", "too", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_icons.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_icons.py#L104-L115
def _get_state(self): """get state instance which was clicked on :return: State that represents the icon which was clicked on :rtype: rafcon.core.states.State """ selected = self.view.get_selected_items() if not selected: return shorthand, state_class = self.view.states[selected[0][0]] return state_class()
[ "def", "_get_state", "(", "self", ")", ":", "selected", "=", "self", ".", "view", ".", "get_selected_items", "(", ")", "if", "not", "selected", ":", "return", "shorthand", ",", "state_class", "=", "self", ".", "view", ".", "states", "[", "selected", "[", "0", "]", "[", "0", "]", "]", "return", "state_class", "(", ")" ]
get state instance which was clicked on :return: State that represents the icon which was clicked on :rtype: rafcon.core.states.State
[ "get", "state", "instance", "which", "was", "clicked", "on" ]
python
train
onelogin/python3-saml
src/onelogin/saml2/auth.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/auth.py#L494-L531
def __build_signature(self, data, saml_type, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1): """ Builds the Signature :param data: The Request data :type data: dict :param saml_type: The type of SAML message to build the signature for :type saml_type: string SAMLRequest | SAMLResponse :param sign_algorithm: Signature algorithm method :type sign_algorithm: string """ assert saml_type in ('SAMLRequest', 'SAMLResponse') key = self.get_settings().get_sp_key() if not key: raise OneLogin_Saml2_Error( "Trying to sign the %s but can't load the SP private key." % saml_type, OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND ) msg = self.__build_sign_query(data[saml_type], data.get('RelayState', None), sign_algorithm, saml_type) sign_algorithm_transform_map = { OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1, OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1, OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256, OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384, OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512 } sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.Transform.RSA_SHA1) signature = OneLogin_Saml2_Utils.sign_binary(msg, key, sign_algorithm_transform, self.__settings.is_debug_active()) data['Signature'] = OneLogin_Saml2_Utils.b64encode(signature) data['SigAlg'] = sign_algorithm
[ "def", "__build_signature", "(", "self", ",", "data", ",", "saml_type", ",", "sign_algorithm", "=", "OneLogin_Saml2_Constants", ".", "RSA_SHA1", ")", ":", "assert", "saml_type", "in", "(", "'SAMLRequest'", ",", "'SAMLResponse'", ")", "key", "=", "self", ".", "get_settings", "(", ")", ".", "get_sp_key", "(", ")", "if", "not", "key", ":", "raise", "OneLogin_Saml2_Error", "(", "\"Trying to sign the %s but can't load the SP private key.\"", "%", "saml_type", ",", "OneLogin_Saml2_Error", ".", "PRIVATE_KEY_NOT_FOUND", ")", "msg", "=", "self", ".", "__build_sign_query", "(", "data", "[", "saml_type", "]", ",", "data", ".", "get", "(", "'RelayState'", ",", "None", ")", ",", "sign_algorithm", ",", "saml_type", ")", "sign_algorithm_transform_map", "=", "{", "OneLogin_Saml2_Constants", ".", "DSA_SHA1", ":", "xmlsec", ".", "Transform", ".", "DSA_SHA1", ",", "OneLogin_Saml2_Constants", ".", "RSA_SHA1", ":", "xmlsec", ".", "Transform", ".", "RSA_SHA1", ",", "OneLogin_Saml2_Constants", ".", "RSA_SHA256", ":", "xmlsec", ".", "Transform", ".", "RSA_SHA256", ",", "OneLogin_Saml2_Constants", ".", "RSA_SHA384", ":", "xmlsec", ".", "Transform", ".", "RSA_SHA384", ",", "OneLogin_Saml2_Constants", ".", "RSA_SHA512", ":", "xmlsec", ".", "Transform", ".", "RSA_SHA512", "}", "sign_algorithm_transform", "=", "sign_algorithm_transform_map", ".", "get", "(", "sign_algorithm", ",", "xmlsec", ".", "Transform", ".", "RSA_SHA1", ")", "signature", "=", "OneLogin_Saml2_Utils", ".", "sign_binary", "(", "msg", ",", "key", ",", "sign_algorithm_transform", ",", "self", ".", "__settings", ".", "is_debug_active", "(", ")", ")", "data", "[", "'Signature'", "]", "=", "OneLogin_Saml2_Utils", ".", "b64encode", "(", "signature", ")", "data", "[", "'SigAlg'", "]", "=", "sign_algorithm" ]
Builds the Signature :param data: The Request data :type data: dict :param saml_type: The type of SAML message to build the signature for :type saml_type: string SAMLRequest | SAMLResponse :param sign_algorithm: Signature algorithm method :type sign_algorithm: string
[ "Builds", "the", "Signature", ":", "param", "data", ":", "The", "Request", "data", ":", "type", "data", ":", "dict" ]
python
train
wtsi-hgi/python-baton-wrapper
baton/_baton/baton_access_control_mappers.py
https://github.com/wtsi-hgi/python-baton-wrapper/blob/ae0c9e3630e2c4729a0614cc86f493688436b0b7/baton/_baton/baton_access_control_mappers.py#L208-L232
def _hijacked_run_baton_query( self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) -> List[Dict]: """ Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that originate from code called from frames with the ids in `self._hijack_frame_ids`. :param baton_binary: see `BatonRunner.run_baton_query` :param program_arguments: see `BatonRunner.run_baton_query` :param input_data: see `BatonRunner.run_baton_query` :return: see `BatonRunner.run_baton_query` """ if baton_binary == BatonBinary.BATON_CHMOD: current_frame = inspect.currentframe() def frame_code_in_same_file(frame) -> bool: return frame_back.f_code.co_filename == current_frame.f_code.co_filename frame_back = current_frame.f_back assert frame_code_in_same_file(frame_back) while frame_back is not None and frame_code_in_same_file(frame_back): if id(frame_back) in self._hijack_frame_ids: return self._original_run_baton_query(baton_binary, [BATON_CHMOD_RECURSIVE_FLAG], input_data) frame_back = frame_back.f_back return self._original_run_baton_query(baton_binary, program_arguments, input_data)
[ "def", "_hijacked_run_baton_query", "(", "self", ",", "baton_binary", ":", "BatonBinary", ",", "program_arguments", ":", "List", "[", "str", "]", "=", "None", ",", "input_data", ":", "Any", "=", "None", ")", "->", "List", "[", "Dict", "]", ":", "if", "baton_binary", "==", "BatonBinary", ".", "BATON_CHMOD", ":", "current_frame", "=", "inspect", ".", "currentframe", "(", ")", "def", "frame_code_in_same_file", "(", "frame", ")", "->", "bool", ":", "return", "frame_back", ".", "f_code", ".", "co_filename", "==", "current_frame", ".", "f_code", ".", "co_filename", "frame_back", "=", "current_frame", ".", "f_back", "assert", "frame_code_in_same_file", "(", "frame_back", ")", "while", "frame_back", "is", "not", "None", "and", "frame_code_in_same_file", "(", "frame_back", ")", ":", "if", "id", "(", "frame_back", ")", "in", "self", ".", "_hijack_frame_ids", ":", "return", "self", ".", "_original_run_baton_query", "(", "baton_binary", ",", "[", "BATON_CHMOD_RECURSIVE_FLAG", "]", ",", "input_data", ")", "frame_back", "=", "frame_back", ".", "f_back", "return", "self", ".", "_original_run_baton_query", "(", "baton_binary", ",", "program_arguments", ",", "input_data", ")" ]
Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that originate from code called from frames with the ids in `self._hijack_frame_ids`. :param baton_binary: see `BatonRunner.run_baton_query` :param program_arguments: see `BatonRunner.run_baton_query` :param input_data: see `BatonRunner.run_baton_query` :return: see `BatonRunner.run_baton_query`
[ "Hijacked", "run_baton_query", "method", "with", "hijacking", "to", "add", "the", "--", "recursive", "flag", "to", "calls", "to", "baton", "-", "chmod", "that", "originate", "from", "code", "called", "from", "frames", "with", "the", "ids", "in", "self", ".", "_hijack_frame_ids", ".", ":", "param", "baton_binary", ":", "see", "BatonRunner", ".", "run_baton_query", ":", "param", "program_arguments", ":", "see", "BatonRunner", ".", "run_baton_query", ":", "param", "input_data", ":", "see", "BatonRunner", ".", "run_baton_query", ":", "return", ":", "see", "BatonRunner", ".", "run_baton_query" ]
python
train
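The frame-walking trick in isolation: record a frame's id, then later detect whether that frame is still on the call stack. A sketch of the mechanism only, not baton's API.

import inspect

marked_frame_ids = set()

def outer():
    marked_frame_ids.add(id(inspect.currentframe()))  # mark this frame
    return inner()

def inner():
    frame = inspect.currentframe().f_back
    while frame is not None:            # walk up the stack
        if id(frame) in marked_frame_ids:
            return True                 # a marked caller is on the stack
        frame = frame.f_back
    return False

print(outer())  # True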
ioos/pyoos
pyoos/parsers/ioos/one/timeseries_profile.py
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L74-L88
def _get_point(self, profile, point): """ Finds the given point in the profile, or adds it in sorted z order. """ cur_points_z = [p.location.z for p in profile.elements] try: cur_idx = cur_points_z.index(point.z) return profile.elements[cur_idx] except ValueError: new_idx = bisect_left(cur_points_z, point.z) new_point = Point() new_point.location = sPoint(point) new_point.time = profile.time profile.elements.insert(new_idx, new_point) return new_point
[ "def", "_get_point", "(", "self", ",", "profile", ",", "point", ")", ":", "cur_points_z", "=", "[", "p", ".", "location", ".", "z", "for", "p", "in", "profile", ".", "elements", "]", "try", ":", "cur_idx", "=", "cur_points_z", ".", "index", "(", "point", ".", "z", ")", "return", "profile", ".", "elements", "[", "cur_idx", "]", "except", "ValueError", ":", "new_idx", "=", "bisect_left", "(", "cur_points_z", ",", "point", ".", "z", ")", "new_point", "=", "Point", "(", ")", "new_point", ".", "location", "=", "sPoint", "(", "point", ")", "new_point", ".", "time", "=", "profile", ".", "time", "profile", ".", "elements", ".", "insert", "(", "new_idx", ",", "new_point", ")", "return", "new_point" ]
Finds the given point in the profile, or adds it in sorted z order.
[ "Finds", "the", "given", "point", "in", "the", "profile", "or", "adds", "it", "in", "sorted", "z", "order", "." ]
python
train
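The find-or-sorted-insert pattern above, shown with plain floats in place of profile points.

from bisect import bisect_left

depths = [0.0, 5.0, 10.0]
z = 7.5
try:
    idx = depths.index(z)         # reuse an existing point
except ValueError:
    idx = bisect_left(depths, z)  # insertion index that keeps the list sorted
    depths.insert(idx, z)
print(depths)  # [0.0, 5.0, 7.5, 10.0]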
spacetelescope/pysynphot
pysynphot/observationmode.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/observationmode.py#L346-L366
def Sensitivity(self): """Sensitivity spectrum to convert flux in :math:`erg \\; cm^{-2} \\; s^{-1} \\; \\AA^{-1}` to :math:`count s^{-1} \\AA^{-1}`. Calculation is done by combining the throughput curves with :math:`\\frac{h \\; c}{\\lambda}` . Returns ------- sensitivity : `~pysynphot.spectrum.TabularSpectralElement` """ sensitivity = spectrum.TabularSpectralElement() product = self._multiplyThroughputs() sensitivity._wavetable = product.GetWaveSet() sensitivity._throughputtable = product(sensitivity._wavetable) * \ sensitivity._wavetable * self._constant return sensitivity
[ "def", "Sensitivity", "(", "self", ")", ":", "sensitivity", "=", "spectrum", ".", "TabularSpectralElement", "(", ")", "product", "=", "self", ".", "_multiplyThroughputs", "(", ")", "sensitivity", ".", "_wavetable", "=", "product", ".", "GetWaveSet", "(", ")", "sensitivity", ".", "_throughputtable", "=", "product", "(", "sensitivity", ".", "_wavetable", ")", "*", "sensitivity", ".", "_wavetable", "*", "self", ".", "_constant", "return", "sensitivity" ]
Sensitivity spectrum to convert flux in :math:`erg \\; cm^{-2} \\; s^{-1} \\; \\AA^{-1}` to :math:`count s^{-1} \\AA^{-1}`. Calculation is done by combining the throughput curves with :math:`\\frac{h \\; c}{\\lambda}` . Returns ------- sensitivity : `~pysynphot.spectrum.TabularSpectralElement`
[ "Sensitivity", "spectrum", "to", "convert", "flux", "in", ":", "math", ":", "erg", "\\\\", ";", "cm^", "{", "-", "2", "}", "\\\\", ";", "s^", "{", "-", "1", "}", "\\\\", ";", "\\\\", "AA^", "{", "-", "1", "}", "to", ":", "math", ":", "count", "s^", "{", "-", "1", "}", "\\\\", "AA^", "{", "-", "1", "}", ".", "Calculation", "is", "done", "by", "combining", "the", "throughput", "curves", "with", ":", "math", ":", "\\\\", "frac", "{", "h", "\\\\", ";", "c", "}", "{", "\\\\", "lambda", "}", "." ]
python
train
thiagopbueno/tf-rddlsim
tfrddlsim/viz/generic_visualizer.py
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/viz/generic_visualizer.py#L143-L149
def _render_round_init(self, horizon: int, non_fluents: NonFluents) -> None: '''Prints round init information about `horizon` and `non_fluents`.''' print('*********************************************************') print('>>> ROUND INIT, horizon = {}'.format(horizon)) print('*********************************************************') fluent_variables = self._compiler.rddl.non_fluent_variables self._render_fluent_timestep('non-fluents', non_fluents, fluent_variables)
[ "def", "_render_round_init", "(", "self", ",", "horizon", ":", "int", ",", "non_fluents", ":", "NonFluents", ")", "->", "None", ":", "print", "(", "'*********************************************************'", ")", "print", "(", "'>>> ROUND INIT, horizon = {}'", ".", "format", "(", "horizon", ")", ")", "print", "(", "'*********************************************************'", ")", "fluent_variables", "=", "self", ".", "_compiler", ".", "rddl", ".", "non_fluent_variables", "self", ".", "_render_fluent_timestep", "(", "'non-fluents'", ",", "non_fluents", ",", "fluent_variables", ")" ]
Prints round init information about `horizon` and `non_fluents`.
[ "Prints", "round", "init", "information", "about", "horizon", "and", "non_fluents", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L407-L447
def make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params): """Return a list of `MutantFeatureValue`s and a list of mutant Examples. Args: example_protos: The examples to mutate. original_feature: A `OriginalFeatureList` that encapsulates the feature to mutate. index_to_mutate: The index of the int64_list or float_list to mutate. viz_params: A `VizParams` object that contains the UI state of the request. Returns: A list of `MutantFeatureValue`s and a list of mutant examples. """ mutant_features = make_mutant_features(original_feature, index_to_mutate, viz_params) mutant_examples = [] for example_proto in example_protos: for mutant_feature in mutant_features: copied_example = copy.deepcopy(example_proto) feature_name = mutant_feature.original_feature.feature_name try: feature_list = proto_value_for_feature(copied_example, feature_name) if index_to_mutate is None: new_values = mutant_feature.mutant_value else: new_values = list(feature_list) new_values[index_to_mutate] = mutant_feature.mutant_value del feature_list[:] feature_list.extend(new_values) mutant_examples.append(copied_example) except (ValueError, IndexError): # If the mutant value can't be set, still add the example to the # mutant_example even though no change was made. This is necessary to # allow for computation of global PD plots when not all examples have # the same number of feature values for a feature. mutant_examples.append(copied_example) return mutant_features, mutant_examples
[ "def", "make_mutant_tuples", "(", "example_protos", ",", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", ":", "mutant_features", "=", "make_mutant_features", "(", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", "mutant_examples", "=", "[", "]", "for", "example_proto", "in", "example_protos", ":", "for", "mutant_feature", "in", "mutant_features", ":", "copied_example", "=", "copy", ".", "deepcopy", "(", "example_proto", ")", "feature_name", "=", "mutant_feature", ".", "original_feature", ".", "feature_name", "try", ":", "feature_list", "=", "proto_value_for_feature", "(", "copied_example", ",", "feature_name", ")", "if", "index_to_mutate", "is", "None", ":", "new_values", "=", "mutant_feature", ".", "mutant_value", "else", ":", "new_values", "=", "list", "(", "feature_list", ")", "new_values", "[", "index_to_mutate", "]", "=", "mutant_feature", ".", "mutant_value", "del", "feature_list", "[", ":", "]", "feature_list", ".", "extend", "(", "new_values", ")", "mutant_examples", ".", "append", "(", "copied_example", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "# If the mutant value can't be set, still add the example to the", "# mutant_example even though no change was made. This is necessary to", "# allow for computation of global PD plots when not all examples have", "# the same number of feature values for a feature.", "mutant_examples", ".", "append", "(", "copied_example", ")", "return", "mutant_features", ",", "mutant_examples" ]
Return a list of `MutantFeatureValue`s and a list of mutant Examples. Args: example_protos: The examples to mutate. original_feature: A `OriginalFeatureList` that encapsulates the feature to mutate. index_to_mutate: The index of the int64_list or float_list to mutate. viz_params: A `VizParams` object that contains the UI state of the request. Returns: A list of `MutantFeatureValue`s and a list of mutant examples.
[ "Return", "a", "list", "of", "MutantFeatureValue", "s", "and", "a", "list", "of", "mutant", "Examples", "." ]
python
train
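A minimal sketch of the copy-and-mutate pattern the `make_mutant_tuples` record above relies on (deep-copy the proto, clear the repeated field in place, extend with the new values); the feature name and values are hypothetical, and only `tensorflow` plus the standard library are assumed:

import copy

import tensorflow as tf

example = tf.train.Example()
example.features.feature['age'].int64_list.value.append(42)  # hypothetical feature

mutant = copy.deepcopy(example)  # never mutate the caller's proto
values = mutant.features.feature['age'].int64_list.value
new_values = list(values)
new_values[0] = 43               # the mutant value for index 0
del values[:]                    # clear the repeated field in place
values.extend(new_values)        # write the mutated values back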
maximtrp/scikit-posthocs
scikit_posthocs/_posthocs.py
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L148-L262
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True): '''Post hoc pairwise test for multiple comparisons of mean rank sums (Conover's test). May be used after Kruskal-Wallis one-way analysis of variance by ranks to do pairwise comparisons [1]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. Array must be two-dimensional. Second dimension may vary, i.e. groups may have different lengths. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. p_adjust : str, optional Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp` for details. Available methods are: 'bonferroni' : one-step correction 'sidak' : one-step correction 'holm-sidak' : step-down method using Sidak adjustments 'holm' : step-down method using Bonferroni adjustments 'simes-hochberg' : step-up method (independent) 'hommel' : closed method based on Simes tests (non-negative) 'fdr_bh' : Benjamini/Hochberg (non-negative) 'fdr_by' : Benjamini/Yekutieli (negative) 'fdr_tsbh' : two stage fdr correction (non-negative) 'fdr_tsbky' : two stage fdr correction (non-negative) sort : bool, optional Specifies whether to sort DataFrame by `group_col` or not. Recommended unless you sort your data manually. Returns ------- result : pandas DataFrame P values. Notes ----- A tie correction is employed according to Conover [1]_. References ---------- .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory. Examples -------- >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]] >>> sp.posthoc_conover(x, p_adjust = 'holm') ''' def compare_conover(i, j): diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j]) B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j]) D = (n - 1. - H_cor) / (n - x_len) t_value = diff / np.sqrt(S2 * B * D) p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len) return p_value x, _val_col, _group_col = __convert_to_df(a, val_col, group_col) if not sort: x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True) x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True) n = len(x.index) x_groups_unique = np.unique(x[_group_col]) x_len = x_groups_unique.size x_lens = x.groupby(_group_col)[_val_col].count() x['ranks'] = x[_val_col].rank() x_ranks_avg = x.groupby(_group_col)['ranks'].mean() x_ranks_sum = x.groupby(_group_col)['ranks'].sum() # ties vals = x.groupby('ranks').count()[_val_col].values tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1]) tie_sum = 0 if not tie_sum else tie_sum x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)]) H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.) H_cor = H / x_ties if x_ties == 1: S2 = n * (n + 1.) / 12. else: S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.))) vs = np.zeros((x_len, x_len)) tri_upper = np.triu_indices(vs.shape[0], 1) tri_lower = np.tril_indices(vs.shape[0], -1) vs[:,:] = 0 combs = it.combinations(range(x_len), 2) for i, j in combs: vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j]) if p_adjust: vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1] vs[tri_lower] = vs.T[tri_lower] np.fill_diagonal(vs, -1) return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
[ "def", "posthoc_conover", "(", "a", ",", "val_col", "=", "None", ",", "group_col", "=", "None", ",", "p_adjust", "=", "None", ",", "sort", "=", "True", ")", ":", "def", "compare_conover", "(", "i", ",", "j", ")", ":", "diff", "=", "np", ".", "abs", "(", "x_ranks_avg", ".", "loc", "[", "i", "]", "-", "x_ranks_avg", ".", "loc", "[", "j", "]", ")", "B", "=", "(", "1.", "/", "x_lens", ".", "loc", "[", "i", "]", "+", "1.", "/", "x_lens", ".", "loc", "[", "j", "]", ")", "D", "=", "(", "n", "-", "1.", "-", "H_cor", ")", "/", "(", "n", "-", "x_len", ")", "t_value", "=", "diff", "/", "np", ".", "sqrt", "(", "S2", "*", "B", "*", "D", ")", "p_value", "=", "2.", "*", "ss", ".", "t", ".", "sf", "(", "np", ".", "abs", "(", "t_value", ")", ",", "df", "=", "n", "-", "x_len", ")", "return", "p_value", "x", ",", "_val_col", ",", "_group_col", "=", "__convert_to_df", "(", "a", ",", "val_col", ",", "group_col", ")", "if", "not", "sort", ":", "x", "[", "_group_col", "]", "=", "Categorical", "(", "x", "[", "_group_col", "]", ",", "categories", "=", "x", "[", "_group_col", "]", ".", "unique", "(", ")", ",", "ordered", "=", "True", ")", "x", ".", "sort_values", "(", "by", "=", "[", "_group_col", ",", "_val_col", "]", ",", "ascending", "=", "True", ",", "inplace", "=", "True", ")", "n", "=", "len", "(", "x", ".", "index", ")", "x_groups_unique", "=", "np", ".", "unique", "(", "x", "[", "_group_col", "]", ")", "x_len", "=", "x_groups_unique", ".", "size", "x_lens", "=", "x", ".", "groupby", "(", "_group_col", ")", "[", "_val_col", "]", ".", "count", "(", ")", "x", "[", "'ranks'", "]", "=", "x", "[", "_val_col", "]", ".", "rank", "(", ")", "x_ranks_avg", "=", "x", ".", "groupby", "(", "_group_col", ")", "[", "'ranks'", "]", ".", "mean", "(", ")", "x_ranks_sum", "=", "x", ".", "groupby", "(", "_group_col", ")", "[", "'ranks'", "]", ".", "sum", "(", ")", "# ties", "vals", "=", "x", ".", "groupby", "(", "'ranks'", ")", ".", "count", "(", ")", "[", "_val_col", "]", ".", "values", "tie_sum", "=", "np", ".", "sum", "(", "vals", "[", "vals", "!=", "1", "]", "**", "3", "-", "vals", "[", "vals", "!=", "1", "]", ")", "tie_sum", "=", "0", "if", "not", "tie_sum", "else", "tie_sum", "x_ties", "=", "np", ".", "min", "(", "[", "1.", ",", "1.", "-", "tie_sum", "/", "(", "n", "**", "3.", "-", "n", ")", "]", ")", "H", "=", "(", "12.", "/", "(", "n", "*", "(", "n", "+", "1.", ")", ")", ")", "*", "np", ".", "sum", "(", "x_ranks_sum", "**", "2", "/", "x_lens", ")", "-", "3.", "*", "(", "n", "+", "1.", ")", "H_cor", "=", "H", "/", "x_ties", "if", "x_ties", "==", "1", ":", "S2", "=", "n", "*", "(", "n", "+", "1.", ")", "/", "12.", "else", ":", "S2", "=", "(", "1.", "/", "(", "n", "-", "1.", ")", ")", "*", "(", "np", ".", "sum", "(", "x", "[", "'ranks'", "]", "**", "2.", ")", "-", "(", "n", "*", "(", "(", "(", "n", "+", "1.", ")", "**", "2.", ")", "/", "4.", ")", ")", ")", "vs", "=", "np", ".", "zeros", "(", "(", "x_len", ",", "x_len", ")", ")", "tri_upper", "=", "np", ".", "triu_indices", "(", "vs", ".", "shape", "[", "0", "]", ",", "1", ")", "tri_lower", "=", "np", ".", "tril_indices", "(", "vs", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "vs", "[", ":", ",", ":", "]", "=", "0", "combs", "=", "it", ".", "combinations", "(", "range", "(", "x_len", ")", ",", "2", ")", "for", "i", ",", "j", "in", "combs", ":", "vs", "[", "i", ",", "j", "]", "=", "compare_conover", "(", "x_groups_unique", "[", "i", "]", ",", "x_groups_unique", "[", "j", "]", ")", "if", "p_adjust", ":", "vs", "[", "tri_upper", "]", "=", "multipletests", "(", "vs", 
"[", "tri_upper", "]", ",", "method", "=", "p_adjust", ")", "[", "1", "]", "vs", "[", "tri_lower", "]", "=", "vs", ".", "T", "[", "tri_lower", "]", "np", ".", "fill_diagonal", "(", "vs", ",", "-", "1", ")", "return", "DataFrame", "(", "vs", ",", "index", "=", "x_groups_unique", ",", "columns", "=", "x_groups_unique", ")" ]
Post hoc pairwise test for multiple comparisons of mean rank sums (Conover's test). May be used after Kruskal-Wallis one-way analysis of variance by ranks to do pairwise comparisons [1]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. Array must be two-dimensional. Second dimension may vary, i.e. groups may have different lengths. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. p_adjust : str, optional Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp` for details. Available methods are: 'bonferroni' : one-step correction 'sidak' : one-step correction 'holm-sidak' : step-down method using Sidak adjustments 'holm' : step-down method using Bonferroni adjustments 'simes-hochberg' : step-up method (independent) 'hommel' : closed method based on Simes tests (non-negative) 'fdr_bh' : Benjamini/Hochberg (non-negative) 'fdr_by' : Benjamini/Yekutieli (negative) 'fdr_tsbh' : two stage fdr correction (non-negative) 'fdr_tsbky' : two stage fdr correction (non-negative) sort : bool, optional Specifies whether to sort DataFrame by `group_col` or not. Recommended unless you sort your data manually. Returns ------- result : pandas DataFrame P values. Notes ----- A tie correction is employed according to Conover [1]_. References ---------- .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory. Examples -------- >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]] >>> sp.posthoc_conover(x, p_adjust = 'holm')
[ "Post", "hoc", "pairwise", "test", "for", "multiple", "comparisons", "of", "mean", "rank", "sums", "(", "Conover", "s", "test", ")", ".", "May", "be", "used", "after", "Kruskal", "-", "Wallis", "one", "-", "way", "analysis", "of", "variance", "by", "ranks", "to", "do", "pairwise", "comparisons", "[", "1", "]", "_", "." ]
python
train
royi1000/py-libhdate
hdate/date.py
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L458-L505
def get_omer_string(omer): # pylint: disable=too-many-branches """Return a string representing the count of the Omer.""" # TODO: The following function should be simplified (see pylint) tens = [u"", u"עשרה", u"עשרים", u"שלושים", u"ארבעים"] ones = [u"", u"אחד", u"שנים", u"שלושה", u"ארבעה", u"חמשה", u"ששה", u"שבעה", u"שמונה", u"תשעה"] if not 0 < omer < 50: raise ValueError('Invalid Omer day: {}'.format(omer)) ten = omer // 10 one = omer % 10 omer_string = u'היום ' if 10 < omer < 20: omer_string += ones[one] + u' עשר' elif omer > 9: omer_string += ones[one] if one: omer_string += u' ו' if omer > 2: if omer > 20 or omer in [10, 20]: omer_string += tens[ten] if omer < 11: omer_string += ones[one] + u' ימים ' else: omer_string += u' יום ' elif omer == 1: omer_string += u'יום אחד ' else: # omer == 2 omer_string += u'שני ימים ' if omer > 6: omer_string += u'שהם ' weeks = omer // 7 days = omer % 7 if weeks > 2: omer_string += ones[weeks] + u' שבועות ' elif weeks == 1: omer_string += u'שבוע אחד ' else: # weeks == 2 omer_string += u'שני שבועות ' if days: omer_string += u'ו' if days > 2: omer_string += ones[days] + u' ימים ' elif days == 1: omer_string += u'יום אחד ' else: # days == 2 omer_string += u'שני ימים ' omer_string += u'לעומר' return omer_string
[ "def", "get_omer_string", "(", "omer", ")", ":", "# pylint: disable=too-many-branches", "# TODO: The following function should be simplified (see pylint)", "tens", "=", "[", "u\"\"", ",", "u\"עשרה\", u\"", "ע", "רים\", u\"שלושי", "ם", ", u\"ארבעים\"]", "", "", "", "ones", "=", "[", "u\"\"", ",", "u\"אחד\", u", "\"", "נים\", u\"שלו", "ש", "\", u\"ארבעה\", ", "u", "חמשה\",", "", "", "", "u\"ששה\", u", "\"", "בעה\", u\"שמו", "נ", "\", u\"תשעה\"]", "", "", "", "if", "not", "0", "<", "omer", "<", "50", ":", "raise", "ValueError", "(", "'Invalid Omer day: {}'", ".", "format", "(", "omer", ")", ")", "ten", "=", "omer", "//", "10", "one", "=", "omer", "%", "10", "omer_string", "=", "u'היום '", "if", "10", "<", "omer", "<", "20", ":", "omer_string", "+=", "ones", "[", "one", "]", "+", "u' עשר'", "elif", "omer", ">", "9", ":", "omer_string", "+=", "ones", "[", "one", "]", "if", "one", ":", "omer_string", "+=", "u' ו'", "if", "omer", ">", "2", ":", "if", "omer", ">", "20", "or", "omer", "in", "[", "10", ",", "20", "]", ":", "omer_string", "+=", "tens", "[", "ten", "]", "if", "omer", "<", "11", ":", "omer_string", "+=", "ones", "[", "one", "]", "+", "u' ימים '", "else", ":", "omer_string", "+=", "u' יום '", "elif", "omer", "==", "1", ":", "omer_string", "+=", "u'יום אחד '", "else", ":", "# omer == 2", "omer_string", "+=", "u'שני ימים '", "if", "omer", ">", "6", ":", "omer_string", "+=", "u'שהם '", "weeks", "=", "omer", "//", "7", "days", "=", "omer", "%", "7", "if", "weeks", ">", "2", ":", "omer_string", "+=", "ones", "[", "weeks", "]", "+", "u' שבועות '", "elif", "weeks", "==", "1", ":", "omer_string", "+=", "u'שבוע אחד '", "else", ":", "# weeks == 2", "omer_string", "+=", "u'שני שבועות '", "if", "days", ":", "omer_string", "+=", "u'ו'", "if", "days", ">", "2", ":", "omer_string", "+=", "ones", "[", "days", "]", "+", "u' ימים '", "elif", "days", "==", "1", ":", "omer_string", "+=", "u'יום אחד '", "else", ":", "# days == 2", "omer_string", "+=", "u'שני ימים '", "omer_string", "+=", "u'לעומר'", "return", "omer_string" ]
Return a string representing the count of the Omer.
[ "Return", "a", "string", "representing", "the", "count", "of", "the", "Omer", "." ]
python
train
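A small usage sketch for `get_omer_string` above, assuming it is importable from `hdate.date` as the record's path suggests:

from hdate.date import get_omer_string

print(get_omer_string(1))   # 'היום יום אחד לעומר'
print(get_omer_string(33))  # Hebrew count for day 33, including weeks and days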
dnouri/nolearn
nolearn/lasagne/util.py
https://github.com/dnouri/nolearn/blob/2ef346c869e80fc90247916e4aea5cfa7cf2edda/nolearn/lasagne/util.py#L57-L91
def get_real_filter(layers, img_size): """Get the real filter sizes of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks. """ real_filter = np.zeros((len(layers), 2)) conv_mode = True first_conv_layer = True expon = np.ones((1, 2)) for i, layer in enumerate(layers[1:]): j = i + 1 if not conv_mode: real_filter[j] = img_size continue if is_conv2d(layer): if not first_conv_layer: new_filter = np.array(layer.filter_size) * expon real_filter[j] = new_filter else: new_filter = np.array(layer.filter_size) * expon real_filter[j] = new_filter first_conv_layer = False elif is_maxpool2d(layer): real_filter[j] = real_filter[i] expon *= np.array(layer.pool_size) else: conv_mode = False real_filter[j] = img_size real_filter[0] = img_size return real_filter
[ "def", "get_real_filter", "(", "layers", ",", "img_size", ")", ":", "real_filter", "=", "np", ".", "zeros", "(", "(", "len", "(", "layers", ")", ",", "2", ")", ")", "conv_mode", "=", "True", "first_conv_layer", "=", "True", "expon", "=", "np", ".", "ones", "(", "(", "1", ",", "2", ")", ")", "for", "i", ",", "layer", "in", "enumerate", "(", "layers", "[", "1", ":", "]", ")", ":", "j", "=", "i", "+", "1", "if", "not", "conv_mode", ":", "real_filter", "[", "j", "]", "=", "img_size", "continue", "if", "is_conv2d", "(", "layer", ")", ":", "if", "not", "first_conv_layer", ":", "new_filter", "=", "np", ".", "array", "(", "layer", ".", "filter_size", ")", "*", "expon", "real_filter", "[", "j", "]", "=", "new_filter", "else", ":", "new_filter", "=", "np", ".", "array", "(", "layer", ".", "filter_size", ")", "*", "expon", "real_filter", "[", "j", "]", "=", "new_filter", "first_conv_layer", "=", "False", "elif", "is_maxpool2d", "(", "layer", ")", ":", "real_filter", "[", "j", "]", "=", "real_filter", "[", "i", "]", "expon", "*=", "np", ".", "array", "(", "layer", ".", "pool_size", ")", "else", ":", "conv_mode", "=", "False", "real_filter", "[", "j", "]", "=", "img_size", "real_filter", "[", "0", "]", "=", "img_size", "return", "real_filter" ]
Get the real filter sizes of each layer involved in convolution. See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
[ "Get", "the", "real", "filter", "sizes", "of", "each", "layer", "involved", "in", "convoluation", ".", "See", "Xudong", "Cao", ":", "https", ":", "//", "www", ".", "kaggle", ".", "com", "/", "c", "/", "datasciencebowl", "/", "forums", "/", "t", "/", "13166", "/", "happy", "-", "lantern", "-", "festival", "-", "report", "-", "and", "-", "code", "This", "does", "not", "yet", "take", "into", "consideration", "feature", "pooling", "padding", "striding", "and", "similar", "gimmicks", "." ]
python
train
taskcluster/taskcluster-client.py
taskcluster/awsprovisionerevents.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/awsprovisionerevents.py#L24-L60
def workerTypeCreated(self, *args, **kwargs): """ WorkerType Created Message When a new `workerType` is created a message will be published to this exchange. This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * workerType: WorkerType that this message concerns. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. """ ref = { 'exchange': 'worker-type-created', 'name': 'workerTypeCreated', 'routingKey': [ { 'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind', }, { 'multipleWords': False, 'name': 'workerType', }, { 'multipleWords': True, 'name': 'reserved', }, ], 'schema': 'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#', } return self._makeTopicExchange(ref, *args, **kwargs)
[ "def", "workerTypeCreated", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ref", "=", "{", "'exchange'", ":", "'worker-type-created'", ",", "'name'", ":", "'workerTypeCreated'", ",", "'routingKey'", ":", "[", "{", "'constant'", ":", "'primary'", ",", "'multipleWords'", ":", "False", ",", "'name'", ":", "'routingKeyKind'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'workerType'", ",", "}", ",", "{", "'multipleWords'", ":", "True", ",", "'name'", ":", "'reserved'", ",", "}", ",", "]", ",", "'schema'", ":", "'http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#'", ",", "}", "return", "self", ".", "_makeTopicExchange", "(", "ref", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
WorkerType Created Message When a new `workerType` is created, a message will be published to this exchange. This exchange outputs: ``http://schemas.taskcluster.net/aws-provisioner/v1/worker-type-message.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * workerType: WorkerType that this message concerns. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
[ "WorkerType", "Created", "Message" ]
python
train
josuebrunel/myql
myql/contrib/table/base.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/table/base.py#L56-L66
def _buildElementTree(self,): """Turns object into a Element Tree """ t_binder = ctree.Element(self.name) for k,v in self.__dict__.items(): if k not in ('name', 'urls', 'inputs', 'paging') and v : t_binder.set(k,v) self.etree = t_binder return t_binder
[ "def", "_buildElementTree", "(", "self", ",", ")", ":", "t_binder", "=", "ctree", ".", "Element", "(", "self", ".", "name", ")", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "k", "not", "in", "(", "'name'", ",", "'urls'", ",", "'inputs'", ",", "'paging'", ")", "and", "v", ":", "t_binder", ".", "set", "(", "k", ",", "v", ")", "self", ".", "etree", "=", "t_binder", "return", "t_binder" ]
Turns object into a Element Tree
[ "Turns", "object", "into", "a", "Element", "Tree" ]
python
train
Azure/blobxfer
blobxfer/models/offload.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/offload.py#L82-L97
def _initialize_processes(self, target, num_workers, description): # type: (_MultiprocessOffload, function, int, str) -> None """Initialize processes :param _MultiprocessOffload self: this :param function target: target function for process :param int num_workers: number of worker processes :param str description: description """ if num_workers is None or num_workers < 1: raise ValueError('invalid num_workers: {}'.format(num_workers)) logger.debug('initializing {}{} processes'.format( num_workers, ' ' + description if description is not None else '')) for _ in range(num_workers): proc = multiprocessing.Process(target=target) proc.start() self._procs.append(proc)
[ "def", "_initialize_processes", "(", "self", ",", "target", ",", "num_workers", ",", "description", ")", ":", "# type: (_MultiprocessOffload, function, int, str) -> None", "if", "num_workers", "is", "None", "or", "num_workers", "<", "1", ":", "raise", "ValueError", "(", "'invalid num_workers: {}'", ".", "format", "(", "num_workers", ")", ")", "logger", ".", "debug", "(", "'initializing {}{} processes'", ".", "format", "(", "num_workers", ",", "' '", "+", "description", "if", "not", "None", "else", "''", ")", ")", "for", "_", "in", "range", "(", "num_workers", ")", ":", "proc", "=", "multiprocessing", ".", "Process", "(", "target", "=", "target", ")", "proc", ".", "start", "(", ")", "self", ".", "_procs", ".", "append", "(", "proc", ")" ]
Initialize processes :param _MultiprocessOffload self: this :param function target: target function for process :param int num_workers: number of worker processes :param str description: description
[ "Initialize", "processes", ":", "param", "_MultiprocessOffload", "self", ":", "this", ":", "param", "function", "target", ":", "target", "function", "for", "process", ":", "param", "int", "num_workers", ":", "number", "of", "worker", "processes", ":", "param", "str", "description", ":", "description" ]
python
train
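The worker-pool start-up pattern from `_initialize_processes` above, reduced to a standalone sketch; the `worker` body is a placeholder, and the join loop is added so the demo exits cleanly:

import multiprocessing

def worker():
    pass  # in blobxfer each process would pull offload work from shared queues

if __name__ == '__main__':
    procs = []
    for _ in range(2):  # num_workers must be >= 1, as the guard above enforces
        proc = multiprocessing.Process(target=worker)
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()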
flo-compbio/genometools
genometools/ncbi/sra/find_experiment_runs.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ncbi/sra/find_experiment_runs.py#L79-L140
def main(args=None): """Download all .sra from NCBI SRA for a given experiment ID. Parameters ---------- args: argparse.Namespace object, optional The argument values. If not specified, the values will be obtained by parsing the command line arguments using the `argparse` module. Returns ------- int Exit code (0 if no error occurred). """ if args is None: # parse command-line arguments parser = get_argument_parser() args = parser.parse_args() experiment_file = args.experiment_file output_file = args.output_file # log_file = args.log_file # quiet = args.quiet # verbose = args.verbose # logger = misc.get_logger(log_file=log_file, quiet=quiet, # verbose=verbose) host = 'ftp-trace.ncbi.nlm.nih.gov' user = 'anonymous' password = 'anonymous' # output_dir = download_dir + experiment_id + '/' # make sure output directory exists # misc.make_sure_dir_exists(output_dir) # logger.info('Created output directory: "%s".', output_dir) experiments = misc.read_single(experiment_file) runs = [] with ftputil.FTPHost(host, user, password) as ftp_host: for exp in experiments: exp_dir = '/sra/sra-instant/reads/ByExp/sra/SRX/%s/%s/' \ % (exp[:6], exp) ftp_host.chdir(exp_dir) run_folders = ftp_host.listdir(ftp_host.curdir) # logging.info('Found %d run folders.',len(run_folders)) for folder in run_folders: files = ftp_host.listdir(folder) assert len(files) == 1 runs.append((exp, folder)) with open(output_file, 'wb') as ofh: writer = csv.writer(ofh, dialect='excel-tab', lineterminator=os.linesep, quoting=csv.QUOTE_NONE) for r in runs: writer.writerow(r) return 0
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "# parse command-line arguments", "parser", "=", "get_argument_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "experiment_file", "=", "args", ".", "experiment_file", "output_file", "=", "args", ".", "output_file", "# log_file = args.log_file", "# quiet = args.quiet", "# verbose = args.verbose", "# logger = misc.get_logger(log_file=log_file, quiet=quiet,", "# verbose=verbose)", "host", "=", "'ftp-trace.ncbi.nlm.nih.gov'", "user", "=", "'anonymous'", "password", "=", "'anonymous'", "# output_dir = download_dir + experiment_id + '/'", "# make sure output directory exists", "# misc.make_sure_dir_exists(output_dir)", "# logger.info('Created output directory: \"%s\".', output_dir)", "experiments", "=", "misc", ".", "read_single", "(", "experiment_file", ")", "runs", "=", "[", "]", "with", "ftputil", ".", "FTPHost", "(", "host", ",", "user", ",", "password", ")", "as", "ftp_host", ":", "for", "exp", "in", "experiments", ":", "exp_dir", "=", "'/sra/sra-instant/reads/ByExp/sra/SRX/%s/%s/'", "%", "(", "exp", "[", ":", "6", "]", ",", "exp", ")", "ftp_host", ".", "chdir", "(", "exp_dir", ")", "run_folders", "=", "ftp_host", ".", "listdir", "(", "ftp_host", ".", "curdir", ")", "# logging.info('Found %d run folders.',len(run_folders))", "for", "folder", "in", "run_folders", ":", "files", "=", "ftp_host", ".", "listdir", "(", "folder", ")", "assert", "len", "(", "files", ")", "==", "1", "runs", ".", "append", "(", "(", "exp", ",", "folder", ")", ")", "with", "open", "(", "output_file", ",", "'wb'", ")", "as", "ofh", ":", "writer", "=", "csv", ".", "writer", "(", "ofh", ",", "dialect", "=", "'excel-tab'", ",", "lineterminator", "=", "os", ".", "linesep", ",", "quoting", "=", "csv", ".", "QUOTE_NONE", ")", "for", "r", "in", "runs", ":", "writer", ".", "writerow", "(", "r", ")", "return", "0" ]
Download all .sra from NCBI SRA for a given experiment ID. Parameters ---------- args: argparse.Namespace object, optional The argument values. If not specified, the values will be obtained by parsing the command line arguments using the `argparse` module. Returns ------- int Exit code (0 if no error occurred).
[ "Download", "all", ".", "sra", "from", "NCBI", "SRA", "for", "a", "given", "experiment", "ID", "." ]
python
train
mikedh/trimesh
trimesh/collision.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/collision.py#L441-L512
def min_distance_single(self, mesh, transform=None, return_name=False, return_data=False): """ Get the minimum distance between a single object and any object in the manager. Parameters --------------- mesh : Trimesh object The geometry of the collision object transform : (4,4) float Homogenous transform matrix for the object return_names : bool If true, return name of the closest object return_data : bool If true, a DistanceData object is returned as well Returns ------------- distance : float Min distance between mesh and any object in the manager name : str The name of the object in the manager that was closest data : DistanceData Extra data about the distance query """ if transform is None: transform = np.eye(4) # Create FCL data b = self._get_BVH(mesh) t = fcl.Transform(transform[:3, :3], transform[:3, 3]) o = fcl.CollisionObject(b, t) # Collide with manager's objects ddata = fcl.DistanceData() if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest(enable_nearest_points=True), fcl.DistanceResult() ) self._manager.distance(o, ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance # If we want to return the objects that were collision, collect them. name, data = None, None if return_name or return_data: cg = ddata.result.o1 if cg == b: cg = ddata.result.o2 name = self._extract_name(cg) names = (name, '__external') if cg == ddata.result.o2: names = reversed(names) data = DistanceData(names, ddata.result) if return_name and return_data: return distance, name, data elif return_name: return distance, name elif return_data: return distance, data else: return distance
[ "def", "min_distance_single", "(", "self", ",", "mesh", ",", "transform", "=", "None", ",", "return_name", "=", "False", ",", "return_data", "=", "False", ")", ":", "if", "transform", "is", "None", ":", "transform", "=", "np", ".", "eye", "(", "4", ")", "# Create FCL data", "b", "=", "self", ".", "_get_BVH", "(", "mesh", ")", "t", "=", "fcl", ".", "Transform", "(", "transform", "[", ":", "3", ",", ":", "3", "]", ",", "transform", "[", ":", "3", ",", "3", "]", ")", "o", "=", "fcl", ".", "CollisionObject", "(", "b", ",", "t", ")", "# Collide with manager's objects", "ddata", "=", "fcl", ".", "DistanceData", "(", ")", "if", "return_data", ":", "ddata", "=", "fcl", ".", "DistanceData", "(", "fcl", ".", "DistanceRequest", "(", "enable_nearest_points", "=", "True", ")", ",", "fcl", ".", "DistanceResult", "(", ")", ")", "self", ".", "_manager", ".", "distance", "(", "o", ",", "ddata", ",", "fcl", ".", "defaultDistanceCallback", ")", "distance", "=", "ddata", ".", "result", ".", "min_distance", "# If we want to return the objects that were collision, collect them.", "name", ",", "data", "=", "None", ",", "None", "if", "return_name", "or", "return_data", ":", "cg", "=", "ddata", ".", "result", ".", "o1", "if", "cg", "==", "b", ":", "cg", "=", "ddata", ".", "result", ".", "o2", "name", "=", "self", ".", "_extract_name", "(", "cg", ")", "names", "=", "(", "name", ",", "'__external'", ")", "if", "cg", "==", "ddata", ".", "result", ".", "o2", ":", "names", "=", "reversed", "(", "names", ")", "data", "=", "DistanceData", "(", "names", ",", "ddata", ".", "result", ")", "if", "return_name", "and", "return_data", ":", "return", "distance", ",", "name", ",", "data", "elif", "return_name", ":", "return", "distance", ",", "name", "elif", "return_data", ":", "return", "distance", ",", "data", "else", ":", "return", "distance" ]
Get the minimum distance between a single object and any object in the manager. Parameters --------------- mesh : Trimesh object The geometry of the collision object transform : (4,4) float Homogeneous transform matrix for the object return_name : bool If true, return name of the closest object return_data : bool If true, a DistanceData object is returned as well Returns ------------- distance : float Min distance between mesh and any object in the manager name : str The name of the object in the manager that was closest data : DistanceData Extra data about the distance query
[ "Get", "the", "minimum", "distance", "between", "a", "single", "object", "and", "any", "object", "in", "the", "manager", "." ]
python
train
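A usage sketch for `min_distance_single` above, assuming trimesh is installed with its optional `python-fcl` dependency; the object names and transform are illustrative:

import numpy as np
import trimesh

manager = trimesh.collision.CollisionManager()
manager.add_object('unit_box', trimesh.creation.box())

query = trimesh.creation.icosphere()
transform = np.eye(4)
transform[:3, 3] = [5.0, 0.0, 0.0]  # place the sphere 5 units along +x

distance, name = manager.min_distance_single(
    query, transform=transform, return_name=True)
print(distance, name)  # minimum distance, and 'unit_box' as the closest object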
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1400-L1409
def unescape(self): """ Within an interpolation, evaluation, or escaping, remove HTML escaping that had been previously added. """ for i, k in enumerate(self._html_escape_table): v = self._html_escape_table[k] self.obj = self.obj.replace(v, k) return self._wrap(self.obj)
[ "def", "unescape", "(", "self", ")", ":", "for", "i", ",", "k", "in", "enumerate", "(", "self", ".", "_html_escape_table", ")", ":", "v", "=", "self", ".", "_html_escape_table", "[", "k", "]", "self", ".", "obj", "=", "self", ".", "obj", ".", "replace", "(", "v", ",", "k", ")", "return", "self", ".", "_wrap", "(", "self", ".", "obj", ")" ]
Within an interpolation, evaluation, or escaping, remove HTML escaping that had been previously added.
[ "Within", "an", "interpolation", "evaluation", "or", "escaping", "remove", "HTML", "escaping", "that", "had", "been", "previously", "added", "." ]
python
train
PyThaiNLP/pythainlp
pythainlp/ulmfit/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/ulmfit/__init__.py#L105-L116
def ungroup_emoji(toks): "Ungroup emojis" res = [] for tok in toks: if emoji.emoji_count(tok) == len(tok): for char in tok: res.append(char) else: res.append(tok) return res
[ "def", "ungroup_emoji", "(", "toks", ")", ":", "res", "=", "[", "]", "for", "tok", "in", "toks", ":", "if", "emoji", ".", "emoji_count", "(", "tok", ")", "==", "len", "(", "tok", ")", ":", "for", "char", "in", "tok", ":", "res", ".", "append", "(", "char", ")", "else", ":", "res", ".", "append", "(", "tok", ")", "return", "res" ]
Ungroup emojis
[ "Ungroup", "emojis" ]
python
train
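A usage sketch for `ungroup_emoji` above, assuming it is importable from `pythainlp.ulmfit` as the record's path suggests; a token made entirely of emoji is split into single characters, everything else passes through:

from pythainlp.ulmfit import ungroup_emoji

print(ungroup_emoji(['hello', '😀😂']))  # ['hello', '😀', '😂']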
knipknap/exscript
Exscript/util/impl.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/impl.py#L85-L91
def serializeable_exc_info(thetype, ex, tb): """ Since traceback objects can not be pickled, this function manipulates exception info tuples before they are passed across process boundaries. """ return thetype, ex, ''.join(traceback.format_exception(thetype, ex, tb))
[ "def", "serializeable_exc_info", "(", "thetype", ",", "ex", ",", "tb", ")", ":", "return", "thetype", ",", "ex", ",", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "thetype", ",", "ex", ",", "tb", ")", ")" ]
Since traceback objects can not be pickled, this function manipulates exception info tuples before they are passed across process boundaries.
[ "Since", "traceback", "objects", "can", "not", "be", "pickled", "this", "function", "manipulates", "exception", "info", "tuples", "before", "they", "are", "passed", "accross", "process", "boundaries", "." ]
python
train
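A sketch of why `serializeable_exc_info` above exists: a raw traceback object cannot cross a process boundary, but the tuple with the pre-formatted string can be pickled. Assumes the import path from the record:

import pickle
import sys

from Exscript.util.impl import serializeable_exc_info

try:
    1 / 0
except ZeroDivisionError:
    info = serializeable_exc_info(*sys.exc_info())
    payload = pickle.dumps(info)  # picklable: the traceback is now a string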
nerdvegas/rez
src/rez/resolver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolver.py#L107-L123
def solve(self): """Perform the solve. """ with log_duration(self._print, "memcache get (resolve) took %s"): solver_dict = self._get_cached_solve() if solver_dict: self.from_cache = True self._set_result(solver_dict) else: self.from_cache = False solver = self._solve() solver_dict = self._solver_to_dict(solver) self._set_result(solver_dict) with log_duration(self._print, "memcache set (resolve) took %s"): self._set_cached_solve(solver_dict)
[ "def", "solve", "(", "self", ")", ":", "with", "log_duration", "(", "self", ".", "_print", ",", "\"memcache get (resolve) took %s\"", ")", ":", "solver_dict", "=", "self", ".", "_get_cached_solve", "(", ")", "if", "solver_dict", ":", "self", ".", "from_cache", "=", "True", "self", ".", "_set_result", "(", "solver_dict", ")", "else", ":", "self", ".", "from_cache", "=", "False", "solver", "=", "self", ".", "_solve", "(", ")", "solver_dict", "=", "self", ".", "_solver_to_dict", "(", "solver", ")", "self", ".", "_set_result", "(", "solver_dict", ")", "with", "log_duration", "(", "self", ".", "_print", ",", "\"memcache set (resolve) took %s\"", ")", ":", "self", ".", "_set_cached_solve", "(", "solver_dict", ")" ]
Perform the solve.
[ "Perform", "the", "solve", "." ]
python
train
Microsoft/nni
tools/nni_cmd/nnictl_utils.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/nnictl_utils.py#L474-L507
def export_trials_data(args): """export experiment metadata to csv """ nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config('restServerPort') rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): print_error('Experiment is not running...') return running, response = check_rest_server_quick(rest_port) if running: response = rest_get(trial_jobs_url(rest_port), 20) if response is not None and check_response(response): content = json.loads(response.text) # dframe = pd.DataFrame.from_records([parse_trial_data(t_data) for t_data in content]) # dframe.to_csv(args.csv_path, sep='\t') records = parse_trial_data(content) if args.type == 'json': json_records = [] for trial in records: value = trial.pop('reward', None) trial_id = trial.pop('id', None) json_records.append({'parameter': trial, 'value': value, 'id': trial_id}) with open(args.path, 'w') as file: if args.type == 'csv': writer = csv.DictWriter(file, set.union(*[set(r.keys()) for r in records])) writer.writeheader() writer.writerows(records) else: json.dump(json_records, file) else: print_error('Export failed...') else: print_error('Restful server is not Running')
[ "def", "export_trials_data", "(", "args", ")", ":", "nni_config", "=", "Config", "(", "get_config_filename", "(", "args", ")", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "not", "detect_process", "(", "rest_pid", ")", ":", "print_error", "(", "'Experiment is not running...'", ")", "return", "running", ",", "response", "=", "check_rest_server_quick", "(", "rest_port", ")", "if", "running", ":", "response", "=", "rest_get", "(", "trial_jobs_url", "(", "rest_port", ")", ",", "20", ")", "if", "response", "is", "not", "None", "and", "check_response", "(", "response", ")", ":", "content", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "# dframe = pd.DataFrame.from_records([parse_trial_data(t_data) for t_data in content])", "# dframe.to_csv(args.csv_path, sep='\\t')", "records", "=", "parse_trial_data", "(", "content", ")", "if", "args", ".", "type", "==", "'json'", ":", "json_records", "=", "[", "]", "for", "trial", "in", "records", ":", "value", "=", "trial", ".", "pop", "(", "'reward'", ",", "None", ")", "trial_id", "=", "trial", ".", "pop", "(", "'id'", ",", "None", ")", "json_records", ".", "append", "(", "{", "'parameter'", ":", "trial", ",", "'value'", ":", "value", ",", "'id'", ":", "trial_id", "}", ")", "with", "open", "(", "args", ".", "path", ",", "'w'", ")", "as", "file", ":", "if", "args", ".", "type", "==", "'csv'", ":", "writer", "=", "csv", ".", "DictWriter", "(", "file", ",", "set", ".", "union", "(", "*", "[", "set", "(", "r", ".", "keys", "(", ")", ")", "for", "r", "in", "records", "]", ")", ")", "writer", ".", "writeheader", "(", ")", "writer", ".", "writerows", "(", "records", ")", "else", ":", "json", ".", "dump", "(", "json_records", ",", "file", ")", "else", ":", "print_error", "(", "'Export failed...'", ")", "else", ":", "print_error", "(", "'Restful server is not Running'", ")" ]
export experiment metadata to csv
[ "export", "experiment", "metadata", "to", "csv" ]
python
train
ARMmbed/icetea
icetea_lib/Plugin/plugins/LocalAllocator/DutDetection.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Plugin/plugins/LocalAllocator/DutDetection.py#L81-L102
def available_edbg_ports(self): """ Finds available EDBG COM ports. :return: list of available ports """ ports_available = sorted(list(list_ports.comports())) edbg_ports = [] for iport in ports_available: port = iport[0] desc = iport[1] hwid = iport[2] if str(desc).startswith("EDBG Virtual COM Port") or \ "VID:PID=03EB:2111" in str(hwid).upper(): # print("%-10s: %s (%s)\n" % (port, desc, hwid)) try: edbg_ports.index(port, 0) print("There is multiple %s ports with same number!" % port) except ValueError: edbg_ports.append(port) # print("Detected %i DUT's" % len(edbg_ports)) return edbg_ports
[ "def", "available_edbg_ports", "(", "self", ")", ":", "ports_available", "=", "sorted", "(", "list", "(", "list_ports", ".", "comports", "(", ")", ")", ")", "edbg_ports", "=", "[", "]", "for", "iport", "in", "ports_available", ":", "port", "=", "iport", "[", "0", "]", "desc", "=", "iport", "[", "1", "]", "hwid", "=", "iport", "[", "2", "]", "if", "str", "(", "desc", ")", ".", "startswith", "(", "\"EDBG Virtual COM Port\"", ")", "or", "\"VID:PID=03EB:2111\"", "in", "str", "(", "hwid", ")", ".", "upper", "(", ")", ":", "# print(\"%-10s: %s (%s)\\n\" % (port, desc, hwid))", "try", ":", "edbg_ports", ".", "index", "(", "port", ",", "0", ")", "print", "(", "\"There is multiple %s ports with same number!\"", "%", "port", ")", "except", "ValueError", ":", "edbg_ports", ".", "append", "(", "port", ")", "# print(\"Detected %i DUT's\" % len(edbg_ports))", "return", "edbg_ports" ]
Finds available EDBG COM ports. :return: list of available ports
[ "Finds", "available", "EDBG", "COM", "ports", "." ]
python
train
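The detection logic of `available_edbg_ports` above, reduced to a standalone pyserial sketch (no duplicate-port bookkeeping):

from serial.tools import list_ports

for port, desc, hwid in sorted(list_ports.comports()):
    if desc.startswith('EDBG Virtual COM Port') or 'VID:PID=03EB:2111' in hwid.upper():
        print(port)  # candidate EDBG DUT port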
neurosynth/neurosynth
neurosynth/base/lexparser.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/lexparser.py#L70-L73
def p_list_and(self, p): 'list : list AND list' p[0] = pd.concat( [p[1], p[3]], axis=1).dropna().apply(self.func, axis=1)
[ "def", "p_list_and", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "pd", ".", "concat", "(", "[", "p", "[", "1", "]", ",", "p", "[", "3", "]", "]", ",", "axis", "=", "1", ")", ".", "dropna", "(", ")", ".", "apply", "(", "self", ".", "func", ",", "axis", "=", "1", ")" ]
list : list AND list
[ "list", ":", "list", "AND", "list" ]
python
test
welbornprod/colr
colr/controls.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/controls.py#L178-L186
def move_pos(line=1, column=1, file=sys.stdout): """ Move the cursor to a new position. Values are 1-based, and default to 1. Esc[<line>;<column>H or Esc[<line>;<column>f """ move.pos(line=line, col=column).write(file=file)
[ "def", "move_pos", "(", "line", "=", "1", ",", "column", "=", "1", ",", "file", "=", "sys", ".", "stdout", ")", ":", "move", ".", "pos", "(", "line", "=", "line", ",", "col", "=", "column", ")", ".", "write", "(", "file", "=", "file", ")" ]
Move the cursor to a new position. Values are 1-based, and default to 1. Esc[<line>;<column>H or Esc[<line>;<column>f
[ "Move", "the", "cursor", "to", "a", "new", "position", ".", "Values", "are", "1", "-", "based", "and", "default", "to", "1", "." ]
python
train
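A usage sketch for `move_pos` above; in a real terminal this repositions the cursor before each write (coordinates are 1-based):

import sys

from colr.controls import move_pos

move_pos(1, 1)               # top-left corner
sys.stdout.write('@')
move_pos(line=5, column=10)  # row 5, column 10
sys.stdout.write('#')
sys.stdout.flush()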
xigt/xigt
xigt/ref.py
https://github.com/xigt/xigt/blob/3736fbb6d26887181de57bd189cbd731cec91289/xigt/ref.py#L104-L134
def selections(expression, keep_delimiters=True): """ Split the expression into individual selection expressions. The delimiters will be kept as separate items if keep_delimiters=True. Also works on space-separated ID lists, although a sequence of space characters will be considered a delimiter. >>> selections('a1') ['a1'] >>> selections('a1[3:5]') ['a1[3:5]'] >>> selections('a1[3:5+6:7]') ['a1[3:5+6:7]'] >>> selections('a1[3:5+6:7]+a2[1:4]') ['a1[3:5+6:7]', '+', 'a2[1:4]'] >>> selections('a1[3:5+6:7]+a2[1:4]', keep_delimiters=False) ['a1[3:5+6:7]', 'a2[1:4]'] >>> selections('a1 a2 a3') ['a1', ' ', 'a2', ' ', 'a3'] """ tokens = [] for (pre, _id, _range) in robust_ref_re.findall(expression): if keep_delimiters and pre: tokens.append(pre) if _id: if _range: tokens.append('{}[{}]'.format(_id, _range)) else: tokens.append(_id) return tokens
[ "def", "selections", "(", "expression", ",", "keep_delimiters", "=", "True", ")", ":", "tokens", "=", "[", "]", "for", "(", "pre", ",", "_id", ",", "_range", ")", "in", "robust_ref_re", ".", "findall", "(", "expression", ")", ":", "if", "keep_delimiters", "and", "pre", ":", "tokens", ".", "append", "(", "pre", ")", "if", "_id", ":", "if", "_range", ":", "tokens", ".", "append", "(", "'{}[{}]'", ".", "format", "(", "_id", ",", "_range", ")", ")", "else", ":", "tokens", ".", "append", "(", "_id", ")", "return", "tokens" ]
Split the expression into individual selection expressions. The delimiters will be kept as separate items if keep_delimiters=True. Also works on space-separated ID lists, although a sequence of space characters will be considered a delimiter. >>> selections('a1') ['a1'] >>> selections('a1[3:5]') ['a1[3:5]'] >>> selections('a1[3:5+6:7]') ['a1[3:5+6:7]'] >>> selections('a1[3:5+6:7]+a2[1:4]') ['a1[3:5+6:7]', '+', 'a2[1:4]'] >>> selections('a1[3:5+6:7]+a2[1:4]', keep_delimiters=False) ['a1[3:5+6:7]', 'a2[1:4]'] >>> selections('a1 a2 a3') ['a1', ' ', 'a2', ' ', 'a3']
[ "Split", "the", "expression", "into", "individual", "selection", "expressions", ".", "The", "delimiters", "will", "be", "kept", "as", "separate", "items", "if", "keep_delimters", "=", "True", ".", "Also", "works", "on", "space", "-", "separated", "ID", "lists", "although", "a", "sequence", "of", "space", "characters", "will", "be", "considered", "a", "delimiter", "." ]
python
train
bokeh/bokeh
bokeh/document/document.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/document/document.py#L226-L244
def add_next_tick_callback(self, callback): ''' Add callback to be invoked once on the next tick of the event loop. Args: callback (callable) : A callback function to execute on the next tick. Returns: NextTickCallback : can be used with ``remove_next_tick_callback`` .. note:: Next tick callbacks only work within the context of a Bokeh server session. This function will have no effect when Bokeh outputs to standalone HTML or Jupyter notebook cells. ''' from ..server.callbacks import NextTickCallback cb = NextTickCallback(self, None) return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_next_tick_callback)
[ "def", "add_next_tick_callback", "(", "self", ",", "callback", ")", ":", "from", ".", ".", "server", ".", "callbacks", "import", "NextTickCallback", "cb", "=", "NextTickCallback", "(", "self", ",", "None", ")", "return", "self", ".", "_add_session_callback", "(", "cb", ",", "callback", ",", "one_shot", "=", "True", ",", "originator", "=", "self", ".", "add_next_tick_callback", ")" ]
Add callback to be invoked once on the next tick of the event loop. Args: callback (callable) : A callback function to execute on the next tick. Returns: NextTickCallback : can be used with ``remove_next_tick_callback`` .. note:: Next tick callbacks only work within the context of a Bokeh server session. This function will have no effect when Bokeh outputs to standalone HTML or Jupyter notebook cells.
[ "Add", "callback", "to", "be", "invoked", "once", "on", "the", "next", "tick", "of", "the", "event", "loop", "." ]
python
train
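A usage sketch for `add_next_tick_callback` above; as the note says, it only does anything inside a Bokeh server session (e.g. an app run with `bokeh serve app.py`):

from bokeh.io import curdoc

def update():
    print('runs once, on the next tick of the server event loop')

curdoc().add_next_tick_callback(update)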
dronekit/dronekit-python
dronekit/__init__.py
https://github.com/dronekit/dronekit-python/blob/91c147fa61f521f5fff5d0cee06d07ed93614af8/dronekit/__init__.py#L2587-L2617
def rotate(self, pitch, roll, yaw): """ Rotate the gimbal to a specific vector. .. code-block:: python #Point the gimbal straight down vehicle.gimbal.rotate(-90, 0, 0) :param pitch: Gimbal pitch in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). A value of 0 represents a camera pointed straight ahead relative to the front of the vehicle, while -90 points the camera straight down. :param roll: Gimbal roll in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). :param yaw: Gimbal yaw in degrees relative to *global frame* (0 is North, 90 is West, 180 is South etc.) """ msg = self._vehicle.message_factory.mount_configure_encode( 0, 1, # target system, target component mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING, #mount_mode 1, # stabilize roll 1, # stabilize pitch 1, # stabilize yaw ) self._vehicle.send_mavlink(msg) msg = self._vehicle.message_factory.mount_control_encode( 0, 1, # target system, target component pitch * 100, # pitch is in centidegrees roll * 100, # roll yaw * 100, # yaw is in centidegrees 0 # save position ) self._vehicle.send_mavlink(msg)
[ "def", "rotate", "(", "self", ",", "pitch", ",", "roll", ",", "yaw", ")", ":", "msg", "=", "self", ".", "_vehicle", ".", "message_factory", ".", "mount_configure_encode", "(", "0", ",", "1", ",", "# target system, target component", "mavutil", ".", "mavlink", ".", "MAV_MOUNT_MODE_MAVLINK_TARGETING", ",", "#mount_mode", "1", ",", "# stabilize roll", "1", ",", "# stabilize pitch", "1", ",", "# stabilize yaw", ")", "self", ".", "_vehicle", ".", "send_mavlink", "(", "msg", ")", "msg", "=", "self", ".", "_vehicle", ".", "message_factory", ".", "mount_control_encode", "(", "0", ",", "1", ",", "# target system, target component", "pitch", "*", "100", ",", "# pitch is in centidegrees", "roll", "*", "100", ",", "# roll", "yaw", "*", "100", ",", "# yaw is in centidegrees", "0", "# save position", ")", "self", ".", "_vehicle", ".", "send_mavlink", "(", "msg", ")" ]
Rotate the gimbal to a specific vector. .. code-block:: python #Point the gimbal straight down vehicle.gimbal.rotate(-90, 0, 0) :param pitch: Gimbal pitch in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). A value of 0 represents a camera pointed straight ahead relative to the front of the vehicle, while -90 points the camera straight down. :param roll: Gimbal roll in degrees relative to the vehicle (see diagram for :ref:`attitude <figure_attitude>`). :param yaw: Gimbal yaw in degrees relative to *global frame* (0 is North, 90 is West, 180 is South etc.)
[ "Rotate", "the", "gimbal", "to", "a", "specific", "vector", "." ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/blob.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/blob.py#L1496-L1527
def compose(self, sources, client=None): """Concatenate source blobs into this one. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type sources: list of :class:`Blob` :param sources: blobs whose contents will be composed into this blob. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ client = self._require_client(client) query_params = {} if self.user_project is not None: query_params["userProject"] = self.user_project request = { "sourceObjects": [{"name": source.name} for source in sources], "destination": self._properties.copy(), } api_response = client._connection.api_request( method="POST", path=self.path + "/compose", query_params=query_params, data=request, _target_object=self, ) self._set_properties(api_response)
[ "def", "compose", "(", "self", ",", "sources", ",", "client", "=", "None", ")", ":", "client", "=", "self", ".", "_require_client", "(", "client", ")", "query_params", "=", "{", "}", "if", "self", ".", "user_project", "is", "not", "None", ":", "query_params", "[", "\"userProject\"", "]", "=", "self", ".", "user_project", "request", "=", "{", "\"sourceObjects\"", ":", "[", "{", "\"name\"", ":", "source", ".", "name", "}", "for", "source", "in", "sources", "]", ",", "\"destination\"", ":", "self", ".", "_properties", ".", "copy", "(", ")", ",", "}", "api_response", "=", "client", ".", "_connection", ".", "api_request", "(", "method", "=", "\"POST\"", ",", "path", "=", "self", ".", "path", "+", "\"/compose\"", ",", "query_params", "=", "query_params", ",", "data", "=", "request", ",", "_target_object", "=", "self", ",", ")", "self", ".", "_set_properties", "(", "api_response", ")" ]
Concatenate source blobs into this one. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type sources: list of :class:`Blob` :param sources: blobs whose contents will be composed into this blob. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket.
[ "Concatenate", "source", "blobs", "into", "this", "one", "." ]
python
train
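A usage sketch for `Blob.compose` above; the bucket and object names are hypothetical, and the source blobs must already exist in the bucket:

from google.cloud import storage

client = storage.Client()
bucket = client.bucket('my-bucket')  # hypothetical bucket

destination = bucket.blob('combined.txt')
destination.compose([bucket.blob('part-1.txt'),
                     bucket.blob('part-2.txt')])  # server-side concatenation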
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1394-L1410
def _extract_programming_immediate_instructions_text(self, element_id): """ Extract assignment text (instructions). @param element_id: Element id to extract assignment instructions from. @type element_id: str @return: List of assignment text (instructions). @rtype: [str] """ dom = get_page(self._session, OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL, json=True, course_id=self._course_id, element_id=element_id) return [element['assignmentInstructions']['definition']['value'] for element in dom['elements']]
[ "def", "_extract_programming_immediate_instructions_text", "(", "self", ",", "element_id", ")", ":", "dom", "=", "get_page", "(", "self", ".", "_session", ",", "OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL", ",", "json", "=", "True", ",", "course_id", "=", "self", ".", "_course_id", ",", "element_id", "=", "element_id", ")", "return", "[", "element", "[", "'assignmentInstructions'", "]", "[", "'definition'", "]", "[", "'value'", "]", "for", "element", "in", "dom", "[", "'elements'", "]", "]" ]
Extract assignment text (instructions). @param element_id: Element id to extract assignment instructions from. @type element_id: str @return: List of assignment text (instructions). @rtype: [str]
[ "Extract", "assignment", "text", "(", "instructions", ")", "." ]
python
train
invenia/Arbiter
arbiter/utils.py
https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/utils.py#L74-L100
def retry(retries=0, delay=timedelta(), conditions=[]): """ A decorator for making a function that retries on failure. Args: retries (Integral): The number of times to retry if a failure occurs. delay (timedelta, optional, 0 seconds): A timedelta representing the amount of time to delay between retries. conditions (list): A list of retry conditions. """ delay_in_seconds = delay.total_seconds() def decorator(function): """ The actual decorator for retrying. """ @wraps(function) def wrapper(*args, **kwargs): """ The actual wrapper for retrying. """ func = partial(function, *args, **kwargs) return retry_loop(retries, delay_in_seconds, conditions, func) return wrapper return decorator
[ "def", "retry", "(", "retries", "=", "0", ",", "delay", "=", "timedelta", "(", ")", ",", "conditions", "=", "[", "]", ")", ":", "delay_in_seconds", "=", "delay", ".", "total_seconds", "(", ")", "def", "decorator", "(", "function", ")", ":", "\"\"\"\n The actual decorator for retrying.\n \"\"\"", "@", "wraps", "(", "function", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n The actual wrapper for retrying.\n \"\"\"", "func", "=", "partial", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "retry_loop", "(", "retries", ",", "delay_in_seconds", ",", "conditions", ",", "func", ")", "return", "wrapper", "return", "decorator" ]
A decorator for making a function that retries on failure. Args: retries (Integral): The number of times to retry if a failure occurs. delay (timedelta, optional, 0 seconds): A timedelta representing the amount of time to delay between retries. conditions (list): A list of retry conditions.
[ "A", "decorator", "for", "making", "a", "function", "that", "retries", "on", "failure", "." ]
python
train
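A usage sketch for the `retry` decorator above, assuming the record's import path; the retry `conditions` are left at their default:

from datetime import timedelta

from arbiter.utils import retry

@retry(retries=3, delay=timedelta(seconds=2))
def flaky_call():
    ...  # a transient failure here triggers up to 3 retries, 2 seconds apart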
aboSamoor/polyglot
polyglot/utils.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/utils.py#L16-L25
def _open(file_, mode='r'): """Open file object given filenames, open files or even archives.""" if isinstance(file_, string_types): _, ext = path.splitext(file_) if ext in {'.bz2', '.gz'}: s = tarfile.open(file_) return s.extractfile(s.next()) else: return open(file_, mode) return file_
[ "def", "_open", "(", "file_", ",", "mode", "=", "'r'", ")", ":", "if", "isinstance", "(", "file_", ",", "string_types", ")", ":", "_", ",", "ext", "=", "path", ".", "splitext", "(", "file_", ")", "if", "ext", "in", "{", "'.bz2'", ",", "'.gz'", "}", ":", "s", "=", "tarfile", ".", "open", "(", "file_", ")", "return", "s", ".", "extractfile", "(", "s", ".", "next", "(", ")", ")", "else", ":", "return", "open", "(", "file_", ",", "mode", ")", "return", "file_" ]
Open file object given filenames, open files or even archives.
[ "Open", "file", "object", "given", "filenames", "open", "files", "or", "even", "archives", "." ]
python
train
sckott/pygbif
pygbif/utils/wkt_rewind.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/utils/wkt_rewind.py#L6-L36
def wkt_rewind(x, digits = None): ''' reverse WKT winding order :param x: [str] WKT string :param digits: [int] number of digits after decimal to use for the return string. by default, we use the mean number of digits in your string. :return: a string Usage:: from pygbif import wkt_rewind x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))' wkt_rewind(x) wkt_rewind(x, digits = 0) wkt_rewind(x, digits = 3) wkt_rewind(x, digits = 7) ''' z = wkt.loads(x) if digits is None: coords = z['coordinates'] nums = __flatten(coords) dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ] digits = abs(statistics.mean(dec_n)) else: if not isinstance(digits, int): raise TypeError("'digits' must be an int") wound = rewind(z) back_to_wkt = wkt.dumps(wound, decimals = digits) return back_to_wkt
[ "def", "wkt_rewind", "(", "x", ",", "digits", "=", "None", ")", ":", "z", "=", "wkt", ".", "loads", "(", "x", ")", "if", "digits", "is", "None", ":", "coords", "=", "z", "[", "'coordinates'", "]", "nums", "=", "__flatten", "(", "coords", ")", "dec_n", "=", "[", "decimal", ".", "Decimal", "(", "str", "(", "w", ")", ")", ".", "as_tuple", "(", ")", ".", "exponent", "for", "w", "in", "nums", "]", "digits", "=", "abs", "(", "statistics", ".", "mean", "(", "dec_n", ")", ")", "else", ":", "if", "not", "isinstance", "(", "digits", ",", "int", ")", ":", "raise", "TypeError", "(", "\"'digits' must be an int\"", ")", "wound", "=", "rewind", "(", "z", ")", "back_to_wkt", "=", "wkt", ".", "dumps", "(", "wound", ",", "decimals", "=", "digits", ")", "return", "back_to_wkt" ]
reverse WKT winding order :param x: [str] WKT string :param digits: [int] number of digits after decimal to use for the return string. by default, we use the mean number of digits in your string. :return: a string Usage:: from pygbif import wkt_rewind x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))' wkt_rewind(x) wkt_rewind(x, digits = 0) wkt_rewind(x, digits = 3) wkt_rewind(x, digits = 7)
[ "reverse", "WKT", "winding", "order" ]
python
train
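The digit-inference step can be exercised on its own: each coordinate is rendered as a decimal.Decimal, whose exponent counts its decimal places, and the precision passed to wkt.dumps is the absolute mean of those exponents. A standalone sketch with hypothetical coordinates:

import decimal
import statistics

nums = [144.6, 13.2, 144.95, 13.25]
dec_n = [decimal.Decimal(str(w)).as_tuple().exponent for w in nums]
digits = abs(statistics.mean(dec_n))
assert digits == 1.5  # exponents are -1, -1, -2, -2; note the mean can be fractional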
rstoneback/pysat
pysat/instruments/supermag_magnetometer.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/supermag_magnetometer.py#L62-L128
def list_files(tag='', sat_id=None, data_path=None, format_str=None): """Return a Pandas Series of every file for chosen SuperMAG data Parameters ----------- tag : (string or NoneType) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). (default='') sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files """ if format_str is None and data_path is not None: file_base = 'supermag_magnetometer' if tag == "indices" or tag == "all": file_base += '_all' # Can't just download indices if tag == "indices": psplit = path.split(data_path[:-1]) data_path = path.join(psplit[0], "all", "") if tag == "stations": min_fmt = '_'.join([file_base, '{year:4d}.???']) doff = pds.DateOffset(years=1) else: min_fmt = '_'.join([file_base, '{year:4d}{month:02d}{day:02d}.???']) doff = pds.DateOffset(days=1) files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt) # station files are once per year but we need to # create the illusion there is a file per year if not files.empty: files = files.sort_index() if tag == "stations": orig_files = files.copy() new_files = [] # Assigns the validity of each station file to be 1 year for orig in orig_files.iteritems(): files.ix[orig[0] + doff - pds.DateOffset(days=1)] = orig[1] files = files.sort_index() new_files.append(files.ix[orig[0]: orig[0] + doff - \ pds.DateOffset(days=1)].asfreq('D', method='pad')) files = pds.concat(new_files) files = files.dropna() files = files.sort_index() # add the date to the filename files = files + '_' + files.index.strftime('%Y-%m-%d') return files elif format_str is None: estr = 'A directory must be passed to the loading routine for SuperMAG' raise ValueError (estr) else: return pysat.Files.from_os(data_path=data_path, format_str=format_str)
[ "def", "list_files", "(", "tag", "=", "''", ",", "sat_id", "=", "None", ",", "data_path", "=", "None", ",", "format_str", "=", "None", ")", ":", "if", "format_str", "is", "None", "and", "data_path", "is", "not", "None", ":", "file_base", "=", "'supermag_magnetometer'", "if", "tag", "==", "\"indices\"", "or", "tag", "==", "\"all\"", ":", "file_base", "+=", "'_all'", "# Can't just download indices", "if", "tag", "==", "\"indices\"", ":", "psplit", "=", "path", ".", "split", "(", "data_path", "[", ":", "-", "1", "]", ")", "data_path", "=", "path", ".", "join", "(", "psplit", "[", "0", "]", ",", "\"all\"", ",", "\"\"", ")", "if", "tag", "==", "\"stations\"", ":", "min_fmt", "=", "'_'", ".", "join", "(", "[", "file_base", ",", "'{year:4d}.???'", "]", ")", "doff", "=", "pds", ".", "DateOffset", "(", "years", "=", "1", ")", "else", ":", "min_fmt", "=", "'_'", ".", "join", "(", "[", "file_base", ",", "'{year:4d}{month:02d}{day:02d}.???'", "]", ")", "doff", "=", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "files", "=", "pysat", ".", "Files", ".", "from_os", "(", "data_path", "=", "data_path", ",", "format_str", "=", "min_fmt", ")", "# station files are once per year but we need to", "# create the illusion there is a file per year ", "if", "not", "files", ".", "empty", ":", "files", "=", "files", ".", "sort_index", "(", ")", "if", "tag", "==", "\"stations\"", ":", "orig_files", "=", "files", ".", "copy", "(", ")", "new_files", "=", "[", "]", "# Assigns the validity of each station file to be 1 year", "for", "orig", "in", "orig_files", ".", "iteritems", "(", ")", ":", "files", ".", "ix", "[", "orig", "[", "0", "]", "+", "doff", "-", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "]", "=", "orig", "[", "1", "]", "files", "=", "files", ".", "sort_index", "(", ")", "new_files", ".", "append", "(", "files", ".", "ix", "[", "orig", "[", "0", "]", ":", "orig", "[", "0", "]", "+", "doff", "-", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "]", ".", "asfreq", "(", "'D'", ",", "method", "=", "'pad'", ")", ")", "files", "=", "pds", ".", "concat", "(", "new_files", ")", "files", "=", "files", ".", "dropna", "(", ")", "files", "=", "files", ".", "sort_index", "(", ")", "# add the date to the filename", "files", "=", "files", "+", "'_'", "+", "files", ".", "index", ".", "strftime", "(", "'%Y-%m-%d'", ")", "return", "files", "elif", "format_str", "is", "None", ":", "estr", "=", "'A directory must be passed to the loading routine for SuperMAG'", "raise", "ValueError", "(", "estr", ")", "else", ":", "return", "pysat", ".", "Files", ".", "from_os", "(", "data_path", "=", "data_path", ",", "format_str", "=", "format_str", ")" ]
Return a Pandas Series of every file for chosen SuperMAG data Parameters ----------- tag : (string or NoneType) Denotes type of file to load. Accepted types are 'indices', 'all', 'stations', and '' (for just magnetometer measurements). (default='') sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files
[ "Return", "a", "Pandas", "Series", "of", "every", "file", "for", "chosen", "SuperMAG", "data" ]
python
train
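The station-file handling above leans on the long-removed Series.ix indexer. A sketch of the same yearly-to-daily padding trick in current pandas, with a hypothetical file name:

import pandas as pd

files = pd.Series(['supermag_stations_2010.txt'],
                  index=pd.to_datetime(['2010-01-01']))
doff = pd.DateOffset(years=1)
start = files.index[0]
# pin the same file name to the last covered day, then forward-fill daily
files.loc[start + doff - pd.DateOffset(days=1)] = files.iloc[0]
daily = files.sort_index().asfreq('D', method='pad')
daily = daily + '_' + daily.index.strftime('%Y-%m-%d')
assert len(daily) == 365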
ramses-tech/ramses
ramses/views.py
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/views.py#L37-L42
def set_object_acl(self, obj): """ Set object ACL on creation if not already present. """ if not obj._acl: from nefertari_guards import engine as guards_engine acl = self._factory(self.request).generate_item_acl(obj) obj._acl = guards_engine.ACLField.stringify_acl(acl)
[ "def", "set_object_acl", "(", "self", ",", "obj", ")", ":", "if", "not", "obj", ".", "_acl", ":", "from", "nefertari_guards", "import", "engine", "as", "guards_engine", "acl", "=", "self", ".", "_factory", "(", "self", ".", "request", ")", ".", "generate_item_acl", "(", "obj", ")", "obj", ".", "_acl", "=", "guards_engine", ".", "ACLField", ".", "stringify_acl", "(", "acl", ")" ]
Set object ACL on creation if not already present.
[ "Set", "object", "ACL", "on", "creation", "if", "not", "already", "present", "." ]
python
train
DataDog/integrations-core
kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py#L273-L296
def _condition_to_service_check(self, sample, sc_name, mapping, tags=None):
    """
    Some metrics contain conditions, labels that have "condition" as name and "true", "false", or "unknown"
    as value. The metric value is expected to be a gauge equal to 0 or 1 in this case.
    For example:

    metric {
      label { name: "condition", value: "true" }
      # other labels here
      gauge { value: 1.0 }
    }

    This function evaluates metrics containing conditions and sends a service check
    based on a provided condition->check mapping dict
    """
    if bool(sample[self.SAMPLE_VALUE]) is False:
        return  # Ignore if gauge is not 1

    condition = sample[self.SAMPLE_LABELS].get('condition')
    if condition:
        if condition in mapping:
            self.service_check(sc_name, mapping[condition], tags=tags)
        else:
            self.log.debug("Unable to handle %s - unknown condition %s" % (sc_name, condition))
[ "def", "_condition_to_service_check", "(", "self", ",", "sample", ",", "sc_name", ",", "mapping", ",", "tags", "=", "None", ")", ":", "if", "bool", "(", "sample", "[", "self", ".", "SAMPLE_VALUE", "]", ")", "is", "False", ":", "return", "# Ignore if gauge is not 1", "condition", "=", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ".", "get", "(", "'condition'", ")", "if", "condition", ":", "if", "condition", "in", "mapping", ":", "self", ".", "service_check", "(", "sc_name", ",", "mapping", "[", "condition", "]", ",", "tags", "=", "tags", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Unable to handle %s - unknown condition %s\"", "%", "(", "sc_name", ",", "condition", ")", ")" ]
Some metrics contain conditions, labels that have "condition" as name and "true", "false", or "unknown"
as value. The metric value is expected to be a gauge equal to 0 or 1 in this case.
For example:

metric {
  label { name: "condition", value: "true" }
  # other labels here
  gauge { value: 1.0 }
}

This function evaluates metrics containing conditions and sends a service check
based on a provided condition->check mapping dict
[ "Some", "metrics", "contains", "conditions", "labels", "that", "have", "condition", "as", "name", "and", "true", "false", "or", "unknown", "as", "value", ".", "The", "metric", "value", "is", "expected", "to", "be", "a", "gauge", "equal", "to", "0", "or", "1", "in", "this", "case", ".", "For", "example", ":" ]
python
train
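The condition handling reduces to two steps: skip samples whose gauge is 0, then map the condition label through the caller-supplied dict. A standalone sketch; the numeric status constants are assumed, not taken from the check:

OK, WARNING, CRITICAL, UNKNOWN = 0, 1, 2, 3  # assumed AgentCheck statuses

def condition_to_status(value, condition, mapping):
    if not bool(value):
        return None                # gauge is 0: condition not asserted
    return mapping.get(condition)  # None for unmapped conditions

mapping = {'true': OK, 'false': CRITICAL, 'unknown': UNKNOWN}
assert condition_to_status(1.0, 'true', mapping) == OK
assert condition_to_status(0.0, 'false', mapping) is None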
freshbooks/refreshbooks
refreshbooks/adapters.py
https://github.com/freshbooks/refreshbooks/blob/cfd65ecd38cb6be3b61dbf6a01f93800603f34b1/refreshbooks/adapters.py#L28-L47
def encode_as_simple(name, value): """Creates an etree element following the simple field convention. Values are assumed to be strs, unicodes, ints, floats, or Decimals: >>> element = encode_as_simple('foo', '5') >>> element.tag == 'foo' True >>> element.text == '5' True >>> element = encode_as_simple('bar', 8) >>> element.tag == 'bar' True >>> element.text == '8' True """ if isinstance(value, objectify.ObjectifiedDataElement): return encode_as_simple(name, unicode(value)) if type(value) in _stringable_types: value = str(value) return elements.field(name, value)
[ "def", "encode_as_simple", "(", "name", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "objectify", ".", "ObjectifiedDataElement", ")", ":", "return", "encode_as_simple", "(", "name", ",", "unicode", "(", "value", ")", ")", "if", "type", "(", "value", ")", "in", "_stringable_types", ":", "value", "=", "str", "(", "value", ")", "return", "elements", ".", "field", "(", "name", ",", "value", ")" ]
Creates an etree element following the simple field convention. Values are assumed to be strs, unicodes, ints, floats, or Decimals: >>> element = encode_as_simple('foo', '5') >>> element.tag == 'foo' True >>> element.text == '5' True >>> element = encode_as_simple('bar', 8) >>> element.tag == 'bar' True >>> element.text == '8' True
[ "Creates", "an", "etree", "element", "following", "the", "simple", "field", "convention", ".", "Values", "are", "assumed", "to", "be", "strs", "unicodes", "ints", "floats", "or", "Decimals", ":", ">>>", "element", "=", "encode_as_simple", "(", "foo", "5", ")", ">>>", "element", ".", "tag", "==", "foo", "True", ">>>", "element", ".", "text", "==", "5", "True", ">>>", "element", "=", "encode_as_simple", "(", "bar", "8", ")", ">>>", "element", ".", "tag", "==", "bar", "True", ">>>", "element", ".", "text", "==", "8", "True" ]
python
train
scanny/python-pptx
pptx/text/fonts.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/fonts.py#L324-L339
def _iter_names(self): """ Generate a key/value pair for each name in this table. The key is a (platform_id, name_id) 2-tuple and the value is the unicode text corresponding to that key. """ table_format, count, strings_offset = self._table_header table_bytes = self._table_bytes for idx in range(count): platform_id, name_id, name = self._read_name( table_bytes, idx, strings_offset ) if name is None: continue yield ((platform_id, name_id), name)
[ "def", "_iter_names", "(", "self", ")", ":", "table_format", ",", "count", ",", "strings_offset", "=", "self", ".", "_table_header", "table_bytes", "=", "self", ".", "_table_bytes", "for", "idx", "in", "range", "(", "count", ")", ":", "platform_id", ",", "name_id", ",", "name", "=", "self", ".", "_read_name", "(", "table_bytes", ",", "idx", ",", "strings_offset", ")", "if", "name", "is", "None", ":", "continue", "yield", "(", "(", "platform_id", ",", "name_id", ")", ",", "name", ")" ]
Generate a key/value pair for each name in this table. The key is a (platform_id, name_id) 2-tuple and the value is the unicode text corresponding to that key.
[ "Generate", "a", "key", "/", "value", "pair", "for", "each", "name", "in", "this", "table", ".", "The", "key", "is", "a", "(", "platform_id", "name_id", ")", "2", "-", "tuple", "and", "the", "value", "is", "the", "unicode", "text", "corresponding", "to", "that", "key", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py#L581-L631
def fixup_chunks(chunks): """ This function takes a list of chunks and produces a list of tokens. """ tag_accum = [] cur_word = None result = [] for chunk in chunks: if isinstance(chunk, tuple): if chunk[0] == 'img': src = chunk[1] tag, trailing_whitespace = split_trailing_whitespace(chunk[2]) cur_word = tag_token('img', src, html_repr=tag, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace) tag_accum = [] result.append(cur_word) elif chunk[0] == 'href': href = chunk[1] cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=" ") tag_accum = [] result.append(cur_word) continue if is_word(chunk): chunk, trailing_whitespace = split_trailing_whitespace(chunk) cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace) tag_accum = [] result.append(cur_word) elif is_start_tag(chunk): tag_accum.append(chunk) elif is_end_tag(chunk): if tag_accum: tag_accum.append(chunk) else: assert cur_word, ( "Weird state, cur_word=%r, result=%r, chunks=%r of %r" % (cur_word, result, chunk, chunks)) cur_word.post_tags.append(chunk) else: assert(0) if not result: return [token('', pre_tags=tag_accum)] else: result[-1].post_tags.extend(tag_accum) return result
[ "def", "fixup_chunks", "(", "chunks", ")", ":", "tag_accum", "=", "[", "]", "cur_word", "=", "None", "result", "=", "[", "]", "for", "chunk", "in", "chunks", ":", "if", "isinstance", "(", "chunk", ",", "tuple", ")", ":", "if", "chunk", "[", "0", "]", "==", "'img'", ":", "src", "=", "chunk", "[", "1", "]", "tag", ",", "trailing_whitespace", "=", "split_trailing_whitespace", "(", "chunk", "[", "2", "]", ")", "cur_word", "=", "tag_token", "(", "'img'", ",", "src", ",", "html_repr", "=", "tag", ",", "pre_tags", "=", "tag_accum", ",", "trailing_whitespace", "=", "trailing_whitespace", ")", "tag_accum", "=", "[", "]", "result", ".", "append", "(", "cur_word", ")", "elif", "chunk", "[", "0", "]", "==", "'href'", ":", "href", "=", "chunk", "[", "1", "]", "cur_word", "=", "href_token", "(", "href", ",", "pre_tags", "=", "tag_accum", ",", "trailing_whitespace", "=", "\" \"", ")", "tag_accum", "=", "[", "]", "result", ".", "append", "(", "cur_word", ")", "continue", "if", "is_word", "(", "chunk", ")", ":", "chunk", ",", "trailing_whitespace", "=", "split_trailing_whitespace", "(", "chunk", ")", "cur_word", "=", "token", "(", "chunk", ",", "pre_tags", "=", "tag_accum", ",", "trailing_whitespace", "=", "trailing_whitespace", ")", "tag_accum", "=", "[", "]", "result", ".", "append", "(", "cur_word", ")", "elif", "is_start_tag", "(", "chunk", ")", ":", "tag_accum", ".", "append", "(", "chunk", ")", "elif", "is_end_tag", "(", "chunk", ")", ":", "if", "tag_accum", ":", "tag_accum", ".", "append", "(", "chunk", ")", "else", ":", "assert", "cur_word", ",", "(", "\"Weird state, cur_word=%r, result=%r, chunks=%r of %r\"", "%", "(", "cur_word", ",", "result", ",", "chunk", ",", "chunks", ")", ")", "cur_word", ".", "post_tags", ".", "append", "(", "chunk", ")", "else", ":", "assert", "(", "0", ")", "if", "not", "result", ":", "return", "[", "token", "(", "''", ",", "pre_tags", "=", "tag_accum", ")", "]", "else", ":", "result", "[", "-", "1", "]", ".", "post_tags", ".", "extend", "(", "tag_accum", ")", "return", "result" ]
This function takes a list of chunks and produces a list of tokens.
[ "This", "function", "takes", "a", "list", "of", "chunks", "and", "produces", "a", "list", "of", "tokens", "." ]
python
test
websocket-client/websocket-client
websocket/_core.py
https://github.com/websocket-client/websocket-client/blob/3c25814664fef5b78716ed8841123ed1c0d17824/websocket/_core.py#L461-L515
def create_connection(url, timeout=None, class_=WebSocket, **options):
    """
    connect to url and return websocket object.

    Connect to url and return the WebSocket object.
    Passing optional timeout parameter will set the timeout on the socket.
    If no timeout is supplied,
    the global default timeout setting returned by getdefaulttimeout() is used.
    You can customize using 'options'.
    If you set "header" list object, you can set your own custom header.

    >>> conn = create_connection("ws://echo.websocket.org/",
    ...      header=["User-Agent: MyProgram",
    ...              "x-custom: header"])

    timeout: socket timeout time. This value is an integer.
             If you set None for this value,
             it means "use default_timeout value"

    class_: class to instantiate when creating the connection. It has to
            implement settimeout and connect. Its __init__ should be compatible
            with WebSocket.__init__, i.e. accept all of its kwargs.
    options: "header" -> custom http header list or dict.
             "cookie" -> cookie value.
             "origin" -> custom origin url.
             "suppress_origin" -> suppress outputting origin header.
             "host" -> custom host header string.
             "http_proxy_host" - http proxy host name.
             "http_proxy_port" - http proxy port. If not set, set to 80.
             "http_no_proxy" - host names, which doesn't use proxy.
             "http_proxy_auth" - http proxy auth information.
                                 tuple of username and password.
                                 default is None
             "enable_multithread" -> enable lock for multithread.
             "redirect_limit" -> number of redirects to follow.
             "sockopt" -> socket options
             "sslopt" -> ssl option
             "subprotocols" - array of available sub protocols.
                              default is None.
             "skip_utf8_validation" - skip utf8 validation.
             "socket" - pre-initialized stream socket.
    """
    sockopt = options.pop("sockopt", [])
    sslopt = options.pop("sslopt", {})
    fire_cont_frame = options.pop("fire_cont_frame", False)
    enable_multithread = options.pop("enable_multithread", False)
    skip_utf8_validation = options.pop("skip_utf8_validation", False)
    websock = class_(sockopt=sockopt, sslopt=sslopt,
                     fire_cont_frame=fire_cont_frame,
                     enable_multithread=enable_multithread,
                     skip_utf8_validation=skip_utf8_validation, **options)
    websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
    websock.connect(url, **options)
    return websock
[ "def", "create_connection", "(", "url", ",", "timeout", "=", "None", ",", "class_", "=", "WebSocket", ",", "*", "*", "options", ")", ":", "sockopt", "=", "options", ".", "pop", "(", "\"sockopt\"", ",", "[", "]", ")", "sslopt", "=", "options", ".", "pop", "(", "\"sslopt\"", ",", "{", "}", ")", "fire_cont_frame", "=", "options", ".", "pop", "(", "\"fire_cont_frame\"", ",", "False", ")", "enable_multithread", "=", "options", ".", "pop", "(", "\"enable_multithread\"", ",", "False", ")", "skip_utf8_validation", "=", "options", ".", "pop", "(", "\"skip_utf8_validation\"", ",", "False", ")", "websock", "=", "class_", "(", "sockopt", "=", "sockopt", ",", "sslopt", "=", "sslopt", ",", "fire_cont_frame", "=", "fire_cont_frame", ",", "enable_multithread", "=", "enable_multithread", ",", "skip_utf8_validation", "=", "skip_utf8_validation", ",", "*", "*", "options", ")", "websock", ".", "settimeout", "(", "timeout", "if", "timeout", "is", "not", "None", "else", "getdefaulttimeout", "(", ")", ")", "websock", ".", "connect", "(", "url", ",", "*", "*", "options", ")", "return", "websock" ]
connect to url and return websocket object.

Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.

>>> conn = create_connection("ws://echo.websocket.org/",
...      header=["User-Agent: MyProgram",
...              "x-custom: header"])

timeout: socket timeout time. This value is an integer.
         If you set None for this value,
         it means "use default_timeout value"

class_: class to instantiate when creating the connection. It has to
        implement settimeout and connect. Its __init__ should be compatible
        with WebSocket.__init__, i.e. accept all of its kwargs.
options: "header" -> custom http header list or dict.
         "cookie" -> cookie value.
         "origin" -> custom origin url.
         "suppress_origin" -> suppress outputting origin header.
         "host" -> custom host header string.
         "http_proxy_host" - http proxy host name.
         "http_proxy_port" - http proxy port. If not set, set to 80.
         "http_no_proxy" - host names, which doesn't use proxy.
         "http_proxy_auth" - http proxy auth information.
                             tuple of username and password.
                             default is None
         "enable_multithread" -> enable lock for multithread.
         "redirect_limit" -> number of redirects to follow.
         "sockopt" -> socket options
         "sslopt" -> ssl option
         "subprotocols" - array of available sub protocols.
                          default is None.
         "skip_utf8_validation" - skip utf8 validation.
         "socket" - pre-initialized stream socket.
[ "connect", "to", "url", "and", "return", "websocket", "object", "." ]
python
train
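Typical use, following the docstring's own example (requires the websocket-client package; the echo endpoint is illustrative and may no longer be live):

from websocket import create_connection

ws = create_connection("ws://echo.websocket.org/",
                       timeout=5,
                       header=["User-Agent: MyProgram",
                               "x-custom: header"])
ws.send("Hello, World")
print(ws.recv())
ws.close()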
kislyuk/aegea
aegea/packages/github3/orgs.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/orgs.py#L308-L351
def create_repo(self, name, description='', homepage='',
                private=False, has_issues=True, has_wiki=True,
                has_downloads=True, team_id=0, auto_init=False,
                gitignore_template=''):
    """Create a repository for this organization if the authenticated user
    is a member.

    :param str name: (required), name of the repository
    :param str description: (optional)
    :param str homepage: (optional)
    :param bool private: (optional), If ``True``, create a
        private repository. API default: ``False``
    :param bool has_issues: (optional), If ``True``, enable
        issues for this repository. API default: ``True``
    :param bool has_wiki: (optional), If ``True``, enable the
        wiki for this repository. API default: ``True``
    :param bool has_downloads: (optional), If ``True``, enable
        downloads for this repository. API default: ``True``
    :param int team_id: (optional), id of the team that will be granted
        access to this repository
    :param bool auto_init: (optional), auto initialize the repository.
    :param str gitignore_template: (optional), name of the template; this
        is ignored if auto_init = False.
    :returns: :class:`Repository <github3.repos.Repository>`

    .. warning:: ``name`` should be no longer than 100 characters
    """
    url = self._build_url('repos', base_url=self._api)
    data = {'name': name, 'description': description,
            'homepage': homepage, 'private': private,
            'has_issues': has_issues, 'has_wiki': has_wiki,
            'has_downloads': has_downloads, 'auto_init': auto_init,
            'gitignore_template': gitignore_template}

    if team_id > 0:
        data.update({'team_id': team_id})

    json = self._json(self._post(url, data), 201)
    return Repository(json, self) if json else None
[ "def", "create_repo", "(", "self", ",", "name", ",", "description", "=", "''", ",", "homepage", "=", "''", ",", "private", "=", "False", ",", "has_issues", "=", "True", ",", "has_wiki", "=", "True", ",", "has_downloads", "=", "True", ",", "team_id", "=", "0", ",", "auto_init", "=", "False", ",", "gitignore_template", "=", "''", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'repos'", ",", "base_url", "=", "self", ".", "_api", ")", "data", "=", "{", "'name'", ":", "name", ",", "'description'", ":", "description", ",", "'homepage'", ":", "homepage", ",", "'private'", ":", "private", ",", "'has_issues'", ":", "has_issues", ",", "'has_wiki'", ":", "has_wiki", ",", "'has_downloads'", ":", "has_downloads", ",", "'auto_init'", ":", "auto_init", ",", "'gitignore_template'", ":", "gitignore_template", "}", "if", "team_id", ">", "0", ":", "data", ".", "update", "(", "{", "'team_id'", ":", "team_id", "}", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ",", "data", ")", ",", "201", ")", "return", "Repository", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Create a repository for this organization if the authenticated user
is a member.

:param str name: (required), name of the repository
:param str description: (optional)
:param str homepage: (optional)
:param bool private: (optional), If ``True``, create a
    private repository. API default: ``False``
:param bool has_issues: (optional), If ``True``, enable
    issues for this repository. API default: ``True``
:param bool has_wiki: (optional), If ``True``, enable the
    wiki for this repository. API default: ``True``
:param bool has_downloads: (optional), If ``True``, enable
    downloads for this repository. API default: ``True``
:param int team_id: (optional), id of the team that will be granted
    access to this repository
:param bool auto_init: (optional), auto initialize the repository.
:param str gitignore_template: (optional), name of the template; this
    is ignored if auto_init = False.
:returns: :class:`Repository <github3.repos.Repository>`

.. warning:: ``name`` should be no longer than 100 characters
[ "Create", "a", "repository", "for", "this", "organization", "if", "the", "authenticated", "user", "is", "a", "member", "." ]
python
train
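A usage sketch against github3.py; the token, organization name, and team id are placeholders:

import github3

gh = github3.login(token='<personal-access-token>')
org = gh.organization('my-org')
repo = org.create_repo('demo-repo',
                       description='Scratch repository',
                       private=True,
                       team_id=42,  # grant an existing team access
                       auto_init=True,
                       gitignore_template='Python')
print(repo.html_url if repo else 'creation failed')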
googledatalab/pydatalab
datalab/data/_sql_module.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_module.py#L33-L62
def _get_sql_args(parser, args=None): """ Parse a set of %%sql arguments or get the default value of the arguments. Args: parser: the argument parser to use. args: the argument flags. May be a string or a list. If omitted the empty string is used so we can get the default values for the arguments. These are all used to override the arg parser. Alternatively args may be a dictionary, in which case it overrides the default values from the arg parser. Returns: A dictionary of argument names and values. """ overrides = None if args is None: tokens = [] elif isinstance(args, basestring): command_line = ' '.join(args.split('\n')) tokens = shlex.split(command_line) elif isinstance(args, dict): overrides = args tokens = [] else: tokens = args args = {} if parser is None else vars(parser.parse_args(tokens)) if overrides: args.update(overrides) # Don't return any args that are None as we don't want to expand to 'None' return {arg: value for arg, value in args.items() if value is not None}
[ "def", "_get_sql_args", "(", "parser", ",", "args", "=", "None", ")", ":", "overrides", "=", "None", "if", "args", "is", "None", ":", "tokens", "=", "[", "]", "elif", "isinstance", "(", "args", ",", "basestring", ")", ":", "command_line", "=", "' '", ".", "join", "(", "args", ".", "split", "(", "'\\n'", ")", ")", "tokens", "=", "shlex", ".", "split", "(", "command_line", ")", "elif", "isinstance", "(", "args", ",", "dict", ")", ":", "overrides", "=", "args", "tokens", "=", "[", "]", "else", ":", "tokens", "=", "args", "args", "=", "{", "}", "if", "parser", "is", "None", "else", "vars", "(", "parser", ".", "parse_args", "(", "tokens", ")", ")", "if", "overrides", ":", "args", ".", "update", "(", "overrides", ")", "# Don't return any args that are None as we don't want to expand to 'None'", "return", "{", "arg", ":", "value", "for", "arg", ",", "value", "in", "args", ".", "items", "(", ")", "if", "value", "is", "not", "None", "}" ]
Parse a set of %%sql arguments or get the default value of the arguments. Args: parser: the argument parser to use. args: the argument flags. May be a string or a list. If omitted the empty string is used so we can get the default values for the arguments. These are all used to override the arg parser. Alternatively args may be a dictionary, in which case it overrides the default values from the arg parser. Returns: A dictionary of argument names and values.
[ "Parse", "a", "set", "of", "%%sql", "arguments", "or", "get", "the", "default", "value", "of", "the", "arguments", "." ]
python
train
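A standalone sketch of the merging behaviour: string args are newline-flattened and shlex-split before parsing, dict args override parser defaults, and None values are dropped so they never expand into the SQL. The parser and flags here are hypothetical:

import argparse
import shlex

parser = argparse.ArgumentParser()
parser.add_argument('--limit', type=int, default=10)
parser.add_argument('--name', default=None)

command_line = ' '.join('--limit 5'.split('\n'))  # newlines become spaces
tokens = shlex.split(command_line)
args = vars(parser.parse_args(tokens))
args = {arg: value for arg, value in args.items() if value is not None}
assert args == {'limit': 5}  # --name stayed None, so it is dropped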
trailofbits/manticore
manticore/native/cpu/bitwise.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/bitwise.py#L169-L184
def ASR(value, amount, width):
    """
    The ARM ASR (arithmetic shift right) operation.

    :param value: Value to shift
    :type value: int or long or BitVec
    :param int amount: How many bits to shift it.
    :param int width: Width of the value
    :return: Resultant value
    :rtype: int or BitVec
    """
    if amount == 0:
        return value
    result, _ = ASR_C(value, amount, width)
    return result
[ "def", "ASR", "(", "value", ",", "amount", ",", "width", ")", ":", "if", "amount", "==", "0", ":", "return", "value", "result", ",", "_", "=", "ASR_C", "(", "value", ",", "amount", ",", "width", ")", "return", "result" ]
The ARM ASR (arithmetic shift right) operation.

:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype: int or BitVec
[ "The", "ARM", "ASR", "(", "arithmetic", "shift", "right", ")", "operation", "." ]
python
valid
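For concrete integers the operation is ordinary two's-complement arithmetic shifting. A minimal sketch (the library's ASR_C, which also handles symbolic BitVec operands and carry-out, is not shown):

def asr(value, amount, width):
    """Arithmetic shift right of a width-bit two's-complement value."""
    if amount == 0:
        return value
    if value & (1 << (width - 1)):  # sign bit set: reinterpret as negative
        value -= 1 << width
    return (value >> amount) & ((1 << width) - 1)

assert asr(0b10000000, 1, 8) == 0b11000000  # the sign bit is replicated
assert asr(0b01000000, 1, 8) == 0b00100000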
bachiraoun/pyrep
Repository.py
https://github.com/bachiraoun/pyrep/blob/0449bf2fad3e3e8dda855d4686a8869efeefd433/Repository.py#L1452-L1517
def create_package(self, path=None, name=None, mode=None):
    """
    Create a tar file package of all the repository files and directories.
    Only files and directories that are tracked in the repository are stored in the package tar file.

    **N.B. On some systems packaging requires root permissions.**

    :Parameters:
        #. path (None, string): The real absolute path where to create the package.
           If None, it will be created in the same directory as the repository.
           If '.' or an empty string is passed, the current working directory will be used.
        #. name (None, string): The name to give to the package file.
           If None, the package directory name will be used with the appropriate extension added.
        #. mode (None, string): The writing mode of the tarfile.
           If None, automatically the best compression mode will be chosen.
           Available modes are ('w', 'w:', 'w:gz', 'w:bz2')
    """
    # check mode
    assert mode in (None, 'w', 'w:', 'w:gz', 'w:bz2'), 'unkown archive mode %s'%str(mode)
    if mode is None:
        #mode = 'w:bz2'
        mode = 'w:'
    # get root
    if path is None:
        root = os.path.split(self.__path)[0]
    elif path.strip() in ('','.'):
        root = os.getcwd()
    else:
        root = os.path.realpath( os.path.expanduser(path) )
    assert os.path.isdir(root), 'absolute path %s is not a valid directory'%path
    # get name
    if name is None:
        ext = mode.split(":")
        if len(ext) == 2:
            if len(ext[1]):
                ext = "."+ext[1]
            else:
                ext = '.tar'
        else:
            ext = '.tar'
        name = os.path.split(self.__path)[1]+ext
    # create tar file
    tarfilePath = os.path.join(root, name)
    try:
        tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)
    except Exception as e:
        raise Exception("Unable to create package (%s)"%e)
    # walk directory and create empty directories
    for dpath in sorted(list(self.walk_directories_path(recursive=True))):
        t = tarfile.TarInfo( dpath )
        t.type = tarfile.DIRTYPE
        tarHandler.addfile(t)
        tarHandler.add(os.path.join(self.__path,dpath,self.__dirInfo), arcname=self.__dirInfo)
    # walk files and add to tar
    for fpath in self.walk_files_path(recursive=True):
        relaPath, fname = os.path.split(fpath)
        tarHandler.add(os.path.join(self.__path,fpath), arcname=fname)
        tarHandler.add(os.path.join(self.__path,relaPath,self.__fileInfo%fname), arcname=self.__fileInfo%fname)
        tarHandler.add(os.path.join(self.__path,relaPath,self.__fileClass%fname), arcname=self.__fileClass%fname)
    # save repository .pyrepinfo
    tarHandler.add(os.path.join(self.__path,self.__repoFile), arcname=".pyrepinfo")
    # close tar file
    tarHandler.close()
[ "def", "create_package", "(", "self", ",", "path", "=", "None", ",", "name", "=", "None", ",", "mode", "=", "None", ")", ":", "# check mode", "assert", "mode", "in", "(", "None", ",", "'w'", ",", "'w:'", ",", "'w:gz'", ",", "'w:bz2'", ")", ",", "'unkown archive mode %s'", "%", "str", "(", "mode", ")", "if", "mode", "is", "None", ":", "#mode = 'w:bz2'", "mode", "=", "'w:'", "# get root", "if", "path", "is", "None", ":", "root", "=", "os", ".", "path", ".", "split", "(", "self", ".", "__path", ")", "[", "0", "]", "elif", "path", ".", "strip", "(", ")", "in", "(", "''", ",", "'.'", ")", ":", "root", "=", "os", ".", "getcwd", "(", ")", "else", ":", "root", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", "assert", "os", ".", "path", ".", "isdir", "(", "root", ")", ",", "'absolute path %s is not a valid directory'", "%", "path", "# get name", "if", "name", "is", "None", ":", "ext", "=", "mode", ".", "split", "(", "\":\"", ")", "if", "len", "(", "ext", ")", "==", "2", ":", "if", "len", "(", "ext", "[", "1", "]", ")", ":", "ext", "=", "\".\"", "+", "ext", "[", "1", "]", "else", ":", "ext", "=", "'.tar'", "else", ":", "ext", "=", "'.tar'", "name", "=", "os", ".", "path", ".", "split", "(", "self", ".", "__path", ")", "[", "1", "]", "+", "ext", "# create tar file", "tarfilePath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", "try", ":", "tarHandler", "=", "tarfile", ".", "TarFile", ".", "open", "(", "tarfilePath", ",", "mode", "=", "mode", ")", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "\"Unable to create package (%s)\"", "%", "e", ")", "# walk directory and create empty directories", "for", "dpath", "in", "sorted", "(", "list", "(", "self", ".", "walk_directories_path", "(", "recursive", "=", "True", ")", ")", ")", ":", "t", "=", "tarfile", ".", "TarInfo", "(", "dpath", ")", "t", ".", "type", "=", "tarfile", ".", "DIRTYPE", "tarHandler", ".", "addfile", "(", "t", ")", "tarHandler", ".", "add", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", ",", "dpath", ",", "self", ".", "__dirInfo", ")", ",", "arcname", "=", "self", ".", "__dirInfo", ")", "# walk files and add to tar", "for", "fpath", "in", "self", ".", "walk_files_path", "(", "recursive", "=", "True", ")", ":", "relaPath", ",", "fname", "=", "os", ".", "path", ".", "split", "(", "fpath", ")", "tarHandler", ".", "add", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", ",", "fpath", ")", ",", "arcname", "=", "fname", ")", "tarHandler", ".", "add", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", ",", "relaPath", ",", "self", ".", "__fileInfo", "%", "fname", ")", ",", "arcname", "=", "self", ".", "__fileInfo", "%", "fname", ")", "tarHandler", ".", "add", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", ",", "relaPath", ",", "self", ".", "__fileClass", "%", "fname", ")", ",", "arcname", "=", "self", ".", "__fileClass", "%", "fname", ")", "# save repository .pyrepinfo", "tarHandler", ".", "add", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", ",", "self", ".", "__repoFile", ")", ",", "arcname", "=", "\".pyrepinfo\"", ")", "# close tar file", "tarHandler", ".", "close", "(", ")" ]
Create a tar file package of all the repository files and directories.
Only files and directories that are tracked in the repository are stored in the package tar file.

**N.B. On some systems packaging requires root permissions.**

:Parameters:
    #. path (None, string): The real absolute path where to create the package.
       If None, it will be created in the same directory as the repository.
       If '.' or an empty string is passed, the current working directory will be used.
    #. name (None, string): The name to give to the package file.
       If None, the package directory name will be used with the appropriate extension added.
    #. mode (None, string): The writing mode of the tarfile.
       If None, automatically the best compression mode will be chosen.
       Available modes are ('w', 'w:', 'w:gz', 'w:bz2')
[ "Create", "a", "tar", "file", "package", "of", "all", "the", "repository", "files", "and", "directories", ".", "Only", "files", "and", "directories", "that", "are", "tracked", "in", "the", "repository", "are", "stored", "in", "the", "package", "tar", "file", "." ]
python
valid
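The tarfile pattern used above, reduced to a runnable sketch: explicit TarInfo entries for (possibly empty) directories, TarFile.add for regular files, and 'w:' for an uncompressed archive. File names are hypothetical:

import pathlib
import tarfile

pathlib.Path('README.txt').write_text('demo payload')

with tarfile.TarFile.open('repo.tar', mode='w:') as tar:
    info = tarfile.TarInfo('data')  # directory entry with no contents
    info.type = tarfile.DIRTYPE
    tar.addfile(info)
    tar.add('README.txt', arcname='README.txt')

with tarfile.TarFile.open('repo.tar') as tar:
    assert {m.name for m in tar.getmembers()} == {'data', 'README.txt'}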
samastur/pyimagediet
pyimagediet/process.py
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L128-L167
def check_configuration(config): '''Check if configuration object is not malformed. :param config: configuration :type config: dict :return: is configuration correct? :rtype: bool ''' sections = ('commands', 'parameters', 'pipelines') # Check all sections are there and contain dicts for section in sections: if section not in config: error_msg = 'Error: Section {0} is missing.'.format(section) raise ConfigurationErrorDietException(error_msg) if not isinstance(config[section], dict): error_msg = 'Error: Section {0} is malformed.'.format(section) raise ConfigurationErrorDietException(error_msg) # Check every command has a corresponding parameters entry commands_cmds = set(list(config['commands'].keys())) parameters_cmds = set(list(config['parameters'].keys())) if commands_cmds != parameters_cmds: error_msg = ('Every command in commands and parameters section has to ' 'have a corresponding entry in the other section.') raise ConfigurationErrorDietException(error_msg) # Check pipelines section contains lists as values and each of them only # has entries listed in commands section for cmd in config['pipelines']: pipeline = config['pipelines'][cmd] if not isinstance(pipeline, list): error_msg = ('Error: Pipeline {0} is malformed. Values have to ' 'be a list of command names.').format(cmd) raise ConfigurationErrorDietException(error_msg) for tool in pipeline: if tool not in commands_cmds: error_msg = ('Error in pipeline {0}. "{1}" cannot be found ' 'among commands listed in commands ' 'section').format(cmd, tool) raise ConfigurationErrorDietException(error_msg)
[ "def", "check_configuration", "(", "config", ")", ":", "sections", "=", "(", "'commands'", ",", "'parameters'", ",", "'pipelines'", ")", "# Check all sections are there and contain dicts", "for", "section", "in", "sections", ":", "if", "section", "not", "in", "config", ":", "error_msg", "=", "'Error: Section {0} is missing.'", ".", "format", "(", "section", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "if", "not", "isinstance", "(", "config", "[", "section", "]", ",", "dict", ")", ":", "error_msg", "=", "'Error: Section {0} is malformed.'", ".", "format", "(", "section", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "# Check every command has a corresponding parameters entry", "commands_cmds", "=", "set", "(", "list", "(", "config", "[", "'commands'", "]", ".", "keys", "(", ")", ")", ")", "parameters_cmds", "=", "set", "(", "list", "(", "config", "[", "'parameters'", "]", ".", "keys", "(", ")", ")", ")", "if", "commands_cmds", "!=", "parameters_cmds", ":", "error_msg", "=", "(", "'Every command in commands and parameters section has to '", "'have a corresponding entry in the other section.'", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "# Check pipelines section contains lists as values and each of them only", "# has entries listed in commands section", "for", "cmd", "in", "config", "[", "'pipelines'", "]", ":", "pipeline", "=", "config", "[", "'pipelines'", "]", "[", "cmd", "]", "if", "not", "isinstance", "(", "pipeline", ",", "list", ")", ":", "error_msg", "=", "(", "'Error: Pipeline {0} is malformed. Values have to '", "'be a list of command names.'", ")", ".", "format", "(", "cmd", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "for", "tool", "in", "pipeline", ":", "if", "tool", "not", "in", "commands_cmds", ":", "error_msg", "=", "(", "'Error in pipeline {0}. \"{1}\" cannot be found '", "'among commands listed in commands '", "'section'", ")", ".", "format", "(", "cmd", ",", "tool", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")" ]
Check if configuration object is not malformed. :param config: configuration :type config: dict :return: is configuration correct? :rtype: bool
[ "Check", "if", "configuration", "object", "is", "not", "malformed", "." ]
python
train
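A minimal configuration that passes every check above: all three sections are dicts, commands and parameters share the same keys, and each pipeline is a list naming only known commands. The command strings are hypothetical (the validator inspects only the structure), and the import path is assumed from the file layout shown:

from pyimagediet.process import check_configuration

config = {
    'commands': {'jpegtran': 'jpegtran -copy none -outfile {file} {file}'},
    'parameters': {'jpegtran': '-optimize'},
    'pipelines': {'image/jpeg': ['jpegtran']},
}
check_configuration(config)  # silent on success; raises
                             # ConfigurationErrorDietException otherwise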
tanghaibao/goatools
goatools/cli/find_enrichment.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/find_enrichment.py#L201-L214
def _init_itemid2name(self): """Print gene symbols instead of gene IDs, if provided.""" if not hasattr(self.args, 'id2sym'): return None fin_id2sym = self.args.id2sym if fin_id2sym is not None and os.path.exists(fin_id2sym): id2sym = {} cmpl = re.compile(r'^\s*(\S+)[\s,;]+(\S+)') with open(fin_id2sym) as ifstrm: for line in ifstrm: mtch = cmpl.search(line) if mtch: id2sym[mtch.group(1)] = mtch.group(2) return id2sym
[ "def", "_init_itemid2name", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "args", ",", "'id2sym'", ")", ":", "return", "None", "fin_id2sym", "=", "self", ".", "args", ".", "id2sym", "if", "fin_id2sym", "is", "not", "None", "and", "os", ".", "path", ".", "exists", "(", "fin_id2sym", ")", ":", "id2sym", "=", "{", "}", "cmpl", "=", "re", ".", "compile", "(", "r'^\\s*(\\S+)[\\s,;]+(\\S+)'", ")", "with", "open", "(", "fin_id2sym", ")", "as", "ifstrm", ":", "for", "line", "in", "ifstrm", ":", "mtch", "=", "cmpl", ".", "search", "(", "line", ")", "if", "mtch", ":", "id2sym", "[", "mtch", ".", "group", "(", "1", ")", "]", "=", "mtch", ".", "group", "(", "2", ")", "return", "id2sym" ]
Print gene symbols instead of gene IDs, if provided.
[ "Print", "gene", "symbols", "instead", "of", "gene", "IDs", "if", "provided", "." ]
python
train
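The mapping-file format implied by the regex: an ID, one or more separator characters (whitespace, comma, or semicolon), then a symbol. One quirk: the first \S+ is greedy, so with a "comma then space" separator the comma sticks to the ID; whitespace-separated input is the safe case. Sample lines are hypothetical:

import re

cmpl = re.compile(r'^\s*(\S+)[\s,;]+(\S+)')

mtch = cmpl.search('ENSG00000139618\tBRCA2')
assert (mtch.group(1), mtch.group(2)) == ('ENSG00000139618', 'BRCA2')

mtch = cmpl.search('ENSG00000139618, BRCA2')
assert mtch.group(1) == 'ENSG00000139618,'  # greedy \S+ kept the comma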
mozilla-iot/webthing-python
webthing/thing.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/thing.py#L390-L400
def remove_subscriber(self, ws): """ Remove a websocket subscriber. ws -- the websocket """ if ws in self.subscribers: self.subscribers.remove(ws) for name in self.available_events: self.remove_event_subscriber(name, ws)
[ "def", "remove_subscriber", "(", "self", ",", "ws", ")", ":", "if", "ws", "in", "self", ".", "subscribers", ":", "self", ".", "subscribers", ".", "remove", "(", "ws", ")", "for", "name", "in", "self", ".", "available_events", ":", "self", ".", "remove_event_subscriber", "(", "name", ",", "ws", ")" ]
Remove a websocket subscriber. ws -- the websocket
[ "Remove", "a", "websocket", "subscriber", "." ]
python
test
ewels/MultiQC
multiqc/modules/homer/tagdirectory.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/homer/tagdirectory.py#L348-L371
def parse_tag_info_chrs(self, f, convChr=True): """ Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage. """ parsed_data_total = OrderedDict() parsed_data_uniq = OrderedDict() remove = ["hap", "random", "chrUn", "cmd", "EBV", "GL", "NT_"] for l in f['f']: s = l.split("\t") key = s[0].strip() # skip header if '=' in l or len(s) != 3: continue if convChr: if any(x in key for x in remove): continue try: vT = float(s[1].strip()) vU = float(s[2].strip()) except ValueError: continue parsed_data_total[key] = vT parsed_data_uniq[key] = vU return [parsed_data_total, parsed_data_uniq]
[ "def", "parse_tag_info_chrs", "(", "self", ",", "f", ",", "convChr", "=", "True", ")", ":", "parsed_data_total", "=", "OrderedDict", "(", ")", "parsed_data_uniq", "=", "OrderedDict", "(", ")", "remove", "=", "[", "\"hap\"", ",", "\"random\"", ",", "\"chrUn\"", ",", "\"cmd\"", ",", "\"EBV\"", ",", "\"GL\"", ",", "\"NT_\"", "]", "for", "l", "in", "f", "[", "'f'", "]", ":", "s", "=", "l", ".", "split", "(", "\"\\t\"", ")", "key", "=", "s", "[", "0", "]", ".", "strip", "(", ")", "# skip header", "if", "'='", "in", "l", "or", "len", "(", "s", ")", "!=", "3", ":", "continue", "if", "convChr", ":", "if", "any", "(", "x", "in", "key", "for", "x", "in", "remove", ")", ":", "continue", "try", ":", "vT", "=", "float", "(", "s", "[", "1", "]", ".", "strip", "(", ")", ")", "vU", "=", "float", "(", "s", "[", "2", "]", ".", "strip", "(", ")", ")", "except", "ValueError", ":", "continue", "parsed_data_total", "[", "key", "]", "=", "vT", "parsed_data_uniq", "[", "key", "]", "=", "vU", "return", "[", "parsed_data_total", ",", "parsed_data_uniq", "]" ]
Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage.
[ "Parse", "HOMER", "tagdirectory", "taginfo", ".", "txt", "file", "to", "extract", "chromosome", "coverage", "." ]
python
train
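The filtering above, runnable on a few hypothetical taginfo.txt lines: rows containing '=' or not exactly three tab-separated columns are skipped, as are unplaced/haplotype contigs matched by the remove list:

from collections import OrderedDict

lines = ['genome=hg19\t0\t0',
         'chr1\t1200.0\t800.0',
         'chrUn_gl000220\t5.0\t4.0']
remove = ["hap", "random", "chrUn", "cmd", "EBV", "GL", "NT_"]
parsed_data_total, parsed_data_uniq = OrderedDict(), OrderedDict()
for l in lines:
    s = l.split("\t")
    if '=' in l or len(s) != 3:
        continue  # header/metadata row
    key = s[0].strip()
    if any(x in key for x in remove):
        continue  # unplaced or haplotype contig
    parsed_data_total[key] = float(s[1])
    parsed_data_uniq[key] = float(s[2])
assert list(parsed_data_total) == ['chr1']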
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/api.py
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L427-L446
def get_kba_values(kb_name, searchname="", searchtype="s"):
    """Return an array of values "authority file" type = just values.

    :param kb_name: name of kb
    :param searchname: get these values, according to searchtype
    :param searchtype: s=substring, e=exact, sw=startswith
    """
    if searchtype == 's' and searchname:
        searchname = '%'+searchname+'%'
    if searchtype == 'sw' and searchname:  # startswith
        searchname = searchname+'%'
    if not searchname:
        searchname = '%'
    query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
        .filter(models.KnwKBRVAL.m_value.like(searchname),
                models.KnwKB.name.like(kb_name))
    return [(k.m_value,) for k in query.all()]
[ "def", "get_kba_values", "(", "kb_name", ",", "searchname", "=", "\"\"", ",", "searchtype", "=", "\"s\"", ")", ":", "if", "searchtype", "==", "'s'", "and", "searchname", ":", "searchname", "=", "'%'", "+", "searchname", "+", "'%'", "if", "searchtype", "==", "'sw'", "and", "searchname", ":", "# startswith", "searchname", "=", "searchname", "+", "'%'", "if", "not", "searchname", ":", "searchname", "=", "'%'", "query", "=", "db", ".", "session", ".", "query", "(", "models", ".", "KnwKBRVAL", ")", ".", "join", "(", "models", ".", "KnwKB", ")", ".", "filter", "(", "models", ".", "KnwKBRVAL", ".", "m_value", ".", "like", "(", "searchname", ")", ",", "models", ".", "KnwKB", ".", "name", ".", "like", "(", "kb_name", ")", ")", "return", "[", "(", "k", ".", "m_value", ",", ")", "for", "k", "in", "query", ".", "all", "(", ")", "]" ]
Return an array of values "authority file" type = just values. :param kb_name: name of kb :param searchname: get these values, according to searchtype :param searchtype: s=substring, e=exact, , sw=startswith
[ "Return", "an", "array", "of", "values", "authority", "file", "type", "=", "just", "values", "." ]
python
train
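The searchtype-to-LIKE-pattern mapping above, extracted as a standalone helper; example values are hypothetical:

def like_pattern(searchname, searchtype):
    if searchtype == 's' and searchname:
        return '%' + searchname + '%'  # substring match
    if searchtype == 'sw' and searchname:
        return searchname + '%'        # startswith
    return searchname or '%'           # exact match, or match-all when empty

assert like_pattern('einstein', 's') == '%einstein%'
assert like_pattern('ein', 'sw') == 'ein%'
assert like_pattern('einstein', 'e') == 'einstein'
assert like_pattern('', 's') == '%'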