Dataset schema (column, type, observed value range):

  repo              stringlengths   7 to 55
  path              stringlengths   4 to 223
  url               stringlengths   87 to 315
  code              stringlengths   75 to 104k
  code_tokens       list
  docstring         stringlengths   1 to 46.9k
  docstring_tokens  list
  language          stringclasses   1 value
  partition         stringclasses   3 values
  avg_line_len      float64         7.91 to 980
toumorokoshi/transmute-core
transmute_core/frameworks/aiohttp/route.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/frameworks/aiohttp/route.py#L6-L21
def add_route(app, fn, context=default_context):
    """
    a decorator that adds a transmute route to the application.
    """
    transmute_func = TransmuteFunction(
        fn, args_not_from_request=["request"]
    )
    handler = create_handler(transmute_func, context=context)
    get_swagger_spec(app).add_func(transmute_func, context)
    for p in transmute_func.paths:
        aiohttp_path = _convert_to_aiohttp_path(p)
        resource = app.router.add_resource(aiohttp_path)
        for method in transmute_func.methods:
            resource.add_route(method, handler)
[ "def", "add_route", "(", "app", ",", "fn", ",", "context", "=", "default_context", ")", ":", "transmute_func", "=", "TransmuteFunction", "(", "fn", ",", "args_not_from_request", "=", "[", "\"request\"", "]", ")", "handler", "=", "create_handler", "(", "transmute_func", ",", "context", "=", "context", ")", "get_swagger_spec", "(", "app", ")", ".", "add_func", "(", "transmute_func", ",", "context", ")", "for", "p", "in", "transmute_func", ".", "paths", ":", "aiohttp_path", "=", "_convert_to_aiohttp_path", "(", "p", ")", "resource", "=", "app", ".", "router", ".", "add_resource", "(", "aiohttp_path", ")", "for", "method", "in", "transmute_func", ".", "methods", ":", "resource", ".", "add_route", "(", "method", ",", "handler", ")" ]
a decorator that adds a transmute route to the application.
[ "a", "decorator", "that", "adds", "a", "transmute", "route", "to", "the", "application", "." ]
python
train
35.8125
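A usage sketch for the `add_route` helper in the row above: it assumes transmute-core's documented `describe` decorator and a plain aiohttp application; the `multiply` handler is hypothetical, invented for illustration, not taken from the dataset.

# Hedged usage sketch for transmute_core's aiohttp add_route.
import transmute_core
from aiohttp import web
from transmute_core.frameworks.aiohttp import add_route

@transmute_core.describe(paths="/multiply", methods=["GET"])
async def multiply(left: int, right: int) -> int:
    # transmute derives query parameters and the response schema
    # from these annotations.
    return left * right

app = web.Application()
add_route(app, multiply)  # registers the path and updates the swagger spec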
senaite/senaite.core
bika/lims/api/security.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/security.py#L160-L181
def get_roles_for_permission(permission, brain_or_object):
    """Return the roles of the permission that is granted on the object

    Code extracted from `IRoleManager.rolesOfPermission`

    :param permission: The permission to get the roles
    :param brain_or_object: Catalog brain or object
    :returns: List of roles having the permission
    """
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    for item in obj.ac_inherited_permissions(1):
        name, value = item[:2]
        # found the requested permission
        if name == permission:
            # Permission maps a named permission to a set of attribute names
            permission = Permission(name, value, obj)
            roles = permission.getRoles()
            # return only valid roles that have the permission granted
            return filter(lambda r: r in valid_roles, roles)
    # Raise an error if the permission is invalid
    raise ValueError("The permission {} is invalid.".format(permission))
[ "def", "get_roles_for_permission", "(", "permission", ",", "brain_or_object", ")", ":", "obj", "=", "api", ".", "get_object", "(", "brain_or_object", ")", "valid_roles", "=", "get_valid_roles_for", "(", "obj", ")", "for", "item", "in", "obj", ".", "ac_inherited_permissions", "(", "1", ")", ":", "name", ",", "value", "=", "item", "[", ":", "2", "]", "# found the requested permission", "if", "name", "==", "permission", ":", "# Permission maps a named permission to a set of attribute names", "permission", "=", "Permission", "(", "name", ",", "value", ",", "obj", ")", "roles", "=", "permission", ".", "getRoles", "(", ")", "# return only valid roles that have the permission granted", "return", "filter", "(", "lambda", "r", ":", "r", "in", "valid_roles", ",", "roles", ")", "# Raise an error if the permission is invalid", "raise", "ValueError", "(", "\"The permission {} is invalid.\"", ".", "format", "(", "permission", ")", ")" ]
Return the roles of the permission that is granted on the object

Code extracted from `IRoleManager.rolesOfPermission`

:param permission: The permission to get the roles
:param brain_or_object: Catalog brain or object
:returns: List of roles having the permission
[ "Return", "the", "roles", "of", "the", "permission", "that", "is", "granted", "on", "the", "object" ]
python
train
45.363636
pypa/pipenv
pipenv/vendor/six.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/six.py#L497-L505
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
[ "def", "remove_move", "(", "name", ")", ":", "try", ":", "delattr", "(", "_MovedItems", ",", "name", ")", "except", "AttributeError", ":", "try", ":", "del", "moves", ".", "__dict__", "[", "name", "]", "except", "KeyError", ":", "raise", "AttributeError", "(", "\"no such move, %r\"", "%", "(", "name", ",", ")", ")" ]
Remove item from six.moves.
[ "Remove", "item", "from", "six", ".", "moves", "." ]
python
train
29
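`remove_move` undoes a registration made with `six.add_move`; a minimal round-trip sketch using six's public API (the move name here is arbitrary):

# Register a module under six.moves, then remove it again.
import six

six.add_move(six.MovedModule("parser_mod", "HTMLParser", "html.parser"))
from six.moves import parser_mod  # resolves to the right module per Python version

six.remove_move("parser_mod")  # later lookups of this move now fail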
woolfson-group/isambard
isambard/ampal/specifications/polymer_specs/helix.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/specifications/polymer_specs/helix.py#L123-L146
def from_start_and_end(cls, start, end, aa=None, helix_type='alpha'):
    """Creates a `Helix` between `start` and `end`.

    Parameters
    ----------
    start : 3D Vector (tuple or list or numpy.array)
        The coordinate of the start of the helix primitive.
    end : 3D Vector (tuple or list or numpy.array)
        The coordinate of the end of the helix primitive.
    aa : int, optional
        Number of amino acids in the `Helix`. If `None, an appropriate
        number of residues are added.
    helix_type : str, optional
        Type of helix, can be: 'alpha', 'pi', '3-10', 'PPI', 'PPII',
        'collagen'.
    """
    start = numpy.array(start)
    end = numpy.array(end)
    if aa is None:
        rise_per_residue = _helix_parameters[helix_type][1]
        aa = int((numpy.linalg.norm(end - start) / rise_per_residue) + 1)
    instance = cls(aa=aa, helix_type=helix_type)
    instance.move_to(start=start, end=end)
    return instance
[ "def", "from_start_and_end", "(", "cls", ",", "start", ",", "end", ",", "aa", "=", "None", ",", "helix_type", "=", "'alpha'", ")", ":", "start", "=", "numpy", ".", "array", "(", "start", ")", "end", "=", "numpy", ".", "array", "(", "end", ")", "if", "aa", "is", "None", ":", "rise_per_residue", "=", "_helix_parameters", "[", "helix_type", "]", "[", "1", "]", "aa", "=", "int", "(", "(", "numpy", ".", "linalg", ".", "norm", "(", "end", "-", "start", ")", "/", "rise_per_residue", ")", "+", "1", ")", "instance", "=", "cls", "(", "aa", "=", "aa", ",", "helix_type", "=", "helix_type", ")", "instance", ".", "move_to", "(", "start", "=", "start", ",", "end", "=", "end", ")", "return", "instance" ]
Creates a `Helix` between `start` and `end`.

Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
    The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
    The coordinate of the end of the helix primitive.
aa : int, optional
    Number of amino acids in the `Helix`. If `None, an appropriate
    number of residues are added.
helix_type : str, optional
    Type of helix, can be: 'alpha', 'pi', '3-10', 'PPI', 'PPII', 'collagen'.
[ "Creates", "a", "Helix", "between", "start", "and", "end", "." ]
python
train
42.583333
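A hedged construction sketch for the classmethod above; the import path is assumed from the file's location in the repo and is not verified here, and the coordinates are arbitrary:

# Hypothetical usage: an alpha helix along z, with the residue count
# inferred from the rise per residue because aa is left as None.
from isambard.ampal.specifications.polymer_specs.helix import Helix

helix = Helix.from_start_and_end(start=(0.0, 0.0, 0.0),
                                 end=(0.0, 0.0, 15.0),
                                 helix_type='alpha')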
pjuren/pyokit
src/pyokit/datastruct/retrotransposon.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/retrotransposon.py#L279-L308
def liftover(self, intersecting_region):
    """
    Lift a region that overlaps the genomic occurrence of the retrotransposon
    to consensus sequence co-ordinates. This method will behave differently
    depending on whether this retrotransposon occurrance contains a full
    alignment or not. If it does, the alignment is used to do the liftover
    and an exact result is provided. If it does not, the coordinates are used
    to do the liftover, padding either the genomic region or consensus
    sequence (whichever is shorter) with equally spaced gaps to make the size
    of both match.

    :param intersecting_region: a region that intersects this occurrence.
    :return: list of GenomicInterval objects. This is a list because a
             genomic deletion of part of the retrotransposon can fragment
             the intersecting region and result in more than one returned
             interval.
    """
    # a little sanity check here to make sure intersecting_region really does..
    if not self.intersects(intersecting_region):
        raise RetrotransposonError("trying to lift " + str(intersecting_region) +
                                   " from genomic to transposon coordinates " +
                                   "in " + str(self) + ", but it doesn't " +
                                   "intersect!")
    if self.pairwise_alignment is not None:
        return self.pairwise_alignment.liftover(self.chrom, self.repeat_name(),
                                                intersecting_region.start,
                                                intersecting_region.end,
                                                trim=True)
    return self.liftover_coordinates(intersecting_region)
[ "def", "liftover", "(", "self", ",", "intersecting_region", ")", ":", "# a little sanity check here to make sure intersecting_region really does..", "if", "not", "self", ".", "intersects", "(", "intersecting_region", ")", ":", "raise", "RetrotransposonError", "(", "\"trying to lift \"", "+", "str", "(", "intersecting_region", ")", "+", "\" from genomic to transposon coordinates \"", "+", "\"in \"", "+", "str", "(", "self", ")", "+", "\", but it doesn't \"", "+", "\"intersect!\"", ")", "if", "self", ".", "pairwise_alignment", "is", "not", "None", ":", "return", "self", ".", "pairwise_alignment", ".", "liftover", "(", "self", ".", "chrom", ",", "self", ".", "repeat_name", "(", ")", ",", "intersecting_region", ".", "start", ",", "intersecting_region", ".", "end", ",", "trim", "=", "True", ")", "return", "self", ".", "liftover_coordinates", "(", "intersecting_region", ")" ]
Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence co-ordinates. This method will behave differently
depending on whether this retrotransposon occurrance contains a full
alignment or not. If it does, the alignment is used to do the liftover and
an exact result is provided. If it does not, the coordinates are used to do
the liftover, padding either the genomic region or consensus sequence
(whichever is shorter) with equally spaced gaps to make the size of both
match.

:param intersecting_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
         deletion of part of the retrotransposon can fragment the
         intersecting region and result in more than one returned interval.
[ "Lift", "a", "region", "that", "overlaps", "the", "genomic", "occurrence", "of", "the", "retrotransposon", "to", "consensus", "sequence", "co", "-", "ordinates", ".", "This", "method", "will", "behave", "differently", "depending", "on", "whether", "this", "retrotransposon", "occurrance", "contains", "a", "full", "alignment", "or", "not", ".", "If", "it", "does", "the", "alignment", "is", "used", "to", "do", "the", "liftover", "and", "an", "exact", "result", "is", "provided", ".", "If", "it", "does", "not", "the", "coordinates", "are", "used", "to", "do", "the", "liftover", "padding", "either", "the", "genomic", "region", "or", "consensus", "sequence", "(", "whichever", "is", "shorter", ")", "with", "equally", "spaced", "gaps", "to", "make", "the", "size", "of", "both", "match", "." ]
python
train
55.7
OpenTreeOfLife/peyotl
peyotl/nexson_proxy.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_proxy.py#L25-L49
def otu_iter_nexson_proxy(nexson_proxy, otu_sort=None):
    """otu_sort can be None (not sorted or stable), True (sorted by ID
    lexigraphically) or a key function for a sort function on list of otuIDs

    Note that if there are multiple OTU groups, the NexSON specifies the
    order of sorting of the groups (so the sort argument here only refers
    to the sorting of OTUs within a group)
    """
    nexml_el = nexson_proxy._nexml_el
    og_order = nexml_el['^ot:otusElementOrder']
    ogd = nexml_el['otusById']
    for og_id in og_order:
        og = ogd[og_id]
        if otu_sort is None:
            for k, v in og:
                yield nexson_proxy._create_otu_proxy(k, v)
        else:
            key_list = list(og.keys())
            if otu_sort is True:
                key_list.sort()
            else:
                key_list.sort(key=otu_sort)
            for k in key_list:
                v = og[k]
                yield nexson_proxy._create_otu_proxy(k, v)
[ "def", "otu_iter_nexson_proxy", "(", "nexson_proxy", ",", "otu_sort", "=", "None", ")", ":", "nexml_el", "=", "nexson_proxy", ".", "_nexml_el", "og_order", "=", "nexml_el", "[", "'^ot:otusElementOrder'", "]", "ogd", "=", "nexml_el", "[", "'otusById'", "]", "for", "og_id", "in", "og_order", ":", "og", "=", "ogd", "[", "og_id", "]", "if", "otu_sort", "is", "None", ":", "for", "k", ",", "v", "in", "og", ":", "yield", "nexson_proxy", ".", "_create_otu_proxy", "(", "k", ",", "v", ")", "else", ":", "key_list", "=", "list", "(", "og", ".", "keys", "(", ")", ")", "if", "otu_sort", "is", "True", ":", "key_list", ".", "sort", "(", ")", "else", ":", "key_list", ".", "sort", "(", "key", "=", "otu_sort", ")", "for", "k", "in", "key_list", ":", "v", "=", "og", "[", "k", "]", "yield", "nexson_proxy", ".", "_create_otu_proxy", "(", "k", ",", "v", ")" ]
otu_sort can be None (not sorted or stable), True (sorted by ID
lexigraphically) or a key function for a sort function on list of otuIDs

Note that if there are multiple OTU groups, the NexSON specifies the order
of sorting of the groups (so the sort argument here only refers to the
sorting of OTUs within a group)
[ "otu_sort", "can", "be", "None", "(", "not", "sorted", "or", "stable", ")", "True", "(", "sorted", "by", "ID", "lexigraphically", ")", "or", "a", "key", "function", "for", "a", "sort", "function", "on", "list", "of", "otuIDs" ]
python
train
38.64
jeremymcrae/denovonear
denovonear/ensembl_requester.py
https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/ensembl_requester.py#L346-L366
def get_cds_ranges_for_transcript(self, transcript_id):
    """ obtain the sequence for a transcript from ensembl
    """
    headers = {"content-type": "application/json"}

    self.attempt = 0
    ext = "/overlap/id/{}?feature=cds".format(transcript_id)
    r = self.ensembl_request(ext, headers)

    cds_ranges = []
    for cds_range in json.loads(r):
        if cds_range["Parent"] != transcript_id:
            continue
        start = cds_range["start"]
        end = cds_range["end"]
        cds_ranges.append((start, end))

    return cds_ranges
[ "def", "get_cds_ranges_for_transcript", "(", "self", ",", "transcript_id", ")", ":", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "self", ".", "attempt", "=", "0", "ext", "=", "\"/overlap/id/{}?feature=cds\"", ".", "format", "(", "transcript_id", ")", "r", "=", "self", ".", "ensembl_request", "(", "ext", ",", "headers", ")", "cds_ranges", "=", "[", "]", "for", "cds_range", "in", "json", ".", "loads", "(", "r", ")", ":", "if", "cds_range", "[", "\"Parent\"", "]", "!=", "transcript_id", ":", "continue", "start", "=", "cds_range", "[", "\"start\"", "]", "end", "=", "cds_range", "[", "\"end\"", "]", "cds_ranges", ".", "append", "(", "(", "start", ",", "end", ")", ")", "return", "cds_ranges" ]
obtain the sequence for a transcript from ensembl
[ "obtain", "the", "sequence", "for", "a", "transcript", "from", "ensembl" ]
python
train
30.904762
amaas-fintech/amaas-core-sdk-python
amaascore/market_data/interface.py
https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/market_data/interface.py#L254-L275
def last_available_business_date(self, asset_manager_id, asset_ids,
                                 page_no=None, page_size=None):
    """
    Returns the last available business date for the assets
    so we know the starting date for new data which needs
    to be downloaded from data providers.

    This method can only be invoked by system user
    """
    self.logger.info('Retrieving last available business dates for assets')
    url = '%s/last-available-business-date' % self.endpoint
    params = {'asset_manager_ids': [asset_manager_id],
              'asset_ids': ','.join(asset_ids)}
    if page_no:
        params['page_no'] = page_no
    if page_size:
        params['page_size'] = page_size
    response = self.session.get(url, params=params)
    if response.ok:
        self.logger.info("Received %s assets' last available business date",
                         len(response.json()))
        return response.json()
    else:
        self.logger.error(response.text)
        response.raise_for_status()
[ "def", "last_available_business_date", "(", "self", ",", "asset_manager_id", ",", "asset_ids", ",", "page_no", "=", "None", ",", "page_size", "=", "None", ")", ":", "self", ".", "logger", ".", "info", "(", "'Retrieving last available business dates for assets'", ")", "url", "=", "'%s/last-available-business-date'", "%", "self", ".", "endpoint", "params", "=", "{", "'asset_manager_ids'", ":", "[", "asset_manager_id", "]", ",", "'asset_ids'", ":", "','", ".", "join", "(", "asset_ids", ")", "}", "if", "page_no", ":", "params", "[", "'page_no'", "]", "=", "page_no", "if", "page_size", ":", "params", "[", "'page_size'", "]", "=", "page_size", "response", "=", "self", ".", "session", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "response", ".", "ok", ":", "self", ".", "logger", ".", "info", "(", "\"Received %s assets' last available business date\"", ",", "len", "(", "response", ".", "json", "(", ")", ")", ")", "return", "response", ".", "json", "(", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "response", ".", "text", ")", "response", ".", "raise_for_status", "(", ")" ]
Returns the last available business date for the assets so we know the starting date for new data which needs to be downloaded from data providers. This method can only be invoked by system user
[ "Returns", "the", "last", "available", "business", "date", "for", "the", "assets", "so", "we", "know", "the", "starting", "date", "for", "new", "data", "which", "needs", "to", "be", "downloaded", "from", "data", "providers", ".", "This", "method", "can", "only", "be", "invoked", "by", "system", "user" ]
python
train
46.727273
saltstack/salt
salt/states/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1492-L1499
def _get_pattern_for_schema(self, schema_name, httpStatus):
    '''
    returns the pattern specified in a response schema
    '''
    defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
    model = self._models().get(schema_name)
    patterns = self._find_patterns(model)
    return patterns[0] if patterns else defaultPattern
[ "def", "_get_pattern_for_schema", "(", "self", ",", "schema_name", ",", "httpStatus", ")", ":", "defaultPattern", "=", "'.+'", "if", "self", ".", "_is_http_error_rescode", "(", "httpStatus", ")", "else", "'.*'", "model", "=", "self", ".", "_models", "(", ")", ".", "get", "(", "schema_name", ")", "patterns", "=", "self", ".", "_find_patterns", "(", "model", ")", "return", "patterns", "[", "0", "]", "if", "patterns", "else", "defaultPattern" ]
returns the pattern specified in a response schema
[ "returns", "the", "pattern", "specified", "in", "a", "response", "schema" ]
python
train
46.375
njouanin/repool
repool/pool.py
https://github.com/njouanin/repool/blob/27102cf84cb382c0b2d935f8b8651aa7f8c2777e/repool/pool.py#L110-L116
def release(self, conn):
    """Release a previously acquired connection.
    The connection is put back into the pool."""
    self._pool_lock.acquire()
    self._pool.put(ConnectionWrapper(self._pool, conn))
    self._current_acquired -= 1
    self._pool_lock.release()
[ "def", "release", "(", "self", ",", "conn", ")", ":", "self", ".", "_pool_lock", ".", "acquire", "(", ")", "self", ".", "_pool", ".", "put", "(", "ConnectionWrapper", "(", "self", ".", "_pool", ",", "conn", ")", ")", "self", ".", "_current_acquired", "-=", "1", "self", ".", "_pool_lock", ".", "release", "(", ")" ]
Release a previously acquired connection. The connection is put back into the pool.
[ "Release", "a", "previously", "acquired", "connection", ".", "The", "connection", "is", "put", "back", "into", "the", "pool", "." ]
python
train
41.142857
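The docstring above implies an acquire/release pairing; a sketch of the calling discipline, assuming repool's `ConnectionPool` exposes a matching `acquire` (the constructor arguments below are illustrative, not verified):

# Hypothetical pairing: always release in finally so the slot is returned.
from repool import ConnectionPool

pool = ConnectionPool(host='localhost', port=28015)  # assumed arguments
conn = pool.acquire()
try:
    pass  # run queries against conn here
finally:
    pool.release(conn)  # wraps conn and puts it back on the internal queue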
softlayer/softlayer-python
SoftLayer/CLI/file/snapshot/cancel.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/file/snapshot/cancel.py#L20-L40
def cli(env, volume_id, reason, immediate):
    """Cancel existing snapshot space for a given volume."""
    file_storage_manager = SoftLayer.FileStorageManager(env.client)

    if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
        raise exceptions.CLIAbort('Aborted')

    cancelled = file_storage_manager.cancel_snapshot_space(
        volume_id, reason, immediate)

    if cancelled:
        if immediate:
            click.echo('File volume with id %s has been marked'
                       ' for immediate snapshot cancellation' % volume_id)
        else:
            click.echo('File volume with id %s has been marked'
                       ' for snapshot cancellation' % volume_id)
    else:
        click.echo('Unable to cancel snapshot space for file volume %s'
                   % volume_id)
[ "def", "cli", "(", "env", ",", "volume_id", ",", "reason", ",", "immediate", ")", ":", "file_storage_manager", "=", "SoftLayer", ".", "FileStorageManager", "(", "env", ".", "client", ")", "if", "not", "(", "env", ".", "skip_confirmations", "or", "formatting", ".", "no_going_back", "(", "volume_id", ")", ")", ":", "raise", "exceptions", ".", "CLIAbort", "(", "'Aborted'", ")", "cancelled", "=", "file_storage_manager", ".", "cancel_snapshot_space", "(", "volume_id", ",", "reason", ",", "immediate", ")", "if", "cancelled", ":", "if", "immediate", ":", "click", ".", "echo", "(", "'File volume with id %s has been marked'", "' for immediate snapshot cancellation'", "%", "volume_id", ")", "else", ":", "click", ".", "echo", "(", "'File volume with id %s has been marked'", "' for snapshot cancellation'", "%", "volume_id", ")", "else", ":", "click", ".", "echo", "(", "'Unable to cancel snapshot space for file volume %s'", "%", "volume_id", ")" ]
Cancel existing snapshot space for a given volume.
[ "Cancel", "existing", "snapshot", "space", "for", "a", "given", "volume", "." ]
python
train
38.619048
ska-sa/katcp-python
bench/benchserver.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/bench/benchserver.py#L17-L22
def request_add_sensor(self, sock, msg):
    """ add a sensor """
    self.add_sensor(Sensor(int, 'int_sensor%d' % len(self._sensors),
                           'descr', 'unit', params=[-10, 10]))
    return Message.reply('add-sensor', 'ok')
[ "def", "request_add_sensor", "(", "self", ",", "sock", ",", "msg", ")", ":", "self", ".", "add_sensor", "(", "Sensor", "(", "int", ",", "'int_sensor%d'", "%", "len", "(", "self", ".", "_sensors", ")", ",", "'descr'", ",", "'unit'", ",", "params", "=", "[", "-", "10", ",", "10", "]", ")", ")", "return", "Message", ".", "reply", "(", "'add-sensor'", ",", "'ok'", ")" ]
add a sensor
[ "add", "a", "sensor" ]
python
train
43.5
gambogi/CSHLDAP
CSHLDAP.py
https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L314-L324
def fullName(self):
    """ Returns a reliable full name (firstName lastName)
    for every member (as of the writing of this comment.) """
    if self.givenName and self.sn:
        return "{0} {1}".format(self.givenName, self.sn)
    if self.givenName:
        return self.givenName
    if self.sn:
        return self.sn
    return self.uid
[ "def", "fullName", "(", "self", ")", ":", "if", "self", ".", "givenName", "and", "self", ".", "sn", ":", "return", "\"{0} {1}\"", ".", "format", "(", "self", ".", "givenName", ",", "self", ".", "sn", ")", "if", "self", ".", "givenName", ":", "return", "self", ".", "givenName", "if", "self", ".", "sn", ":", "return", "self", ".", "sn", "return", "self", ".", "uid" ]
Returns a reliable full name (firstName lastName) for every member (as of the writing of this comment.)
[ "Returns", "a", "reliable", "full", "name", "(", "firstName", "lastName", ")", "for", "every", "member", "(", "as", "of", "the", "writing", "of", "this", "comment", ".", ")" ]
python
train
34.636364
PeerAssets/pypeerassets
pypeerassets/card_parsers.py
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/card_parsers.py#L15-L23
def custom_parser(cards: list,
                  parser: Optional[Callable[[list], Optional[list]]]=None) -> Optional[list]:
    '''parser for CUSTOM [1] issue mode,
    please provide your custom parser as argument'''

    if not parser:
        return cards
    else:
        return parser(cards)
[ "def", "custom_parser", "(", "cards", ":", "list", ",", "parser", ":", "Optional", "[", "Callable", "[", "[", "list", "]", ",", "Optional", "[", "list", "]", "]", "]", "=", "None", ")", "->", "Optional", "[", "list", "]", ":", "if", "not", "parser", ":", "return", "cards", "else", ":", "return", "parser", "(", "cards", ")" ]
parser for CUSTOM [1] issue mode, please provide your custom parser as argument
[ "parser", "for", "CUSTOM", "[", "1", "]", "issue", "mode", "please", "provide", "your", "custom", "parser", "as", "argument" ]
python
train
30.333333
log2timeline/dfvfs
dfvfs/file_io/fake_file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/fake_file_io.py#L62-L97
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))

    if self._file_data is None or self._current_offset >= self._size:
        return b''

    if size is None:
        size = self._size
    if self._current_offset + size > self._size:
        size = self._size - self._current_offset

    start_offset = self._current_offset
    self._current_offset += size
    return self._file_data[start_offset:self._current_offset]
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "_is_open", ":", "raise", "IOError", "(", "'Not opened.'", ")", "if", "self", ".", "_current_offset", "<", "0", ":", "raise", "IOError", "(", "'Invalid current offset: {0:d} value less than zero.'", ".", "format", "(", "self", ".", "_current_offset", ")", ")", "if", "self", ".", "_file_data", "is", "None", "or", "self", ".", "_current_offset", ">=", "self", ".", "_size", ":", "return", "b''", "if", "size", "is", "None", ":", "size", "=", "self", ".", "_size", "if", "self", ".", "_current_offset", "+", "size", ">", "self", ".", "_size", ":", "size", "=", "self", ".", "_size", "-", "self", ".", "_current_offset", "start_offset", "=", "self", ".", "_current_offset", "self", ".", "_current_offset", "+=", "size", "return", "self", ".", "_file_data", "[", "start_offset", ":", "self", ".", "_current_offset", "]" ]
Reads a byte string from the file-like object at the current offset.

The function will read a byte string of the specified size or
all of the remaining data if no size was specified.

Args:
  size (Optional[int]): number of bytes to read, where None is all
      remaining data.

Returns:
  bytes: data read.

Raises:
  IOError: if the read failed.
  OSError: if the read failed.
[ "Reads", "a", "byte", "string", "from", "the", "file", "-", "like", "object", "at", "the", "current", "offset", "." ]
python
train
28.277778
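The clamping logic in `read` above (never return data past the end, then advance the offset by exactly what was returned) is independent of dfvfs; a self-contained sketch of the same pattern:

# Standalone illustration of the clamped-read pattern used above.
class BytesReader(object):
    def __init__(self, data):
        self._data = data
        self._offset = 0

    def read(self, size=None):
        remaining = len(self._data) - self._offset
        if remaining <= 0:
            return b''  # at or past end-of-data
        if size is None or size > remaining:
            size = remaining  # clamp to what is left
        start = self._offset
        self._offset += size
        return self._data[start:self._offset]

reader = BytesReader(b'abcdef')
assert reader.read(4) == b'abcd'
assert reader.read() == b'ef'
assert reader.read(10) == b''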
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L477-L488
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
    """
    Takes a list of rows and a column and returns the most frequent value
    under that column in those rows.
    """
    most_frequent_list = self._get_most_frequent_values(rows, column)
    if not most_frequent_list:
        return 0.0  # type: ignore
    most_frequent_value = most_frequent_list[0]
    if not isinstance(most_frequent_value, Number):
        raise ExecutionError(f"Invalid valus for mode_number: {most_frequent_value}")
    return most_frequent_value
[ "def", "mode_number", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ",", "column", ":", "NumberColumn", ")", "->", "Number", ":", "most_frequent_list", "=", "self", ".", "_get_most_frequent_values", "(", "rows", ",", "column", ")", "if", "not", "most_frequent_list", ":", "return", "0.0", "# type: ignore", "most_frequent_value", "=", "most_frequent_list", "[", "0", "]", "if", "not", "isinstance", "(", "most_frequent_value", ",", "Number", ")", ":", "raise", "ExecutionError", "(", "f\"Invalid valus for mode_number: {most_frequent_value}\"", ")", "return", "most_frequent_value" ]
Takes a list of rows and a column and returns the most frequent value under that column in those rows.
[ "Takes", "a", "list", "of", "rows", "and", "a", "column", "and", "returns", "the", "most", "frequent", "value", "under", "that", "column", "in", "those", "rows", "." ]
python
train
48.666667
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_wp.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_wp.py#L60-L64
def wploader(self):
    '''per-sysid wploader'''
    if self.target_system not in self.wploader_by_sysid:
        self.wploader_by_sysid[self.target_system] = mavwp.MAVWPLoader()
    return self.wploader_by_sysid[self.target_system]
[ "def", "wploader", "(", "self", ")", ":", "if", "self", ".", "target_system", "not", "in", "self", ".", "wploader_by_sysid", ":", "self", ".", "wploader_by_sysid", "[", "self", ".", "target_system", "]", "=", "mavwp", ".", "MAVWPLoader", "(", ")", "return", "self", ".", "wploader_by_sysid", "[", "self", ".", "target_system", "]" ]
per-sysid wploader
[ "per", "-", "sysid", "wploader" ]
python
train
48.8
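The lazy per-key construction in `wploader` above is a general pattern; an equivalent standalone sketch (unrelated to MAVProxy), with a note on the usual trade-off:

# Standalone equivalent of the per-sysid lazy loader above.
loaders = {}

def loader_for(sysid):
    # setdefault builds the default eagerly on every call; prefer an
    # explicit membership check (as the original does) when the
    # constructed object is expensive.
    return loaders.setdefault(sysid, [])

assert loader_for(1) is loader_for(1)  # repeat access returns the same object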
quantumlib/Cirq
cirq/ops/raw_types.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/ops/raw_types.py#L249-L261
def transform_qubits(self: TSelf_Operation,
                     func: Callable[[Qid], Qid]) -> TSelf_Operation:
    """Returns the same operation, but with different qubits.

    Args:
        func: The function to use to turn each current qubit into a desired
            new qubit.

    Returns:
        The receiving operation but with qubits transformed by the given
        function.
    """
    return self.with_qubits(*(func(q) for q in self.qubits))
[ "def", "transform_qubits", "(", "self", ":", "TSelf_Operation", ",", "func", ":", "Callable", "[", "[", "Qid", "]", ",", "Qid", "]", ")", "->", "TSelf_Operation", ":", "return", "self", ".", "with_qubits", "(", "*", "(", "func", "(", "q", ")", "for", "q", "in", "self", ".", "qubits", ")", ")" ]
Returns the same operation, but with different qubits.

Args:
    func: The function to use to turn each current qubit into a desired
        new qubit.

Returns:
    The receiving operation but with qubits transformed by the given
    function.
[ "Returns", "the", "same", "operation", "but", "with", "different", "qubits", "." ]
python
train
37.692308
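A usage sketch for `transform_qubits`, assuming Cirq's public `LineQubit` API; the shift-by-one remapping is arbitrary:

# Hypothetical usage: remap a CNOT onto shifted line qubits.
import cirq

q0, q1 = cirq.LineQubit.range(2)
op = cirq.CNOT(q0, q1)
shifted = op.transform_qubits(lambda q: cirq.LineQubit(q.x + 1))
# shifted now acts on LineQubit(1) and LineQubit(2)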
hasgeek/coaster
coaster/sqlalchemy/mixins.py
https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/mixins.py#L605-L608
def make_name(self):
    """Autogenerates a :attr:`name` from :attr:`title_for_name`"""
    if self.title:
        self.name = six.text_type(make_name(self.title_for_name,
                                            maxlength=self.__name_length__))
[ "def", "make_name", "(", "self", ")", ":", "if", "self", ".", "title", ":", "self", ".", "name", "=", "six", ".", "text_type", "(", "make_name", "(", "self", ".", "title_for_name", ",", "maxlength", "=", "self", ".", "__name_length__", ")", ")" ]
Autogenerates a :attr:`name` from :attr:`title_for_name`
[ "Autogenerates", "a", ":", "attr", ":", "name", "from", ":", "attr", ":", "title_for_name" ]
python
train
53.25
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L1014-L1073
def coerce_to_target_dtype(self, other):
    """
    coerce the current block to a dtype compat for other
    we will return a block, possibly object, and not raise

    we can also safely try to coerce to the same dtype
    and will receive the same block
    """
    # if we cannot then coerce to object
    dtype, _ = infer_dtype_from(other, pandas_dtype=True)

    if is_dtype_equal(self.dtype, dtype):
        return self

    if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
        # we don't upcast to bool
        return self.astype(object)

    elif ((self.is_float or self.is_complex) and
          (is_integer_dtype(dtype) or is_float_dtype(dtype))):
        # don't coerce float/complex to int
        return self

    elif (self.is_datetime or
          is_datetime64_dtype(dtype) or
          is_datetime64tz_dtype(dtype)):

        # not a datetime
        if not ((is_datetime64_dtype(dtype) or
                 is_datetime64tz_dtype(dtype)) and self.is_datetime):
            return self.astype(object)

        # don't upcast timezone with different timezone or no timezone
        mytz = getattr(self.dtype, 'tz', None)
        othertz = getattr(dtype, 'tz', None)

        if str(mytz) != str(othertz):
            return self.astype(object)

        raise AssertionError("possible recursion in "
                             "coerce_to_target_dtype: {} {}".format(
                                 self, other))

    elif (self.is_timedelta or is_timedelta64_dtype(dtype)):

        # not a timedelta
        if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
            return self.astype(object)

        raise AssertionError("possible recursion in "
                             "coerce_to_target_dtype: {} {}".format(
                                 self, other))

    try:
        return self.astype(dtype)
    except (ValueError, TypeError, OverflowError):
        pass

    return self.astype(object)
[ "def", "coerce_to_target_dtype", "(", "self", ",", "other", ")", ":", "# if we cannot then coerce to object", "dtype", ",", "_", "=", "infer_dtype_from", "(", "other", ",", "pandas_dtype", "=", "True", ")", "if", "is_dtype_equal", "(", "self", ".", "dtype", ",", "dtype", ")", ":", "return", "self", "if", "self", ".", "is_bool", "or", "is_object_dtype", "(", "dtype", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we don't upcast to bool", "return", "self", ".", "astype", "(", "object", ")", "elif", "(", "(", "self", ".", "is_float", "or", "self", ".", "is_complex", ")", "and", "(", "is_integer_dtype", "(", "dtype", ")", "or", "is_float_dtype", "(", "dtype", ")", ")", ")", ":", "# don't coerce float/complex to int", "return", "self", "elif", "(", "self", ".", "is_datetime", "or", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ")", ":", "# not a datetime", "if", "not", "(", "(", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ")", "and", "self", ".", "is_datetime", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "# don't upcast timezone with different timezone or no timezone", "mytz", "=", "getattr", "(", "self", ".", "dtype", ",", "'tz'", ",", "None", ")", "othertz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "if", "str", "(", "mytz", ")", "!=", "str", "(", "othertz", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "raise", "AssertionError", "(", "\"possible recursion in \"", "\"coerce_to_target_dtype: {} {}\"", ".", "format", "(", "self", ",", "other", ")", ")", "elif", "(", "self", ".", "is_timedelta", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "# not a timedelta", "if", "not", "(", "is_timedelta64_dtype", "(", "dtype", ")", "and", "self", ".", "is_timedelta", ")", ":", "return", "self", ".", "astype", "(", "object", ")", "raise", "AssertionError", "(", "\"possible recursion in \"", "\"coerce_to_target_dtype: {} {}\"", ".", "format", "(", "self", ",", "other", ")", ")", "try", ":", "return", "self", ".", "astype", "(", "dtype", ")", "except", "(", "ValueError", ",", "TypeError", ",", "OverflowError", ")", ":", "pass", "return", "self", ".", "astype", "(", "object", ")" ]
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise

we can also safely try to coerce to the same dtype
and will receive the same block
[ "coerce", "the", "current", "block", "to", "a", "dtype", "compat", "for", "other", "we", "will", "return", "a", "block", "possibly", "object", "and", "not", "raise" ]
python
train
34.6
aleju/imgaug
imgaug/augmenters/contrast.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/contrast.py#L501-L552
def LinearContrast(alpha=1, per_channel=False,
                   name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or
        invert (<0.0) the difference between each pixel value and the
        center value, e.g. ``127`` for ``uint8``.

            * If a number, then that value will be used for all images.
            * If a tuple ``(a, b)``, then a value from the range ``[a, b]``
              will be used per image.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, then a value will be sampled per
              image from that parameter.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample
        a new value for each channel (True). If this value is a float ``p``,
        then for ``p`` percent of all images `per_channel` will be treated
        as True, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform contrast adjustment by linearly scaling the
        distance to 128.

    """
    params1d = [
        iap.handle_continuous_param(alpha, "alpha", value_range=None,
                                    tuple_to_uniform=True,
                                    list_to_choice=True)
    ]
    func = adjust_contrast_linear

    return _ContrastFuncWrapper(
        func, params1d, per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32",
                        "int8", "int16", "int32",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["uint64", "int64",
                           "float96", "float128", "float256",
                           "bool"],
        name=name if name is not None else ia.caller_name(),
        deterministic=deterministic,
        random_state=random_state
    )
[ "def", "LinearContrast", "(", "alpha", "=", "1", ",", "per_channel", "=", "False", ",", "name", "=", "None", ",", "deterministic", "=", "False", ",", "random_state", "=", "None", ")", ":", "params1d", "=", "[", "iap", ".", "handle_continuous_param", "(", "alpha", ",", "\"alpha\"", ",", "value_range", "=", "None", ",", "tuple_to_uniform", "=", "True", ",", "list_to_choice", "=", "True", ")", "]", "func", "=", "adjust_contrast_linear", "return", "_ContrastFuncWrapper", "(", "func", ",", "params1d", ",", "per_channel", ",", "dtypes_allowed", "=", "[", "\"uint8\"", ",", "\"uint16\"", ",", "\"uint32\"", ",", "\"int8\"", ",", "\"int16\"", ",", "\"int32\"", ",", "\"float16\"", ",", "\"float32\"", ",", "\"float64\"", "]", ",", "dtypes_disallowed", "=", "[", "\"uint64\"", ",", "\"int64\"", ",", "\"float96\"", ",", "\"float128\"", ",", "\"float256\"", ",", "\"bool\"", "]", ",", "name", "=", "name", "if", "name", "is", "not", "None", "else", "ia", ".", "caller_name", "(", ")", ",", "deterministic", "=", "deterministic", ",", "random_state", "=", "random_state", ")" ]
Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

dtype support::

    See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.

Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
    Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or
    invert (<0.0) the difference between each pixel value and the center
    value, e.g. ``127`` for ``uint8``.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the range ``[a, b]``
          will be used per image.
        * If a list, then a random value will be sampled from that list
          per image.
        * If a StochasticParameter, then a value will be sampled per
          image from that parameter.

per_channel : bool or float, optional
    Whether to use the same value for all channels (False) or to sample
    a new value for each channel (True). If this value is a float ``p``,
    then for ``p`` percent of all images `per_channel` will be treated
    as True, otherwise as False.

name : None or str, optional
    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

deterministic : bool, optional
    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

random_state : None or int or numpy.random.RandomState, optional
    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

Returns
-------
_ContrastFuncWrapper
    Augmenter to perform contrast adjustment by linearly scaling the
    distance to 128.
[ "Adjust", "contrast", "by", "scaling", "each", "pixel", "value", "to", "127", "+", "alpha", "*", "(", "I_ij", "-", "127", ")", "." ]
python
valid
44.480769
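A usage sketch for the augmenter above, assuming imgaug's top-level `augmenters` namespace; the alpha range and image shape are illustrative:

# Hypothetical usage: per-image contrast multiplier sampled from [0.75, 1.5].
import numpy as np
import imgaug.augmenters as iaa

aug = iaa.LinearContrast(alpha=(0.75, 1.5), per_channel=0.25)
images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
augmented = aug.augment_images(images)  # each image gets its own sampled alpha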
rackerlabs/timid
timid/environment.py
https://github.com/rackerlabs/timid/blob/b1c6aa159ab380a033740f4aa392cf0d125e0ac6/timid/environment.py#L399-L411
def declare_list(self, name, sep=os.pathsep):
    """
    Declare an environment variable as a list-like special
    variable.  This can be used even if the environment variable is
    not present.

    :param name: The name of the environment variable that should
                 be considered list-like.
    :param sep: The separator to be used.  Defaults to the value of
                ``os.pathsep``.
    """
    self._declare_special(name, sep, ListVariable)
[ "def", "declare_list", "(", "self", ",", "name", ",", "sep", "=", "os", ".", "pathsep", ")", ":", "self", ".", "_declare_special", "(", "name", ",", "sep", ",", "ListVariable", ")" ]
Declare an environment variable as a list-like special
variable.  This can be used even if the environment variable is
not present.

:param name: The name of the environment variable that should
             be considered list-like.
:param sep: The separator to be used.  Defaults to the value of
            ``os.pathsep``.
[ "Declare", "an", "environment", "variable", "as", "a", "list", "-", "like", "special", "variable", ".", "This", "can", "be", "used", "even", "if", "the", "environment", "variable", "is", "not", "present", "." ]
python
test
37.923077
scalative/haas
haas/result.py
https://github.com/scalative/haas/blob/72c05216a2a80e5ee94d9cd8d05ed2b188725027/haas/result.py#L376-L398
def _restore_stdout(self):
    """Unhook stdout and stderr if buffering is enabled.
    """
    if self.buffer:
        if self._mirror_output:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                self._original_stdout.write(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                self._original_stderr.write(STDERR_LINE % error)

        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr
        self._stdout_buffer.seek(0)
        self._stdout_buffer.truncate()
        self._stderr_buffer.seek(0)
        self._stderr_buffer.truncate()
[ "def", "_restore_stdout", "(", "self", ")", ":", "if", "self", ".", "buffer", ":", "if", "self", ".", "_mirror_output", ":", "output", "=", "sys", ".", "stdout", ".", "getvalue", "(", ")", "error", "=", "sys", ".", "stderr", ".", "getvalue", "(", ")", "if", "output", ":", "if", "not", "output", ".", "endswith", "(", "'\\n'", ")", ":", "output", "+=", "'\\n'", "self", ".", "_original_stdout", ".", "write", "(", "STDOUT_LINE", "%", "output", ")", "if", "error", ":", "if", "not", "error", ".", "endswith", "(", "'\\n'", ")", ":", "error", "+=", "'\\n'", "self", ".", "_original_stderr", ".", "write", "(", "STDERR_LINE", "%", "error", ")", "sys", ".", "stdout", "=", "self", ".", "_original_stdout", "sys", ".", "stderr", "=", "self", ".", "_original_stderr", "self", ".", "_stdout_buffer", ".", "seek", "(", "0", ")", "self", ".", "_stdout_buffer", ".", "truncate", "(", ")", "self", ".", "_stderr_buffer", ".", "seek", "(", "0", ")", "self", ".", "_stderr_buffer", ".", "truncate", "(", ")" ]
Unhook stdout and stderr if buffering is enabled.
[ "Unhook", "stdout", "and", "stderr", "if", "buffering", "is", "enabled", "." ]
python
train
37.391304
huntrar/scrape
scrape/scrape.py
https://github.com/huntrar/scrape/blob/bf877f6da5df3ed0f2bea60a95acf7df63c88002/scrape/scrape.py#L275-L289
def prompt_save_images(args):
    """Prompt user to save images when crawling (for pdf and HTML formats)."""
    if args['images'] or args['no_images']:
        return
    if (args['pdf'] or args['html']) and (args['crawl'] or args['crawl_all']):
        save_msg = ('Choosing to save images will greatly slow the'
                    ' crawling process.\nSave images anyways? (y/n): ')
        try:
            save_images = utils.confirm_input(input(save_msg))
        except (KeyboardInterrupt, EOFError):
            return
        args['images'] = save_images
        args['no_images'] = not save_images
[ "def", "prompt_save_images", "(", "args", ")", ":", "if", "args", "[", "'images'", "]", "or", "args", "[", "'no_images'", "]", ":", "return", "if", "(", "args", "[", "'pdf'", "]", "or", "args", "[", "'html'", "]", ")", "and", "(", "args", "[", "'crawl'", "]", "or", "args", "[", "'crawl_all'", "]", ")", ":", "save_msg", "=", "(", "'Choosing to save images will greatly slow the'", "' crawling process.\\nSave images anyways? (y/n): '", ")", "try", ":", "save_images", "=", "utils", ".", "confirm_input", "(", "input", "(", "save_msg", ")", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "return", "args", "[", "'images'", "]", "=", "save_images", "args", "[", "'no_images'", "]", "=", "not", "save_images" ]
Prompt user to save images when crawling (for pdf and HTML formats).
[ "Prompt", "user", "to", "save", "images", "when", "crawling", "(", "for", "pdf", "and", "HTML", "formats", ")", "." ]
python
train
39.733333
swharden/SWHLab
doc/oldcode/swhlab/core/plot.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/plot.py#L96-L152
def IV(abf, T1, T2, plotToo=True, color='b'):
    """
    Given two time points (seconds) return IV data.
    Optionally plots a fancy graph (with errorbars)
    Returns [[AV],[SD]] for the given range.
    """
    rangeData = abf.average_data([[T1, T2]])  # get the average data per sweep
    AV, SD = rangeData[:, 0, 0], rangeData[:, 0, 1]  # separate by average and SD
    Xs = abf.clampValues(T1)  # get clamp values at time point T1
    if plotToo:
        new(abf)  # do this so it's the right shape and size

        # plot the original sweep
        pylab.subplot(221)
        pylab.title("sweep data")
        pylab.xlabel("time (s)")
        pylab.ylabel("Measurement (%s)" % abf.units)
        sweep(abf, 'all', protocol=False)
        pylab.axis([None, None, np.min(rangeData) - 50, np.max(rangeData) + 50])
        pylab.axvspan(T1, T2, alpha=.1, color=color)  # share measurement region
        pylab.margins(0, .1)

        # plot the data zoomed in
        pylab.subplot(223)
        pylab.title("measurement region")
        pylab.xlabel("time (s)")
        pylab.ylabel("Measurement (%s)" % abf.units)
        sweep(abf, 'all', protocol=False)
        pylab.axis([T1 - .05, T2 + .05, np.min(rangeData) - 50, np.max(rangeData) + 50])
        pylab.axvspan(T1, T2, alpha=.1, color=color)  # share measurement region
        pylab.margins(0, .1)

        # plot the protocol
        pylab.subplot(222)
        pylab.title("protocol")
        pylab.xlabel("time (s)")
        pylab.ylabel("Command (%s)" % abf.unitsCommand)
        sweep(abf, 'all', protocol=True)
        pylab.axvspan(T1, T2, alpha=.1, color=color)  # share measurement region
        pylab.margins(0, .1)

        # plot the I/V
        pylab.subplot(224)
        pylab.grid(alpha=.5)
        pylab.title("command / measure relationship")
        pylab.xlabel("Command (%s)" % abf.unitsCommand)
        pylab.ylabel("Measurement (%s)" % abf.units)
        pylab.errorbar(Xs, AV, SD, capsize=0, marker='.', color=color)
        if abf.units == "pA":
            pylab.axhline(0, alpha=.5, lw=2, color='r', ls="--")
            pylab.axvline(-70, alpha=.5, lw=2, color='r', ls="--")
        else:
            pylab.axhline(-70, alpha=.5, lw=2, color='r', ls="--")
            pylab.axvline(0, alpha=.5, lw=2, color='r', ls="--")
        pylab.margins(.1, .1)
        annotate(abf)
    return AV, SD
[ "def", "IV", "(", "abf", ",", "T1", ",", "T2", ",", "plotToo", "=", "True", ",", "color", "=", "'b'", ")", ":", "rangeData", "=", "abf", ".", "average_data", "(", "[", "[", "T1", ",", "T2", "]", "]", ")", "#get the average data per sweep", "AV", ",", "SD", "=", "rangeData", "[", ":", ",", "0", ",", "0", "]", ",", "rangeData", "[", ":", ",", "0", ",", "1", "]", "#separate by average and SD", "Xs", "=", "abf", ".", "clampValues", "(", "T1", ")", "#get clamp values at time point T1", "if", "plotToo", ":", "new", "(", "abf", ")", "#do this so it's the right shape and size", "# plot the original sweep", "pylab", ".", "subplot", "(", "221", ")", "pylab", ".", "title", "(", "\"sweep data\"", ")", "pylab", ".", "xlabel", "(", "\"time (s)\"", ")", "pylab", ".", "ylabel", "(", "\"Measurement (%s)\"", "%", "abf", ".", "units", ")", "sweep", "(", "abf", ",", "'all'", ",", "protocol", "=", "False", ")", "pylab", ".", "axis", "(", "[", "None", ",", "None", ",", "np", ".", "min", "(", "rangeData", ")", "-", "50", ",", "np", ".", "max", "(", "rangeData", ")", "+", "50", "]", ")", "pylab", ".", "axvspan", "(", "T1", ",", "T2", ",", "alpha", "=", ".1", ",", "color", "=", "color", ")", "#share measurement region", "pylab", ".", "margins", "(", "0", ",", ".1", ")", "# plot the data zoomed in", "pylab", ".", "subplot", "(", "223", ")", "pylab", ".", "title", "(", "\"measurement region\"", ")", "pylab", ".", "xlabel", "(", "\"time (s)\"", ")", "pylab", ".", "ylabel", "(", "\"Measurement (%s)\"", "%", "abf", ".", "units", ")", "sweep", "(", "abf", ",", "'all'", ",", "protocol", "=", "False", ")", "pylab", ".", "axis", "(", "[", "T1", "-", ".05", ",", "T2", "+", ".05", ",", "np", ".", "min", "(", "rangeData", ")", "-", "50", ",", "np", ".", "max", "(", "rangeData", ")", "+", "50", "]", ")", "pylab", ".", "axvspan", "(", "T1", ",", "T2", ",", "alpha", "=", ".1", ",", "color", "=", "color", ")", "#share measurement region", "pylab", ".", "margins", "(", "0", ",", ".1", ")", "# plot the protocol", "pylab", ".", "subplot", "(", "222", ")", "pylab", ".", "title", "(", "\"protocol\"", ")", "pylab", ".", "xlabel", "(", "\"time (s)\"", ")", "pylab", ".", "ylabel", "(", "\"Command (%s)\"", "%", "abf", ".", "unitsCommand", ")", "sweep", "(", "abf", ",", "'all'", ",", "protocol", "=", "True", ")", "pylab", ".", "axvspan", "(", "T1", ",", "T2", ",", "alpha", "=", ".1", ",", "color", "=", "color", ")", "#share measurement region", "pylab", ".", "margins", "(", "0", ",", ".1", ")", "# plot the I/V", "pylab", ".", "subplot", "(", "224", ")", "pylab", ".", "grid", "(", "alpha", "=", ".5", ")", "pylab", ".", "title", "(", "\"command / measure relationship\"", ")", "pylab", ".", "xlabel", "(", "\"Command (%s)\"", "%", "abf", ".", "unitsCommand", ")", "pylab", ".", "ylabel", "(", "\"Measurement (%s)\"", "%", "abf", ".", "units", ")", "pylab", ".", "errorbar", "(", "Xs", ",", "AV", ",", "SD", ",", "capsize", "=", "0", ",", "marker", "=", "'.'", ",", "color", "=", "color", ")", "if", "abf", ".", "units", "==", "\"pA\"", ":", "pylab", ".", "axhline", "(", "0", ",", "alpha", "=", ".5", ",", "lw", "=", "2", ",", "color", "=", "'r'", ",", "ls", "=", "\"--\"", ")", "pylab", ".", "axvline", "(", "-", "70", ",", "alpha", "=", ".5", ",", "lw", "=", "2", ",", "color", "=", "'r'", ",", "ls", "=", "\"--\"", ")", "else", ":", "pylab", ".", "axhline", "(", "-", "70", ",", "alpha", "=", ".5", ",", "lw", "=", "2", ",", "color", "=", "'r'", ",", "ls", "=", "\"--\"", ")", "pylab", ".", "axvline", "(", "0", ",", "alpha", "=", ".5", ",", "lw", "=", 
"2", ",", "color", "=", "'r'", ",", "ls", "=", "\"--\"", ")", "pylab", ".", "margins", "(", ".1", ",", ".1", ")", "annotate", "(", "abf", ")", "return", "AV", ",", "SD" ]
Given two time points (seconds) return IV data.
Optionally plots a fancy graph (with errorbars)
Returns [[AV],[SD]] for the given range.
[ "Given", "two", "time", "points", "(", "seconds", ")", "return", "IV", "data", ".", "Optionally", "plots", "a", "fancy", "graph", "(", "with", "errorbars", ")", "Returns", "[[", "AV", "]", "[", "SD", "]]", "for", "the", "given", "range", "." ]
python
valid
38.912281
quantopian/pyfolio
pyfolio/plotting.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L1648-L1669
def show_worst_drawdown_periods(returns, top=5):
    """
    Prints information about the worst drawdown periods.

    Prints peak dates, valley dates, recovery dates, and net drawdowns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Amount of top drawdowns periods to plot (default 5).
    """
    drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
    utils.print_table(
        drawdown_df.sort_values('Net drawdown in %', ascending=False),
        name='Worst drawdown periods',
        float_format='{0:.2f}'.format,
    )
[ "def", "show_worst_drawdown_periods", "(", "returns", ",", "top", "=", "5", ")", ":", "drawdown_df", "=", "timeseries", ".", "gen_drawdown_table", "(", "returns", ",", "top", "=", "top", ")", "utils", ".", "print_table", "(", "drawdown_df", ".", "sort_values", "(", "'Net drawdown in %'", ",", "ascending", "=", "False", ")", ",", "name", "=", "'Worst drawdown periods'", ",", "float_format", "=", "'{0:.2f}'", ".", "format", ",", ")" ]
Prints information about the worst drawdown periods.

Prints peak dates, valley dates, recovery dates, and net drawdowns.

Parameters
----------
returns : pd.Series
    Daily returns of the strategy, noncumulative.
     - See full explanation in tears.create_full_tear_sheet.
top : int, optional
    Amount of top drawdowns periods to plot (default 5).
[ "Prints", "information", "about", "the", "worst", "drawdown", "periods", "." ]
python
valid
30.954545
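A usage sketch following the docstring's convention of a daily, noncumulative returns series; the series below is synthetic random data, not real strategy returns:

# Hypothetical usage on synthetic daily returns.
import numpy as np
import pandas as pd
from pyfolio import plotting

idx = pd.date_range('2020-01-01', periods=252, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
plotting.show_worst_drawdown_periods(returns, top=3)  # table of the 3 worst drawdowns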
Azure/azure-cosmos-python
azure/cosmos/cosmos_client.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L2011-L2034
def DeleteAttachment(self, attachment_link, options=None):
    """Deletes an attachment.

    :param str attachment_link:
        The link to the attachment.
    :param dict options:
        The request options for the request.

    :return:
        The deleted Attachment.
    :rtype:
        dict

    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(attachment_link)
    attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
    return self.DeleteResource(path, 'attachments', attachment_id, None, options)
[ "def", "DeleteAttachment", "(", "self", ",", "attachment_link", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "path", "=", "base", ".", "GetPathFromLink", "(", "attachment_link", ")", "attachment_id", "=", "base", ".", "GetResourceIdOrFullNameFromLink", "(", "attachment_link", ")", "return", "self", ".", "DeleteResource", "(", "path", ",", "'attachments'", ",", "attachment_id", ",", "None", ",", "options", ")" ]
Deletes an attachment.

:param str attachment_link: The link to the attachment.
:param dict options: The request options for the request.

:return: The deleted Attachment.
:rtype: dict
[ "Deletes", "an", "attachment", "." ]
python
train
30.625
ValvePython/vpk
vpk/__init__.py
https://github.com/ValvePython/vpk/blob/cc522fc7febbf53efa5d58fcd1ad2103dae37ac8/vpk/__init__.py#L375-L422
def read_index_iter(self):
    """Generator function that reads the file index from the vpk file

    yeilds (file_path, metadata)
    """
    with fopen(self.vpk_path, 'rb') as f:
        f.seek(self.header_length)

        while True:
            if self.version > 0 and f.tell() > self.tree_length + self.header_length:
                raise ValueError("Error parsing index (out of bounds)")

            ext = _read_cstring(f)
            if ext == '':
                break

            while True:
                path = _read_cstring(f)
                if path == '':
                    break
                if path != ' ':
                    path = os.path.join(path, '')
                else:
                    path = ''

                while True:
                    name = _read_cstring(f)
                    if name == '':
                        break

                    (crc32,
                     preload_length,
                     archive_index,
                     archive_offset,
                     file_length,
                     suffix,
                     ) = metadata = list(struct.unpack("IHHIIH", f.read(18)))

                    if suffix != 0xffff:
                        raise ValueError("Error while parsing index")

                    if archive_index == 0x7fff:
                        metadata[3] = self.header_length + self.tree_length + archive_offset

                    metadata = (f.read(preload_length),) + tuple(metadata[:-1])

                    yield path + name + '.' + ext, metadata
[ "def", "read_index_iter", "(", "self", ")", ":", "with", "fopen", "(", "self", ".", "vpk_path", ",", "'rb'", ")", "as", "f", ":", "f", ".", "seek", "(", "self", ".", "header_length", ")", "while", "True", ":", "if", "self", ".", "version", ">", "0", "and", "f", ".", "tell", "(", ")", ">", "self", ".", "tree_length", "+", "self", ".", "header_length", ":", "raise", "ValueError", "(", "\"Error parsing index (out of bounds)\"", ")", "ext", "=", "_read_cstring", "(", "f", ")", "if", "ext", "==", "''", ":", "break", "while", "True", ":", "path", "=", "_read_cstring", "(", "f", ")", "if", "path", "==", "''", ":", "break", "if", "path", "!=", "' '", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "''", ")", "else", ":", "path", "=", "''", "while", "True", ":", "name", "=", "_read_cstring", "(", "f", ")", "if", "name", "==", "''", ":", "break", "(", "crc32", ",", "preload_length", ",", "archive_index", ",", "archive_offset", ",", "file_length", ",", "suffix", ",", ")", "=", "metadata", "=", "list", "(", "struct", ".", "unpack", "(", "\"IHHIIH\"", ",", "f", ".", "read", "(", "18", ")", ")", ")", "if", "suffix", "!=", "0xffff", ":", "raise", "ValueError", "(", "\"Error while parsing index\"", ")", "if", "archive_index", "==", "0x7fff", ":", "metadata", "[", "3", "]", "=", "self", ".", "header_length", "+", "self", ".", "tree_length", "+", "archive_offset", "metadata", "=", "(", "f", ".", "read", "(", "preload_length", ")", ",", ")", "+", "tuple", "(", "metadata", "[", ":", "-", "1", "]", ")", "yield", "path", "+", "name", "+", "'.'", "+", "ext", ",", "metadata" ]
Generator function that reads the file index from the vpk file

        yields (file_path, metadata)
[ "Generator", "function", "that", "reads", "the", "file", "index", "from", "the", "vpk", "file" ]
python
train
34.291667
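The index walk above leans on a `_read_cstring` helper that is not shown; below is a minimal sketch of such a NUL-terminated string reader (the name and exact behavior here are assumptions, not the library's verbatim code):

import io

def read_cstring(f, encoding='utf-8'):
    # Read bytes until a NUL terminator (or EOF) and decode them;
    # a sketch of the helper the index parser above relies on.
    buf = bytearray()
    while True:
        b = f.read(1)
        if b in (b'', b'\x00'):  # EOF or terminator ends the string
            break
        buf += b
    return buf.decode(encoding)

# usage on an in-memory stream
stream = io.BytesIO(b'materials\x00vpk\x00')
assert read_cstring(stream) == 'materials'
assert read_cstring(stream) == 'vpk'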
ultrabug/py3status
py3status/module.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/module.py#L119-L130
def load_from_namespace(module_name): """ Load a py3status bundled module. """ class_inst = None name = "py3status.modules.{}".format(module_name) py_mod = __import__(name) components = name.split(".") for comp in components[1:]: py_mod = getattr(py_mod, comp) class_inst = py_mod.Py3status() return class_inst
[ "def", "load_from_namespace", "(", "module_name", ")", ":", "class_inst", "=", "None", "name", "=", "\"py3status.modules.{}\"", ".", "format", "(", "module_name", ")", "py_mod", "=", "__import__", "(", "name", ")", "components", "=", "name", ".", "split", "(", "\".\"", ")", "for", "comp", "in", "components", "[", "1", ":", "]", ":", "py_mod", "=", "getattr", "(", "py_mod", ",", "comp", ")", "class_inst", "=", "py_mod", ".", "Py3status", "(", ")", "return", "class_inst" ]
Load a py3status bundled module.
[ "Load", "a", "py3status", "bundled", "module", "." ]
python
train
32.583333
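The `__import__` plus attribute-walk idiom above is the classic way to reach a dotted submodule; `importlib.import_module` collapses it into one call. A hedged equivalent, assuming py3status is importable:

import importlib

def load_from_namespace(module_name):
    # import_module returns the leaf module directly,
    # so no getattr walk over the dotted path is needed
    py_mod = importlib.import_module("py3status.modules.{}".format(module_name))
    return py_mod.Py3status()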
yahoo/TensorFlowOnSpark
examples/cifar10/cifar10.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/cifar10/cifar10.py#L298-L322
def _add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.summary.scalar(l.op.name + ' (raw)', l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op
[ "def", "_add_loss_summaries", "(", "total_loss", ")", ":", "# Compute the moving average of all individual losses and the total loss.", "loss_averages", "=", "tf", ".", "train", ".", "ExponentialMovingAverage", "(", "0.9", ",", "name", "=", "'avg'", ")", "losses", "=", "tf", ".", "get_collection", "(", "'losses'", ")", "loss_averages_op", "=", "loss_averages", ".", "apply", "(", "losses", "+", "[", "total_loss", "]", ")", "# Attach a scalar summary to all individual losses and the total loss; do the", "# same for the averaged version of the losses.", "for", "l", "in", "losses", "+", "[", "total_loss", "]", ":", "# Name each loss as '(raw)' and name the moving average version of the loss", "# as the original loss name.", "tf", ".", "summary", ".", "scalar", "(", "l", ".", "op", ".", "name", "+", "' (raw)'", ",", "l", ")", "tf", ".", "summary", ".", "scalar", "(", "l", ".", "op", ".", "name", ",", "loss_averages", ".", "average", "(", "l", ")", ")", "return", "loss_averages_op" ]
Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses.
[ "Add", "summaries", "for", "losses", "in", "CIFAR", "-", "10", "model", "." ]
python
train
38.8
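Numerically, `tf.train.ExponentialMovingAverage(0.9)` maintains the update `shadow = decay * shadow + (1 - decay) * value` for each tracked loss; a pure-Python sketch of that update rule (not TensorFlow code):

# exponential moving average update, as applied per training step
decay = 0.9
shadow = 0.0
for value in [1.0, 1.0, 1.0]:
    shadow = decay * shadow + (1 - decay) * value
print(shadow)  # ~0.271; the average approaches the raw value slowly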
zqfang/GSEApy
gseapy/gsea.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L179-L204
def _download_libraries(self, libname):
        """ download enrichr libraries."""
        self._logger.info("Downloading and generating Enrichr library gene sets......")
        s = retry(5)
        # query string
        ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
        query_string = '?mode=text&libraryName=%s'
        # get
        response = s.get( ENRICHR_URL + query_string % libname, timeout=None)
        if not response.ok:
            raise Exception('Error fetching enrichment results, check internet connection first.')
        # reformat to dict and save to disk
        mkdirs(DEFAULT_CACHE_PATH)
        genesets_dict = {}
        outname = "enrichr.%s.gmt"%libname
        gmtout = open(os.path.join(DEFAULT_CACHE_PATH, outname), "w")
        for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8'):
            line=line.strip()
            k = line.split("\t")[0]
            v = list(map(lambda x: x.split(",")[0], line.split("\t")[2:]))
            genesets_dict.update({ k: v})
            outline = "%s\t\t%s\n"%(k, "\t".join(v))
            gmtout.write(outline)
        gmtout.close()
        return genesets_dict
[ "def", "_download_libraries", "(", "self", ",", "libname", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Downloading and generating Enrichr library gene sets......\"", ")", "s", "=", "retry", "(", "5", ")", "# queery string", "ENRICHR_URL", "=", "'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'", "query_string", "=", "'?mode=text&libraryName=%s'", "# get", "response", "=", "s", ".", "get", "(", "ENRICHR_URL", "+", "query_string", "%", "libname", ",", "timeout", "=", "None", ")", "if", "not", "response", ".", "ok", ":", "raise", "Exception", "(", "'Error fetching enrichment results, check internet connection first.'", ")", "# reformat to dict and save to disk", "mkdirs", "(", "DEFAULT_CACHE_PATH", ")", "genesets_dict", "=", "{", "}", "outname", "=", "\"enrichr.%s.gmt\"", "%", "libname", "gmtout", "=", "open", "(", "os", ".", "path", ".", "join", "(", "DEFAULT_CACHE_PATH", ",", "outname", ")", ",", "\"w\"", ")", "for", "line", "in", "response", ".", "iter_lines", "(", "chunk_size", "=", "1024", ",", "decode_unicode", "=", "'utf-8'", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "k", "=", "line", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", "v", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "\",\"", ")", "[", "0", "]", ",", "line", ".", "split", "(", "\"\\t\"", ")", "[", "2", ":", "]", ")", ")", "genesets_dict", ".", "update", "(", "{", "k", ":", "v", "}", ")", "outline", "=", "\"%s\\t\\t%s\\n\"", "%", "(", "k", ",", "\"\\t\"", ".", "join", "(", "v", ")", ")", "gmtout", ".", "write", "(", "outline", ")", "gmtout", ".", "close", "(", ")", "return", "genesets_dict" ]
download enrichr libraries.
[ "download", "enrichr", "libraries", "." ]
python
test
44.5
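The loop above reshapes Enrichr's tab-separated GMT lines; here is a standalone sketch of that per-line split, with a made-up example line:

# GMT line layout: "<set name>\t<description>\t<gene,score>\t..."
# only the gene part before each comma is kept
line = "APOPTOSIS\tdesc\tTP53,1.0\tBAX,0.8\tCASP3,0.5"
k = line.split("\t")[0]
v = [field.split(",")[0] for field in line.split("\t")[2:]]
print(k, v)  # APOPTOSIS ['TP53', 'BAX', 'CASP3']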
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/app/backends/_wx.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/app/backends/_wx.py#L107-L118
def _set_config(c): """Set gl configuration""" gl_attribs = [glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'], glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'], glcanvas.WX_GL_MIN_RED, c['red_size'], glcanvas.WX_GL_MIN_GREEN, c['green_size'], glcanvas.WX_GL_MIN_BLUE, c['blue_size'], glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']] gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else [] gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else [] return gl_attribs
[ "def", "_set_config", "(", "c", ")", ":", "gl_attribs", "=", "[", "glcanvas", ".", "WX_GL_RGBA", ",", "glcanvas", ".", "WX_GL_DEPTH_SIZE", ",", "c", "[", "'depth_size'", "]", ",", "glcanvas", ".", "WX_GL_STENCIL_SIZE", ",", "c", "[", "'stencil_size'", "]", ",", "glcanvas", ".", "WX_GL_MIN_RED", ",", "c", "[", "'red_size'", "]", ",", "glcanvas", ".", "WX_GL_MIN_GREEN", ",", "c", "[", "'green_size'", "]", ",", "glcanvas", ".", "WX_GL_MIN_BLUE", ",", "c", "[", "'blue_size'", "]", ",", "glcanvas", ".", "WX_GL_MIN_ALPHA", ",", "c", "[", "'alpha_size'", "]", "]", "gl_attribs", "+=", "[", "glcanvas", ".", "WX_GL_DOUBLEBUFFER", "]", "if", "c", "[", "'double_buffer'", "]", "else", "[", "]", "gl_attribs", "+=", "[", "glcanvas", ".", "WX_GL_STEREO", "]", "if", "c", "[", "'stereo'", "]", "else", "[", "]", "return", "gl_attribs" ]
Set gl configuration
[ "Set", "gl", "configuration" ]
python
train
50.75
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/connect/notifications.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/connect/notifications.py#L126-L140
def error(self): """Check if the async response is an error. Take care to call `is_done` before calling `error`. Note that the error messages are always encoded as strings. :raises CloudUnhandledError: When not checking `is_done` first :return: the error value/payload, if found. :rtype: str """ status_code, error_msg, payload = self.check_error() if status_code != 200 and not error_msg and not payload: return "Async error (%s). Status code: %r" % (self.async_id, status_code) return error_msg
[ "def", "error", "(", "self", ")", ":", "status_code", ",", "error_msg", ",", "payload", "=", "self", ".", "check_error", "(", ")", "if", "status_code", "!=", "200", "and", "not", "error_msg", "and", "not", "payload", ":", "return", "\"Async error (%s). Status code: %r\"", "%", "(", "self", ".", "async_id", ",", "status_code", ")", "return", "error_msg" ]
Check if the async response is an error. Take care to call `is_done` before calling `error`. Note that the error messages are always encoded as strings. :raises CloudUnhandledError: When not checking `is_done` first :return: the error value/payload, if found. :rtype: str
[ "Check", "if", "the", "async", "response", "is", "an", "error", "." ]
python
train
38.466667
timothydmorton/VESPA
vespa/stars/utils.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/utils.py#L176-L183
def fluxfrac(*mags): """Returns fraction of total flux in first argument, assuming all are magnitudes. """ Ftot = 0 for mag in mags: Ftot += 10**(-0.4*mag) F1 = 10**(-0.4*mags[0]) return F1/Ftot
[ "def", "fluxfrac", "(", "*", "mags", ")", ":", "Ftot", "=", "0", "for", "mag", "in", "mags", ":", "Ftot", "+=", "10", "**", "(", "-", "0.4", "*", "mag", ")", "F1", "=", "10", "**", "(", "-", "0.4", "*", "mags", "[", "0", "]", ")", "return", "F1", "/", "Ftot" ]
Returns fraction of total flux in first argument, assuming all are magnitudes.
[ "Returns", "fraction", "of", "total", "flux", "in", "first", "argument", "assuming", "all", "are", "magnitudes", "." ]
python
train
27.375
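The same magnitude-to-flux arithmetic, restated with NumPy as a hedged sketch; for magnitudes 10 and 11 the brighter star carries about 71.5% of the combined flux:

import numpy as np

def fluxfrac(*mags):
    # each magnitude maps to a relative flux of 10**(-0.4 * mag)
    fluxes = 10 ** (-0.4 * np.asarray(mags, dtype=float))
    return fluxes[0] / fluxes.sum()

print(round(fluxfrac(10.0, 11.0), 3))  # 0.715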
google/grr
grr/server/grr_response_server/flow_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_utils.py#L18-L45
def GetUserInfo(knowledge_base, user): # TODO: This docstring cannot be a raw literal because there are # issues with raw unicode literals on Python 2. Once support for Python 2 is # dropped, it can be made raw again. # pylint: disable=g-docstring-has-escape """Get a User protobuf for a specific user. Args: knowledge_base: An rdf_client.KnowledgeBase object. user: Username as string. May contain domain like DOMAIN\\user. Returns: A User rdfvalue or None """ # pylint: enable=g-docstring-has-escape if "\\" in user: domain, user = user.split("\\", 1) users = [ u for u in knowledge_base.users if u.username == user and u.userdomain == domain ] else: users = [u for u in knowledge_base.users if u.username == user] if not users: return else: return users[0]
[ "def", "GetUserInfo", "(", "knowledge_base", ",", "user", ")", ":", "# TODO: This docstring cannot be a raw literal because there are", "# issues with raw unicode literals on Python 2. Once support for Python 2 is", "# dropped, it can be made raw again.", "# pylint: disable=g-docstring-has-escape", "# pylint: enable=g-docstring-has-escape", "if", "\"\\\\\"", "in", "user", ":", "domain", ",", "user", "=", "user", ".", "split", "(", "\"\\\\\"", ",", "1", ")", "users", "=", "[", "u", "for", "u", "in", "knowledge_base", ".", "users", "if", "u", ".", "username", "==", "user", "and", "u", ".", "userdomain", "==", "domain", "]", "else", ":", "users", "=", "[", "u", "for", "u", "in", "knowledge_base", ".", "users", "if", "u", ".", "username", "==", "user", "]", "if", "not", "users", ":", "return", "else", ":", "return", "users", "[", "0", "]" ]
Get a User protobuf for a specific user. Args: knowledge_base: An rdf_client.KnowledgeBase object. user: Username as string. May contain domain like DOMAIN\\user. Returns: A User rdfvalue or None
[ "Get", "a", "User", "protobuf", "for", "a", "specific", "user", "." ]
python
train
29.035714
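The DOMAIN\\user branch above shown in isolation, using plain dicts as stand-ins for KnowledgeBase user records:

users = [{"username": "alice", "userdomain": "CORP"},
         {"username": "alice", "userdomain": ""}]

user = "CORP\\alice"
if "\\" in user:
    # split off the domain once, then match on both fields
    domain, name = user.split("\\", 1)
    hits = [u for u in users
            if u["username"] == name and u["userdomain"] == domain]
else:
    hits = [u for u in users if u["username"] == user]
print(hits)  # only the CORP record matches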
Esri/ArcREST
src/arcrest/common/geometry.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/geometry.py#L239-L251
def __geomToPointList(self, geom): """ converts a geometry object to a common.Geometry object """ if arcpyFound and isinstance(geom, arcpy.Multipoint): feature_geom = [] fPart = [] for part in geom: fPart = [] for pnt in part: fPart.append(Point(coord=[pnt.X, pnt.Y], wkid=geom.spatialReference.factoryCode, z=pnt.Z, m=pnt.M)) feature_geom.append(fPart) return feature_geom
[ "def", "__geomToPointList", "(", "self", ",", "geom", ")", ":", "if", "arcpyFound", "and", "isinstance", "(", "geom", ",", "arcpy", ".", "Multipoint", ")", ":", "feature_geom", "=", "[", "]", "fPart", "=", "[", "]", "for", "part", "in", "geom", ":", "fPart", "=", "[", "]", "for", "pnt", "in", "part", ":", "fPart", ".", "append", "(", "Point", "(", "coord", "=", "[", "pnt", ".", "X", ",", "pnt", ".", "Y", "]", ",", "wkid", "=", "geom", ".", "spatialReference", ".", "factoryCode", ",", "z", "=", "pnt", ".", "Z", ",", "m", "=", "pnt", ".", "M", ")", ")", "feature_geom", ".", "append", "(", "fPart", ")", "return", "feature_geom" ]
converts a geometry object to a common.Geometry object
[ "converts", "a", "geometry", "object", "to", "a", "common", ".", "Geometry", "object" ]
python
train
41.923077
lago-project/lago
lago/templates.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/templates.py#L494-L503
def get_hash(self): """ Returns the associated hash for this template version Returns: str: Hash for this version """ if self._hash is None: self._hash = self._source.get_hash(self._handle).strip() return self._hash
[ "def", "get_hash", "(", "self", ")", ":", "if", "self", ".", "_hash", "is", "None", ":", "self", ".", "_hash", "=", "self", ".", "_source", ".", "get_hash", "(", "self", ".", "_handle", ")", ".", "strip", "(", ")", "return", "self", ".", "_hash" ]
Returns the associated hash for this template version Returns: str: Hash for this version
[ "Returns", "the", "associated", "hash", "for", "this", "template", "version" ]
python
train
27.9
vaab/colour
colour.py
https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L737-L781
def RGB_color_picker(obj): """Build a color representation from the string representation of an object This allows to quickly get a color from some data, with the additional benefit that the color will be the same as long as the (string representation of the) data is the same:: >>> from colour import RGB_color_picker, Color Same inputs produce the same result:: >>> RGB_color_picker("Something") == RGB_color_picker("Something") True ... but different inputs produce different colors:: >>> RGB_color_picker("Something") != RGB_color_picker("Something else") True In any case, we still get a ``Color`` object:: >>> isinstance(RGB_color_picker("Something"), Color) True """ ## Turn the input into a by 3-dividable string. SHA-384 is good because it ## divides into 3 components of the same size, which will be used to ## represent the RGB values of the color. digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest() ## Split the digest into 3 sub-strings of equivalent size. subsize = int(len(digest) / 3) splitted_digest = [digest[i * subsize: (i + 1) * subsize] for i in range(3)] ## Convert those hexadecimal sub-strings into integer and scale them down ## to the 0..1 range. max_value = float(int("f" * subsize, 16)) components = ( int(d, 16) ## Make a number from a list with hex digits / max_value ## Scale it down to [0.0, 1.0] for d in splitted_digest) return Color(rgb2hex(components))
[ "def", "RGB_color_picker", "(", "obj", ")", ":", "## Turn the input into a by 3-dividable string. SHA-384 is good because it", "## divides into 3 components of the same size, which will be used to", "## represent the RGB values of the color.", "digest", "=", "hashlib", ".", "sha384", "(", "str", "(", "obj", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "## Split the digest into 3 sub-strings of equivalent size.", "subsize", "=", "int", "(", "len", "(", "digest", ")", "/", "3", ")", "splitted_digest", "=", "[", "digest", "[", "i", "*", "subsize", ":", "(", "i", "+", "1", ")", "*", "subsize", "]", "for", "i", "in", "range", "(", "3", ")", "]", "## Convert those hexadecimal sub-strings into integer and scale them down", "## to the 0..1 range.", "max_value", "=", "float", "(", "int", "(", "\"f\"", "*", "subsize", ",", "16", ")", ")", "components", "=", "(", "int", "(", "d", ",", "16", ")", "## Make a number from a list with hex digits", "/", "max_value", "## Scale it down to [0.0, 1.0]", "for", "d", "in", "splitted_digest", ")", "return", "Color", "(", "rgb2hex", "(", "components", ")", ")" ]
Build a color representation from the string representation of an object This allows to quickly get a color from some data, with the additional benefit that the color will be the same as long as the (string representation of the) data is the same:: >>> from colour import RGB_color_picker, Color Same inputs produce the same result:: >>> RGB_color_picker("Something") == RGB_color_picker("Something") True ... but different inputs produce different colors:: >>> RGB_color_picker("Something") != RGB_color_picker("Something else") True In any case, we still get a ``Color`` object:: >>> isinstance(RGB_color_picker("Something"), Color) True
[ "Build", "a", "color", "representation", "from", "the", "string", "representation", "of", "an", "object" ]
python
train
34.644444
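The digest slicing in isolation: SHA-384 yields 96 hex characters, i.e. three 32-character thirds that scale cleanly to [0, 1]:

import hashlib

digest = hashlib.sha384(b"Something").hexdigest()
subsize = len(digest) // 3  # 32 hex chars per color channel
parts = [digest[i * subsize:(i + 1) * subsize] for i in range(3)]
max_value = float(int("f" * subsize, 16))
rgb = tuple(int(p, 16) / max_value for p in parts)
print(rgb)  # deterministic triple in [0.0, 1.0] for this input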
Genida/dependenpy
src/dependenpy/node.py
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L215-L225
def build_dependencies(self): """ Recursively build the dependencies for sub-modules and sub-packages. Iterate on node's modules then packages and call their build_dependencies methods. """ for m in self.modules: m.build_dependencies() for p in self.packages: p.build_dependencies()
[ "def", "build_dependencies", "(", "self", ")", ":", "for", "m", "in", "self", ".", "modules", ":", "m", ".", "build_dependencies", "(", ")", "for", "p", "in", "self", ".", "packages", ":", "p", ".", "build_dependencies", "(", ")" ]
Recursively build the dependencies for sub-modules and sub-packages. Iterate on node's modules then packages and call their build_dependencies methods.
[ "Recursively", "build", "the", "dependencies", "for", "sub", "-", "modules", "and", "sub", "-", "packages", "." ]
python
train
32.090909
trevisanj/f311
f311/collaboration.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/collaboration.py#L172-L196
def __setup():
    """Will be executed the first time someone calls classes_*()
    """
    global __collaborators, __flag_first
    import f311

    __flag_first = False
    for pkgname in f311.COLLABORATORS_C:
        try:
            pkg = importlib.import_module(pkgname)
            a99.get_python_logger().info("Imported collaborator package '{}'".format(pkgname))
            try:
                if hasattr(pkg, "_setup_filetypes"):
                    pkg._setup_filetypes()
                else:
                    _collect_classes(pkg)
                __collaborators[pkgname] = pkg
            except:
                a99.get_python_logger().exception(
                    "Actually, package '{}' gave error".format(pkgname))
                raise
        except:
            a99.get_python_logger().warning("Failed to import package '{}'".format(pkgname))
[ "def", "__setup", "(", ")", ":", "global", "__collaborators", ",", "__flag_first", "import", "f311", "__flag_first", "=", "False", "for", "pkgname", "in", "f311", ".", "COLLABORATORS_C", ":", "try", ":", "pkg", "=", "importlib", ".", "import_module", "(", "pkgname", ")", "a99", ".", "get_python_logger", "(", ")", ".", "info", "(", "\"Imported collaborator package '{}'\"", ".", "format", "(", "pkgname", ")", ")", "try", ":", "if", "hasattr", "(", "pkg", ",", "\"_setup_filetypes\"", ")", ":", "pkg", ".", "_setup_filetypes", "(", ")", "else", ":", "_collect_classes", "(", "pkg", ")", "__collaborators", "[", "pkgname", "]", "=", "pkg", "except", ":", "a99", ".", "get_python_logger", "(", ")", ".", "exception", "(", "\"Actually, package '{}' gave error\"", ".", "format", "(", "pkgname", ")", ")", "raise", "except", ":", "a99", ".", "get_python_logger", "(", ")", ".", "warning", "(", "\"Failed to import package '{}\"", ".", "format", "(", "pkgname", ")", ")" ]
Will be executed the first time someone calls classes_*()
[ "Will", "be", "executed", "in", "the", "first", "time", "someone", "calls", "classes_", "*", "()" ]
python
train
33.84
alorence/pysvg-py3
pysvg/builders.py
https://github.com/alorence/pysvg-py3/blob/ce217a4da3ada44a71d3e2f391d37c67d95c724e/pysvg/builders.py#L119-L134
def createPolyline(self, points, strokewidth=1, stroke='black'):
        """
        Creates a Polyline
        @type points: string in the form "x1,y1 x2,y2 x3,y3"
        @param points: all points relevant to the polyline
        @type strokewidth: string or int
        @param strokewidth: width of the pen used to draw
        @type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
        @param stroke: color with which to draw the outer limits
        @return: a polyline object
        """
        style_dict = {'fill':'none', 'stroke-width':strokewidth, 'stroke':stroke}
        myStyle = StyleBuilder(style_dict)
        p = Polyline(points=points)
        p.set_style(myStyle.getStyle())
        return p
[ "def", "createPolyline", "(", "self", ",", "points", ",", "strokewidth", "=", "1", ",", "stroke", "=", "'black'", ")", ":", "style_dict", "=", "{", "'fill'", ":", "'none'", ",", "'stroke-width'", ":", "strokewidth", ",", "'stroke'", ":", "stroke", "}", "myStyle", "=", "StyleBuilder", "(", "style_dict", ")", "p", "=", "Polyline", "(", "points", "=", "points", ")", "p", ".", "set_style", "(", "myStyle", ".", "getStyle", "(", ")", ")", "return", "p" ]
Creates a Polyline
        @type points: string in the form "x1,y1 x2,y2 x3,y3"
        @param points: all points relevant to the polyline
        @type strokewidth: string or int
        @param strokewidth: width of the pen used to draw
        @type stroke: string (either css constants like "black" or numerical values like "#FFFFFF")
        @param stroke: color with which to draw the outer limits
        @return: a polyline object
[ "Creates", "a", "Polyline" ]
python
train
46.4375
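A hedged usage sketch, assuming the method above lives on pysvg's `ShapeBuilder` and that `Svg`, `addElement` and `getXML` behave as in pysvg:

from pysvg.builders import ShapeBuilder
from pysvg.structure import Svg

# build a small SVG document containing one styled polyline
svg = Svg(width=100, height=100)
line = ShapeBuilder().createPolyline("10,10 40,80 90,20",
                                     strokewidth=2, stroke="navy")
svg.addElement(line)
print(svg.getXML())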
ponty/confduino
confduino/progremove.py
https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/progremove.py#L9-L21
def remove_programmer(programmer_id): """remove programmer. :param programmer_id: programmer id (e.g. 'avrisp') :rtype: None """ log.debug('remove %s', programmer_id) lines = programmers_txt().lines() lines = filter( lambda x: not x.strip().startswith(programmer_id + '.'), lines) programmers_txt().write_lines(lines)
[ "def", "remove_programmer", "(", "programmer_id", ")", ":", "log", ".", "debug", "(", "'remove %s'", ",", "programmer_id", ")", "lines", "=", "programmers_txt", "(", ")", ".", "lines", "(", ")", "lines", "=", "filter", "(", "lambda", "x", ":", "not", "x", ".", "strip", "(", ")", ".", "startswith", "(", "programmer_id", "+", "'.'", ")", ",", "lines", ")", "programmers_txt", "(", ")", ".", "write_lines", "(", "lines", ")" ]
remove programmer. :param programmer_id: programmer id (e.g. 'avrisp') :rtype: None
[ "remove", "programmer", "." ]
python
train
26.769231
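The same prefix-drop on plain data; note that on Python 3 `filter()` returns a lazy iterator, which `write_lines()` would consume exactly once:

lines = ["avrisp.name=AVR ISP", "avrisp.protocol=stk500", "usbasp.name=USBasp"]
programmer_id = "avrisp"
# keep every line that does not belong to the programmer being removed
kept = [x for x in lines if not x.strip().startswith(programmer_id + ".")]
print(kept)  # ['usbasp.name=USBasp']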
raphaelvallat/pingouin
pingouin/regression.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/regression.py#L226-L411
def logistic_regression(X, y, coef_only=False, alpha=0.05, as_dataframe=True, remove_na=False, **kwargs): """(Multiple) Binary logistic regression. Parameters ---------- X : np.array or list Predictor(s). Shape = (n_samples, n_features) or (n_samples,). y : np.array or list Dependent variable. Shape = (n_samples). Must be binary. coef_only : bool If True, return only the regression coefficients. alpha : float Alpha value used for the confidence intervals. CI = [alpha / 2 ; 1 - alpha / 2] as_dataframe : bool If True, returns a pandas DataFrame. If False, returns a dictionnary. remove_na : bool If True, apply a listwise deletion of missing values (i.e. the entire row is removed). **kwargs : optional Optional arguments passed to sklearn.linear_model.LogisticRegression. Returns ------- stats : dataframe or dict Logistic regression summary:: 'names' : name of variable(s) in the model (e.g. x1, x2...) 'coef' : regression coefficients 'se' : standard error 'z' : z-scores 'pval' : two-tailed p-values 'CI[2.5%]' : lower confidence interval 'CI[97.5%]' : upper confidence interval Notes ----- This is a wrapper around the :py:class:`sklearn.linear_model.LogisticRegression` class. Results have been compared against statsmodels and JASP. Note that the first coefficient is always the constant term (intercept) of the model. This function will not run if NaN values are either present in the target or predictors variables. Please remove them before runing the function. Adapted from a code found at https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d Examples -------- 1. Simple binary logistic regression >>> import numpy as np >>> from pingouin import logistic_regression >>> np.random.seed(123) >>> x = np.random.normal(size=30) >>> y = np.random.randint(0, 2, size=30) >>> lom = logistic_regression(x, y) >>> lom.round(2) names coef se z pval CI[2.5%] CI[97.5%] 0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45 1 x1 0.06 0.32 0.19 0.85 -0.56 0.68 2. Multiple binary logistic regression >>> np.random.seed(42) >>> z = np.random.normal(size=30) >>> X = np.column_stack((x, z)) >>> lom = logistic_regression(X, y) >>> print(lom['coef'].values) [-0.34933805 -0.0226106 -0.39453532] 3. Using a Pandas DataFrame >>> import pandas as pd >>> df = pd.DataFrame({'x': x, 'y': y, 'z': z}) >>> lom = logistic_regression(df[['x', 'z']], df['y']) >>> print(lom['coef'].values) [-0.34933805 -0.0226106 -0.39453532] 4. Return only the coefficients >>> logistic_regression(X, y, coef_only=True) array([-0.34933805, -0.0226106 , -0.39453532]) 4. Passing custom parameters to sklearn >>> lom = logistic_regression(X, y, solver='sag', max_iter=10000) >>> print(lom['coef'].values) [-0.34941889 -0.02261911 -0.39451064] """ # Check that sklearn is installed from pingouin.utils import _is_sklearn_installed _is_sklearn_installed(raise_error=True) from sklearn.linear_model import LogisticRegression # Extract names if X is a Dataframe or Series if isinstance(X, pd.DataFrame): names = X.keys().tolist() elif isinstance(X, pd.Series): names = [X.name] else: names = [] assert 0 < alpha < 1 assert y.ndim == 1, 'y must be one-dimensional.' # Convert to numpy array X = np.asarray(X) y = np.asarray(y) # Add axis if only one-dimensional array if X.ndim == 1: X = X[..., np.newaxis] # Check for NaN / Inf if remove_na: X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows') y = np.squeeze(y) y_gd = np.isfinite(y).all() X_gd = np.isfinite(X).all() assert y_gd, 'Target variable contains NaN or Inf. Please remove them.' 
assert X_gd, 'Predictors contains NaN or Inf. Please remove them.' # Check that X and y have same length assert y.shape[0] == X.shape[0], 'X and y must have same number of samples' # Check that y is binary if np.unique(y).size != 2: raise ValueError('Dependent variable must be binary.') if not names: names = ['x' + str(i + 1) for i in range(X.shape[1])] # Add intercept in names names.insert(0, "Intercept") # Initialize and fit if 'solver' not in kwargs: kwargs['solver'] = 'lbfgs' if 'multi_class' not in kwargs: kwargs['multi_class'] = 'auto' lom = LogisticRegression(**kwargs) lom.fit(X, y) coef = np.append(lom.intercept_, lom.coef_) if coef_only: return coef # Design matrix -- add intercept X_design = np.column_stack((np.ones(X.shape[0]), X)) n, p = X_design.shape # Fisher Information Matrix denom = (2 * (1 + np.cosh(lom.decision_function(X)))) denom = np.tile(denom, (p, 1)).T fim = np.dot((X_design / denom).T, X_design) crao = np.linalg.inv(fim) # Standard error and Z-scores se = np.sqrt(np.diag(crao)) z_scores = coef / se # Two-tailed p-values pval = np.array([2 * norm.sf(abs(z)) for z in z_scores]) # Confidence intervals crit = norm.ppf(1 - alpha / 2) ll = coef - crit * se ul = coef + crit * se # Rename CI ll_name = 'CI[%.1f%%]' % (100 * alpha / 2) ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2)) # Create dict stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores, 'pval': pval, ll_name: ll, ul_name: ul} if as_dataframe: return pd.DataFrame.from_dict(stats) else: return stats
[ "def", "logistic_regression", "(", "X", ",", "y", ",", "coef_only", "=", "False", ",", "alpha", "=", "0.05", ",", "as_dataframe", "=", "True", ",", "remove_na", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Check that sklearn is installed", "from", "pingouin", ".", "utils", "import", "_is_sklearn_installed", "_is_sklearn_installed", "(", "raise_error", "=", "True", ")", "from", "sklearn", ".", "linear_model", "import", "LogisticRegression", "# Extract names if X is a Dataframe or Series", "if", "isinstance", "(", "X", ",", "pd", ".", "DataFrame", ")", ":", "names", "=", "X", ".", "keys", "(", ")", ".", "tolist", "(", ")", "elif", "isinstance", "(", "X", ",", "pd", ".", "Series", ")", ":", "names", "=", "[", "X", ".", "name", "]", "else", ":", "names", "=", "[", "]", "assert", "0", "<", "alpha", "<", "1", "assert", "y", ".", "ndim", "==", "1", ",", "'y must be one-dimensional.'", "# Convert to numpy array", "X", "=", "np", ".", "asarray", "(", "X", ")", "y", "=", "np", ".", "asarray", "(", "y", ")", "# Add axis if only one-dimensional array", "if", "X", ".", "ndim", "==", "1", ":", "X", "=", "X", "[", "...", ",", "np", ".", "newaxis", "]", "# Check for NaN / Inf", "if", "remove_na", ":", "X", ",", "y", "=", "rm_na", "(", "X", ",", "y", "[", "...", ",", "np", ".", "newaxis", "]", ",", "paired", "=", "True", ",", "axis", "=", "'rows'", ")", "y", "=", "np", ".", "squeeze", "(", "y", ")", "y_gd", "=", "np", ".", "isfinite", "(", "y", ")", ".", "all", "(", ")", "X_gd", "=", "np", ".", "isfinite", "(", "X", ")", ".", "all", "(", ")", "assert", "y_gd", ",", "'Target variable contains NaN or Inf. Please remove them.'", "assert", "X_gd", ",", "'Predictors contains NaN or Inf. Please remove them.'", "# Check that X and y have same length", "assert", "y", ".", "shape", "[", "0", "]", "==", "X", ".", "shape", "[", "0", "]", ",", "'X and y must have same number of samples'", "# Check that y is binary", "if", "np", ".", "unique", "(", "y", ")", ".", "size", "!=", "2", ":", "raise", "ValueError", "(", "'Dependent variable must be binary.'", ")", "if", "not", "names", ":", "names", "=", "[", "'x'", "+", "str", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", "]", "# Add intercept in names", "names", ".", "insert", "(", "0", ",", "\"Intercept\"", ")", "# Initialize and fit", "if", "'solver'", "not", "in", "kwargs", ":", "kwargs", "[", "'solver'", "]", "=", "'lbfgs'", "if", "'multi_class'", "not", "in", "kwargs", ":", "kwargs", "[", "'multi_class'", "]", "=", "'auto'", "lom", "=", "LogisticRegression", "(", "*", "*", "kwargs", ")", "lom", ".", "fit", "(", "X", ",", "y", ")", "coef", "=", "np", ".", "append", "(", "lom", ".", "intercept_", ",", "lom", ".", "coef_", ")", "if", "coef_only", ":", "return", "coef", "# Design matrix -- add intercept", "X_design", "=", "np", ".", "column_stack", "(", "(", "np", ".", "ones", "(", "X", ".", "shape", "[", "0", "]", ")", ",", "X", ")", ")", "n", ",", "p", "=", "X_design", ".", "shape", "# Fisher Information Matrix", "denom", "=", "(", "2", "*", "(", "1", "+", "np", ".", "cosh", "(", "lom", ".", "decision_function", "(", "X", ")", ")", ")", ")", "denom", "=", "np", ".", "tile", "(", "denom", ",", "(", "p", ",", "1", ")", ")", ".", "T", "fim", "=", "np", ".", "dot", "(", "(", "X_design", "/", "denom", ")", ".", "T", ",", "X_design", ")", "crao", "=", "np", ".", "linalg", ".", "inv", "(", "fim", ")", "# Standard error and Z-scores", "se", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "crao", ")", ")", 
"z_scores", "=", "coef", "/", "se", "# Two-tailed p-values", "pval", "=", "np", ".", "array", "(", "[", "2", "*", "norm", ".", "sf", "(", "abs", "(", "z", ")", ")", "for", "z", "in", "z_scores", "]", ")", "# Confidence intervals", "crit", "=", "norm", ".", "ppf", "(", "1", "-", "alpha", "/", "2", ")", "ll", "=", "coef", "-", "crit", "*", "se", "ul", "=", "coef", "+", "crit", "*", "se", "# Rename CI", "ll_name", "=", "'CI[%.1f%%]'", "%", "(", "100", "*", "alpha", "/", "2", ")", "ul_name", "=", "'CI[%.1f%%]'", "%", "(", "100", "*", "(", "1", "-", "alpha", "/", "2", ")", ")", "# Create dict", "stats", "=", "{", "'names'", ":", "names", ",", "'coef'", ":", "coef", ",", "'se'", ":", "se", ",", "'z'", ":", "z_scores", ",", "'pval'", ":", "pval", ",", "ll_name", ":", "ll", ",", "ul_name", ":", "ul", "}", "if", "as_dataframe", ":", "return", "pd", ".", "DataFrame", ".", "from_dict", "(", "stats", ")", "else", ":", "return", "stats" ]
(Multiple) Binary logistic regression.

    Parameters
    ----------
    X : np.array or list
        Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
    y : np.array or list
        Dependent variable. Shape = (n_samples).
        Must be binary.
    coef_only : bool
        If True, return only the regression coefficients.
    alpha : float
        Alpha value used for the confidence intervals.
        CI = [alpha / 2 ; 1 - alpha / 2]
    as_dataframe : bool
        If True, returns a pandas DataFrame. If False, returns a dictionary.
    remove_na : bool
        If True, apply a listwise deletion of missing values
        (i.e. the entire row is removed).
    **kwargs : optional
        Optional arguments passed to sklearn.linear_model.LogisticRegression.

    Returns
    -------
    stats : dataframe or dict
        Logistic regression summary::

            'names' : name of variable(s) in the model (e.g. x1, x2...)
            'coef' : regression coefficients
            'se' : standard error
            'z' : z-scores
            'pval' : two-tailed p-values
            'CI[2.5%]' : lower confidence interval
            'CI[97.5%]' : upper confidence interval

    Notes
    -----
    This is a wrapper around the
    :py:class:`sklearn.linear_model.LogisticRegression` class.

    Results have been compared against statsmodels and JASP.

    Note that the first coefficient is always the constant term (intercept) of
    the model.

    This function will not run if NaN values are either present in the target
    or predictors variables. Please remove them before running the function.

    Adapted from code found at
    https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d

    Examples
    --------
    1. Simple binary logistic regression

        >>> import numpy as np
        >>> from pingouin import logistic_regression
        >>> np.random.seed(123)
        >>> x = np.random.normal(size=30)
        >>> y = np.random.randint(0, 2, size=30)
        >>> lom = logistic_regression(x, y)
        >>> lom.round(2)
               names  coef    se     z  pval  CI[2.5%]  CI[97.5%]
        0  Intercept -0.27  0.37 -0.73  0.46     -0.99       0.45
        1         x1  0.06  0.32  0.19  0.85     -0.56       0.68

    2. Multiple binary logistic regression

        >>> np.random.seed(42)
        >>> z = np.random.normal(size=30)
        >>> X = np.column_stack((x, z))
        >>> lom = logistic_regression(X, y)
        >>> print(lom['coef'].values)
        [-0.34933805 -0.0226106  -0.39453532]

    3. Using a Pandas DataFrame

        >>> import pandas as pd
        >>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
        >>> lom = logistic_regression(df[['x', 'z']], df['y'])
        >>> print(lom['coef'].values)
        [-0.34933805 -0.0226106  -0.39453532]

    4. Return only the coefficients

        >>> logistic_regression(X, y, coef_only=True)
        array([-0.34933805, -0.0226106 , -0.39453532])

    5. Passing custom parameters to sklearn

        >>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
        >>> print(lom['coef'].values)
        [-0.34941889 -0.02261911 -0.39451064]
[ "(", "Multiple", ")", "Binary", "logistic", "regression", "." ]
python
train
30.596774
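The Wald machinery used above, reduced to a single coefficient: given an estimate and its standard error, the z-score, two-tailed p-value and confidence bounds follow directly (the numbers here are made up for illustration):

from scipy.stats import norm

coef, se, alpha = 0.8, 0.3, 0.05
z = coef / se
pval = 2 * norm.sf(abs(z))          # two-tailed p-value
crit = norm.ppf(1 - alpha / 2)      # ~1.96 for alpha = 0.05
ci = (coef - crit * se, coef + crit * se)
print(round(z, 2), round(pval, 4), [round(b, 3) for b in ci])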
jantman/webhook2lambda2sqs
webhook2lambda2sqs/tf_generator.py
https://github.com/jantman/webhook2lambda2sqs/blob/c80c18d5a908ba8b8ee624dc3a977c633fba2b7c/webhook2lambda2sqs/tf_generator.py#L174-L195
def _generate_iam_invoke_role_policy(self): """ Generate the policy for the IAM role used by API Gateway to invoke the lambda function. Terraform name: aws_iam_role.invoke_role """ invoke_pol = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Resource": ["*"], "Action": ["lambda:InvokeFunction"] } ] } self.tf_conf['resource']['aws_iam_role_policy']['invoke_policy'] = { 'name': self.resource_name + '-invoke', 'role': '${aws_iam_role.invoke_role.id}', 'policy': json.dumps(invoke_pol) }
[ "def", "_generate_iam_invoke_role_policy", "(", "self", ")", ":", "invoke_pol", "=", "{", "\"Version\"", ":", "\"2012-10-17\"", ",", "\"Statement\"", ":", "[", "{", "\"Effect\"", ":", "\"Allow\"", ",", "\"Resource\"", ":", "[", "\"*\"", "]", ",", "\"Action\"", ":", "[", "\"lambda:InvokeFunction\"", "]", "}", "]", "}", "self", ".", "tf_conf", "[", "'resource'", "]", "[", "'aws_iam_role_policy'", "]", "[", "'invoke_policy'", "]", "=", "{", "'name'", ":", "self", ".", "resource_name", "+", "'-invoke'", ",", "'role'", ":", "'${aws_iam_role.invoke_role.id}'", ",", "'policy'", ":", "json", ".", "dumps", "(", "invoke_pol", ")", "}" ]
Generate the policy for the IAM role used by API Gateway to invoke the lambda function. Terraform name: aws_iam_role.invoke_role
[ "Generate", "the", "policy", "for", "the", "IAM", "role", "used", "by", "API", "Gateway", "to", "invoke", "the", "lambda", "function", "." ]
python
train
32.681818
gamechanger/dusty
dusty/systems/hosts/__init__.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/systems/hosts/__init__.py#L7-L12
def _dusty_hosts_config(hosts_specs): """Return a string of all host rules required to match the given spec. This string is wrapped in the Dusty hosts header and footer so it can be easily removed later.""" rules = ''.join(['{} {}\n'.format(spec['forwarded_ip'], spec['host_address']) for spec in hosts_specs]) return config_file.create_config_section(rules)
[ "def", "_dusty_hosts_config", "(", "hosts_specs", ")", ":", "rules", "=", "''", ".", "join", "(", "[", "'{} {}\\n'", ".", "format", "(", "spec", "[", "'forwarded_ip'", "]", ",", "spec", "[", "'host_address'", "]", ")", "for", "spec", "in", "hosts_specs", "]", ")", "return", "config_file", ".", "create_config_section", "(", "rules", ")" ]
Return a string of all host rules required to match the given spec. This string is wrapped in the Dusty hosts header and footer so it can be easily removed later.
[ "Return", "a", "string", "of", "all", "host", "rules", "required", "to", "match", "the", "given", "spec", ".", "This", "string", "is", "wrapped", "in", "the", "Dusty", "hosts", "header", "and", "footer", "so", "it", "can", "be", "easily", "removed", "later", "." ]
python
valid
62.333333
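The rule block built above, minus the Dusty header/footer wrapper, with stand-in specs:

hosts_specs = [{'forwarded_ip': '172.17.0.2', 'host_address': 'web.local'},
               {'forwarded_ip': '172.17.0.3', 'host_address': 'api.local'}]
# one "<ip> <hostname>" rule per spec, newline-terminated
rules = ''.join('{} {}\n'.format(s['forwarded_ip'], s['host_address'])
                for s in hosts_specs)
print(rules)
# 172.17.0.2 web.local
# 172.17.0.3 api.local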
beregond/jsonmodels
jsonmodels/validators.py
https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/validators.py#L73-L77
def modify_schema(self, field_schema): """Modify field schema.""" field_schema['maximum'] = self.maximum_value if self.exclusive: field_schema['exclusiveMaximum'] = True
[ "def", "modify_schema", "(", "self", ",", "field_schema", ")", ":", "field_schema", "[", "'maximum'", "]", "=", "self", ".", "maximum_value", "if", "self", ".", "exclusive", ":", "field_schema", "[", "'exclusiveMaximum'", "]", "=", "True" ]
Modify field schema.
[ "Modify", "field", "schema", "." ]
python
train
40.2
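The effect of the validator above on a field schema, shown standalone; `exclusiveMaximum` here is the draft-4-style boolean flag that the snippet emits:

field_schema = {'type': 'number'}
maximum_value, exclusive = 100, True

# mirror the validator: always set 'maximum', and flag strictness
field_schema['maximum'] = maximum_value
if exclusive:
    field_schema['exclusiveMaximum'] = True
print(field_schema)  # {'type': 'number', 'maximum': 100, 'exclusiveMaximum': True}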
batiste/django-page-cms
pages/utils.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/utils.py#L116-L136
def normalize_url(url): """Return a normalized url with trailing and without leading slash. >>> normalize_url(None) '/' >>> normalize_url('/') '/' >>> normalize_url('/foo/bar') '/foo/bar' >>> normalize_url('foo/bar') '/foo/bar' >>> normalize_url('/foo/bar/') '/foo/bar' """ if not url or len(url) == 0: return '/' if not url.startswith('/'): url = '/' + url if len(url) > 1 and url.endswith('/'): url = url[0:len(url) - 1] return url
[ "def", "normalize_url", "(", "url", ")", ":", "if", "not", "url", "or", "len", "(", "url", ")", "==", "0", ":", "return", "'/'", "if", "not", "url", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "'/'", "+", "url", "if", "len", "(", "url", ")", ">", "1", "and", "url", ".", "endswith", "(", "'/'", ")", ":", "url", "=", "url", "[", "0", ":", "len", "(", "url", ")", "-", "1", "]", "return", "url" ]
Return a normalized url with trailing and without leading slash. >>> normalize_url(None) '/' >>> normalize_url('/') '/' >>> normalize_url('/foo/bar') '/foo/bar' >>> normalize_url('foo/bar') '/foo/bar' >>> normalize_url('/foo/bar/') '/foo/bar'
[ "Return", "a", "normalized", "url", "with", "trailing", "and", "without", "leading", "slash", "." ]
python
train
24.380952
tonyfischetti/sake
sakelib/acts.py
https://github.com/tonyfischetti/sake/blob/b7ad20fe8e7137db99a20ac06b8da26492601b00/sakelib/acts.py#L626-L675
def visualize(G, settings, filename="dependencies", no_graphviz=False):
    """
    Uses networkX to draw a graphviz dot file either (a) calls the
    graphviz command "dot" to turn it into a SVG and remove the
    dotfile (default), or (b) if no_graphviz is True, just output
    the graphviz dot file

    Args:
        a NetworkX DiGraph
        the settings dictionary
        a filename (a default is provided)
        a flag indicating whether graphviz should *not* be called

    Returns:
        0 if everything worked
        will cause fatal error on failure
    """
    error = settings["error"]
    if no_graphviz:
        write_dot_file(G, filename)
        return 0
    write_dot_file(G, "tempdot")
    renderer = "svg"
    if re.search(r"\.jpg$", filename, re.IGNORECASE):
        renderer = "jpg"
    elif re.search(r"\.jpeg$", filename, re.IGNORECASE):
        renderer = "jpg"
    elif re.search(r"\.svg$", filename, re.IGNORECASE):
        renderer = "svg"
    elif re.search(r"\.png$", filename, re.IGNORECASE):
        renderer = "png"
    elif re.search(r"\.gif$", filename, re.IGNORECASE):
        renderer = "gif"
    elif re.search(r"\.ps$", filename, re.IGNORECASE):
        renderer = "ps"
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        renderer = "pdf"
    else:
        renderer = "svg"
        filename += ".svg"
    command = "dot -T{} tempdot -o {}".format(renderer, filename)
    p = Popen(command, shell=True)
    p.communicate()
    if p.returncode:
        errmes = "Either graphviz is not installed, or it's not on PATH"
        os.remove("tempdot")
        error(errmes)
        sys.exit(1)
    os.remove("tempdot")
    return 0
[ "def", "visualize", "(", "G", ",", "settings", ",", "filename", "=", "\"dependencies\"", ",", "no_graphviz", "=", "False", ")", ":", "error", "=", "settings", "[", "\"error\"", "]", "if", "no_graphviz", ":", "write_dot_file", "(", "G", ",", "filename", ")", "return", "0", "write_dot_file", "(", "G", ",", "\"tempdot\"", ")", "renderer", "=", "\"svg\"", "if", "re", ".", "search", "(", "\"\\.jpg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"jpg\"", "elif", "re", ".", "search", "(", "\"\\.jpeg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"jpg\"", "elif", "re", ".", "search", "(", "\"\\.svg$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"svg\"", "elif", "re", ".", "search", "(", "\"\\.png$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"png\"", "elif", "re", ".", "search", "(", "\"\\.gif$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"gif\"", "elif", "re", ".", "search", "(", "\"\\.ps$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"ps\"", "elif", "re", ".", "search", "(", "\"\\.pdf$\"", ",", "filename", ",", "re", ".", "IGNORECASE", ")", ":", "renderer", "=", "\"pdf\"", "else", ":", "renderer", "=", "\"svg\"", "filename", "+=", "\".svg\"", "command", "=", "\"dot -T{} tempdot -o {}\"", ".", "format", "(", "renderer", ",", "filename", ")", "p", "=", "Popen", "(", "command", ",", "shell", "=", "True", ")", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", ":", "errmes", "=", "\"Either graphviz is not installed, or its not on PATH\"", "os", ".", "remove", "(", "\"tempdot\"", ")", "error", "(", "errmes", ")", "sys", ".", "exit", "(", "1", ")", "os", ".", "remove", "(", "\"tempdot\"", ")", "return", "0" ]
Uses networkX to draw a graphviz dot file either (a) calls the
    graphviz command "dot" to turn it into a SVG and remove the
    dotfile (default), or (b) if no_graphviz is True, just output
    the graphviz dot file

    Args:
        a NetworkX DiGraph
        the settings dictionary
        a filename (a default is provided)
        a flag indicating whether graphviz should *not* be called

    Returns:
        0 if everything worked
        will cause fatal error on failure
[ "Uses", "networkX", "to", "draw", "a", "graphviz", "dot", "file", "either", "(", "a", ")", "calls", "the", "graphviz", "command", "dot", "to", "turn", "it", "into", "a", "SVG", "and", "remove", "the", "dotfile", "(", "default", ")", "or", "(", "b", ")", "if", "no_graphviz", "is", "True", "just", "output", "the", "graphviz", "dot", "file" ]
python
valid
32.5
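The render step above, sketched with an argument list instead of `shell=True`; this assumes graphviz is installed and a `tempdot` file was already written:

from subprocess import Popen

renderer, filename = "svg", "dependencies.svg"
# argument-list form avoids shell quoting issues; note that a missing
# 'dot' binary raises FileNotFoundError here instead of a shell exit code
p = Popen(["dot", "-T{}".format(renderer), "tempdot", "-o", filename])
p.communicate()
if p.returncode:
    raise SystemExit("graphviz 'dot' failed")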
datacamp/sqlwhat
sqlwhat/checks/check_funcs.py
https://github.com/datacamp/sqlwhat/blob/9ae798c63124f994607a0e2c120b24ebbb2bdbe9/sqlwhat/checks/check_funcs.py#L127-L189
def check_row(state, index, missing_msg=None, expand_msg=None): """Zoom in on a particular row in the query result, by index. After zooming in on a row, which is represented as a single-row query result, you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution query result have a match in the student query result. Args: index: index of the row to zoom in on (zero-based indexed). missing_msg: if specified, this overrides the automatically generated feedback message in case the row is missing in the student query result. expand_msg: if specified, this overrides the automatically generated feedback message that is prepended to feedback messages that are thrown further in the SCT chain. :Example: Suppose we are testing the following SELECT statements * solution: ``SELECT artist_id as id, name FROM artists LIMIT 5`` * student : ``SELECT artist_id, name FROM artists LIMIT 2`` We can write the following SCTs: :: # fails, since row 3 at index 2 is not in the student result Ex().check_row(2) # passes, since row 2 at index 1 is in the student result Ex().check_row(0) """ if missing_msg is None: missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look." if expand_msg is None: expand_msg = "Have another look at row {{index + 1}} in your query result. " msg_kwargs = {"index": index} # check that query returned something has_result(state) stu_res = state.student_result sol_res = state.solution_result n_sol = len(next(iter(sol_res.values()))) n_stu = len(next(iter(stu_res.values()))) if index >= n_sol: raise BaseException( "There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format( n_sol, index ) ) if index >= n_stu: _msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs) state.do_test(_msg) return state.to_child( append_message={"msg": expand_msg, "kwargs": msg_kwargs}, student_result={k: [v[index]] for k, v in stu_res.items()}, solution_result={k: [v[index]] for k, v in sol_res.items()}, )
[ "def", "check_row", "(", "state", ",", "index", ",", "missing_msg", "=", "None", ",", "expand_msg", "=", "None", ")", ":", "if", "missing_msg", "is", "None", ":", "missing_msg", "=", "\"The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look.\"", "if", "expand_msg", "is", "None", ":", "expand_msg", "=", "\"Have another look at row {{index + 1}} in your query result. \"", "msg_kwargs", "=", "{", "\"index\"", ":", "index", "}", "# check that query returned something", "has_result", "(", "state", ")", "stu_res", "=", "state", ".", "student_result", "sol_res", "=", "state", ".", "solution_result", "n_sol", "=", "len", "(", "next", "(", "iter", "(", "sol_res", ".", "values", "(", ")", ")", ")", ")", "n_stu", "=", "len", "(", "next", "(", "iter", "(", "stu_res", ".", "values", "(", ")", ")", ")", ")", "if", "index", ">=", "n_sol", ":", "raise", "BaseException", "(", "\"There are only {} rows in the solution query result, and you're trying to fetch the row at index {}\"", ".", "format", "(", "n_sol", ",", "index", ")", ")", "if", "index", ">=", "n_stu", ":", "_msg", "=", "state", ".", "build_message", "(", "missing_msg", ",", "fmt_kwargs", "=", "msg_kwargs", ")", "state", ".", "do_test", "(", "_msg", ")", "return", "state", ".", "to_child", "(", "append_message", "=", "{", "\"msg\"", ":", "expand_msg", ",", "\"kwargs\"", ":", "msg_kwargs", "}", ",", "student_result", "=", "{", "k", ":", "[", "v", "[", "index", "]", "]", "for", "k", ",", "v", "in", "stu_res", ".", "items", "(", ")", "}", ",", "solution_result", "=", "{", "k", ":", "[", "v", "[", "index", "]", "]", "for", "k", ",", "v", "in", "sol_res", ".", "items", "(", ")", "}", ",", ")" ]
Zoom in on a particular row in the query result, by index.

    After zooming in on a row, which is represented as a single-row query
    result, you can use ``has_equal_value()`` to verify whether all columns in
    the zoomed in solution query result have a match in the student query result.

    Args:
        index: index of the row to zoom in on (zero-based).
        missing_msg: if specified, this overrides the automatically generated
            feedback message in case the row is missing in the student query result.
        expand_msg: if specified, this overrides the automatically generated
            feedback message that is prepended to feedback messages that are
            thrown further in the SCT chain.

    :Example:

        Suppose we are testing the following SELECT statements

        * solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
        * student : ``SELECT artist_id, name FROM artists LIMIT 2``

        We can write the following SCTs: ::

            # fails, since row 3 at index 2 is not in the student result
            Ex().check_row(2)

            # passes, since row 1 at index 0 is in the student result
            Ex().check_row(0)
[ "Zoom", "in", "on", "a", "particular", "row", "in", "the", "query", "result", "by", "index", "." ]
python
train
37.809524
edx/opaque-keys
opaque_keys/edx/locations.py
https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locations.py#L61-L75
def replace(self, **kwargs): """ Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing their corresponding values. Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs. Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs. """ # Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through. return SlashSeparatedCourseKey( kwargs.pop('org', self.org), kwargs.pop('course', self.course), kwargs.pop('run', self.run), **kwargs )
[ "def", "replace", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Deprecation value is hard coded as True in __init__ and therefore does not need to be passed through.", "return", "SlashSeparatedCourseKey", "(", "kwargs", ".", "pop", "(", "'org'", ",", "self", ".", "org", ")", ",", "kwargs", ".", "pop", "(", "'course'", ",", "self", ".", "course", ")", ",", "kwargs", ".", "pop", "(", "'run'", ",", "self", ".", "run", ")", ",", "*", "*", "kwargs", ")" ]
Return: a new :class:`SlashSeparatedCourseKey` with specific ``kwargs`` replacing their corresponding values. Using CourseLocator's replace function results in a mismatch of __init__ args and kwargs. Replace tries to instantiate a SlashSeparatedCourseKey object with CourseLocator args and kwargs.
[ "Return", ":", "a", "new", ":", "class", ":", "SlashSeparatedCourseKey", "with", "specific", "kwargs", "replacing", "their", "corresponding", "values", "." ]
python
train
45.933333
planetarypy/planetaryimage
planetaryimage/pds3image.py
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L336-L341
def dtype(self): """Pixel data type.""" try: return self.data.dtype except AttributeError: return numpy.dtype('%s%d' % (self._sample_type, self._sample_bytes))
[ "def", "dtype", "(", "self", ")", ":", "try", ":", "return", "self", ".", "data", ".", "dtype", "except", "AttributeError", ":", "return", "numpy", ".", "dtype", "(", "'%s%d'", "%", "(", "self", ".", "_sample_type", ",", "self", ".", "_sample_bytes", ")", ")" ]
Pixel data type.
[ "Pixel", "data", "type", "." ]
python
train
33.666667
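How the fallback dtype string is assembled: a sample-type code plus a byte width; the '>i' code below is an illustrative assumption, standing in for the instance's `_sample_type`:

import numpy as np

# '%s%d' % (sample_type, sample_bytes) -> e.g. '>i2'
print(np.dtype('%s%d' % ('>i', 2)))  # big-endian 16-bit signed integer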
ze-phyr-us/django-libretto
django_libretto/url.py
https://github.com/ze-phyr-us/django-libretto/blob/b19d8aa21b9579ee91e81967a44d1c40f5588b17/django_libretto/url.py#L6-L31
def reverse(view, *args, **kwargs): ''' User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse` as `args` and `kwargs` arguments, respectively. The special optional keyword argument `query` is a dictionary of query (or GET) parameters that can be appended to the `reverse`d URL. Example: reverse('products:category', categoryId = 5, query = {'page': 2}) is equivalent to django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2' ''' if 'query' in kwargs: query = kwargs.pop('query') else: query = None base = urlresolvers.reverse(view, args = args, kwargs = kwargs) if query: return '{}?{}'.format(base, django.utils.http.urlencode(query)) else: return base
[ "def", "reverse", "(", "view", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'query'", "in", "kwargs", ":", "query", "=", "kwargs", ".", "pop", "(", "'query'", ")", "else", ":", "query", "=", "None", "base", "=", "urlresolvers", ".", "reverse", "(", "view", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "if", "query", ":", "return", "'{}?{}'", ".", "format", "(", "base", ",", "django", ".", "utils", ".", "http", ".", "urlencode", "(", "query", ")", ")", "else", ":", "return", "base" ]
User-friendly reverse. Pass arguments and keyword arguments to Django's `reverse` as `args` and `kwargs` arguments, respectively. The special optional keyword argument `query` is a dictionary of query (or GET) parameters that can be appended to the `reverse`d URL. Example: reverse('products:category', categoryId = 5, query = {'page': 2}) is equivalent to django.core.urlresolvers.reverse('products:category', kwargs = {'categoryId': 5}) + '?page=2'
[ "User", "-", "friendly", "reverse", ".", "Pass", "arguments", "and", "keyword", "arguments", "to", "Django", "s", "reverse", "as", "args", "and", "kwargs", "arguments", "respectively", "." ]
python
test
27.884615
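The query-string append in isolation; `urlencode` handles the quoting (a Django install is assumed available):

from django.utils.http import urlencode

base = '/products/category/5/'
# append GET parameters exactly as the helper above does
url = '{}?{}'.format(base, urlencode({'page': 2}))
print(url)  # /products/category/5/?page=2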
openstack/hacking
hacking/checks/except_checks.py
https://github.com/openstack/hacking/blob/10e58f907181cac91d3b2af422c2458b04a1ec79/hacking/checks/except_checks.py#L169-L193
def hacking_assert_equal(logical_line, noqa): r"""Check that self.assertEqual and self.assertNotEqual are used. Okay: self.assertEqual(x, y) Okay: self.assertNotEqual(x, y) H204: self.assertTrue(x == y) H204: self.assertTrue(x != y) H204: self.assertFalse(x == y) H204: self.assertFalse(x != y) """ if noqa: return methods = ['assertTrue', 'assertFalse'] for method in methods: start = logical_line.find('.%s' % method) + 1 if start != 0: break else: return comparisons = [ast.Eq, ast.NotEq] checker = AssertTrueFalseChecker(methods, comparisons) checker.visit(ast.parse(logical_line)) if checker.error: yield start, 'H204: Use assert(Not)Equal()'
[ "def", "hacking_assert_equal", "(", "logical_line", ",", "noqa", ")", ":", "if", "noqa", ":", "return", "methods", "=", "[", "'assertTrue'", ",", "'assertFalse'", "]", "for", "method", "in", "methods", ":", "start", "=", "logical_line", ".", "find", "(", "'.%s'", "%", "method", ")", "+", "1", "if", "start", "!=", "0", ":", "break", "else", ":", "return", "comparisons", "=", "[", "ast", ".", "Eq", ",", "ast", ".", "NotEq", "]", "checker", "=", "AssertTrueFalseChecker", "(", "methods", ",", "comparisons", ")", "checker", ".", "visit", "(", "ast", ".", "parse", "(", "logical_line", ")", ")", "if", "checker", ".", "error", ":", "yield", "start", ",", "'H204: Use assert(Not)Equal()'" ]
r"""Check that self.assertEqual and self.assertNotEqual are used. Okay: self.assertEqual(x, y) Okay: self.assertNotEqual(x, y) H204: self.assertTrue(x == y) H204: self.assertTrue(x != y) H204: self.assertFalse(x == y) H204: self.assertFalse(x != y)
[ "r", "Check", "that", "self", ".", "assertEqual", "and", "self", ".", "assertNotEqual", "are", "used", "." ]
python
train
29.64
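A stripped-down version of the AST test above: parse one logical line and look for an ==/!= comparison inside an assertTrue/assertFalse call (a sketch, not the checker class the plugin actually uses):

import ast

def uses_eq_in_assert_true(logical_line):
    tree = ast.parse(logical_line)
    for node in ast.walk(tree):
        if (isinstance(node, ast.Call)
                and isinstance(node.func, ast.Attribute)
                and node.func.attr in ('assertTrue', 'assertFalse')
                and node.args
                and isinstance(node.args[0], ast.Compare)
                and isinstance(node.args[0].ops[0], (ast.Eq, ast.NotEq))):
            return True
    return False

print(uses_eq_in_assert_true('self.assertTrue(x == y)'))   # True
print(uses_eq_in_assert_true('self.assertEqual(x, y)'))    # False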
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/Thing.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Thing.py#L432-L445
def delete_feed(self, pid):
        """Delete a feed, identified by its local id.

        Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
        containing the error if the infrastructure detects a problem

        Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
        if there is a communications problem between you and the infrastructure

        `pid` (required) (string) local identifier of the feed you want to delete
        """
        logger.info("delete_feed(pid=\"%s\") [lid=%s]", pid, self.__lid)
        return self.__delete_point(R_FEED, pid)
[ "def", "delete_feed", "(", "self", ",", "pid", ")", ":", "logger", ".", "info", "(", "\"delete_feed(pid=\\\"%s\\\") [lid=%s]\"", ",", "pid", ",", "self", ".", "__lid", ")", "return", "self", ".", "__delete_point", "(", "R_FEED", ",", "pid", ")" ]
Delete a feed, identified by its local id.

        Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
        containing the error if the infrastructure detects a problem

        Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
        if there is a communications problem between you and the infrastructure

        `pid` (required) (string) local identifier of the feed you want to delete
[ "Delete", "a", "feed", "identified", "by", "its", "local", "id", "." ]
python
train
44.428571
getsentry/raven-python
raven/transport/eventlet.py
https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/transport/eventlet.py#L47-L51
def send(self, url, data, headers): """ Spawn an async request to a remote webserver. """ eventlet.spawn(self._send_payload, (url, data, headers))
[ "def", "send", "(", "self", ",", "url", ",", "data", ",", "headers", ")", ":", "eventlet", ".", "spawn", "(", "self", ".", "_send_payload", ",", "(", "url", ",", "data", ",", "headers", ")", ")" ]
Spawn an async request to a remote webserver.
[ "Spawn", "an", "async", "request", "to", "a", "remote", "webserver", "." ]
python
train
34.8
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L843-L868
def team(self, name=None, id=None, is_hidden=False, **kwargs):
        """
        Team of KE-chain.

        Provides a single :class:`Team` of KE-chain. You can filter on team name or provide id.

        :param name: (optional) team name to filter
        :type name: basestring or None
        :param id: (optional) id of the team to filter
        :type id: basestring or None
        :param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
        :type is_hidden: bool or None
        :param kwargs: Additional filtering keyword=value arguments
        :type kwargs: dict or None
        :return: a single :class:`Team`
        :raises NotFoundError: when a team could not be found
        :raises MultipleFoundError: when more than a single team fits the criteria
        """
        _teams = self.teams(name=name, id=id, **kwargs)

        if len(_teams) == 0:
            raise NotFoundError("No team criteria matches")
        if len(_teams) != 1:
            raise MultipleFoundError("Multiple teams fit criteria")

        return _teams[0]
[ "def", "team", "(", "self", ",", "name", "=", "None", ",", "id", "=", "None", ",", "is_hidden", "=", "False", ",", "*", "*", "kwargs", ")", ":", "_teams", "=", "self", ".", "teams", "(", "name", "=", "name", ",", "id", "=", "id", ",", "*", "*", "kwargs", ")", "if", "len", "(", "_teams", ")", "==", "0", ":", "raise", "NotFoundError", "(", "\"No team criteria matches\"", ")", "if", "len", "(", "_teams", ")", "!=", "1", ":", "raise", "MultipleFoundError", "(", "\"Multiple teams fit criteria\"", ")", "return", "_teams", "[", "0", "]" ]
Team of KE-chain.

        Provides a single :class:`Team` of KE-chain. You can filter on team name or provide id.

        :param name: (optional) team name to filter
        :type name: basestring or None
        :param id: (optional) id of the team to filter
        :type id: basestring or None
        :param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden)
        :type is_hidden: bool or None
        :param kwargs: Additional filtering keyword=value arguments
        :type kwargs: dict or None
        :return: a single :class:`Team`
        :raises NotFoundError: when a team could not be found
        :raises MultipleFoundError: when more than a single team fits the criteria
[ "Team", "of", "KE", "-", "chain", "." ]
python
train
41.423077
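A hedged usage sketch; `client` stands for a hypothetical authenticated pykechain Client, so this is illustrative rather than runnable in isolation.

try:
    team = client.team(name='Engineering')
except NotFoundError:
    print('no team matches that name')
except MultipleFoundError:
    print('name is ambiguous; narrow the filter or pass id=...')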
hubo1016/namedstruct
namedstruct/namedstruct.py
https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L214-L225
def _extend(self, newsub): ''' Append a subclass (extension) after the base class. For parser internal use. ''' current = self while hasattr(current, '_sub'): current = current._sub _set(current, '_sub', newsub) try: object.__delattr__(self, '_extra') except: pass
[ "def", "_extend", "(", "self", ",", "newsub", ")", ":", "current", "=", "self", "while", "hasattr", "(", "current", ",", "'_sub'", ")", ":", "current", "=", "current", ".", "_sub", "_set", "(", "current", ",", "'_sub'", ",", "newsub", ")", "try", ":", "object", ".", "__delattr__", "(", "self", ",", "'_extra'", ")", "except", ":", "pass" ]
Append a subclass (extension) after the base class. For parser internal use.
[ "Append", "a", "subclass", "(", "extension", ")", "after", "the", "base", "class", ".", "For", "parser", "internal", "use", "." ]
python
train
29.416667
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L332-L334
def subclass(self, klass): """True if the Class is a subclass of the given one.""" return bool(lib.EnvSubclassP(self._env, self._cls, klass._cls))
[ "def", "subclass", "(", "self", ",", "klass", ")", ":", "return", "bool", "(", "lib", ".", "EnvSubclassP", "(", "self", ".", "_env", ",", "self", ".", "_cls", ",", "klass", ".", "_cls", ")", ")" ]
True if the Class is a subclass of the given one.
[ "True", "if", "the", "Class", "is", "a", "subclass", "of", "the", "given", "one", "." ]
python
train
53.333333
SoCo/SoCo
soco/cache.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/cache.py#L148-L170
def put(self, item, *args, **kwargs): """Put an item into the cache, for this combination of args and kwargs. Args: *args: any arguments. **kwargs: any keyword arguments. If ``timeout`` is specified as one of the keyword arguments, the item will remain available for retrieval for ``timeout`` seconds. If ``timeout`` is `None` or not specified, the ``default_timeout`` for this cache will be used. Specify a ``timeout`` of 0 (or ensure that the ``default_timeout`` for this cache is 0) if this item is not to be cached. """ if not self.enabled: return # Check for a timeout keyword, store and remove it. timeout = kwargs.pop('timeout', None) if timeout is None: timeout = self.default_timeout cache_key = self.make_key(args, kwargs) # Store the item, along with the time at which it will expire with self._cache_lock: self._cache[cache_key] = (time() + timeout, item)
[ "def", "put", "(", "self", ",", "item", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "enabled", ":", "return", "# Check for a timeout keyword, store and remove it.", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "None", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "default_timeout", "cache_key", "=", "self", ".", "make_key", "(", "args", ",", "kwargs", ")", "# Store the item, along with the time at which it will expire", "with", "self", ".", "_cache_lock", ":", "self", ".", "_cache", "[", "cache_key", "]", "=", "(", "time", "(", ")", "+", "timeout", ",", "item", ")" ]
Put an item into the cache, for this combination of args and kwargs. Args: *args: any arguments. **kwargs: any keyword arguments. If ``timeout`` is specified as one of the keyword arguments, the item will remain available for retrieval for ``timeout`` seconds. If ``timeout`` is `None` or not specified, the ``default_timeout`` for this cache will be used. Specify a ``timeout`` of 0 (or ensure that the ``default_timeout`` for this cache is 0) if this item is not to be cached.
[ "Put", "an", "item", "into", "the", "cache", "for", "this", "combination", "of", "args", "and", "kwargs", "." ]
python
train
47.304348
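A self-contained sketch of the expiry scheme used above: each entry stores the absolute wall-clock time at which it goes stale, and reads compare against that (locking and make_key omitted for brevity).

from time import time

cache = {}

def put(key, item, timeout=10):
    cache[key] = (time() + timeout, item)

def get(key):
    expiry, item = cache.get(key, (0, None))
    return item if time() < expiry else None

put('speakers', ['kitchen', 'office'], timeout=5)
assert get('speakers') == ['kitchen', 'office']
put('nothing', 'x', timeout=0)   # a timeout of 0 means effectively not cached
assert get('nothing') is None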
edx/edx-celeryutils
celery_utils/management/commands/cleanup_resolved_tasks.py
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/management/commands/cleanup_resolved_tasks.py#L25-L48
def add_arguments(self, parser): """ Add arguments to the command parser. Uses argparse syntax. See documentation at https://docs.python.org/3/library/argparse.html. """ parser.add_argument( '--dry-run', action='store_true', default=False, help="Output what we're going to do, but don't actually do it." ) parser.add_argument( '--task-name', '-t', default=None, help=u"Restrict cleanup to tasks matching the named task.", ) parser.add_argument( '--age', '-a', type=int, default=30, help=u"Only delete tasks that have been resolved for at least the specified number of days (default: 30)", )
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--dry-run'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Output what we're going to do, but don't actually do it.\"", ")", "parser", ".", "add_argument", "(", "'--task-name'", ",", "'-t'", ",", "default", "=", "None", ",", "help", "=", "u\"Restrict cleanup to tasks matching the named task.\"", ",", ")", "parser", ".", "add_argument", "(", "'--age'", ",", "'-a'", ",", "type", "=", "int", ",", "default", "=", "30", ",", "help", "=", "u\"Only delete tasks that have been resolved for at least the specified number of days (default: 30)\"", ",", ")" ]
Add arguments to the command parser. Uses argparse syntax. See documentation at https://docs.python.org/3/library/argparse.html.
[ "Add", "arguments", "to", "the", "command", "parser", "." ]
python
train
32.875
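The same flags rebuilt with plain argparse, as a runnable standalone check of the parser definition (no Django management-command machinery):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dry-run', action='store_true', default=False)
parser.add_argument('--task-name', '-t', default=None)
parser.add_argument('--age', '-a', type=int, default=30)

args = parser.parse_args(['--dry-run', '-a', '7'])
assert args.dry_run is True and args.age == 7 and args.task_name is None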
polyaxon/polyaxon
polyaxon/action_manager/utils/email.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/action_manager/utils/email.py#L9-L20
def render_mail_template(subject_template, body_template, context): """ Renders both the subject and body templates in the given context. Returns a tuple (subject, body) of the result. """ try: subject = strip_spaces(render_to_string(subject_template, context)) body = render_to_string(body_template, context) finally: pass return subject, body
[ "def", "render_mail_template", "(", "subject_template", ",", "body_template", ",", "context", ")", ":", "try", ":", "subject", "=", "strip_spaces", "(", "render_to_string", "(", "subject_template", ",", "context", ")", ")", "body", "=", "render_to_string", "(", "body_template", ",", "context", ")", "finally", ":", "pass", "return", "subject", ",", "body" ]
Renders both the subject and body templates in the given context. Returns a tuple (subject, body) of the result.
[ "Renders", "both", "the", "subject", "and", "body", "templates", "in", "the", "given", "context", ".", "Returns", "a", "tuple", "(", "subject", "body", ")", "of", "the", "result", "." ]
python
train
32.166667
FactoryBoy/factory_boy
factory/base.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/base.py#L753-L761
def use_strategy(new_strategy): """Force the use of a different strategy. This is an alternative to setting default_strategy in the class definition. """ def wrapped_class(klass): klass._meta.strategy = new_strategy return klass return wrapped_class
[ "def", "use_strategy", "(", "new_strategy", ")", ":", "def", "wrapped_class", "(", "klass", ")", ":", "klass", ".", "_meta", ".", "strategy", "=", "new_strategy", "return", "klass", "return", "wrapped_class" ]
Force the use of a different strategy. This is an alternative to setting default_strategy in the class definition.
[ "Force", "the", "use", "of", "a", "different", "strategy", "." ]
python
train
30.888889
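A hedged usage sketch, assuming factory_boy is installed; dict serves as a minimal stand-in model so the example stays self-contained (newer releases steer you toward setting Meta.strategy directly).

import factory

@factory.use_strategy(factory.BUILD_STRATEGY)
class UserFactory(factory.Factory):
    class Meta:
        model = dict   # stand-in model for the sketch
    name = 'alice'

assert UserFactory() == {'name': 'alice'}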
yoavaviram/python-amazon-simple-product-api
amazon/api.py
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L290-L319
def cart_create(self, items, **kwargs): """CartCreate. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :return: An :class:`~.AmazonCart`. """ if isinstance(items, dict): items = [items] if len(items) > 10: raise CartException("You can't add more than 10 items at once") offer_id_key_template = 'Item.{0}.OfferListingId' quantity_key_template = 'Item.{0}.Quantity' for i, item in enumerate(items): kwargs[offer_id_key_template.format(i)] = item['offer_id'] kwargs[quantity_key_template.format(i)] = item['quantity'] response = self.api.CartCreate(**kwargs) root = objectify.fromstring(response) return AmazonCart(root)
[ "def", "cart_create", "(", "self", ",", "items", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "items", ",", "dict", ")", ":", "items", "=", "[", "items", "]", "if", "len", "(", "items", ")", ">", "10", ":", "raise", "CartException", "(", "\"You can't add more than 10 items at once\"", ")", "offer_id_key_template", "=", "'Item.{0}.OfferListingId'", "quantity_key_template", "=", "'Item.{0}.Quantity'", "for", "i", ",", "item", "in", "enumerate", "(", "items", ")", ":", "kwargs", "[", "offer_id_key_template", ".", "format", "(", "i", ")", "]", "=", "item", "[", "'offer_id'", "]", "kwargs", "[", "quantity_key_template", ".", "format", "(", "i", ")", "]", "=", "item", "[", "'quantity'", "]", "response", "=", "self", ".", "api", ".", "CartCreate", "(", "*", "*", "kwargs", ")", "root", "=", "objectify", ".", "fromstring", "(", "response", ")", "return", "AmazonCart", "(", "root", ")" ]
CartCreate. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :return: An :class:`~.AmazonCart`.
[ "CartCreate", ".", ":", "param", "items", ":", "A", "dictionary", "containing", "the", "items", "to", "be", "added", "to", "the", "cart", ".", "Or", "a", "list", "containing", "these", "dictionaries", ".", "It", "is", "not", "possible", "to", "create", "an", "empty", "cart!", "example", ":", "[", "{", "offer_id", ":", "rt2ofih3f389nwiuhf8934z87o3f4h", "quantity", ":", "1", "}", "]" ]
python
train
33.133333
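A standalone sketch of the Item.N.* key templating above, showing how the items list flattens into the request kwargs:

items = [{'offer_id': 'abc', 'quantity': 1},
         {'offer_id': 'xyz', 'quantity': 2}]
kwargs = {}
for i, item in enumerate(items):
    kwargs['Item.{0}.OfferListingId'.format(i)] = item['offer_id']
    kwargs['Item.{0}.Quantity'.format(i)] = item['quantity']

assert kwargs['Item.0.OfferListingId'] == 'abc'
assert kwargs['Item.1.Quantity'] == 2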
librosa/librosa
librosa/core/time_frequency.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L646-L697
def mel_to_hz(mels, htk=False): """Convert mel bin numbers to frequencies Examples -------- >>> librosa.mel_to_hz(3) 200. >>> librosa.mel_to_hz([1,2,3,4,5]) array([ 66.667, 133.333, 200. , 266.667, 333.333]) Parameters ---------- mels : np.ndarray [shape=(n,)], float mel bins to convert htk : bool use HTK formula instead of Slaney Returns ------- frequencies : np.ndarray [shape=(n,)] input mels in Hz See Also -------- hz_to_mel """ mels = np.asanyarray(mels) if htk: return 700.0 * (10.0**(mels / 2595.0) - 1.0) # Fill in the linear scale f_min = 0.0 f_sp = 200.0 / 3 freqs = f_min + f_sp * mels # And now the nonlinear scale min_log_hz = 1000.0 # beginning of log region (Hz) min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels) logstep = np.log(6.4) / 27.0 # step size for log region if mels.ndim: # If we have vector data, vectorize log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel)) elif mels >= min_log_mel: # If we have scalar data, check directly freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel)) return freqs
[ "def", "mel_to_hz", "(", "mels", ",", "htk", "=", "False", ")", ":", "mels", "=", "np", ".", "asanyarray", "(", "mels", ")", "if", "htk", ":", "return", "700.0", "*", "(", "10.0", "**", "(", "mels", "/", "2595.0", ")", "-", "1.0", ")", "# Fill in the linear scale", "f_min", "=", "0.0", "f_sp", "=", "200.0", "/", "3", "freqs", "=", "f_min", "+", "f_sp", "*", "mels", "# And now the nonlinear scale", "min_log_hz", "=", "1000.0", "# beginning of log region (Hz)", "min_log_mel", "=", "(", "min_log_hz", "-", "f_min", ")", "/", "f_sp", "# same (Mels)", "logstep", "=", "np", ".", "log", "(", "6.4", ")", "/", "27.0", "# step size for log region", "if", "mels", ".", "ndim", ":", "# If we have vector data, vectorize", "log_t", "=", "(", "mels", ">=", "min_log_mel", ")", "freqs", "[", "log_t", "]", "=", "min_log_hz", "*", "np", ".", "exp", "(", "logstep", "*", "(", "mels", "[", "log_t", "]", "-", "min_log_mel", ")", ")", "elif", "mels", ">=", "min_log_mel", ":", "# If we have scalar data, check directly", "freqs", "=", "min_log_hz", "*", "np", ".", "exp", "(", "logstep", "*", "(", "mels", "-", "min_log_mel", ")", ")", "return", "freqs" ]
Convert mel bin numbers to frequencies Examples -------- >>> librosa.mel_to_hz(3) 200. >>> librosa.mel_to_hz([1,2,3,4,5]) array([ 66.667, 133.333, 200. , 266.667, 333.333]) Parameters ---------- mels : np.ndarray [shape=(n,)], float mel bins to convert htk : bool use HTK formula instead of Slaney Returns ------- frequencies : np.ndarray [shape=(n,)] input mels in Hz See Also -------- hz_to_mel
[ "Convert", "mel", "bin", "numbers", "to", "frequencies" ]
python
test
25.211538
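A worked numeric check of the Slaney mapping above, using only the stdlib: the linear region has slope 200/3 Hz per mel, and the log region begins at mel 15 (1000 Hz).

import math

f_sp = 200.0 / 3
assert math.isclose(3 * f_sp, 200.0)   # mel_to_hz(3) -> 200 Hz, per the docstring
min_log_mel = 1000.0 / f_sp            # 15 mels marks the linear/log boundary
logstep = math.log(6.4) / 27.0
freq = 1000.0 * math.exp(logstep * (40 - min_log_mel))
print(freq)                            # ~5.58 kHz for mel 40, in the log region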
ubyssey/dispatch
dispatch/theme/widgets.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/theme/widgets.py#L132-L140
def prepare_data(self): """Prepare widget data for template.""" result = {} for field in self.fields: data = self.data.get(field.name) result[field.name] = field.prepare_data(data) return result
[ "def", "prepare_data", "(", "self", ")", ":", "result", "=", "{", "}", "for", "field", "in", "self", ".", "fields", ":", "data", "=", "self", ".", "data", ".", "get", "(", "field", ".", "name", ")", "result", "[", "field", ".", "name", "]", "=", "field", ".", "prepare_data", "(", "data", ")", "return", "result" ]
Prepare widget data for template.
[ "Prepare", "widget", "data", "for", "template", "." ]
python
test
27.111111
urinieto/msaf
msaf/algorithms/foote/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/foote/segmenter.py#L31-L36
def compute_ssm(X, metric="seuclidean"): """Computes the self-similarity matrix of X.""" D = distance.pdist(X, metric=metric) D = distance.squareform(D) D /= D.max() return 1 - D
[ "def", "compute_ssm", "(", "X", ",", "metric", "=", "\"seuclidean\"", ")", ":", "D", "=", "distance", ".", "pdist", "(", "X", ",", "metric", "=", "metric", ")", "D", "=", "distance", ".", "squareform", "(", "D", ")", "D", "/=", "D", ".", "max", "(", ")", "return", "1", "-", "D" ]
Computes the self-similarity matrix of X.
[ "Computes", "the", "self", "-", "similarity", "matrix", "of", "X", "." ]
python
test
32.166667
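A quick standalone check of the normalisation above with scipy, switching the metric to plain euclidean for the demo (seuclidean estimates per-dimension variance, which needs richer data): identical frames score 1, the most distant pair scores 0.

import numpy as np
from scipy.spatial import distance

X = np.array([[0.0, 0.0], [0.0, 0.0], [3.0, 4.0]])
D = distance.squareform(distance.pdist(X, metric='euclidean'))
D /= D.max()
S = 1 - D
assert S[0, 1] == 1.0 and S[0, 2] == 0.0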
phareous/insteonlocal
insteonlocal/Hub.py
https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Hub.py#L301-L316
def id_request(self, device_id): """Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']""" self.logger.info("\nid_request for device %s", device_id) device_id = device_id.upper() self.direct_command(device_id, '10', '00') sleep(2) status = self.get_buffer_status(device_id) if not status: sleep(1) status = self.get_buffer_status(device_id) return status
[ "def", "id_request", "(", "self", ",", "device_id", ")", ":", "self", ".", "logger", ".", "info", "(", "\"\\nid_request for device %s\"", ",", "device_id", ")", "device_id", "=", "device_id", ".", "upper", "(", ")", "self", ".", "direct_command", "(", "device_id", ",", "'10'", ",", "'00'", ")", "sleep", "(", "2", ")", "status", "=", "self", ".", "get_buffer_status", "(", "device_id", ")", "if", "not", "status", ":", "sleep", "(", "1", ")", "status", "=", "self", ".", "get_buffer_status", "(", "device_id", ")", "return", "status" ]
Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']
[ "Get", "the", "device", "for", "the", "ID", ".", "ID", "request", "can", "return", "device", "type", "(", "cat", "/", "subcat", ")", "firmware", "ver", "etc", ".", "Cat", "is", "status", "[", "is_high", "]", "sub", "cat", "is", "status", "[", "id_mid", "]" ]
python
train
33.4375
jasonrbriggs/stomp.py
stomp/listener.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/listener.py#L229-L240
def on_send(self, frame): """ Add the heartbeat header to the frame when connecting, and bump next outbound heartbeat timestamp. :param Frame frame: the Frame object """ if frame.cmd == CMD_CONNECT or frame.cmd == CMD_STOMP: if self.heartbeats != (0, 0): frame.headers[HDR_HEARTBEAT] = '%s,%s' % self.heartbeats if self.next_outbound_heartbeat is not None: self.next_outbound_heartbeat = monotonic() + self.send_sleep
[ "def", "on_send", "(", "self", ",", "frame", ")", ":", "if", "frame", ".", "cmd", "==", "CMD_CONNECT", "or", "frame", ".", "cmd", "==", "CMD_STOMP", ":", "if", "self", ".", "heartbeats", "!=", "(", "0", ",", "0", ")", ":", "frame", ".", "headers", "[", "HDR_HEARTBEAT", "]", "=", "'%s,%s'", "%", "self", ".", "heartbeats", "if", "self", ".", "next_outbound_heartbeat", "is", "not", "None", ":", "self", ".", "next_outbound_heartbeat", "=", "monotonic", "(", ")", "+", "self", ".", "send_sleep" ]
Add the heartbeat header to the frame when connecting, and bump next outbound heartbeat timestamp. :param Frame frame: the Frame object
[ "Add", "the", "heartbeat", "header", "to", "the", "frame", "when", "connecting", "and", "bump", "next", "outbound", "heartbeat", "timestamp", "." ]
python
train
41.916667
ibis-project/ibis
ibis/bigquery/udf/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/bigquery/udf/api.py#L19-L36
def create_udf_node(name, fields):
    """Create a new UDF node type.

    Parameters
    ----------
    name : str
        The name of the UDF node
    fields : OrderedDict
        Mapping of class member name to definition

    Returns
    -------
    result : type
        A new BigQueryUDFNode subclass
    """
    definition = next(_udf_name_cache[name])
    external_name = '{}_{:d}'.format(name, definition)
    return type(external_name, (BigQueryUDFNode,), fields)
[ "def", "create_udf_node", "(", "name", ",", "fields", ")", ":", "definition", "=", "next", "(", "_udf_name_cache", "[", "name", "]", ")", "external_name", "=", "'{}_{:d}'", ".", "format", "(", "name", ",", "definition", ")", "return", "type", "(", "external_name", ",", "(", "BigQueryUDFNode", ",", ")", ",", "fields", ")" ]
Create a new UDF node type.

    Parameters
    ----------
    name : str
        The name of the UDF node
    fields : OrderedDict
        Mapping of class member name to definition

    Returns
    -------
    result : type
        A new BigQueryUDFNode subclass
[ "Create", "a", "new", "UDF", "node", "type", "." ]
python
train
25.388889
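A standalone sketch of the pattern above: a per-name counter plus three-argument type() yields uniquely named subclasses on repeated registration (Node stands in for BigQueryUDFNode).

import itertools
from collections import defaultdict

class Node:
    pass

_name_cache = defaultdict(itertools.count)

def create_node(name, fields):
    definition = next(_name_cache[name])
    return type('{}_{:d}'.format(name, definition), (Node,), fields)

A = create_node('my_udf', {'arity': 2})
B = create_node('my_udf', {'arity': 2})
assert (A.__name__, B.__name__) == ('my_udf_0', 'my_udf_1')
assert issubclass(A, Node) and A.arity == 2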
serge-sans-paille/pythran
pythran/backend.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/backend.py#L1275-L1289
def visit_Module(self, node): """ Build a compilation unit. """ # build all types deps = sorted(self.dependencies) headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp") for t in deps] headers += [Include(os.path.join("pythonic", *t) + ".hpp") for t in deps] decls_n_defns = [self.visit(stmt) for stmt in node.body] decls, defns = zip(*[s for s in decls_n_defns if s]) nsbody = [s for ls in decls + defns for s in ls] ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody) self.result = CompilationUnit(headers + [ns])
[ "def", "visit_Module", "(", "self", ",", "node", ")", ":", "# build all types", "deps", "=", "sorted", "(", "self", ".", "dependencies", ")", "headers", "=", "[", "Include", "(", "os", ".", "path", ".", "join", "(", "\"pythonic\"", ",", "\"include\"", ",", "*", "t", ")", "+", "\".hpp\"", ")", "for", "t", "in", "deps", "]", "headers", "+=", "[", "Include", "(", "os", ".", "path", ".", "join", "(", "\"pythonic\"", ",", "*", "t", ")", "+", "\".hpp\"", ")", "for", "t", "in", "deps", "]", "decls_n_defns", "=", "[", "self", ".", "visit", "(", "stmt", ")", "for", "stmt", "in", "node", ".", "body", "]", "decls", ",", "defns", "=", "zip", "(", "*", "[", "s", "for", "s", "in", "decls_n_defns", "if", "s", "]", ")", "nsbody", "=", "[", "s", "for", "ls", "in", "decls", "+", "defns", "for", "s", "in", "ls", "]", "ns", "=", "Namespace", "(", "pythran_ward", "+", "self", ".", "passmanager", ".", "module_name", ",", "nsbody", ")", "self", ".", "result", "=", "CompilationUnit", "(", "headers", "+", "[", "ns", "]", ")" ]
Build a compilation unit.
[ "Build", "a", "compilation", "unit", "." ]
python
train
43.533333
HdrHistogram/HdrHistogram_py
hdrh/histogram.py
https://github.com/HdrHistogram/HdrHistogram_py/blob/cb99981b0564a62e1aa02bd764efa6445923f8f7/hdrh/histogram.py#L424-L441
def adjust_internal_tacking_values(self, min_non_zero_index, max_index, total_added): '''Called during decoding and add to adjust the new min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none) ''' if max_index >= 0: max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index)) self.max_value = max(self.max_value, max_value) if min_non_zero_index >= 0: min_value = self.get_value_from_index(min_non_zero_index) self.min_value = min(self.min_value, min_value) self.total_count += total_added
[ "def", "adjust_internal_tacking_values", "(", "self", ",", "min_non_zero_index", ",", "max_index", ",", "total_added", ")", ":", "if", "max_index", ">=", "0", ":", "max_value", "=", "self", ".", "get_highest_equivalent_value", "(", "self", ".", "get_value_from_index", "(", "max_index", ")", ")", "self", ".", "max_value", "=", "max", "(", "self", ".", "max_value", ",", "max_value", ")", "if", "min_non_zero_index", ">=", "0", ":", "min_value", "=", "self", ".", "get_value_from_index", "(", "min_non_zero_index", ")", "self", ".", "min_value", "=", "min", "(", "self", ".", "min_value", ",", "min_value", ")", "self", ".", "total_count", "+=", "total_added" ]
Called during decoding and add to adjust the new min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none)
[ "Called", "during", "decoding", "and", "add", "to", "adjust", "the", "new", "min", "/", "max", "value", "and", "total", "count" ]
python
train
47
pip-services3-python/pip-services3-commons-python
pip_services3_commons/reflect/MethodReflector.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/MethodReflector.py#L42-L68
def has_method(obj, name): """ Checks if object has a method with specified name. :param obj: an object to introspect. :param name: a name of the method to check. :return: true if the object has the method and false if it doesn't. """ if obj == None: raise Exception("Object cannot be null") if name == None: raise Exception("Method name cannot be null") name = name.lower() for method_name in dir(obj): if method_name.lower() != name: continue method = getattr(obj, method_name) if MethodReflector._is_method(method, method_name): return True return False
[ "def", "has_method", "(", "obj", ",", "name", ")", ":", "if", "obj", "==", "None", ":", "raise", "Exception", "(", "\"Object cannot be null\"", ")", "if", "name", "==", "None", ":", "raise", "Exception", "(", "\"Method name cannot be null\"", ")", "name", "=", "name", ".", "lower", "(", ")", "for", "method_name", "in", "dir", "(", "obj", ")", ":", "if", "method_name", ".", "lower", "(", ")", "!=", "name", ":", "continue", "method", "=", "getattr", "(", "obj", ",", "method_name", ")", "if", "MethodReflector", ".", "_is_method", "(", "method", ",", "method_name", ")", ":", "return", "True", "return", "False" ]
Checks if object has a method with specified name. :param obj: an object to introspect. :param name: a name of the method to check. :return: true if the object has the method and false if it doesn't.
[ "Checks", "if", "object", "has", "a", "method", "with", "specified", "name", "." ]
python
train
26.925926
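The case-insensitive lookup above, reduced to a self-contained sketch without the pip-services wrapper (callable() approximates its _is_method test):

class Greeter:
    def Say_Hello(self):
        return 'hi'

def has_method(obj, name):
    name = name.lower()
    return any(n.lower() == name and callable(getattr(obj, n)) for n in dir(obj))

assert has_method(Greeter(), 'say_hello')
assert not has_method(Greeter(), 'say_goodbye')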
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5733-L5839
def copy(self, deep=True): """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self)
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ")", ":", "data", "=", "self", ".", "_data", ".", "copy", "(", "deep", "=", "deep", ")", "return", "self", ".", "_constructor", "(", "data", ")", ".", "__finalize__", "(", "self", ")" ]
Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object
[ "Make", "a", "copy", "of", "this", "object", "s", "indices", "and", "data", "." ]
python
train
29.570093
google/python-adb
adb/adb_debug.py
https://github.com/google/python-adb/blob/d9b94b2dda555c14674c19806debb8449c0e9652/adb/adb_debug.py#L98-L125
def Shell(device, *command): """Runs a command on the device and prints the stdout. Args: command: Command to run on the target. """ if command: return device.StreamingShell(' '.join(command)) else: # Retrieve the initial terminal prompt to use as a delimiter for future reads terminal_prompt = device.InteractiveShell() print(terminal_prompt.decode('utf-8')) # Accept user input in a loop and write that into the interactive shells stdin, then print output while True: cmd = input('> ') if not cmd: continue elif cmd == 'exit': break else: stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True) if stdout: if isinstance(stdout, bytes): stdout = stdout.decode('utf-8') print(stdout) device.Close()
[ "def", "Shell", "(", "device", ",", "*", "command", ")", ":", "if", "command", ":", "return", "device", ".", "StreamingShell", "(", "' '", ".", "join", "(", "command", ")", ")", "else", ":", "# Retrieve the initial terminal prompt to use as a delimiter for future reads", "terminal_prompt", "=", "device", ".", "InteractiveShell", "(", ")", "print", "(", "terminal_prompt", ".", "decode", "(", "'utf-8'", ")", ")", "# Accept user input in a loop and write that into the interactive shells stdin, then print output", "while", "True", ":", "cmd", "=", "input", "(", "'> '", ")", "if", "not", "cmd", ":", "continue", "elif", "cmd", "==", "'exit'", ":", "break", "else", ":", "stdout", "=", "device", ".", "InteractiveShell", "(", "cmd", ",", "strip_cmd", "=", "True", ",", "delim", "=", "terminal_prompt", ",", "strip_delim", "=", "True", ")", "if", "stdout", ":", "if", "isinstance", "(", "stdout", ",", "bytes", ")", ":", "stdout", "=", "stdout", ".", "decode", "(", "'utf-8'", ")", "print", "(", "stdout", ")", "device", ".", "Close", "(", ")" ]
Runs a command on the device and prints the stdout. Args: command: Command to run on the target.
[ "Runs", "a", "command", "on", "the", "device", "and", "prints", "the", "stdout", "." ]
python
train
34.821429
google-research/batch-ppo
agents/parts/iterate_sequences.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L26-L74
def iterate_sequences( consumer_fn, output_template, sequences, length, chunk_length=None, batch_size=None, num_epochs=1, padding_value=0): """Iterate over batches of chunks of sequences for multiple epochs. The batch dimension of the length tensor must be set because it is used to infer buffer sizes. Args: consumer_fn: Function creating the operation to process the data. output_template: Nested tensors of same shape and dtype as outputs. sequences: Nested collection of tensors with batch and time dimension. length: Tensor containing the length for each sequence. chunk_length: Split sequences into chunks of this size; optional. batch_size: Split epochs into batches of this size; optional. num_epochs: How many times to repeat over the data. padding_value: Value used for padding the last chunk after the sequence. Raises: ValueError: Unknown batch size of the length tensor. Returns: Concatenated nested tensors returned by the consumer. """ if not length.shape[0].value: raise ValueError('Batch size of length tensor must be set.') num_sequences = length.shape[0].value sequences = dict(sequence=sequences, length=length) dataset = tf.data.Dataset.from_tensor_slices(sequences) dataset = dataset.repeat(num_epochs) if chunk_length: dataset = dataset.map(remove_padding).flat_map( # pylint: disable=g-long-lambda lambda x: tf.data.Dataset.from_tensor_slices( chunk_sequence(x, chunk_length, padding_value))) num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1) else: num_chunks = num_sequences if batch_size: dataset = dataset.shuffle(num_sequences // 2) dataset = dataset.batch(batch_size or num_sequences) dataset = dataset.prefetch(num_epochs) iterator = dataset.make_initializable_iterator() with tf.control_dependencies([iterator.initializer]): num_batches = num_epochs * num_chunks // (batch_size or num_sequences) return tf.scan( # pylint: disable=g-long-lambda lambda _1, index: consumer_fn(iterator.get_next()), tf.range(num_batches), output_template, parallel_iterations=1)
[ "def", "iterate_sequences", "(", "consumer_fn", ",", "output_template", ",", "sequences", ",", "length", ",", "chunk_length", "=", "None", ",", "batch_size", "=", "None", ",", "num_epochs", "=", "1", ",", "padding_value", "=", "0", ")", ":", "if", "not", "length", ".", "shape", "[", "0", "]", ".", "value", ":", "raise", "ValueError", "(", "'Batch size of length tensor must be set.'", ")", "num_sequences", "=", "length", ".", "shape", "[", "0", "]", ".", "value", "sequences", "=", "dict", "(", "sequence", "=", "sequences", ",", "length", "=", "length", ")", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "sequences", ")", "dataset", "=", "dataset", ".", "repeat", "(", "num_epochs", ")", "if", "chunk_length", ":", "dataset", "=", "dataset", ".", "map", "(", "remove_padding", ")", ".", "flat_map", "(", "# pylint: disable=g-long-lambda", "lambda", "x", ":", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "chunk_sequence", "(", "x", ",", "chunk_length", ",", "padding_value", ")", ")", ")", "num_chunks", "=", "tf", ".", "reduce_sum", "(", "(", "length", "-", "1", ")", "//", "chunk_length", "+", "1", ")", "else", ":", "num_chunks", "=", "num_sequences", "if", "batch_size", ":", "dataset", "=", "dataset", ".", "shuffle", "(", "num_sequences", "//", "2", ")", "dataset", "=", "dataset", ".", "batch", "(", "batch_size", "or", "num_sequences", ")", "dataset", "=", "dataset", ".", "prefetch", "(", "num_epochs", ")", "iterator", "=", "dataset", ".", "make_initializable_iterator", "(", ")", "with", "tf", ".", "control_dependencies", "(", "[", "iterator", ".", "initializer", "]", ")", ":", "num_batches", "=", "num_epochs", "*", "num_chunks", "//", "(", "batch_size", "or", "num_sequences", ")", "return", "tf", ".", "scan", "(", "# pylint: disable=g-long-lambda", "lambda", "_1", ",", "index", ":", "consumer_fn", "(", "iterator", ".", "get_next", "(", ")", ")", ",", "tf", ".", "range", "(", "num_batches", ")", ",", "output_template", ",", "parallel_iterations", "=", "1", ")" ]
Iterate over batches of chunks of sequences for multiple epochs. The batch dimension of the length tensor must be set because it is used to infer buffer sizes. Args: consumer_fn: Function creating the operation to process the data. output_template: Nested tensors of same shape and dtype as outputs. sequences: Nested collection of tensors with batch and time dimension. length: Tensor containing the length for each sequence. chunk_length: Split sequences into chunks of this size; optional. batch_size: Split epochs into batches of this size; optional. num_epochs: How many times to repeat over the data. padding_value: Value used for padding the last chunk after the sequence. Raises: ValueError: Unknown batch size of the length tensor. Returns: Concatenated nested tensors returned by the consumer.
[ "Iterate", "over", "batches", "of", "chunks", "of", "sequences", "for", "multiple", "epochs", "." ]
python
train
43.306122
rix0rrr/gcl
gcl/ast.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L516-L525
def applyIndex(self, lst, right): """Apply a list to something else.""" if len(right) != 1: raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right)) right = right[0] if isinstance(right, int): return lst[right] raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
[ "def", "applyIndex", "(", "self", ",", "lst", ",", "right", ")", ":", "if", "len", "(", "right", ")", "!=", "1", ":", "raise", "exceptions", ".", "EvaluationError", "(", "'%r can only be applied to one argument, got %r'", "%", "(", "self", ".", "left", ",", "self", ".", "right", ")", ")", "right", "=", "right", "[", "0", "]", "if", "isinstance", "(", "right", ",", "int", ")", ":", "return", "lst", "[", "right", "]", "raise", "exceptions", ".", "EvaluationError", "(", "\"Can't apply %r to argument (%r): integer expected, got %r\"", "%", "(", "self", ".", "left", ",", "self", ".", "right", ",", "right", ")", ")" ]
Apply a list to something else.
[ "Apply", "a", "list", "to", "something", "else", "." ]
python
train
41.4
solvebio/solvebio-python
solvebio/cli/auth.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L129-L138
def print_user(user): """ Prints information about the current user. """ email = user['email'] domain = user['account']['domain'] role = user['role'] print('You are logged-in to the "{0}" domain ' 'as {1} with role {2}.' .format(domain, email, role))
[ "def", "print_user", "(", "user", ")", ":", "email", "=", "user", "[", "'email'", "]", "domain", "=", "user", "[", "'account'", "]", "[", "'domain'", "]", "role", "=", "user", "[", "'role'", "]", "print", "(", "'You are logged-in to the \"{0}\" domain '", "'as {1} with role {2}.'", ".", "format", "(", "domain", ",", "email", ",", "role", ")", ")" ]
Prints information about the current user.
[ "Prints", "information", "about", "the", "current", "user", "." ]
python
test
28.9
GNS3/gns3-server
gns3server/compute/virtualbox/__init__.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/virtualbox/__init__.py#L174-L202
def list_vms(self, allow_clone=False): """ Gets VirtualBox VM list. """ vbox_vms = [] result = yield from self.execute("list", ["vms"]) for line in result: if len(line) == 0 or line[0] != '"' or line[-1:] != "}": continue # Broken output (perhaps a carriage return in VM name) vmname, _ = line.rsplit(' ', 1) vmname = vmname.strip('"') if vmname == "<inaccessible>": continue # ignore inaccessible VMs extra_data = yield from self.execute("getextradata", [vmname, "GNS3/Clone"]) if allow_clone or len(extra_data) == 0 or not extra_data[0].strip() == "Value: yes": # get the amount of RAM info_results = yield from self.execute("showvminfo", [vmname, "--machinereadable"]) ram = 0 for info in info_results: try: name, value = info.split('=', 1) if name.strip() == "memory": ram = int(value.strip()) break except ValueError: continue vbox_vms.append({"vmname": vmname, "ram": ram}) return vbox_vms
[ "def", "list_vms", "(", "self", ",", "allow_clone", "=", "False", ")", ":", "vbox_vms", "=", "[", "]", "result", "=", "yield", "from", "self", ".", "execute", "(", "\"list\"", ",", "[", "\"vms\"", "]", ")", "for", "line", "in", "result", ":", "if", "len", "(", "line", ")", "==", "0", "or", "line", "[", "0", "]", "!=", "'\"'", "or", "line", "[", "-", "1", ":", "]", "!=", "\"}\"", ":", "continue", "# Broken output (perhaps a carriage return in VM name)", "vmname", ",", "_", "=", "line", ".", "rsplit", "(", "' '", ",", "1", ")", "vmname", "=", "vmname", ".", "strip", "(", "'\"'", ")", "if", "vmname", "==", "\"<inaccessible>\"", ":", "continue", "# ignore inaccessible VMs", "extra_data", "=", "yield", "from", "self", ".", "execute", "(", "\"getextradata\"", ",", "[", "vmname", ",", "\"GNS3/Clone\"", "]", ")", "if", "allow_clone", "or", "len", "(", "extra_data", ")", "==", "0", "or", "not", "extra_data", "[", "0", "]", ".", "strip", "(", ")", "==", "\"Value: yes\"", ":", "# get the amount of RAM", "info_results", "=", "yield", "from", "self", ".", "execute", "(", "\"showvminfo\"", ",", "[", "vmname", ",", "\"--machinereadable\"", "]", ")", "ram", "=", "0", "for", "info", "in", "info_results", ":", "try", ":", "name", ",", "value", "=", "info", ".", "split", "(", "'='", ",", "1", ")", "if", "name", ".", "strip", "(", ")", "==", "\"memory\"", ":", "ram", "=", "int", "(", "value", ".", "strip", "(", ")", ")", "break", "except", "ValueError", ":", "continue", "vbox_vms", ".", "append", "(", "{", "\"vmname\"", ":", "vmname", ",", "\"ram\"", ":", "ram", "}", ")", "return", "vbox_vms" ]
Gets VirtualBox VM list.
[ "Gets", "VirtualBox", "VM", "list", "." ]
python
train
44.068966
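A standalone sketch of parsing one line of `VBoxManage list vms` output with the same rsplit/strip logic as above (the sample line is illustrative):

line = '"Ubuntu Server" {f4b2d5a0-2e2f-4c61-9f3b-123456789abc}'
if len(line) and line[0] == '"' and line[-1:] == '}':
    vmname, _ = line.rsplit(' ', 1)
    vmname = vmname.strip('"')

assert vmname == 'Ubuntu Server'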
senaite/senaite.core
bika/lims/browser/widgets/reflexrulewidget.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/reflexrulewidget.py#L316-L439
def getReflexRuleSetup(self): """ Return a json dict with all the setup data necessary to build the relations: - Relations between methods and analysis services options. - The current saved data the functions returns: {'<method_uid>': { 'analysisservices': { '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [{ 'ResultText': 'Failed', 'ResultValue': '1', 'value': ''}, ... ]} }, 'as_keys': ['<as_uid>', '<as_uid>'], 'method_id': '<method_id>', 'method_tile': '<method_tile>' }, '<method_uid>': { 'analysisservices': { '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} }, 'as_keys': ['<as_uid>', '<as_uid>'], 'method_id': '<method_id>', 'method_tile': '<method_tile>' }, 'saved_actions': {'rules': [ {'actions': [{'act_row_idx': 0, 'action': 'repeat', 'an_result_id': '', 'analyst': '', 'otherWS': current, 'setresultdiscrete': '', 'setresulton': 'original', 'setresultvalue': '', 'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}], 'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c', 'and_or': 'no', 'cond_row_idx': 0, 'discreteresult': '', 'range0': '12', 'range1': '12'}], 'rulenumber': '1', 'trigger': 'submit'},...], 'method_id': '<method_uid>', 'method_tile': '<method_tile>', 'method_uid': '<method_uid>' } } """ relations = {} # Getting all the methods from the system pc = getToolByName(self, 'portal_catalog') methods = [obj.getObject() for obj in pc( portal_type='Method', is_active=True)] bsc = getToolByName(self, 'bika_setup_catalog') for method in methods: # Get the analysis services related to each method an_servs_brains = bsc( portal_type='AnalysisService', getMethodUIDs={ "query": method.UID(), "operator": "or" }) analysiservices = {} for analysiservice in an_servs_brains: analysiservice = analysiservice.getObject() # Getting the worksheet templates that could be used with the # analysis, those worksheet templates are the ones without # method and the ones with a method shared with the # analysis service. service_methods_uid = analysiservice.getAvailableMethodUIDs() query_dict = { 'portal_type': 'WorksheetTemplate', 'is_active': True, 'sort_on': 'sortable_title', 'getMethodUID': { "query": service_methods_uid + [''], "operator": "or" } } wst_brains = bsc(query_dict) analysiservices[analysiservice.UID()] = { 'as_id': analysiservice.getId(), 'as_title': analysiservice.Title(), 'resultoptions': analysiservice.getResultOptions() if analysiservice.getResultOptions() else [], 'wstoptions': [ (brain.UID, brain.Title) for brain in wst_brains] } # Make the json dict relations[method.UID()] = { 'method_id': method.getId(), 'method_tile': method.Title(), 'analysisservices': analysiservices, 'as_keys': analysiservices.keys(), } # Get the data saved in the object reflex_rule = self.aq_parent.aq_inner saved_method = reflex_rule.getMethod() relations['saved_actions'] = { 'method_uid': saved_method.UID() if saved_method else '', 'method_id': saved_method.getId() if saved_method else '', 'method_tile': saved_method.Title() if saved_method else '', 'rules': reflex_rule.getReflexRules(), } return json.dumps(relations)
[ "def", "getReflexRuleSetup", "(", "self", ")", ":", "relations", "=", "{", "}", "# Getting all the methods from the system", "pc", "=", "getToolByName", "(", "self", ",", "'portal_catalog'", ")", "methods", "=", "[", "obj", ".", "getObject", "(", ")", "for", "obj", "in", "pc", "(", "portal_type", "=", "'Method'", ",", "is_active", "=", "True", ")", "]", "bsc", "=", "getToolByName", "(", "self", ",", "'bika_setup_catalog'", ")", "for", "method", "in", "methods", ":", "# Get the analysis services related to each method", "an_servs_brains", "=", "bsc", "(", "portal_type", "=", "'AnalysisService'", ",", "getMethodUIDs", "=", "{", "\"query\"", ":", "method", ".", "UID", "(", ")", ",", "\"operator\"", ":", "\"or\"", "}", ")", "analysiservices", "=", "{", "}", "for", "analysiservice", "in", "an_servs_brains", ":", "analysiservice", "=", "analysiservice", ".", "getObject", "(", ")", "# Getting the worksheet templates that could be used with the", "# analysis, those worksheet templates are the ones without", "# method and the ones with a method shared with the", "# analysis service.", "service_methods_uid", "=", "analysiservice", ".", "getAvailableMethodUIDs", "(", ")", "query_dict", "=", "{", "'portal_type'", ":", "'WorksheetTemplate'", ",", "'is_active'", ":", "True", ",", "'sort_on'", ":", "'sortable_title'", ",", "'getMethodUID'", ":", "{", "\"query\"", ":", "service_methods_uid", "+", "[", "''", "]", ",", "\"operator\"", ":", "\"or\"", "}", "}", "wst_brains", "=", "bsc", "(", "query_dict", ")", "analysiservices", "[", "analysiservice", ".", "UID", "(", ")", "]", "=", "{", "'as_id'", ":", "analysiservice", ".", "getId", "(", ")", ",", "'as_title'", ":", "analysiservice", ".", "Title", "(", ")", ",", "'resultoptions'", ":", "analysiservice", ".", "getResultOptions", "(", ")", "if", "analysiservice", ".", "getResultOptions", "(", ")", "else", "[", "]", ",", "'wstoptions'", ":", "[", "(", "brain", ".", "UID", ",", "brain", ".", "Title", ")", "for", "brain", "in", "wst_brains", "]", "}", "# Make the json dict", "relations", "[", "method", ".", "UID", "(", ")", "]", "=", "{", "'method_id'", ":", "method", ".", "getId", "(", ")", ",", "'method_tile'", ":", "method", ".", "Title", "(", ")", ",", "'analysisservices'", ":", "analysiservices", ",", "'as_keys'", ":", "analysiservices", ".", "keys", "(", ")", ",", "}", "# Get the data saved in the object", "reflex_rule", "=", "self", ".", "aq_parent", ".", "aq_inner", "saved_method", "=", "reflex_rule", ".", "getMethod", "(", ")", "relations", "[", "'saved_actions'", "]", "=", "{", "'method_uid'", ":", "saved_method", ".", "UID", "(", ")", "if", "saved_method", "else", "''", ",", "'method_id'", ":", "saved_method", ".", "getId", "(", ")", "if", "saved_method", "else", "''", ",", "'method_tile'", ":", "saved_method", ".", "Title", "(", ")", "if", "saved_method", "else", "''", ",", "'rules'", ":", "reflex_rule", ".", "getReflexRules", "(", ")", ",", "}", "return", "json", ".", "dumps", "(", "relations", ")" ]
Return a json dict with all the setup data necessary to build the relations: - Relations between methods and analysis services options. - The current saved data the functions returns: {'<method_uid>': { 'analysisservices': { '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [{ 'ResultText': 'Failed', 'ResultValue': '1', 'value': ''}, ... ]} }, 'as_keys': ['<as_uid>', '<as_uid>'], 'method_id': '<method_id>', 'method_tile': '<method_tile>' }, '<method_uid>': { 'analysisservices': { '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} '<as_uid>': {'as_id': '<as_id>', 'as_title':'<as_title>', 'resultoptions': [,,]} }, 'as_keys': ['<as_uid>', '<as_uid>'], 'method_id': '<method_id>', 'method_tile': '<method_tile>' }, 'saved_actions': {'rules': [ {'actions': [{'act_row_idx': 0, 'action': 'repeat', 'an_result_id': '', 'analyst': '', 'otherWS': current, 'setresultdiscrete': '', 'setresulton': 'original', 'setresultvalue': '', 'worksheettemplate': '70d48adfb34c4231a145f76a858e94cf',}], 'conditions': [{'analysisservice': 'd802cdbf1f4742c094d45997b1038f9c', 'and_or': 'no', 'cond_row_idx': 0, 'discreteresult': '', 'range0': '12', 'range1': '12'}], 'rulenumber': '1', 'trigger': 'submit'},...], 'method_id': '<method_uid>', 'method_tile': '<method_tile>', 'method_uid': '<method_uid>' } }
[ "Return", "a", "json", "dict", "with", "all", "the", "setup", "data", "necessary", "to", "build", "the", "relations", ":", "-", "Relations", "between", "methods", "and", "analysis", "services", "options", ".", "-", "The", "current", "saved", "data", "the", "functions", "returns", ":", "{", "<method_uid", ">", ":", "{", "analysisservices", ":", "{", "<as_uid", ">", ":", "{", "as_id", ":", "<as_id", ">", "as_title", ":", "<as_title", ">", "resultoptions", ":", "[", "]", "}", "<as_uid", ">", ":", "{", "as_id", ":", "<as_id", ">", "as_title", ":", "<as_title", ">", "resultoptions", ":", "[", "{", "ResultText", ":", "Failed", "ResultValue", ":", "1", "value", ":", "}", "...", "]", "}", "}", "as_keys", ":", "[", "<as_uid", ">", "<as_uid", ">", "]", "method_id", ":", "<method_id", ">", "method_tile", ":", "<method_tile", ">", "}", "<method_uid", ">", ":", "{", "analysisservices", ":", "{", "<as_uid", ">", ":", "{", "as_id", ":", "<as_id", ">", "as_title", ":", "<as_title", ">", "resultoptions", ":", "[", "]", "}", "<as_uid", ">", ":", "{", "as_id", ":", "<as_id", ">", "as_title", ":", "<as_title", ">", "resultoptions", ":", "[", "]", "}", "}", "as_keys", ":", "[", "<as_uid", ">", "<as_uid", ">", "]", "method_id", ":", "<method_id", ">", "method_tile", ":", "<method_tile", ">", "}", "saved_actions", ":", "{", "rules", ":", "[", "{", "actions", ":", "[", "{", "act_row_idx", ":", "0", "action", ":", "repeat", "an_result_id", ":", "analyst", ":", "otherWS", ":", "current", "setresultdiscrete", ":", "setresulton", ":", "original", "setresultvalue", ":", "worksheettemplate", ":", "70d48adfb34c4231a145f76a858e94cf", "}", "]", "conditions", ":", "[", "{", "analysisservice", ":", "d802cdbf1f4742c094d45997b1038f9c", "and_or", ":", "no", "cond_row_idx", ":", "0", "discreteresult", ":", "range0", ":", "12", "range1", ":", "12", "}", "]", "rulenumber", ":", "1", "trigger", ":", "submit", "}", "...", "]", "method_id", ":", "<method_uid", ">", "method_tile", ":", "<method_tile", ">", "method_uid", ":", "<method_uid", ">", "}", "}" ]
python
train
43.467742
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L425-L464
def affine_respective_zoom_matrix(w_range=0.8, h_range=1.1): """Get affine transform matrix for zooming/scaling that height and width are changed independently. OpenCV format, x is width. Parameters ----------- w_range : float or tuple of 2 floats The zooming/scaling ratio of width, greater than 1 means larger. - float, a fixed ratio. - tuple of 2 floats, randomly sample a value as the ratio between 2 values. h_range : float or tuple of 2 floats The zooming/scaling ratio of height, greater than 1 means larger. - float, a fixed ratio. - tuple of 2 floats, randomly sample a value as the ratio between 2 values. Returns ------- numpy.array An affine transform matrix. """ if isinstance(h_range, (float, int)): zy = h_range elif isinstance(h_range, tuple): zy = np.random.uniform(h_range[0], h_range[1]) else: raise Exception("h_range: float or tuple of 2 floats") if isinstance(w_range, (float, int)): zx = w_range elif isinstance(w_range, tuple): zx = np.random.uniform(w_range[0], w_range[1]) else: raise Exception("w_range: float or tuple of 2 floats") zoom_matrix = np.array([[zx, 0, 0], \ [0, zy, 0], \ [0, 0, 1]]) return zoom_matrix
[ "def", "affine_respective_zoom_matrix", "(", "w_range", "=", "0.8", ",", "h_range", "=", "1.1", ")", ":", "if", "isinstance", "(", "h_range", ",", "(", "float", ",", "int", ")", ")", ":", "zy", "=", "h_range", "elif", "isinstance", "(", "h_range", ",", "tuple", ")", ":", "zy", "=", "np", ".", "random", ".", "uniform", "(", "h_range", "[", "0", "]", ",", "h_range", "[", "1", "]", ")", "else", ":", "raise", "Exception", "(", "\"h_range: float or tuple of 2 floats\"", ")", "if", "isinstance", "(", "w_range", ",", "(", "float", ",", "int", ")", ")", ":", "zx", "=", "w_range", "elif", "isinstance", "(", "w_range", ",", "tuple", ")", ":", "zx", "=", "np", ".", "random", ".", "uniform", "(", "w_range", "[", "0", "]", ",", "w_range", "[", "1", "]", ")", "else", ":", "raise", "Exception", "(", "\"w_range: float or tuple of 2 floats\"", ")", "zoom_matrix", "=", "np", ".", "array", "(", "[", "[", "zx", ",", "0", ",", "0", "]", ",", "[", "0", ",", "zy", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "return", "zoom_matrix" ]
Get affine transform matrix for zooming/scaling that height and width are changed independently. OpenCV format, x is width. Parameters ----------- w_range : float or tuple of 2 floats The zooming/scaling ratio of width, greater than 1 means larger. - float, a fixed ratio. - tuple of 2 floats, randomly sample a value as the ratio between 2 values. h_range : float or tuple of 2 floats The zooming/scaling ratio of height, greater than 1 means larger. - float, a fixed ratio. - tuple of 2 floats, randomly sample a value as the ratio between 2 values. Returns ------- numpy.array An affine transform matrix.
[ "Get", "affine", "transform", "matrix", "for", "zooming", "/", "scaling", "that", "height", "and", "width", "are", "changed", "independently", ".", "OpenCV", "format", "x", "is", "width", "." ]
python
valid
33.925
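A small, self-contained sketch of what the returned matrix does: it reproduces the fixed-ratio case with NumPy and applies it to a point in homogeneous coordinates (x is width, per the OpenCV convention noted in the docstring).

import numpy as np

zx, zy = 1.5, 0.5  # fixed width/height ratios, as when plain floats are passed
M = np.array([[zx, 0, 0],
              [0, zy, 0],
              [0,  0, 1]], dtype=float)

pt = np.array([10.0, 20.0, 1.0])  # (x, y, 1) with x = width
print(M @ pt)  # -> [15. 10.  1.]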
nerdvegas/rez
src/rez/status.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/status.py#L56-L69
def parent_suite(self): """Get the current parent suite. A parent suite exists when a context within a suite is active. That is, during execution of a tool within a suite, or after a user has entered an interactive shell in a suite context, for example via the command- line syntax 'tool +i', where 'tool' is an alias in a suite. Returns: `Suite` object, or None if there is no current parent suite. """ if self.context and self.context.parent_suite_path: return Suite.load(self.context.parent_suite_path) return None
[ "def", "parent_suite", "(", "self", ")", ":", "if", "self", ".", "context", "and", "self", ".", "context", ".", "parent_suite_path", ":", "return", "Suite", ".", "load", "(", "self", ".", "context", ".", "parent_suite_path", ")", "return", "None" ]
Get the current parent suite. A parent suite exists when a context within a suite is active. That is, during execution of a tool within a suite, or after a user has entered an interactive shell in a suite context, for example via the command- line syntax 'tool +i', where 'tool' is an alias in a suite. Returns: `Suite` object, or None if there is no current parent suite.
[ "Get", "the", "current", "parent", "suite", "." ]
python
train
43
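A hedged usage sketch: it assumes the module-level `status` singleton that rez.status exposes and that `parent_suite` is accessed as a property; outside any suite context it simply reports None.

from rez.status import status  # module-level Status() instance (assumed)

suite = status.parent_suite  # None unless a suite context is active
if suite is None:
    print('not inside a suite context')
else:
    print('parent suite:', suite)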
pymoca/pymoca
src/pymoca/backends/xml/model.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/model.py#L198-L213
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]: """ Sort equations by dependence """ J = ca.jacobian(f, x) nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf() return { 'J': J, 'nblock': nblock, 'rowperm': rowperm, 'colperm': colperm, 'rowblock': rowblock, 'colblock': colblock, 'coarserow': coarserow, 'coarsecol': coarsecol }
[ "def", "blt", "(", "f", ":", "List", "[", "SYM", "]", ",", "x", ":", "List", "[", "SYM", "]", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "J", "=", "ca", ".", "jacobian", "(", "f", ",", "x", ")", "nblock", ",", "rowperm", ",", "colperm", ",", "rowblock", ",", "colblock", ",", "coarserow", ",", "coarsecol", "=", "J", ".", "sparsity", "(", ")", ".", "btf", "(", ")", "return", "{", "'J'", ":", "J", ",", "'nblock'", ":", "nblock", ",", "'rowperm'", ":", "rowperm", ",", "'colperm'", ":", "colperm", ",", "'rowblock'", ":", "rowblock", ",", "'colblock'", ":", "colblock", ",", "'coarserow'", ":", "coarserow", ",", "'coarsecol'", ":", "coarsecol", "}" ]
Sort equations by dependence
[ "Sort", "equations", "by", "dependence" ]
python
train
27.875
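A minimal sketch of calling `blt` on a toy triangular system; it assumes a CasADi version whose `Sparsity.btf()` unpacks into the seven values used in the function above.

import casadi as ca

x1, x2 = ca.SX.sym('x1'), ca.SX.sym('x2')
f = ca.vertcat(x1 - 1, x2 - x1)  # x2 depends on x1: a triangular system
x = ca.vertcat(x1, x2)

info = blt(f, x)  # the function defined above
print(info['nblock'], info['rowperm'], info['colperm'])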
senaite/senaite.jsonapi
src/senaite/jsonapi/fieldmanagers.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/fieldmanagers.py#L303-L312
def get(self, instance, **kw): """Get the value of the field """ # Gracefully avoid programming errors in Computed fields try: return self._get(instance, **kw) except AttributeError: logger.error("Could not get the value of the computed field '{}'" .format(self.get_field_name())) return None
[ "def", "get", "(", "self", ",", "instance", ",", "*", "*", "kw", ")", ":", "# Gracefully avoid programming errors in Computed fields", "try", ":", "return", "self", ".", "_get", "(", "instance", ",", "*", "*", "kw", ")", "except", "AttributeError", ":", "logger", ".", "error", "(", "\"Could not get the value of the computed field '{}'\"", ".", "format", "(", "self", ".", "get_field_name", "(", ")", ")", ")", "return", "None" ]
Get the value of the field
[ "Get", "the", "value", "of", "the", "field" ]
python
train
38.4
Trax-air/swagger-parser
swagger_parser/swagger_parser.py
https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L679-L744
def validate_request(self, path, action, body=None, query=None): """Check if the given request is valid. Validates the body and the query # Rules to validate the BODY: # Let's limit this to mime types that either contain 'text' or 'json' # 1. if body is None, there must not be any required parameters in # the given schema # 2. if the mime type contains 'json', body must not be '', but can # be {} # 3. if the mime type contains 'text', body can be any string # 4. if no mime type ('consumes') is given.. DISALLOW # 5. if the body is empty ('' or {}), there must not be any required parameters # 6. if there is something in the body, it must adhere to the given schema # -> will call the validate body function Args: path: path of the request. action: action of the request(get, post, delete...). body: body of the request. query: dict with the query parameters. Returns: True if the request is valid, False otherwise. TODO: - For every http method, we might want to have some general checks before we go deeper into the parameters - Check form data parameters """ path_name, path_spec = self.get_path_spec(path) if path_spec is None: # reject unknown path logging.warn("there is no path") return False if action not in path_spec.keys(): # reject unknown http method logging.warn("this http method is unknown '{0}'".format(action)) return False action_spec = path_spec[action] # check general post body guidelines (body + mime type) if action == 'post': is_ok, msg = _validate_post_body(body, action_spec) if not is_ok: logging.warn("the general post body did not validate due to '{0}'".format(msg)) return False # If the body is empty and it validated so far, we can return here # unless there is something in the query parameters we need to check body_is_empty = body in [None, {}, ''] if body_is_empty and query is None: return True # Check body parameters is_ok, msg = self._validate_body_parameters(body, action_spec) if not is_ok: logging.warn("the parameters in the body did not validate due to '{0}'".format(msg)) return False # Check query parameters if query is not None and not self._validate_query_parameters(query, action_spec): return False return True
[ "def", "validate_request", "(", "self", ",", "path", ",", "action", ",", "body", "=", "None", ",", "query", "=", "None", ")", ":", "path_name", ",", "path_spec", "=", "self", ".", "get_path_spec", "(", "path", ")", "if", "path_spec", "is", "None", ":", "# reject unknown path", "logging", ".", "warn", "(", "\"there is no path\"", ")", "return", "False", "if", "action", "not", "in", "path_spec", ".", "keys", "(", ")", ":", "# reject unknown http method", "logging", ".", "warn", "(", "\"this http method is unknown '{0}'\"", ".", "format", "(", "action", ")", ")", "return", "False", "action_spec", "=", "path_spec", "[", "action", "]", "# check general post body guidelines (body + mime type)", "if", "action", "==", "'post'", ":", "is_ok", ",", "msg", "=", "_validate_post_body", "(", "body", ",", "action_spec", ")", "if", "not", "is_ok", ":", "logging", ".", "warn", "(", "\"the general post body did not validate due to '{0}'\"", ".", "format", "(", "msg", ")", ")", "return", "False", "# If the body is empty and it validated so far, we can return here", "# unless there is something in the query parameters we need to check", "body_is_empty", "=", "body", "in", "[", "None", ",", "{", "}", ",", "''", "]", "if", "body_is_empty", "and", "query", "is", "None", ":", "return", "True", "# Check body parameters", "is_ok", ",", "msg", "=", "self", ".", "_validate_body_parameters", "(", "body", ",", "action_spec", ")", "if", "not", "is_ok", ":", "logging", ".", "warn", "(", "\"the parameters in the body did not validate due to '{0}'\"", ".", "format", "(", "msg", ")", ")", "return", "False", "# Check query parameters", "if", "query", "is", "not", "None", "and", "not", "self", ".", "_validate_query_parameters", "(", "query", ",", "action_spec", ")", ":", "return", "False", "return", "True" ]
Check if the given request is valid. Validates the body and the query # Rules to validate the BODY: # Let's limit this to mime types that either contain 'text' or 'json' # 1. if body is None, there must not be any required parameters in # the given schema # 2. if the mime type contains 'json', body must not be '', but can # be {} # 3. if the mime type contains 'text', body can be any string # 4. if no mime type ('consumes') is given.. DISALLOW # 5. if the body is empty ('' or {}), there must not be any required parameters # 6. if there is something in the body, it must adhere to the given schema # -> will call the validate body function Args: path: path of the request. action: action of the request(get, post, delete...). body: body of the request. query: dict with the query parameters. Returns: True if the request is valid, False otherwise. TODO: - For every http method, we might want to have some general checks before we go deeper into the parameters - Check form data parameters
[ "Check", "if", "the", "given", "request", "is", "valid", ".", "Validates", "the", "body", "and", "the", "query" ]
python
train
40.560606
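A hedged usage sketch; the spec path, endpoint, and payload below are all placeholders for your own API.

from swagger_parser import SwaggerParser

parser = SwaggerParser(swagger_path='swagger.yaml')  # placeholder spec file
ok = parser.validate_request('/v1/pets', 'post',
                             body={'name': 'rex'},  # hypothetical payload
                             query=None)
print('request valid?', ok)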
openstack/networking-cisco
networking_cisco/apps/saf/agent/iptables_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/iptables_driver.py#L90-L100
def _find_rule_no(self, mac): """Find rule number associated with a given mac.""" ipt_cmd = ['iptables', '-L', '--line-numbers'] cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False) for o in cmdo.split('\n'): if mac in o.lower(): rule_no = o.split()[0] LOG.info('Found rule %(rule)s for %(mac)s.', {'rule': rule_no, 'mac': mac}) return rule_no
[ "def", "_find_rule_no", "(", "self", ",", "mac", ")", ":", "ipt_cmd", "=", "[", "'iptables'", ",", "'-L'", ",", "'--line-numbers'", "]", "cmdo", "=", "dsl", ".", "execute", "(", "ipt_cmd", ",", "self", ".", "_root_helper", ",", "log_output", "=", "False", ")", "for", "o", "in", "cmdo", ".", "split", "(", "'\\n'", ")", ":", "if", "mac", "in", "o", ".", "lower", "(", ")", ":", "rule_no", "=", "o", ".", "split", "(", ")", "[", "0", "]", "LOG", ".", "info", "(", "'Found rule %(rule)s for %(mac)s.'", ",", "{", "'rule'", ":", "rule_no", ",", "'mac'", ":", "mac", "}", ")", "return", "rule_no" ]
Find rule number associated with a given mac.
[ "Find", "rule", "number", "associated", "with", "a", "given", "mac", "." ]
python
train
42.090909
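The matching logic can be exercised without root or iptables by re-running the same loop over canned `iptables -L --line-numbers` output; the chain below is made up.

sample = '''Chain INPUT (policy ACCEPT)
num  target  prot opt source    destination
1    DROP    all  --  anywhere  anywhere  MAC AA:BB:CC:DD:EE:FF
2    ACCEPT  all  --  anywhere  anywhere
'''

mac = 'aa:bb:cc:dd:ee:ff'
rule_no = None
for o in sample.split('\n'):
    if mac in o.lower():
        rule_no = o.split()[0]  # first column is the rule number
        break
print(rule_no)  # -> 1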
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2260-L2270
def modelsClearAll(self): """ Delete all models from the models table Parameters: ---------------------------------------------------------------- """ self._logger.info('Deleting all rows from models table %r', self.modelsTableName) with ConnectionFactory.get() as conn: query = 'DELETE FROM %s' % (self.modelsTableName) conn.cursor.execute(query)
[ "def", "modelsClearAll", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Deleting all rows from models table %r'", ",", "self", ".", "modelsTableName", ")", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "query", "=", "'DELETE FROM %s'", "%", "(", "self", ".", "modelsTableName", ")", "conn", ".", "cursor", ".", "execute", "(", "query", ")" ]
Delete all models from the models table Parameters: ----------------------------------------------------------------
[ "Delete", "all", "models", "from", "the", "models", "table" ]
python
valid
36
openatx/facebook-wda
wda/__init__.py
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L229-L237
def source(self, format='xml', accessible=False): """ Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json' """ if accessible: return self.http.get('/wda/accessibleSource').value return self.http.get('source?format='+format).value
[ "def", "source", "(", "self", ",", "format", "=", "'xml'", ",", "accessible", "=", "False", ")", ":", "if", "accessible", ":", "return", "self", ".", "http", ".", "get", "(", "'/wda/accessibleSource'", ")", ".", "value", "return", "self", ".", "http", ".", "get", "(", "'source?format='", "+", "format", ")", ".", "value" ]
Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json'
[ "Args", ":", "format", "(", "str", ")", ":", "only", "xml", "and", "json", "source", "types", "are", "supported", "accessible", "(", "bool", ")", ":", "when", "set", "to", "true", "format", "is", "always", "json" ]
python
train
41.555556
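A hedged usage sketch; the URL is a placeholder for a running WebDriverAgent instance.

import wda

c = wda.Client('http://localhost:8100')  # placeholder WDA endpoint
print(c.source()[:200])            # XML page source, truncated
print(c.source(accessible=True))   # JSON accessibility tree instead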
mitsei/dlkit
dlkit/records/assessment/orthographic_visualization/orthographic_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/orthographic_visualization/orthographic_records.py#L462-L478
def _init_metadata(self): """stub""" super(LabelOrthoFacesAnswerFormRecord, self)._init_metadata() self._face_values_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'face_values'), 'element_label': 'Orthographic Face Values', 'instructions': '', 'required': True, 'read_only': False, 'linked': True, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] }
[ "def", "_init_metadata", "(", "self", ")", ":", "super", "(", "LabelOrthoFacesAnswerFormRecord", ",", "self", ")", ".", "_init_metadata", "(", ")", "self", ".", "_face_values_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'face_values'", ")", ",", "'element_label'", ":", "'Orthographic Face Values'", ",", "'instructions'", ":", "''", ",", "'required'", ":", "True", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "True", ",", "'array'", ":", "False", ",", "'default_object_values'", ":", "[", "{", "}", "]", ",", "'syntax'", ":", "'OBJECT'", ",", "'object_set'", ":", "[", "]", "}" ]
stub
[ "stub" ]
python
train
37.352941
pyBookshelf/bookshelf
bookshelf/api_v2/pkg.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/pkg.py#L107-L121
def enable_apt_repositories(prefix, url, version, repositories): """ adds an apt repository """ with settings(hide('warnings', 'running', 'stdout'), warn_only=False, capture=True): sudo('apt-add-repository "%s %s %s %s"' % (prefix, url, version, repositories)) with hide('running', 'stdout'): output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update") if 'Some index files failed to download' in output: raise SystemExit(1) else: # if we didn't abort above, we should return True return True
[ "def", "enable_apt_repositories", "(", "prefix", ",", "url", ",", "version", ",", "repositories", ")", ":", "with", "settings", "(", "hide", "(", "'warnings'", ",", "'running'", ",", "'stdout'", ")", ",", "warn_only", "=", "False", ",", "capture", "=", "True", ")", ":", "sudo", "(", "'apt-add-repository \"%s %s %s %s\"'", "%", "(", "prefix", ",", "url", ",", "version", ",", "repositories", ")", ")", "with", "hide", "(", "'running'", ",", "'stdout'", ")", ":", "output", "=", "sudo", "(", "\"DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update\"", ")", "if", "'Some index files failed to download'", "in", "output", ":", "raise", "SystemExit", "(", "1", ")", "else", ":", "# if we didn't abort above, we should return True", "return", "True" ]
adds an apt repository
[ "adds", "an", "apt", "repository" ]
python
train
49.933333
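A hedged call sketch with hypothetical Ubuntu arguments; it must run inside a fabric task with sudo rights on the target host.

enable_apt_repositories('deb',
                        'http://archive.ubuntu.com/ubuntu',  # hypothetical mirror
                        'trusty',
                        'main universe')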
vallis/libstempo
libstempo/plot.py
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L7-L38
def plotres(psr,deleted=False,group=None,**kwargs): """Plot residuals, compute unweighted rms residual.""" res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs if (not deleted) and N.any(psr.deleted != 0): res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0] print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs)) meanres = math.sqrt(N.mean(res**2)) / 1e-6 if group is None: i = N.argsort(t) P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs) else: if (not deleted) and N.any(psr.deleted): flagmask = psr.flagvals(group)[~psr.deleted] else: flagmask = psr.flagvals(group) unique = list(set(flagmask)) for flagval in unique: f = (flagmask == flagval) flagres, flagt, flagerrs = res[f], t[f], errs[f] i = N.argsort(flagt) P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs) P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1)) P.xlabel('MJD'); P.ylabel('res [us]') P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
[ "def", "plotres", "(", "psr", ",", "deleted", "=", "False", ",", "group", "=", "None", ",", "*", "*", "kwargs", ")", ":", "res", ",", "t", ",", "errs", "=", "psr", ".", "residuals", "(", ")", ",", "psr", ".", "toas", "(", ")", ",", "psr", ".", "toaerrs", "if", "(", "not", "deleted", ")", "and", "N", ".", "any", "(", "psr", ".", "deleted", "!=", "0", ")", ":", "res", ",", "t", ",", "errs", "=", "res", "[", "psr", ".", "deleted", "==", "0", "]", ",", "t", "[", "psr", ".", "deleted", "==", "0", "]", ",", "errs", "[", "psr", ".", "deleted", "==", "0", "]", "print", "(", "\"Plotting {0}/{1} nondeleted points.\"", ".", "format", "(", "len", "(", "res", ")", ",", "psr", ".", "nobs", ")", ")", "meanres", "=", "math", ".", "sqrt", "(", "N", ".", "mean", "(", "res", "**", "2", ")", ")", "/", "1e-6", "if", "group", "is", "None", ":", "i", "=", "N", ".", "argsort", "(", "t", ")", "P", ".", "errorbar", "(", "t", "[", "i", "]", ",", "res", "[", "i", "]", "/", "1e-6", ",", "yerr", "=", "errs", "[", "i", "]", ",", "fmt", "=", "'x'", ",", "*", "*", "kwargs", ")", "else", ":", "if", "(", "not", "deleted", ")", "and", "N", ".", "any", "(", "psr", ".", "deleted", ")", ":", "flagmask", "=", "psr", ".", "flagvals", "(", "group", ")", "[", "~", "psr", ".", "deleted", "]", "else", ":", "flagmask", "=", "psr", ".", "flagvals", "(", "group", ")", "unique", "=", "list", "(", "set", "(", "flagmask", ")", ")", "for", "flagval", "in", "unique", ":", "f", "=", "(", "flagmask", "==", "flagval", ")", "flagres", ",", "flagt", ",", "flagerrs", "=", "res", "[", "f", "]", ",", "t", "[", "f", "]", ",", "errs", "[", "f", "]", "i", "=", "N", ".", "argsort", "(", "flagt", ")", "P", ".", "errorbar", "(", "flagt", "[", "i", "]", ",", "flagres", "[", "i", "]", "/", "1e-6", ",", "yerr", "=", "flagerrs", "[", "i", "]", ",", "fmt", "=", "'x'", ",", "*", "*", "kwargs", ")", "P", ".", "legend", "(", "unique", ",", "numpoints", "=", "1", ",", "bbox_to_anchor", "=", "(", "1.1", ",", "1.1", ")", ")", "P", ".", "xlabel", "(", "'MJD'", ")", "P", ".", "ylabel", "(", "'res [us]'", ")", "P", ".", "title", "(", "\"{0} - rms res = {1:.2f} us\"", ".", "format", "(", "psr", ".", "name", ",", "meanres", ")", ")" ]
Plot residuals, compute unweighted rms residual.
[ "Plot", "residuals", "compute", "unweighted", "rms", "residual", "." ]
python
train
36.9375
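A hedged usage sketch; the par/tim filenames and the 'be' flag are placeholders for your own pulsar data.

import libstempo as T
import matplotlib.pyplot as P

psr = T.tempopulsar(parfile='pulsar.par', timfile='pulsar.tim')  # placeholder files
plotres(psr, group='be')  # one errorbar series per value of the 'be' flag
P.show()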
pybel/pybel
src/pybel/struct/filters/node_predicates.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_predicates.py#L177-L179
def has_activity(graph: BELGraph, node: BaseEntity) -> bool: """Return true if over any of the node's edges, it has a molecular activity.""" return _node_has_modifier(graph, node, ACTIVITY)
[ "def", "has_activity", "(", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ")", "->", "bool", ":", "return", "_node_has_modifier", "(", "graph", ",", "node", ",", "ACTIVITY", ")" ]
Return true if over any of the node's edges, it has a molecular activity.
[ "Return", "true", "if", "over", "any", "of", "the", "node", "s", "edges", "it", "has", "a", "molecular", "activity", "." ]
python
train
65
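A small sketch that filters a graph with the predicate, assuming the example graph that pybel ships.

from pybel.examples import sialic_acid_graph

active = [n for n in sialic_acid_graph if has_activity(sialic_acid_graph, n)]
print(len(active), 'nodes carry a molecular activity')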
bxlab/bx-python
lib/bx/align/tools/thread.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/tools/thread.py#L10-L66
def thread( mafs, species ):
    """
    Restrict a list of alignments to a given list of species by:
        1) Removing components for any other species
        2) Remove any columns containing all gaps
    Example:

    >>> import bx.align.maf
    >>> block1 = bx.align.maf.from_string( '''
    ... a score=4964.0
    ... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
    ... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
    ... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
    ... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
    ... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
    ... ''' )
    >>> block2 = bx.align.maf.from_string( '''
    ... a score=9151.0
    ... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
    ... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
    ... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
    ... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
    ... ''' )
    >>> mafs = [ block1, block2 ]
    >>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
    >>> len( threaded )
    2
    >>> print(threaded[0])
    a score=0.0
    s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
    s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
    <BLANKLINE>
    >>> print(threaded[1])
    a score=0.0
    s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
    s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
    <BLANKLINE>
    """
    for m in mafs:
        new_maf = deepcopy( m )
        new_components = get_components_for_species( new_maf, species )
        if new_components:
            remove_all_gap_columns( new_components )
            new_maf.components = new_components
            new_maf.score = 0.0
            new_maf.text_size = len(new_components[0].text)
            yield new_maf
[ "def", "thread", "(", "mafs", ",", "species", ")", ":", "for", "m", "in", "mafs", ":", "new_maf", "=", "deepcopy", "(", "m", ")", "new_components", "=", "get_components_for_species", "(", "new_maf", ",", "species", ")", "if", "new_components", ":", "remove_all_gap_columns", "(", "new_components", ")", "new_maf", ".", "components", "=", "new_components", "new_maf", ".", "score", "=", "0.0", "new_maf", ".", "text_size", "=", "len", "(", "new_components", "[", "0", "]", ".", "text", ")", "yield", "new_maf" ]
Restrict a list of alignments to a given list of species by:
    1) Removing components for any other species
    2) Remove any columns containing all gaps
Example:

>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=4964.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s rheMac2.chr20 58163346 43 - 88221753 ATATTATCTTAACATTAAAGA-AGAACAGTAATTCTGGTCATAA
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... s oryCun1.scaffold_175207 85970 22 + 212797 ----------------------AAAATATTAGTTATCACCATAT
... s bosTau2.chr23 23894492 43 + 41602928 AAACTACCTTAATGTCACAGG-AAACAATGTATgctgctgctgc
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=9151.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAG-GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT-TGAC
... s oryCun1.scaffold_175207 85992 71 + 212797 TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAG-GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT-TGAT
... s rheMac2.chr20 58163389 69 - 88221753 ACACATATTATTTCTTAACATGGAGGATTATATCTT-AAACATGTGTGCaaaatataaatatatat-tcaa
... ''' )
>>> mafs = [ block1, block2 ]
>>> threaded = [ t for t in thread( mafs, [ "hg18", "panTro1" ] ) ]
>>> len( threaded )
2
>>> print(threaded[0])
a score=0.0
s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
<BLANKLINE>
>>> print(threaded[1])
a score=0.0
s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
<BLANKLINE>
[ "Restrict", "an", "list", "of", "alignments", "to", "a", "given", "list", "of", "species", "by", ":", "1", ")", "Removing", "components", "for", "any", "other", "species", "2", ")", "Remove", "any", "columns", "containing", "all", "gaps", "Example", ":", ">>>", "import", "bx", ".", "align", ".", "maf", ">>>", "block1", "=", "bx", ".", "align", ".", "maf", ".", "from_string", "(", "...", "a", "score", "=", "4964", ".", "0", "...", "s", "hg18", ".", "chr10", "52686", "44", "+", "135374737", "GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC", "...", "s", "rheMac2", ".", "chr20", "58163346", "43", "-", "88221753", "ATATTATCTTAACATTAAAGA", "-", "AGAACAGTAATTCTGGTCATAA", "...", "s", "panTro1", ".", "chrUn_random", "208115356", "44", "-", "240967748", "GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT", "...", "s", "oryCun1", ".", "scaffold_175207", "85970", "22", "+", "212797", "----------------------", "AAAATATTAGTTATCACCATAT", "...", "s", "bosTau2", ".", "chr23", "23894492", "43", "+", "41602928", "AAACTACCTTAATGTCACAGG", "-", "AAACAATGTATgctgctgctgc", "...", ")", ">>>", "block2", "=", "bx", ".", "align", ".", "maf", ".", "from_string", "(", "...", "a", "score", "=", "9151", ".", "0", "...", "s", "hg18", ".", "chr10", "52730", "69", "+", "135374737", "GCAGGTACAATTCATCAAGAAAG", "-", "GAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTT", "-", "TGAC", "...", "s", "oryCun1", ".", "scaffold_175207", "85992", "71", "+", "212797", "TCTAGTGCTCTCCAATAATATAATAGATTATAACTTCATATAATTATGTGAAATATAAGATTATTTATCAG", "...", "s", "panTro1", ".", "chrUn_random", "208115400", "69", "-", "240967748", "GCAGCTACTATTCATCAAGAAAG", "-", "GGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTT", "-", "TGAT", "...", "s", "rheMac2", ".", "chr20", "58163389", "69", "-", "88221753", "ACACATATTATTTCTTAACATGGAGGATTATATCTT", "-", "AAACATGTGTGCaaaatataaatatatat", "-", "tcaa", "...", ")", ">>>", "mafs", "=", "[", "block1", "block2", "]", ">>>", "threaded", "=", "[", "t", "for", "t", "in", "thread", "(", "mafs", "[", "hg18", "panTro1", "]", ")", "]", ">>>", "len", "(", "threaded", ")", "2", ">>>", "print", "(", "threaded", "[", "0", "]", ")", "a", "score", "=", "0", ".", "0", "s", "hg18", ".", "chr10", "52686", "44", "+", "135374737", "GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC", "s", "panTro1", ".", "chrUn_random", "208115356", "44", "-", "240967748", "GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT", "<BLANKLINE", ">" ]
python
train
44.087719
apache/incubator-heron
heron/instance/src/python/utils/topology/topology_context_impl.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/topology/topology_context_impl.py#L110-L116
def get_component_tasks(self, component_id): """Returns the task ids allocated for the given component id""" ret = [] for task_id, comp_id in self.task_to_component_map.items(): if comp_id == component_id: ret.append(task_id) return ret
[ "def", "get_component_tasks", "(", "self", ",", "component_id", ")", ":", "ret", "=", "[", "]", "for", "task_id", ",", "comp_id", "in", "self", ".", "task_to_component_map", ".", "items", "(", ")", ":", "if", "comp_id", "==", "component_id", ":", "ret", ".", "append", "(", "task_id", ")", "return", "ret" ]
Returns the task ids allocated for the given component id
[ "Returns", "the", "task", "ids", "allocated", "for", "the", "given", "component", "id" ]
python
valid
37.142857
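The lookup is easy to exercise standalone with a toy task map, sketched here outside the Heron runtime.

task_to_component_map = {0: 'spout', 1: 'bolt', 2: 'bolt'}  # toy assignment

def get_component_tasks(component_id):
    # Same linear scan as the method above, minus the instance state.
    return [t for t, c in task_to_component_map.items() if c == component_id]

print(get_component_tasks('bolt'))  # -> [1, 2]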
mrcagney/gtfstk
gtfstk/validators.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L1226-L1360
def check_stop_times(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.stop_times``.
    """
    table = "stop_times"
    problems = []

    # Preliminary checks
    if feed.stop_times is None:
        problems.append(["error", "Missing table", table, []])
    else:
        f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check trip_id
    problems = check_column_linked_id(
        problems, table, f, "trip_id", feed.trips
    )

    # Check arrival_time and departure_time
    v = lambda x: pd.isnull(x) or valid_time(x)
    for col in ["arrival_time", "departure_time"]:
        problems = check_column(problems, table, f, col, v)

    # Check that arrival and departure times exist for the first and last
    # stop of each trip and for each timepoint.
    # For feeds with many trips, iterating through the stop time rows is
    # faster than using groupby.
    if "timepoint" not in f.columns:
        f["timepoint"] = np.nan  # This will not mess up later timepoint check

    indices = []
    prev_tid = None
    prev_atime = 1
    prev_dtime = 1
    for i, tid, atime, dtime, tp in f[
        ["trip_id", "arrival_time", "departure_time", "timepoint"]
    ].itertuples():
        if tid != prev_tid:
            # Check last stop of previous trip
            if pd.isnull(prev_atime) or pd.isnull(prev_dtime):
                indices.append(i - 1)
            # Check first stop of current trip
            if pd.isnull(atime) or pd.isnull(dtime):
                indices.append(i)
        elif tp == 1 and (pd.isnull(atime) or pd.isnull(dtime)):
            # Failure at timepoint
            indices.append(i)

        prev_tid = tid
        prev_atime = atime
        prev_dtime = dtime

    if indices:
        problems.append(
            [
                "error",
                "First/last/time point arrival/departure time missing",
                table,
                indices,
            ]
        )

    # Check stop_id
    problems = check_column_linked_id(
        problems, table, f, "stop_id", feed.stops
    )

    # Check for duplicated (trip_id, stop_sequence) pairs
    cond = f[["trip_id", "stop_sequence"]].dropna().duplicated()
    problems = check_table(
        problems, table, f, cond, "Repeated pair (trip_id, stop_sequence)"
    )

    # Check stop_headsign
    problems = check_column(
        problems, table, f, "stop_headsign", valid_str, column_required=False
    )

    # Check pickup_type and drop_off_type
    for col in ["pickup_type", "drop_off_type"]:
        v = lambda x: x in range(4)
        problems = check_column(
            problems, table, f, col, v, column_required=False
        )

    # Check if shape_dist_traveled decreases on a trip
    if "shape_dist_traveled" in f.columns:
        g = f.dropna(subset=["shape_dist_traveled"])
        indices = []
        prev_tid = None
        prev_dist = -1
        for i, tid, dist in g[["trip_id", "shape_dist_traveled"]].itertuples():
            if tid == prev_tid and dist < prev_dist:
                indices.append(i)

            prev_tid = tid
            prev_dist = dist

        if indices:
            problems.append(
                [
                    "error",
                    "shape_dist_traveled decreases on a trip",
                    table,
                    indices,
                ]
            )

    # Check timepoint
    v = lambda x: x in range(2)
    problems = check_column(
        problems, table, f, "timepoint", v, column_required=False
    )

    if include_warnings:
        # Check for duplicated (trip_id, departure_time) pairs
        cond = f[["trip_id", "departure_time"]].duplicated()
        problems = check_table(
            problems,
            table,
            f,
            cond,
            "Repeated pair (trip_id, departure_time)",
            "warning",
        )

    return format_problems(problems, as_df=as_df)
[ "def", "check_stop_times", "(", "feed", ":", "\"Feed\"", ",", "*", ",", "as_df", ":", "bool", "=", "False", ",", "include_warnings", ":", "bool", "=", "False", ")", "->", "List", ":", "table", "=", "\"stop_times\"", "problems", "=", "[", "]", "# Preliminary checks", "if", "feed", ".", "stop_times", "is", "None", ":", "problems", ".", "append", "(", "[", "\"error\"", ",", "\"Missing table\"", ",", "table", ",", "[", "]", "]", ")", "else", ":", "f", "=", "feed", ".", "stop_times", ".", "copy", "(", ")", ".", "sort_values", "(", "[", "\"trip_id\"", ",", "\"stop_sequence\"", "]", ")", "problems", "=", "check_for_required_columns", "(", "problems", ",", "table", ",", "f", ")", "if", "problems", ":", "return", "format_problems", "(", "problems", ",", "as_df", "=", "as_df", ")", "if", "include_warnings", ":", "problems", "=", "check_for_invalid_columns", "(", "problems", ",", "table", ",", "f", ")", "# Check trip_id", "problems", "=", "check_column_linked_id", "(", "problems", ",", "table", ",", "f", ",", "\"trip_id\"", ",", "feed", ".", "trips", ")", "# Check arrival_time and departure_time", "v", "=", "lambda", "x", ":", "pd", ".", "isnull", "(", "x", ")", "or", "valid_time", "(", "x", ")", "for", "col", "in", "[", "\"arrival_time\"", ",", "\"departure_time\"", "]", ":", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "col", ",", "v", ")", "# Check that arrival and departure times exist for the first and last", "# stop of each trip and for each timepoint.", "# For feeds with many trips, iterating through the stop time rows is", "# faster than uisg groupby.", "if", "\"timepoint\"", "not", "in", "f", ".", "columns", ":", "f", "[", "\"timepoint\"", "]", "=", "np", ".", "nan", "# This will not mess up later timepoint check", "indices", "=", "[", "]", "prev_tid", "=", "None", "prev_atime", "=", "1", "prev_dtime", "=", "1", "for", "i", ",", "tid", ",", "atime", ",", "dtime", ",", "tp", "in", "f", "[", "[", "\"trip_id\"", ",", "\"arrival_time\"", ",", "\"departure_time\"", ",", "\"timepoint\"", "]", "]", ".", "itertuples", "(", ")", ":", "if", "tid", "!=", "prev_tid", ":", "# Check last stop of previous trip", "if", "pd", ".", "isnull", "(", "prev_atime", ")", "or", "pd", ".", "isnull", "(", "prev_dtime", ")", ":", "indices", ".", "append", "(", "i", "-", "1", ")", "# Check first stop of current trip", "if", "pd", ".", "isnull", "(", "atime", ")", "or", "pd", ".", "isnull", "(", "dtime", ")", ":", "indices", ".", "append", "(", "i", ")", "elif", "tp", "==", "1", "and", "(", "pd", ".", "isnull", "(", "atime", ")", "or", "pd", ".", "isnull", "(", "dtime", ")", ")", ":", "# Failure at timepoint", "indices", ".", "append", "(", "i", ")", "prev_tid", "=", "tid", "prev_atime", "=", "atime", "prev_dtime", "=", "dtime", "if", "indices", ":", "problems", ".", "append", "(", "[", "\"error\"", ",", "\"First/last/time point arrival/departure time missing\"", ",", "table", ",", "indices", ",", "]", ")", "# Check stop_id", "problems", "=", "check_column_linked_id", "(", "problems", ",", "table", ",", "f", ",", "\"stop_id\"", ",", "feed", ".", "stops", ")", "# Check for duplicated (trip_id, stop_sequence) pairs", "cond", "=", "f", "[", "[", "\"trip_id\"", ",", "\"stop_sequence\"", "]", "]", ".", "dropna", "(", ")", ".", "duplicated", "(", ")", "problems", "=", "check_table", "(", "problems", ",", "table", ",", "f", ",", "cond", ",", "\"Repeated pair (trip_id, stop_sequence)\"", ")", "# Check stop_headsign", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", 
",", "\"stop_headsign\"", ",", "valid_str", ",", "column_required", "=", "False", ")", "# Check pickup_type and drop_off_type", "for", "col", "in", "[", "\"pickup_type\"", ",", "\"drop_off_type\"", "]", ":", "v", "=", "lambda", "x", ":", "x", "in", "range", "(", "4", ")", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "col", ",", "v", ",", "column_required", "=", "False", ")", "# Check if shape_dist_traveled decreases on a trip", "if", "\"shape_dist_traveled\"", "in", "f", ".", "columns", ":", "g", "=", "f", ".", "dropna", "(", "subset", "=", "[", "\"shape_dist_traveled\"", "]", ")", "indices", "=", "[", "]", "prev_tid", "=", "None", "prev_dist", "=", "-", "1", "for", "i", ",", "tid", ",", "dist", "in", "g", "[", "[", "\"trip_id\"", ",", "\"shape_dist_traveled\"", "]", "]", ".", "itertuples", "(", ")", ":", "if", "tid", "==", "prev_tid", "and", "dist", "<", "prev_dist", ":", "indices", ".", "append", "(", "i", ")", "prev_tid", "=", "tid", "prev_dist", "=", "dist", "if", "indices", ":", "problems", ".", "append", "(", "[", "\"error\"", ",", "\"shape_dist_traveled decreases on a trip\"", ",", "table", ",", "indices", ",", "]", ")", "# Check timepoint", "v", "=", "lambda", "x", ":", "x", "in", "range", "(", "2", ")", "problems", "=", "check_column", "(", "problems", ",", "table", ",", "f", ",", "\"timepoint\"", ",", "v", ",", "column_required", "=", "False", ")", "if", "include_warnings", ":", "# Check for duplicated (trip_id, departure_time) pairs", "cond", "=", "f", "[", "[", "\"trip_id\"", ",", "\"departure_time\"", "]", "]", ".", "duplicated", "(", ")", "problems", "=", "check_table", "(", "problems", ",", "table", ",", "f", ",", "cond", ",", "\"Repeated pair (trip_id, departure_time)\"", ",", "\"warning\"", ",", ")", "return", "format_problems", "(", "problems", ",", "as_df", "=", "as_df", ")" ]
Analog of :func:`check_agency` for ``feed.stop_times``.
[ "Analog", "of", ":", "func", ":", "check_agency", "for", "feed", ".", "stop_times", "." ]
python
train
30.37037
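A hedged usage sketch; 'feed.zip' is a placeholder path to a GTFS archive.

import gtfstk

feed = gtfstk.read_gtfs('feed.zip', dist_units='km')  # placeholder archive
problems = check_stop_times(feed, as_df=True, include_warnings=True)
print(problems)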
splunk/splunk-sdk-python
examples/analytics/bottle.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L546-L599
def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). """ if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) if 'decorate' in config: depr("The 'decorate' parameter was renamed to 'apply'") # 0.9 plugins += makelist(config.pop('decorate')) if config.pop('no_hooks', False): depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\ " list of skipped plugins instead.") # 0.9 skiplist.append('hooks') static = config.get('static', False) # depr 0.9 def decorator(callback): for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() cfg = dict(rule=rule, method=verb, callback=callback, name=name, app=self, config=config, apply=plugins, skip=skiplist) self.routes.append(cfg) cfg['id'] = self.routes.index(cfg) self.router.add(rule, verb, cfg['id'], name=name, static=static) if DEBUG: self.ccache[cfg['id']] = self._build_callback(cfg) return callback return decorator(callback) if callback else decorator
[ "def", "route", "(", "self", ",", "path", "=", "None", ",", "method", "=", "'GET'", ",", "callback", "=", "None", ",", "name", "=", "None", ",", "apply", "=", "None", ",", "skip", "=", "None", ",", "*", "*", "config", ")", ":", "if", "callable", "(", "path", ")", ":", "path", ",", "callback", "=", "None", ",", "path", "plugins", "=", "makelist", "(", "apply", ")", "skiplist", "=", "makelist", "(", "skip", ")", "if", "'decorate'", "in", "config", ":", "depr", "(", "\"The 'decorate' parameter was renamed to 'apply'\"", ")", "# 0.9", "plugins", "+=", "makelist", "(", "config", ".", "pop", "(", "'decorate'", ")", ")", "if", "config", ".", "pop", "(", "'no_hooks'", ",", "False", ")", ":", "depr", "(", "\"The no_hooks parameter is no longer used. Add 'hooks' to the\"", "\" list of skipped plugins instead.\"", ")", "# 0.9", "skiplist", ".", "append", "(", "'hooks'", ")", "static", "=", "config", ".", "get", "(", "'static'", ",", "False", ")", "# depr 0.9", "def", "decorator", "(", "callback", ")", ":", "for", "rule", "in", "makelist", "(", "path", ")", "or", "yieldroutes", "(", "callback", ")", ":", "for", "verb", "in", "makelist", "(", "method", ")", ":", "verb", "=", "verb", ".", "upper", "(", ")", "cfg", "=", "dict", "(", "rule", "=", "rule", ",", "method", "=", "verb", ",", "callback", "=", "callback", ",", "name", "=", "name", ",", "app", "=", "self", ",", "config", "=", "config", ",", "apply", "=", "plugins", ",", "skip", "=", "skiplist", ")", "self", ".", "routes", ".", "append", "(", "cfg", ")", "cfg", "[", "'id'", "]", "=", "self", ".", "routes", ".", "index", "(", "cfg", ")", "self", ".", "router", ".", "add", "(", "rule", ",", "verb", ",", "cfg", "[", "'id'", "]", ",", "name", "=", "name", ",", "static", "=", "static", ")", "if", "DEBUG", ":", "self", ".", "ccache", "[", "cfg", "[", "'id'", "]", "]", "=", "self", ".", "_build_callback", "(", "cfg", ")", "return", "callback", "return", "decorator", "(", "callback", ")", "if", "callback", "else", "decorator" ]
A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`).
[ "A", "decorator", "to", "bind", "a", "function", "to", "a", "request", "URL", ".", "Example", "::" ]
python
train
49.555556
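A minimal route sketch against this vendored bottle copy, using its old ':name' wildcard syntax and assuming the module is importable as `bottle`.

from bottle import Bottle, run

app = Bottle()

@app.route('/hello/:name')
def hello(name):
    return 'Hello %s' % name

# run(app, host='localhost', port=8080)  # uncomment to serve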
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py#L199-L205
def _get_image(self, name): """ Returns the QImage stored as the ImageResource with 'name'. """ document = self._control.document() image = document.resource(QtGui.QTextDocument.ImageResource, QtCore.QUrl(name)) return image
[ "def", "_get_image", "(", "self", ",", "name", ")", ":", "document", "=", "self", ".", "_control", ".", "document", "(", ")", "image", "=", "document", ".", "resource", "(", "QtGui", ".", "QTextDocument", ".", "ImageResource", ",", "QtCore", ".", "QUrl", "(", "name", ")", ")", "return", "image" ]
Returns the QImage stored as the ImageResource with 'name'.
[ "Returns", "the", "QImage", "stored", "as", "the", "ImageResource", "with", "name", "." ]
python
test
41.714286
etcher-be/emiz
emiz/weather/noaa/__init__.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/noaa/__init__.py#L32-L48
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
    """
    Retrieves a METAR string from an online database

    Args:
        station_icao: ICAO of the station

    Returns: tuple of error, metar_str
    """
    url = _BASE_METAR_URL.format(station=station_icao)
    with requests.get(url) as resp:
        if not resp.ok:
            return f'unable to obtain METAR for station {station_icao}\n' \
                   f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \
                   f'for a list of valid stations', None
        return None, resp.content.decode().split('\n')[1]
[ "def", "retrieve_metar", "(", "station_icao", ")", "->", "typing", ".", "Tuple", "[", "typing", ".", "Optional", "[", "str", "]", ",", "typing", ".", "Optional", "[", "str", "]", "]", ":", "url", "=", "_BASE_METAR_URL", ".", "format", "(", "station", "=", "station_icao", ")", "with", "requests", ".", "get", "(", "url", ")", "as", "resp", ":", "if", "not", "resp", ".", "ok", ":", "return", "f'unable to obtain METAR for station {station_icao}\\n'", "f'Got to \"http://tgftp.nws.noaa.gov/data/observations/metar/stations\" '", "f'for a list of valid stations'", ",", "None", "return", "None", ",", "resp", ".", "content", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", "[", "1", "]" ]
Retrieves a METAR string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str
[ "Retrieves", "a", "METAR", "string", "from", "an", "online", "database" ]
python
train
38
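Calling the helper is a two-value unpack; 'KJFK' is just an example ICAO.

error, metar = retrieve_metar('KJFK')  # example ICAO
if error:
    print(error)
else:
    print(metar)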
sdispater/orator
orator/schema/blueprint.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/blueprint.py#L740-L755
def _add_command(self, name, **parameters): """ Add a new command to the blueprint. :param name: The command name :type name: str :param parameters: The command parameters :type parameters: dict :rtype: Fluent """ command = self._create_command(name, **parameters) self._commands.append(command) return command
[ "def", "_add_command", "(", "self", ",", "name", ",", "*", "*", "parameters", ")", ":", "command", "=", "self", ".", "_create_command", "(", "name", ",", "*", "*", "parameters", ")", "self", ".", "_commands", ".", "append", "(", "command", ")", "return", "command" ]
Add a new command to the blueprint. :param name: The command name :type name: str :param parameters: The command parameters :type parameters: dict :rtype: Fluent
[ "Add", "a", "new", "command", "to", "the", "blueprint", "." ]
python
train
24.1875
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L4958-L5001
def browser(i): """ Input: { (template) - use this web template (repo_uoa) - (module_uoa) - (data_uoa) - view a given entry (extra_url) - extra URL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ # Check if ck-web is installed r=find({'module_uoa':'module', 'data_uoa':'wfe'}) if r['return']>0: if r['return']!=16: return r out('Seems like ck-web repository is not installed (can\'t find wfe module)!') out('Please, install it via "ck pull repo:ck-web" and try again!') return {'return':0} t=i.get('template','') ruoa=i.get('repo_uoa','') muoa=i.get('module_uoa','') duoa=i.get('data_uoa','') cid='' if duoa!='' or muoa!='' or ruoa!='': if ruoa!='': cid=ruoa+':' if muoa!='': cid+=muoa+':' if duoa!='': cid+=duoa # Starting web service and asking to open page return access({'action':'start', 'module_uoa':'web', 'browser':'yes', 'template':t, 'cid':cid, 'extra_url':i.get('extra_url','')})
[ "def", "browser", "(", "i", ")", ":", "# Check if ck-web is installed", "r", "=", "find", "(", "{", "'module_uoa'", ":", "'module'", ",", "'data_uoa'", ":", "'wfe'", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "if", "r", "[", "'return'", "]", "!=", "16", ":", "return", "r", "out", "(", "'Seems like ck-web repository is not installed (can\\'t find wfe module)!'", ")", "out", "(", "'Please, install it via \"ck pull repo:ck-web\" and try again!'", ")", "return", "{", "'return'", ":", "0", "}", "t", "=", "i", ".", "get", "(", "'template'", ",", "''", ")", "ruoa", "=", "i", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "muoa", "=", "i", ".", "get", "(", "'module_uoa'", ",", "''", ")", "duoa", "=", "i", ".", "get", "(", "'data_uoa'", ",", "''", ")", "cid", "=", "''", "if", "duoa", "!=", "''", "or", "muoa", "!=", "''", "or", "ruoa", "!=", "''", ":", "if", "ruoa", "!=", "''", ":", "cid", "=", "ruoa", "+", "':'", "if", "muoa", "!=", "''", ":", "cid", "+=", "muoa", "+", "':'", "if", "duoa", "!=", "''", ":", "cid", "+=", "duoa", "# Starting web service and asking to open page", "return", "access", "(", "{", "'action'", ":", "'start'", ",", "'module_uoa'", ":", "'web'", ",", "'browser'", ":", "'yes'", ",", "'template'", ":", "t", ",", "'cid'", ":", "cid", ",", "'extra_url'", ":", "i", ".", "get", "(", "'extra_url'", ",", "''", ")", "}", ")" ]
Input: { (template) - use this web template (repo_uoa) - (module_uoa) - (data_uoa) - view a given entry (extra_url) - extra URL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
[ "Input", ":", "{", "(", "template", ")", "-", "use", "this", "web", "template", "(", "repo_uoa", ")", "-", "(", "module_uoa", ")", "-", "(", "data_uoa", ")", "-", "view", "a", "given", "entry", "(", "extra_url", ")", "-", "extra", "URL", "}" ]
python
train
28.227273
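A hedged call sketch through the CK kernel; it starts the local web front-end and opens a browser, so only run it interactively.

import ck.kernel as ck

r = ck.browser({})  # no template/CID given: open the default wfe page
if r['return'] > 0:
    print('error:', r.get('error', ''))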
choderalab/pymbar
examples/parallel-tempering-2dpmf/parallel-tempering-2dpmf.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/parallel-tempering-2dpmf/parallel-tempering-2dpmf.py#L58-L76
def read_file(filename): """Read contents of the specified file. Parameters: ----------- filename : str The name of the file to be read Returns: lines : list of str The contents of the file, split by line """ infile = open(filename, 'r') lines = infile.readlines() infile.close() return lines
[ "def", "read_file", "(", "filename", ")", ":", "infile", "=", "open", "(", "filename", ",", "'r'", ")", "lines", "=", "infile", ".", "readlines", "(", ")", "infile", ".", "close", "(", ")", "return", "lines" ]
Read contents of the specified file. Parameters: ----------- filename : str The name of the file to be read Returns: lines : list of str The contents of the file, split by line
[ "Read", "contents", "of", "the", "specified", "file", "." ]
python
train
17.052632
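A design note on the function above: the manual open/close pattern leaks the handle if readlines() raises; the idiomatic equivalent uses a context manager, sketched here.

def read_file(filename):
    # Same behaviour, but the file is closed even if readlines() raises.
    with open(filename, 'r') as infile:
        return infile.readlines()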