repo               stringlengths   7–54
path               stringlengths   4–192
url                stringlengths   87–284
code               stringlengths   78–104k
code_tokens        list
docstring          stringlengths   1–46.9k
docstring_tokens   list
language           stringclasses   1 value
partition          stringclasses   3 values
OzymandiasTheGreat/python-libinput
libinput/event.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L600-L623
def seat_slot(self):
    """The seat slot of the touch event.

    A seat slot is a non-negative seat wide unique identifier
    of an active touch point.

    Events from single touch devices will be represented as one
    individual touch point per device.

    For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,
    :attr:`~libinput.constant.EventType.TOUCH_UP`,
    :attr:`~libinput.constant.EventType.TOUCH_MOTION` or
    :attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property
    raises :exc:`AttributeError`.

    Returns:
        int: The seat slot of the touch event.
    Raises:
        AttributeError
    """
    if self.type == EventType.TOUCH_FRAME:
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_touch_get_seat_slot(self._handle)
[ "def", "seat_slot", "(", "self", ")", ":", "if", "self", ".", "type", "==", "EventType", ".", "TOUCH_FRAME", ":", "raise", "AttributeError", "(", "_wrong_prop", ".", "format", "(", "self", ".", "type", ")", ")", "return", "self", ".", "_libinput", ".", "libinput_event_touch_get_seat_slot", "(", "self", ".", "_handle", ")" ]
The seat slot of the touch event. A seat slot is a non-negative seat wide unique identifier of an active touch point. Events from single touch devices will be represented as one individual touch point per device. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_UP`, :attr:`~libinput.constant.EventType.TOUCH_MOTION` or :attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property raises :exc:`AttributeError`. Returns: int: The seat slot of the touch event. Raises: AttributeError
[ "The", "seat", "slot", "of", "the", "touch", "event", "." ]
python
train
florianholzapfel/panasonic-viera
panasonic_viera/__init__.py
https://github.com/florianholzapfel/panasonic-viera/blob/bf912ff6eb03b59e3dde30b994a0fb1d883eb873/panasonic_viera/__init__.py#L107-L135
def soap_request(self, url, urn, action, params, body_elem="m"):
    """Send a SOAP request to the TV."""
    soap_body = (
        '<?xml version="1.0" encoding="utf-8"?>'
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
        ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body>'
        '<{body_elem}:{action} xmlns:{body_elem}="urn:{urn}">'
        '{params}'
        '</{body_elem}:{action}>'
        '</s:Body>'
        '</s:Envelope>'
    ).format(action=action, urn=urn, params=params,
             body_elem=body_elem).encode('utf-8')
    headers = {
        'Host': '{}:{}'.format(self._host, self._port),
        'Content-Length': len(soap_body),
        'Content-Type': 'text/xml; charset="utf-8"',
        'SOAPAction': '"urn:{}#{}"'.format(urn, action),
    }
    url = 'http://{}:{}/{}'.format(self._host, self._port, url)
    _LOGGER.debug("Sending to %s:\n%s\n%s", url, headers, soap_body)
    req = Request(url, soap_body, headers)
    res = urlopen(req, timeout=5).read()
    _LOGGER.debug("Response: %s", res)
    return res
[ "def", "soap_request", "(", "self", ",", "url", ",", "urn", ",", "action", ",", "params", ",", "body_elem", "=", "\"m\"", ")", ":", "soap_body", "=", "(", "'<?xml version=\"1.0\" encoding=\"utf-8\"?>'", "'<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\"'", "' s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">'", "'<s:Body>'", "'<{body_elem}:{action} xmlns:{body_elem}=\"urn:{urn}\">'", "'{params}'", "'</{body_elem}:{action}>'", "'</s:Body>'", "'</s:Envelope>'", ")", ".", "format", "(", "action", "=", "action", ",", "urn", "=", "urn", ",", "params", "=", "params", ",", "body_elem", "=", "body_elem", ")", ".", "encode", "(", "'utf-8'", ")", "headers", "=", "{", "'Host'", ":", "'{}:{}'", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", ",", "'Content-Length'", ":", "len", "(", "soap_body", ")", ",", "'Content-Type'", ":", "'text/xml; charset=utf-8\"'", ",", "'SOAPAction'", ":", "'\"urn:{}#{}\"'", ".", "format", "(", "urn", ",", "action", ")", ",", "}", "url", "=", "'http://{}:{}/{}'", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "url", ")", "_LOGGER", ".", "debug", "(", "\"Sending to %s:\\n%s\\n%s\"", ",", "url", ",", "headers", ",", "soap_body", ")", "req", "=", "Request", "(", "url", ",", "soap_body", ",", "headers", ")", "res", "=", "urlopen", "(", "req", ",", "timeout", "=", "5", ")", ".", "read", "(", ")", "_LOGGER", ".", "debug", "(", "\"Response: %s\"", ",", "res", ")", "return", "res" ]
Send a SOAP request to the TV.
[ "Send", "a", "SOAP", "request", "to", "the", "TV", "." ]
python
train
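For illustration, here is a minimal standalone sketch of the envelope construction in soap_request above. The urn, action, and params values are placeholders in the style of the Viera network-control service, not values taken from this record.

```python
# Build the same SOAP envelope with a fixed "m" body element.
soap_body = (
    '<?xml version="1.0" encoding="utf-8"?>'
    '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
    ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
    '<s:Body>'
    '<m:{action} xmlns:m="urn:{urn}">{params}</m:{action}>'
    '</s:Body>'
    '</s:Envelope>'
).format(action='X_SendKey',                                # hypothetical action
         urn='panasonic-com:service:p00NetworkControl:1',   # hypothetical urn
         params='<X_KeyEvent>NRC_MUTE-ONOFF</X_KeyEvent>')  # hypothetical params
print(soap_body)
```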
ellmetha/django-machina
machina/apps/forum_permission/shortcuts.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/shortcuts.py#L43-L55
def remove_perm(perm, user_or_group, forum=None):
    """ Remove a permission from a user (anonymous or not) or a group. """
    user, group = get_identity(user_or_group)
    perm = ForumPermission.objects.get(codename=perm)
    if user:
        UserForumPermission.objects.filter(
            forum=forum, permission=perm,
            user=user if not user.is_anonymous else None,
            anonymous_user=user.is_anonymous,
        ).delete()
    if group:
        GroupForumPermission.objects.filter(forum=forum, permission=perm, group=group).delete()
[ "def", "remove_perm", "(", "perm", ",", "user_or_group", ",", "forum", "=", "None", ")", ":", "user", ",", "group", "=", "get_identity", "(", "user_or_group", ")", "perm", "=", "ForumPermission", ".", "objects", ".", "get", "(", "codename", "=", "perm", ")", "if", "user", ":", "UserForumPermission", ".", "objects", ".", "filter", "(", "forum", "=", "forum", ",", "permission", "=", "perm", ",", "user", "=", "user", "if", "not", "user", ".", "is_anonymous", "else", "None", ",", "anonymous_user", "=", "user", ".", "is_anonymous", ",", ")", ".", "delete", "(", ")", "if", "group", ":", "GroupForumPermission", ".", "objects", ".", "filter", "(", "forum", "=", "forum", ",", "permission", "=", "perm", ",", "group", "=", "group", ")", ".", "delete", "(", ")" ]
Remove a permission from a user (anonymous or not) or a group.
[ "Remove", "a", "permission", "to", "a", "user", "(", "anonymous", "or", "not", ")", "or", "a", "group", "." ]
python
train
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L615-L620
def free_symbolic(self):
    """Free symbolic data"""
    if self._symbolic is not None:
        self.funs.free_symbolic(self._symbolic)
        self._symbolic = None
        self.mtx = None
[ "def", "free_symbolic", "(", "self", ")", ":", "if", "self", ".", "_symbolic", "is", "not", "None", ":", "self", ".", "funs", ".", "free_symbolic", "(", "self", ".", "_symbolic", ")", "self", ".", "_symbolic", "=", "None", "self", ".", "mtx", "=", "None" ]
Free symbolic data
[ "Free", "symbolic", "data" ]
python
train
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L204-L210
def in_date(objet, pattern):
    """ abstractSearch in a datetime.date """
    if objet:
        pattern = re.sub(" ", '', pattern)
        objet_str = abstractRender.date(objet)
        return bool(re.search(pattern, objet_str))
    return False
[ "def", "in_date", "(", "objet", ",", "pattern", ")", ":", "if", "objet", ":", "pattern", "=", "re", ".", "sub", "(", "\" \"", ",", "''", ",", "pattern", ")", "objet_str", "=", "abstractRender", ".", "date", "(", "objet", ")", "return", "bool", "(", "re", ".", "search", "(", "pattern", ",", "objet_str", ")", ")", "return", "False" ]
abstractSearch in a datetime.date
[ "abstractSearch", "dans", "une", "date", "datetime", ".", "date" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py#L54-L81
def get_branches(self, project=None, include_parent=None, include_children=None, include_deleted=None, include_links=None):
    """GetBranches.
    Get a collection of branch roots -- first-level children, branches with no parents.
    :param str project: Project ID or project name
    :param bool include_parent: Return the parent branch, if there is one. Default: False
    :param bool include_children: Return the child branches for each root branch. Default: False
    :param bool include_deleted: Return deleted branches. Default: False
    :param bool include_links: Return links. Default: False
    :rtype: [TfvcBranch]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if include_parent is not None:
        query_parameters['includeParent'] = self._serialize.query('include_parent', include_parent, 'bool')
    if include_children is not None:
        query_parameters['includeChildren'] = self._serialize.query('include_children', include_children, 'bool')
    if include_deleted is not None:
        query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
    if include_links is not None:
        query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool')
    response = self._send(http_method='GET',
                          location_id='bc1f417e-239d-42e7-85e1-76e80cb2d6eb',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TfvcBranch]', self._unwrap_collection(response))
[ "def", "get_branches", "(", "self", ",", "project", "=", "None", ",", "include_parent", "=", "None", ",", "include_children", "=", "None", ",", "include_deleted", "=", "None", ",", "include_links", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "include_parent", "is", "not", "None", ":", "query_parameters", "[", "'includeParent'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_parent'", ",", "include_parent", ",", "'bool'", ")", "if", "include_children", "is", "not", "None", ":", "query_parameters", "[", "'includeChildren'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_children'", ",", "include_children", ",", "'bool'", ")", "if", "include_deleted", "is", "not", "None", ":", "query_parameters", "[", "'includeDeleted'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_deleted'", ",", "include_deleted", ",", "'bool'", ")", "if", "include_links", "is", "not", "None", ":", "query_parameters", "[", "'includeLinks'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_links'", ",", "include_links", ",", "'bool'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'bc1f417e-239d-42e7-85e1-76e80cb2d6eb'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[TfvcBranch]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetBranches. Get a collection of branch roots -- first-level children, branches with no parents. :param str project: Project ID or project name :param bool include_parent: Return the parent branch, if there is one. Default: False :param bool include_children: Return the child branches for each root branch. Default: False :param bool include_deleted: Return deleted branches. Default: False :param bool include_links: Return links. Default: False :rtype: [TfvcBranch]
[ "GetBranches", ".", "Get", "a", "collection", "of", "branch", "roots", "--", "first", "-", "level", "children", "branches", "with", "no", "parents", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "bool", "include_parent", ":", "Return", "the", "parent", "branch", "if", "there", "is", "one", ".", "Default", ":", "False", ":", "param", "bool", "include_children", ":", "Return", "the", "child", "branches", "for", "each", "root", "branch", ".", "Default", ":", "False", ":", "param", "bool", "include_deleted", ":", "Return", "deleted", "branches", ".", "Default", ":", "False", ":", "param", "bool", "include_links", ":", "Return", "links", ".", "Default", ":", "False", ":", "rtype", ":", "[", "TfvcBranch", "]" ]
python
train
heuer/cablemap
cablemap.core/cablemap/core/reader.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.core/cablemap/core/reader.py#L302-L315
def parse_transmission_id(header, reference_id=None):
    """\
    Returns the transmission ID of the cable.

    If no transmission identifier was found, ``None`` is returned.

    `header`
        The cable's header
    `reference_id`
        The cable's reference ID.
    """
    m = _TID_PATTERN.search(header)
    if not m:
        return None
    return m.group(1)
[ "def", "parse_transmission_id", "(", "header", ",", "reference_id", "=", "None", ")", ":", "m", "=", "_TID_PATTERN", ".", "search", "(", "header", ")", "if", "not", "m", ":", "return", "None", "return", "m", ".", "group", "(", "1", ")" ]
\ Returns the transmission ID of the cable. If no transmission identifier was found, ``None`` is returned. `header` The cable's header `reference_id` The cable's reference ID.
[ "\\", "Returns", "the", "transmission", "ID", "of", "the", "cable", ".", "If", "no", "transmission", "identifier", "was", "found", "None", "is", "returned", "." ]
python
train
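_TID_PATTERN is defined outside the snippet above, so the regex below is a hypothetical stand-in; only the search-and-group logic mirrors the function.

```python
import re

# Stand-in for _TID_PATTERN: matches a "VZCZC..."-style transmission line
# at the start of a cable header (illustrative, not the project's pattern).
_TID_PATTERN = re.compile(r'^(VZCZC[A-Z0-9]+)', re.MULTILINE)

header = 'VZCZCXRO0755\nOO RUEHAG RUEHROV\nDE RUEHSK #1111/01'
m = _TID_PATTERN.search(header)
print(m.group(1) if m else None)  # VZCZCXRO0755
```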
Locu/chronology
pykronos/pykronos/client.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/pykronos/pykronos/client.py#L296-L310
def get_streams(self, namespace=None):
    """ Queries the Kronos server and fetches a list of streams
    available to be read.
    """
    request_dict = {}
    namespace = namespace or self.namespace
    if namespace is not None:
        request_dict['namespace'] = namespace
    response = self._make_request(self._streams_url,
                                  data=request_dict,
                                  stream=True)
    for line in response.iter_lines():
        if line:
            yield line
[ "def", "get_streams", "(", "self", ",", "namespace", "=", "None", ")", ":", "request_dict", "=", "{", "}", "namespace", "=", "namespace", "or", "self", ".", "namespace", "if", "namespace", "is", "not", "None", ":", "request_dict", "[", "'namespace'", "]", "=", "namespace", "response", "=", "self", ".", "_make_request", "(", "self", ".", "_streams_url", ",", "data", "=", "request_dict", ",", "stream", "=", "True", ")", "for", "line", "in", "response", ".", "iter_lines", "(", ")", ":", "if", "line", ":", "yield", "line" ]
Queries the Kronos server and fetches a list of streams available to be read.
[ "Queries", "the", "Kronos", "server", "and", "fetches", "a", "list", "of", "streams", "available", "to", "be", "read", "." ]
python
train
spyder-ide/spyder
spyder/plugins/console/widgets/shell.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L283-L287
def on_new_line(self):
    """On new input line"""
    self.set_cursor_position('eof')
    self.current_prompt_pos = self.get_position('cursor')
    self.new_input_line = False
[ "def", "on_new_line", "(", "self", ")", ":", "self", ".", "set_cursor_position", "(", "'eof'", ")", "self", ".", "current_prompt_pos", "=", "self", ".", "get_position", "(", "'cursor'", ")", "self", ".", "new_input_line", "=", "False" ]
On new input line
[ "On", "new", "input", "line" ]
python
train
funkybob/antfarm
antfarm/request.py
https://github.com/funkybob/antfarm/blob/40a7cc450eba09a280b7bc8f7c68a807b0177c62/antfarm/request.py#L32-L37
def cookies(self):
    '''Simplified Cookie access'''
    return {
        key: self.raw_cookies[key].value
        for key in self.raw_cookies.keys()
    }
[ "def", "cookies", "(", "self", ")", ":", "return", "{", "key", ":", "self", ".", "raw_cookies", "[", "key", "]", ".", "value", "for", "key", "in", "self", ".", "raw_cookies", ".", "keys", "(", ")", "}" ]
Simplified Cookie access
[ "Simplified", "Cookie", "access" ]
python
train
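The same comprehension works against the stdlib's SimpleCookie, whose morsels expose the same value attribute; a quick sketch with a made-up header string:

```python
from http.cookies import SimpleCookie

raw_cookies = SimpleCookie()
raw_cookies.load('sessionid=abc123; theme=dark')

# Same dict comprehension as the cookies property above.
cookies = {key: raw_cookies[key].value for key in raw_cookies.keys()}
print(cookies)  # {'sessionid': 'abc123', 'theme': 'dark'}
```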
crodjer/paster
paster/services.py
https://github.com/crodjer/paster/blob/0cd7230074850ba74e80c740a8bc2502645dd743/paster/services.py#L282-L305
def get_api_user_key(self, api_dev_key, username=None, password=None):
    '''
    Get api user key to enable posts from user accounts if username
    and password available. Not getting an api_user_key means that
    the posts will be "guest" posts
    '''
    username = username or get_config('pastebin', 'api_user_name')
    password = password or get_config('pastebin', 'api_user_password')
    if username and password:
        data = {
            'api_user_name': username,
            'api_user_password': password,
            'api_dev_key': api_dev_key,
        }
        urlencoded_data = urllib.urlencode(data)
        req = urllib2.Request('http://pastebin.com/api/api_login.php',
                              urlencoded_data)
        response = urllib2.urlopen(req)
        user_key = response.read()
        logging.debug("User key: %s" % user_key)
        return user_key
    else:
        logging.info("Pastebin: not using any user key")
        return ""
[ "def", "get_api_user_key", "(", "self", ",", "api_dev_key", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "username", "=", "username", "or", "get_config", "(", "'pastebin'", ",", "'api_user_name'", ")", "password", "=", "password", "or", "get_config", "(", "'pastebin'", ",", "'api_user_password'", ")", "if", "username", "and", "password", ":", "data", "=", "{", "'api_user_name'", ":", "username", ",", "'api_user_password'", ":", "password", ",", "'api_dev_key'", ":", "api_dev_key", ",", "}", "urlencoded_data", "=", "urllib", ".", "urlencode", "(", "data", ")", "req", "=", "urllib2", ".", "Request", "(", "'http://pastebin.com/api/api_login.php'", ",", "urlencoded_data", ")", "response", "=", "urllib2", ".", "urlopen", "(", "req", ")", "user_key", "=", "response", ".", "read", "(", ")", "logging", ".", "debug", "(", "\"User key: %s\"", "%", "user_key", ")", "return", "user_key", "else", ":", "logging", ".", "info", "(", "\"Pastebin: not using any user key\"", ")", "return", "\"\"" ]
Get api user key to enable posts from user accounts if username and password available. Not getting an api_user_key means that the posts will be "guest" posts
[ "Get", "api", "user", "key", "to", "enable", "posts", "from", "user", "accounts", "if", "username", "and", "password", "available", ".", "Not", "getting", "an", "api_user_key", "means", "that", "the", "posts", "will", "be", "guest", "posts" ]
python
train
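The record above is Python 2 (urllib.urlencode, urllib2). As a hedged sketch, the same login request under the Python 3 stdlib might look like this; the credentials are placeholders and HTTPS is assumed for the endpoint.

```python
from urllib.parse import urlencode
from urllib.request import Request, urlopen

data = urlencode({
    'api_user_name': 'user',        # placeholder credentials
    'api_user_password': 'secret',
    'api_dev_key': 'dev-key',
}).encode('ascii')

req = Request('https://pastebin.com/api/api_login.php', data)  # POST
user_key = urlopen(req, timeout=10).read().decode()
```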
CxAalto/gtfspy
gtfspy/routing/connection_scan.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/connection_scan.py#L92-L103
def _scan_footpaths(self, stop_id, walk_departure_time):
    """
    Scan the footpaths originating from stop_id

    Parameters
    ----------
    stop_id: int
    """
    for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True):
        d_walk = data["d_walk"]
        arrival_time = walk_departure_time + d_walk / self._walk_speed
        self._update_stop_label(neighbor, arrival_time)
[ "def", "_scan_footpaths", "(", "self", ",", "stop_id", ",", "walk_departure_time", ")", ":", "for", "_", ",", "neighbor", ",", "data", "in", "self", ".", "_walk_network", ".", "edges_iter", "(", "nbunch", "=", "[", "stop_id", "]", ",", "data", "=", "True", ")", ":", "d_walk", "=", "data", "[", "\"d_walk\"", "]", "arrival_time", "=", "walk_departure_time", "+", "d_walk", "/", "self", ".", "_walk_speed", "self", ".", "_update_stop_label", "(", "neighbor", ",", "arrival_time", ")" ]
Scan the footpaths originating from stop_id Parameters ---------- stop_id: int
[ "Scan", "the", "footpaths", "originating", "from", "stop_id" ]
python
valid
F483/btctxstore
btctxstore/api.py
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/api.py#L177-L180
def sign_unicode(self, wif, message):
    """Signing <unicode> with <wif> private key."""
    hexdata = binascii.hexlify(message.encode("utf-8"))
    return self.sign_data(wif, hexdata)
[ "def", "sign_unicode", "(", "self", ",", "wif", ",", "message", ")", ":", "hexdata", "=", "binascii", ".", "hexlify", "(", "message", ".", "encode", "(", "\"utf-8\"", ")", ")", "return", "self", ".", "sign_data", "(", "wif", ",", "hexdata", ")" ]
Signing <unicode> with <wif> private key.
[ "Signing", "<unicode", ">", "with", "<wif", ">", "private", "key", "." ]
python
train
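The hexlify step is easy to check in isolation; a two-line sketch with a made-up message:

```python
import binascii

# Non-ASCII text round-trips through UTF-8 before hex encoding.
hexdata = binascii.hexlify('héllo'.encode('utf-8'))
print(hexdata)  # b'68c3a96c6c6f'
```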
Hackerfleet/hfos
modules/maps/hfos/map/TileTools.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/maps/hfos/map/TileTools.py#L16-L21
def clipValue(self, value, minValue, maxValue):
    '''
    Makes sure that value is within a specific range.
    If not, then the lower or upper bound is returned
    '''
    return min(max(value, minValue), maxValue)
[ "def", "clipValue", "(", "self", ",", "value", ",", "minValue", ",", "maxValue", ")", ":", "return", "min", "(", "max", "(", "value", ",", "minValue", ")", ",", "maxValue", ")" ]
Makes sure that value is within a specific range. If not, then the lower or upper bound is returned
[ "Makes", "sure", "that", "value", "is", "within", "a", "specific", "range", ".", "If", "not", "then", "the", "lower", "or", "upper", "bounds", "is", "returned" ]
python
train
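The clamp idiom min(max(value, lo), hi) can be exercised standalone (the snake_case names below are mine):

```python
def clip_value(value, min_value, max_value):
    """Clamp value into the closed range [min_value, max_value]."""
    return min(max(value, min_value), max_value)

assert clip_value(5, 0, 10) == 5    # already in range
assert clip_value(-3, 0, 10) == 0   # clipped to lower bound
assert clip_value(42, 0, 10) == 10  # clipped to upper bound
```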
paulovn/sparql-kernel
sparqlkernel/kernel.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/kernel.py#L106-L133
def _send(self, data, msg_type='ok', silent=False):
    """
    Send a response to the frontend and return an execute message
    @param data: response to send
    @param msg_type (str): message type: 'ok', 'raw', 'error', 'multi'
    @param silent (bool): suppress output
    @return (dict): the return value for the kernel
    """
    # Data to send back
    if data is not None:
        # log the message
        try:
            self._klog.debug(u"msg to frontend (%d): %.160s...", silent, data)
        except Exception as e:
            self._klog.warn(u"can't log response: %s", e)
        # send it to the frontend
        if not silent:
            if msg_type != 'raw':
                data = data_msg(data, mtype=msg_type)
            self.send_response(self.iopub_socket, 'display_data', data)

    # Result message
    return {'status': 'error' if msg_type == 'error' else 'ok',
            # The base class will increment the execution count
            'execution_count': self.execution_count,
            'payload': [],
            'user_expressions': {}
            }
[ "def", "_send", "(", "self", ",", "data", ",", "msg_type", "=", "'ok'", ",", "silent", "=", "False", ")", ":", "# Data to send back", "if", "data", "is", "not", "None", ":", "# log the message", "try", ":", "self", ".", "_klog", ".", "debug", "(", "u\"msg to frontend (%d): %.160s...\"", ",", "silent", ",", "data", ")", "except", "Exception", "as", "e", ":", "self", ".", "_klog", ".", "warn", "(", "u\"can't log response: %s\"", ",", "e", ")", "# send it to the frontend", "if", "not", "silent", ":", "if", "msg_type", "!=", "'raw'", ":", "data", "=", "data_msg", "(", "data", ",", "mtype", "=", "msg_type", ")", "self", ".", "send_response", "(", "self", ".", "iopub_socket", ",", "'display_data'", ",", "data", ")", "# Result message", "return", "{", "'status'", ":", "'error'", "if", "msg_type", "==", "'error'", "else", "'ok'", ",", "# The base class will increment the execution count", "'execution_count'", ":", "self", ".", "execution_count", ",", "'payload'", ":", "[", "]", ",", "'user_expressions'", ":", "{", "}", "}" ]
Send a response to the frontend and return an execute message @param data: response to send @param msg_type (str): message type: 'ok', 'raw', 'error', 'multi' @param silent (bool): suppress output @return (dict): the return value for the kernel
[ "Send", "a", "response", "to", "the", "frontend", "and", "return", "an", "execute", "message" ]
python
train
istresearch/scrapy-cluster
rest/rest_service.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L50-L71
def error_catch(f):
    """Handle unexpected errors within the rest function."""
    @wraps(f)
    def wrapper(*args, **kw):
        instance = args[0]
        try:
            result = f(*args, **kw)
            if isinstance(result, tuple):
                return jsonify(result[0]), result[1]
            else:
                return jsonify(result), 200
        except Exception as e:
            ret_dict = instance._create_ret_object(instance.FAILURE, None, True,
                                                   instance.UNKNOWN_ERROR)
            log_dict = deepcopy(ret_dict)
            # e.message exists only on Python 2 exceptions; str(e) below covers Python 3
            log_dict['error']['cause'] = e.message
            log_dict['error']['exception'] = str(e)
            log_dict['error']['ex'] = traceback.format_exc()
            instance.logger.error("Uncaught Exception Thrown", log_dict)
            return jsonify(ret_dict), 500
    return wrapper
[ "def", "error_catch", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "instance", "=", "args", "[", "0", "]", "try", ":", "result", "=", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "if", "isinstance", "(", "result", ",", "tuple", ")", ":", "return", "jsonify", "(", "result", "[", "0", "]", ")", ",", "result", "[", "1", "]", "else", ":", "return", "jsonify", "(", "result", ")", ",", "200", "except", "Exception", "as", "e", ":", "ret_dict", "=", "instance", ".", "_create_ret_object", "(", "instance", ".", "FAILURE", ",", "None", ",", "True", ",", "instance", ".", "UNKNOWN_ERROR", ")", "log_dict", "=", "deepcopy", "(", "ret_dict", ")", "log_dict", "[", "'error'", "]", "[", "'cause'", "]", "=", "e", ".", "message", "log_dict", "[", "'error'", "]", "[", "'exception'", "]", "=", "str", "(", "e", ")", "log_dict", "[", "'error'", "]", "[", "'ex'", "]", "=", "traceback", ".", "format_exc", "(", ")", "instance", ".", "logger", ".", "error", "(", "\"Uncaught Exception Thrown\"", ",", "log_dict", ")", "return", "jsonify", "(", "ret_dict", ")", ",", "500", "return", "wrapper" ]
Handle unexpected errors within the rest function.
[ "Handle", "unexpected", "errors", "within", "the", "rest", "function", "." ]
python
train
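A trimmed, self-contained sketch of the same wrap-and-jsonify pattern in a bare Flask app; the instance logger and _create_ret_object bookkeeping are dropped, so this illustrates the decorator's shape rather than the project's implementation.

```python
from functools import wraps
from flask import Flask, jsonify

app = Flask(__name__)

def error_catch(f):
    @wraps(f)
    def wrapper(*args, **kw):
        try:
            result = f(*args, **kw)
            if isinstance(result, tuple):      # (payload, status_code)
                return jsonify(result[0]), result[1]
            return jsonify(result), 200
        except Exception as e:
            return jsonify({'status': 'FAILURE', 'error': str(e)}), 500
    return wrapper

@app.route('/ping')
@error_catch
def ping():
    return {'status': 'ok'}
```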
BerkeleyAutomation/perception
perception/weight_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/weight_sensor.py#L35-L41
def start(self):
    """Start the sensor. """
    if rospy.get_name() == '/unnamed':
        raise ValueError('Weight sensor must be run inside a ros node!')
    self._weight_subscriber = rospy.Subscriber('weight_sensor/weights',
                                               Float32MultiArray,
                                               self._weights_callback)
    self._running = True
[ "def", "start", "(", "self", ")", ":", "if", "rospy", ".", "get_name", "(", ")", "==", "'/unnamed'", ":", "raise", "ValueError", "(", "'Weight sensor must be run inside a ros node!'", ")", "self", ".", "_weight_subscriber", "=", "rospy", ".", "Subscriber", "(", "'weight_sensor/weights'", ",", "Float32MultiArray", ",", "self", ".", "_weights_callback", ")", "self", ".", "_running", "=", "True" ]
Start the sensor.
[ "Start", "the", "sensor", "." ]
python
train
PyCQA/pylint
pylint/message/message_store.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/message/message_store.py#L46-L53
def register_messages_from_checker(self, checker):
    """Register all messages from a checker.

    :param BaseChecker checker:
    """
    checker.check_consistency()
    for message in checker.messages:
        self.register_message(message)
[ "def", "register_messages_from_checker", "(", "self", ",", "checker", ")", ":", "checker", ".", "check_consistency", "(", ")", "for", "message", "in", "checker", ".", "messages", ":", "self", ".", "register_message", "(", "message", ")" ]
Register all messages from a checker. :param BaseChecker checker:
[ "Register", "all", "messages", "from", "a", "checker", "." ]
python
test
hydraplatform/hydra-base
hydra_base/lib/rules.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/rules.py#L32-L38
def get_rules(scenario_id, **kwargs):
    """
        Get all the rules for a given scenario.
    """
    rules = db.DBSession.query(Rule).filter(Rule.scenario_id==scenario_id,
                                            Rule.status=='A').all()
    return rules
[ "def", "get_rules", "(", "scenario_id", ",", "*", "*", "kwargs", ")", ":", "rules", "=", "db", ".", "DBSession", ".", "query", "(", "Rule", ")", ".", "filter", "(", "Rule", ".", "scenario_id", "==", "scenario_id", ",", "Rule", ".", "status", "==", "'A'", ")", ".", "all", "(", ")", "return", "rules" ]
Get all the rules for a given scenario.
[ "Get", "all", "the", "rules", "for", "a", "given", "scenario", "." ]
python
train
IntelPython/mkl_fft
mkl_fft/_numpy_fft.py
https://github.com/IntelPython/mkl_fft/blob/54b3271d64666f9af9f11418b4ca43d69054eb94/mkl_fft/_numpy_fft.py#L499-L555
def ihfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse FFT of a signal which has Hermitian symmetry.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT. Number of points along
        transformation axis in the input to use. If `n` is smaller than
        the length of the input, the input is cropped. If it is larger,
        the input is padded with zeros. If `n` is not given, the length of
        the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
        If `n` is odd, the length is ``(n+1)/2``.

    See also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So here it's `hfft` for
    which you must supply the length of the result if it is to be odd:
    ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.

    Examples
    --------
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> np.fft.ifft(spectrum)
    array([ 1.+0.j,  2.-0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.-0.j])
    >>> np.fft.ihfft(spectrum)
    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])

    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=float)
    if n is None:
        n = a.shape[axis]
    unitary = _unitary(norm)
    output = conjugate(rfft(a, n, axis))
    return output * (1 / (sqrt(n) if unitary else n))
[ "def", "ihfft", "(", "a", ",", "n", "=", "None", ",", "axis", "=", "-", "1", ",", "norm", "=", "None", ")", ":", "# The copy may be required for multithreading.", "a", "=", "array", "(", "a", ",", "copy", "=", "True", ",", "dtype", "=", "float", ")", "if", "n", "is", "None", ":", "n", "=", "a", ".", "shape", "[", "axis", "]", "unitary", "=", "_unitary", "(", "norm", ")", "output", "=", "conjugate", "(", "rfft", "(", "a", ",", "n", ",", "axis", ")", ")", "return", "output", "*", "(", "1", "/", "(", "sqrt", "(", "n", ")", "if", "unitary", "else", "n", ")", ")" ]
Compute the inverse FFT of a signal which has Hermitian symmetry. Parameters ---------- a : array_like Input array. n : int, optional Length of the inverse FFT. Number of points along transformation axis in the input to use. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. If `n` is even, the length of the transformed axis is ``(n/2)+1``. If `n` is odd, the length is ``(n+1)/2``. See also -------- hfft, irfft Notes ----- `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the opposite case: here the signal has Hermitian symmetry in the time domain and is real in the frequency domain. So here it's `hfft` for which you must supply the length of the result if it is to be odd: ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. Examples -------- >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j]) >>> np.fft.ihfft(spectrum) array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
[ "Compute", "the", "inverse", "FFT", "of", "a", "signal", "which", "has", "Hermitian", "symmetry", "." ]
python
train
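The round-trip identity noted in the docstring can be verified with NumPy's own implementation, used here as a stand-in for the MKL-backed version:

```python
import numpy as np

spectrum = np.array([15., -4., 0., -1., 0., -4.])
signal = np.fft.ihfft(spectrum)      # length (n/2)+1 = 4 for n = 6
print(np.fft.hfft(signal, n=6))      # recovers [15. -4.  0. -1.  0. -4.]
```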
Datary/scrapbag
scrapbag/strings.py
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/strings.py#L24-L34
def clean_markdown(text):
    """
    Strip Markdown syntax from text by rendering it to HTML
    and keeping only the text nodes.
    """
    result = text
    if isinstance(text, str):
        result = ''.join(
            BeautifulSoup(markdown(text), 'lxml').findAll(text=True))
    return result
[ "def", "clean_markdown", "(", "text", ")", ":", "result", "=", "text", "if", "isinstance", "(", "text", ",", "str", ")", ":", "result", "=", "''", ".", "join", "(", "BeautifulSoup", "(", "markdown", "(", "text", ")", ",", "'lxml'", ")", ".", "findAll", "(", "text", "=", "True", ")", ")", "return", "result" ]
Strip Markdown syntax from text by rendering it to HTML and keeping only the text nodes.
[ "Parse", "markdown", "sintaxt", "to", "html", "." ]
python
train
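A standalone sketch of the same strip-via-HTML trick, assuming the markdown, beautifulsoup4, and lxml packages are installed:

```python
from bs4 import BeautifulSoup
from markdown import markdown

text = '# Title\n\nSome **bold** text.'
# Render the Markdown to HTML, then keep only the text nodes.
stripped = ''.join(BeautifulSoup(markdown(text), 'lxml').findAll(text=True))
print(repr(stripped))  # 'Title\nSome bold text.'
```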
agermanidis/autosub
autosub/__init__.py
https://github.com/agermanidis/autosub/blob/d32389cb76e63ec6959111c3f989a72f36f726fe/autosub/__init__.py#L353-L410
def main():
    """
    Run autosub as a command-line program.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
                        nargs='?')
    parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
                        type=int, default=DEFAULT_CONCURRENCY)
    parser.add_argument('-o', '--output',
                        help="Output path for subtitles (by default, subtitles are saved in \
                        the same directory and name as the source path)")
    parser.add_argument('-F', '--format', help="Destination subtitle format",
                        default=DEFAULT_SUBTITLE_FORMAT)
    parser.add_argument('-S', '--src-language', help="Language spoken in source file",
                        default=DEFAULT_SRC_LANGUAGE)
    parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
                        default=DEFAULT_DST_LANGUAGE)
    parser.add_argument('-K', '--api-key',
                        help="The Google Translate API key to be used. \
                        (Required for subtitle translation)")
    parser.add_argument('--list-formats', help="List all available subtitle formats",
                        action='store_true')
    parser.add_argument('--list-languages', help="List all available source/destination languages",
                        action='store_true')

    args = parser.parse_args()

    if args.list_formats:
        print("List of formats:")
        for subtitle_format in FORMATTERS:
            print("{format}".format(format=subtitle_format))
        return 0

    if args.list_languages:
        print("List of all languages:")
        for code, language in sorted(LANGUAGE_CODES.items()):
            print("{code}\t{language}".format(code=code, language=language))
        return 0

    if not validate(args):
        return 1

    try:
        subtitle_file_path = generate_subtitles(
            source_path=args.source_path,
            concurrency=args.concurrency,
            src_language=args.src_language,
            dst_language=args.dst_language,
            api_key=args.api_key,
            subtitle_file_format=args.format,
            output=args.output,
        )
        print("Subtitles file created at {}".format(subtitle_file_path))
    except KeyboardInterrupt:
        return 1

    return 0
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'source_path'", ",", "help", "=", "\"Path to the video or audio file to subtitle\"", ",", "nargs", "=", "'?'", ")", "parser", ".", "add_argument", "(", "'-C'", ",", "'--concurrency'", ",", "help", "=", "\"Number of concurrent API requests to make\"", ",", "type", "=", "int", ",", "default", "=", "DEFAULT_CONCURRENCY", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "help", "=", "\"Output path for subtitles (by default, subtitles are saved in \\\n the same directory and name as the source path)\"", ")", "parser", ".", "add_argument", "(", "'-F'", ",", "'--format'", ",", "help", "=", "\"Destination subtitle format\"", ",", "default", "=", "DEFAULT_SUBTITLE_FORMAT", ")", "parser", ".", "add_argument", "(", "'-S'", ",", "'--src-language'", ",", "help", "=", "\"Language spoken in source file\"", ",", "default", "=", "DEFAULT_SRC_LANGUAGE", ")", "parser", ".", "add_argument", "(", "'-D'", ",", "'--dst-language'", ",", "help", "=", "\"Desired language for the subtitles\"", ",", "default", "=", "DEFAULT_DST_LANGUAGE", ")", "parser", ".", "add_argument", "(", "'-K'", ",", "'--api-key'", ",", "help", "=", "\"The Google Translate API key to be used. \\\n (Required for subtitle translation)\"", ")", "parser", ".", "add_argument", "(", "'--list-formats'", ",", "help", "=", "\"List all available subtitle formats\"", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--list-languages'", ",", "help", "=", "\"List all available source/destination languages\"", ",", "action", "=", "'store_true'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "list_formats", ":", "print", "(", "\"List of formats:\"", ")", "for", "subtitle_format", "in", "FORMATTERS", ":", "print", "(", "\"{format}\"", ".", "format", "(", "format", "=", "subtitle_format", ")", ")", "return", "0", "if", "args", ".", "list_languages", ":", "print", "(", "\"List of all languages:\"", ")", "for", "code", ",", "language", "in", "sorted", "(", "LANGUAGE_CODES", ".", "items", "(", ")", ")", ":", "print", "(", "\"{code}\\t{language}\"", ".", "format", "(", "code", "=", "code", ",", "language", "=", "language", ")", ")", "return", "0", "if", "not", "validate", "(", "args", ")", ":", "return", "1", "try", ":", "subtitle_file_path", "=", "generate_subtitles", "(", "source_path", "=", "args", ".", "source_path", ",", "concurrency", "=", "args", ".", "concurrency", ",", "src_language", "=", "args", ".", "src_language", ",", "dst_language", "=", "args", ".", "dst_language", ",", "api_key", "=", "args", ".", "api_key", ",", "subtitle_file_format", "=", "args", ".", "format", ",", "output", "=", "args", ".", "output", ",", ")", "print", "(", "\"Subtitles file created at {}\"", ".", "format", "(", "subtitle_file_path", ")", ")", "except", "KeyboardInterrupt", ":", "return", "1", "return", "0" ]
Run autosub as a command-line program.
[ "Run", "autosub", "as", "a", "command", "-", "line", "program", "." ]
python
train
TomasTomecek/sen
sen/tui/ui.py
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L131-L147
def pick_and_display_buffer(self, i):
    """
    pick i-th buffer from list and display it

    :param i: int
    :return: None
    """
    if len(self.buffers) == 1:
        # we don't need to display anything
        # listing is already displayed
        return
    else:
        try:
            self.display_buffer(self.buffers[i])
        except IndexError:
            # i > len
            self.display_buffer(self.buffers[0])
[ "def", "pick_and_display_buffer", "(", "self", ",", "i", ")", ":", "if", "len", "(", "self", ".", "buffers", ")", "==", "1", ":", "# we don't need to display anything", "# listing is already displayed", "return", "else", ":", "try", ":", "self", ".", "display_buffer", "(", "self", ".", "buffers", "[", "i", "]", ")", "except", "IndexError", ":", "# i > len", "self", ".", "display_buffer", "(", "self", ".", "buffers", "[", "0", "]", ")" ]
pick i-th buffer from list and display it :param i: int :return: None
[ "pick", "i", "-", "th", "buffer", "from", "list", "and", "display", "it" ]
python
train
F5Networks/f5-common-python
f5/bigip/resource.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/resource.py#L321-L335
def _check_exclusive_parameters(self, **kwargs):
    """Check for mutually exclusive attributes in kwargs.

    :raises ExclusiveAttributesPresent
    """
    if len(self._meta_data['exclusive_attributes']) > 0:
        attr_set = set(list(iterkeys(kwargs)))
        ex_set = set(self._meta_data['exclusive_attributes'][0])
        common_set = sorted(attr_set.intersection(ex_set))
        if len(common_set) > 1:
            cset = ', '.join(common_set)
            error = 'Mutually exclusive arguments submitted. ' \
                    'The following arguments cannot be set ' \
                    'together: "%s".' % cset
            raise ExclusiveAttributesPresent(error)
[ "def", "_check_exclusive_parameters", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "self", ".", "_meta_data", "[", "'exclusive_attributes'", "]", ")", ">", "0", ":", "attr_set", "=", "set", "(", "list", "(", "iterkeys", "(", "kwargs", ")", ")", ")", "ex_set", "=", "set", "(", "self", ".", "_meta_data", "[", "'exclusive_attributes'", "]", "[", "0", "]", ")", "common_set", "=", "sorted", "(", "attr_set", ".", "intersection", "(", "ex_set", ")", ")", "if", "len", "(", "common_set", ")", ">", "1", ":", "cset", "=", "', '", ".", "join", "(", "common_set", ")", "error", "=", "'Mutually exclusive arguments submitted. '", "'The following arguments cannot be set '", "'together: \"%s\".'", "%", "cset", "raise", "ExclusiveAttributesPresent", "(", "error", ")" ]
Check for mutually exclusive attributes in kwargs. :raises ExclusiveAttributesPresent
[ "Check", "for", "mutually", "exclusive", "attributes", "in", "kwargs", "." ]
python
train
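The core check reduces to a set intersection; a minimal standalone sketch with made-up attribute names:

```python
exclusive = {'address', 'fqdn'}  # hypothetical mutually exclusive pair
kwargs = {'address': '10.0.0.1', 'fqdn': 'host.example.com', 'name': 'n1'}

common = sorted(set(kwargs).intersection(exclusive))
if len(common) > 1:
    print('Mutually exclusive arguments submitted. The following arguments '
          'cannot be set together: "%s".' % ', '.join(common))
```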
niccokunzmann/ObservableList
ObservableList/__init__.py
https://github.com/niccokunzmann/ObservableList/blob/e5f6a93d82d2d13b248c7840ae74f98a4ba58c90/ObservableList/__init__.py#L179-L182
def _notify_add(self, slice_):
    """Notify about an AddChange."""
    change = AddChange(self, slice_)
    self.notify_observers(change)
[ "def", "_notify_add", "(", "self", ",", "slice_", ")", ":", "change", "=", "AddChange", "(", "self", ",", "slice_", ")", "self", ".", "notify_observers", "(", "change", ")" ]
Notify about an AddChange.
[ "Notify", "about", "an", "AddChange", "." ]
python
train
panosl/django-currencies
currencies/templatetags/currency.py
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/templatetags/currency.py#L47-L56
def memoize_nullary(f):
    """
    Memoizes a function that takes no arguments.
    The memoization lasts only as long as we hold a reference to the returned function.
    """
    def func():
        if not hasattr(func, 'retval'):
            func.retval = f()
        return func.retval
    return func
[ "def", "memoize_nullary", "(", "f", ")", ":", "def", "func", "(", ")", ":", "if", "not", "hasattr", "(", "func", ",", "'retval'", ")", ":", "func", ".", "retval", "=", "f", "(", ")", "return", "func", ".", "retval", "return", "func" ]
Memoizes a function that takes no arguments. The memoization lasts only as long as we hold a reference to the returned function.
[ "Memoizes", "a", "function", "that", "takes", "no", "arguments", ".", "The", "memoization", "lasts", "only", "as", "long", "as", "we", "hold", "a", "reference", "to", "the", "returned", "function", "." ]
python
train
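A quick demonstration of the cache-on-the-wrapper behaviour; the random example is mine:

```python
import random

def memoize_nullary(f):
    def func():
        if not hasattr(func, 'retval'):
            func.retval = f()
        return func.retval
    return func

@memoize_nullary
def rand_once():
    return random.random()

assert rand_once() == rand_once()  # f() ran once; the result is cached
```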
CodyKochmann/graphdb
graphdb/RamGraphDB.py
https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L223-L235
def delete_item(self, item):
    ''' removes an item from the db '''
    for relation, dst in self.relations_of(item, True):
        self.delete_relation(item, relation, dst)
        #print(item, relation, dst)
    for src, relation in self.relations_to(item, True):
        self.delete_relation(src, relation, item)
        #print(src, relation, item)
    h = self._item_hash(item)
    if item in self:
        #print('deleting item:', item)
        self.nodes[h].clear()
        del self.nodes[h]
[ "def", "delete_item", "(", "self", ",", "item", ")", ":", "for", "relation", ",", "dst", "in", "self", ".", "relations_of", "(", "item", ",", "True", ")", ":", "self", ".", "delete_relation", "(", "item", ",", "relation", ",", "dst", ")", "#print(item, relation, dst)", "for", "src", ",", "relation", "in", "self", ".", "relations_to", "(", "item", ",", "True", ")", ":", "self", ".", "delete_relation", "(", "src", ",", "relation", ",", "item", ")", "#print(src, relation, item)", "h", "=", "self", ".", "_item_hash", "(", "item", ")", "if", "item", "in", "self", ":", "#print('deleting item:', item)", "self", ".", "nodes", "[", "h", "]", ".", "clear", "(", ")", "del", "self", ".", "nodes", "[", "h", "]" ]
removes an item from the db
[ "removes", "an", "item", "from", "the", "db" ]
python
train
doraemonext/wechat-python-sdk
wechat_sdk/basic.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/basic.py#L226-L235
def response_image(self, media_id):
    """Assemble the image identified by media_id into response data that
    meets the WeChat server's requirements.

    :param media_id: MediaID of the image
    :return: XML response data conforming to the WeChat server's requirements
    """
    self._check_parse()
    response = ImageReply(message=self.__message, media_id=media_id).render()
    return self._encrypt_response(response)
[ "def", "response_image", "(", "self", ",", "media_id", ")", ":", "self", ".", "_check_parse", "(", ")", "response", "=", "ImageReply", "(", "message", "=", "self", ".", "__message", ",", "media_id", "=", "media_id", ")", ".", "render", "(", ")", "return", "self", ".", "_encrypt_response", "(", "response", ")" ]
Assemble the image identified by media_id into response data that meets the WeChat server's requirements. :param media_id: MediaID of the image :return: XML response data conforming to the WeChat server's requirements
[ "将", "media_id", "所代表的图片组装为符合微信服务器要求的响应数据", ":", "param", "media_id", ":", "图片的", "MediaID", ":", "return", ":", "符合微信服务器要求的", "XML", "响应数据" ]
python
valid
Grunny/zap-cli
zapcli/zap_helper.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/zap_helper.py#L293-L307
def disable_scanners(self, scanners):
    """
    Disable the provided scanners by group and/or IDs.
    """
    scanner_ids = []
    for scanner in scanners:
        if scanner in self.scanner_groups:
            self.disable_scanners_by_group(scanner)
        elif scanner.isdigit():
            scanner_ids.append(scanner)
        else:
            raise ZAPError('Invalid scanner "{0}" provided. Must be a valid group or numeric ID.'.format(scanner))

    if scanner_ids:
        self.disable_scanners_by_ids(scanner_ids)
[ "def", "disable_scanners", "(", "self", ",", "scanners", ")", ":", "scanner_ids", "=", "[", "]", "for", "scanner", "in", "scanners", ":", "if", "scanner", "in", "self", ".", "scanner_groups", ":", "self", ".", "disable_scanners_by_group", "(", "scanner", ")", "elif", "scanner", ".", "isdigit", "(", ")", ":", "scanner_ids", ".", "append", "(", "scanner", ")", "else", ":", "raise", "ZAPError", "(", "'Invalid scanner \"{0}\" provided. Must be a valid group or numeric ID.'", ".", "format", "(", "scanner", ")", ")", "if", "scanner_ids", ":", "self", ".", "disable_scanners_by_ids", "(", "scanner_ids", ")" ]
Disable the provided scanners by group and/or IDs.
[ "Enable", "the", "provided", "scanners", "by", "group", "and", "/", "or", "IDs", "." ]
python
train
ska-sa/katcp-python
katcp/inspecting_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/inspecting_client.py#L707-L769
def inspect_sensors(self, name=None, timeout=None):
    """Inspect all or one sensor on the device. Update sensors index.

    Parameters
    ----------
    name : str or None, optional
        Name of the sensor or None to get all sensors.
    timeout : float or None, optional
        Timeout for sensors inspection, None for no timeout

    Returns
    -------
    Tornado future that resolves with:

    changes : :class:`~katcp.core.AttrDict`
        AttrDict with keys ``added`` and ``removed`` (of type
        :class:`set`), listing the sensors that have been added or removed
        respectively. Modified sensors are listed in both. If there are
        no changes, returns ``None`` instead.

        Example structure:

        {'added': set(['sens1', 'sens2']),
         'removed': set(['sens10', 'sens20'])}
    """
    if name is None:
        msg = katcp.Message.request('sensor-list')
    else:
        msg = katcp.Message.request('sensor-list', name)

    reply, informs = yield self.katcp_client.future_request(
        msg, timeout=timeout)
    self._logger.debug('{} received {} sensor-list informs, reply: {}'
                       .format(self.bind_address_string, len(informs), reply))
    if not reply.reply_ok():
        # If an unknown sensor is specified the desired result is to return
        # an empty list, even though the request will fail
        if name is None or 'Unknown sensor' not in reply.arguments[1]:
            raise SyncError('Error reply during sync process: {}'
                            .format(reply))

    sensors_old = set(self._sensors_index.keys())
    sensors_updated = set()
    for msg in informs:
        sen_name = msg.arguments[0]
        sensors_updated.add(sen_name)
        sen = {'description': msg.arguments[1],
               'units': msg.arguments[2],
               'sensor_type': msg.arguments[3],
               'params': msg.arguments[4:]}
        self._update_index(self._sensors_index, sen_name, sen)

    added, removed = self._difference(
        sensors_old, sensors_updated, name, self._sensors_index)
    for sensor_name in removed:
        if sensor_name in self._sensor_object_cache:
            del self._sensor_object_cache[sensor_name]
    if added or removed:
        raise Return(AttrDict(added=added, removed=removed))
[ "def", "inspect_sensors", "(", "self", ",", "name", "=", "None", ",", "timeout", "=", "None", ")", ":", "if", "name", "is", "None", ":", "msg", "=", "katcp", ".", "Message", ".", "request", "(", "'sensor-list'", ")", "else", ":", "msg", "=", "katcp", ".", "Message", ".", "request", "(", "'sensor-list'", ",", "name", ")", "reply", ",", "informs", "=", "yield", "self", ".", "katcp_client", ".", "future_request", "(", "msg", ",", "timeout", "=", "timeout", ")", "self", ".", "_logger", ".", "debug", "(", "'{} received {} sensor-list informs, reply: {}'", ".", "format", "(", "self", ".", "bind_address_string", ",", "len", "(", "informs", ")", ",", "reply", ")", ")", "if", "not", "reply", ".", "reply_ok", "(", ")", ":", "# If an unknown sensor is specified the desired result is to return", "# an empty list, even though the request will fail", "if", "name", "is", "None", "or", "'Unknown sensor'", "not", "in", "reply", ".", "arguments", "[", "1", "]", ":", "raise", "SyncError", "(", "'Error reply during sync process: {}'", ".", "format", "(", "reply", ")", ")", "sensors_old", "=", "set", "(", "self", ".", "_sensors_index", ".", "keys", "(", ")", ")", "sensors_updated", "=", "set", "(", ")", "for", "msg", "in", "informs", ":", "sen_name", "=", "msg", ".", "arguments", "[", "0", "]", "sensors_updated", ".", "add", "(", "sen_name", ")", "sen", "=", "{", "'description'", ":", "msg", ".", "arguments", "[", "1", "]", ",", "'units'", ":", "msg", ".", "arguments", "[", "2", "]", ",", "'sensor_type'", ":", "msg", ".", "arguments", "[", "3", "]", ",", "'params'", ":", "msg", ".", "arguments", "[", "4", ":", "]", "}", "self", ".", "_update_index", "(", "self", ".", "_sensors_index", ",", "sen_name", ",", "sen", ")", "added", ",", "removed", "=", "self", ".", "_difference", "(", "sensors_old", ",", "sensors_updated", ",", "name", ",", "self", ".", "_sensors_index", ")", "for", "sensor_name", "in", "removed", ":", "if", "sensor_name", "in", "self", ".", "_sensor_object_cache", ":", "del", "self", ".", "_sensor_object_cache", "[", "sensor_name", "]", "if", "added", "or", "removed", ":", "raise", "Return", "(", "AttrDict", "(", "added", "=", "added", ",", "removed", "=", "removed", ")", ")" ]
Inspect all or one sensor on the device. Update sensors index. Parameters ---------- name : str or None, optional Name of the sensor or None to get all sensors. timeout : float or None, optional Timeout for sensors inspection, None for no timeout Returns ------- Tornado future that resolves with: changes : :class:`~katcp.core.AttrDict` AttrDict with keys ``added`` and ``removed`` (of type :class:`set`), listing the sensors that have been added or removed respectively. Modified sensors are listed in both. If there are no changes, returns ``None`` instead. Example structure: {'added': set(['sens1', 'sens2']), 'removed': set(['sens10', 'sens20'])}
[ "Inspect", "all", "or", "one", "sensor", "on", "the", "device", ".", "Update", "sensors", "index", "." ]
python
train
mitsei/dlkit
dlkit/services/logging_.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/logging_.py#L424-L432
def use_plenary_log_view(self):
    """Pass through to provider LogEntryLogSession.use_plenary_log_view"""
    self._log_view = PLENARY
    # self._get_provider_session('log_entry_log_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_log_view()
        except AttributeError:
            pass
[ "def", "use_plenary_log_view", "(", "self", ")", ":", "self", ".", "_log_view", "=", "PLENARY", "# self._get_provider_session('log_entry_log_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_plenary_log_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider LogEntryLogSession.use_plenary_log_view
[ "Pass", "through", "to", "provider", "LogEntryLogSession", ".", "use_plenary_log_view" ]
python
train
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L3067-L3073
def remove_api_gateway_logs(self, project_name):
    """
    Remove all logs that are assigned to a given rest api id.
    """
    for rest_api in self.get_rest_apis(project_name):
        for stage in self.apigateway_client.get_stages(restApiId=rest_api['id'])['item']:
            self.remove_log_group('API-Gateway-Execution-Logs_{}/{}'.format(rest_api['id'], stage['stageName']))
[ "def", "remove_api_gateway_logs", "(", "self", ",", "project_name", ")", ":", "for", "rest_api", "in", "self", ".", "get_rest_apis", "(", "project_name", ")", ":", "for", "stage", "in", "self", ".", "apigateway_client", ".", "get_stages", "(", "restApiId", "=", "rest_api", "[", "'id'", "]", ")", "[", "'item'", "]", ":", "self", ".", "remove_log_group", "(", "'API-Gateway-Execution-Logs_{}/{}'", ".", "format", "(", "rest_api", "[", "'id'", "]", ",", "stage", "[", "'stageName'", "]", ")", ")" ]
Removed all logs that are assigned to a given rest api id.
[ "Removed", "all", "logs", "that", "are", "assigned", "to", "a", "given", "rest", "api", "id", "." ]
python
train
foliant-docs/foliantcontrib.mkdocs
foliant/backends/mkdocs.py
https://github.com/foliant-docs/foliantcontrib.mkdocs/blob/5f71a47139ab1cb630f1b61d4cef1c0657001272/foliant/backends/mkdocs.py#L108-L147
def _get_pages_with_headings(self, pages: Dict) -> Dict:
    '''Update ``pages`` section of ``mkdocs.yml`` file with the content
    of top-level headings of source Markdown files.

    :param pages: Dictionary with the data of ``pages`` section

    :returns: Updated dictionary
    '''

    def _recursive_process_pages(pages_subset, parent_is_dict):
        if isinstance(pages_subset, dict):
            new_pages_subset = {}

            for key, value in pages_subset.items():
                if not key:
                    key = self._mkdocs_config.get('default_subsection_title', '…')

                new_pages_subset[key] = _recursive_process_pages(value, True)

        elif isinstance(pages_subset, list):
            new_pages_subset = []

            for item in pages_subset:
                new_pages_subset.append(_recursive_process_pages(item, False))

        elif isinstance(pages_subset, str):
            if not parent_is_dict:
                new_pages_subset = self._get_page_with_optional_heading(pages_subset)
            else:
                new_pages_subset = pages_subset

        else:
            new_pages_subset = pages_subset

        return new_pages_subset

    new_pages = _recursive_process_pages(pages, False)

    self.logger.debug(f'All pages with their headings: {new_pages}')

    return new_pages
[ "def", "_get_pages_with_headings", "(", "self", ",", "pages", ":", "Dict", ")", "->", "Dict", ":", "def", "_recursive_process_pages", "(", "pages_subset", ",", "parent_is_dict", ")", ":", "if", "isinstance", "(", "pages_subset", ",", "dict", ")", ":", "new_pages_subset", "=", "{", "}", "for", "key", ",", "value", "in", "pages_subset", ".", "items", "(", ")", ":", "if", "not", "key", ":", "key", "=", "self", ".", "_mkdocs_config", ".", "get", "(", "'default_subsection_title'", ",", "'…')", "", "new_pages_subset", "[", "key", "]", "=", "_recursive_process_pages", "(", "value", ",", "True", ")", "elif", "isinstance", "(", "pages_subset", ",", "list", ")", ":", "new_pages_subset", "=", "[", "]", "for", "item", "in", "pages_subset", ":", "new_pages_subset", ".", "append", "(", "_recursive_process_pages", "(", "item", ",", "False", ")", ")", "elif", "isinstance", "(", "pages_subset", ",", "str", ")", ":", "if", "not", "parent_is_dict", ":", "new_pages_subset", "=", "self", ".", "_get_page_with_optional_heading", "(", "pages_subset", ")", "else", ":", "new_pages_subset", "=", "pages_subset", "else", ":", "new_pages_subset", "=", "pages_subset", "return", "new_pages_subset", "new_pages", "=", "_recursive_process_pages", "(", "pages", ",", "False", ")", "self", ".", "logger", ".", "debug", "(", "f'All pages with their headings: {new_pages}'", ")", "return", "new_pages" ]
Update ``pages`` section of ``mkdocs.yml`` file with the content of top-level headings of source Markdown files. param pages: Dictionary with the data of ``pages`` section returns: Updated dictionary
[ "Update", "pages", "section", "of", "mkdocs", ".", "yml", "file", "with", "the", "content", "of", "top", "-", "level", "headings", "of", "source", "Markdown", "files", "." ]
python
train
Deathnerd/pyterp
pyterp/__init__.py
https://github.com/Deathnerd/pyterp/blob/baf2957263685f03873f368226f5752da4e51f08/pyterp/__init__.py#L134-L142
def _increment_pointer(self): """ Increments the internal tape counter by 1 :raises PointerOutOfProgramRange: Raises an error if the result of incrementing the pointer would bring it outside of the tape space on the right """ self.pointer += 1 if self.pointer >= len(self.tape): raise PointerOutOfProgramRange("Pointer exceeded right-hand bound of tape")
[ "def", "_increment_pointer", "(", "self", ")", ":", "self", ".", "pointer", "+=", "1", "if", "self", ".", "pointer", ">=", "len", "(", "self", ".", "tape", ")", ":", "raise", "PointerOutOfProgramRange", "(", "\"Pointer exceeded right-hand bound of tape\"", ")" ]
Increments the internal tape counter by 1 :raises PointerOutOfProgramRange: Raises an error if the result of incrementing the pointer would bring it outside of the tape space on the right
[ "Increments", "the", "internal", "tape", "counter", "by", "1", ":", "raises", "PointerOutOfProgramRange", ":", "Raises", "an", "error", "if", "the", "result", "of", "incrementing", "the", "pointer", "would", "bring", "it", "outside", "of", "the", "tape", "space", "on", "the", "right" ]
python
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/parser_context.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/parser_context.py#L65-L68
def iriref_to_str(self, ref: ShExDocParser.IRIREF) -> str: """ IRIREF: '<' (~[\u0000-\u0020=<>\"{}|^`\\] | UCHAR)* '>' """ rval = ref.getText()[1:-1].encode('utf-8').decode('unicode-escape') return rval if ':' in rval or not self.base else self.base.val + rval
[ "def", "iriref_to_str", "(", "self", ",", "ref", ":", "ShExDocParser", ".", "IRIREF", ")", "->", "str", ":", "rval", "=", "ref", ".", "getText", "(", ")", "[", "1", ":", "-", "1", "]", ".", "encode", "(", "'utf-8'", ")", ".", "decode", "(", "'unicode-escape'", ")", "return", "rval", "if", "':'", "in", "rval", "or", "not", "self", ".", "base", "else", "self", ".", "base", ".", "val", "+", "rval" ]
IRIREF: '<' (~[\u0000-\u0020=<>\"{}|^`\\] | UCHAR)* '>'
[ "IRIREF", ":", "<", "(", "~", "[", "\\", "u0000", "-", "\\", "u0020", "=", "<", ">", "\\", "{}", "|^", "\\\\", "]", "|", "UCHAR", ")", "*", ">" ]
python
train
deschler/django-modeltranslation
modeltranslation/utils.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/utils.py#L126-L149
def auto_populate(mode='all'): """ Overrides translation fields population mode (population mode decides which unprovided translations will be filled during model construction / loading). Example: with auto_populate('all'): s = Slugged.objects.create(title='foo') s.title_en == 'foo' // True s.title_de == 'foo' // True This method may be used to ensure consistency loading untranslated fixtures, with non-default language active: with auto_populate('required'): call_command('loaddata', 'fixture.json') """ current_population_mode = settings.AUTO_POPULATE settings.AUTO_POPULATE = mode try: yield finally: settings.AUTO_POPULATE = current_population_mode
[ "def", "auto_populate", "(", "mode", "=", "'all'", ")", ":", "current_population_mode", "=", "settings", ".", "AUTO_POPULATE", "settings", ".", "AUTO_POPULATE", "=", "mode", "try", ":", "yield", "finally", ":", "settings", ".", "AUTO_POPULATE", "=", "current_population_mode" ]
Overrides translation fields population mode (population mode decides which unprovided translations will be filled during model construction / loading). Example: with auto_populate('all'): s = Slugged.objects.create(title='foo') s.title_en == 'foo' // True s.title_de == 'foo' // True This method may be used to ensure consistency when loading untranslated fixtures with a non-default language active: with auto_populate('required'): call_command('loaddata', 'fixture.json')
[ "Overrides", "translation", "fields", "population", "mode", "(", "population", "mode", "decides", "which", "unprovided", "translations", "will", "be", "filled", "during", "model", "construction", "/", "loading", ")", "." ]
python
train
zsims/dic
dic/container.py
https://github.com/zsims/dic/blob/bb4e615c236e6cfe804bd7286a5af081007325ce/dic/container.py#L200-L209
def register_callback(self, class_type, callback, component_scope=scope.InstancePerDependency, register_as=None): """ Registers the given class for creation via the given callback. :param class_type: The class type. :param callback: The function to call to create/get an instance, of the form fn(component_context) :param component_scope: The scope of the component, defaults to instance per dependency. :param register_as: The types to register the class as, defaults to the given class_type. """ registration = _CallbackRegistration(callback, component_scope()) self._register(class_type, registration, register_as)
[ "def", "register_callback", "(", "self", ",", "class_type", ",", "callback", ",", "component_scope", "=", "scope", ".", "InstancePerDependency", ",", "register_as", "=", "None", ")", ":", "registration", "=", "_CallbackRegistration", "(", "callback", ",", "component_scope", "(", ")", ")", "self", ".", "_register", "(", "class_type", ",", "registration", ",", "register_as", ")" ]
Registers the given class for creation via the given callback. :param class_type: The class type. :param callback: The function to call to create/get an instance, of the form fn(component_context) :param component_scope: The scope of the component, defaults to instance per dependency. :param register_as: The types to register the class as, defaults to the given class_type.
[ "Registers", "the", "given", "class", "for", "creation", "via", "the", "given", "callback", ".", ":", "param", "class_type", ":", "The", "class", "type", ".", ":", "param", "callback", ":", "The", "function", "to", "call", "to", "create", "/", "get", "an", "instance", "of", "the", "form", "fn", "(", "component_context", ")", ":", "param", "component_scope", ":", "The", "scope", "of", "the", "component", "defaults", "to", "instance", "per", "dependency", ".", ":", "param", "register_as", ":", "The", "types", "to", "register", "the", "class", "as", "defaults", "to", "the", "given", "class_type", "." ]
python
train
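A minimal usage sketch for register_callback above. Only register_callback and scope.InstancePerDependency come from the record; the ContainerBuilder, build and resolve names are assumptions about dic's surrounding API:

import dic

class Clock(object):
    """Hypothetical component used only for illustration."""
    pass

builder = dic.container.ContainerBuilder()  # assumed entry point
# The callback receives the component context and must return the instance.
builder.register_callback(Clock, lambda ctx: Clock(),
                          component_scope=dic.scope.InstancePerDependency)
container = builder.build()
clock = container.resolve(Clock)  # invokes the callback to build a Clock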
junaruga/rpm-py-installer
install.py
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1841-L1857
def tar_extract(cls, tar_comp_file_path): """Extract tar.gz or tar bz2 file. It behaves like - tar xzf tar_gz_file_path - tar xjf tar_bz2_file_path It raises tarfile.ReadError if the file is broken. """ try: with contextlib.closing(tarfile.open(tar_comp_file_path)) as tar: tar.extractall() except tarfile.ReadError as e: message_format = ( 'Extract failed: ' 'tar_comp_file_path: {0}, reason: {1}' ) raise InstallError(message_format.format(tar_comp_file_path, e))
[ "def", "tar_extract", "(", "cls", ",", "tar_comp_file_path", ")", ":", "try", ":", "with", "contextlib", ".", "closing", "(", "tarfile", ".", "open", "(", "tar_comp_file_path", ")", ")", "as", "tar", ":", "tar", ".", "extractall", "(", ")", "except", "tarfile", ".", "ReadError", "as", "e", ":", "message_format", "=", "(", "'Extract failed: '", "'tar_comp_file_path: {0}, reason: {1}'", ")", "raise", "InstallError", "(", "message_format", ".", "format", "(", "tar_comp_file_path", ",", "e", ")", ")" ]
Extract tar.gz or tar.bz2 file. It behaves like - tar xzf tar_gz_file_path - tar xjf tar_bz2_file_path It raises tarfile.ReadError if the file is broken.
[ "Extract", "tar", ".", "gz", "or", "tar", ".", "bz2", "file", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchartwidget/xchartscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartscene.py#L676-L681
def setSceneRect( self, *args ): """ Overloads the set scene rect to handle rebuild information. """ super(XChartScene, self).setSceneRect(*args) self._dirty = True
[ "def", "setSceneRect", "(", "self", ",", "*", "args", ")", ":", "super", "(", "XChartScene", ",", "self", ")", ".", "setSceneRect", "(", "*", "args", ")", "self", ".", "_dirty", "=", "True" ]
Overloads the set scene rect to handle rebuild information.
[ "Overloads", "the", "set", "scene", "rect", "to", "handle", "rebuild", "information", "." ]
python
train
yeraydiazdiaz/lunr.py
lunr/vector.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/vector.py#L152-L158
def similarity(self, other): """Calculates the cosine similarity between this vector and another vector.""" if self.magnitude == 0 or other.magnitude == 0: return 0 return self.dot(other) / self.magnitude
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "if", "self", ".", "magnitude", "==", "0", "or", "other", ".", "magnitude", "==", "0", ":", "return", "0", "return", "self", ".", "dot", "(", "other", ")", "/", "self", ".", "magnitude" ]
Calculates the cosine similarity between this vector and another vector.
[ "Calculates", "the", "cosine", "similarity", "between", "this", "vector", "and", "another", "vector", "." ]
python
train
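Note that the method above divides the dot product only by this vector's magnitude, so it is not symmetric. A standalone re-implementation of that arithmetic (plain Python, not the lunr.py Vector API) makes the asymmetry visible:

import math

def similarity(a, b):
    # dot(a, b) / |a|, mirroring the method above; |b| is never used
    dot = sum(x * y for x, y in zip(a, b))
    mag_a = math.sqrt(sum(x * x for x in a))
    return 0 if mag_a == 0 else dot / mag_a

print(similarity([2.0, 0.0], [1.0, 0.0]))  # 1.0
print(similarity([1.0, 0.0], [2.0, 0.0]))  # 2.0 -- order matters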
proteanhq/protean
src/protean/utils/importlib.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/utils/importlib.py#L20-L30
def import_from_string(val): """ Attempt to import a class from a string representation. """ try: module_path, class_name = val.rsplit('.', 1) module = import_module(module_path) return getattr(module, class_name) except (ImportError, AttributeError) as e: msg = f"Could not import {val}. {e.__class__.__name__}: {e}" raise ImportError(msg)
[ "def", "import_from_string", "(", "val", ")", ":", "try", ":", "module_path", ",", "class_name", "=", "val", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "import_module", "(", "module_path", ")", "return", "getattr", "(", "module", ",", "class_name", ")", "except", "(", "ImportError", ",", "AttributeError", ")", "as", "e", ":", "msg", "=", "f\"Could not import {val}. {e.__class__.__name__}: {e}\"", "raise", "ImportError", "(", "msg", ")" ]
Attempt to import a class from a string representation.
[ "Attempt", "to", "import", "a", "class", "from", "a", "string", "representation", "." ]
python
train
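A quick usage sketch, assuming the helper is importable along the record's file path; the target is a standard-library class so the happy path runs anywhere:

from protean.utils.importlib import import_from_string

OrderedDict = import_from_string('collections.OrderedDict')
print(OrderedDict(a=1))  # the class is resolved dynamically and is callable

try:
    import_from_string('collections.NoSuchName')
except ImportError as exc:
    # The underlying AttributeError is wrapped with a readable message
    print(exc)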
pantsbuild/pants
src/python/pants/engine/scheduler.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/scheduler.py#L502-L519
def product_request(self, product, subjects): """Executes a request for a single product for some subjects, and returns the products. :param class product: A product type for the request. :param list subjects: A list of subjects or Params instances for the request. :returns: A list of the requested products, with length match len(subjects). """ request = self.execution_request([product], subjects) returns, throws = self.execute(request) # Throw handling. if throws: unique_exceptions = tuple({t.exc for _, t in throws}) self._trace_on_error(unique_exceptions, request) # Everything is a Return: we rely on the fact that roots are ordered to preserve subject # order in output lists. return [ret.value for _, ret in returns]
[ "def", "product_request", "(", "self", ",", "product", ",", "subjects", ")", ":", "request", "=", "self", ".", "execution_request", "(", "[", "product", "]", ",", "subjects", ")", "returns", ",", "throws", "=", "self", ".", "execute", "(", "request", ")", "# Throw handling.", "if", "throws", ":", "unique_exceptions", "=", "tuple", "(", "{", "t", ".", "exc", "for", "_", ",", "t", "in", "throws", "}", ")", "self", ".", "_trace_on_error", "(", "unique_exceptions", ",", "request", ")", "# Everything is a Return: we rely on the fact that roots are ordered to preserve subject", "# order in output lists.", "return", "[", "ret", ".", "value", "for", "_", ",", "ret", "in", "returns", "]" ]
Executes a request for a single product for some subjects, and returns the products. :param class product: A product type for the request. :param list subjects: A list of subjects or Params instances for the request. :returns: A list of the requested products, with length matching len(subjects).
[ "Executes", "a", "request", "for", "a", "single", "product", "for", "some", "subjects", "and", "returns", "the", "products", "." ]
python
train
timkpaine/pyEX
pyEX/marketdata/ws.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/marketdata/ws.py#L114-L118
def securityEventWS(symbols=None, on_data=None): '''https://iextrading.com/developer/docs/#security-event''' symbols = _strToList(symbols) sendinit = ({'symbols': symbols, 'channels': ['securityevent']},) return _stream(_wsURL('deep'), sendinit, on_data)
[ "def", "securityEventWS", "(", "symbols", "=", "None", ",", "on_data", "=", "None", ")", ":", "symbols", "=", "_strToList", "(", "symbols", ")", "sendinit", "=", "(", "{", "'symbols'", ":", "symbols", ",", "'channels'", ":", "[", "'securityevent'", "]", "}", ",", ")", "return", "_stream", "(", "_wsURL", "(", "'deep'", ")", ",", "sendinit", ",", "on_data", ")" ]
https://iextrading.com/developer/docs/#security-event
[ "https", ":", "//", "iextrading", ".", "com", "/", "developer", "/", "docs", "/", "#security", "-", "event" ]
python
valid
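A hedged subscription sketch; the import path follows the record's file path, and the shape of each delivered message is an assumption based on IEX's DEEP documentation:

from pyEX.marketdata.ws import securityEventWS

def on_data(msg):
    # Called once per 'securityevent' message received on the DEEP channel
    print(msg)

securityEventWS(symbols='AAPL', on_data=on_data)  # blocks while streaming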
crunchyroll/ef-open
efopen/ef_cf_diff.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_cf_diff.py#L277-L297
def evaluate_service_changes(services, envs, repo_root, func): """ Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped. """ for service_name, service in services.iteritems(): for env_category in service['environments']: if env_category not in get_env_categories(envs): logger.debug('Skipping not-included environment `%s` for service `%s`', env_category, service_name) continue environment = generate_test_environment_name(env_category) cf_client = get_cloudformation_client(service_name, environment) func(service_name, service, environment, cf_client, repo_root)
[ "def", "evaluate_service_changes", "(", "services", ",", "envs", ",", "repo_root", ",", "func", ")", ":", "for", "service_name", ",", "service", "in", "services", ".", "iteritems", "(", ")", ":", "for", "env_category", "in", "service", "[", "'environments'", "]", ":", "if", "env_category", "not", "in", "get_env_categories", "(", "envs", ")", ":", "logger", ".", "debug", "(", "'Skipping not-included environment `%s` for service `%s`'", ",", "env_category", ",", "service_name", ")", "continue", "environment", "=", "generate_test_environment_name", "(", "env_category", ")", "cf_client", "=", "get_cloudformation_client", "(", "service_name", ",", "environment", ")", "func", "(", "service_name", ",", "service", ",", "environment", ",", "cf_client", ",", "repo_root", ")" ]
Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped.
[ "Given", "a", "dict", "of", "services", "and", "a", "list", "of", "environments", "apply", "the", "diff", "function", "to", "evaluate", "the", "differences", "between", "the", "target", "environments", "and", "the", "rendered", "templates", "." ]
python
train
Pirionfr/pyLinky
pylinky/client.py
https://github.com/Pirionfr/pyLinky/blob/4372496bfcdd95ccfd2f017634cf02b38a2d6fd1/pylinky/client.py#L192-L196
def fetch_data(self): """Get the latest data from Enedis.""" for t in [HOURLY, DAILY, MONTHLY, YEARLY]: self._data[t] = self.get_data_per_period(t)
[ "def", "fetch_data", "(", "self", ")", ":", "for", "t", "in", "[", "HOURLY", ",", "DAILY", ",", "MONTHLY", ",", "YEARLY", "]", ":", "self", ".", "_data", "[", "t", "]", "=", "self", ".", "get_data_per_period", "(", "t", ")" ]
Get the latest data from Enedis.
[ "Get", "the", "latest", "data", "from", "Enedis", "." ]
python
test
praekelt/vumi-http-api
vumi_http_api/resource.py
https://github.com/praekelt/vumi-http-api/blob/0d7cf1cb71794c93272c19095cf8c37f4c250a59/vumi_http_api/resource.py#L105-L115
def is_within_content_length_limit(payload, api_config): """ Check that the message content is within the configured length limit. """ length_limit = api_config.get('content_length_limit') if (length_limit is not None) and (payload["content"] is not None): content_length = len(payload["content"]) if content_length > length_limit: return "Payload content too long: %s > %s" % ( content_length, length_limit) return None
[ "def", "is_within_content_length_limit", "(", "payload", ",", "api_config", ")", ":", "length_limit", "=", "api_config", ".", "get", "(", "'content_length_limit'", ")", "if", "(", "length_limit", "is", "not", "None", ")", "and", "(", "payload", "[", "\"content\"", "]", "is", "not", "None", ")", ":", "content_length", "=", "len", "(", "payload", "[", "\"content\"", "]", ")", "if", "content_length", ">", "length_limit", ":", "return", "\"Payload content too long: %s > %s\"", "%", "(", "content_length", ",", "length_limit", ")", "return", "None" ]
Check that the message content is within the configured length limit.
[ "Check", "that", "the", "message", "content", "is", "within", "the", "configured", "length", "limit", "." ]
python
train
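The helper returns an error string on violation and None otherwise; a small check with plain dicts standing in for the payload and API config:

payload = {"content": "hello world"}  # 11 characters

print(is_within_content_length_limit(payload, {"content_length_limit": 5}))
# Payload content too long: 11 > 5
print(is_within_content_length_limit(payload, {}))  # None: no limit configured
print(is_within_content_length_limit({"content": None},
                                     {"content_length_limit": 5}))  # None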
limodou/uliweb
uliweb/orm/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L2754-L2762
def update(self, **kwargs): """ Execute update table set field = field+1 like statement """ if self.condition is not None: self.result = self.do_(self.model.table.update().where(self.condition).values(**kwargs)) else: self.result = self.do_(self.model.table.update().values(**kwargs)) return self.result
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "condition", "is", "not", "None", ":", "self", ".", "result", "=", "self", ".", "do_", "(", "self", ".", "model", ".", "table", ".", "update", "(", ")", ".", "where", "(", "self", ".", "condition", ")", ".", "values", "(", "*", "*", "kwargs", ")", ")", "else", ":", "self", ".", "result", "=", "self", ".", "do_", "(", "self", ".", "model", ".", "table", ".", "update", "(", ")", ".", "values", "(", "*", "*", "kwargs", ")", ")", "return", "self", ".", "result" ]
Execute an "update table set field = field+1"-like statement
[ "Execute", "an", "update", "table", "set", "field", "=", "field", "+", "1", "-", "like", "statement" ]
python
train
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1386-L1389
def show_firewall_rule(self, firewall_rule, **_params): """Fetches information of a certain firewall rule.""" return self.get(self.firewall_rule_path % (firewall_rule), params=_params)
[ "def", "show_firewall_rule", "(", "self", ",", "firewall_rule", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "firewall_rule_path", "%", "(", "firewall_rule", ")", ",", "params", "=", "_params", ")" ]
Fetches information of a certain firewall rule.
[ "Fetches", "information", "of", "a", "certain", "firewall", "rule", "." ]
python
train
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L623-L636
def tolineno(self): """The last line that this node appears on in the source code. :type: int or None """ if not self._astroid_fields: # can't have children lastchild = None else: lastchild = self.last_child() if lastchild is None: return self.fromlineno return lastchild.tolineno
[ "def", "tolineno", "(", "self", ")", ":", "if", "not", "self", ".", "_astroid_fields", ":", "# can't have children", "lastchild", "=", "None", "else", ":", "lastchild", "=", "self", ".", "last_child", "(", ")", "if", "lastchild", "is", "None", ":", "return", "self", ".", "fromlineno", "return", "lastchild", ".", "tolineno" ]
The last line that this node appears on in the source code. :type: int or None
[ "The", "last", "line", "that", "this", "node", "appears", "on", "in", "the", "source", "code", "." ]
python
train
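A small check of fromlineno/tolineno using astroid's public extract_node helper (assuming a reasonably recent astroid):

import astroid

node = astroid.extract_node('''
def f():
    x = 1
    return x
''')
# tolineno walks to the last child, so it reports the final line of the body
print(node.fromlineno, node.tolineno)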
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/zmq/serialize.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/serialize.py#L126-L178
def unpack_apply_message(bufs, g=None, copy=True): """unpack f,args,kwargs from buffers packed by pack_apply_message() Returns: original f,args,kwargs""" bufs = list(bufs) # allow us to pop assert len(bufs) >= 3, "not enough buffers!" if not copy: for i in range(3): bufs[i] = bufs[i].bytes cf = pickle.loads(bufs.pop(0)) sargs = list(pickle.loads(bufs.pop(0))) skwargs = dict(pickle.loads(bufs.pop(0))) # print sargs, skwargs f = uncan(cf, g) for sa in sargs: if sa.data is None: m = bufs.pop(0) if sa.getTypeDescriptor() in ('buffer', 'ndarray'): # always use a buffer, until memoryviews get sorted out sa.data = buffer(m) # disable memoryview support # if copy: # sa.data = buffer(m) # else: # sa.data = m.buffer else: if copy: sa.data = m else: sa.data = m.bytes args = uncanSequence(map(unserialize, sargs), g) kwargs = {} for k in sorted(skwargs.iterkeys()): sa = skwargs[k] if sa.data is None: m = bufs.pop(0) if sa.getTypeDescriptor() in ('buffer', 'ndarray'): # always use a buffer, until memoryviews get sorted out sa.data = buffer(m) # disable memoryview support # if copy: # sa.data = buffer(m) # else: # sa.data = m.buffer else: if copy: sa.data = m else: sa.data = m.bytes kwargs[k] = uncan(unserialize(sa), g) return f,args,kwargs
[ "def", "unpack_apply_message", "(", "bufs", ",", "g", "=", "None", ",", "copy", "=", "True", ")", ":", "bufs", "=", "list", "(", "bufs", ")", "# allow us to pop", "assert", "len", "(", "bufs", ")", ">=", "3", ",", "\"not enough buffers!\"", "if", "not", "copy", ":", "for", "i", "in", "range", "(", "3", ")", ":", "bufs", "[", "i", "]", "=", "bufs", "[", "i", "]", ".", "bytes", "cf", "=", "pickle", ".", "loads", "(", "bufs", ".", "pop", "(", "0", ")", ")", "sargs", "=", "list", "(", "pickle", ".", "loads", "(", "bufs", ".", "pop", "(", "0", ")", ")", ")", "skwargs", "=", "dict", "(", "pickle", ".", "loads", "(", "bufs", ".", "pop", "(", "0", ")", ")", ")", "# print sargs, skwargs", "f", "=", "uncan", "(", "cf", ",", "g", ")", "for", "sa", "in", "sargs", ":", "if", "sa", ".", "data", "is", "None", ":", "m", "=", "bufs", ".", "pop", "(", "0", ")", "if", "sa", ".", "getTypeDescriptor", "(", ")", "in", "(", "'buffer'", ",", "'ndarray'", ")", ":", "# always use a buffer, until memoryviews get sorted out", "sa", ".", "data", "=", "buffer", "(", "m", ")", "# disable memoryview support", "# if copy:", "# sa.data = buffer(m)", "# else:", "# sa.data = m.buffer", "else", ":", "if", "copy", ":", "sa", ".", "data", "=", "m", "else", ":", "sa", ".", "data", "=", "m", ".", "bytes", "args", "=", "uncanSequence", "(", "map", "(", "unserialize", ",", "sargs", ")", ",", "g", ")", "kwargs", "=", "{", "}", "for", "k", "in", "sorted", "(", "skwargs", ".", "iterkeys", "(", ")", ")", ":", "sa", "=", "skwargs", "[", "k", "]", "if", "sa", ".", "data", "is", "None", ":", "m", "=", "bufs", ".", "pop", "(", "0", ")", "if", "sa", ".", "getTypeDescriptor", "(", ")", "in", "(", "'buffer'", ",", "'ndarray'", ")", ":", "# always use a buffer, until memoryviews get sorted out", "sa", ".", "data", "=", "buffer", "(", "m", ")", "# disable memoryview support", "# if copy:", "# sa.data = buffer(m)", "# else:", "# sa.data = m.buffer", "else", ":", "if", "copy", ":", "sa", ".", "data", "=", "m", "else", ":", "sa", ".", "data", "=", "m", ".", "bytes", "kwargs", "[", "k", "]", "=", "uncan", "(", "unserialize", "(", "sa", ")", ",", "g", ")", "return", "f", ",", "args", ",", "kwargs" ]
unpack f,args,kwargs from buffers packed by pack_apply_message() Returns: original f,args,kwargs
[ "unpack", "f", "args", "kwargs", "from", "buffers", "packed", "by", "pack_apply_message", "()", "Returns", ":", "original", "f", "args", "kwargs" ]
python
test
senaite/senaite.lims
src/senaite/lims/setuphandlers.py
https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/setuphandlers.py#L60-L79
def pre_install(portal_setup):
    """Runs before the first import step of the *default* profile

    This handler is registered as a *pre_handler* in the generic setup profile

    :param portal_setup: SetupTool
    """
    logger.info("SENAITE LIMS pre-install handler [BEGIN]")

    # https://docs.plone.org/develop/addons/components/genericsetup.html#custom-installer-code-setuphandlers-py
    profile_id = "profile-senaite.lims:default"
    context = portal_setup._getImportContext(profile_id)
    portal = context.getSite()  # noqa

    # Only install the core once!
    qi = portal.portal_quickinstaller
    if not qi.isProductInstalled("bika.lims"):
        portal_setup.runAllImportStepsFromProfile("profile-bika.lims:default")

    logger.info("SENAITE LIMS pre-install handler [DONE]")
[ "def", "pre_install", "(", "portal_setup", ")", ":", "logger", ".", "info", "(", "\"SENAITE LIMS pre-install handler [BEGIN]\"", ")", "# https://docs.plone.org/develop/addons/components/genericsetup.html#custom-installer-code-setuphandlers-py", "profile_id", "=", "\"profile-senaite.lims:default\"", "context", "=", "portal_setup", ".", "_getImportContext", "(", "profile_id", ")", "portal", "=", "context", ".", "getSite", "(", ")", "# noqa", "# Only install the core once!", "qi", "=", "portal", ".", "portal_quickinstaller", "if", "not", "qi", ".", "isProductInstalled", "(", "\"bika.lims\"", ")", ":", "portal_setup", ".", "runAllImportStepsFromProfile", "(", "\"profile-bika.lims:default\"", ")", "logger", ".", "info", "(", "\"SENAITE LIMS pre-install handler [DONE]\"", ")" ]
Runs before the first import step of the *default* profile This handler is registered as a *pre_handler* in the generic setup profile :param portal_setup: SetupTool
[ "Runs", "before", "the", "first", "import", "step", "of", "the", "*", "default", "*", "profile" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/quasiharmonic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/quasiharmonic.py#L291-L312
def thermal_conductivity(self, temperature, volume): """ Eq(17) in 10.1103/PhysRevB.90.174107 Args: temperature (float): temperature in K volume (float): in Ang^3 Returns: float: thermal conductivity in W/K/m """ gamma = self.gruneisen_parameter(temperature, volume) theta_d = self.debye_temperature(volume) # K theta_a = theta_d * self.natoms**(-1./3.) # K prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3) # kg/K^3/s^3 prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228) # kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10 # = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10 kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10 return kappa
[ "def", "thermal_conductivity", "(", "self", ",", "temperature", ",", "volume", ")", ":", "gamma", "=", "self", ".", "gruneisen_parameter", "(", "temperature", ",", "volume", ")", "theta_d", "=", "self", ".", "debye_temperature", "(", "volume", ")", "# K", "theta_a", "=", "theta_d", "*", "self", ".", "natoms", "**", "(", "-", "1.", "/", "3.", ")", "# K", "prefactor", "=", "(", "0.849", "*", "3", "*", "4", "**", "(", "1.", "/", "3.", ")", ")", "/", "(", "20.", "*", "np", ".", "pi", "**", "3", ")", "# kg/K^3/s^3", "prefactor", "=", "prefactor", "*", "(", "self", ".", "kb", "/", "self", ".", "hbar", ")", "**", "3", "*", "self", ".", "avg_mass", "kappa", "=", "prefactor", "/", "(", "gamma", "**", "2", "-", "0.514", "*", "gamma", "+", "0.228", ")", "# kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10", "# = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10", "kappa", "=", "kappa", "*", "theta_a", "**", "2", "*", "volume", "**", "(", "1.", "/", "3.", ")", "*", "1e-10", "return", "kappa" ]
Eq(17) in 10.1103/PhysRevB.90.174107 Args: temperature (float): temperature in K volume (float): in Ang^3 Returns: float: thermal conductivity in W/K/m
[ "Eq", "(", "17", ")", "in", "10", ".", "1103", "/", "PhysRevB", ".", "90", ".", "174107" ]
python
train
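A hedged usage sketch: qha stands for an already-constructed QuasiharmonicDebyeApprox instance (building one requires energies, volumes and a structure, which the record does not show):

# qha: assumed pre-built QuasiharmonicDebyeApprox instance (placeholder)
# Slack-style estimate at 300 K for a 40 Angstrom^3 cell volume
kappa = qha.thermal_conductivity(temperature=300.0, volume=40.0)
print(kappa)  # W/K/m, per the docstring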
Kortemme-Lab/klab
klab/bio/spackle.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/spackle.py#L222-L291
def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2): '''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the input i.e. the calling functions are responsible for ensuring that the insertion makes sense. Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now necessarily increase in document order. The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle over 4 colinear atoms is undefined). ''' atom_name_map = { 'CA' : ' CA ', 'C' : ' C ', 'N' : ' N ', 'O' : ' O ', } assert(start_atom.residue.chain == end_atom.residue.chain) chain_id = start_atom.residue.chain # Initialize steps num_new_atoms = float(len(new_atoms)) X, Y, Z = start_atom.x, start_atom.y, start_atom.z x_step = (end_atom.x - X) / (num_new_atoms + 1.0) y_step = (end_atom.y - Y) / (num_new_atoms + 1.0) z_step = (end_atom.z - Z) / (num_new_atoms + 1.0) D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step) jitter = 0 if jitterbug: jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D new_lines = [] next_serial_number = max(sorted(self.atoms.keys())) + 1 round = 0 for new_atom in new_atoms: X, Y, Z = X + x_step, Y + y_step, Z + z_step if jitter: if round % 3 == 0: X, Y = X + jitter, Y - jitter elif round % 3 == 1: Y, Z = Y + jitter, Z - jitter elif round % 3 == 2: Z, X = Z + jitter, X - jitter round += 1 residue_id, residue_type, atom_name = new_atom assert(len(residue_type) == 3) assert(len(residue_id) == 6) new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z)) next_serial_number += 1 new_pdb = [] in_start_residue = False for l in self.indexed_lines: if l[0] and l[3].serial_number == start_atom.serial_number: in_start_residue = True if in_start_residue and l[3].serial_number != start_atom.serial_number: new_pdb.extend(new_lines) #colortext.warning('\n'.join(new_lines)) in_start_residue = False if l[0]: #print(l[2]) new_pdb.append(l[2]) else: #print(l[1]) new_pdb.append(l[1]) return '\n'.join(new_pdb)
[ "def", "add_atoms_linearly", "(", "self", ",", "start_atom", ",", "end_atom", ",", "new_atoms", ",", "jitterbug", "=", "0.2", ")", ":", "atom_name_map", "=", "{", "'CA'", ":", "' CA '", ",", "'C'", ":", "' C '", ",", "'N'", ":", "' N '", ",", "'O'", ":", "' O '", ",", "}", "assert", "(", "start_atom", ".", "residue", ".", "chain", "==", "end_atom", ".", "residue", ".", "chain", ")", "chain_id", "=", "start_atom", ".", "residue", ".", "chain", "# Initialize steps", "num_new_atoms", "=", "float", "(", "len", "(", "new_atoms", ")", ")", "X", ",", "Y", ",", "Z", "=", "start_atom", ".", "x", ",", "start_atom", ".", "y", ",", "start_atom", ".", "z", "x_step", "=", "(", "end_atom", ".", "x", "-", "X", ")", "/", "(", "num_new_atoms", "+", "1.0", ")", "y_step", "=", "(", "end_atom", ".", "y", "-", "Y", ")", "/", "(", "num_new_atoms", "+", "1.0", ")", "z_step", "=", "(", "end_atom", ".", "z", "-", "Z", ")", "/", "(", "num_new_atoms", "+", "1.0", ")", "D", "=", "math", ".", "sqrt", "(", "x_step", "*", "x_step", "+", "y_step", "*", "y_step", "+", "z_step", "*", "z_step", ")", "jitter", "=", "0", "if", "jitterbug", ":", "jitter", "=", "(", "(", "(", "x_step", "+", "y_step", "+", "z_step", ")", "/", "3.0", ")", "*", "jitterbug", ")", "/", "D", "new_lines", "=", "[", "]", "next_serial_number", "=", "max", "(", "sorted", "(", "self", ".", "atoms", ".", "keys", "(", ")", ")", ")", "+", "1", "round", "=", "0", "for", "new_atom", "in", "new_atoms", ":", "X", ",", "Y", ",", "Z", "=", "X", "+", "x_step", ",", "Y", "+", "y_step", ",", "Z", "+", "z_step", "if", "jitter", ":", "if", "round", "%", "3", "==", "0", ":", "X", ",", "Y", "=", "X", "+", "jitter", ",", "Y", "-", "jitter", "elif", "round", "%", "3", "==", "1", ":", "Y", ",", "Z", "=", "Y", "+", "jitter", ",", "Z", "-", "jitter", "elif", "round", "%", "3", "==", "2", ":", "Z", ",", "X", "=", "Z", "+", "jitter", ",", "X", "-", "jitter", "round", "+=", "1", "residue_id", ",", "residue_type", ",", "atom_name", "=", "new_atom", "assert", "(", "len", "(", "residue_type", ")", "==", "3", ")", "assert", "(", "len", "(", "residue_id", ")", "==", "6", ")", "new_lines", ".", "append", "(", "'ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '", ".", "format", "(", "str", "(", "next_serial_number", ")", ".", "rjust", "(", "5", ")", ",", "atom_name_map", "[", "atom_name", "]", ",", "residue_type", ",", "residue_id", ",", "X", ",", "Y", ",", "Z", ")", ")", "next_serial_number", "+=", "1", "new_pdb", "=", "[", "]", "in_start_residue", "=", "False", "for", "l", "in", "self", ".", "indexed_lines", ":", "if", "l", "[", "0", "]", "and", "l", "[", "3", "]", ".", "serial_number", "==", "start_atom", ".", "serial_number", ":", "in_start_residue", "=", "True", "if", "in_start_residue", "and", "l", "[", "3", "]", ".", "serial_number", "!=", "start_atom", ".", "serial_number", ":", "new_pdb", ".", "extend", "(", "new_lines", ")", "#colortext.warning('\\n'.join(new_lines))", "in_start_residue", "=", "False", "if", "l", "[", "0", "]", ":", "#print(l[2])", "new_pdb", ".", "append", "(", "l", "[", "2", "]", ")", "else", ":", "#print(l[1])", "new_pdb", ".", "append", "(", "l", "[", "1", "]", ")", "return", "'\\n'", ".", "join", "(", "new_pdb", ")" ]
A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the input i.e. the calling functions are responsible for ensuring that the insertion makes sense. Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers no longer necessarily increase in document order. The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle over 4 colinear atoms is undefined).
[ "A", "low", "-", "level", "function", "which", "adds", "new_atoms", "between", "start_atom", "and", "end_atom", ".", "This", "function", "does", "not", "validate", "the", "input", "i", ".", "e", ".", "the", "calling", "functions", "are", "responsible", "for", "ensuring", "that", "the", "insertion", "makes", "sense", "." ]
python
train
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L2293-L2306
def _save_image(image, filename, return_img=None): """Internal helper for saving a NumPy image array""" if not image.size: raise Exception('Empty image. Have you run plot() first?') # write screenshot to file if isinstance(filename, str): if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(vtki.FIGURE_PATH, filename) if not return_img: return imageio.imwrite(filename, image) imageio.imwrite(filename, image) return image
[ "def", "_save_image", "(", "image", ",", "filename", ",", "return_img", "=", "None", ")", ":", "if", "not", "image", ".", "size", ":", "raise", "Exception", "(", "'Empty image. Have you run plot() first?'", ")", "# write screenshot to file", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "if", "isinstance", "(", "vtki", ".", "FIGURE_PATH", ",", "str", ")", "and", "not", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "vtki", ".", "FIGURE_PATH", ",", "filename", ")", "if", "not", "return_img", ":", "return", "imageio", ".", "imwrite", "(", "filename", ",", "image", ")", "imageio", ".", "imwrite", "(", "filename", ",", "image", ")", "return", "image" ]
Internal helper for saving a NumPy image array
[ "Internal", "helper", "for", "saving", "a", "NumPy", "image", "array" ]
python
train
ray-project/ray
python/ray/experimental/serve/mixin.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/serve/mixin.py#L21-L30
def _execute_and_seal_error(method, arg, method_name): """Execute method with arg and return the result. If the method fails, return a RayTaskError so it can be sealed in the resultOID and retried by user. """ try: return method(arg) except Exception: return ray.worker.RayTaskError(method_name, traceback.format_exc())
[ "def", "_execute_and_seal_error", "(", "method", ",", "arg", ",", "method_name", ")", ":", "try", ":", "return", "method", "(", "arg", ")", "except", "Exception", ":", "return", "ray", ".", "worker", ".", "RayTaskError", "(", "method_name", ",", "traceback", ".", "format_exc", "(", ")", ")" ]
Execute method with arg and return the result. If the method fails, return a RayTaskError so it can be sealed in the resultOID and retried by user.
[ "Execute", "method", "with", "arg", "and", "return", "the", "result", "." ]
python
train
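The seal-rather-than-raise behavior can be exercised directly; this sketch assumes ray is installed, since the sealed error type is ray.worker.RayTaskError:

from ray.experimental.serve.mixin import _execute_and_seal_error

print(_execute_and_seal_error(lambda x: x + 1, 1, 'incr'))  # 2

sealed = _execute_and_seal_error(lambda x: 1 / x, 0, 'divide')
# The ZeroDivisionError comes back as a return value, not an exception
print(type(sealed).__name__)  # RayTaskError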
rene-aguirre/pywinusb
examples/mute_led.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/examples/mute_led.py#L13-L49
def set_mute(mute_value):
    "Browse for mute usages and set value"
    all_mutes = ( \
        (0x8, 0x9),   # LED page
        (0x1, 0xA7),  # desktop page
        (0xb, 0x2f),
    )
    all_target_usages = [hid.get_full_usage_id(u[0], u[1]) for u in all_mutes]

    # usually you'll find and open the target device, here we'll browse for the
    # current connected devices
    all_devices = hid.find_all_hid_devices()

    success = 0
    if not all_devices:
        print("Can't find any HID device!")
    else:
        # search for our target usage
        # target pageId, usageId
        for device in all_devices:
            try:
                device.open()
                # target 'to set' value could be in feature or output reports
                for report in device.find_output_reports() + device.find_feature_reports():
                    for target_usage in all_target_usages:
                        if target_usage in report:
                            # set our value and send
                            report[target_usage] = mute_value
                            report.send()
                            success += 1
            finally:
                device.close()
    # fit to sys.exit() proper result values
    print("{0} Mute usage(s) set\n".format(success))
    if success:
        return 0
    return -1
[ "def", "set_mute", "(", "mute_value", ")", ":", "all_mutes", "=", "(", "(", "0x8", ",", "0x9", ")", ",", "# LED page", "(", "0x1", ",", "0xA7", ")", ",", "# desktop page", "(", "0xb", ",", "0x2f", ")", ",", ")", "all_target_usages", "=", "[", "hid", ".", "get_full_usage_id", "(", "u", "[", "0", "]", ",", "u", "[", "1", "]", ")", "for", "u", "in", "all_mutes", "]", "# usually you'll find and open the target device, here we'll browse for the", "# current connected devices", "all_devices", "=", "hid", ".", "find_all_hid_devices", "(", ")", "success", "=", "0", "if", "not", "all_devices", ":", "print", "(", "\"Can't any HID device!\"", ")", "else", ":", "# search for our target usage", "# target pageId, usageId", "for", "device", "in", "all_devices", ":", "try", ":", "device", ".", "open", "(", ")", "# target 'to set' value could be in feature or output reports", "for", "report", "in", "device", ".", "find_output_reports", "(", ")", "+", "device", ".", "find_feature_reports", "(", ")", ":", "for", "target_usage", "in", "all_target_usages", ":", "if", "target_usage", "in", "report", ":", "# set our value and send", "report", "[", "target_usage", "]", "=", "value", "report", ".", "send", "(", ")", "success", "+=", "1", "finally", ":", "device", ".", "close", "(", ")", "# fit to sys.exit() proper result values", "print", "(", "\"{0} Mute usage(s) set\\n\"", ".", "format", "(", "success", ")", ")", "if", "success", ":", "return", "0", "return", "-", "1" ]
Browse for mute usages and set value
[ "Browse", "for", "mute", "usages", "and", "set", "value" ]
python
train
mozilla-releng/scriptworker
scriptworker/cot/verify.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/cot/verify.py#L169-L184
def get_all_links_in_chain(self): """Return all links in the chain of trust, including the target task. By default, we're checking a task and all its dependencies back to the tree, so the full chain is ``self.links`` + ``self``. However, we also support checking the decision task itself. In that case, we populate the decision task as a link in ``self.links``, and we don't need to add another check for ``self``. Returns: list: of all ``LinkOfTrust``s to verify. """ if self.is_decision() and self.get_link(self.task_id): return self.links return [self] + self.links
[ "def", "get_all_links_in_chain", "(", "self", ")", ":", "if", "self", ".", "is_decision", "(", ")", "and", "self", ".", "get_link", "(", "self", ".", "task_id", ")", ":", "return", "self", ".", "links", "return", "[", "self", "]", "+", "self", ".", "links" ]
Return all links in the chain of trust, including the target task. By default, we're checking a task and all its dependencies back to the tree, so the full chain is ``self.links`` + ``self``. However, we also support checking the decision task itself. In that case, we populate the decision task as a link in ``self.links``, and we don't need to add another check for ``self``. Returns: list: of all ``LinkOfTrust``s to verify.
[ "Return", "all", "links", "in", "the", "chain", "of", "trust", "including", "the", "target", "task", "." ]
python
train
UCBerkeleySETI/blimpy
blimpy/file_wrapper.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/file_wrapper.py#L576-L622
def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None): """ Read data. """ self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop) #check if selection is small enough. if self.isheavy(): logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, " "header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3))) self.data = np.array([0],dtype=self._d_type) return None #Convert input frequencies into what their corresponding channel number would be. self._setup_chans() #Update frequencies ranges from channel number. self._setup_freqs() n_chans = self.header[b'nchans'] n_chans_selected = self.selection_shape[self.freq_axis] n_ifs = self.header[b'nifs'] # Load binary data f = open(self.filename, 'rb') f.seek(int(self.idx_data)) # now check to see how many integrations requested n_ints = self.t_stop - self.t_start # Seek to first integration f.seek(int(self.t_start * self._n_bytes * n_ifs * n_chans), 1) #Loading data self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=self._d_type) for ii in range(n_ints): for jj in range(n_ifs): f.seek(int(self._n_bytes * self.chan_start_idx), 1) # 1 = from current location dd = np.fromfile(f, count=n_chans_selected, dtype=self._d_type) # Reverse array if frequency axis is flipped # if self.header[b'foff'] < 0: # dd = dd[::-1] self.data[ii, jj] = dd f.seek(int(self._n_bytes * (n_chans - self.chan_stop_idx)), 1)
[ "def", "read_data", "(", "self", ",", "f_start", "=", "None", ",", "f_stop", "=", "None", ",", "t_start", "=", "None", ",", "t_stop", "=", "None", ")", ":", "self", ".", "_setup_selection_range", "(", "f_start", "=", "f_start", ",", "f_stop", "=", "f_stop", ",", "t_start", "=", "t_start", ",", "t_stop", "=", "t_stop", ")", "#check if selection is small enough.", "if", "self", ".", "isheavy", "(", ")", ":", "logger", ".", "warning", "(", "\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, \"", "\"header loaded, but data not loaded, please try another (t,v) selection.\"", "%", "(", "self", ".", "_calc_selection_size", "(", ")", "/", "(", "1024.", "**", "3", ")", ",", "self", ".", "MAX_DATA_ARRAY_SIZE", "/", "(", "1024.", "**", "3", ")", ")", ")", "self", ".", "data", "=", "np", ".", "array", "(", "[", "0", "]", ",", "dtype", "=", "self", ".", "_d_type", ")", "return", "None", "#Convert input frequencies into what their corresponding channel number would be.", "self", ".", "_setup_chans", "(", ")", "#Update frequencies ranges from channel number.", "self", ".", "_setup_freqs", "(", ")", "n_chans", "=", "self", ".", "header", "[", "b'nchans'", "]", "n_chans_selected", "=", "self", ".", "selection_shape", "[", "self", ".", "freq_axis", "]", "n_ifs", "=", "self", ".", "header", "[", "b'nifs'", "]", "# Load binary data", "f", "=", "open", "(", "self", ".", "filename", ",", "'rb'", ")", "f", ".", "seek", "(", "int", "(", "self", ".", "idx_data", ")", ")", "# now check to see how many integrations requested", "n_ints", "=", "self", ".", "t_stop", "-", "self", ".", "t_start", "# Seek to first integration", "f", ".", "seek", "(", "int", "(", "self", ".", "t_start", "*", "self", ".", "_n_bytes", "*", "n_ifs", "*", "n_chans", ")", ",", "1", ")", "#Loading data", "self", ".", "data", "=", "np", ".", "zeros", "(", "(", "n_ints", ",", "n_ifs", ",", "n_chans_selected", ")", ",", "dtype", "=", "self", ".", "_d_type", ")", "for", "ii", "in", "range", "(", "n_ints", ")", ":", "for", "jj", "in", "range", "(", "n_ifs", ")", ":", "f", ".", "seek", "(", "int", "(", "self", ".", "_n_bytes", "*", "self", ".", "chan_start_idx", ")", ",", "1", ")", "# 1 = from current location", "dd", "=", "np", ".", "fromfile", "(", "f", ",", "count", "=", "n_chans_selected", ",", "dtype", "=", "self", ".", "_d_type", ")", "# Reverse array if frequency axis is flipped", "# if self.header[b'foff'] < 0:", "# dd = dd[::-1]", "self", ".", "data", "[", "ii", ",", "jj", "]", "=", "dd", "f", ".", "seek", "(", "int", "(", "self", ".", "_n_bytes", "*", "(", "n_chans", "-", "self", ".", "chan_stop_idx", ")", ")", ",", "1", ")" ]
Read data.
[ "Read", "data", "." ]
python
test
ornlneutronimaging/ImagingReso
ImagingReso/_utilities.py
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L719-L732
def s_to_ev(offset_us, source_to_detector_m, array): """convert time (s) to energy (eV) Parameters: =========== numpy array of time in s offset_us: float. Delay of detector in us source_to_detector_m: float. Distance source to detector in m Returns: ======== numpy array of energy in eV """ lambda_a = 3956. * (array + offset_us * 1e-6) / source_to_detector_m return (81.787 / pow(lambda_a, 2)) / 1000.
[ "def", "s_to_ev", "(", "offset_us", ",", "source_to_detector_m", ",", "array", ")", ":", "lambda_a", "=", "3956.", "*", "(", "array", "+", "offset_us", "*", "1e-6", ")", "/", "source_to_detector_m", "return", "(", "81.787", "/", "pow", "(", "lambda_a", ",", "2", ")", ")", "/", "1000." ]
convert time (s) to energy (eV) Parameters: =========== numpy array of time in s offset_us: float. Delay of detector in us source_to_detector_m: float. Distance source to detector in m Returns: ======== numpy array of energy in eV
[ "convert", "time", "(", "s", ")", "to", "energy", "(", "eV", ")", "Parameters", ":", "===========", "numpy", "array", "of", "time", "in", "s", "offset_us", ":", "float", ".", "Delay", "of", "detector", "in", "us", "source_to_detector_m", ":", "float", ".", "Distance", "source", "to", "detector", "in", "m" ]
python
train
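A worked numeric check of the conversion above: lambda[Angstrom] = 3956 * (t + offset) / L is the neutron wavelength, and E[eV] = (81.787 / lambda**2) / 1000 since 81.787 / lambda**2 is in meV. The import path mirrors the record's file path:

import numpy as np
from ImagingReso._utilities import s_to_ev

tof_s = np.array([1.0e-3])  # 1 ms time of flight
energy_ev = s_to_ev(offset_us=0.0, source_to_detector_m=16.0, array=tof_s)
# lambda = 3956 * 1e-3 / 16 = 0.24725 Angstrom
# E = 81.787 / 0.24725**2 / 1000 ~= 1.34 eV
print(energy_ev)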
CityOfZion/neo-python
neo/Wallets/Wallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L168-L181
def AddNEP5Token(self, token): """ Add a NEP-5 compliant token to the wallet. Args: token (NEP5Token): an instance of type neo.Wallets.NEP5Token. Note: Prints a warning to the console if the token already exists in the wallet. """ if token.ScriptHash.ToBytes() in self._tokens.keys(): logger.error("Token already in wallet") return self._tokens[token.ScriptHash.ToBytes()] = token
[ "def", "AddNEP5Token", "(", "self", ",", "token", ")", ":", "if", "token", ".", "ScriptHash", ".", "ToBytes", "(", ")", "in", "self", ".", "_tokens", ".", "keys", "(", ")", ":", "logger", ".", "error", "(", "\"Token already in wallet\"", ")", "return", "self", ".", "_tokens", "[", "token", ".", "ScriptHash", ".", "ToBytes", "(", ")", "]", "=", "token" ]
Add a NEP-5 compliant token to the wallet. Args: token (NEP5Token): an instance of type neo.Wallets.NEP5Token. Note: Prints a warning to the console if the token already exists in the wallet.
[ "Add", "a", "NEP", "-", "5", "compliant", "token", "to", "the", "wallet", "." ]
python
train
05bit/peewee-async
peewee_async.py
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L278-L284
async def count(self, query, clear_limit=False): """Perform *COUNT* aggregated query asynchronously. :return: number of objects in ``select()`` query """ query = self._swap_database(query) return (await count(query, clear_limit=clear_limit))
[ "async", "def", "count", "(", "self", ",", "query", ",", "clear_limit", "=", "False", ")", ":", "query", "=", "self", ".", "_swap_database", "(", "query", ")", "return", "(", "await", "count", "(", "query", ",", "clear_limit", "=", "clear_limit", ")", ")" ]
Perform *COUNT* aggregated query asynchronously. :return: number of objects in ``select()`` query
[ "Perform", "*", "COUNT", "*", "aggregated", "query", "asynchronously", "." ]
python
train
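A hedged sketch of calling the coroutine, assuming the enclosing class is peewee_async.Manager and that a peewee model named MyModel exists (both are placeholders here):

import asyncio

async def main(objects):
    # objects: a peewee_async.Manager bound to an async database (assumed)
    n = await objects.count(MyModel.select())
    print(n)

# run with: loop.run_until_complete(main(objects)) once the manager is set up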
jonathf/chaospy
chaospy/distributions/baseclass.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/baseclass.py#L65-L96
def range(self, x_data=None): """ Generate the upper and lower bounds of a distribution. Args: x_data (numpy.ndarray) : The bounds might vary over the sample space. By providing x_data you can specify where in the space the bound should be taken. If omitted, a (pseudo-)random sample is used. Returns: (numpy.ndarray): The lower (out[0]) and upper (out[1]) bound where out.shape=(2,)+x_data.shape """ if x_data is None: try: x_data = evaluation.evaluate_inverse( self, numpy.array([[0.5]]*len(self))) except StochasticallyDependentError: x_data = approximation.find_interior_point(self) shape = (len(self),) if hasattr(self, "_range"): return self._range(x_data, {}) else: x_data = numpy.asfarray(x_data) shape = x_data.shape x_data = x_data.reshape(len(self), -1) q_data = evaluation.evaluate_bound(self, x_data) q_data = q_data.reshape((2,)+shape) return q_data
[ "def", "range", "(", "self", ",", "x_data", "=", "None", ")", ":", "if", "x_data", "is", "None", ":", "try", ":", "x_data", "=", "evaluation", ".", "evaluate_inverse", "(", "self", ",", "numpy", ".", "array", "(", "[", "[", "0.5", "]", "]", "*", "len", "(", "self", ")", ")", ")", "except", "StochasticallyDependentError", ":", "x_data", "=", "approximation", ".", "find_interior_point", "(", "self", ")", "shape", "=", "(", "len", "(", "self", ")", ",", ")", "if", "hasattr", "(", "self", ",", "\"_range\"", ")", ":", "return", "self", ".", "_range", "(", "x_data", ",", "{", "}", ")", "else", ":", "x_data", "=", "numpy", ".", "asfarray", "(", "x_data", ")", "shape", "=", "x_data", ".", "shape", "x_data", "=", "x_data", ".", "reshape", "(", "len", "(", "self", ")", ",", "-", "1", ")", "q_data", "=", "evaluation", ".", "evaluate_bound", "(", "self", ",", "x_data", ")", "q_data", "=", "q_data", ".", "reshape", "(", "(", "2", ",", ")", "+", "shape", ")", "return", "q_data" ]
Generate the upper and lower bounds of a distribution. Args: x_data (numpy.ndarray) : The bounds might vary over the sample space. By providing x_data you can specify where in the space the bound should be taken. If omitted, a (pseudo-)random sample is used. Returns: (numpy.ndarray): The lower (out[0]) and upper (out[1]) bound where out.shape=(2,)+x_data.shape
[ "Generate", "the", "upper", "and", "lower", "bounds", "of", "a", "distribution", "." ]
python
train
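A minimal check with a one-dimensional distribution; chaospy.Uniform is part of the public API, and the returned array stacks the lower and upper bounds:

import chaospy

dist = chaospy.Uniform(0, 1)
bounds = dist.range()
print(bounds[0], bounds[1])  # lower bound array ~0, upper bound array ~1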
horazont/aioxmpp
aioxmpp/xso/model.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xso/model.py#L812-L838
def handle_missing(self, instance, ctx): """ Handle a missing attribute on `instance`. This is called whenever no value for the attribute is found during parsing. The call to :meth:`missing` is independent of the value of `required`. If the `missing` callback is not :data:`None`, it is called with the `instance` and the `ctx` as arguments. If the returned value is not :data:`None`, it is used as the value of the attribute (validation takes place as if the value had been set from the code, not as if the value had been received from XML) and the handler returns. If the `missing` callback is :data:`None` or returns :data:`None`, the handling continues as normal: if `required` is true, a :class:`ValueError` is raised. """ if self.missing is not None: value = self.missing(instance, ctx) if value is not None: self._set_from_code(instance, value) return if self.default is _PropBase.NO_DEFAULT: raise ValueError("missing attribute {} on {}".format( tag_to_str(self.tag), tag_to_str(instance.TAG), ))
[ "def", "handle_missing", "(", "self", ",", "instance", ",", "ctx", ")", ":", "if", "self", ".", "missing", "is", "not", "None", ":", "value", "=", "self", ".", "missing", "(", "instance", ",", "ctx", ")", "if", "value", "is", "not", "None", ":", "self", ".", "_set_from_code", "(", "instance", ",", "value", ")", "return", "if", "self", ".", "default", "is", "_PropBase", ".", "NO_DEFAULT", ":", "raise", "ValueError", "(", "\"missing attribute {} on {}\"", ".", "format", "(", "tag_to_str", "(", "self", ".", "tag", ")", ",", "tag_to_str", "(", "instance", ".", "TAG", ")", ",", ")", ")" ]
Handle a missing attribute on `instance`. This is called whenever no value for the attribute is found during parsing. The call to :meth:`missing` is independent of the value of `required`. If the `missing` callback is not :data:`None`, it is called with the `instance` and the `ctx` as arguments. If the returned value is not :data:`None`, it is used as the value of the attribute (validation takes place as if the value had been set from the code, not as if the value had been received from XML) and the handler returns. If the `missing` callback is :data:`None` or returns :data:`None`, the handling continues as normal: if `required` is true, a :class:`ValueError` is raised.
[ "Handle", "a", "missing", "attribute", "on", "instance", ".", "This", "is", "called", "whenever", "no", "value", "for", "the", "attribute", "is", "found", "during", "parsing", ".", "The", "call", "to", ":", "meth", ":", "missing", "is", "independent", "of", "the", "value", "of", "required", "." ]
python
train
pletzer/pnumpy
src/pnGhostedDistArray.py
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L118-L128
def gdaZeros(shape, dtype, numGhosts=1): """ ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedDistArray(shape, dtype) res.setNumberOfGhosts(numGhosts) res[:] = 0 return res
[ "def", "gdaZeros", "(", "shape", ",", "dtype", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "setNumberOfGhosts", "(", "numGhosts", ")", "res", "[", ":", "]", "=", "0", "return", "res" ]
ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "zero", "constructor" ]
python
train
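A construction sketch; whether gdaZeros is re-exported at the pnumpy package level is an assumption, otherwise it lives in the module shown in the record's path:

import numpy
from pnumpy import gdaZeros  # assumed package-level re-export

da = gdaZeros((4, 5), numpy.float64, numGhosts=1)
da[:] = 1.0  # behaves like a numpy array, with one layer of ghost cells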
un33k/django-ipware
ipware/ip.py
https://github.com/un33k/django-ipware/blob/dc6b754137d1bb7d056ac206a6e0443aa3ed68dc/ipware/ip.py#L40-L46
def get_real_ip(request, right_most_proxy=False): """ Returns client's best-matched `real` `externally-routable` ip-address, or None @deprecated - Do not edit """ warnings.warn('get_real_ip is deprecated and will be removed in 3.0.', DeprecationWarning) return get_ip(request, real_ip_only=True, right_most_proxy=right_most_proxy)
[ "def", "get_real_ip", "(", "request", ",", "right_most_proxy", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'get_real_ip is deprecated and will be removed in 3.0.'", ",", "DeprecationWarning", ")", "return", "get_ip", "(", "request", ",", "real_ip_only", "=", "True", ",", "right_most_proxy", "=", "right_most_proxy", ")" ]
Returns client's best-matched `real` `externally-routable` ip-address, or None @deprecated - Do not edit
[ "Returns", "client", "s", "best", "-", "matched", "real", "externally", "-", "routable", "ip", "-", "address", "or", "None" ]
python
train
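Usage sketch in a Django view; since the helper is deprecated, new code should prefer get_ip:

from ipware.ip import get_ip, get_real_ip

def my_view(request):
    ip = get_real_ip(request)  # emits DeprecationWarning; None if no routable IP
    if ip is None:
        ip = get_ip(request)   # best-matched address, possibly non-routable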
lucaskjaero/PyCasia
pycasia/CASIA.py
https://github.com/lucaskjaero/PyCasia/blob/511ddb7809d788fc2c7bc7c1e8600db60bac8152/pycasia/CASIA.py#L164-L179
def load_dataset(self, dataset, verbose=True): """ Load a directory of gnt files. Yields the image and label in tuples. :param dataset: The directory to load. :return: Yields (Pillow.Image.Image, label) pairs. """ assert self.get_dataset(dataset) is True, "Datasets aren't properly downloaded, " \ "rerun to try again or download datasets manually." if verbose: print("Loading %s" % dataset) dataset_path = self.base_dataset_path + dataset for path in tqdm(glob.glob(dataset_path + "/*.gnt")): for image, label in self.load_gnt_file(path): yield image, label
[ "def", "load_dataset", "(", "self", ",", "dataset", ",", "verbose", "=", "True", ")", ":", "assert", "self", ".", "get_dataset", "(", "dataset", ")", "is", "True", ",", "\"Datasets aren't properly downloaded, \"", "\"rerun to try again or download datasets manually.\"", "if", "verbose", ":", "print", "(", "\"Loading %s\"", "%", "dataset", ")", "dataset_path", "=", "self", ".", "base_dataset_path", "+", "dataset", "for", "path", "in", "tqdm", "(", "glob", ".", "glob", "(", "dataset_path", "+", "\"/*.gnt\"", ")", ")", ":", "for", "image", ",", "label", "in", "self", ".", "load_gnt_file", "(", "path", ")", ":", "yield", "image", ",", "label" ]
Load a directory of gnt files. Yields the image and label in tuples. :param dataset: The directory to load. :return: Yields (Pillow.Image.Image, label) pairs.
[ "Load", "a", "directory", "of", "gnt", "files", ".", "Yields", "the", "image", "and", "label", "in", "tuples", ".", ":", "param", "dataset", ":", "The", "directory", "to", "load", ".", ":", "return", ":", "Yields", "(", "Pillow", ".", "Image", ".", "Image", "label", ")", "pairs", "." ]
python
train
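Usage sketch iterating over the generator (the dataset name is assumed here):

from pycasia.CASIA import CASIA

casia = CASIA()
for image, label in casia.load_dataset("HWDB1.1trn_gnt"):  # name assumed
    image.save("%s.png" % label)  # image is a Pillow Image, label a character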
redhat-cip/dci-control-server
dci/alembic/env.py
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/alembic/env.py#L34-L53
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ app_conf = dci_config.generate_conf() url = app_conf['SQLALCHEMY_DATABASE_URI'] context.configure( url=url, target_metadata=target_metadata, literal_binds=True, ) with context.begin_transaction(): context.run_migrations()
[ "def", "run_migrations_offline", "(", ")", ":", "app_conf", "=", "dci_config", ".", "generate_conf", "(", ")", "url", "=", "app_conf", "[", "'SQLALCHEMY_DATABASE_URI'", "]", "context", ".", "configure", "(", "url", "=", "url", ",", "target_metadata", "=", "target_metadata", ",", "literal_binds", "=", "True", ",", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")" ]
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
[ "Run", "migrations", "in", "offline", "mode", "." ]
python
train
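The module presumably ends with the usual Alembic dispatch between the two modes:

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()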
StackStorm/pybind
pybind/nos/v6_0_2f/rmon/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rmon/__init__.py#L127-L148
def _set_alarm_entry(self, v, load=False): """ Setter method for alarm_entry, mapped from YANG variable /rmon/alarm_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_alarm_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_alarm_entry() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """alarm_entry must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)""", }) self.__alarm_entry = t if hasattr(self, '_set'): self._set()
[ "def", "_set_alarm_entry", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"alarm_index\"", ",", "alarm_entry", ".", "alarm_entry", ",", "yang_name", "=", "\"alarm-entry\"", ",", "rest_name", "=", "\"alarm\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'alarm-index'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'RMON alarm'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'alt-name'", ":", "u'alarm'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'callpoint'", ":", "u'rmon_alarm'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"alarm-entry\"", ",", "rest_name", "=", "\"alarm\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'RMON alarm'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-full-no'", ":", "None", ",", "u'alt-name'", ":", "u'alarm'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'callpoint'", ":", "u'rmon_alarm'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-rmon'", ",", "defining_module", "=", "'brocade-rmon'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"alarm_entry must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"alarm_index\",alarm_entry.alarm_entry, yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, 
namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__alarm_entry", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for alarm_entry, mapped from YANG variable /rmon/alarm_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_alarm_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_alarm_entry() directly.
[ "Setter", "method", "for", "alarm_entry", "mapped", "from", "YANG", "variable", "/", "rmon", "/", "alarm_entry", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_alarm_entry", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_alarm_entry", "()", "directly", "." ]
python
train
lepture/flask-oauthlib
flask_oauthlib/client.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/client.py#L57-L67
def init_app(self, app): """Init app with Flask instance. You can also pass the instance of Flask later:: oauth = OAuth() oauth.init_app(app) """ self.app = app app.extensions = getattr(app, 'extensions', {}) app.extensions[self.state_key] = self
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "app", "=", "app", "app", ".", "extensions", "=", "getattr", "(", "app", ",", "'extensions'", ",", "{", "}", ")", "app", ".", "extensions", "[", "self", ".", "state_key", "]", "=", "self" ]
Init app with Flask instance. You can also pass the instance of Flask later:: oauth = OAuth() oauth.init_app(app)
[ "Init", "app", "with", "Flask", "instance", "." ]
python
test
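Usage sketch of the application-factory pattern this method enables:

from flask import Flask
from flask_oauthlib.client import OAuth

oauth = OAuth()  # created without an app

def create_app():
    app = Flask(__name__)
    oauth.init_app(app)  # registers the extension under app.extensions
    return app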
wummel/linkchecker
linkcheck/plugins/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/__init__.py#L116-L118
def run_parser_plugins(self, url_data, pagetype): """Run parser plugins for given pagetype.""" run_plugins(self.parser_plugins, url_data, stop_after_match=True, pagetype=pagetype)
[ "def", "run_parser_plugins", "(", "self", ",", "url_data", ",", "pagetype", ")", ":", "run_plugins", "(", "self", ".", "parser_plugins", ",", "url_data", ",", "stop_after_match", "=", "True", ",", "pagetype", "=", "pagetype", ")" ]
Run parser plugins for given pagetype.
[ "Run", "parser", "plugins", "for", "given", "pagetype", "." ]
python
train
ericmjl/nxviz
nxviz/plots.py
https://github.com/ericmjl/nxviz/blob/6ea5823a8030a686f165fbe37d7a04d0f037ecc9/nxviz/plots.py#L261-L300
def compute_node_colors(self): """Compute the node colors. Also computes the colorbar.""" data = [self.graph.node[n][self.node_color] for n in self.nodes] if self.group_order == "alphabetically": data_reduced = sorted(list(set(data))) elif self.group_order == "default": data_reduced = list(unique_everseen(data)) dtype = infer_data_type(data) n_grps = num_discrete_groups(data) if dtype == "categorical" or dtype == "ordinal": if n_grps <= 8: cmap = get_cmap( cmaps["Accent_{0}".format(n_grps)].mpl_colormap ) else: cmap = n_group_colorpallet(n_grps) elif dtype == "continuous" and not is_data_diverging(data): cmap = get_cmap(cmaps["continuous"].mpl_colormap) elif dtype == "continuous" and is_data_diverging(data): cmap = get_cmap(cmaps["diverging"].mpl_colormap) for d in data: idx = data_reduced.index(d) / n_grps self.node_colors.append(cmap(idx)) # Add colorbar if required.ListedColormap logging.debug("length of data_reduced: {0}".format(len(data_reduced))) logging.debug("dtype: {0}".format(dtype)) if len(data_reduced) > 1 and dtype == "continuous": self.sm = plt.cm.ScalarMappable( cmap=cmap, norm=plt.Normalize( vmin=min(data_reduced), vmax=max(data_reduced), # noqa # noqa ), ) self.sm._A = []
[ "def", "compute_node_colors", "(", "self", ")", ":", "data", "=", "[", "self", ".", "graph", ".", "node", "[", "n", "]", "[", "self", ".", "node_color", "]", "for", "n", "in", "self", ".", "nodes", "]", "if", "self", ".", "group_order", "==", "\"alphabetically\"", ":", "data_reduced", "=", "sorted", "(", "list", "(", "set", "(", "data", ")", ")", ")", "elif", "self", ".", "group_order", "==", "\"default\"", ":", "data_reduced", "=", "list", "(", "unique_everseen", "(", "data", ")", ")", "dtype", "=", "infer_data_type", "(", "data", ")", "n_grps", "=", "num_discrete_groups", "(", "data", ")", "if", "dtype", "==", "\"categorical\"", "or", "dtype", "==", "\"ordinal\"", ":", "if", "n_grps", "<=", "8", ":", "cmap", "=", "get_cmap", "(", "cmaps", "[", "\"Accent_{0}\"", ".", "format", "(", "n_grps", ")", "]", ".", "mpl_colormap", ")", "else", ":", "cmap", "=", "n_group_colorpallet", "(", "n_grps", ")", "elif", "dtype", "==", "\"continuous\"", "and", "not", "is_data_diverging", "(", "data", ")", ":", "cmap", "=", "get_cmap", "(", "cmaps", "[", "\"continuous\"", "]", ".", "mpl_colormap", ")", "elif", "dtype", "==", "\"continuous\"", "and", "is_data_diverging", "(", "data", ")", ":", "cmap", "=", "get_cmap", "(", "cmaps", "[", "\"diverging\"", "]", ".", "mpl_colormap", ")", "for", "d", "in", "data", ":", "idx", "=", "data_reduced", ".", "index", "(", "d", ")", "/", "n_grps", "self", ".", "node_colors", ".", "append", "(", "cmap", "(", "idx", ")", ")", "# Add colorbar if required.ListedColormap", "logging", ".", "debug", "(", "\"length of data_reduced: {0}\"", ".", "format", "(", "len", "(", "data_reduced", ")", ")", ")", "logging", ".", "debug", "(", "\"dtype: {0}\"", ".", "format", "(", "dtype", ")", ")", "if", "len", "(", "data_reduced", ")", ">", "1", "and", "dtype", "==", "\"continuous\"", ":", "self", ".", "sm", "=", "plt", ".", "cm", ".", "ScalarMappable", "(", "cmap", "=", "cmap", ",", "norm", "=", "plt", ".", "Normalize", "(", "vmin", "=", "min", "(", "data_reduced", ")", ",", "vmax", "=", "max", "(", "data_reduced", ")", ",", "# noqa # noqa", ")", ",", ")", "self", ".", "sm", ".", "_A", "=", "[", "]" ]
Compute the node colors. Also computes the colorbar.
[ "Compute", "the", "node", "colors", ".", "Also", "computes", "the", "colorbar", "." ]
python
train
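Usage sketch (pre-2.4 networkx `G.node` API, matching the `graph.node` access above); categorical metadata on the nodes drives the colors this method computes:

import networkx as nx
from nxviz.plots import CircosPlot

G = nx.erdos_renyi_graph(20, 0.2)
for n in G.nodes():
    G.node[n]["group"] = n % 3  # categorical data -> discrete colormap

plot = CircosPlot(G, node_color="group", node_grouping="group")
plot.draw()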
ethereum/py-evm
eth/chains/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L786-L799
def validate_gaslimit(self, header: BlockHeader) -> None: """ Validate the gas limit on the given header. """ parent_header = self.get_block_header_by_hash(header.parent_hash) low_bound, high_bound = compute_gas_limit_bounds(parent_header) if header.gas_limit < low_bound: raise ValidationError( "The gas limit on block {0} is too low: {1}. It must be at least {2}".format( encode_hex(header.hash), header.gas_limit, low_bound)) elif header.gas_limit > high_bound: raise ValidationError( "The gas limit on block {0} is too high: {1}. It must be at most {2}".format( encode_hex(header.hash), header.gas_limit, high_bound))
[ "def", "validate_gaslimit", "(", "self", ",", "header", ":", "BlockHeader", ")", "->", "None", ":", "parent_header", "=", "self", ".", "get_block_header_by_hash", "(", "header", ".", "parent_hash", ")", "low_bound", ",", "high_bound", "=", "compute_gas_limit_bounds", "(", "parent_header", ")", "if", "header", ".", "gas_limit", "<", "low_bound", ":", "raise", "ValidationError", "(", "\"The gas limit on block {0} is too low: {1}. It must be at least {2}\"", ".", "format", "(", "encode_hex", "(", "header", ".", "hash", ")", ",", "header", ".", "gas_limit", ",", "low_bound", ")", ")", "elif", "header", ".", "gas_limit", ">", "high_bound", ":", "raise", "ValidationError", "(", "\"The gas limit on block {0} is too high: {1}. It must be at most {2}\"", ".", "format", "(", "encode_hex", "(", "header", ".", "hash", ")", ",", "header", ".", "gas_limit", ",", "high_bound", ")", ")" ]
Validate the gas limit on the given header.
[ "Validate", "the", "gas", "limit", "on", "the", "given", "header", "." ]
python
train
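Usage sketch; the ValidationError import path has moved between py-evm releases, so it is assumed here:

from eth_utils import ValidationError  # older releases: eth.exceptions

try:
    chain.validate_gaslimit(block.header)
except ValidationError:
    pass  # gas limit fell outside the bounds derived from the parent header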
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L36-L58
def add_account_api_key_to_groups(self, account_id, api_key, body, **kwargs): # noqa: E501 """Add API key to a list of groups. # noqa: E501 An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apikey}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_account_api_key_to_groups(account_id, api_key, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str api_key: The ID of the API key to be added to the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.add_account_api_key_to_groups_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501 else: (data) = self.add_account_api_key_to_groups_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501 return data
[ "def", "add_account_api_key_to_groups", "(", "self", ",", "account_id", ",", "api_key", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "add_account_api_key_to_groups_with_http_info", "(", "account_id", ",", "api_key", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "add_account_api_key_to_groups_with_http_info", "(", "account_id", ",", "api_key", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Add API key to a list of groups. # noqa: E501 An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apikey}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_account_api_key_to_groups(account_id, api_key, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str api_key: The ID of the API key to be added to the group. (required) :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Add", "API", "key", "to", "a", "list", "of", "groups", ".", "#", "noqa", ":", "E501" ]
python
train
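Condensed form of the docstring's own usage, synchronous and asynchronous (account_id, api_key, and group_ids are placeholders):

result = api.add_account_api_key_to_groups(account_id, api_key, group_ids)

thread = api.add_account_api_key_to_groups(
    account_id, api_key, group_ids, asynchronous=True)
result = thread.get()  # blocks until the request thread finishes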
horazont/aioxmpp
aioxmpp/muc/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/muc/service.py#L2438-L2464
def get_room_config(self, mucjid): """ Query and return the room configuration form for the given MUC. :param mucjid: JID of the room to query :type mucjid: bare :class:`~.JID` :return: data form template for the room configuration :rtype: :class:`aioxmpp.forms.Data` .. seealso:: :class:`~.ConfigurationForm` for a form template to work with the returned form .. versionadded:: 0.7 """ if mucjid is None or not mucjid.is_bare: raise ValueError("mucjid must be bare JID") iq = aioxmpp.stanza.IQ( type_=aioxmpp.structs.IQType.GET, to=mucjid, payload=muc_xso.OwnerQuery(), ) return (yield from self.client.send(iq)).form
[ "def", "get_room_config", "(", "self", ",", "mucjid", ")", ":", "if", "mucjid", "is", "None", "or", "not", "mucjid", ".", "is_bare", ":", "raise", "ValueError", "(", "\"mucjid must be bare JID\"", ")", "iq", "=", "aioxmpp", ".", "stanza", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "structs", ".", "IQType", ".", "GET", ",", "to", "=", "mucjid", ",", "payload", "=", "muc_xso", ".", "OwnerQuery", "(", ")", ",", ")", "return", "(", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")", ")", ".", "form" ]
Query and return the room configuration form for the given MUC. :param mucjid: JID of the room to query :type mucjid: bare :class:`~.JID` :return: data form template for the room configuration :rtype: :class:`aioxmpp.forms.Data` .. seealso:: :class:`~.ConfigurationForm` for a form template to work with the returned form .. versionadded:: 0.7
[ "Query", "and", "return", "the", "room", "configuration", "form", "for", "the", "given", "MUC", "." ]
python
train
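Usage sketch in the same generator-coroutine style as the method itself (the ConfigurationForm import path is assumed):

import asyncio
import aioxmpp

@asyncio.coroutine
def fetch_config(muc_service, mucjid):
    form = yield from muc_service.get_room_config(mucjid)
    # wrap in the typed template the docstring points to
    return aioxmpp.muc.ConfigurationForm.from_xso(form)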
dadadel/pyment
pyment/docstring.py
https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L934-L966
def get_raise_description_indexes(self, data, prev=None): """Get from a docstring the next raise's description. In javadoc style it is after @param. :param data: string to parse :param prev: index after the param element name (Default value = None) :returns: start and end indexes of found element else (-1, -1) :rtype: tuple """ start, end = -1, -1 if not prev: _, prev = self.get_raise_indexes(data) if prev < 0: return -1, -1 m = re.match(r'\W*(\w+)', data[prev:]) if m: first = m.group(1) start = data[prev:].find(first) if start >= 0: start += prev if self.style['in'] in self.tagstyles + ['unknown']: end = self.get_elem_index(data[start:]) if end >= 0: end += start if self.style['in'] in ['params', 'unknown'] and end == -1: p1, _ = self.get_raise_indexes(data[start:]) if p1 >= 0: end = p1 else: end = len(data) return start, end
[ "def", "get_raise_description_indexes", "(", "self", ",", "data", ",", "prev", "=", "None", ")", ":", "start", ",", "end", "=", "-", "1", ",", "-", "1", "if", "not", "prev", ":", "_", ",", "prev", "=", "self", ".", "get_raise_indexes", "(", "data", ")", "if", "prev", "<", "0", ":", "return", "-", "1", ",", "-", "1", "m", "=", "re", ".", "match", "(", "r'\\W*(\\w+)'", ",", "data", "[", "prev", ":", "]", ")", "if", "m", ":", "first", "=", "m", ".", "group", "(", "1", ")", "start", "=", "data", "[", "prev", ":", "]", ".", "find", "(", "first", ")", "if", "start", ">=", "0", ":", "start", "+=", "prev", "if", "self", ".", "style", "[", "'in'", "]", "in", "self", ".", "tagstyles", "+", "[", "'unknown'", "]", ":", "end", "=", "self", ".", "get_elem_index", "(", "data", "[", "start", ":", "]", ")", "if", "end", ">=", "0", ":", "end", "+=", "start", "if", "self", ".", "style", "[", "'in'", "]", "in", "[", "'params'", ",", "'unknown'", "]", "and", "end", "==", "-", "1", ":", "p1", ",", "_", "=", "self", ".", "get_raise_indexes", "(", "data", "[", "start", ":", "]", ")", "if", "p1", ">=", "0", ":", "end", "=", "p1", "else", ":", "end", "=", "len", "(", "data", ")", "return", "start", ",", "end" ]
Get from a docstring the next raise's description. In javadoc style it is after @param. :param data: string to parse :param prev: index after the param element name (Default value = None) :returns: start and end indexes of found element else (-1, -1) :rtype: tuple
[ "Get", "from", "a", "docstring", "the", "next", "raise", "s", "description", ".", "In", "javadoc", "style", "it", "is", "after", "@param", "." ]
python
train
pypyr/pypyr-cli
pypyr/context.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L677-L737
def set_defaults(self, defaults): """Set defaults in context if keys do not exist already. Adds the input dict (defaults) into the context, only where keys in defaults do not already exist in context. Supports nested hierarchies. Example: Given a context like this: key1: value1 key2: key2.1: value2.1 key3: None And defaults input like this: key1: 'updated value here won't overwrite since it already exists' key2: key2.2: value2.2 key3: 'key 3 exists so I won't overwrite Will result in context: key1: value1 key2: key2.1: value2.1 key2.2: value2.2 key3: None Args: defaults: dict. Add this dict into context. Returns: None. All operations mutate this instance of context. """ def defaults_recurse(current, defaults): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. defaults: dict. Add this to current if keys don't exist already. """ for k, v in defaults.items(): # key supports interpolation k = self.get_formatted_string(k) if k in current: if types.are_all_this_type(Mapping, current[k], v): # it's dict-y, thus recurse through it to check if it # contains child items that don't exist in dest defaults_recurse(current[k], v) else: # since it's not in context already, add the default current[k] = self.get_formatted_iterable(v) # first iteration starts at context dict root defaults_recurse(self, defaults)
[ "def", "set_defaults", "(", "self", ",", "defaults", ")", ":", "def", "defaults_recurse", "(", "current", ",", "defaults", ")", ":", "\"\"\"Walk the current context tree in recursive inner function.\n\n On 1st iteration, current = self (i.e root of context)\n On subsequent recursive iterations, current is wherever you're at\n in the nested context hierarchy.\n\n Args:\n current: dict. Destination of merge.\n defaults: dict. Add this to current if keys don't exist\n already.\n\n \"\"\"", "for", "k", ",", "v", "in", "defaults", ".", "items", "(", ")", ":", "# key supports interpolation", "k", "=", "self", ".", "get_formatted_string", "(", "k", ")", "if", "k", "in", "current", ":", "if", "types", ".", "are_all_this_type", "(", "Mapping", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# it's dict-y, thus recurse through it to check if it", "# contains child items that don't exist in dest", "defaults_recurse", "(", "current", "[", "k", "]", ",", "v", ")", "else", ":", "# since it's not in context already, add the default", "current", "[", "k", "]", "=", "self", ".", "get_formatted_iterable", "(", "v", ")", "# first iteration starts at context dict root", "defaults_recurse", "(", "self", ",", "defaults", ")" ]
Set defaults in context if keys do not exist already. Adds the input dict (defaults) into the context, only where keys in defaults do not already exist in context. Supports nested hierarchies. Example: Given a context like this: key1: value1 key2: key2.1: value2.1 key3: None And defaults input like this: key1: 'updated value here won't overwrite since it already exists' key2: key2.2: value2.2 key3: 'key 3 exists so I won't overwrite Will result in context: key1: value1 key2: key2.1: value2.1 key2.2: value2.2 key3: None Args: defaults: dict. Add this dict into context. Returns: None. All operations mutate this instance of context.
[ "Set", "defaults", "in", "context", "if", "keys", "do", "not", "exist", "already", "." ]
python
train
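Usage sketch mirroring the docstring's example:

from pypyr.context import Context

context = Context({'key1': 'value1',
                   'key2': {'key2.1': 'value2.1'},
                   'key3': None})
context.set_defaults({'key1': 'ignored', 'key2': {'key2.2': 'value2.2'}})
assert context['key1'] == 'value1'               # existing value kept
assert context['key2']['key2.2'] == 'value2.2'   # nested default merged in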
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L3249-L3279
def commented_out_code_lines(source): """Return line numbers of comments that are likely code. Commented-out code is bad practice, but modifying it just adds even more clutter. """ line_numbers = [] try: for t in generate_tokens(source): token_type = t[0] token_string = t[1] start_row = t[2][0] line = t[4] # Ignore inline comments. if not line.lstrip().startswith('#'): continue if token_type == tokenize.COMMENT: stripped_line = token_string.lstrip('#').strip() if ( ' ' in stripped_line and '#' not in stripped_line and check_syntax(stripped_line) ): line_numbers.append(start_row) except (SyntaxError, tokenize.TokenError): pass return line_numbers
[ "def", "commented_out_code_lines", "(", "source", ")", ":", "line_numbers", "=", "[", "]", "try", ":", "for", "t", "in", "generate_tokens", "(", "source", ")", ":", "token_type", "=", "t", "[", "0", "]", "token_string", "=", "t", "[", "1", "]", "start_row", "=", "t", "[", "2", "]", "[", "0", "]", "line", "=", "t", "[", "4", "]", "# Ignore inline comments.", "if", "not", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "'#'", ")", ":", "continue", "if", "token_type", "==", "tokenize", ".", "COMMENT", ":", "stripped_line", "=", "token_string", ".", "lstrip", "(", "'#'", ")", ".", "strip", "(", ")", "if", "(", "' '", "in", "stripped_line", "and", "'#'", "not", "in", "stripped_line", "and", "check_syntax", "(", "stripped_line", ")", ")", ":", "line_numbers", ".", "append", "(", "start_row", ")", "except", "(", "SyntaxError", ",", "tokenize", ".", "TokenError", ")", ":", "pass", "return", "line_numbers" ]
Return line numbers of comments that are likely code. Commented-out code is bad practice, but modifying it just adds even more clutter.
[ "Return", "line", "numbers", "of", "comments", "that", "are", "likely", "code", "." ]
python
train
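Worked example of the heuristic (the third line's inline comment is skipped by the lstrip check):

import autopep8

source = (
    "# x = 1\n"        # parses as code once '#' is stripped -> reported
    "# a comment\n"    # not valid syntax -> ignored
    "y = 2  # note\n"
)
print(autopep8.commented_out_code_lines(source))  # [1]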
gbiggs/rtctree
rtctree/utils.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/utils.py#L87-L125
def get_num_columns_and_rows(widths, gap_width, term_width): '''Given a list of string widths, a width of the minimum gap to place between them, and the maximum width of the output (such as a terminal width), calculate the number of columns and rows, and the width of each column, for the optimal layout. ''' def calc_longest_width(widths, gap_width, ncols): longest = 0 rows = [widths[s:s + ncols] for s in range(0, len(widths), ncols)] col_widths = rows[0] # Column widths start at the first row widths for r in rows: for ii, c in enumerate(r): if c > col_widths[ii]: col_widths[ii] = c length = sum(col_widths) + gap_width * (ncols - 1) if length > longest: longest = length return longest, col_widths def calc_num_rows(num_items, cols): div, mod = divmod(num_items, cols) return div + (mod != 0) # Start with one row ncols = len(widths) # Calculate the width of the longest row as the longest set of item widths # ncols long and gap widths (gap_width * ncols - 1) that fits within the # terminal width. while ncols > 0: longest_width, col_widths = calc_longest_width(widths, gap_width, ncols) if longest_width < term_width: # This number of columns fits return calc_num_rows(len(widths), ncols), ncols, col_widths else: # This number of columns doesn't fit, so try one less ncols -= 1 # If got here, it all has to go in one column return len(widths), 1, 0
[ "def", "get_num_columns_and_rows", "(", "widths", ",", "gap_width", ",", "term_width", ")", ":", "def", "calc_longest_width", "(", "widths", ",", "gap_width", ",", "ncols", ")", ":", "longest", "=", "0", "rows", "=", "[", "widths", "[", "s", ":", "s", "+", "ncols", "]", "for", "s", "in", "range", "(", "0", ",", "len", "(", "widths", ")", ",", "ncols", ")", "]", "col_widths", "=", "rows", "[", "0", "]", "# Column widths start at the first row widths\r", "for", "r", "in", "rows", ":", "for", "ii", ",", "c", "in", "enumerate", "(", "r", ")", ":", "if", "c", ">", "col_widths", "[", "ii", "]", ":", "col_widths", "[", "ii", "]", "=", "c", "length", "=", "sum", "(", "col_widths", ")", "+", "gap_width", "*", "(", "ncols", "-", "1", ")", "if", "length", ">", "longest", ":", "longest", "=", "length", "return", "longest", ",", "col_widths", "def", "calc_num_rows", "(", "num_items", ",", "cols", ")", ":", "div", ",", "mod", "=", "divmod", "(", "num_items", ",", "cols", ")", "return", "div", "+", "(", "mod", "!=", "0", ")", "# Start with one row\r", "ncols", "=", "len", "(", "widths", ")", "# Calculate the width of the longest row as the longest set of item widths\r", "# ncols long and gap widths (gap_width * ncols - 1) that fits within the\r", "# terminal width.\r", "while", "ncols", ">", "0", ":", "longest_width", ",", "col_widths", "=", "calc_longest_width", "(", "widths", ",", "gap_width", ",", "ncols", ")", "if", "longest_width", "<", "term_width", ":", "# This number of columns fits\r", "return", "calc_num_rows", "(", "len", "(", "widths", ")", ",", "ncols", ")", ",", "ncols", ",", "col_widths", "else", ":", "# This number of columns doesn't fit, so try one less\r", "ncols", "-=", "1", "# If got here, it all has to go in one column\r", "return", "len", "(", "widths", ")", ",", "1", ",", "0" ]
Given a list of string widths, a width of the minimum gap to place between them, and the maximum width of the output (such as a terminal width), calculate the number of columns and rows, and the width of each column, for the optimal layout.
[ "Given", "a", "list", "of", "string", "widths", "a", "width", "of", "the", "minimum", "gap", "to", "place", "between", "them", "and", "the", "maximum", "width", "of", "the", "output", "(", "such", "as", "a", "terminal", "width", ")", "calculate", "the", "number", "of", "columns", "and", "rows", "and", "the", "width", "of", "each", "column", "for", "the", "optimal", "layout", "." ]
python
train
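Usage sketch laying out component names for an 80-column terminal:

from rtctree.utils import get_num_columns_and_rows

widths = [len(n) for n in ('comp0.rtc', 'comp1.rtc', 'manager.mgr')]
nrows, ncols, col_widths = get_num_columns_and_rows(widths, 2, 80)
# everything fits on one row here: nrows == 1, ncols == 3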
Infinidat/infi.clickhouse_orm
scripts/generate_ref.py
https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/scripts/generate_ref.py#L7-L30
def _get_default_arg(args, defaults, arg_index): """ Method that determines if an argument has default value or not, and if yes what is the default value for the argument :param args: array of arguments, eg: ['first_arg', 'second_arg', 'third_arg'] :param defaults: array of default values, eg: (42, 'something') :param arg_index: index of the argument in the argument array for which, this function checks if a default value exists or not. And if default value exists it would return the default value. Example argument: 1 :return: Tuple of whether there is a default or not, and if yes the default value, eg: for index 2 i.e. for "second_arg" this function returns (True, 42) """ if not defaults: return DefaultArgSpec(False, None) args_with_no_defaults = len(args) - len(defaults) if arg_index < args_with_no_defaults: return DefaultArgSpec(False, None) else: value = defaults[arg_index - args_with_no_defaults] if (type(value) is str): value = '"%s"' % value return DefaultArgSpec(True, value)
[ "def", "_get_default_arg", "(", "args", ",", "defaults", ",", "arg_index", ")", ":", "if", "not", "defaults", ":", "return", "DefaultArgSpec", "(", "False", ",", "None", ")", "args_with_no_defaults", "=", "len", "(", "args", ")", "-", "len", "(", "defaults", ")", "if", "arg_index", "<", "args_with_no_defaults", ":", "return", "DefaultArgSpec", "(", "False", ",", "None", ")", "else", ":", "value", "=", "defaults", "[", "arg_index", "-", "args_with_no_defaults", "]", "if", "(", "type", "(", "value", ")", "is", "str", ")", ":", "value", "=", "'\"%s\"'", "%", "value", "return", "DefaultArgSpec", "(", "True", ",", "value", ")" ]
Method that determines if an argument has default value or not, and if yes what is the default value for the argument :param args: array of arguments, eg: ['first_arg', 'second_arg', 'third_arg'] :param defaults: array of default values, eg: (42, 'something') :param arg_index: index of the argument in the argument array for which, this function checks if a default value exists or not. And if default value exists it would return the default value. Example argument: 1 :return: Tuple of whether there is a default or not, and if yes the default value, eg: for index 2 i.e. for "second_arg" this function returns (True, 42)
[ "Method", "that", "determines", "if", "an", "argument", "has", "default", "value", "or", "not", "and", "if", "yes", "what", "is", "the", "default", "value", "for", "the", "argument" ]
python
train
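Worked example (the helper and its DefaultArgSpec result type live in the same script; the Python-2-era inspect API matches what the generator targets):

import inspect

def f(first_arg, second_arg=42, third_arg='something'):
    pass

spec = inspect.getargspec(f)
_get_default_arg(spec.args, spec.defaults, 0)  # -> (False, None)
_get_default_arg(spec.args, spec.defaults, 1)  # -> (True, 42)
_get_default_arg(spec.args, spec.defaults, 2)  # -> (True, '"something"')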
geopython/OWSLib
owslib/swe/observation/waterml2.py
https://github.com/geopython/OWSLib/blob/96d47842401a129f1e86fa9f66dccef5a5a6872c/owslib/swe/observation/waterml2.py#L36-L41
def _parse_result(self): ''' Parse the result element of the observation type ''' if self.result is not None: result = self.result.find(nspv( "wml2:MeasurementTimeseries")) self.result = MeasurementTimeseries(result)
[ "def", "_parse_result", "(", "self", ")", ":", "if", "self", ".", "result", "is", "not", "None", ":", "result", "=", "self", ".", "result", ".", "find", "(", "nspv", "(", "\"wml2:MeasurementTimeseries\"", ")", ")", "self", ".", "result", "=", "MeasurementTimeseries", "(", "result", ")" ]
Parse the result element of the observation type
[ "Parse", "the", "result", "element", "of", "the", "observation", "type" ]
python
test
mosdef-hub/foyer
foyer/forcefield.py
https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L108-L143
def _topology_from_parmed(structure, non_element_types): """Convert a ParmEd Structure to an OpenMM Topology.""" topology = app.Topology() residues = dict() for pmd_residue in structure.residues: chain = topology.addChain() omm_residue = topology.addResidue(pmd_residue.name, chain) residues[pmd_residue] = omm_residue atoms = dict() # pmd.Atom: omm.Atom for pmd_atom in structure.atoms: name = pmd_atom.name if pmd_atom.name in non_element_types: element = non_element_types[pmd_atom.name] else: if (isinstance(pmd_atom.atomic_number, int) and pmd_atom.atomic_number != 0): element = elem.Element.getByAtomicNumber(pmd_atom.atomic_number) else: element = elem.Element.getBySymbol(pmd_atom.name) omm_atom = topology.addAtom(name, element, residues[pmd_atom.residue]) atoms[pmd_atom] = omm_atom omm_atom.bond_partners = [] for bond in structure.bonds: atom1 = atoms[bond.atom1] atom2 = atoms[bond.atom2] topology.addBond(atom1, atom2) atom1.bond_partners.append(atom2) atom2.bond_partners.append(atom1) if structure.box_vectors and np.any([x._value for x in structure.box_vectors]): topology.setPeriodicBoxVectors(structure.box_vectors) positions = structure.positions return topology, positions
[ "def", "_topology_from_parmed", "(", "structure", ",", "non_element_types", ")", ":", "topology", "=", "app", ".", "Topology", "(", ")", "residues", "=", "dict", "(", ")", "for", "pmd_residue", "in", "structure", ".", "residues", ":", "chain", "=", "topology", ".", "addChain", "(", ")", "omm_residue", "=", "topology", ".", "addResidue", "(", "pmd_residue", ".", "name", ",", "chain", ")", "residues", "[", "pmd_residue", "]", "=", "omm_residue", "atoms", "=", "dict", "(", ")", "# pmd.Atom: omm.Atom", "for", "pmd_atom", "in", "structure", ".", "atoms", ":", "name", "=", "pmd_atom", ".", "name", "if", "pmd_atom", ".", "name", "in", "non_element_types", ":", "element", "=", "non_element_types", "[", "pmd_atom", ".", "name", "]", "else", ":", "if", "(", "isinstance", "(", "pmd_atom", ".", "atomic_number", ",", "int", ")", "and", "pmd_atom", ".", "atomic_number", "!=", "0", ")", ":", "element", "=", "elem", ".", "Element", ".", "getByAtomicNumber", "(", "pmd_atom", ".", "atomic_number", ")", "else", ":", "element", "=", "elem", ".", "Element", ".", "getBySymbol", "(", "pmd_atom", ".", "name", ")", "omm_atom", "=", "topology", ".", "addAtom", "(", "name", ",", "element", ",", "residues", "[", "pmd_atom", ".", "residue", "]", ")", "atoms", "[", "pmd_atom", "]", "=", "omm_atom", "omm_atom", ".", "bond_partners", "=", "[", "]", "for", "bond", "in", "structure", ".", "bonds", ":", "atom1", "=", "atoms", "[", "bond", ".", "atom1", "]", "atom2", "=", "atoms", "[", "bond", ".", "atom2", "]", "topology", ".", "addBond", "(", "atom1", ",", "atom2", ")", "atom1", ".", "bond_partners", ".", "append", "(", "atom2", ")", "atom2", ".", "bond_partners", ".", "append", "(", "atom1", ")", "if", "structure", ".", "box_vectors", "and", "np", ".", "any", "(", "[", "x", ".", "_value", "for", "x", "in", "structure", ".", "box_vectors", "]", ")", ":", "topology", ".", "setPeriodicBoxVectors", "(", "structure", ".", "box_vectors", ")", "positions", "=", "structure", ".", "positions", "return", "topology", ",", "positions" ]
Convert a ParmEd Structure to an OpenMM Topology.
[ "Convert", "a", "ParmEd", "Structure", "to", "an", "OpenMM", "Topology", "." ]
python
train
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L255-L275
def copy_to_clipboard(self): """ Copies selected items to clipboard. """ tree = self.treeview # get the selected item: selection = tree.selection() if selection: self.filter_remove(remember=True) root = ET.Element('selection') for item in selection: node = self.tree_node_to_xml('', item) root.append(node) # python2 issue try: text = ET.tostring(root, encoding='unicode') except LookupError: text = ET.tostring(root, encoding='UTF-8') tree.clipboard_clear() tree.clipboard_append(text) self.filter_restore()
[ "def", "copy_to_clipboard", "(", "self", ")", ":", "tree", "=", "self", ".", "treeview", "# get the selected item:", "selection", "=", "tree", ".", "selection", "(", ")", "if", "selection", ":", "self", ".", "filter_remove", "(", "remember", "=", "True", ")", "root", "=", "ET", ".", "Element", "(", "'selection'", ")", "for", "item", "in", "selection", ":", "node", "=", "self", ".", "tree_node_to_xml", "(", "''", ",", "item", ")", "root", ".", "append", "(", "node", ")", "# python2 issue", "try", ":", "text", "=", "ET", ".", "tostring", "(", "root", ",", "encoding", "=", "'unicode'", ")", "except", "LookupError", ":", "text", "=", "ET", ".", "tostring", "(", "root", ",", "encoding", "=", "'UTF-8'", ")", "tree", ".", "clipboard_clear", "(", ")", "tree", ".", "clipboard_append", "(", "text", ")", "self", ".", "filter_restore", "(", ")" ]
Copies selected items to clipboard.
[ "Copies", "selected", "items", "to", "clipboard", "." ]
python
train
thombashi/tcconfig
tcconfig/traffic_control.py
https://github.com/thombashi/tcconfig/blob/9612dcd6ac9c072e7aa9eb702a225c559936bad3/tcconfig/traffic_control.py#L213-L226
def get_tc_device(self): """ Return a device name that associated network communication direction. """ if self.direction == TrafficDirection.OUTGOING: return self.device if self.direction == TrafficDirection.INCOMING: return self.ifb_device raise ParameterError( "unknown direction", expected=TrafficDirection.LIST, value=self.direction )
[ "def", "get_tc_device", "(", "self", ")", ":", "if", "self", ".", "direction", "==", "TrafficDirection", ".", "OUTGOING", ":", "return", "self", ".", "device", "if", "self", ".", "direction", "==", "TrafficDirection", ".", "INCOMING", ":", "return", "self", ".", "ifb_device", "raise", "ParameterError", "(", "\"unknown direction\"", ",", "expected", "=", "TrafficDirection", ".", "LIST", ",", "value", "=", "self", ".", "direction", ")" ]
Return a device name that associated network communication direction.
[ "Return", "a", "device", "name", "that", "associated", "network", "communication", "direction", "." ]
python
train
batiste/django-page-cms
pages/templatetags/pages_tags.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/templatetags/pages_tags.py#L535-L540
def do_imageplaceholder(parser, token): """ Method that parse the imageplaceholder template tag. """ name, params = parse_placeholder(parser, token) return ImagePlaceholderNode(name, **params)
[ "def", "do_imageplaceholder", "(", "parser", ",", "token", ")", ":", "name", ",", "params", "=", "parse_placeholder", "(", "parser", ",", "token", ")", "return", "ImagePlaceholderNode", "(", "name", ",", "*", "*", "params", ")" ]
Method that parse the imageplaceholder template tag.
[ "Method", "that", "parse", "the", "imageplaceholder", "template", "tag", "." ]
python
train
eisensheng/kaviar
kaviar/api.py
https://github.com/eisensheng/kaviar/blob/77ab934a3dd7b1cfabc0ec96acc0b8ed26edcb3f/kaviar/api.py#L54-L81
def kv_format_object(o, keys=None, separator=DEFAULT_SEPARATOR): """Formats an object's attributes. Useful for object representation implementation. Will skip methods or private attributes. For more details see :func:`kv_format`. :param o: Object to format. :param collections.Sequence keys: Explicit list of attributes to format. ``None`` means all public visible attribute for the given object will be formatted. :param str separator: Value between two pairs. :return: Formatted Object attributes. :rtype: :data:`six.text_type <six:six.text_type>` """ if keys is None: key_values = [] for k, v in ((x, getattr(o, x)) for x in sorted(dir(o))): if k.startswith('_') or isroutine(v): continue key_values += (k, v), else: key_values = ((k, getattr(o, k)) for k in keys) return kv_format_pairs(key_values, separator)
[ "def", "kv_format_object", "(", "o", ",", "keys", "=", "None", ",", "separator", "=", "DEFAULT_SEPARATOR", ")", ":", "if", "keys", "is", "None", ":", "key_values", "=", "[", "]", "for", "k", ",", "v", "in", "(", "(", "x", ",", "getattr", "(", "o", ",", "x", ")", ")", "for", "x", "in", "sorted", "(", "dir", "(", "o", ")", ")", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", "or", "isroutine", "(", "v", ")", ":", "continue", "key_values", "+=", "(", "k", ",", "v", ")", ",", "else", ":", "key_values", "=", "(", "(", "k", ",", "getattr", "(", "o", ",", "k", ")", ")", "for", "k", "in", "keys", ")", "return", "kv_format_pairs", "(", "key_values", ",", "separator", ")" ]
Formats an object's attributes. Useful for object representation implementation. Will skip methods or private attributes. For more details see :func:`kv_format`. :param o: Object to format. :param collections.Sequence keys: Explicit list of attributes to format. ``None`` means all public visible attribute for the given object will be formatted. :param str separator: Value between two pairs. :return: Formatted Object attributes. :rtype: :data:`six.text_type <six:six.text_type>`
[ "Formats", "an", "object", "s", "attributes", ".", "Useful", "for", "object", "representation", "implementation", ".", "Will", "skip", "methods", "or", "private", "attributes", "." ]
python
train
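Usage sketch; the exact separator and value rendering come from DEFAULT_SEPARATOR and kv_format_pairs, so the shown output is indicative only:

from kaviar.api import kv_format_object  # import path assumed

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

kv_format_object(Point(3, 4))               # e.g. u'x=3 y=4'
kv_format_object(Point(3, 4), keys=('y',))  # only the listed attribute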
spacetelescope/stsci.tools
lib/stsci/tools/irafutils.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/irafutils.py#L436-L475
def read(self, file, nbytes): """Read nbytes characters from file while running Tk mainloop""" if not capable.OF_GRAPHICS: raise RuntimeError("Cannot run this command without graphics") if isinstance(file, int): fd = file else: # Otherwise, assume we have Python file object try: fd = file.fileno() except: raise TypeError("file must be an integer or a filehandle/socket") init_tk_default_root() # harmless if already done self.widget = TKNTR._default_root if not self.widget: # no Tk widgets yet, so no need for mainloop # (shouldnt happen now with init_tk_default_root) s = [] while nbytes>0: snew = os.read(fd, nbytes) # returns bytes in PY3K if snew: if PY3K: snew = snew.decode('ascii','replace') s.append(snew) nbytes -= len(snew) else: # EOF -- just return what we have so far break return "".join(s) else: self.nbytes = nbytes self.value = [] self.widget.tk.createfilehandler(fd, TKNTR.READABLE | TKNTR.EXCEPTION, self._read) try: self.widget.mainloop() finally: self.widget.tk.deletefilehandler(fd) return "".join(self.value)
[ "def", "read", "(", "self", ",", "file", ",", "nbytes", ")", ":", "if", "not", "capable", ".", "OF_GRAPHICS", ":", "raise", "RuntimeError", "(", "\"Cannot run this command without graphics\"", ")", "if", "isinstance", "(", "file", ",", "int", ")", ":", "fd", "=", "file", "else", ":", "# Otherwise, assume we have Python file object", "try", ":", "fd", "=", "file", ".", "fileno", "(", ")", "except", ":", "raise", "TypeError", "(", "\"file must be an integer or a filehandle/socket\"", ")", "init_tk_default_root", "(", ")", "# harmless if already done", "self", ".", "widget", "=", "TKNTR", ".", "_default_root", "if", "not", "self", ".", "widget", ":", "# no Tk widgets yet, so no need for mainloop", "# (shouldnt happen now with init_tk_default_root)", "s", "=", "[", "]", "while", "nbytes", ">", "0", ":", "snew", "=", "os", ".", "read", "(", "fd", ",", "nbytes", ")", "# returns bytes in PY3K", "if", "snew", ":", "if", "PY3K", ":", "snew", "=", "snew", ".", "decode", "(", "'ascii'", ",", "'replace'", ")", "s", ".", "append", "(", "snew", ")", "nbytes", "-=", "len", "(", "snew", ")", "else", ":", "# EOF -- just return what we have so far", "break", "return", "\"\"", ".", "join", "(", "s", ")", "else", ":", "self", ".", "nbytes", "=", "nbytes", "self", ".", "value", "=", "[", "]", "self", ".", "widget", ".", "tk", ".", "createfilehandler", "(", "fd", ",", "TKNTR", ".", "READABLE", "|", "TKNTR", ".", "EXCEPTION", ",", "self", ".", "_read", ")", "try", ":", "self", ".", "widget", ".", "mainloop", "(", ")", "finally", ":", "self", ".", "widget", ".", "tk", ".", "deletefilehandler", "(", "fd", ")", "return", "\"\"", ".", "join", "(", "self", ".", "value", ")" ]
Read nbytes characters from file while running Tk mainloop
[ "Read", "nbytes", "characters", "from", "file", "while", "running", "Tk", "mainloop" ]
python
train
zomux/deepy
deepy/networks/network.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/networks/network.py#L251-L282
def load_params(self, path, exclude_free_params=False): """ Load parameters from file. """ if not os.path.exists(path): return; logging.info("loading parameters from %s" % path) # Decide which parameters to load if exclude_free_params: params_to_load = self.parameters else: params_to_load = self.all_parameters # Load parameters if path.endswith(".gz"): opener = gzip.open if path.lower().endswith('.gz') else open handle = opener(path, 'rb') saved_params = pickle.load(handle) handle.close() # Write parameters for target, source in zip(params_to_load, saved_params): logging.info('%s: setting value %s', target.name, source.shape) target.set_value(source) elif path.endswith(".npz"): arrs = np.load(path) # Write parameters for target, idx in zip(params_to_load, range(len(arrs.keys()))): source = arrs['arr_%d' % idx] logging.info('%s: setting value %s', target.name, source.shape) target.set_value(source) else: raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path) self.train_logger.load(path)
[ "def", "load_params", "(", "self", ",", "path", ",", "exclude_free_params", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "logging", ".", "info", "(", "\"loading parameters from %s\"", "%", "path", ")", "# Decide which parameters to load", "if", "exclude_free_params", ":", "params_to_load", "=", "self", ".", "parameters", "else", ":", "params_to_load", "=", "self", ".", "all_parameters", "# Load parameters", "if", "path", ".", "endswith", "(", "\".gz\"", ")", ":", "opener", "=", "gzip", ".", "open", "if", "path", ".", "lower", "(", ")", ".", "endswith", "(", "'.gz'", ")", "else", "open", "handle", "=", "opener", "(", "path", ",", "'rb'", ")", "saved_params", "=", "pickle", ".", "load", "(", "handle", ")", "handle", ".", "close", "(", ")", "# Write parameters", "for", "target", ",", "source", "in", "zip", "(", "params_to_load", ",", "saved_params", ")", ":", "logging", ".", "info", "(", "'%s: setting value %s'", ",", "target", ".", "name", ",", "source", ".", "shape", ")", "target", ".", "set_value", "(", "source", ")", "elif", "path", ".", "endswith", "(", "\".npz\"", ")", ":", "arrs", "=", "np", ".", "load", "(", "path", ")", "# Write parameters", "for", "target", ",", "idx", "in", "zip", "(", "params_to_load", ",", "range", "(", "len", "(", "arrs", ".", "keys", "(", ")", ")", ")", ")", ":", "source", "=", "arrs", "[", "'arr_%d'", "%", "idx", "]", "logging", ".", "info", "(", "'%s: setting value %s'", ",", "target", ".", "name", ",", "source", ".", "shape", ")", "target", ".", "set_value", "(", "source", ")", "else", ":", "raise", "Exception", "(", "\"File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'\"", "%", "path", ")", "self", ".", "train_logger", ".", "load", "(", "path", ")" ]
Load parameters from file.
[ "Load", "parameters", "from", "file", "." ]
python
test
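Usage sketch (assumes `network` is a constructed deepy network whose parameter shapes match the checkpoint):

network.load_params("model.npz")  # NumPy archive, parameters matched by order
# skip free (non-trainable) parameters when restoring a pickled checkpoint
network.load_params("model.gz", exclude_free_params=True)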
pandas-dev/pandas
pandas/util/_doctools.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L43-L101
def plot(self, left, right, labels=None, vertical=True): """ Plot left / right DataFrames in specified layout. Parameters ---------- left : list of DataFrames before operation is applied right : DataFrame of operation result labels : list of str to be drawn as titles of left DataFrames vertical : bool If True, use vertical layout. If False, use horizontal layout. """ import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec if not isinstance(left, list): left = [left] left = [self._conv(l) for l in left] right = self._conv(right) hcells, vcells = self._get_cells(left, right, vertical) if vertical: figsize = self.cell_width * hcells, self.cell_height * vcells else: # include margin for titles figsize = self.cell_width * hcells, self.cell_height * vcells fig = plt.figure(figsize=figsize) if vertical: gs = gridspec.GridSpec(len(left), hcells) # left max_left_cols = max(self._shape(l)[1] for l in left) max_left_rows = max(self._shape(l)[0] for l in left) for i, (l, label) in enumerate(zip(left, labels)): ax = fig.add_subplot(gs[i, 0:max_left_cols]) self._make_table(ax, l, title=label, height=1.0 / max_left_rows) # right ax = plt.subplot(gs[:, max_left_cols:]) self._make_table(ax, right, title='Result', height=1.05 / vcells) fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) else: max_rows = max(self._shape(df)[0] for df in left + [right]) height = 1.0 / np.max(max_rows) gs = gridspec.GridSpec(1, hcells) # left i = 0 for l, label in zip(left, labels): sp = self._shape(l) ax = fig.add_subplot(gs[0, i:i + sp[1]]) self._make_table(ax, l, title=label, height=height) i += sp[1] # right ax = plt.subplot(gs[0, i:]) self._make_table(ax, right, title='Result', height=height) fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) return fig
[ "def", "plot", "(", "self", ",", "left", ",", "right", ",", "labels", "=", "None", ",", "vertical", "=", "True", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "gridspec", "as", "gridspec", "if", "not", "isinstance", "(", "left", ",", "list", ")", ":", "left", "=", "[", "left", "]", "left", "=", "[", "self", ".", "_conv", "(", "l", ")", "for", "l", "in", "left", "]", "right", "=", "self", ".", "_conv", "(", "right", ")", "hcells", ",", "vcells", "=", "self", ".", "_get_cells", "(", "left", ",", "right", ",", "vertical", ")", "if", "vertical", ":", "figsize", "=", "self", ".", "cell_width", "*", "hcells", ",", "self", ".", "cell_height", "*", "vcells", "else", ":", "# include margin for titles", "figsize", "=", "self", ".", "cell_width", "*", "hcells", ",", "self", ".", "cell_height", "*", "vcells", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "if", "vertical", ":", "gs", "=", "gridspec", ".", "GridSpec", "(", "len", "(", "left", ")", ",", "hcells", ")", "# left", "max_left_cols", "=", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", ")", "max_left_rows", "=", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", ")", "for", "i", ",", "(", "l", ",", "label", ")", "in", "enumerate", "(", "zip", "(", "left", ",", "labels", ")", ")", ":", "ax", "=", "fig", ".", "add_subplot", "(", "gs", "[", "i", ",", "0", ":", "max_left_cols", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "l", ",", "title", "=", "label", ",", "height", "=", "1.0", "/", "max_left_rows", ")", "# right", "ax", "=", "plt", ".", "subplot", "(", "gs", "[", ":", ",", "max_left_cols", ":", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "right", ",", "title", "=", "'Result'", ",", "height", "=", "1.05", "/", "vcells", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.9", ",", "bottom", "=", "0.05", ",", "left", "=", "0.05", ",", "right", "=", "0.95", ")", "else", ":", "max_rows", "=", "max", "(", "self", ".", "_shape", "(", "df", ")", "[", "0", "]", "for", "df", "in", "left", "+", "[", "right", "]", ")", "height", "=", "1.0", "/", "np", ".", "max", "(", "max_rows", ")", "gs", "=", "gridspec", ".", "GridSpec", "(", "1", ",", "hcells", ")", "# left", "i", "=", "0", "for", "l", ",", "label", "in", "zip", "(", "left", ",", "labels", ")", ":", "sp", "=", "self", ".", "_shape", "(", "l", ")", "ax", "=", "fig", ".", "add_subplot", "(", "gs", "[", "0", ",", "i", ":", "i", "+", "sp", "[", "1", "]", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "l", ",", "title", "=", "label", ",", "height", "=", "height", ")", "i", "+=", "sp", "[", "1", "]", "# right", "ax", "=", "plt", ".", "subplot", "(", "gs", "[", "0", ",", "i", ":", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "right", ",", "title", "=", "'Result'", ",", "height", "=", "height", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.85", ",", "bottom", "=", "0.05", ",", "left", "=", "0.05", ",", "right", "=", "0.95", ")", "return", "fig" ]
Plot left / right DataFrames in specified layout.

Parameters
----------
left : list of DataFrame
    DataFrames before the operation is applied.
right : DataFrame
    DataFrame holding the operation result.
labels : list of str, optional
    Titles to be drawn above the left DataFrames.
vertical : bool
    If True, use vertical layout. If False, use horizontal layout.
[ "Plot", "left", "/", "right", "DataFrames", "in", "specified", "layout", "." ]
python
train
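A minimal usage sketch for the plot method above. The TablePlotter class name and its no-argument construction are assumptions (the record only shows the method, which reads cell_width and cell_height from its instance):

import pandas as pd
from pandas.util import _doctools  # assumed home of the plotting helper

left = [pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [3, 4]})]
right = pd.concat(left)

plotter = _doctools.TablePlotter()  # hypothetical: exposes plot() above
fig = plotter.plot(left, right, labels=["df1", "df2"], vertical=True)
fig.savefig("concat.png")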
projectshift/shift-schema
shiftschema/property.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/property.py#L135-L144
def validate_with_schema(self, model=None, context=None): """ Perform model validation with schema""" if self._schema is None or model is None: return result = self._schema.validate( model=model, context=context if self.use_context else None ) return result
[ "def", "validate_with_schema", "(", "self", ",", "model", "=", "None", ",", "context", "=", "None", ")", ":", "if", "self", ".", "_schema", "is", "None", "or", "model", "is", "None", ":", "return", "result", "=", "self", ".", "_schema", ".", "validate", "(", "model", "=", "model", ",", "context", "=", "context", "if", "self", ".", "use_context", "else", "None", ")", "return", "result" ]
Perform model validation with schema
[ "Perform", "model", "validation", "with", "schema" ]
python
train
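A minimal usage sketch; the Property construction, PersonSchema, and result handling are assumptions about the surrounding shiftschema API — only validate_with_schema itself comes from the record:

prop = Property()                      # hypothetical property holding a schema
prop._schema = PersonSchema()          # assumed nested schema instance
result = prop.validate_with_schema(model=person_model)
if result is not None and not result:  # assumed: result is falsy when invalid
    print("nested model failed validation")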
dead-beef/markovchain
markovchain/parser.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/parser.py#L127-L140
def reset(self, state_size_changed=False): """Reset parser state. Parameters ---------- state_size_changed : `bool`, optional `True` if maximum state size changed (default: `False`). """ if state_size_changed: self.state = deque(repeat('', self.state_size), maxlen=self.state_size) else: self.state.extend(repeat('', self.state_size)) self.end = True
[ "def", "reset", "(", "self", ",", "state_size_changed", "=", "False", ")", ":", "if", "state_size_changed", ":", "self", ".", "state", "=", "deque", "(", "repeat", "(", "''", ",", "self", ".", "state_size", ")", ",", "maxlen", "=", "self", ".", "state_size", ")", "else", ":", "self", ".", "state", ".", "extend", "(", "repeat", "(", "''", ",", "self", ".", "state_size", ")", ")", "self", ".", "end", "=", "True" ]
Reset parser state. Parameters ---------- state_size_changed : `bool`, optional `True` if maximum state size changed (default: `False`).
[ "Reset", "parser", "state", "." ]
python
train
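An illustrative call sequence; the Parser constructor is an assumption, but the deque behaviour follows directly from the code above:

parser = Parser(state_size=2)           # hypothetical constructor
parser.reset()                          # pad the state with '' between inputs
parser.state_size = 3
parser.reset(state_size_changed=True)   # rebuild the deque with the new maxlen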
adamzap/landslide
landslide/generator.py
https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L224-L232
def get_template_file(self): """ Retrieves Jinja2 template file path. """ if os.path.exists(os.path.join(self.theme_dir, 'base.html')): return os.path.join(self.theme_dir, 'base.html') default_dir = os.path.join(THEMES_DIR, 'default') if not os.path.exists(os.path.join(default_dir, 'base.html')): raise IOError(u"Cannot find base.html in default theme") return os.path.join(default_dir, 'base.html')
[ "def", "get_template_file", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "theme_dir", ",", "'base.html'", ")", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "theme_dir", ",", "'base.html'", ")", "default_dir", "=", "os", ".", "path", ".", "join", "(", "THEMES_DIR", ",", "'default'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "default_dir", ",", "'base.html'", ")", ")", ":", "raise", "IOError", "(", "u\"Cannot find base.html in default theme\"", ")", "return", "os", ".", "path", ".", "join", "(", "default_dir", ",", "'base.html'", ")" ]
Retrieves Jinja2 template file path.
[ "Retrieves", "Jinja2", "template", "file", "path", "." ]
python
train
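A sketch of the fallback behaviour; the Generator construction is simplified and its arguments are assumptions:

gen = Generator("presentation.md", theme="mytheme")  # hypothetical args
template = gen.get_template_file()
# -> <theme_dir>/base.html if the theme ships one, otherwise the bundled
#    default theme's base.html; IOError if even the default is missing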
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/data_layers/filtering/alchemy.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/data_layers/filtering/alchemy.py#L158-L168
def related_schema(self): """Get the related schema of a relationship field :return Schema: the related schema """ relationship_field = self.name if relationship_field not in get_relationships(self.schema): raise InvalidFilters("{} has no relationship attribute {}".format(self.schema.__name__, relationship_field)) return self.schema._declared_fields[relationship_field].schema.__class__
[ "def", "related_schema", "(", "self", ")", ":", "relationship_field", "=", "self", ".", "name", "if", "relationship_field", "not", "in", "get_relationships", "(", "self", ".", "schema", ")", ":", "raise", "InvalidFilters", "(", "\"{} has no relationship attribute {}\"", ".", "format", "(", "self", ".", "schema", ".", "__name__", ",", "relationship_field", ")", ")", "return", "self", ".", "schema", ".", "_declared_fields", "[", "relationship_field", "]", ".", "schema", ".", "__class__" ]
Get the related schema of a relationship field

:return Schema: the related schema
[ "Get", "the", "related", "schema", "of", "a", "relationship", "field" ]
python
train
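A sketch of how the property resolves a schema; the node wrapper and schema classes are illustrative, not taken from the record:

node = RelationshipNode(schema=ComputerSchema, name="owner")  # hypothetical
related_cls = node.related_schema   # -> e.g. the PersonSchema class
# asking for a name that is not a relationship raises InvalidFilters instead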
ltalirz/aiida-phtools
examples/cli.py
https://github.com/ltalirz/aiida-phtools/blob/acec3339425fe92d3f55e725a199123de9a1febc/examples/cli.py#L10-L59
def main(codelabel, submit):
    """Command line interface for testing and submitting calculations.

    Usage: ./cli.py CODELABEL [--submit]

    CODELABEL from "verdi code setup"

    This script extends submit.py, adding flexibility in the selected
    code/computer.
    """
    code = Code.get_from_string(codelabel)

    # set up calculation
    calc = code.new_calc()
    calc.label = "aiida_phtools example calculation"
    calc.description = "Computes proper pore surface as needed for persistence homology calculation"
    calc.set_max_wallclock_seconds(1 * 60)
    calc.set_withmpi(False)
    calc.set_resources({"num_machines": 1})

    # Prepare input parameters
    PoreSurfaceParameters = DataFactory('phtools.surface')
    d = {
        'accessible_surface_area': 300.0,
        'target_volume': 40e3,
        'sampling_method': 'random',
    }
    parameters = PoreSurfaceParameters(dict=d)
    calc.use_parameters(parameters)

    SinglefileData = DataFactory('singlefile')
    this_dir = os.path.dirname(os.path.realpath(__file__))
    structure = SinglefileData(file=os.path.join(this_dir, 'HKUST-1.cssr'))
    calc.use_structure(structure)

    surface_sample = SinglefileData(file=os.path.join(this_dir, 'HKUST-1.vsa'))
    calc.use_surface_sample(surface_sample)

    if submit:
        calc.store_all()
        calc.submit()
        print("submitted calculation; calc=Calculation(uuid='{}') # ID={}"
              .format(calc.uuid, calc.dbnode.pk))
    else:
        subfolder, script_filename = calc.submit_test()
        path = os.path.relpath(subfolder.abspath)
        print("submission test successful")
        print("Find remote folder in {}".format(path))
        print("In order to actually submit, add '--submit'")
[ "def", "main", "(", "codelabel", ",", "submit", ")", ":", "code", "=", "Code", ".", "get_from_string", "(", "codelabel", ")", "# set up calculation", "calc", "=", "code", ".", "new_calc", "(", ")", "calc", ".", "label", "=", "\"aiida_phtools example calculation\"", "calc", ".", "description", "=", "\"Computes proper pore surface as needed for persistence homology calculation\"", "calc", ".", "set_max_wallclock_seconds", "(", "1", "*", "60", ")", "calc", ".", "set_withmpi", "(", "False", ")", "calc", ".", "set_resources", "(", "{", "\"num_machines\"", ":", "1", "}", ")", "# Prepare input parameters", "PoreSurfaceParameters", "=", "DataFactory", "(", "'phtools.surface'", ")", "d", "=", "{", "'accessible_surface_area'", ":", "300.0", ",", "'target_volume'", ":", "40e3", ",", "'sampling_method'", ":", "'random'", ",", "}", "parameters", "=", "PoreSurfaceParameters", "(", "dict", "=", "d", ")", "calc", ".", "use_parameters", "(", "parameters", ")", "SinglefileData", "=", "DataFactory", "(", "'singlefile'", ")", "this_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "structure", "=", "SinglefileData", "(", "file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "'HKUST-1.cssr'", ")", ")", "calc", ".", "use_structure", "(", "structure", ")", "surface_sample", "=", "SinglefileData", "(", "file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "'HKUST-1.vsa'", ")", ")", "calc", ".", "use_surface_sample", "(", "surface_sample", ")", "if", "submit", ":", "calc", ".", "store_all", "(", ")", "calc", ".", "submit", "(", ")", "print", "(", "\"submitted calculation; calc=Calculation(uuid='{}') # ID={}\"", ".", "format", "(", "calc", ".", "uuid", ",", "calc", ".", "dbnode", ".", "pk", ")", ")", "else", ":", "subfolder", ",", "script_filename", "=", "calc", ".", "submit_test", "(", ")", "path", "=", "os", ".", "path", ".", "relpath", "(", "subfolder", ".", "abspath", ")", "print", "(", "\"submission test successful\"", ")", "print", "(", "\"Find remote folder in {}\"", ".", "format", "(", "path", ")", ")", "print", "(", "\"In order to actually submit, add '--submit'\"", ")" ]
Command line interface for testing and submitting calculations.

Usage: ./cli.py CODELABEL [--submit]

CODELABEL from "verdi code setup"

This script extends submit.py, adding flexibility in the selected
code/computer.
[ "Command", "line", "interface", "for", "testing", "and", "submitting", "calculations", "." ]
python
train
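A non-CLI sketch of the same dry-run path, using only calls that appear in the record; the code label is a placeholder that must exist in the AiiDA database:

code = Code.get_from_string("phtools@localhost")  # placeholder label
calc = code.new_calc()
calc.set_resources({"num_machines": 1})
subfolder, script = calc.submit_test()            # dry run, nothing stored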
bcbio/bcbio-nextgen
bcbio/structural/wham.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/wham.py#L19-L42
def run(items, background=None):
    """Detect copy number variations from a batched set of samples using WHAM.
    """
    if not background: background = []
    background_bams = []
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    if paired:
        inputs = [paired.tumor_data]
        if paired.normal_bam:
            background = [paired.normal_data]
            background_bams = [paired.normal_bam]
    else:
        assert not background
        inputs, background = shared.find_case_control(items)
        background_bams = [x["align_bam"] for x in background]
    orig_vcf = _run_wham(inputs, background_bams)
    out = []
    for data in inputs:
        if "sv" not in data:
            data["sv"] = []
        final_vcf = shared.finalize_sv(orig_vcf, data, items)
        data["sv"].append({"variantcaller": "wham",
                           "vrn_file": final_vcf})
        out.append(data)
    return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "background_bams", "=", "[", "]", "paired", "=", "vcfutils", ".", "get_paired_bams", "(", "[", "x", "[", "\"align_bam\"", "]", "for", "x", "in", "items", "]", ",", "items", ")", "if", "paired", ":", "inputs", "=", "[", "paired", ".", "tumor_data", "]", "if", "paired", ".", "normal_bam", ":", "background", "=", "[", "paired", ".", "normal_data", "]", "background_bams", "=", "[", "paired", ".", "normal_bam", "]", "else", ":", "assert", "not", "background", "inputs", ",", "background", "=", "shared", ".", "find_case_control", "(", "items", ")", "background_bams", "=", "[", "x", "[", "\"align_bam\"", "]", "for", "x", "in", "background", "]", "orig_vcf", "=", "_run_wham", "(", "inputs", ",", "background_bams", ")", "out", "=", "[", "]", "for", "data", "in", "inputs", ":", "if", "\"sv\"", "not", "in", "data", ":", "data", "[", "\"sv\"", "]", "=", "[", "]", "final_vcf", "=", "shared", ".", "finalize_sv", "(", "orig_vcf", ",", "data", ",", "items", ")", "data", "[", "\"sv\"", "]", ".", "append", "(", "{", "\"variantcaller\"", ":", "\"wham\"", ",", "\"vrn_file\"", ":", "final_vcf", "}", ")", "out", ".", "append", "(", "data", ")", "return", "out" ]
Detect copy number variations from a batched set of samples using WHAM.
[ "Detect", "copy", "number", "variations", "from", "a", "batched", "set", "of", "samples", "using", "WHAM", "." ]
python
train
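A heavily simplified sketch of the expected inputs; real bcbio sample dictionaries carry far more configuration than shown here:

tumor = {"align_bam": "tumor.bam", "metadata": {"phenotype": "tumor"}}
normal = {"align_bam": "normal.bam", "metadata": {"phenotype": "normal"}}
for data in run([tumor, normal]):
    for sv in data["sv"]:
        print(sv["variantcaller"], sv["vrn_file"])  # "wham", final VCF path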
jstitch/MambuPy
MambuPy/rest/mambubranch.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambubranch.py#L78-L106
def convertDict2Attrs(self, *args, **kwargs):
        """The trick for iterable Mambu Objects comes here:

        You iterate over each element of the list responded by Mambu,
        create a MambuBranch object for each one, initializing them one
        at a time, and replace each element of the attrs attribute
        (which originally holds plain dictionaries) with the
        MambuBranch just created.

.. todo:: pass a valid (perhaps default) urlfunc, and its
          corresponding id as entid, to each MambuBranch, telling
          MambuStruct not to connect() by default. It's desirable to
          connect at any later moment to refresh some element in the
          list.
        """
        for n,b in enumerate(self.attrs):
            # ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
            try:
                params = self.params
            except AttributeError as aerr:
                params = {}
            kwargs.update(params)
            try:
                branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)
            except AttributeError as ae:
                self.mambubranchclass = MambuBranch
                branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)

            branch.init(b, *args, **kwargs)
            self.attrs[n] = branch
[ "def", "convertDict2Attrs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "n", ",", "b", "in", "enumerate", "(", "self", ".", "attrs", ")", ":", "# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!", "try", ":", "params", "=", "self", ".", "params", "except", "AttributeError", "as", "aerr", ":", "params", "=", "{", "}", "kwargs", ".", "update", "(", "params", ")", "try", ":", "branch", "=", "self", ".", "mambubranchclass", "(", "urlfunc", "=", "None", ",", "entid", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "self", ".", "mambubranchclass", "=", "MambuBranch", "branch", "=", "self", ".", "mambubranchclass", "(", "urlfunc", "=", "None", ",", "entid", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", "branch", ".", "init", "(", "b", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "attrs", "[", "n", "]", "=", "branch" ]
The trick for iterable Mambu Objects comes here:

You iterate over each element of the list responded by Mambu,
create a MambuBranch object for each one, initializing them one
at a time, and replace each element of the attrs attribute
(which originally holds plain dictionaries) with the
MambuBranch just created.

.. todo:: pass a valid (perhaps default) urlfunc, and its
          corresponding id as entid, to each MambuBranch, telling
          MambuStruct not to connect() by default. It's desirable to
          connect at any later moment to refresh some element in the
          list.
[ "The", "trick", "for", "iterable", "Mambu", "Objects", "comes", "here", ":" ]
python
train
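An illustrative flow; the MambuBranches iterable and its constructor are assumptions based on MambuPy naming conventions, and convertDict2Attrs is normally invoked internally after a connect():

branches = MambuBranches(urlfunc=get_branches_url)  # hypothetical setup
branches.convertDict2Attrs()
for branch in branches.attrs:
    print(branch["id"], branch["name"])             # each is now a MambuBranch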
pyca/pynacl
src/nacl/pwhash/__init__.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/pwhash/__init__.py#L58-L75
def verify(password_hash, password):
    """
    Takes a modular crypt encoded stored password hash derived using one
    of the algorithms supported by `libsodium` and checks if the user provided
    password will hash to the same string when using the parameters saved
    in the stored hash
    """
    if password_hash.startswith(argon2id.STRPREFIX):
        return argon2id.verify(password_hash, password)
    elif password_hash.startswith(argon2i.STRPREFIX):
        # libsodium's crypto_pwhash_str_verify handles both argon2
        # variants, so argon2id.verify also checks argon2i hashes
        return argon2id.verify(password_hash, password)
    elif password_hash.startswith(scrypt.STRPREFIX):
        return scrypt.verify(password_hash, password)
    else:
        raise CryptPrefixError("given password_hash is not "
                               "in a supported format")
[ "def", "verify", "(", "password_hash", ",", "password", ")", ":", "if", "password_hash", ".", "startswith", "(", "argon2id", ".", "STRPREFIX", ")", ":", "return", "argon2id", ".", "verify", "(", "password_hash", ",", "password", ")", "elif", "password_hash", ".", "startswith", "(", "argon2i", ".", "STRPREFIX", ")", ":", "return", "argon2id", ".", "verify", "(", "password_hash", ",", "password", ")", "elif", "password_hash", ".", "startswith", "(", "scrypt", ".", "STRPREFIX", ")", ":", "return", "scrypt", ".", "verify", "(", "password_hash", ",", "password", ")", "else", ":", "raise", "(", "CryptPrefixError", "(", "\"given password_hash is not \"", "\"in a supported format\"", ")", ")" ]
Takes a modular crypt encoded stored password hash derived using one of the algorithms supported by `libsodium` and checks if the user provided password will hash to the same string when using the parameters saved in the stored hash
[ "Takes", "a", "modular", "crypt", "encoded", "stored", "password", "hash", "derived", "using", "one", "of", "the", "algorithms", "supported", "by", "libsodium", "and", "checks", "if", "the", "user", "provided", "password", "will", "hash", "to", "the", "same", "string", "when", "using", "the", "parameters", "saved", "in", "the", "stored", "hash" ]
python
train
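A round-trip example; argon2id.str and this verify function are part of pynacl's public pwhash API:

import nacl.pwhash

pw = b"correct horse battery staple"
stored = nacl.pwhash.argon2id.str(pw)   # modular-crypt argon2id hash
assert nacl.pwhash.verify(stored, pw)   # True; raises InvalidkeyError on mismatch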
fedora-python/pyp2rpm
pyp2rpm/utils.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/utils.py#L119-L122
def unique_deps(deps):
    """Remove duplicates from a list of dependency lists"""
    deps.sort()
    return list(k for k, _ in itertools.groupby(deps))
[ "def", "unique_deps", "(", "deps", ")", ":", "deps", ".", "sort", "(", ")", "return", "list", "(", "k", "for", "k", ",", "_", "in", "itertools", ".", "groupby", "(", "deps", ")", ")" ]
Remove duplicates from a list of dependency lists
[ "Remove", "duplicates", "from", "a", "list", "of", "dependency", "lists" ]
python
train
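A quick behavioural check; because the function sorts in place before groupby, all duplicates become adjacent and are collapsed:

deps = [["python3-setuptools"], ["python3-pytest"], ["python3-setuptools"]]
print(unique_deps(deps))
# [['python3-pytest'], ['python3-setuptools']]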
frmdstryr/enamlx
enamlx/qt/qt_graphics_view.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_graphics_view.py#L486-L507
def get_action(self, create=False): """ Get the shared widget action for this widget. This API is used to support widgets in tool bars and menus. Parameters ---------- create : bool, optional Whether to create the action if it doesn't already exist. The default is False. Returns ------- result : QWidgetAction or None The cached widget action or None, depending on arguments. """ action = self._widget_action if action is None and create: action = self._widget_action = QWidgetAction(None) action.setDefaultWidget(self.widget) return action
[ "def", "get_action", "(", "self", ",", "create", "=", "False", ")", ":", "action", "=", "self", ".", "_widget_action", "if", "action", "is", "None", "and", "create", ":", "action", "=", "self", ".", "_widget_action", "=", "QWidgetAction", "(", "None", ")", "action", ".", "setDefaultWidget", "(", "self", ".", "widget", ")", "return", "action" ]
Get the shared widget action for this widget. This API is used to support widgets in tool bars and menus. Parameters ---------- create : bool, optional Whether to create the action if it doesn't already exist. The default is False. Returns ------- result : QWidgetAction or None The cached widget action or None, depending on arguments.
[ "Get", "the", "shared", "widget", "action", "for", "this", "widget", "." ]
python
train
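A sketch of wiring the shared action into a Qt toolbar; the proxy and toolbar instances are illustrative:

action = graphics_view.get_action(create=True)  # hypothetical proxy instance
toolbar.addAction(action)  # the QWidgetAction embeds the underlying widget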
lltk/lltk
lltk/scraping.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/scraping.py#L262-L269
def _first(self, tag):
		''' Returns the first element with the required POS-tag. '''

		self.getelements()
		for element in self.elements:
			if tag in self.pos(element):
				return element
		return None
[ "def", "_first", "(", "self", ",", "tag", ")", ":", "self", ".", "getelements", "(", ")", "for", "element", "in", "self", ".", "elements", ":", "if", "tag", "in", "self", ".", "pos", "(", "element", ")", ":", "return", "element", "return", "None" ]
Returns the first element with the required POS-tag.
[ "Returns", "the", "first", "element", "with", "the", "required", "POS", "-", "tag", "." ]
python
train
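An illustrative lookup with a Penn Treebank-style tag; the scraper instance is an assumption:

noun = scraper._first("NN")   # hypothetical scraper; first noun element
if noun is None:
    print("no element with the requested POS-tag")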
Patreon/patreon-python
examples/flask/my_site/models/tables/db_wrapper.py
https://github.com/Patreon/patreon-python/blob/80c83f018d6bd93b83c188baff727c5e77e01ce6/examples/flask/my_site/models/tables/db_wrapper.py#L1-L7
def build_if_needed(db):
    """Little helper function for creating tables with SQLAlchemy on SQLite"""
    if len(db.engine.table_names()) == 0:
        # import all classes here
        from my_site.models.tables.user import User
        db.create_all()
[ "def", "build_if_needed", "(", "db", ")", ":", "if", "len", "(", "db", ".", "engine", ".", "table_names", "(", ")", ")", "==", "0", ":", "# import all classes here", "from", "my_site", ".", "models", ".", "tables", ".", "user", "import", "User", "db", ".", "create_all", "(", ")" ]
Little helper function for creating tables with SQLAlchemy on SQLite
[ "Little", "helper", "function", "for", "creating", "tables", "with", "SQLAlchemy", "on", "SQLite" ]
python
train
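Typical start-up wiring, assuming the older Flask-SQLAlchemy API that the example targets (db.engine.table_names() is deprecated in SQLAlchemy 1.4+):

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///my_site.db"
db = SQLAlchemy(app)

build_if_needed(db)   # creates all tables only on a fresh database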