Dataset columns:

  repo              string, 7 to 55 chars
  path              string, 4 to 223 chars
  url               string, 87 to 315 chars
  code              string, 75 to 104k chars
  code_tokens       list
  docstring         string, 1 to 46.9k chars
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91 to 980
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L550-L589
def label_to_node(self, selection='leaves'):
    '''Return a dictionary mapping labels (strings) to ``Node`` objects

    * If ``selection`` is ``"all"``, the dictionary will contain all nodes
    * If ``selection`` is ``"leaves"``, the dictionary will only contain leaves
    * If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes
    * If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection``
    * If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained

    Args:
        ``selection`` (``str`` or ``set``): The selection of nodes to get

        * ``"all"`` to select all nodes
        * ``"leaves"`` to select leaves
        * ``"internal"`` to select internal nodes
        * A ``set`` of labels to specify nodes to select

    Returns:
        ``dict``: Dictionary mapping labels to the corresponding nodes
    '''
    # reject anything that is not a set/list or one of the three keyword strings
    # (the upstream or-chain `not (selection != 'all' or ...)` was always False,
    # so invalid strings slipped through; a membership test does what was intended)
    if not isinstance(selection, (set, list)) and (not isinstance(selection, str) or selection not in ('all', 'leaves', 'internal')):
        raise RuntimeError('"selection" must be one of the strings "all", "leaves", or "internal", or it must be a set containing Node labels')
    if isinstance(selection, str):
        selection = selection[0]  # 'a', 'l', or 'i'
    elif isinstance(selection, list):
        selection = set(selection)
    label_to_node = dict()
    for node in self.traverse_preorder():
        if selection == 'a' or (selection == 'i' and not node.is_leaf()) or (selection == 'l' and node.is_leaf()) or str(node) in selection:
            label_to_node[str(node)] = node
    if not isinstance(selection, str) and len(label_to_node) != len(selection):
        warn("Not all given labels exist in the tree")
    return label_to_node
[ "def", "label_to_node", "(", "self", ",", "selection", "=", "'leaves'", ")", ":", "if", "not", "isinstance", "(", "selection", ",", "set", ")", "and", "not", "isinstance", "(", "selection", ",", "list", ")", "and", "(", "not", "isinstance", "(", "selection", ",", "str", ")", "or", "not", "(", "selection", "!=", "'all'", "or", "selection", "!=", "'leaves'", "or", "selection", "!=", "'internal'", ")", ")", ":", "raise", "RuntimeError", "(", "'\"selection\" must be one of the strings \"all\", \"leaves\", or \"internal\", or it must be a set containing Node labels'", ")", "if", "isinstance", "(", "selection", ",", "str", ")", ":", "selection", "=", "selection", "[", "0", "]", "elif", "isinstance", "(", "selection", ",", "list", ")", ":", "selection", "=", "set", "(", "selection", ")", "label_to_node", "=", "dict", "(", ")", "for", "node", "in", "self", ".", "traverse_preorder", "(", ")", ":", "if", "selection", "==", "'a'", "or", "(", "selection", "==", "'i'", "and", "not", "node", ".", "is_leaf", "(", ")", ")", "or", "(", "selection", "==", "'l'", "and", "node", ".", "is_leaf", "(", ")", ")", "or", "str", "(", "node", ")", "in", "selection", ":", "label_to_node", "[", "str", "(", "node", ")", "]", "=", "node", "if", "not", "isinstance", "(", "selection", ",", "str", ")", "and", "len", "(", "label_to_node", ")", "!=", "len", "(", "selection", ")", ":", "warn", "(", "\"Not all given labels exist in the tree\"", ")", "return", "label_to_node" ]
Return a dictionary mapping labels (strings) to ``Node`` objects * If ``selection`` is ``"all"``, the dictionary will contain all nodes * If ``selection`` is ``"leaves"``, the dictionary will only contain leaves * If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes * If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection`` * If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained Args: ``selection`` (``str`` or ``set``): The selection of nodes to get * ``"all"`` to select all nodes * ``"leaves"`` to select leaves * ``"internal"`` to select internal nodes * A ``set`` of labels to specify nodes to select Returns: ``dict``: Dictionary mapping labels to the corresponding nodes
[ "Return", "a", "dictionary", "mapping", "labels", "(", "strings", ")", "to", "Node", "objects" ]
python
train
47.775
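A quick usage sketch for the function above, assuming the treeswift package is installed (read_tree_newick is its documented entry point; the tree string here is made up):

import treeswift

# parse a small rooted tree; internal node labeled "X", leaves a/b/c
tree = treeswift.read_tree_newick('((a,b)X,c);')

leaves = tree.label_to_node()            # default: leaves only
assert set(leaves) == {'a', 'b', 'c'}

picked = tree.label_to_node({'a', 'X'})  # select specific labels by set
assert set(picked) == {'a', 'X'}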
clalancette/pycdlib
pycdlib/headervd.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/headervd.py#L637-L662
def remove_from_ptr_size(self, ptr_size):
    # type: (int) -> bool
    '''
    Remove the space for a path table record from the volume descriptor.

    Parameters:
     ptr_size - The length of the Path Table Record being removed from this Volume Descriptor.
    Returns:
     True if extents need to be removed from the Volume Descriptor, False otherwise.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')

    # Next remove from the Path Table Record size.
    self.path_tbl_size -= ptr_size
    new_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2

    need_remove_extents = False
    if new_extents > self.path_table_num_extents:
        # This should never happen.
        raise pycdlibexception.PyCdlibInvalidInput('This should never happen')
    elif new_extents < self.path_table_num_extents:
        self.path_table_num_extents -= 2
        need_remove_extents = True

    return need_remove_extents
[ "def", "remove_from_ptr_size", "(", "self", ",", "ptr_size", ")", ":", "# type: (int) -> bool", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'This Volume Descriptor is not yet initialized'", ")", "# Next remove from the Path Table Record size.", "self", ".", "path_tbl_size", "-=", "ptr_size", "new_extents", "=", "utils", ".", "ceiling_div", "(", "self", ".", "path_tbl_size", ",", "4096", ")", "*", "2", "need_remove_extents", "=", "False", "if", "new_extents", ">", "self", ".", "path_table_num_extents", ":", "# This should never happen.", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This should never happen'", ")", "elif", "new_extents", "<", "self", ".", "path_table_num_extents", ":", "self", ".", "path_table_num_extents", "-=", "2", "need_remove_extents", "=", "True", "return", "need_remove_extents" ]
Remove the space for a path table record from the volume descriptor. Parameters: ptr_size - The length of the Path Table Record being removed from this Volume Descriptor. Returns: True if extents need to be removed from the Volume Descriptor, False otherwise.
[ "Remove", "the", "space", "for", "a", "path", "table", "record", "from", "the", "volume", "descriptor", "." ]
python
train
41.038462
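The extent arithmetic above leans on a ceiling-division helper. pycdlib's utils.ceiling_div is not shown in this record, but the standard integer idiom behaves like this (a sketch; the * 2 presumably covers the two path-table copies, little-endian and big-endian):

def ceiling_div(numer, denom):
    # ceil(numer / denom) using only integer arithmetic
    return -(-numer // denom)

# a path table of up to 4096 bytes needs one block per copy...
assert ceiling_div(4096, 4096) * 2 == 2
# ...and one more byte tips each copy over into a second block
assert ceiling_div(4097, 4096) * 2 == 4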
cocagne/txdbus
txdbus/marshal.py
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/txdbus/marshal.py#L192-L218
def validateBusName(n):
    """
    Verifies that the supplied name is a valid DBus Bus name. Throws
    an L{error.MarshallingError} if the format is invalid

    @type n: C{string}
    @param n: A DBus bus name
    """
    try:
        if '.' not in n:
            raise Exception('At least two components required')
        if '..' in n:
            raise Exception('".." not allowed in bus names')
        if len(n) > 255:
            raise Exception('Name exceeds maximum length of 255')
        if n[0] == '.':
            raise Exception('Names may not begin with a "."')
        if n[0].isdigit():
            raise Exception('Names may not begin with a digit')
        if bus_re.search(n):
            raise Exception(
                'Name contains a character outside the set [A-Za-z0-9_.\\-:]')
        if not n[0] == ':' and dot_digit_re.search(n):
            raise Exception(
                'No components of an interface name may begin with a digit')
    except Exception as e:
        raise MarshallingError('Invalid bus name "%s": %s' % (n, str(e)))
[ "def", "validateBusName", "(", "n", ")", ":", "try", ":", "if", "'.'", "not", "in", "n", ":", "raise", "Exception", "(", "'At least two components required'", ")", "if", "'..'", "in", "n", ":", "raise", "Exception", "(", "'\"..\" not allowed in bus names'", ")", "if", "len", "(", "n", ")", ">", "255", ":", "raise", "Exception", "(", "'Name exceeds maximum length of 255'", ")", "if", "n", "[", "0", "]", "==", "'.'", ":", "raise", "Exception", "(", "'Names may not begin with a \".\"'", ")", "if", "n", "[", "0", "]", ".", "isdigit", "(", ")", ":", "raise", "Exception", "(", "'Names may not begin with a digit'", ")", "if", "bus_re", ".", "search", "(", "n", ")", ":", "raise", "Exception", "(", "'Names contains a character outside the set [A-Za-z0-9_.\\\\-:]'", ")", "if", "not", "n", "[", "0", "]", "==", "':'", "and", "dot_digit_re", ".", "search", "(", "n", ")", ":", "raise", "Exception", "(", "'No coponents of an interface name may begin with a digit'", ")", "except", "Exception", "as", "e", ":", "raise", "MarshallingError", "(", "'Invalid bus name \"%s\": %s'", "%", "(", "n", ",", "str", "(", "e", ")", ")", ")" ]
Verifies that the supplied name is a valid DBus Bus name. Throws an L{error.MarshallingError} if the format is invalid @type n: C{string} @param n: A DBus bus name
[ "Verifies", "that", "the", "supplied", "name", "is", "a", "valid", "DBus", "Bus", "name", ".", "Throws", "an", "L", "{", "error", ".", "MarshallingError", "}", "if", "the", "format", "is", "invalid" ]
python
train
38.592593
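A hedged call-site sketch (bus_re and dot_digit_re are module-level patterns not shown here, so this relies on importing the real module; per the docstring, the error class lives in txdbus.error):

from txdbus.marshal import validateBusName
from txdbus.error import MarshallingError

validateBusName('org.example.Service')   # valid: no exception, returns None

try:
    validateBusName('nodots')
except MarshallingError as e:
    print(e)  # Invalid bus name "nodots": At least two components required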
ethereum/web3.py
web3/pm.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/pm.py#L565-L578
def get_release_data(self, package_name: str, version: str) -> Tuple[str, str, str]:
    """
    Returns ``(package_name, version, manifest_uri)`` associated with the given
    package name and version, *if* they are published to the currently set registry.

    * Parameters:
        * ``package_name``: Must be a valid package name.
        * ``version``: Must be a valid package version.
    """
    validate_package_name(package_name)
    validate_package_version(version)
    self._validate_set_registry()
    release_id = self.registry._get_release_id(package_name, version)
    return self.get_release_id_data(release_id)
[ "def", "get_release_data", "(", "self", ",", "package_name", ":", "str", ",", "version", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", ",", "str", "]", ":", "validate_package_name", "(", "package_name", ")", "validate_package_version", "(", "version", ")", "self", ".", "_validate_set_registry", "(", ")", "release_id", "=", "self", ".", "registry", ".", "_get_release_id", "(", "package_name", ",", "version", ")", "return", "self", ".", "get_release_id_data", "(", "release_id", ")" ]
Returns ``(package_name, version, manifest_uri)`` associated with the given package name and version, *if* they are published to the currently set registry. * Parameters: * ``name``: Must be a valid package name. * ``version``: Must be a valid package version.
[ "Returns", "(", "package_name", "version", "manifest_uri", ")", "associated", "with", "the", "given", "package", "name", "and", "version", "*", "if", "*", "they", "are", "published", "to", "the", "currently", "set", "registry", "." ]
python
train
46.785714
sci-bots/mpm
mpm/commands.py
https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/commands.py#L31-L42
def home_dir():
    '''
    Returns:
        str : Path to home directory (or ``Documents`` directory on Windows).
    '''
    if os.name == 'nt':
        from win32com.shell import shell, shellcon
        return shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
    else:
        return os.path.expanduser('~')
[ "def", "home_dir", "(", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "from", "win32com", ".", "shell", "import", "shell", ",", "shellcon", "return", "shell", ".", "SHGetFolderPath", "(", "0", ",", "shellcon", ".", "CSIDL_PERSONAL", ",", "0", ",", "0", ")", "else", ":", "return", "os", ".", "path", ".", "expanduser", "(", "'~'", ")" ]
Returns: str : Path to home directory (or ``Documents`` directory on Windows).
[ "Returns", ":" ]
python
train
25.666667
dmlc/gluon-nlp
src/gluonnlp/data/utils.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/utils.py#L92-L133
def count_tokens(tokens, to_lower=False, counter=None):
    r"""Counts tokens in the specified string.

    For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of
    tokens may look like::

        (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)

    Parameters
    ----------
    tokens : list of str
        A source list of tokens.
    to_lower : bool, default False
        Whether to convert the source tokens to the lower case.
    counter : Counter or None, default None
        The Counter instance to be updated with the counts of `tokens`. If
        None, return a new Counter instance counting tokens from `tokens`.

    Returns
    -------
    The `counter` Counter instance after being updated with the token
    counts of `tokens`. If `counter` is None, return a new Counter
    instance counting tokens from `tokens`.

    Examples
    --------
    >>> import re
    >>> source_str = ' Life is great ! \n life is good . \n'
    >>> source_str_tokens = filter(None, re.split(' |\n', source_str))
    >>> gluonnlp.data.count_tokens(source_str_tokens)
    Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
    """
    if to_lower:
        tokens = [t.lower() for t in tokens]

    if counter is None:
        return Counter(tokens)
    else:
        counter.update(tokens)
        return counter
[ "def", "count_tokens", "(", "tokens", ",", "to_lower", "=", "False", ",", "counter", "=", "None", ")", ":", "if", "to_lower", ":", "tokens", "=", "[", "t", ".", "lower", "(", ")", "for", "t", "in", "tokens", "]", "if", "counter", "is", "None", ":", "return", "Counter", "(", "tokens", ")", "else", ":", "counter", ".", "update", "(", "tokens", ")", "return", "counter" ]
r"""Counts tokens in the specified string. For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may look like:: (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd) Parameters ---------- tokens : list of str A source list of tokens. to_lower : bool, default False Whether to convert the source source_str to the lower case. counter : Counter or None, default None The Counter instance to be updated with the counts of `tokens`. If None, return a new Counter instance counting tokens from `tokens`. Returns ------- The `counter` Counter instance after being updated with the token counts of `source_str`. If `counter` is None, return a new Counter instance counting tokens from `source_str`. Examples -------- >>> import re >>> source_str = ' Life is great ! \n life is good . \n' >>> source_str_tokens = filter(None, re.split(' |\n', source_str)) >>> gluonnlp.data.count_tokens(source_str_tokens) Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
[ "r", "Counts", "tokens", "in", "the", "specified", "string", "." ]
python
train
32.380952
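The doctest above covers the fresh-Counter path; the counter argument is for streaming aggregation across batches, e.g. with the function above (exported as gluonnlp.data.count_tokens):

from collections import Counter

counter = Counter()
for batch in [['Life', 'is', 'great', '!'], ['life', 'is', 'good', '.']]:
    # reusing `counter` accumulates counts across batches;
    # to_lower=True folds 'Life' and 'life' together
    counter = count_tokens(batch, to_lower=True, counter=counter)
assert counter['life'] == 2 and counter['is'] == 2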
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L5704-L5746
def get_assessments_offered_by_query(self, assessment_offered_query):
    """Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.

    arg:    assessment_offered_query
            (osid.assessment.AssessmentOfferedQuery): the assessment offered query
    return: (osid.assessment.AssessmentOfferedList) - the returned
            ``AssessmentOfferedList``
    raise:  NullArgument - ``assessment_offered_query`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    raise:  Unsupported - ``assessment_offered_query`` is not of this service
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceQuerySession.get_resources_by_query
    and_list = list()
    or_list = list()
    for term in assessment_offered_query._query_terms:
        if '$in' in assessment_offered_query._query_terms[term] and '$nin' in assessment_offered_query._query_terms[term]:
            and_list.append(
                {'$or': [{term: {'$in': assessment_offered_query._query_terms[term]['$in']}},
                         {term: {'$nin': assessment_offered_query._query_terms[term]['$nin']}}]})
        else:
            and_list.append({term: assessment_offered_query._query_terms[term]})
    for term in assessment_offered_query._keyword_terms:
        or_list.append({term: assessment_offered_query._keyword_terms[term]})
    if or_list:
        and_list.append({'$or': or_list})
    view_filter = self._view_filter()
    if view_filter:
        and_list.append(view_filter)
    if and_list:
        query_terms = {'$and': and_list}
        collection = JSONClientValidated('assessment',
                                         collection='AssessmentOffered',
                                         runtime=self._runtime)
        result = collection.find(query_terms).sort('_id', DESCENDING)
    else:
        result = []
    return objects.AssessmentOfferedList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_assessments_offered_by_query", "(", "self", ",", "assessment_offered_query", ")", ":", "# Implemented from template for", "# osid.resource.ResourceQuerySession.get_resources_by_query", "and_list", "=", "list", "(", ")", "or_list", "=", "list", "(", ")", "for", "term", "in", "assessment_offered_query", ".", "_query_terms", ":", "if", "'$in'", "in", "assessment_offered_query", ".", "_query_terms", "[", "term", "]", "and", "'$nin'", "in", "assessment_offered_query", ".", "_query_terms", "[", "term", "]", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "[", "{", "term", ":", "{", "'$in'", ":", "assessment_offered_query", ".", "_query_terms", "[", "term", "]", "[", "'$in'", "]", "}", "}", ",", "{", "term", ":", "{", "'$nin'", ":", "assessment_offered_query", ".", "_query_terms", "[", "term", "]", "[", "'$nin'", "]", "}", "}", "]", "}", ")", "else", ":", "and_list", ".", "append", "(", "{", "term", ":", "assessment_offered_query", ".", "_query_terms", "[", "term", "]", "}", ")", "for", "term", "in", "assessment_offered_query", ".", "_keyword_terms", ":", "or_list", ".", "append", "(", "{", "term", ":", "assessment_offered_query", ".", "_keyword_terms", "[", "term", "]", "}", ")", "if", "or_list", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "or_list", "}", ")", "view_filter", "=", "self", ".", "_view_filter", "(", ")", "if", "view_filter", ":", "and_list", ".", "append", "(", "view_filter", ")", "if", "and_list", ":", "query_terms", "=", "{", "'$and'", ":", "and_list", "}", "collection", "=", "JSONClientValidated", "(", "'assessment'", ",", "collection", "=", "'AssessmentOffered'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "query_terms", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "else", ":", "result", "=", "[", "]", "return", "objects", ".", "AssessmentOfferedList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query. arg: assessment_offered_query (osid.assessment.AssessmentOfferedQuery): the assessment offered query return: (osid.assessment.AssessmentOfferedList) - the returned ``AssessmentOfferedList`` raise: NullArgument - ``assessment_offered_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_offered_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "list", "of", "AssessmentOffered", "elements", "matching", "the", "given", "assessment", "offered", "query", "." ]
python
train
52.069767
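To make the filter assembly above concrete, here is a standalone rerun of the same loop on hypothetical term dicts (field names invented for illustration):

# hypothetical term dicts mirroring the private attributes used above
query_terms = {'genusTypeId': {'$in': ['type.A'], '$nin': ['type.B']}}
keyword_terms = {'displayName.text': {'$regex': 'exam'}}

and_list = []
for term, spec in query_terms.items():
    if '$in' in spec and '$nin' in spec:
        # split a mixed $in/$nin term into an explicit $or pair
        and_list.append({'$or': [{term: {'$in': spec['$in']}},
                                 {term: {'$nin': spec['$nin']}}]})
    else:
        and_list.append({term: spec})
or_list = [{term: spec} for term, spec in keyword_terms.items()]
if or_list:
    and_list.append({'$or': or_list})
print({'$and': and_list})  # the MongoDB filter handed to collection.find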
datadesk/slackdown
slackdown/__init__.py
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L247-L258
def handle_endtag(self, tag):
    """
    Called by HTMLParser.feed when an end tag is found.
    """
    if tag in PARENT_ELEMENTS:
        self.current_parent_element['tag'] = ''
        self.current_parent_element['attrs'] = ''
    if tag == 'li':
        self.parsing_li = True
    if tag != 'br':
        self.cleaned_html += '</{}>'.format(tag)
[ "def", "handle_endtag", "(", "self", ",", "tag", ")", ":", "if", "tag", "in", "PARENT_ELEMENTS", ":", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "''", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "''", "if", "tag", "==", "'li'", ":", "self", ".", "parsing_li", "=", "True", "if", "tag", "!=", "'br'", ":", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "tag", ")" ]
Called by HTMLParser.feed when an end tag is found.
[ "Called", "by", "HTMLParser", ".", "feed", "when", "an", "end", "tag", "is", "found", "." ]
python
train
31.666667
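handle_endtag is one of the standard html.parser.HTMLParser callbacks; a minimal standalone subclass shows when it fires:

from html.parser import HTMLParser

class EndTagLogger(HTMLParser):
    def handle_endtag(self, tag):
        # called once per closing tag, in document order
        print('end:', tag)

EndTagLogger().feed('<ul><li>one</li><li>two</li></ul>')
# end: li
# end: li
# end: ul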
scivision/gridaurora
gridaurora/calcemissions.py
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L169-L187
def catvl(z, ver, vnew, lamb, lambnew, br):
    """
    trapz integrates over altitude axis, axis = -2
    concatenate over reaction dimension, axis = -1

    br: column integrated brightness
    lamb: wavelength [nm]
    ver: volume emission rate [photons / cm^-3 s^-3 ...]
    """
    if ver is not None:
        br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1)  # must come first!
        ver = np.concatenate((ver, vnew), axis=-1)
        lamb = np.concatenate((lamb, lambnew))
    else:
        ver = vnew.copy(order='F')
        lamb = lambnew.copy()
        br = np.trapz(ver, z, axis=-2)

    return ver, lamb, br
[ "def", "catvl", "(", "z", ",", "ver", ",", "vnew", ",", "lamb", ",", "lambnew", ",", "br", ")", ":", "if", "ver", "is", "not", "None", ":", "br", "=", "np", ".", "concatenate", "(", "(", "br", ",", "np", ".", "trapz", "(", "vnew", ",", "z", ",", "axis", "=", "-", "2", ")", ")", ",", "axis", "=", "-", "1", ")", "# must come first!", "ver", "=", "np", ".", "concatenate", "(", "(", "ver", ",", "vnew", ")", ",", "axis", "=", "-", "1", ")", "lamb", "=", "np", ".", "concatenate", "(", "(", "lamb", ",", "lambnew", ")", ")", "else", ":", "ver", "=", "vnew", ".", "copy", "(", "order", "=", "'F'", ")", "lamb", "=", "lambnew", ".", "copy", "(", ")", "br", "=", "np", ".", "trapz", "(", "ver", ",", "z", ",", "axis", "=", "-", "2", ")", "return", "ver", ",", "lamb", ",", "br" ]
trapz integrates over altitude axis, axis = -2 concatenate over reaction dimension, axis = -1 br: column integrated brightness lamb: wavelength [nm] ver: volume emission rate [photons / cm^-3 s^-3 ...]
[ "trapz", "integrates", "over", "altitude", "axis", "axis", "=", "-", "2", "concatenate", "over", "reaction", "dimension", "axis", "=", "-", "1" ]
python
train
32.526316
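A small numpy check of the axis conventions in the docstring (a sketch, not gridaurora code): integrating a (altitude, reaction) array over axis=-2 collapses altitude, leaving one value per reaction.

import numpy as np

z = np.array([0.0, 1.0, 2.0])   # altitude grid
ver = np.ones((3, 2))           # (altitude, reaction) emission rates
br = np.trapz(ver, z, axis=-2)  # integrate over altitude
assert br.shape == (2,) and np.allclose(br, 2.0)

# concatenating a new reaction along axis=-1 grows the last dimension
ver2 = np.concatenate((ver, np.ones((3, 1))), axis=-1)
assert ver2.shape == (3, 3)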
opereto/pyopereto
pyopereto/client.py
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L1098-L1113
def stop_process(self, pids, status='success'):
    '''
    stop_process(self, pids, status='success')

    Stops a running process

    :Parameters:
    * *pids* (`string`) -- Identifier(s) of existing processes
    * *status* (`string`) -- the value the process will be terminated with. Any of the following possible values: success, failure, error, warning, terminated
    '''
    if status not in process_result_statuses:
        raise OperetoClientError('Invalid process result [%s]' % status)
    pids = self._get_pids(pids)
    for pid in pids:
        self._call_rest_api('post', '/processes/' + pid + '/terminate/' + status,
                            error='Failed to stop process')
[ "def", "stop_process", "(", "self", ",", "pids", ",", "status", "=", "'success'", ")", ":", "if", "status", "not", "in", "process_result_statuses", ":", "raise", "OperetoClientError", "(", "'Invalid process result [%s]'", "%", "status", ")", "pids", "=", "self", ".", "_get_pids", "(", "pids", ")", "for", "pid", "in", "pids", ":", "self", ".", "_call_rest_api", "(", "'post'", ",", "'/processes/'", "+", "pid", "+", "'/terminate/'", "+", "status", ",", "error", "=", "'Failed to stop process'", ")" ]
stop_process(self, pids, status='success') Stops a running process :Parameters: * *pid* (`string`) -- Identifier of an existing process * *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated
[ "stop_process", "(", "self", "pids", "status", "=", "success", ")" ]
python
train
43.3125
Gandi/gandi.cli
gandi/cli/commands/disk.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/disk.py#L122-L138
def detach(gandi, resource, background, force):
    """ Detach disks from currently attached vm.

    Resource can be a disk name, or ID
    """
    resource = sorted(tuple(set(resource)))
    if not force:
        proceed = click.confirm('Are you sure you want to detach %s?' %
                                ', '.join(resource))
        if not proceed:
            return

    result = gandi.disk.detach(resource, background)
    if background:
        gandi.pretty_echo(result)

    return result
[ "def", "detach", "(", "gandi", ",", "resource", ",", "background", ",", "force", ")", ":", "resource", "=", "sorted", "(", "tuple", "(", "set", "(", "resource", ")", ")", ")", "if", "not", "force", ":", "proceed", "=", "click", ".", "confirm", "(", "'Are you sure you want to detach %s?'", "%", "', '", ".", "join", "(", "resource", ")", ")", "if", "not", "proceed", ":", "return", "result", "=", "gandi", ".", "disk", ".", "detach", "(", "resource", ",", "background", ")", "if", "background", ":", "gandi", ".", "pretty_echo", "(", "result", ")", "return", "result" ]
Detach disks from currectly attached vm. Resource can be a disk name, or ID
[ "Detach", "disks", "from", "currectly", "attached", "vm", "." ]
python
train
28.470588
PRIArobotics/HedgehogUtils
hedgehog/utils/asyncio.py
https://github.com/PRIArobotics/HedgehogUtils/blob/cc368df270288c870cc66d707696ccb62823ca9c/hedgehog/utils/asyncio.py#L28-L37
def repeat_func_eof(func: Callable[[], Union[T, Awaitable[T]]], eof: Any, *,
                    interval: float=0, use_is: bool=False) -> AsyncIterator[T]:
    """
    Repeats the result of a 0-ary function until an `eof` item is reached.
    The `eof` item itself is not part of the resulting stream;
    by setting `use_is` to true, eof is checked by identity rather than equality.
    `interval` behaves exactly like with `aiostream.create.repeat`.
    """
    pred = (lambda item: item != eof) if not use_is else (lambda item: (item is not eof))
    base = repeat_func.raw(func, interval=interval)
    return cast(AsyncIterator[T], stream.takewhile.raw(base, pred))
[ "def", "repeat_func_eof", "(", "func", ":", "Callable", "[", "[", "]", ",", "Union", "[", "T", ",", "Awaitable", "[", "T", "]", "]", "]", ",", "eof", ":", "Any", ",", "*", ",", "interval", ":", "float", "=", "0", ",", "use_is", ":", "bool", "=", "False", ")", "->", "AsyncIterator", "[", "T", "]", ":", "pred", "=", "(", "lambda", "item", ":", "item", "!=", "eof", ")", "if", "not", "use_is", "else", "(", "lambda", "item", ":", "(", "item", "is", "not", "eof", ")", ")", "base", "=", "repeat_func", ".", "raw", "(", "func", ",", "interval", "=", "interval", ")", "return", "cast", "(", "AsyncIterator", "[", "T", "]", ",", "stream", ".", "takewhile", ".", "raw", "(", "base", ",", "pred", ")", ")" ]
Repeats the result of a 0-ary function until an `eof` item is reached. The `eof` item itself is not part of the resulting stream; by setting `use_is` to true, eof is checked by identity rather than equality. `times` and `interval` behave exactly like with `aiostream.create.repeat`.
[ "Repeats", "the", "result", "of", "a", "0", "-", "ary", "function", "until", "an", "eof", "item", "is", "reached", ".", "The", "eof", "item", "itself", "is", "not", "part", "of", "the", "resulting", "stream", ";", "by", "setting", "use_is", "to", "true", "eof", "is", "checked", "by", "identity", "rather", "than", "equality", ".", "times", "and", "interval", "behave", "exactly", "like", "with", "aiostream", ".", "create", ".", "repeat", "." ]
python
train
65.2
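A hedged usage sketch, assuming the raw aiostream pipeline returned above can be consumed directly with async for (the items and the None sentinel are made up):

import asyncio
from hedgehog.utils.asyncio import repeat_func_eof

async def main():
    items = iter([1, 2, 3, None])  # None is the eof sentinel
    agen = repeat_func_eof(lambda: next(items), eof=None, use_is=True)
    async for item in agen:
        print(item)                # 1, 2, 3 -- the eof itself is dropped

asyncio.run(main())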
gholt/swiftly
swiftly/dencrypt.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/dencrypt.py#L102-L143
def aes_decrypt(key, stdin, chunk_size=65536):
    """
    Generator that decrypts a content stream using AES 256 in CBC mode.

    :param key: Any string to use as the decryption key.
    :param stdin: Where to read the encrypted data from.
    :param chunk_size: Largest amount to read at once.
    """
    if not AES256CBC_Support:
        raise Exception(
            'AES256CBC not supported; likely pycrypto is not installed')
    # Always use 256-bit key
    key = hashlib.sha256(key).digest()
    # At least 16 and a multiple of 16
    chunk_size = max(16, chunk_size >> 4 << 4)
    iv = stdin.read(16)
    while len(iv) < 16:
        chunk = stdin.read(16 - len(iv))
        if not chunk:
            raise IOError('EOF reading IV')
        iv += chunk  # accumulate until the full 16-byte IV is read
    decryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
    data = ''  # Python 2 byte strings: '' accumulates raw bytes
    while True:
        chunk = stdin.read(chunk_size)
        if not chunk:
            if len(data) != 16:
                raise IOError('EOF reading encrypted stream')
            data = decryptor.decrypt(data)
            trailing = ord(data[-1])
            if trailing > 15:
                raise IOError(
                    'EOF reading encrypted stream or trailing value corrupted '
                    '%s' % trailing)
            yield data[:trailing]
            break
        data += chunk
        if len(data) > 16:
            # Always leave at least one byte pending
            trailing = (len(data) % 16) or 16
            yield decryptor.decrypt(data[:-trailing])
            data = data[-trailing:]
[ "def", "aes_decrypt", "(", "key", ",", "stdin", ",", "chunk_size", "=", "65536", ")", ":", "if", "not", "AES256CBC_Support", ":", "raise", "Exception", "(", "'AES256CBC not supported; likely pycrypto is not installed'", ")", "# Always use 256-bit key", "key", "=", "hashlib", ".", "sha256", "(", "key", ")", ".", "digest", "(", ")", "# At least 16 and a multiple of 16", "chunk_size", "=", "max", "(", "16", ",", "chunk_size", ">>", "4", "<<", "4", ")", "iv", "=", "stdin", ".", "read", "(", "16", ")", "while", "len", "(", "iv", ")", "<", "16", ":", "chunk", "=", "stdin", ".", "read", "(", "16", "-", "len", "(", "iv", ")", ")", "if", "not", "chunk", ":", "raise", "IOError", "(", "'EOF reading IV'", ")", "decryptor", "=", "Crypto", ".", "Cipher", ".", "AES", ".", "new", "(", "key", ",", "Crypto", ".", "Cipher", ".", "AES", ".", "MODE_CBC", ",", "iv", ")", "data", "=", "''", "while", "True", ":", "chunk", "=", "stdin", ".", "read", "(", "chunk_size", ")", "if", "not", "chunk", ":", "if", "len", "(", "data", ")", "!=", "16", ":", "raise", "IOError", "(", "'EOF reading encrypted stream'", ")", "data", "=", "decryptor", ".", "decrypt", "(", "data", ")", "trailing", "=", "ord", "(", "data", "[", "-", "1", "]", ")", "if", "trailing", ">", "15", ":", "raise", "IOError", "(", "'EOF reading encrypted stream or trailing value corrupted '", "'%s'", "%", "trailing", ")", "yield", "data", "[", ":", "trailing", "]", "break", "data", "+=", "chunk", "if", "len", "(", "data", ")", ">", "16", ":", "# Always leave at least one byte pending", "trailing", "=", "(", "len", "(", "data", ")", "%", "16", ")", "or", "16", "yield", "decryptor", ".", "decrypt", "(", "data", "[", ":", "-", "trailing", "]", ")", "data", "=", "data", "[", "-", "trailing", ":", "]" ]
Generator that decrypts a content stream using AES 256 in CBC mode. :param key: Any string to use as the decryption key. :param stdin: Where to read the encrypted data from. :param chunk_size: Largest amount to read at once.
[ "Generator", "that", "decrypts", "a", "content", "stream", "using", "AES", "256", "in", "CBC", "mode", "." ]
python
test
36
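The stream framing above stores the count of valid bytes of the final block in its last byte (0 to 15; a full final chunk is handled by the pending-byte logic earlier). The matching encryptor is not shown in this record, so this pure-Python sketch (Python 3 bytes for clarity) only mirrors what the decryptor expects:

def frame_final_block(payload):
    # pad `payload` (0..15 bytes) to 16 bytes; last byte = valid-byte count
    assert len(payload) <= 15
    n = len(payload)
    return payload + b'\x00' * (15 - n) + bytes([n])

def unframe_final_block(block):
    trailing = block[-1]
    if trailing > 15:
        raise IOError('trailing value corrupted %s' % trailing)
    return block[:trailing]

assert unframe_final_block(frame_final_block(b'hello')) == b'hello'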
cloudtools/stacker
stacker/util.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/util.py#L572-L582
def fetch_local_package(self, config):
    """Make a local path available to current stacker config.

    Args:
        config (dict): 'local' path config dictionary
    """
    # Update sys.path & merge in remote configs (if necessary)
    self.update_paths_and_config(config=config,
                                 pkg_dir_name=config['source'],
                                 pkg_cache_dir=os.getcwd())
[ "def", "fetch_local_package", "(", "self", ",", "config", ")", ":", "# Update sys.path & merge in remote configs (if necessary)", "self", ".", "update_paths_and_config", "(", "config", "=", "config", ",", "pkg_dir_name", "=", "config", "[", "'source'", "]", ",", "pkg_cache_dir", "=", "os", ".", "getcwd", "(", ")", ")" ]
Make a local path available to current stacker config. Args: config (dict): 'local' path config dictionary
[ "Make", "a", "local", "path", "available", "to", "current", "stacker", "config", "." ]
python
train
39.181818
StanfordVL/robosuite
robosuite/environments/baxter_peg_in_hole.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter_peg_in_hole.py#L173-L193
def _peg_pose_in_hole_frame(self):
    """
    A helper function that returns the pose of the peg body expressed
    in the frame of the hole body.
    """
    # World frame
    peg_pos_in_world = self.sim.data.get_body_xpos("cylinder")
    peg_rot_in_world = self.sim.data.get_body_xmat("cylinder").reshape((3, 3))
    peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)

    # World frame
    hole_pos_in_world = self.sim.data.get_body_xpos("hole")
    hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3))
    hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)

    world_pose_in_hole = T.pose_inv(hole_pose_in_world)
    peg_pose_in_hole = T.pose_in_A_to_pose_in_B(
        peg_pose_in_world, world_pose_in_hole
    )
    return peg_pose_in_hole
[ "def", "_peg_pose_in_hole_frame", "(", "self", ")", ":", "# World frame", "peg_pos_in_world", "=", "self", ".", "sim", ".", "data", ".", "get_body_xpos", "(", "\"cylinder\"", ")", "peg_rot_in_world", "=", "self", ".", "sim", ".", "data", ".", "get_body_xmat", "(", "\"cylinder\"", ")", ".", "reshape", "(", "(", "3", ",", "3", ")", ")", "peg_pose_in_world", "=", "T", ".", "make_pose", "(", "peg_pos_in_world", ",", "peg_rot_in_world", ")", "# World frame", "hole_pos_in_world", "=", "self", ".", "sim", ".", "data", ".", "get_body_xpos", "(", "\"hole\"", ")", "hole_rot_in_world", "=", "self", ".", "sim", ".", "data", ".", "get_body_xmat", "(", "\"hole\"", ")", ".", "reshape", "(", "(", "3", ",", "3", ")", ")", "hole_pose_in_world", "=", "T", ".", "make_pose", "(", "hole_pos_in_world", ",", "hole_rot_in_world", ")", "world_pose_in_hole", "=", "T", ".", "pose_inv", "(", "hole_pose_in_world", ")", "peg_pose_in_hole", "=", "T", ".", "pose_in_A_to_pose_in_B", "(", "peg_pose_in_world", ",", "world_pose_in_hole", ")", "return", "peg_pose_in_hole" ]
A helper function that takes in a named data field and returns the pose of that object in the base frame.
[ "A", "helper", "function", "that", "takes", "in", "a", "named", "data", "field", "and", "returns", "the", "pose", "of", "that", "object", "in", "the", "base", "frame", "." ]
python
train
41
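The frame change above is plain homogeneous-transform algebra. robosuite's T module has its own implementations, so treat these as an illustrative sketch of the math only:

import numpy as np

def make_pose(pos, rot):
    # 4x4 homogeneous transform from a position and a 3x3 rotation
    pose = np.eye(4)
    pose[:3, :3] = rot
    pose[:3, 3] = pos
    return pose

def pose_inv(pose):
    # inverse of a rigid transform: R^T and -R^T t
    inv = np.eye(4)
    inv[:3, :3] = pose[:3, :3].T
    inv[:3, 3] = -inv[:3, :3] @ pose[:3, 3]
    return inv

def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
    # compose: x_B = (A->B) @ x_A
    return pose_A_in_B @ pose_A

# peg-in-world composed with world-in-hole gives peg-in-hole
peg_in_world = make_pose([0.1, 0.0, 0.5], np.eye(3))
hole_in_world = make_pose([0.0, 0.0, 0.5], np.eye(3))
peg_in_hole = pose_in_A_to_pose_in_B(peg_in_world, pose_inv(hole_in_world))
assert np.allclose(peg_in_hole[:3, 3], [0.1, 0.0, 0.0])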
DS-100/nb-to-gradescope
gs100/converter.py
https://github.com/DS-100/nb-to-gradescope/blob/1a2b37753c4913689557328a796543a767eb3932/gs100/converter.py#L266-L291
def create_question_pdfs(nb, pages_per_q, folder, zoom) -> list:
    """
    Converts each cell in the notebook to a PDF named something like
    'q04c.pdf'. Places PDFs in the specified folder and returns the list of
    created PDF locations.
    """
    html_cells = nb_to_html_cells(nb)
    q_nums = nb_to_q_nums(nb)

    os.makedirs(folder, exist_ok=True)

    pdf_options = PDF_OPTS.copy()
    pdf_options['zoom'] = ZOOM_FACTOR * zoom

    pdf_names = []
    for question, cell in zip(q_nums, html_cells):
        # Create question PDFs
        pdf_name = os.path.join(folder, '{}.pdf'.format(question))
        pdfkit.from_string(cell.prettify(), pdf_name, options=pdf_options)
        pad_pdf_pages(pdf_name, pages_per_q)
        print('Created ' + pdf_name)
        pdf_names.append(pdf_name)
    return pdf_names
[ "def", "create_question_pdfs", "(", "nb", ",", "pages_per_q", ",", "folder", ",", "zoom", ")", "->", "list", ":", "html_cells", "=", "nb_to_html_cells", "(", "nb", ")", "q_nums", "=", "nb_to_q_nums", "(", "nb", ")", "os", ".", "makedirs", "(", "folder", ",", "exist_ok", "=", "True", ")", "pdf_options", "=", "PDF_OPTS", ".", "copy", "(", ")", "pdf_options", "[", "'zoom'", "]", "=", "ZOOM_FACTOR", "*", "zoom", "pdf_names", "=", "[", "]", "for", "question", ",", "cell", "in", "zip", "(", "q_nums", ",", "html_cells", ")", ":", "# Create question PDFs", "pdf_name", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'{}.pdf'", ".", "format", "(", "question", ")", ")", "pdfkit", ".", "from_string", "(", "cell", ".", "prettify", "(", ")", ",", "pdf_name", ",", "options", "=", "pdf_options", ")", "pad_pdf_pages", "(", "pdf_name", ",", "pages_per_q", ")", "print", "(", "'Created '", "+", "pdf_name", ")", "pdf_names", ".", "append", "(", "pdf_name", ")", "return", "pdf_names" ]
Converts each cells in tbe notebook to a PDF named something like 'q04c.pdf'. Places PDFs in the specified folder and returns the list of created PDF locations.
[ "Converts", "each", "cells", "in", "tbe", "notebook", "to", "a", "PDF", "named", "something", "like", "q04c", ".", "pdf", ".", "Places", "PDFs", "in", "the", "specified", "folder", "and", "returns", "the", "list", "of", "created", "PDF", "locations", "." ]
python
train
30.807692
openvax/isovar
isovar/variant_reads.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_reads.py#L48-L57
def reads_supporting_variants(variants, samfile, **kwargs):
    """
    Given a SAM/BAM file and a collection of variants, generates a sequence
    of variants paired with reads which support each variant.
    """
    for variant, allele_reads in reads_overlapping_variants(
            variants=variants,
            samfile=samfile,
            **kwargs):
        yield variant, filter_non_alt_reads_for_variant(variant, allele_reads)
[ "def", "reads_supporting_variants", "(", "variants", ",", "samfile", ",", "*", "*", "kwargs", ")", ":", "for", "variant", ",", "allele_reads", "in", "reads_overlapping_variants", "(", "variants", "=", "variants", ",", "samfile", "=", "samfile", ",", "*", "*", "kwargs", ")", ":", "yield", "variant", ",", "filter_non_alt_reads_for_variant", "(", "variant", ",", "allele_reads", ")" ]
Given a SAM/BAM file and a collection of variants, generates a sequence of variants paired with reads which support each variant.
[ "Given", "a", "SAM", "/", "BAM", "file", "and", "a", "collection", "of", "variants", "generates", "a", "sequence", "of", "variants", "paired", "with", "reads", "which", "support", "each", "variant", "." ]
python
train
42.7
RJT1990/pyflux
pyflux/ssm/dynlin.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/dynlin.py#L183-L287
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
    """ Makes forecast with the estimated model

    Parameters
    ----------
    h : int (default : 5)
        How many steps ahead would you like to forecast?

    past_values : int (default : 20)
        How many past observations to show on the forecast graph?

    intervals : Boolean
        Would you like to show 95% prediction intervals for the forecast?

    oos_data : pd.DataFrame
        Data for the variables to be used out of sample (ys can be NaNs)

    Returns
    ----------
    - Plot of the forecast
    """
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize', (10, 7))
    nsims = kwargs.get('nsims', 200)

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        _, X_oos = dmatrices(self.formula, oos_data)
        X_oos = np.array([X_oos])[0]
        full_X = self.X.copy()
        full_X = np.append(full_X, X_oos, axis=0)
        Z = full_X
        date_index = self.shift_dates(h)

        # Retrieve data, dates and (transformed) latent variables
        if self.latent_variables.estimation_method in ['M-H']:
            lower_final = 0
            upper_final = 0
            plot_values_final = 0
            plot_index = date_index[-h-past_values:]

            for i in range(nsims):
                t_params = self.draw_latent_variables(nsims=1).T[0]
                a, P = self._forecast_model(t_params, Z, h)
                smoothed_series = np.zeros(self.y.shape[0]+h)
                series_variance = np.zeros(self.y.shape[0]+h)
                for t in range(self.y.shape[0]+h):
                    smoothed_series[t] = np.dot(Z[t], a[:, t])
                    series_variance[t] = np.dot(np.dot(Z[t], P[:, :, t]), Z[t].T)

                plot_values = smoothed_series[-h-past_values:]
                lower = smoothed_series[-h:] - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                upper = smoothed_series[-h:] + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                lower_final += np.append(plot_values[-h-1], lower)
                upper_final += np.append(plot_values[-h-1], upper)
                plot_values_final += plot_values

            plot_values_final = plot_values_final / nsims
            lower_final = lower_final / nsims
            upper_final = upper_final / nsims

            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h-1:], lower_final, upper_final, alpha=0.2)
            plt.plot(plot_index, plot_values_final)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
        else:
            # note: unlike the M-H branch above, Z is not passed here
            a, P = self._forecast_model(self.latent_variables.get_z_values(), h)
            plot_values = a[0][-h-past_values:]
            forecasted_values = a[0][-h:]
            smoothed_series = np.zeros(self.y.shape[0]+h)
            series_variance = np.zeros(self.y.shape[0]+h)
            for t in range(self.y.shape[0]+h):
                smoothed_series[t] = np.dot(Z[t], a[:, t])
                series_variance[t] = np.dot(np.dot(Z[t], P[:, :, t]), Z[t].T)

            lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]), 0.5)
            upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]), 0.5)
            lower = np.append(plot_values[-h-1], lower)
            upper = np.append(plot_values[-h-1], upper)

            plot_index = date_index[-h-past_values:]

            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
            plt.plot(plot_index, plot_values)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
[ "def", "plot_predict", "(", "self", ",", "h", "=", "5", ",", "past_values", "=", "20", ",", "intervals", "=", "True", ",", "oos_data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", "10", ",", "7", ")", ")", "nsims", "=", "kwargs", ".", "get", "(", "'nsims'", ",", "200", ")", "if", "self", ".", "latent_variables", ".", "estimated", "is", "False", ":", "raise", "Exception", "(", "\"No latent variables estimated!\"", ")", "else", ":", "_", ",", "X_oos", "=", "dmatrices", "(", "self", ".", "formula", ",", "oos_data", ")", "X_oos", "=", "np", ".", "array", "(", "[", "X_oos", "]", ")", "[", "0", "]", "full_X", "=", "self", ".", "X", ".", "copy", "(", ")", "full_X", "=", "np", ".", "append", "(", "full_X", ",", "X_oos", ",", "axis", "=", "0", ")", "Z", "=", "full_X", "date_index", "=", "self", ".", "shift_dates", "(", "h", ")", "# Retrieve data, dates and (transformed) latent variables ", "if", "self", ".", "latent_variables", ".", "estimation_method", "in", "[", "'M-H'", "]", ":", "lower_final", "=", "0", "upper_final", "=", "0", "plot_values_final", "=", "0", "plot_index", "=", "date_index", "[", "-", "h", "-", "past_values", ":", "]", "for", "i", "in", "range", "(", "nsims", ")", ":", "t_params", "=", "self", ".", "draw_latent_variables", "(", "nsims", "=", "1", ")", ".", "T", "[", "0", "]", "a", ",", "P", "=", "self", ".", "_forecast_model", "(", "t_params", ",", "Z", ",", "h", ")", "smoothed_series", "=", "np", ".", "zeros", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", "series_variance", "=", "np", ".", "zeros", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", "for", "t", "in", "range", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", ":", "smoothed_series", "[", "t", "]", "=", "np", ".", "dot", "(", "Z", "[", "t", "]", ",", "a", "[", ":", ",", "t", "]", ")", "series_variance", "[", "t", "]", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "Z", "[", "t", "]", ",", "P", "[", ":", ",", ":", ",", "t", "]", ")", ",", "Z", "[", "t", "]", ".", "T", ")", "plot_values", "=", "smoothed_series", "[", "-", "h", "-", "past_values", ":", "]", "lower", "=", "smoothed_series", "[", "-", "h", ":", "]", "-", "1.96", "*", "np", ".", "power", "(", "P", "[", "0", "]", "[", "0", "]", "[", "-", "h", ":", "]", "+", "self", ".", "latent_variables", ".", "z_list", "[", "0", "]", ".", "prior", ".", "transform", "(", "t_params", "[", "0", "]", ")", ",", "0.5", ")", "upper", "=", "smoothed_series", "[", "-", "h", ":", "]", "+", "1.96", "*", "np", ".", "power", "(", "P", "[", "0", "]", "[", "0", "]", "[", "-", "h", ":", "]", "+", "self", ".", "latent_variables", ".", "z_list", "[", "0", "]", ".", "prior", ".", "transform", "(", "t_params", "[", "0", "]", ")", ",", "0.5", ")", "lower_final", "+=", "np", ".", "append", "(", "plot_values", "[", "-", "h", "-", "1", "]", ",", "lower", ")", "upper_final", "+=", "np", ".", "append", "(", "plot_values", "[", "-", "h", "-", "1", "]", ",", "upper", ")", "plot_values_final", "+=", "plot_values", "plot_values_final", "=", "plot_values_final", "/", "nsims", "lower_final", "=", "lower_final", "/", "nsims", "upper_final", "=", "upper_final", "/", "nsims", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "if", "intervals", "==", "True", ":", "plt", ".", "fill_between", "(", "date_index", "[", "-", "h", "-", "1", ":", "]", ",", "lower_final", ",", "upper_final", 
",", "alpha", "=", "0.2", ")", "plt", ".", "plot", "(", "plot_index", ",", "plot_values_final", ")", "plt", ".", "title", "(", "\"Forecast for \"", "+", "self", ".", "data_name", ")", "plt", ".", "xlabel", "(", "\"Time\"", ")", "plt", ".", "ylabel", "(", "self", ".", "data_name", ")", "plt", ".", "show", "(", ")", "else", ":", "a", ",", "P", "=", "self", ".", "_forecast_model", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", ",", "h", ")", "plot_values", "=", "a", "[", "0", "]", "[", "-", "h", "-", "past_values", ":", "]", "forecasted_values", "=", "a", "[", "0", "]", "[", "-", "h", ":", "]", "smoothed_series", "=", "np", ".", "zeros", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", "series_variance", "=", "np", ".", "zeros", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", "for", "t", "in", "range", "(", "self", ".", "y", ".", "shape", "[", "0", "]", "+", "h", ")", ":", "smoothed_series", "[", "t", "]", "=", "np", ".", "dot", "(", "Z", "[", "t", "]", ",", "a", "[", ":", ",", "t", "]", ")", "series_variance", "[", "t", "]", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "Z", "[", "t", "]", ",", "P", "[", ":", ",", ":", ",", "t", "]", ")", ",", "Z", "[", "t", "]", ".", "T", ")", "lower", "=", "forecasted_values", "-", "1.96", "*", "np", ".", "power", "(", "P", "[", "0", "]", "[", "0", "]", "[", "-", "h", ":", "]", "+", "self", ".", "latent_variables", ".", "z_list", "[", "0", "]", ".", "prior", ".", "transform", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", "[", "0", "]", ")", ",", "0.5", ")", "upper", "=", "forecasted_values", "+", "1.96", "*", "np", ".", "power", "(", "P", "[", "0", "]", "[", "0", "]", "[", "-", "h", ":", "]", "+", "self", ".", "latent_variables", ".", "z_list", "[", "0", "]", ".", "prior", ".", "transform", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", "[", "0", "]", ")", ",", "0.5", ")", "lower", "=", "np", ".", "append", "(", "plot_values", "[", "-", "h", "-", "1", "]", ",", "lower", ")", "upper", "=", "np", ".", "append", "(", "plot_values", "[", "-", "h", "-", "1", "]", ",", "upper", ")", "plot_index", "=", "date_index", "[", "-", "h", "-", "past_values", ":", "]", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "if", "intervals", "==", "True", ":", "plt", ".", "fill_between", "(", "date_index", "[", "-", "h", "-", "1", ":", "]", ",", "lower", ",", "upper", ",", "alpha", "=", "0.2", ")", "plt", ".", "plot", "(", "plot_index", ",", "plot_values", ")", "plt", ".", "title", "(", "\"Forecast for \"", "+", "self", ".", "data_name", ")", "plt", ".", "xlabel", "(", "\"Time\"", ")", "plt", ".", "ylabel", "(", "self", ".", "data_name", ")", "plt", ".", "show", "(", ")" ]
Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? oos_data : pd.DataFrame Data for the variables to be used out of sample (ys can be NaNs) Returns ---------- - Plot of the forecast
[ "Makes", "forecast", "with", "the", "estimated", "model" ]
python
train
42.685714
arista-eosplus/pyeapi
pyeapi/api/vrfs.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vrfs.py#L98-L115
def _parse_rd(self, config):
    """ _parse_rd scans the provided configuration block and extracts
    the vrf rd. The return dict is intended to be merged into the response
    dict.

    Args:
        config (str): The vrf configuration block from the nodes running
            configuration

    Returns:
        dict: resource dict attribute
    """
    match = RD_RE.search(config)
    if match:
        value = match.group('value')
    else:
        value = None  # no rd configured in this block
    return dict(rd=value)
[ "def", "_parse_rd", "(", "self", ",", "config", ")", ":", "match", "=", "RD_RE", ".", "search", "(", "config", ")", "if", "match", ":", "value", "=", "match", ".", "group", "(", "'value'", ")", "else", ":", "value", "=", "match", "return", "dict", "(", "rd", "=", "value", ")" ]
_parse_rd scans the provided configuration block and extracts the vrf rd. The return dict is intended to be merged into the response dict. Args: config (str): The vrf configuration block from the nodes running configuration Returns: dict: resource dict attribute
[ "_parse_rd", "scans", "the", "provided", "configuration", "block", "and", "extracts", "the", "vrf", "rd", ".", "The", "return", "dict", "is", "intended", "to", "be", "merged", "into", "the", "response", "dict", "." ]
python
train
29.888889
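RD_RE is a module-level pattern not shown in this record; the pattern and sample config below are hypothetical illustrations, not pyeapi's exact regex:

import re

RD_RE = re.compile(r'(?:\s|^)rd (?P<value>.+)$', re.M)  # illustrative only

config = """vrf definition blue
   rd 10:10
"""
match = RD_RE.search(config)
rd = match.group('value') if match else None
assert rd == '10:10'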
senaite/senaite.lims
src/senaite/lims/browser/spotlight/jsonapi.py
https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/spotlight/jsonapi.py#L109-L132
def make_query(catalog):
    """A function to prepare a query
    """
    query = {}
    request = api.get_request()
    index = get_search_index_for(catalog)
    limit = request.form.get("limit")
    q = request.form.get("q", "")  # default to "" so the length check below is safe

    if len(q) > 0:
        query[index] = q + "*"
    else:
        return None

    portal_type = request.form.get("portal_type")
    if portal_type:
        if not isinstance(portal_type, list):
            portal_type = [portal_type]
        query["portal_type"] = portal_type

    if limit and limit.isdigit():
        query["sort_limit"] = int(limit)

    return query
[ "def", "make_query", "(", "catalog", ")", ":", "query", "=", "{", "}", "request", "=", "api", ".", "get_request", "(", ")", "index", "=", "get_search_index_for", "(", "catalog", ")", "limit", "=", "request", ".", "form", ".", "get", "(", "\"limit\"", ")", "q", "=", "request", ".", "form", ".", "get", "(", "\"q\"", ")", "if", "len", "(", "q", ")", ">", "0", ":", "query", "[", "index", "]", "=", "q", "+", "\"*\"", "else", ":", "return", "None", "portal_type", "=", "request", ".", "form", ".", "get", "(", "\"portal_type\"", ")", "if", "portal_type", ":", "if", "not", "isinstance", "(", "portal_type", ",", "list", ")", ":", "portal_type", "=", "[", "portal_type", "]", "query", "[", "\"portal_type\"", "]", "=", "portal_type", "if", "limit", "and", "limit", ".", "isdigit", "(", ")", ":", "query", "[", "\"sort_limit\"", "]", "=", "int", "(", "limit", ")", "return", "query" ]
A function to prepare a query
[ "A", "function", "to", "prepare", "a", "query" ]
python
train
24.083333
Sliim/soundcloud-syncer
ssyncer/strack.py
https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/strack.py#L130-L138
def gen_localdir(self, localdir):
    """ Generate local directory where track will be saved.
    Create it if it does not exist.
    """
    directory = "{0}/{1}/".format(localdir, self.get("username"))
    if not os.path.exists(directory):
        os.makedirs(directory)
    return directory
[ "def", "gen_localdir", "(", "self", ",", "localdir", ")", ":", "directory", "=", "\"{0}/{1}/\"", ".", "format", "(", "localdir", ",", "self", ".", "get", "(", "\"username\"", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "return", "directory" ]
Generate local directory where track will be saved. Create it if not exists.
[ "Generate", "local", "directory", "where", "track", "will", "be", "saved", ".", "Create", "it", "if", "not", "exists", "." ]
python
train
34.888889
pygobject/pgi
pgi/importer.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/importer.py#L78-L114
def _check_require_version(namespace, stacklevel):
    """A context manager which tries to give helpful warnings
    about missing gi.require_version() which could potentially
    break code if only an older version than expected is installed
    or a new version gets introduced.

    ::

        with _check_require_version("Gtk", stacklevel):
            load_namespace_and_overrides()
    """
    # a generator used via `with`, so presumably wrapped with
    # contextlib.contextmanager at its definition site in the module
    repository = GIRepository()
    was_loaded = repository.is_registered(namespace)

    yield

    if was_loaded:
        # it was loaded before by another import which depended on this
        # namespace or by C code like libpeas
        return

    if namespace in ("GLib", "GObject", "Gio"):
        # part of glib (we have bigger problems if versions change there)
        return

    if get_required_version(namespace) is not None:
        # the version was forced using require_version()
        return

    version = repository.get_version(namespace)
    warnings.warn(
        "%(namespace)s was imported without specifying a version first. "
        "Use gi.require_version('%(namespace)s', '%(version)s') before "
        "import to ensure that the right version gets loaded." %
        {"namespace": namespace, "version": version},
        PyGIWarning, stacklevel=stacklevel)
[ "def", "_check_require_version", "(", "namespace", ",", "stacklevel", ")", ":", "repository", "=", "GIRepository", "(", ")", "was_loaded", "=", "repository", ".", "is_registered", "(", "namespace", ")", "yield", "if", "was_loaded", ":", "# it was loaded before by another import which depended on this", "# namespace or by C code like libpeas", "return", "if", "namespace", "in", "(", "\"GLib\"", ",", "\"GObject\"", ",", "\"Gio\"", ")", ":", "# part of glib (we have bigger problems if versions change there)", "return", "if", "get_required_version", "(", "namespace", ")", "is", "not", "None", ":", "# the version was forced using require_version()", "return", "version", "=", "repository", ".", "get_version", "(", "namespace", ")", "warnings", ".", "warn", "(", "\"%(namespace)s was imported without specifying a version first. \"", "\"Use gi.require_version('%(namespace)s', '%(version)s') before \"", "\"import to ensure that the right version gets loaded.\"", "%", "{", "\"namespace\"", ":", "namespace", ",", "\"version\"", ":", "version", "}", ",", "PyGIWarning", ",", "stacklevel", "=", "stacklevel", ")" ]
A context manager which tries to give helpful warnings about missing gi.require_version() which could potentially break code if only an older version than expected is installed or a new version gets introduced. :: with _check_require_version("Gtk", stacklevel): load_namespace_and_overrides()
[ "A", "context", "manager", "which", "tries", "to", "give", "helpful", "warnings", "about", "missing", "gi", ".", "require_version", "()", "which", "could", "potentially", "break", "code", "if", "only", "an", "older", "version", "than", "expected", "is", "installed", "or", "a", "new", "version", "gets", "introduced", "." ]
python
train
33.810811
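The warning text points at the standard fix on the caller's side, which with PyGObject (pgi mirrors the same API) looks like:

import gi
gi.require_version('Gtk', '3.0')   # pin the version before the first import
from gi.repository import Gtk      # no PyGIWarning now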
Erotemic/utool
utool/util_graph.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1673-L1703
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
    """Produce edges in a breadth-first-search starting at source.
    -----
    Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
    by D. Eppstein, July 2004.
    """
    from collections import deque
    from functools import partial
    if reverse:
        G = G.reverse()
    edges_iter = partial(G.edges_iter, keys=keys, data=data)

    visited_nodes = set([source])
    # visited_edges = set([])
    queue = deque([(source, edges_iter(source))])
    while queue:
        parent, edges = queue[0]
        try:
            edge = next(edges)
            edge_nodata = edge[0:3]
            # if edge_nodata not in visited_edges:
            yield edge
            # visited_edges.add(edge_nodata)
            child = edge_nodata[1]
            if child not in visited_nodes:
                visited_nodes.add(child)
                queue.append((child, edges_iter(child)))
        except StopIteration:
            queue.popleft()
[ "def", "bfs_multi_edges", "(", "G", ",", "source", ",", "reverse", "=", "False", ",", "keys", "=", "True", ",", "data", "=", "False", ")", ":", "from", "collections", "import", "deque", "from", "functools", "import", "partial", "if", "reverse", ":", "G", "=", "G", ".", "reverse", "(", ")", "edges_iter", "=", "partial", "(", "G", ".", "edges_iter", ",", "keys", "=", "keys", ",", "data", "=", "data", ")", "list", "(", "G", ".", "edges_iter", "(", "'multitest'", ",", "keys", "=", "True", ",", "data", "=", "True", ")", ")", "visited_nodes", "=", "set", "(", "[", "source", "]", ")", "# visited_edges = set([])", "queue", "=", "deque", "(", "[", "(", "source", ",", "edges_iter", "(", "source", ")", ")", "]", ")", "while", "queue", ":", "parent", ",", "edges", "=", "queue", "[", "0", "]", "try", ":", "edge", "=", "next", "(", "edges", ")", "edge_nodata", "=", "edge", "[", "0", ":", "3", "]", "# if edge_nodata not in visited_edges:", "yield", "edge", "# visited_edges.add(edge_nodata)", "child", "=", "edge_nodata", "[", "1", "]", "if", "child", "not", "in", "visited_nodes", ":", "visited_nodes", ".", "add", "(", "child", ")", "queue", ".", "append", "(", "(", "child", ",", "edges_iter", "(", "child", ")", ")", ")", "except", "StopIteration", ":", "queue", ".", "popleft", "(", ")" ]
Produce edges in a breadth-first-search starting at source. ----- Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py by D. Eppstein, July 2004.
[ "Produce", "edges", "in", "a", "breadth", "-", "first", "-", "search", "starting", "at", "source", ".", "-----", "Based", "on", "http", ":", "//", "www", ".", "ics", ".", "uci", ".", "edu", "/", "~eppstein", "/", "PADS", "/", "BFS", ".", "py", "by", "D", ".", "Eppstein", "July", "2004", "." ]
python
train
33.516129
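A hypothetical usage sketch, assuming networkx 1.x (where MultiDiGraph still exposes edges_iter; the method was removed in 2.x) and that bfs_multi_edges above is in scope:

import networkx as nx

G = nx.MultiDiGraph()
G.add_edge('a', 'b', key=0, weight=1)
G.add_edge('a', 'b', key=1, weight=2)  # parallel edge, distinguished by its key
G.add_edge('b', 'c', key=0, weight=3)

# Yields every multi-edge reachable from the source in BFS order, e.g.
# ('a', 'b', 0, {'weight': 1}), ('a', 'b', 1, {'weight': 2}), ('b', 'c', 0, {'weight': 3})
for edge in bfs_multi_edges(G, 'a', keys=True, data=True):
    print(edge)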
minhhoit/yacms
yacms/project_template/fabfile.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/project_template/fabfile.py#L215-L222
def get_templates(): """ Returns each of the templates with env vars injected. """ injected = {} for name, data in templates.items(): injected[name] = dict([(k, v % env) for k, v in data.items()]) return injected
[ "def", "get_templates", "(", ")", ":", "injected", "=", "{", "}", "for", "name", ",", "data", "in", "templates", ".", "items", "(", ")", ":", "injected", "[", "name", "]", "=", "dict", "(", "[", "(", "k", ",", "v", "%", "env", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "]", ")", "return", "injected" ]
Returns each of the templates with env vars injected.
[ "Returns", "each", "of", "the", "templates", "with", "env", "vars", "injected", "." ]
python
train
29.625
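The %-interpolation it performs can be shown standalone; a sketch with a hypothetical template entry and a plain dict standing in for Fabric's env:

templates = {
    'nginx': {
        'local_path': 'deploy/nginx.conf',
        'remote_path': '/etc/nginx/sites-enabled/%(proj_name)s.conf',
    },
}
env = {'proj_name': 'myproject'}  # Fabric's env behaves like a dict here
injected = {name: {k: v % env for k, v in data.items()}
            for name, data in templates.items()}
assert injected['nginx']['remote_path'] == '/etc/nginx/sites-enabled/myproject.conf'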
sorgerlab/indra
indra/sources/ndex_cx/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L175-L204
def get_statements(self): """Convert network edges into Statements. Returns ------- list of Statements Converted INDRA Statements. """ edges = _get_dict_from_list('edges', self.cx) for edge in edges: edge_type = edge.get('i') if not edge_type: continue stmt_type = _stmt_map.get(edge_type) if stmt_type: id = edge['@id'] source_agent = self._node_agents.get(edge['s']) target_agent = self._node_agents.get(edge['t']) if not source_agent or not target_agent: logger.info("Skipping edge %s->%s: %s" % (self._node_names[edge['s']], self._node_names[edge['t']], edge)) continue ev = self._create_evidence(id) if stmt_type == Complex: stmt = stmt_type([source_agent, target_agent], evidence=ev) else: stmt = stmt_type(source_agent, target_agent, evidence=ev) self.statements.append(stmt) return self.statements
[ "def", "get_statements", "(", "self", ")", ":", "edges", "=", "_get_dict_from_list", "(", "'edges'", ",", "self", ".", "cx", ")", "for", "edge", "in", "edges", ":", "edge_type", "=", "edge", ".", "get", "(", "'i'", ")", "if", "not", "edge_type", ":", "continue", "stmt_type", "=", "_stmt_map", ".", "get", "(", "edge_type", ")", "if", "stmt_type", ":", "id", "=", "edge", "[", "'@id'", "]", "source_agent", "=", "self", ".", "_node_agents", ".", "get", "(", "edge", "[", "'s'", "]", ")", "target_agent", "=", "self", ".", "_node_agents", ".", "get", "(", "edge", "[", "'t'", "]", ")", "if", "not", "source_agent", "or", "not", "target_agent", ":", "logger", ".", "info", "(", "\"Skipping edge %s->%s: %s\"", "%", "(", "self", ".", "_node_names", "[", "edge", "[", "'s'", "]", "]", ",", "self", ".", "_node_names", "[", "edge", "[", "'t'", "]", "]", ",", "edge", ")", ")", "continue", "ev", "=", "self", ".", "_create_evidence", "(", "id", ")", "if", "stmt_type", "==", "Complex", ":", "stmt", "=", "stmt_type", "(", "[", "source_agent", ",", "target_agent", "]", ",", "evidence", "=", "ev", ")", "else", ":", "stmt", "=", "stmt_type", "(", "source_agent", ",", "target_agent", ",", "evidence", "=", "ev", ")", "self", ".", "statements", ".", "append", "(", "stmt", ")", "return", "self", ".", "statements" ]
Convert network edges into Statements. Returns ------- list of Statements Converted INDRA Statements.
[ "Convert", "network", "edges", "into", "Statements", "." ]
python
train
39.666667
google/mobly
mobly/controllers/android_device.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device.py#L101-L111
def destroy(ads): """Cleans up AndroidDevice objects. Args: ads: A list of AndroidDevice objects. """ for ad in ads: try: ad.services.stop_all() except: ad.log.exception('Failed to clean up properly.')
[ "def", "destroy", "(", "ads", ")", ":", "for", "ad", "in", "ads", ":", "try", ":", "ad", ".", "services", ".", "stop_all", "(", ")", "except", ":", "ad", ".", "log", ".", "exception", "(", "'Failed to clean up properly.'", ")" ]
Cleans up AndroidDevice objects. Args: ads: A list of AndroidDevice objects.
[ "Cleans", "up", "AndroidDevice", "objects", "." ]
python
train
23.272727
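A sketch of the usual create/teardown pairing around this helper; the config (a single emulator serial) is hypothetical:

from mobly.controllers import android_device

configs = [{'serial': 'emulator-5554'}]  # hypothetical device config
ads = android_device.create(configs)
try:
    pass  # drive the devices here
finally:
    android_device.destroy(ads)  # stops all services, logging (not raising) per-device errors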
bokeh/bokeh
bokeh/themes/theme.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/themes/theme.py#L186-L200
def apply_to_model(self, model): ''' Apply this theme to a model. .. warning:: Typically, don't call this method directly. Instead, set the theme on the :class:`~bokeh.document.Document` the model is a part of. ''' model.apply_theme(self._for_class(model.__class__)) # a little paranoia because it would be Bad(tm) to mess # this up... would be nicer if python had a way to freeze # the dict. if len(_empty_dict) > 0: raise RuntimeError("Somebody put stuff in _empty_dict")
[ "def", "apply_to_model", "(", "self", ",", "model", ")", ":", "model", ".", "apply_theme", "(", "self", ".", "_for_class", "(", "model", ".", "__class__", ")", ")", "# a little paranoia because it would be Bad(tm) to mess", "# this up... would be nicer if python had a way to freeze", "# the dict.", "if", "len", "(", "_empty_dict", ")", ">", "0", ":", "raise", "RuntimeError", "(", "\"Somebody put stuff in _empty_dict\"", ")" ]
Apply this theme to a model. .. warning:: Typically, don't call this method directly. Instead, set the theme on the :class:`~bokeh.document.Document` the model is a part of.
[ "Apply", "this", "theme", "to", "a", "model", "." ]
python
train
37.466667
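In normal use the theme is set on the document rather than applied model by model; a sketch (the themed attribute is illustrative):

from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.themes import Theme

curdoc().theme = Theme(json={
    'attrs': {'Figure': {'background_fill_color': '#2b2b2b'}}  # illustrative attribute
})
p = figure(title='themed')  # picks the theme up through its document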
senaite/senaite.core
bika/lims/browser/dashboard/dashboard.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/dashboard/dashboard.py#L205-L228
def check_dashboard_cookie(self): """ Check if the dashboard cookie should exist through bikasetup configuration. If it should exist but doesn't exist yet, the function creates it with all values as default. If it should exist and already exists, it returns the value. Otherwise, the function returns None. :return: a dictionary of strings """ # Getting cookie cookie_raw = self.request.get(DASHBOARD_FILTER_COOKIE, None) # If it doesn't exist, create it with default values if cookie_raw is None: cookie_raw = self._create_raw_data() self.request.response.setCookie( DASHBOARD_FILTER_COOKIE, json.dumps(cookie_raw), quoted=False, path='/') return cookie_raw return get_strings(json.loads(cookie_raw))
[ "def", "check_dashboard_cookie", "(", "self", ")", ":", "# Getting cookie", "cookie_raw", "=", "self", ".", "request", ".", "get", "(", "DASHBOARD_FILTER_COOKIE", ",", "None", ")", "# If it doesn't exist, create it with default values", "if", "cookie_raw", "is", "None", ":", "cookie_raw", "=", "self", ".", "_create_raw_data", "(", ")", "self", ".", "request", ".", "response", ".", "setCookie", "(", "DASHBOARD_FILTER_COOKIE", ",", "json", ".", "dumps", "(", "cookie_raw", ")", ",", "quoted", "=", "False", ",", "path", "=", "'/'", ")", "return", "cookie_raw", "return", "get_strings", "(", "json", ".", "loads", "(", "cookie_raw", ")", ")" ]
Check if the dashboard cookie should exist through bikasetup configuration. If it should exist but doesn't exist yet, the function creates it with all values as default. If it should exist and already exists, it returns the value. Otherwise, the function returns None. :return: a dictionary of strings
[ "Check", "if", "the", "dashboard", "cookie", "should", "exist", "through", "bikasetup", "configuration", "." ]
python
train
37.166667
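The JSON round trip the method relies on, sketched with the standard library and a hypothetical filter key (get_strings effectively coerces the decoded values back to plain strings):

import json

filters = {'filter_by_department_info': '1'}   # hypothetical cookie content
raw = json.dumps(filters)                      # what setCookie stores
restored = {str(k): str(v) for k, v in json.loads(raw).items()}
assert restored == filters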
davidmogar/cucco
cucco/cucco.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L84-L114
def _parse_stop_words_file(self, path): """Load stop words from the given path. Parse the stop words file, saving each word found in it in a set for the language of the file. This language is obtained from the file name. If the file doesn't exist, the method will have no effect. Args: path: Path to the stop words file. Returns: A boolean indicating whether the file was loaded. """ language = None loaded = False if os.path.isfile(path): self._logger.debug('Loading stop words in %s', path) language = path.split('-')[-1] if not language in self.__stop_words: self.__stop_words[language] = set() with codecs.open(path, 'r', 'UTF-8') as file: loaded = True for word in file: self.__stop_words[language].add(word.strip()) return loaded
[ "def", "_parse_stop_words_file", "(", "self", ",", "path", ")", ":", "language", "=", "None", "loaded", "=", "False", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Loading stop words in %s'", ",", "path", ")", "language", "=", "path", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "if", "not", "language", "in", "self", ".", "__stop_words", ":", "self", ".", "__stop_words", "[", "language", "]", "=", "set", "(", ")", "with", "codecs", ".", "open", "(", "path", ",", "'r'", ",", "'UTF-8'", ")", "as", "file", ":", "loaded", "=", "True", "for", "word", "in", "file", ":", "self", ".", "__stop_words", "[", "language", "]", ".", "add", "(", "word", ".", "strip", "(", ")", ")", "return", "loaded" ]
Load stop words from the given path. Parse the stop words file, saving each word found in it in a set for the language of the file. This language is obtained from the file name. If the file doesn't exist, the method will have no effect. Args: path: Path to the stop words file. Returns: A boolean indicating whether the file was loaded.
[ "Load", "stop", "words", "from", "the", "given", "path", "." ]
python
train
30.612903
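The language code is recovered purely from the file-name suffix; a quick sketch of that convention with a hypothetical path and word set:

path = 'stop-words/stop-words-en'   # hypothetical layout
language = path.split('-')[-1]      # -> 'en'
stop_words = {language: {'the', 'and', 'of'}}  # one set per language, as in __stop_words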
tensorpack/tensorpack
examples/GAN/Image2Image.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/GAN/Image2Image.py#L149-L164
def split_input(img): """ img: an RGB image of shape (s, 2s, 3). :return: [input, output] """ # split the image into left + right pairs s = img.shape[0] assert img.shape[1] == 2 * s input, output = img[:, :s, :], img[:, s:, :] if args.mode == 'BtoA': input, output = output, input if IN_CH == 1: input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis] if OUT_CH == 1: output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis] return [input, output]
[ "def", "split_input", "(", "img", ")", ":", "# split the image into left + right pairs", "s", "=", "img", ".", "shape", "[", "0", "]", "assert", "img", ".", "shape", "[", "1", "]", "==", "2", "*", "s", "input", ",", "output", "=", "img", "[", ":", ",", ":", "s", ",", ":", "]", ",", "img", "[", ":", ",", "s", ":", ",", ":", "]", "if", "args", ".", "mode", "==", "'BtoA'", ":", "input", ",", "output", "=", "output", ",", "input", "if", "IN_CH", "==", "1", ":", "input", "=", "cv2", ".", "cvtColor", "(", "input", ",", "cv2", ".", "COLOR_RGB2GRAY", ")", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", "if", "OUT_CH", "==", "1", ":", "output", "=", "cv2", ".", "cvtColor", "(", "output", ",", "cv2", ".", "COLOR_RGB2GRAY", ")", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", "return", "[", "input", ",", "output", "]" ]
img: an RGB image of shape (s, 2s, 3). :return: [input, output]
[ "img", ":", "an", "RGB", "image", "of", "shape", "(", "s", "2s", "3", ")", ".", ":", "return", ":", "[", "input", "output", "]" ]
python
train
32.8125
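The core slicing can be checked with a synthetic side-by-side pair in NumPy:

import numpy as np

s = 256
img = np.zeros((s, 2 * s, 3), dtype=np.uint8)  # (s, 2s, 3): input|output stacked horizontally
assert img.shape[1] == 2 * img.shape[0]
inp, out = img[:, :s, :], img[:, s:, :]        # left half, right half
assert inp.shape == out.shape == (s, s, 3)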
libyal/dtfabric
dtfabric/runtime/data_maps.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L1584-L1686
def _CompositeMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): """Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream. """ context_state = getattr(context, 'state', {}) attribute_index = context_state.get('attribute_index', 0) mapped_values = context_state.get('mapped_values', None) subcontext = context_state.get('context', None) if not mapped_values: mapped_values = self._structure_values_class() if not subcontext: subcontext = DataTypeMapContext(values={ type(mapped_values).__name__: mapped_values}) members_data_size = 0 for attribute_index in range(attribute_index, self._number_of_attributes): attribute_name = self._attribute_names[attribute_index] data_type_map = self._data_type_maps[attribute_index] member_definition = self._data_type_definition.members[attribute_index] condition = getattr(member_definition, 'condition', None) if condition: namespace = dict(subcontext.values) # Make sure __builtins__ contains an empty dictionary. namespace['__builtins__'] = {} try: condition_result = eval(condition, namespace) # pylint: disable=eval-used except Exception as exception: raise errors.MappingError( 'Unable to evaluate condition with error: {0!s}'.format( exception)) if not isinstance(condition_result, bool): raise errors.MappingError( 'Condition does not result in a boolean value') if not condition_result: continue if isinstance(member_definition, data_types.PaddingDefinition): _, byte_size = divmod( members_data_size, member_definition.alignment_size) if byte_size > 0: byte_size = member_definition.alignment_size - byte_size data_type_map.byte_size = byte_size try: value = data_type_map.MapByteStream( byte_stream, byte_offset=byte_offset, context=subcontext) setattr(mapped_values, attribute_name, value) except errors.ByteStreamTooSmallError as exception: context_state['attribute_index'] = attribute_index context_state['context'] = subcontext context_state['mapped_values'] = mapped_values raise errors.ByteStreamTooSmallError(exception) except Exception as exception: raise errors.MappingError(exception) supported_values = getattr(member_definition, 'values', None) if supported_values and value not in supported_values: raise errors.MappingError( 'Value: {0!s} not in supported values: {1:s}'.format( value, ', '.join([ '{0!s}'.format(value) for value in supported_values]))) byte_offset += subcontext.byte_size members_data_size += subcontext.byte_size if attribute_index != (self._number_of_attributes - 1): context_state['attribute_index'] = attribute_index context_state['context'] = subcontext context_state['mapped_values'] = mapped_values error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: missing attribute: {2:d}').format( self._data_type_definition.name, byte_offset, attribute_index) raise errors.ByteStreamTooSmallError(error_string) if context: context.byte_size = members_data_size context.state = {} return mapped_values
[ "def", "_CompositeMapByteStream", "(", "self", ",", "byte_stream", ",", "byte_offset", "=", "0", ",", "context", "=", "None", ",", "*", "*", "unused_kwargs", ")", ":", "context_state", "=", "getattr", "(", "context", ",", "'state'", ",", "{", "}", ")", "attribute_index", "=", "context_state", ".", "get", "(", "'attribute_index'", ",", "0", ")", "mapped_values", "=", "context_state", ".", "get", "(", "'mapped_values'", ",", "None", ")", "subcontext", "=", "context_state", ".", "get", "(", "'context'", ",", "None", ")", "if", "not", "mapped_values", ":", "mapped_values", "=", "self", ".", "_structure_values_class", "(", ")", "if", "not", "subcontext", ":", "subcontext", "=", "DataTypeMapContext", "(", "values", "=", "{", "type", "(", "mapped_values", ")", ".", "__name__", ":", "mapped_values", "}", ")", "members_data_size", "=", "0", "for", "attribute_index", "in", "range", "(", "attribute_index", ",", "self", ".", "_number_of_attributes", ")", ":", "attribute_name", "=", "self", ".", "_attribute_names", "[", "attribute_index", "]", "data_type_map", "=", "self", ".", "_data_type_maps", "[", "attribute_index", "]", "member_definition", "=", "self", ".", "_data_type_definition", ".", "members", "[", "attribute_index", "]", "condition", "=", "getattr", "(", "member_definition", ",", "'condition'", ",", "None", ")", "if", "condition", ":", "namespace", "=", "dict", "(", "subcontext", ".", "values", ")", "# Make sure __builtins__ contains an empty dictionary.", "namespace", "[", "'__builtins__'", "]", "=", "{", "}", "try", ":", "condition_result", "=", "eval", "(", "condition", ",", "namespace", ")", "# pylint: disable=eval-used", "except", "Exception", "as", "exception", ":", "raise", "errors", ".", "MappingError", "(", "'Unable to evaluate condition with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "if", "not", "isinstance", "(", "condition_result", ",", "bool", ")", ":", "raise", "errors", ".", "MappingError", "(", "'Condition does not result in a boolean value'", ")", "if", "not", "condition_result", ":", "continue", "if", "isinstance", "(", "member_definition", ",", "data_types", ".", "PaddingDefinition", ")", ":", "_", ",", "byte_size", "=", "divmod", "(", "members_data_size", ",", "member_definition", ".", "alignment_size", ")", "if", "byte_size", ">", "0", ":", "byte_size", "=", "member_definition", ".", "alignment_size", "-", "byte_size", "data_type_map", ".", "byte_size", "=", "byte_size", "try", ":", "value", "=", "data_type_map", ".", "MapByteStream", "(", "byte_stream", ",", "byte_offset", "=", "byte_offset", ",", "context", "=", "subcontext", ")", "setattr", "(", "mapped_values", ",", "attribute_name", ",", "value", ")", "except", "errors", ".", "ByteStreamTooSmallError", "as", "exception", ":", "context_state", "[", "'attribute_index'", "]", "=", "attribute_index", "context_state", "[", "'context'", "]", "=", "subcontext", "context_state", "[", "'mapped_values'", "]", "=", "mapped_values", "raise", "errors", ".", "ByteStreamTooSmallError", "(", "exception", ")", "except", "Exception", "as", "exception", ":", "raise", "errors", ".", "MappingError", "(", "exception", ")", "supported_values", "=", "getattr", "(", "member_definition", ",", "'values'", ",", "None", ")", "if", "supported_values", "and", "value", "not", "in", "supported_values", ":", "raise", "errors", ".", "MappingError", "(", "'Value: {0!s} not in supported values: {1:s}'", ".", "format", "(", "value", ",", "', '", ".", "join", "(", "[", "'{0!s}'", ".", "format", "(", "value", ")", "for", "value", "in", 
"supported_values", "]", ")", ")", ")", "byte_offset", "+=", "subcontext", ".", "byte_size", "members_data_size", "+=", "subcontext", ".", "byte_size", "if", "attribute_index", "!=", "(", "self", ".", "_number_of_attributes", "-", "1", ")", ":", "context_state", "[", "'attribute_index'", "]", "=", "attribute_index", "context_state", "[", "'context'", "]", "=", "subcontext", "context_state", "[", "'mapped_values'", "]", "=", "mapped_values", "error_string", "=", "(", "'Unable to read: {0:s} from byte stream at offset: {1:d} '", "'with error: missing attribute: {2:d}'", ")", ".", "format", "(", "self", ".", "_data_type_definition", ".", "name", ",", "byte_offset", ",", "attribute_index", ")", "raise", "errors", ".", "ByteStreamTooSmallError", "(", "error_string", ")", "if", "context", ":", "context", ".", "byte_size", "=", "members_data_size", "context", ".", "state", "=", "{", "}", "return", "mapped_values" ]
Maps a sequence of composite data types on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: object: mapped value. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
[ "Maps", "a", "sequence", "of", "composite", "data", "types", "on", "a", "byte", "stream", "." ]
python
train
36.495146
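One detail worth isolating is the sandboxed condition evaluation: each member condition is eval'ed against the values mapped so far, with __builtins__ replaced by an empty dict so the expression cannot reach arbitrary builtins. A minimal sketch (the member name is hypothetical):

namespace = {'format_version': 2}   # values mapped so far
namespace['__builtins__'] = {}      # block builtins inside the expression, as the library does
condition_result = eval('format_version > 1', namespace)  # pylint: disable=eval-used
assert condition_result is True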
python-diamond/Diamond
src/collectors/smart/smart.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/smart/smart.py#L31-L43
def get_default_config(self): """ Returns default configuration options. """ config = super(SmartCollector, self).get_default_config() config.update({ 'path': 'smart', 'bin': 'smartctl', 'use_sudo': False, 'sudo_cmd': '/usr/bin/sudo', 'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$', }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "SmartCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'smart'", ",", "'bin'", ":", "'smartctl'", ",", "'use_sudo'", ":", "False", ",", "'sudo_cmd'", ":", "'/usr/bin/sudo'", ",", "'devices'", ":", "'^disk[0-9]$|^sd[a-z]$|^hd[a-z]$'", ",", "}", ")", "return", "config" ]
Returns default configuration options.
[ "Returns", "default", "configuration", "options", "." ]
python
train
32.076923
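A worked example of the devices pattern, an alternation of anchored regexes that admits whole-disk names but not partitions:

import re

pattern = re.compile('^disk[0-9]$|^sd[a-z]$|^hd[a-z]$')
candidates = ['sda', 'sda1', 'hdc', 'disk0', 'nvme0n1']
assert [d for d in candidates if pattern.match(d)] == ['sda', 'hdc', 'disk0']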
razorpay/razorpay-python
razorpay/resources/token.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/token.py#L24-L35
def all(self, customer_id, data={}, **kwargs):
    """
    Get all tokens for given customer Id

    Args:
        customer_id : Customer Id for which tokens have to be fetched

    Returns:
        Token dicts for given customer Id
    """
    url = "{}/{}/tokens".format(self.base_url, customer_id)
    return self.get_url(url, data, **kwargs)
[ "def", "all", "(", "self", ",", "customer_id", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "url", "=", "\"{}/{}/tokens\"", ".", "format", "(", "self", ".", "base_url", ",", "customer_id", ")", "return", "self", ".", "get_url", "(", "url", ",", "data", ",", "*", "*", "kwargs", ")" ]
Get all tokens for given customer Id

Args:
    customer_id : Customer Id for which tokens have to be fetched

Returns:
    Token dicts for given customer Id
[ "Get", "all", "tokens", "for", "given", "customer", "Id" ]
python
train
30.833333
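Typical client-side usage; the credentials and customer id are placeholders:

import razorpay

client = razorpay.Client(auth=('rzp_test_key', 'secret'))  # placeholder credentials
tokens = client.token.all('cust_0000000000000')            # placeholder customer id
# issues GET <base_url>/cust_0000000000000/tokens and returns the token dicts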
saltstack/salt
salt/modules/mine.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mine.py#L342-L362
def delete(fun): ''' Remove specific function contents of minion. Returns True on success. CLI Example: .. code-block:: bash salt '*' mine.delete 'network.interfaces' ''' if __opts__['file_client'] == 'local': data = __salt__['data.get']('mine_cache') if isinstance(data, dict) and fun in data: del data[fun] return __salt__['data.update']('mine_cache', data) load = { 'cmd': '_mine_delete', 'id': __opts__['id'], 'fun': fun, } return _mine_send(load, __opts__)
[ "def", "delete", "(", "fun", ")", ":", "if", "__opts__", "[", "'file_client'", "]", "==", "'local'", ":", "data", "=", "__salt__", "[", "'data.get'", "]", "(", "'mine_cache'", ")", "if", "isinstance", "(", "data", ",", "dict", ")", "and", "fun", "in", "data", ":", "del", "data", "[", "fun", "]", "return", "__salt__", "[", "'data.update'", "]", "(", "'mine_cache'", ",", "data", ")", "load", "=", "{", "'cmd'", ":", "'_mine_delete'", ",", "'id'", ":", "__opts__", "[", "'id'", "]", ",", "'fun'", ":", "fun", ",", "}", "return", "_mine_send", "(", "load", ",", "__opts__", ")" ]
Remove specific function contents of minion. Returns True on success. CLI Example: .. code-block:: bash salt '*' mine.delete 'network.interfaces'
[ "Remove", "specific", "function", "contents", "of", "minion", ".", "Returns", "True", "on", "success", "." ]
python
train
26.666667
delph-in/pydelphin
delphin/lib/pegre.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/lib/pegre.py#L201-L210
def optional(e, default=Ignore): """ Create a PEG function to optionally match an expression. """ def match_optional(s, grm=None, pos=0): try: return e(s, grm, pos) except PegreError: return PegreResult(s, default, (pos, pos)) return match_optional
[ "def", "optional", "(", "e", ",", "default", "=", "Ignore", ")", ":", "def", "match_optional", "(", "s", ",", "grm", "=", "None", ",", "pos", "=", "0", ")", ":", "try", ":", "return", "e", "(", "s", ",", "grm", ",", "pos", ")", "except", "PegreError", ":", "return", "PegreResult", "(", "s", ",", "default", ",", "(", "pos", ",", "pos", ")", ")", "return", "match_optional" ]
Create a PEG function to optionally match an expression.
[ "Create", "a", "PEG", "function", "to", "optionally", "match", "an", "expression", "." ]
python
train
29.9
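A sketch of how the combinator composes, assuming a companion literal() combinator from the same module (the name and the exact result shape are assumptions):

sign = optional(literal('-'), default='')  # assumed literal() combinator
print(sign('-42'))  # the '-' matches, so the wrapped result is returned
print(sign('42'))   # no match: PegreResult with the default and a zero-width (pos, pos) span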
Sanji-IO/sanji
sanji/model/__init__.py
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/model/__init__.py#L118-L127
def removeAll(self): """Remove all objects Returns: len(int): affected rows """ before_len = len(self.model.db) self.model.db = [] if not self._batch.enable.is_set(): self.model.save_db() return before_len - len(self.model.db)
[ "def", "removeAll", "(", "self", ")", ":", "before_len", "=", "len", "(", "self", ".", "model", ".", "db", ")", "self", ".", "model", ".", "db", "=", "[", "]", "if", "not", "self", ".", "_batch", ".", "enable", ".", "is_set", "(", ")", ":", "self", ".", "model", ".", "save_db", "(", ")", "return", "before_len", "-", "len", "(", "self", ".", "model", ".", "db", ")" ]
Remove all objects Returns: len(int): affected rows
[ "Remove", "all", "objects", "Returns", ":", "len", "(", "int", ")", ":", "affected", "rows" ]
python
train
30.5
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L109-L130
def PorodGuinier(q, a, alpha, Rg): """Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return PorodGuinierMulti(q, a, alpha, Rg)
[ "def", "PorodGuinier", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ")", ":", "return", "PorodGuinierMulti", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ")" ]
Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Porod", "-", "Guinier", "scattering" ]
python
train
28.545455
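The cross-over point and Guinier prefactor follow from matching the value and the logarithmic derivative of the two branches at q_sep; a short derivation, consistent with Hammouda's Q1 = (1/Rg)*sqrt(3d/2) for d = -alpha:

\[
a\,q_{\mathrm{sep}}^{\alpha} = G\,e^{-q_{\mathrm{sep}}^{2} R_g^{2}/3},
\qquad
\frac{\alpha}{q_{\mathrm{sep}}} = -\frac{2\,q_{\mathrm{sep}} R_g^{2}}{3}
\;\Longrightarrow\;
q_{\mathrm{sep}} = \frac{1}{R_g}\sqrt{-\frac{3\alpha}{2}},
\quad
G = a\,q_{\mathrm{sep}}^{\alpha}\,e^{q_{\mathrm{sep}}^{2} R_g^{2}/3}.
\]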
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxjob.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L233-L243
def remove_tags(self, tags, **kwargs): """ :param tags: Tags to remove from the job :type tags: list of strings Removes each of the specified tags from the job. Takes no action for tags that the job does not currently have. """ dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs)
[ "def", "remove_tags", "(", "self", ",", "tags", ",", "*", "*", "kwargs", ")", ":", "dxpy", ".", "api", ".", "job_remove_tags", "(", "self", ".", "_dxid", ",", "{", "\"tags\"", ":", "tags", "}", ",", "*", "*", "kwargs", ")" ]
:param tags: Tags to remove from the job :type tags: list of strings Removes each of the specified tags from the job. Takes no action for tags that the job does not currently have.
[ ":", "param", "tags", ":", "Tags", "to", "remove", "from", "the", "job", ":", "type", "tags", ":", "list", "of", "strings" ]
python
train
30.818182
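Typical usage against a job handler; the job ID and tags are placeholders:

import dxpy

job = dxpy.DXJob('job-xxxx')             # placeholder job ID
job.remove_tags(['temporary', 'debug'])  # tags the job lacks are ignored, per the docstring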
andreikop/qutepart
qutepart/indenter/ruby.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/indenter/ruby.py#L77-L84
def _prevNonCommentBlock(self, block):
    """Return the closest non-empty line, ignoring comments
    (result <= line). Return -1 if the document contains no such line
    """
    block = self._prevNonEmptyBlock(block)
    while block.isValid() and self._isCommentBlock(block):
        block = self._prevNonEmptyBlock(block)
    return block
[ "def", "_prevNonCommentBlock", "(", "self", ",", "block", ")", ":", "block", "=", "self", ".", "_prevNonEmptyBlock", "(", "block", ")", "while", "block", ".", "isValid", "(", ")", "and", "self", ".", "_isCommentBlock", "(", "block", ")", ":", "block", "=", "self", ".", "_prevNonEmptyBlock", "(", "block", ")", "return", "block" ]
Return the closest non-empty line, ignoring comments (result <= line). Return -1 if the document contains no such line
[ "Return", "the", "closest", "non", "-", "empty", "line", "ignoring", "comments", "(", "result", "<", "=", "line", ")", ".", "Return", "-", "1", "if", "the", "document" ]
python
train
42.625
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L983-L986
def retrieve_loadbalancer_status(self, loadbalancer, **_params): """Retrieves status for a certain load balancer.""" return self.get(self.lbaas_loadbalancer_path_status % (loadbalancer), params=_params)
[ "def", "retrieve_loadbalancer_status", "(", "self", ",", "loadbalancer", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "lbaas_loadbalancer_path_status", "%", "(", "loadbalancer", ")", ",", "params", "=", "_params", ")" ]
Retrieves status for a certain load balancer.
[ "Retrieves", "status", "for", "a", "certain", "load", "balancer", "." ]
python
train
59.75
prompt-toolkit/ptpython
ptpython/repl.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/repl.py#L97-L166
def _execute(self, line): """ Evaluate the line and print the result. """ output = self.app.output # WORKAROUND: Due to a bug in Jedi, the current directory is removed # from sys.path. See: https://github.com/davidhalter/jedi/issues/1148 if '' not in sys.path: sys.path.insert(0, '') def compile_with_flags(code, mode): " Compile code with the right compiler flags. " return compile(code, '<stdin>', mode, flags=self.get_compiler_flags(), dont_inherit=True) if line.lstrip().startswith('\x1a'): # When the input starts with Ctrl-Z, quit the REPL. self.app.exit() elif line.lstrip().startswith('!'): # Run as shell command os.system(line[1:]) else: # Try eval first try: code = compile_with_flags(line, 'eval') result = eval(code, self.get_globals(), self.get_locals()) locals = self.get_locals() locals['_'] = locals['_%i' % self.current_statement_index] = result if result is not None: out_prompt = self.get_output_prompt() try: result_str = '%r\n' % (result, ) except UnicodeDecodeError: # In Python 2: `__repr__` should return a bytestring, # so to put it in a unicode context could raise an # exception that the 'ascii' codec can't decode certain # characters. Decode as utf-8 in that case. result_str = '%s\n' % repr(result).decode('utf-8') # Align every line to the first one. line_sep = '\n' + ' ' * fragment_list_width(out_prompt) result_str = line_sep.join(result_str.splitlines()) + '\n' # Write output tokens. if self.enable_syntax_highlighting: formatted_output = merge_formatted_text([ out_prompt, PygmentsTokens(list(_lex_python_result(result_str))), ]) else: formatted_output = FormattedText( out_prompt + [('', result_str)]) print_formatted_text( formatted_output, style=self._current_style, style_transformation=self.style_transformation, include_default_pygments_style=False) # If not a valid `eval` expression, run using `exec` instead. except SyntaxError: code = compile_with_flags(line, 'exec') six.exec_(code, self.get_globals(), self.get_locals()) output.flush()
[ "def", "_execute", "(", "self", ",", "line", ")", ":", "output", "=", "self", ".", "app", ".", "output", "# WORKAROUND: Due to a bug in Jedi, the current directory is removed", "# from sys.path. See: https://github.com/davidhalter/jedi/issues/1148", "if", "''", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "''", ")", "def", "compile_with_flags", "(", "code", ",", "mode", ")", ":", "\" Compile code with the right compiler flags. \"", "return", "compile", "(", "code", ",", "'<stdin>'", ",", "mode", ",", "flags", "=", "self", ".", "get_compiler_flags", "(", ")", ",", "dont_inherit", "=", "True", ")", "if", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "'\\x1a'", ")", ":", "# When the input starts with Ctrl-Z, quit the REPL.", "self", ".", "app", ".", "exit", "(", ")", "elif", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "'!'", ")", ":", "# Run as shell command", "os", ".", "system", "(", "line", "[", "1", ":", "]", ")", "else", ":", "# Try eval first", "try", ":", "code", "=", "compile_with_flags", "(", "line", ",", "'eval'", ")", "result", "=", "eval", "(", "code", ",", "self", ".", "get_globals", "(", ")", ",", "self", ".", "get_locals", "(", ")", ")", "locals", "=", "self", ".", "get_locals", "(", ")", "locals", "[", "'_'", "]", "=", "locals", "[", "'_%i'", "%", "self", ".", "current_statement_index", "]", "=", "result", "if", "result", "is", "not", "None", ":", "out_prompt", "=", "self", ".", "get_output_prompt", "(", ")", "try", ":", "result_str", "=", "'%r\\n'", "%", "(", "result", ",", ")", "except", "UnicodeDecodeError", ":", "# In Python 2: `__repr__` should return a bytestring,", "# so to put it in a unicode context could raise an", "# exception that the 'ascii' codec can't decode certain", "# characters. Decode as utf-8 in that case.", "result_str", "=", "'%s\\n'", "%", "repr", "(", "result", ")", ".", "decode", "(", "'utf-8'", ")", "# Align every line to the first one.", "line_sep", "=", "'\\n'", "+", "' '", "*", "fragment_list_width", "(", "out_prompt", ")", "result_str", "=", "line_sep", ".", "join", "(", "result_str", ".", "splitlines", "(", ")", ")", "+", "'\\n'", "# Write output tokens.", "if", "self", ".", "enable_syntax_highlighting", ":", "formatted_output", "=", "merge_formatted_text", "(", "[", "out_prompt", ",", "PygmentsTokens", "(", "list", "(", "_lex_python_result", "(", "result_str", ")", ")", ")", ",", "]", ")", "else", ":", "formatted_output", "=", "FormattedText", "(", "out_prompt", "+", "[", "(", "''", ",", "result_str", ")", "]", ")", "print_formatted_text", "(", "formatted_output", ",", "style", "=", "self", ".", "_current_style", ",", "style_transformation", "=", "self", ".", "style_transformation", ",", "include_default_pygments_style", "=", "False", ")", "# If not a valid `eval` expression, run using `exec` instead.", "except", "SyntaxError", ":", "code", "=", "compile_with_flags", "(", "line", ",", "'exec'", ")", "six", ".", "exec_", "(", "code", ",", "self", ".", "get_globals", "(", ")", ",", "self", ".", "get_locals", "(", ")", ")", "output", ".", "flush", "(", ")" ]
Evaluate the line and print the result.
[ "Evaluate", "the", "line", "and", "print", "the", "result", "." ]
python
train
41.457143
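The eval-then-exec fallback at the heart of this method can be isolated in a few lines; a simplified sketch without the prompt-toolkit output handling:

def run_line(line, globals_, locals_):
    try:
        code = compile(line, '<stdin>', 'eval')  # only expressions compile in 'eval' mode
    except SyntaxError:
        exec(compile(line, '<stdin>', 'exec'), globals_, locals_)  # statements
    else:
        result = eval(code, globals_, locals_)
        if result is not None:
            print(repr(result))
        return result

ns = {}
run_line('x = 2 + 2', ns, ns)  # statement path
run_line('x * 10', ns, ns)     # expression path: prints 40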
markovmodel/msmtools
msmtools/analysis/dense/pcca.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/pcca.py#L460-L492
def coarsegrain(P, n):
    """ Coarse-grains transition matrix P to n sets using PCCA

    Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:

    .. math::

        \tilde{P} = M^T P M (M^T M)^{-1}

    See [2]_ for the derivation of this form from the coarse-graining method
    first derived in [1]_.

    References
    ----------
    [1] S. Kube and M. Weber
        A coarse graining method for the identification of transition rates between molecular conformations.
        J. Chem. Phys. 126, 024103 (2007)
    [2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
        Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
        J. Chem. Phys. 139, 184114 (2013)
    """
    M = pcca(P, n)
    # coarse-grained transition matrix
    W = np.linalg.inv(np.dot(M.T, M))
    A = np.dot(np.dot(M.T, P), M)
    P_coarse = np.dot(W, A)

    # symmetrize and renormalize to eliminate numerical errors
    from msmtools.analysis import stationary_distribution
    pi_coarse = np.dot(M.T, stationary_distribution(P))
    X = np.dot(np.diag(pi_coarse), P_coarse)
    P_coarse = X / X.sum(axis=1)[:, None]

    return P_coarse
[ "def", "coarsegrain", "(", "P", ",", "n", ")", ":", "M", "=", "pcca", "(", "P", ",", "n", ")", "# coarse-grained transition matrix", "W", "=", "np", ".", "linalg", ".", "inv", "(", "np", ".", "dot", "(", "M", ".", "T", ",", "M", ")", ")", "A", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "M", ".", "T", ",", "P", ")", ",", "M", ")", "P_coarse", "=", "np", ".", "dot", "(", "W", ",", "A", ")", "# symmetrize and renormalize to eliminate numerical errors", "from", "msmtools", ".", "analysis", "import", "stationary_distribution", "pi_coarse", "=", "np", ".", "dot", "(", "M", ".", "T", ",", "stationary_distribution", "(", "P", ")", ")", "X", "=", "np", ".", "dot", "(", "np", ".", "diag", "(", "pi_coarse", ")", ",", "P_coarse", ")", "P_coarse", "=", "X", "/", "X", ".", "sum", "(", "axis", "=", "1", ")", "[", ":", ",", "None", "]", "return", "P_coarse" ]
Coarse-grains transition matrix P to n sets using PCCA

Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:

.. math::

    \tilde{P} = M^T P M (M^T M)^{-1}

See [2]_ for the derivation of this form from the coarse-graining method
first derived in [1]_.

References
----------
[1] S. Kube and M. Weber
    A coarse graining method for the identification of transition rates between molecular conformations.
    J. Chem. Phys. 126, 024103 (2007)
[2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
    Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
    J. Chem. Phys. 139, 184114 (2013)
[ "Coarse", "-", "grains", "transition", "matrix", "P", "to", "n", "sets", "using", "PCCA" ]
python
train
35.636364
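A small NumPy check of the projection formula, with a crisp membership matrix standing in for pcca()'s output:

import numpy as np

P = np.array([[0.9, 0.1, 0.0, 0.0],
              [0.1, 0.8, 0.1, 0.0],
              [0.0, 0.1, 0.8, 0.1],
              [0.0, 0.0, 0.1, 0.9]])
M = np.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])  # stand-in for pcca(P, 2)
P_coarse = np.linalg.inv(M.T @ M) @ (M.T @ P @ M)
assert np.allclose(P_coarse, [[0.95, 0.05], [0.05, 0.95]])  # rows stay stochastic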
hydraplatform/hydra-base
hydra_base/lib/scenario.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/scenario.py#L1062-L1074
def delete_resourcegroupitems(scenario_id, item_ids, **kwargs): """ Delete specified items in a group, in a scenario. """ user_id = int(kwargs.get('user_id')) #check the scenario exists _get_scenario(scenario_id, user_id) for item_id in item_ids: rgi = db.DBSession.query(ResourceGroupItem).\ filter(ResourceGroupItem.id==item_id).one() db.DBSession.delete(rgi) db.DBSession.flush()
[ "def", "delete_resourcegroupitems", "(", "scenario_id", ",", "item_ids", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "int", "(", "kwargs", ".", "get", "(", "'user_id'", ")", ")", "#check the scenario exists", "_get_scenario", "(", "scenario_id", ",", "user_id", ")", "for", "item_id", "in", "item_ids", ":", "rgi", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceGroupItem", ")", ".", "filter", "(", "ResourceGroupItem", ".", "id", "==", "item_id", ")", ".", "one", "(", ")", "db", ".", "DBSession", ".", "delete", "(", "rgi", ")", "db", ".", "DBSession", ".", "flush", "(", ")" ]
Delete specified items in a group, in a scenario.
[ "Delete", "specified", "items", "in", "a", "group", "in", "a", "scenario", "." ]
python
train
33.769231
gem/oq-engine
openquake/commonlib/readinput.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L1328-L1372
def get_input_files(oqparam, hazard=False): """ :param oqparam: an OqParam instance :param hazard: if True, consider only the hazard files :returns: input path names in a specific order """ fnames = [] # files entering in the checksum for key in oqparam.inputs: fname = oqparam.inputs[key] if hazard and key not in ('site_model', 'source_model_logic_tree', 'gsim_logic_tree', 'source'): continue # collect .hdf5 tables for the GSIMs, if any elif key == 'gsim_logic_tree': gsim_lt = get_gsim_lt(oqparam) for gsims in gsim_lt.values.values(): for gsim in gsims: table = getattr(gsim, 'GMPE_TABLE', None) if table: fnames.append(table) fnames.append(fname) elif key == 'source_model': # UCERF f = oqparam.inputs['source_model'] fnames.append(f) fname = nrml.read(f).sourceModel.UCERFSource['filename'] fnames.append(os.path.join(os.path.dirname(f), fname)) elif key == 'exposure': # fname is a list for exp in asset.Exposure.read_headers(fname): fnames.extend(exp.datafiles) fnames.extend(fname) elif isinstance(fname, dict): fnames.extend(fname.values()) elif isinstance(fname, list): for f in fname: if f == oqparam.input_dir: raise InvalidFile('%s there is an empty path in %s' % (oqparam.inputs['job_ini'], key)) fnames.extend(fname) elif key == 'source_model_logic_tree': for smpaths in logictree.collect_info(fname).smpaths.values(): fnames.extend(smpaths) fnames.append(fname) else: fnames.append(fname) return sorted(fnames)
[ "def", "get_input_files", "(", "oqparam", ",", "hazard", "=", "False", ")", ":", "fnames", "=", "[", "]", "# files entering in the checksum", "for", "key", "in", "oqparam", ".", "inputs", ":", "fname", "=", "oqparam", ".", "inputs", "[", "key", "]", "if", "hazard", "and", "key", "not", "in", "(", "'site_model'", ",", "'source_model_logic_tree'", ",", "'gsim_logic_tree'", ",", "'source'", ")", ":", "continue", "# collect .hdf5 tables for the GSIMs, if any", "elif", "key", "==", "'gsim_logic_tree'", ":", "gsim_lt", "=", "get_gsim_lt", "(", "oqparam", ")", "for", "gsims", "in", "gsim_lt", ".", "values", ".", "values", "(", ")", ":", "for", "gsim", "in", "gsims", ":", "table", "=", "getattr", "(", "gsim", ",", "'GMPE_TABLE'", ",", "None", ")", "if", "table", ":", "fnames", ".", "append", "(", "table", ")", "fnames", ".", "append", "(", "fname", ")", "elif", "key", "==", "'source_model'", ":", "# UCERF", "f", "=", "oqparam", ".", "inputs", "[", "'source_model'", "]", "fnames", ".", "append", "(", "f", ")", "fname", "=", "nrml", ".", "read", "(", "f", ")", ".", "sourceModel", ".", "UCERFSource", "[", "'filename'", "]", "fnames", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "f", ")", ",", "fname", ")", ")", "elif", "key", "==", "'exposure'", ":", "# fname is a list", "for", "exp", "in", "asset", ".", "Exposure", ".", "read_headers", "(", "fname", ")", ":", "fnames", ".", "extend", "(", "exp", ".", "datafiles", ")", "fnames", ".", "extend", "(", "fname", ")", "elif", "isinstance", "(", "fname", ",", "dict", ")", ":", "fnames", ".", "extend", "(", "fname", ".", "values", "(", ")", ")", "elif", "isinstance", "(", "fname", ",", "list", ")", ":", "for", "f", "in", "fname", ":", "if", "f", "==", "oqparam", ".", "input_dir", ":", "raise", "InvalidFile", "(", "'%s there is an empty path in %s'", "%", "(", "oqparam", ".", "inputs", "[", "'job_ini'", "]", ",", "key", ")", ")", "fnames", ".", "extend", "(", "fname", ")", "elif", "key", "==", "'source_model_logic_tree'", ":", "for", "smpaths", "in", "logictree", ".", "collect_info", "(", "fname", ")", ".", "smpaths", ".", "values", "(", ")", ":", "fnames", ".", "extend", "(", "smpaths", ")", "fnames", ".", "append", "(", "fname", ")", "else", ":", "fnames", ".", "append", "(", "fname", ")", "return", "sorted", "(", "fnames", ")" ]
:param oqparam: an OqParam instance :param hazard: if True, consider only the hazard files :returns: input path names in a specific order
[ ":", "param", "oqparam", ":", "an", "OqParam", "instance", ":", "param", "hazard", ":", "if", "True", "consider", "only", "the", "hazard", "files", ":", "returns", ":", "input", "path", "names", "in", "a", "specific", "order" ]
python
train
42.488889
Gjum/agarnet
agarnet/client.py
https://github.com/Gjum/agarnet/blob/63365ba32aa31c23a6d61438b556ceb8ed65631f/agarnet/client.py#L155-L200
def on_message(self, msg=None): """ Poll the websocket for a new packet. `Client.listen()` calls this. :param msg (string(byte array)): Optional. Parse the specified message instead of receiving a packet from the socket. """ if msg is None: try: msg = self.ws.recv() except Exception as e: self.subscriber.on_message_error( 'Error while receiving packet: %s' % str(e)) self.disconnect() return False if not msg: self.subscriber.on_message_error('Empty message received') return False buf = BufferStruct(msg) opcode = buf.pop_uint8() try: packet_name = packet_s2c[opcode] except KeyError: self.subscriber.on_message_error('Unknown packet %s' % opcode) return False if not self.ingame and packet_name in ingame_packets: self.subscriber.on_ingame() self.ingame = True parser = getattr(self, 'parse_%s' % packet_name) try: parser(buf) except BufferUnderflowError as e: msg = 'Parsing %s packet failed: %s' % (packet_name, e.args[0]) self.subscriber.on_message_error(msg) if len(buf.buffer) != 0: msg = 'Buffer not empty after parsing "%s" packet' % packet_name self.subscriber.on_message_error(msg) return packet_name
[ "def", "on_message", "(", "self", ",", "msg", "=", "None", ")", ":", "if", "msg", "is", "None", ":", "try", ":", "msg", "=", "self", ".", "ws", ".", "recv", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "subscriber", ".", "on_message_error", "(", "'Error while receiving packet: %s'", "%", "str", "(", "e", ")", ")", "self", ".", "disconnect", "(", ")", "return", "False", "if", "not", "msg", ":", "self", ".", "subscriber", ".", "on_message_error", "(", "'Empty message received'", ")", "return", "False", "buf", "=", "BufferStruct", "(", "msg", ")", "opcode", "=", "buf", ".", "pop_uint8", "(", ")", "try", ":", "packet_name", "=", "packet_s2c", "[", "opcode", "]", "except", "KeyError", ":", "self", ".", "subscriber", ".", "on_message_error", "(", "'Unknown packet %s'", "%", "opcode", ")", "return", "False", "if", "not", "self", ".", "ingame", "and", "packet_name", "in", "ingame_packets", ":", "self", ".", "subscriber", ".", "on_ingame", "(", ")", "self", ".", "ingame", "=", "True", "parser", "=", "getattr", "(", "self", ",", "'parse_%s'", "%", "packet_name", ")", "try", ":", "parser", "(", "buf", ")", "except", "BufferUnderflowError", "as", "e", ":", "msg", "=", "'Parsing %s packet failed: %s'", "%", "(", "packet_name", ",", "e", ".", "args", "[", "0", "]", ")", "self", ".", "subscriber", ".", "on_message_error", "(", "msg", ")", "if", "len", "(", "buf", ".", "buffer", ")", "!=", "0", ":", "msg", "=", "'Buffer not empty after parsing \"%s\" packet'", "%", "packet_name", "self", ".", "subscriber", ".", "on_message_error", "(", "msg", ")", "return", "packet_name" ]
Poll the websocket for a new packet. `Client.listen()` calls this. :param msg (string(byte array)): Optional. Parse the specified message instead of receiving a packet from the socket.
[ "Poll", "the", "websocket", "for", "a", "new", "packet", "." ]
python
train
32.086957
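The opcode handling reduces to a table lookup plus a getattr dispatch on the parse_<packet_name> naming convention; a stripped-down sketch with an illustrative opcode table:

packet_s2c = {16: 'world_update'}  # illustrative opcode table

class Parser:
    def parse_world_update(self, buf):
        print('world update:', buf)

    def on_message(self, opcode, buf):
        name = packet_s2c[opcode]
        getattr(self, 'parse_%s' % name)(buf)  # parse_<packet_name> convention
        return name

Parser().on_message(16, b'\x00')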
tornadoweb/tornado
tornado/httputil.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/httputil.py#L1016-L1029
def encode_username_password( username: Union[str, bytes], password: Union[str, bytes] ) -> bytes: """Encodes a username/password pair in the format used by HTTP auth. The return value is a byte string in the form ``username:password``. .. versionadded:: 5.1 """ if isinstance(username, unicode_type): username = unicodedata.normalize("NFC", username) if isinstance(password, unicode_type): password = unicodedata.normalize("NFC", password) return utf8(username) + b":" + utf8(password)
[ "def", "encode_username_password", "(", "username", ":", "Union", "[", "str", ",", "bytes", "]", ",", "password", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "bytes", ":", "if", "isinstance", "(", "username", ",", "unicode_type", ")", ":", "username", "=", "unicodedata", ".", "normalize", "(", "\"NFC\"", ",", "username", ")", "if", "isinstance", "(", "password", ",", "unicode_type", ")", ":", "password", "=", "unicodedata", ".", "normalize", "(", "\"NFC\"", ",", "password", ")", "return", "utf8", "(", "username", ")", "+", "b\":\"", "+", "utf8", "(", "password", ")" ]
Encodes a username/password pair in the format used by HTTP auth. The return value is a byte string in the form ``username:password``. .. versionadded:: 5.1
[ "Encodes", "a", "username", "/", "password", "pair", "in", "the", "format", "used", "by", "HTTP", "auth", "." ]
python
train
37.357143
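The encoded pair slots directly into an HTTP Basic Authorization header; NFC normalization makes composed and decomposed Unicode spellings of the same password encode identically:

import base64

credentials = encode_username_password('alice', 'pa\u00dfword')  # b'alice:pa\xc3\x9fword'
header = 'Basic ' + base64.b64encode(credentials).decode('ascii')
# -> 'Basic YWxpY2U6cGHDn3dvcmQ='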
agoragames/haigha
haigha/connection.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L254-L274
def disconnect(self): ''' Disconnect from the current host, but do not update the closed state. After the transport is disconnected, the closed state will be True if this is called after a protocol shutdown, or False if the disconnect was in error. TODO: do we really need closed vs. connected states? this only adds complication and the whole reconnect process has been scrapped anyway. ''' self._connected = False if self._transport is not None: try: self._transport.disconnect() except Exception: self.logger.error( "Failed to disconnect from %s", self._host, exc_info=True) raise finally: self._transport = None
[ "def", "disconnect", "(", "self", ")", ":", "self", ".", "_connected", "=", "False", "if", "self", ".", "_transport", "is", "not", "None", ":", "try", ":", "self", ".", "_transport", ".", "disconnect", "(", ")", "except", "Exception", ":", "self", ".", "logger", ".", "error", "(", "\"Failed to disconnect from %s\"", ",", "self", ".", "_host", ",", "exc_info", "=", "True", ")", "raise", "finally", ":", "self", ".", "_transport", "=", "None" ]
Disconnect from the current host, but do not update the closed state. After the transport is disconnected, the closed state will be True if this is called after a protocol shutdown, or False if the disconnect was in error. TODO: do we really need closed vs. connected states? this only adds complication and the whole reconnect process has been scrapped anyway.
[ "Disconnect", "from", "the", "current", "host", "but", "do", "not", "update", "the", "closed", "state", ".", "After", "the", "transport", "is", "disconnected", "the", "closed", "state", "will", "be", "True", "if", "this", "is", "called", "after", "a", "protocol", "shutdown", "or", "False", "if", "the", "disconnect", "was", "in", "error", "." ]
python
train
37.952381
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1944-L1984
def get_default_bios_settings(self, only_allowed_settings=True): """Get default BIOS settings. :param: only_allowed_settings: True when only allowed BIOS settings are to be returned. If False, All the BIOS settings supported by iLO are returned. :return: a dictionary of default BIOS settings(factory settings). Depending on the 'only_allowed_settings', either only the allowed settings are returned or all the supported settings are returned. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedError, if the command is not supported on the server. """ headers_bios, bios_uri, bios_settings = self._check_bios_resource() # Get the BaseConfig resource. try: base_config_uri = bios_settings['links']['BaseConfigs']['href'] except KeyError: msg = ("BaseConfigs resource not found. Couldn't apply the BIOS " "Settings.") raise exception.IloCommandNotSupportedError(msg) status, headers, config = self._rest_get(base_config_uri) if status != 200: msg = self._get_extended_error(config) raise exception.IloError(msg) for cfg in config['BaseConfigs']: default_settings = cfg.get('default') if default_settings: break else: msg = ("Default BIOS Settings not found in 'BaseConfigs' " "resource.") raise exception.IloCommandNotSupportedError(msg) if only_allowed_settings: return utils.apply_bios_properties_filter( default_settings, constants.SUPPORTED_BIOS_PROPERTIES) return default_settings
[ "def", "get_default_bios_settings", "(", "self", ",", "only_allowed_settings", "=", "True", ")", ":", "headers_bios", ",", "bios_uri", ",", "bios_settings", "=", "self", ".", "_check_bios_resource", "(", ")", "# Get the BaseConfig resource.", "try", ":", "base_config_uri", "=", "bios_settings", "[", "'links'", "]", "[", "'BaseConfigs'", "]", "[", "'href'", "]", "except", "KeyError", ":", "msg", "=", "(", "\"BaseConfigs resource not found. Couldn't apply the BIOS \"", "\"Settings.\"", ")", "raise", "exception", ".", "IloCommandNotSupportedError", "(", "msg", ")", "status", ",", "headers", ",", "config", "=", "self", ".", "_rest_get", "(", "base_config_uri", ")", "if", "status", "!=", "200", ":", "msg", "=", "self", ".", "_get_extended_error", "(", "config", ")", "raise", "exception", ".", "IloError", "(", "msg", ")", "for", "cfg", "in", "config", "[", "'BaseConfigs'", "]", ":", "default_settings", "=", "cfg", ".", "get", "(", "'default'", ")", "if", "default_settings", ":", "break", "else", ":", "msg", "=", "(", "\"Default BIOS Settings not found in 'BaseConfigs' \"", "\"resource.\"", ")", "raise", "exception", ".", "IloCommandNotSupportedError", "(", "msg", ")", "if", "only_allowed_settings", ":", "return", "utils", ".", "apply_bios_properties_filter", "(", "default_settings", ",", "constants", ".", "SUPPORTED_BIOS_PROPERTIES", ")", "return", "default_settings" ]
Get default BIOS settings. :param: only_allowed_settings: True when only allowed BIOS settings are to be returned. If False, All the BIOS settings supported by iLO are returned. :return: a dictionary of default BIOS settings(factory settings). Depending on the 'only_allowed_settings', either only the allowed settings are returned or all the supported settings are returned. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedError, if the command is not supported on the server.
[ "Get", "default", "BIOS", "settings", "." ]
python
train
43.585366
aio-libs/aioredis
aioredis/commands/hash.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L22-L25
def hexists(self, key, field): """Determine if hash field exists.""" fut = self.execute(b'HEXISTS', key, field) return wait_convert(fut, bool)
[ "def", "hexists", "(", "self", ",", "key", ",", "field", ")", ":", "fut", "=", "self", ".", "execute", "(", "b'HEXISTS'", ",", "key", ",", "field", ")", "return", "wait_convert", "(", "fut", ",", "bool", ")" ]
Determine if hash field exists.
[ "Determine", "if", "hash", "field", "exists", "." ]
python
train
40.75
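Usage with the aioredis 1.x API this module belongs to; the connection URL and keys are placeholders:

import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool('redis://localhost')  # placeholder URL
    await redis.hset('user:1', 'name', 'alice')
    assert await redis.hexists('user:1', 'name') is True    # wait_convert turns 1/0 into bool
    assert await redis.hexists('user:1', 'email') is False
    redis.close()
    await redis.wait_closed()

asyncio.get_event_loop().run_until_complete(main())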
cackharot/suds-py3
suds/xsd/sxbase.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/sxbase.py#L361-L380
def qualify(self):
    """
    Convert attribute values, that are references to other
    objects, into I{qref}.  Qualified using default document namespace.
    Since many wsdls are written improperly: when the document does
    not define a default namespace, the schema target namespace is used
    to qualify references.
    """
    defns = self.root.defaultNamespace()
    if Namespace.none(defns):
        defns = self.schema.tns
    for a in self.autoqualified():
        ref = getattr(self, a)
        if ref is None:
            continue
        if isqref(ref):
            continue
        qref = qualify(ref, self.root, defns)
        log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
        setattr(self, a, qref)
[ "def", "qualify", "(", "self", ")", ":", "defns", "=", "self", ".", "root", ".", "defaultNamespace", "(", ")", "if", "Namespace", ".", "none", "(", "defns", ")", ":", "defns", "=", "self", ".", "schema", ".", "tns", "for", "a", "in", "self", ".", "autoqualified", "(", ")", ":", "ref", "=", "getattr", "(", "self", ",", "a", ")", "if", "ref", "is", "None", ":", "continue", "if", "isqref", "(", "ref", ")", ":", "continue", "qref", "=", "qualify", "(", "ref", ",", "self", ".", "root", ",", "defns", ")", "log", ".", "debug", "(", "'%s, convert %s=\"%s\" to %s'", ",", "self", ".", "id", ",", "a", ",", "ref", ",", "qref", ")", "setattr", "(", "self", ",", "a", ",", "qref", ")" ]
Convert attribute values, that are references to other
objects, into I{qref}.  Qualified using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
[ "Convert", "attribute", "values", "that", "are", "references", "to", "other", "objects", "into", "I", "{", "qref", "}", ".", "Qualfied", "using", "default", "document", "namespace", ".", "Since", "many", "wsdls", "are", "written", "improperly", ":", "when", "the", "document", "does", "not", "define", "a", "default", "namespace", "the", "schema", "target", "namespace", "is", "used", "to", "qualify", "references", "." ]
python
train
39.7
gwastro/pycbc
pycbc/population/rates_functions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L97-L141
def log_rho_bg(trigs, bins, counts):
    ''' Calculate the log of background fall-off

    Parameters
    ----------
    trigs: array
       SNR values of all the triggers
    bins: array
       bins for histogrammed triggers
    counts: array
       counts for histogrammed triggers

    Returns
    -------
    array
    '''
    trigs = np.atleast_1d(trigs)

    N = sum(counts)
    assert np.all(trigs >= np.min(bins)), \
        'Trigger SNR values cannot all be below the lowest bin limit!'

    # If there are any triggers that are louder than the max bin, put one
    # fictitious count in a bin that extends from the limits of the slide
    # triggers out to the loudest trigger.
    # If there are no counts for a foreground trigger put a fictitious count
    # in the background bin
    if np.any(trigs >= np.max(bins)):
        N = N + 1
        #log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT

    log_rhos = []
    for t in trigs:
        if t >= np.max(bins):
            log_rhos.append(-log(N) - log(np.max(trigs) - bins[-1]))
        else:
            i = bisect.bisect(bins, t) - 1
            if counts[i] == 0:
                counts[i] = 1
            log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i]) - log(N))
    return np.array(log_rhos)
[ "def", "log_rho_bg", "(", "trigs", ",", "bins", ",", "counts", ")", ":", "trigs", "=", "np", ".", "atleast_1d", "(", "trigs", ")", "N", "=", "sum", "(", "counts", ")", "assert", "np", ".", "all", "(", "trigs", ">=", "np", ".", "min", "(", "bins", ")", ")", ",", "'Trigger SNR values cannot all be below the lowest bin limit!'", "# If there are any triggers that are louder than the max bin, put one", "# fictitious count in a bin that extends from the limits of the slide", "# triggers out to the loudest trigger.", "# If there is no counts for a foreground trigger put a fictitious count", "# in the background bin", "if", "np", ".", "any", "(", "trigs", ">=", "np", ".", "max", "(", "bins", ")", ")", ":", "N", "=", "N", "+", "1", "#log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT", "log_rhos", "=", "[", "]", "for", "t", "in", "trigs", ":", "if", "t", ">=", "np", ".", "max", "(", "bins", ")", ":", "log_rhos", ".", "append", "(", "-", "log", "(", "N", ")", "-", "log", "(", "np", ".", "max", "(", "trigs", ")", "-", "bins", "[", "-", "1", "]", ")", ")", "else", ":", "i", "=", "bisect", ".", "bisect", "(", "bins", ",", "t", ")", "-", "1", "if", "counts", "[", "i", "]", "==", "0", ":", "counts", "[", "i", "]", "=", "1", "log_rhos", ".", "append", "(", "log", "(", "counts", "[", "i", "]", ")", "-", "log", "(", "bins", "[", "i", "+", "1", "]", "-", "bins", "[", "i", "]", ")", "-", "log", "(", "N", ")", ")", "return", "np", ".", "array", "(", "log_rhos", ")" ]
Calculate the log of background fall-off Parameters ---------- trigs: array SNR values of all the triggers bins: array bin edges for histogrammed triggers counts: array counts for histogrammed triggers Returns ------- array
[ "Calculate", "the", "log", "of", "background", "fall", "-", "off" ]
python
train
28.977778
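The record above includes the full body of log_rho_bg, so a short driver is enough to exercise it. A minimal sketch, assuming pycbc is installed; the bin edges, counts, and trigger SNRs below are made-up stand-ins:

import numpy as np
from pycbc.population.rates_functions import log_rho_bg  # module path taken from the record's url

bins = np.array([5.0, 6.0, 7.0, 8.0])  # SNR bin edges of the background histogram
counts = np.array([100, 30, 5])        # background triggers per bin
trigs = np.array([5.5, 7.2, 9.1])      # 9.1 exceeds the last edge on purpose

# per-trigger log background density; the loud trigger exercises the
# fictitious overflow-bin branch (t >= np.max(bins)), which also bumps N by one
print(log_rho_bg(trigs, bins, counts))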
ihmeuw/vivarium
src/vivarium/config_tree.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/config_tree.py#L294-L326
def _set_with_metadata(self, name, value, layer=None, source=None): """Set a value in the named layer with the given source. Parameters ---------- name : str The name of the value value The value to store layer : str, optional The name of the layer to store the value in. If none is supplied then the value will be stored in the outermost layer. source : str, optional The source to attribute the value to. Raises ------ TypeError if the ConfigTree is frozen """ if self._frozen: raise TypeError('Frozen ConfigTree does not support assignment') if isinstance(value, dict): if name not in self._children or not isinstance(self._children[name], ConfigTree): self._children[name] = ConfigTree(layers=list(self._layers)) self._children[name].update(value, layer, source) else: if name not in self._children or not isinstance(self._children[name], ConfigNode): self._children[name] = ConfigNode(list(self._layers)) child = self._children[name] child.set_value(value, layer, source)
[ "def", "_set_with_metadata", "(", "self", ",", "name", ",", "value", ",", "layer", "=", "None", ",", "source", "=", "None", ")", ":", "if", "self", ".", "_frozen", ":", "raise", "TypeError", "(", "'Frozen ConfigTree does not support assignment'", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "if", "name", "not", "in", "self", ".", "_children", "or", "not", "isinstance", "(", "self", ".", "_children", "[", "name", "]", ",", "ConfigTree", ")", ":", "self", ".", "_children", "[", "name", "]", "=", "ConfigTree", "(", "layers", "=", "list", "(", "self", ".", "_layers", ")", ")", "self", ".", "_children", "[", "name", "]", ".", "update", "(", "value", ",", "layer", ",", "source", ")", "else", ":", "if", "name", "not", "in", "self", ".", "_children", "or", "not", "isinstance", "(", "self", ".", "_children", "[", "name", "]", ",", "ConfigNode", ")", ":", "self", ".", "_children", "[", "name", "]", "=", "ConfigNode", "(", "list", "(", "self", ".", "_layers", ")", ")", "child", "=", "self", ".", "_children", "[", "name", "]", "child", ".", "set_value", "(", "value", ",", "layer", ",", "source", ")" ]
Set a value in the named layer with the given source. Parameters ---------- name : str The name of the value value The value to store layer : str, optional The name of the layer to store the value in. If none is supplied then the value will be stored in the outermost layer. source : str, optional The source to attribute the value to. Raises ------ TypeError if the ConfigTree is frozen
[ "Set", "a", "value", "in", "the", "named", "layer", "with", "the", "given", "source", "." ]
python
train
37.454545
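_set_with_metadata is private API, but its central move — recurse into a subtree when the value is a dict, otherwise store a leaf — is a reusable pattern for layered configuration trees. A standalone sketch of just that dispatch, independent of vivarium's ConfigTree/ConfigNode classes:

class Tree:
    def __init__(self):
        self._children = {}

    def set(self, name, value):
        # mirrors the isinstance(value, dict) branch above:
        # dicts become subtrees, everything else becomes a leaf value
        if isinstance(value, dict):
            child = self._children.setdefault(name, Tree())
            for key, val in value.items():
                child.set(key, val)
        else:
            self._children[name] = value

t = Tree()
t.set('db', {'host': 'localhost', 'port': 5432})
print(t._children['db']._children['port'])  # 5432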
glormph/msstitch
src/app/readers/mzidplus.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/mzidplus.py#L96-L102
def get_mzid_specfile_ids(mzidfn, namespace): """Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns""" sid_fn = {} for specdata in mzid_specdata_generator(mzidfn, namespace): sid_fn[specdata.attrib['id']] = specdata.attrib['name'] return sid_fn
[ "def", "get_mzid_specfile_ids", "(", "mzidfn", ",", "namespace", ")", ":", "sid_fn", "=", "{", "}", "for", "specdata", "in", "mzid_specdata_generator", "(", "mzidfn", ",", "namespace", ")", ":", "sid_fn", "[", "specdata", ".", "attrib", "[", "'id'", "]", "]", "=", "specdata", ".", "attrib", "[", "'name'", "]", "return", "sid_fn" ]
Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns
[ "Returns", "mzid", "spectra", "data", "filenames", "and", "their", "IDs", "used", "in", "the", "mzIdentML", "file", "as", "a", "dict", ".", "Keys", "==", "IDs", "values", "==", "fns" ]
python
train
47.142857
cloudant/python-cloudant
src/cloudant/client.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/client.py#L361-L375
def keys(self, remote=False): """ Returns the database names for this client. Default is to return only the locally cached database names; specify ``remote=True`` to make a remote request to include all databases. :param bool remote: Dictates whether the list of locally cached database names is returned or a remote request is made to include an up-to-date list of databases from the server. Defaults to False. :returns: List of database names """ if not remote: return list(super(CouchDB, self).keys()) return self.all_dbs()
[ "def", "keys", "(", "self", ",", "remote", "=", "False", ")", ":", "if", "not", "remote", ":", "return", "list", "(", "super", "(", "CouchDB", ",", "self", ")", ".", "keys", "(", ")", ")", "return", "self", ".", "all_dbs", "(", ")" ]
Returns the database names for this client. Default is to return only the locally cached database names; specify ``remote=True`` to make a remote request to include all databases. :param bool remote: Dictates whether the list of locally cached database names is returned or a remote request is made to include an up-to-date list of databases from the server. Defaults to False. :returns: List of database names
[ "Returns", "the", "database", "names", "for", "this", "client", ".", "Default", "is", "to", "return", "only", "the", "locally", "cached", "database", "names", "specify", "remote", "=", "True", "to", "make", "a", "remote", "request", "to", "include", "all", "databases", "." ]
python
train
41.6
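A hedged usage sketch for keys(); the constructor call follows python-cloudant's documented CouchDB client setup, and the credentials and URL are placeholders:

from cloudant.client import CouchDB

client = CouchDB('admin', 'password', url='http://localhost:5984', connect=True)
print(client.keys())             # locally cached database names only
print(client.keys(remote=True))  # fresh list fetched from the server via all_dbs()
client.disconnect()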
nickmckay/LiPD-utilities
Matlab/bagit.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Matlab/bagit.py#L667-L706
def _parse_tags(tag_file): """Parses a tag file, according to RFC 2822. This includes line folding, permitting extra-long field values. See http://www.faqs.org/rfcs/rfc2822.html for more information. """ tag_name = None tag_value = None # Line folding is handled by yielding values only after we encounter # the start of a new tag, or if we pass the EOF. for num, line in enumerate(tag_file): # If byte-order mark ignore it for now. if num == 0: if line.startswith(BOM): line = line.lstrip(BOM) # Skip over any empty or blank lines. if len(line) == 0 or line.isspace(): continue elif line[0].isspace() and tag_value is not None: # folded line tag_value += line else: # Starting a new tag; yield the last one. if tag_name: yield (tag_name, tag_value.strip()) if ':' not in line: raise BagValidationError("invalid line '%s' in %s" % (line.strip(), os.path.basename(tag_file.name))) parts = line.strip().split(':', 1) tag_name = parts[0].strip() tag_value = parts[1] # Passed the EOF. All done after this. if tag_name: yield (tag_name, tag_value.strip())
[ "def", "_parse_tags", "(", "tag_file", ")", ":", "tag_name", "=", "None", "tag_value", "=", "None", "# Line folding is handled by yielding values only after we encounter", "# the start of a new tag, or if we pass the EOF.", "for", "num", ",", "line", "in", "enumerate", "(", "tag_file", ")", ":", "# If byte-order mark ignore it for now.", "if", "num", "==", "0", ":", "if", "line", ".", "startswith", "(", "BOM", ")", ":", "line", "=", "line", ".", "lstrip", "(", "BOM", ")", "# Skip over any empty or blank lines.", "if", "len", "(", "line", ")", "==", "0", "or", "line", ".", "isspace", "(", ")", ":", "continue", "elif", "line", "[", "0", "]", ".", "isspace", "(", ")", "and", "tag_value", "is", "not", "None", ":", "# folded line", "tag_value", "+=", "line", "else", ":", "# Starting a new tag; yield the last one.", "if", "tag_name", ":", "yield", "(", "tag_name", ",", "tag_value", ".", "strip", "(", ")", ")", "if", "':'", "not", "in", "line", ":", "raise", "BagValidationError", "(", "\"invalid line '%s' in %s\"", "%", "(", "line", ".", "strip", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "tag_file", ".", "name", ")", ")", ")", "parts", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "':'", ",", "1", ")", "tag_name", "=", "parts", "[", "0", "]", ".", "strip", "(", ")", "tag_value", "=", "parts", "[", "1", "]", "# Passed the EOF. All done after this.", "if", "tag_name", ":", "yield", "(", "tag_name", ",", "tag_value", ".", "strip", "(", ")", ")" ]
Parses a tag file, according to RFC 2822. This includes line folding, permitting extra-long field values. See http://www.faqs.org/rfcs/rfc2822.html for more information.
[ "Parses", "a", "tag", "file", "according", "to", "RFC", "2822", ".", "This", "includes", "line", "folding", "permitting", "extra", "-", "long", "field", "values", "." ]
python
train
34.175
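The folding rule in _parse_tags — a line starting with whitespace continues the previous tag's value — is easy to verify in isolation. A standalone sketch of the same RFC 2822 behaviour, leaving out bagit's BOM handling and validation errors:

def parse_tags(lines):
    name, value = None, None
    for line in lines:
        if not line.strip():
            continue  # skip blank lines
        if line[0].isspace() and value is not None:
            value += line  # folded continuation line
        else:
            if name:
                yield name, value.strip()
            first, rest = line.strip().split(':', 1)
            name, value = first.strip(), rest
    if name:
        yield name, value.strip()

tags = ['Bag-Software-Agent: bagit.py\n',
        'External-Description: a very\n',
        '   long description\n']
print(dict(parse_tags(tags)))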
DataONEorg/d1_python
lib_common/src/d1_common/system_metadata.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/system_metadata.py#L199-L230
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False): """Determine if SystemMetadata PyXB objects are semantically equivalent. Normalize then compare SystemMetadata PyXB objects for equivalency. Args: a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata PyXB objects are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one. """ normalize_in_place(a_pyxb, ignore_timestamps) normalize_in_place(b_pyxb, ignore_timestamps) a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb) b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb) are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml) if not are_equivalent: logger.debug('XML documents not equivalent:') logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml)) return are_equivalent
[ "def", "are_equivalent_pyxb", "(", "a_pyxb", ",", "b_pyxb", ",", "ignore_timestamps", "=", "False", ")", ":", "normalize_in_place", "(", "a_pyxb", ",", "ignore_timestamps", ")", "normalize_in_place", "(", "b_pyxb", ",", "ignore_timestamps", ")", "a_xml", "=", "d1_common", ".", "xml", ".", "serialize_to_xml_str", "(", "a_pyxb", ")", "b_xml", "=", "d1_common", ".", "xml", ".", "serialize_to_xml_str", "(", "b_pyxb", ")", "are_equivalent", "=", "d1_common", ".", "xml", ".", "are_equivalent", "(", "a_xml", ",", "b_xml", ")", "if", "not", "are_equivalent", ":", "logger", ".", "debug", "(", "'XML documents not equivalent:'", ")", "logger", ".", "debug", "(", "d1_common", ".", "xml", ".", "format_diff_xml", "(", "a_xml", ",", "b_xml", ")", ")", "return", "are_equivalent" ]
Determine if SystemMetadata PyXB objects are semantically equivalent. Normalize then compare SystemMetadata PyXB objects for equivalency. Args: a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata PyXB objects are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
[ "Determine", "if", "SystemMetadata", "PyXB", "objects", "are", "semantically", "equivalent", "." ]
python
train
41.5625
mikedh/trimesh
trimesh/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L800-L820
def md5_object(obj): """ If an object is hashable, return the string of the MD5. Parameters ----------- obj: object Returns ---------- md5: str, MD5 hash """ hasher = hashlib.md5() if isinstance(obj, basestring) and PY3: # in python3 convert strings to bytes before hashing hasher.update(obj.encode('utf-8')) else: hasher.update(obj) md5 = hasher.hexdigest() return md5
[ "def", "md5_object", "(", "obj", ")", ":", "hasher", "=", "hashlib", ".", "md5", "(", ")", "if", "isinstance", "(", "obj", ",", "basestring", ")", "and", "PY3", ":", "# in python3 convert strings to bytes before hashing", "hasher", ".", "update", "(", "obj", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "hasher", ".", "update", "(", "obj", ")", "md5", "=", "hasher", ".", "hexdigest", "(", ")", "return", "md5" ]
If an object is hashable, return the string of the MD5. Parameters ----------- obj: object Returns ---------- md5: str, MD5 hash
[ "If", "an", "object", "is", "hashable", "return", "the", "string", "of", "the", "MD5", "." ]
python
train
20.571429
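Apart from the basestring/PY3 compatibility shims, md5_object is self-contained; on Python 3 the same behaviour reduces to a few lines:

import hashlib

def md5_object(obj):
    # Python-3-only variant: encode str to bytes, pass bytes through unchanged
    hasher = hashlib.md5()
    hasher.update(obj.encode('utf-8') if isinstance(obj, str) else obj)
    return hasher.hexdigest()

print(md5_object('hello'))   # 5d41402abc4b2a76b9719d911017c592
print(md5_object(b'hello'))  # same digest for the equivalent bytes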
tanghaibao/goatools
goatools/godag/obo_optional_attributes.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag/obo_optional_attributes.py#L62-L81
def _get_synonym(self, line): """Given line, return optional attribute synonym value in a namedtuple. Example synonym and its storage in a namedtuple: synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021] text: "The other white meat" scope: EXACT typename: MARKETING_SLOGAN dbxrefs: set(["MEAT:00324", "BACONBASE:03021"]) Example synonyms: "peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr] "regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label [] "tocopherol 13-hydroxylase activity" EXACT systematic_synonym [] """ mtch = self.attr2cmp['synonym'].match(line) text, scope, typename, dbxrefs, _ = mtch.groups() typename = typename.strip() dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set() return self.attr2cmp['synonym nt']._make([text, scope, typename, dbxrefs])
[ "def", "_get_synonym", "(", "self", ",", "line", ")", ":", "mtch", "=", "self", ".", "attr2cmp", "[", "'synonym'", "]", ".", "match", "(", "line", ")", "text", ",", "scope", ",", "typename", ",", "dbxrefs", ",", "_", "=", "mtch", ".", "groups", "(", ")", "typename", "=", "typename", ".", "strip", "(", ")", "dbxrefs", "=", "set", "(", "dbxrefs", ".", "split", "(", "', '", ")", ")", "if", "dbxrefs", "else", "set", "(", ")", "return", "self", ".", "attr2cmp", "[", "'synonym nt'", "]", ".", "_make", "(", "[", "text", ",", "scope", ",", "typename", ",", "dbxrefs", "]", ")" ]
Given line, return optional attribute synonym value in a namedtuple. Example synonym and its storage in a namedtuple: synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021] text: "The other white meat" scope: EXACT typename: MARKETING_SLOGAN dbxrefs: set(["MEAT:00324", "BACONBASE:03021"]) Example synonyms: "peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr] "regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label [] "tocopherol 13-hydroxylase activity" EXACT systematic_synonym []
[ "Given", "line", "return", "optional", "attribute", "synonym", "value", "in", "a", "namedtuple", "." ]
python
train
48.55
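The record shows how the match groups are unpacked but not the compiled pattern stored in self.attr2cmp['synonym']. A hedged sketch of one regex that accepts the three documented example lines — goatools' actual pattern may differ (note that it yields a fifth, discarded group):

import re

# assumed pattern, reconstructed from the docstring examples only
SYNONYM = re.compile(r'"(.*)" +([A-Z]+) *(\w*) *\[(.*)\]')

line = '"The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]'
text, scope, typename, dbxrefs = SYNONYM.match(line).groups()
dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set()
print(text, scope, typename, dbxrefs)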
dpkp/kafka-python
kafka/client.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client.py#L599-L647
def send_produce_request(self, payloads=(), acks=1, timeout=1000, fail_on_error=True, callback=None): """ Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Arguments: payloads (list of ProduceRequest): produce requests to send to kafka ProduceRequest payloads must not contain duplicates for any topic-partition. acks (int, optional): how many acks the servers should receive from replica brokers before responding to the request. If it is 0, the server will not send any response. If it is 1, the server will wait until the data is written to the local log before sending a response. If it is -1, the server will wait until the message is committed by all in-sync replicas before sending a response. For any value > 1, the server will wait for this number of acks to occur (but the server will never wait for more acknowledgements than there are in-sync replicas). defaults to 1. timeout (int, optional): maximum time in milliseconds the server can await the receipt of the number of acks, defaults to 1000. fail_on_error (bool, optional): raise exceptions on connection and server response errors, defaults to True. callback (function, optional): instead of returning the ProduceResponse, first pass it through this function, defaults to None. Returns: list of ProduceResponses, or callback results if supplied, in the order of input payloads """ encoder = functools.partial( KafkaProtocol.encode_produce_request, acks=acks, timeout=timeout) if acks == 0: decoder = None else: decoder = KafkaProtocol.decode_produce_response resps = self._send_broker_aware_request(payloads, encoder, decoder) return [resp if not callback else callback(resp) for resp in resps if resp is not None and (not fail_on_error or not self._raise_on_response_error(resp))]
[ "def", "send_produce_request", "(", "self", ",", "payloads", "=", "(", ")", ",", "acks", "=", "1", ",", "timeout", "=", "1000", ",", "fail_on_error", "=", "True", ",", "callback", "=", "None", ")", ":", "encoder", "=", "functools", ".", "partial", "(", "KafkaProtocol", ".", "encode_produce_request", ",", "acks", "=", "acks", ",", "timeout", "=", "timeout", ")", "if", "acks", "==", "0", ":", "decoder", "=", "None", "else", ":", "decoder", "=", "KafkaProtocol", ".", "decode_produce_response", "resps", "=", "self", ".", "_send_broker_aware_request", "(", "payloads", ",", "encoder", ",", "decoder", ")", "return", "[", "resp", "if", "not", "callback", "else", "callback", "(", "resp", ")", "for", "resp", "in", "resps", "if", "resp", "is", "not", "None", "and", "(", "not", "fail_on_error", "or", "not", "self", ".", "_raise_on_response_error", "(", "resp", ")", ")", "]" ]
Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Arguments: payloads (list of ProduceRequest): produce requests to send to kafka ProduceRequest payloads must not contain duplicates for any topic-partition. acks (int, optional): how many acks the servers should receive from replica brokers before responding to the request. If it is 0, the server will not send any response. If it is 1, the server will wait until the data is written to the local log before sending a response. If it is -1, the server will wait until the message is committed by all in-sync replicas before sending a response. For any value > 1, the server will wait for this number of acks to occur (but the server will never wait for more acknowledgements than there are in-sync replicas). defaults to 1. timeout (int, optional): maximum time in milliseconds the server can await the receipt of the number of acks, defaults to 1000. fail_on_error (bool, optional): raise exceptions on connection and server response errors, defaults to True. callback (function, optional): instead of returning the ProduceResponse, first pass it through this function, defaults to None. Returns: list of ProduceResponses, or callback results if supplied, in the order of input payloads
[ "Encode", "and", "send", "some", "ProduceRequests" ]
python
train
48.673469
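send_produce_request belongs to kafka-python's legacy SimpleClient API. A hedged usage sketch — the import paths below match kafka-python 1.x but have moved between releases, and the broker address is a placeholder:

from kafka import SimpleClient
from kafka.structs import ProduceRequestPayload
from kafka.protocol import create_message

client = SimpleClient('localhost:9092')
payload = ProduceRequestPayload(topic='my-topic', partition=0,
                                messages=[create_message(b'hello')])
# acks=1: wait for the leader's local write, allowing up to 1000 ms for the ack
resps = client.send_produce_request([payload], acks=1, timeout=1000)
print(resps[0].error)  # 0 indicates success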
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1715-L1721
def set_quantity(self, twig=None, value=None, **kwargs): """ TODO: add documentation """ # TODO: handle twig having parameter key (value@, default_unit@, adjust@, etc) # TODO: does this return anything (update the docstring)? return self.get_parameter(twig=twig, **kwargs).set_quantity(value=value, **kwargs)
[ "def", "set_quantity", "(", "self", ",", "twig", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO: handle twig having parameter key (value@, default_unit@, adjust@, etc)", "# TODO: does this return anything (update the docstring)?", "return", "self", ".", "get_parameter", "(", "twig", "=", "twig", ",", "*", "*", "kwargs", ")", ".", "set_quantity", "(", "value", "=", "value", ",", "*", "*", "kwargs", ")" ]
TODO: add documentation
[ "TODO", ":", "add", "documentation" ]
python
train
50
DEIB-GECO/PyGMQL
gmql/dataset/GMQLDataset.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L1227-L1343
def group(self, meta=None, meta_aggregates=None, regs=None, regs_aggregates=None, meta_group_name="_group"): """ *Wrapper of* ``GROUP`` The GROUP operator is used for grouping both regions and/or metadata of input dataset samples according to distinct values of certain attributes (known as grouping attributes); new grouping attributes are added to samples in the output dataset, storing the results of aggregate function evaluations over metadata and/or regions in each group of samples. Samples having missing values for any of the grouping attributes are discarded. :param meta: (optional) a list of metadata attributes :param meta_aggregates: (optional) {'new_attr': fun} :param regs: (optional) a list of region fields :param regs_aggregates: {'new_attr': fun} :param meta_group_name: (optional) the name to give to the group attribute in the metadata :return: a new GMQLDataset Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value calling the new region field `maxPvalue`:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")}) """ if isinstance(meta, list) and \ all([isinstance(x, str) for x in meta]): meta = Some(meta) elif meta is None: meta = none() else: raise TypeError("meta must be a list of strings. " "{} was provided".format(type(meta))) expBuild = self.pmg.getNewExpressionBuilder(self.__index) if isinstance(meta_aggregates, dict): metaAggregates = [] for k in meta_aggregates: if isinstance(k, str): item = meta_aggregates[k] if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD, STD, MEDIAN, COUNTSAMP)): functionName = item.get_aggregate_name() argument = item.get_argument() if argument is None: argument = none() else: argument = Some(argument) metaAggregates.append(expBuild.createMetaAggregateFunction(functionName, k, argument)) else: raise TypeError("the item of the dictionary must be an Aggregate of the following: " "SUM, MIN, MAX, AVG, BAG, BAGD, STD, COUNTSAMP. " "{} was provided".format(type(item))) else: raise TypeError("keys of meta_aggregates must be string. " "{} was provided".format(type(k))) metaAggregates = Some(metaAggregates) elif meta_aggregates is None: metaAggregates = none() else: raise TypeError("meta_aggregates must be a dictionary of Aggregate functions. " "{} was provided".format(type(meta_aggregates))) if isinstance(regs, list) and \ all([isinstance(x, str) for x in regs]): regs = Some(regs) elif regs is None: regs = none() else: raise TypeError("regs must be a list of strings. " "{} was provided".format(type(regs))) if isinstance(regs_aggregates, dict): regionAggregates = [] for k in regs_aggregates.keys(): if isinstance(k, str): item = regs_aggregates[k] if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT)): op_name = item.get_aggregate_name() op_argument = item.get_argument() if op_argument is None: op_argument = none() else: op_argument = Some(op_argument) regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument) regionAggregates.append(regsToReg) else: raise TypeError("the item of the dictionary must be an Aggregate of the following: " "SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT. " "{} was provided".format(type(item))) else: raise TypeError("The key of new_reg_fields must be a string. " "{} was provided".format(type(k))) regionAggregates = Some(regionAggregates) elif regs_aggregates is None: regionAggregates = none() else: raise TypeError("new_reg_fields must be a list of dictionary. " "{} was provided".format(type(regs_aggregates))) if isinstance(meta_group_name, str): pass else: raise TypeError("meta_group_name must be a string. " "{} was provided".format(type(meta_group_name))) new_index = self.opmng.group(self.__index, meta, metaAggregates, meta_group_name, regs, regionAggregates) return GMQLDataset(index=new_index, location=self.location, local_sources=self._local_sources, remote_sources=self._remote_sources, meta_profile=self.meta_profile)
[ "def", "group", "(", "self", ",", "meta", "=", "None", ",", "meta_aggregates", "=", "None", ",", "regs", "=", "None", ",", "regs_aggregates", "=", "None", ",", "meta_group_name", "=", "\"_group\"", ")", ":", "if", "isinstance", "(", "meta", ",", "list", ")", "and", "all", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "meta", "]", ")", ":", "meta", "=", "Some", "(", "meta", ")", "elif", "meta", "is", "None", ":", "meta", "=", "none", "(", ")", "else", ":", "raise", "TypeError", "(", "\"meta must be a list of strings. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "meta", ")", ")", ")", "expBuild", "=", "self", ".", "pmg", ".", "getNewExpressionBuilder", "(", "self", ".", "__index", ")", "if", "isinstance", "(", "meta_aggregates", ",", "dict", ")", ":", "metaAggregates", "=", "[", "]", "for", "k", "in", "meta_aggregates", ":", "if", "isinstance", "(", "k", ",", "str", ")", ":", "item", "=", "meta_aggregates", "[", "k", "]", "if", "isinstance", "(", "item", ",", "(", "SUM", ",", "MIN", ",", "MAX", ",", "AVG", ",", "BAG", ",", "BAGD", ",", "STD", ",", "MEDIAN", ",", "COUNTSAMP", ")", ")", ":", "functionName", "=", "item", ".", "get_aggregate_name", "(", ")", "argument", "=", "item", ".", "get_argument", "(", ")", "if", "argument", "is", "None", ":", "argument", "=", "none", "(", ")", "else", ":", "argument", "=", "Some", "(", "argument", ")", "metaAggregates", ".", "append", "(", "expBuild", ".", "createMetaAggregateFunction", "(", "functionName", ",", "k", ",", "argument", ")", ")", "else", ":", "raise", "TypeError", "(", "\"the item of the dictionary must be an Aggregate of the following: \"", "\"SUM, MIN, MAX, AVG, BAG, BAGD, STD, COUNTSAMP. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "item", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"keys of meta_aggregates must be string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "k", ")", ")", ")", "metaAggregates", "=", "Some", "(", "metaAggregates", ")", "elif", "meta_aggregates", "is", "None", ":", "metaAggregates", "=", "none", "(", ")", "else", ":", "raise", "TypeError", "(", "\"meta_aggregates must be a dictionary of Aggregate functions. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "meta_aggregates", ")", ")", ")", "if", "isinstance", "(", "regs", ",", "list", ")", "and", "all", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "regs", "]", ")", ":", "regs", "=", "Some", "(", "regs", ")", "elif", "regs", "is", "None", ":", "regs", "=", "none", "(", ")", "else", ":", "raise", "TypeError", "(", "\"regs must be a list of strings. 
\"", "\"{} was provided\"", ".", "format", "(", "type", "(", "regs", ")", ")", ")", "if", "isinstance", "(", "regs_aggregates", ",", "dict", ")", ":", "regionAggregates", "=", "[", "]", "for", "k", "in", "regs_aggregates", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "k", ",", "str", ")", ":", "item", "=", "regs_aggregates", "[", "k", "]", "if", "isinstance", "(", "item", ",", "(", "SUM", ",", "MIN", ",", "MAX", ",", "AVG", ",", "BAG", ",", "BAGD", ",", "MEDIAN", ",", "COUNT", ")", ")", ":", "op_name", "=", "item", ".", "get_aggregate_name", "(", ")", "op_argument", "=", "item", ".", "get_argument", "(", ")", "if", "op_argument", "is", "None", ":", "op_argument", "=", "none", "(", ")", "else", ":", "op_argument", "=", "Some", "(", "op_argument", ")", "regsToReg", "=", "expBuild", ".", "getRegionsToRegion", "(", "op_name", ",", "k", ",", "op_argument", ")", "regionAggregates", ".", "append", "(", "regsToReg", ")", "else", ":", "raise", "TypeError", "(", "\"the item of the dictionary must be an Aggregate of the following: \"", "\"SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "item", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"The key of new_reg_fields must be a string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "k", ")", ")", ")", "regionAggregates", "=", "Some", "(", "regionAggregates", ")", "elif", "regs_aggregates", "is", "None", ":", "regionAggregates", "=", "none", "(", ")", "else", ":", "raise", "TypeError", "(", "\"new_reg_fields must be a list of dictionary. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "regs_aggregates", ")", ")", ")", "if", "isinstance", "(", "meta_group_name", ",", "str", ")", ":", "pass", "else", ":", "raise", "TypeError", "(", "\"meta_group_name must be a string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "meta_group_name", ")", ")", ")", "new_index", "=", "self", ".", "opmng", ".", "group", "(", "self", ".", "__index", ",", "meta", ",", "metaAggregates", ",", "meta_group_name", ",", "regs", ",", "regionAggregates", ")", "return", "GMQLDataset", "(", "index", "=", "new_index", ",", "location", "=", "self", ".", "location", ",", "local_sources", "=", "self", ".", "_local_sources", ",", "remote_sources", "=", "self", ".", "_remote_sources", ",", "meta_profile", "=", "self", ".", "meta_profile", ")" ]
*Wrapper of* ``GROUP`` The GROUP operator is used for grouping both regions and/or metadata of input dataset samples according to distinct values of certain attributes (known as grouping attributes); new grouping attributes are added to samples in the output dataset, storing the results of aggregate function evaluations over metadata and/or regions in each group of samples. Samples having missing values for any of the grouping attributes are discarded. :param meta: (optional) a list of metadata attributes :param meta_aggregates: (optional) {'new_attr': fun} :param regs: (optional) a list of region fields :param regs_aggregates: {'new_attr': fun} :param meta_group_name: (optional) the name to give to the group attribute in the metadata :return: a new GMQLDataset Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value calling the new region field `maxPvalue`:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")})
[ "*", "Wrapper", "of", "*", "GROUP" ]
python
train
49.213675
mlperf/training
rnn_translator/pytorch/seq2seq/data/tokenizer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/data/tokenizer.py#L88-L105
def detokenize(self, inputs, delim=' '): """ Detokenizes single sentence and removes token separator characters. :param inputs: sequence of tokens :param delim: tokenization delimiter returns: string representing detokenized sentence """ detok = delim.join([self.idx2tok[idx] for idx in inputs]) detok = detok.replace(self.separator + ' ', '') detok = detok.replace(self.separator, '') detok = detok.replace(config.BOS_TOKEN, '') detok = detok.replace(config.EOS_TOKEN, '') detok = detok.replace(config.PAD_TOKEN, '') detok = detok.strip() return detok
[ "def", "detokenize", "(", "self", ",", "inputs", ",", "delim", "=", "' '", ")", ":", "detok", "=", "delim", ".", "join", "(", "[", "self", ".", "idx2tok", "[", "idx", "]", "for", "idx", "in", "inputs", "]", ")", "detok", "=", "detok", ".", "replace", "(", "self", ".", "separator", "+", "' '", ",", "''", ")", "detok", "=", "detok", ".", "replace", "(", "self", ".", "separator", ",", "''", ")", "detok", "=", "detok", ".", "replace", "(", "config", ".", "BOS_TOKEN", ",", "''", ")", "detok", "=", "detok", ".", "replace", "(", "config", ".", "EOS_TOKEN", ",", "''", ")", "detok", "=", "detok", ".", "replace", "(", "config", ".", "PAD_TOKEN", ",", "''", ")", "detok", "=", "detok", ".", "strip", "(", ")", "return", "detok" ]
Detokenizes single sentence and removes token separator characters. :param inputs: sequence of tokens :param delim: tokenization delimiter returns: string representing detokenized sentence
[ "Detokenizes", "single", "sentence", "and", "removes", "token", "separator", "characters", "." ]
python
train
36.111111
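The replace chain in detokenize is straightforward to demonstrate standalone; the separator and special-token spellings below are assumptions standing in for self.separator and the config.* constants:

separator = '@@'                     # assumed subword joiner, as in subword-nmt BPE
specials = ('<s>', '</s>', '<pad>')  # assumed BOS/EOS/PAD token spellings

tokens = ['<s>', 'new@@', 'er', 'trans@@', 'lation', '</s>']
detok = ' '.join(tokens)
detok = detok.replace(separator + ' ', '').replace(separator, '')
for s in specials:
    detok = detok.replace(s, '')
print(detok.strip())  # -> 'newer translation'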
haykkh/resultr
resultr.py
https://github.com/haykkh/resultr/blob/decd222a4c0d3ea75595fd546b797b60297623b6/resultr.py#L337-L477
def main(args): '''main entry point of app Arguments: args {namespace} -- arguments provided in cli ''' print("\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\n") ######################### # # # # # prompt # # # # # ######################### if not len(sys.argv) > 1: initialAnswers = askInitial() inputPath = pathlib.Path(initialAnswers['inputPath']) year = int(initialAnswers['year']) # create a list from every row badFormat = badFormater(inputPath) # create a list from every row howManyCandidates = len(badFormat) - 1 length = int(len(badFormat['Cand'])/2) finalReturn = [] if "Get your rank in the year" in initialAnswers['whatToDo']: candidateNumber = askCandidateNumber() weightedAverage = myGrades(year, candidateNumber, badFormat, length) rank = myRank(weightedAverage, badFormat, year, length) if "Get your weighted average" in initialAnswers['whatToDo']: finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format( rank, howManyCandidates, (rank * 100) / howManyCandidates)) elif "Get your weighted average" in initialAnswers['whatToDo']: candidateNumber = askCandidateNumber() weightedAverage = myGrades(year, candidateNumber, badFormat, length) finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) if "Reformat results by module and output to csv" in initialAnswers['whatToDo']: formatOutputPath = pathlib.Path(askFormat()) goodFormat = goodFormater(badFormat, formatOutputPath, year, length) if "Plot the results by module" in initialAnswers['whatToDo']: howPlotAsk(goodFormat) elif "Plot the results by module" in initialAnswers['whatToDo']: goodFormat = goodFormater(badFormat, None, year, length) howPlotAsk(goodFormat) [print('\n', x) for x in finalReturn] ######################### # # # end # # prompt # # # # # ######################### ######################### # # # # # run with # # cli args # # # ######################### if len(sys.argv) > 1: if not args.input: inputPath = pathlib.Path(askInput()) else: inputPath = pathlib.Path(args.input) if not args.year: year = int(askYear()) else: year = int(args.year) # create a list from every row badFormat = badFormater(inputPath) # create a list from every row howManyCandidates = len(badFormat) - 1 length = int(len(badFormat['Cand'])/2) finalReturn = [] if args.rank: if not args.candidate: candidateNumber = askCandidateNumber() else: candidateNumber = args.candidate weightedAverage = myGrades(year, candidateNumber, badFormat, length) rank = myRank(weightedAverage, badFormat, year, length) if args.my: finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format( rank, howManyCandidates, (rank * 100) / howManyCandidates)) elif args.my: if not args.candidate: candidateNumber = askCandidateNumber() else: candidateNumber = args.candidate weightedAverage = myGrades(year, candidateNumber, badFormat, length) finalReturn.append('Your weighted average for the year is: {:.2f}%'.format( weightedAverage)) if args.format is not None: formatOutputPath = pathlib.Path(args.format) goodFormat = goodFormater(badFormat, formatOutputPath, year, length) if args.plot: howPlotArgs(goodFormat) elif args.plot: goodFormat = goodFormater(badFormat, None, year, length) howPlotArgs(goodFormat) [print('\n', x) for x in finalReturn] ######################### # # # end # # run with # # cli args # # # ######################### print('')
[ "def", "main", "(", "args", ")", ":", "print", "(", "\"\\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\\n\"", ")", "#########################", "# #", "# #", "# prompt #", "# #", "# #", "#########################", "if", "not", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "initialAnswers", "=", "askInitial", "(", ")", "inputPath", "=", "pathlib", ".", "Path", "(", "initialAnswers", "[", "'inputPath'", "]", ")", "year", "=", "int", "(", "initialAnswers", "[", "'year'", "]", ")", "# create a list from every row", "badFormat", "=", "badFormater", "(", "inputPath", ")", "# create a list from every row", "howManyCandidates", "=", "len", "(", "badFormat", ")", "-", "1", "length", "=", "int", "(", "len", "(", "badFormat", "[", "'Cand'", "]", ")", "/", "2", ")", "finalReturn", "=", "[", "]", "if", "\"Get your rank in the year\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "candidateNumber", "=", "askCandidateNumber", "(", ")", "weightedAverage", "=", "myGrades", "(", "year", ",", "candidateNumber", ",", "badFormat", ",", "length", ")", "rank", "=", "myRank", "(", "weightedAverage", ",", "badFormat", ",", "year", ",", "length", ")", "if", "\"Get your weighted average\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "finalReturn", ".", "append", "(", "'Your weighted average for the year is: {:.2f}%'", ".", "format", "(", "weightedAverage", ")", ")", "finalReturn", ".", "append", "(", "'Your rank is {}th of {} ({:.2f} percentile)'", ".", "format", "(", "rank", ",", "howManyCandidates", ",", "(", "rank", "*", "100", ")", "/", "howManyCandidates", ")", ")", "elif", "\"Get your weighted average\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "candidateNumber", "=", "askCandidateNumber", "(", ")", "weightedAverage", "=", "myGrades", "(", "year", ",", "candidateNumber", ",", "badFormat", ",", "length", ")", "finalReturn", ".", "append", "(", "'Your weighted average for the year is: {:.2f}%'", ".", "format", "(", "weightedAverage", ")", ")", "if", "\"Reformat results by module and output to csv\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "formatOutputPath", "=", "pathlib", ".", "Path", "(", "askFormat", "(", ")", ")", "goodFormat", "=", "goodFormater", "(", "badFormat", ",", "formatOutputPath", ",", "year", ",", "length", ")", "if", "\"Plot the results by module\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "howPlotAsk", "(", "goodFormat", ")", "elif", "\"Plot the results by module\"", "in", "initialAnswers", "[", "'whatToDo'", "]", ":", "goodFormat", "=", "goodFormater", "(", "badFormat", ",", "None", ",", "year", ",", "length", ")", "howPlotAsk", "(", "goodFormat", ")", "[", "print", "(", "'\\n'", ",", "x", ")", "for", "x", "in", "finalReturn", "]", "#########################", "# #", "# end #", "# prompt #", "# #", "# #", "#########################", "#########################", "# #", "# #", "# run with #", "# cli args #", "# #", "#########################", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "if", "not", "args", ".", "input", ":", "inputPath", "=", "pathlib", ".", "Path", "(", "askInput", "(", ")", ")", "else", ":", "inputPath", "=", "pathlib", ".", "Path", "(", "args", ".", "input", ")", "if", "not", "args", ".", "year", ":", "year", "=", "int", "(", "askYear", "(", ")", ")", "else", ":", "year", "=", "int", "(", "args", ".", "year", ")", "# create a list from every row", "badFormat", "=", "badFormater", "(", "inputPath", ")", "# create a list from every row", 
"howManyCandidates", "=", "len", "(", "badFormat", ")", "-", "1", "length", "=", "int", "(", "len", "(", "badFormat", "[", "'Cand'", "]", ")", "/", "2", ")", "finalReturn", "=", "[", "]", "if", "args", ".", "rank", ":", "if", "not", "args", ".", "candidate", ":", "candidateNumber", "=", "askCandidateNumber", "(", ")", "else", ":", "candidateNumber", "=", "args", ".", "candidate", "weightedAverage", "=", "myGrades", "(", "year", ",", "candidateNumber", ",", "badFormat", ",", "length", ")", "rank", "=", "myRank", "(", "weightedAverage", ",", "badFormat", ",", "year", ",", "length", ")", "if", "args", ".", "my", ":", "finalReturn", ".", "append", "(", "'Your weighted average for the year is: {:.2f}%'", ".", "format", "(", "weightedAverage", ")", ")", "finalReturn", ".", "append", "(", "'Your rank is {}th of {} ({:.2f} percentile)'", ".", "format", "(", "rank", ",", "howManyCandidates", ",", "(", "rank", "*", "100", ")", "/", "howManyCandidates", ")", ")", "elif", "args", ".", "my", ":", "if", "not", "args", ".", "candidate", ":", "candidateNumber", "=", "askCandidateNumber", "(", ")", "else", ":", "candidateNumber", "=", "args", ".", "candidate", "weightedAverage", "=", "myGrades", "(", "year", ",", "candidateNumber", ",", "badFormat", ",", "length", ")", "finalReturn", ".", "append", "(", "'Your weighted average for the year is: {:.2f}%'", ".", "format", "(", "weightedAverage", ")", ")", "if", "args", ".", "format", "is", "not", "None", ":", "formatOutputPath", "=", "pathlib", ".", "Path", "(", "args", ".", "format", ")", "goodFormat", "=", "goodFormater", "(", "badFormat", ",", "formatOutputPath", ",", "year", ",", "length", ")", "if", "args", ".", "plot", ":", "howPlotArgs", "(", "goodFormat", ")", "elif", "args", ".", "plot", ":", "goodFormat", "=", "goodFormater", "(", "badFormat", ",", "None", ",", "year", ",", "length", ")", "howPlotArgs", "(", "goodFormat", ")", "[", "print", "(", "'\\n'", ",", "x", ")", "for", "x", "in", "finalReturn", "]", "#########################", "# #", "# end #", "# run with #", "# cli args #", "# #", "#########################", "print", "(", "''", ")" ]
main entry point of app Arguments: args {namespace} -- arguments provided in cli
[ "main", "entry", "point", "of", "app", "Arguments", ":", "args", "{", "namespace", "}", "--", "arguments", "provided", "in", "cli" ]
python
train
34.574468
mrstephenneal/mysql-toolkit
mysql/toolkit/components/operations/__init__.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/__init__.py#L16-L18
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True): """Wrapper method providing access to the SQLScript class's methods and properties.""" return Execute(sql_script, split_algo, prep_statements, dump_fails, self)
[ "def", "script", "(", "self", ",", "sql_script", ",", "split_algo", "=", "'sql_split'", ",", "prep_statements", "=", "True", ",", "dump_fails", "=", "True", ")", ":", "return", "Execute", "(", "sql_script", ",", "split_algo", ",", "prep_statements", ",", "dump_fails", ",", "self", ")" ]
Wrapper method providing access to the SQLScript class's methods and properties.
[ "Wrapper", "method", "providing", "access", "to", "the", "SQLScript", "class", "s", "methods", "and", "properties", "." ]
python
train
89
sileht/cotyledon
cotyledon/_service_manager.py
https://github.com/sileht/cotyledon/blob/319faa2673a986733d9a7622bee29e187f2e7391/cotyledon/_service_manager.py#L177-L197
def add(self, service, workers=1, args=None, kwargs=None): """Add a new service to the ServiceManager :param service: callable that returns an instance of :py:class:`Service` :type service: callable :param workers: number of processes/workers for this service :type workers: int :param args: additional positional arguments for this service :type args: tuple :param kwargs: additional keyword arguments for this service :type kwargs: dict :return: a service id :rtype: uuid.uuid4 """ _utils.check_callable(service, 'service') _utils.check_workers(workers, 1) service_id = uuid.uuid4() self._services[service_id] = _service.ServiceConfig( service_id, service, workers, args, kwargs) return service_id
[ "def", "add", "(", "self", ",", "service", ",", "workers", "=", "1", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "_utils", ".", "check_callable", "(", "service", ",", "'service'", ")", "_utils", ".", "check_workers", "(", "workers", ",", "1", ")", "service_id", "=", "uuid", ".", "uuid4", "(", ")", "self", ".", "_services", "[", "service_id", "]", "=", "_service", ".", "ServiceConfig", "(", "service_id", ",", "service", ",", "workers", ",", "args", ",", "kwargs", ")", "return", "service_id" ]
Add a new service to the ServiceManager :param service: callable that returns an instance of :py:class:`Service` :type service: callable :param workers: number of processes/workers for this service :type workers: int :param args: additional positional arguments for this service :type args: tuple :param kwargs: additional keyword arguments for this service :type kwargs: dict :return: a service id :rtype: uuid.uuid4
[ "Add", "a", "new", "service", "to", "the", "ServiceManager" ]
python
train
39.47619
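ServiceManager.add is public API, so the record supports a direct usage sketch; this mirrors cotyledon's documented pattern, with an illustrative PrinterService:

import time
import cotyledon

class PrinterService(cotyledon.Service):
    def __init__(self, worker_id, msg):
        super(PrinterService, self).__init__(worker_id)
        self.msg = msg

    def run(self):
        while True:
            print(self.msg)
            time.sleep(1)

manager = cotyledon.ServiceManager()
service_id = manager.add(PrinterService, workers=2, args=('hello',))
manager.run()  # blocks, supervising the two worker processes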
mbj4668/pyang
pyang/translators/dsdl.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L424-L430
def setup_top(self): """Create top-level elements of the hybrid schema.""" self.top_grammar = SchemaNode("grammar") self.top_grammar.attr = { "xmlns": "http://relaxng.org/ns/structure/1.0", "datatypeLibrary": "http://www.w3.org/2001/XMLSchema-datatypes"} self.tree = SchemaNode("start")
[ "def", "setup_top", "(", "self", ")", ":", "self", ".", "top_grammar", "=", "SchemaNode", "(", "\"grammar\"", ")", "self", ".", "top_grammar", ".", "attr", "=", "{", "\"xmlns\"", ":", "\"http://relaxng.org/ns/structure/1.0\"", ",", "\"datatypeLibrary\"", ":", "\"http://www.w3.org/2001/XMLSchema-datatypes\"", "}", "self", ".", "tree", "=", "SchemaNode", "(", "\"start\"", ")" ]
Create top-level elements of the hybrid schema.
[ "Create", "top", "-", "level", "elements", "of", "the", "hybrid", "schema", "." ]
python
train
48
Hackerfleet/hfos
hfos/tool/__init__.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/__init__.py#L78-L90
def _ask_password(): """Securely and interactively ask for a password""" password = "Foo" password_trial = "" while password != password_trial: password = getpass.getpass() password_trial = getpass.getpass(prompt="Repeat:") if password != password_trial: print("\nPasswords do not match!") return password
[ "def", "_ask_password", "(", ")", ":", "password", "=", "\"Foo\"", "password_trial", "=", "\"\"", "while", "password", "!=", "password_trial", ":", "password", "=", "getpass", ".", "getpass", "(", ")", "password_trial", "=", "getpass", ".", "getpass", "(", "prompt", "=", "\"Repeat:\"", ")", "if", "password", "!=", "password_trial", ":", "print", "(", "\"\\nPasswords do not match!\"", ")", "return", "password" ]
Securely and interactively ask for a password
[ "Securely", "and", "interactively", "ask", "for", "a", "password" ]
python
train
27.076923
se-esss-litterbox/Pynac
Pynac/Elements.py
https://github.com/se-esss-litterbox/Pynac/blob/97e20aa85d20112cd114faa54a8197c5d0f61209/Pynac/Elements.py#L294-L306
def dynacRepresentation(self): """ Return the Pynac representation of this Set4DAperture instance. """ details = [ self.energyDefnFlag.val, self.energy.val, self.phase.val, self.x.val, self.y.val, self.radius.val, ] return ['REJECT', [details]]
[ "def", "dynacRepresentation", "(", "self", ")", ":", "details", "=", "[", "self", ".", "energyDefnFlag", ".", "val", ",", "self", ".", "energy", ".", "val", ",", "self", ".", "phase", ".", "val", ",", "self", ".", "x", ".", "val", ",", "self", ".", "y", ".", "val", ",", "self", ".", "radius", ".", "val", ",", "]", "return", "[", "'REJECT'", ",", "[", "details", "]", "]" ]
Return the Pynac representation of this Set4DAperture instance.
[ "Return", "the", "Pynac", "representation", "of", "this", "Set4DAperture", "instance", "." ]
python
train
27.076923
quiltdata/quilt
compiler/quilt/tools/command.py
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/command.py#L768-L784
def version_list(package): """ List the versions of a package. """ team, owner, pkg = parse_package(package) session = _get_session(team) response = session.get( "{url}/api/version/{owner}/{pkg}/".format( url=get_registry_url(team), owner=owner, pkg=pkg ) ) for version in response.json()['versions']: print("%s: %s" % (version['version'], version['hash']))
[ "def", "version_list", "(", "package", ")", ":", "team", ",", "owner", ",", "pkg", "=", "parse_package", "(", "package", ")", "session", "=", "_get_session", "(", "team", ")", "response", "=", "session", ".", "get", "(", "\"{url}/api/version/{owner}/{pkg}/\"", ".", "format", "(", "url", "=", "get_registry_url", "(", "team", ")", ",", "owner", "=", "owner", ",", "pkg", "=", "pkg", ")", ")", "for", "version", "in", "response", ".", "json", "(", ")", "[", "'versions'", "]", ":", "print", "(", "\"%s: %s\"", "%", "(", "version", "[", "'version'", "]", ",", "version", "[", "'hash'", "]", ")", ")" ]
List the versions of a package.
[ "List", "the", "versions", "of", "a", "package", "." ]
python
train
25.588235
globality-corp/microcosm
microcosm/metadata.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/metadata.py#L32-L48
def get_root_path(self, name): """ Attempt to compute a root path for a (hopefully importable) name. Based in part on Flask's `root_path` calculation. See: https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777 """ module = modules.get(name) if module is not None and hasattr(module, '__file__'): return dirname(abspath(module.__file__)) # Flask keeps looking at this point. We instead set the root path to None, # assume that the user doesn't need resource loading, and raise an error # when resolving the resource path. return None
[ "def", "get_root_path", "(", "self", ",", "name", ")", ":", "module", "=", "modules", ".", "get", "(", "name", ")", "if", "module", "is", "not", "None", "and", "hasattr", "(", "module", ",", "'__file__'", ")", ":", "return", "dirname", "(", "abspath", "(", "module", ".", "__file__", ")", ")", "# Flask keeps looking at this point. We instead set the root path to None,", "# assume that the user doesn't need resource loading, and raise an error", "# when resolving the resource path.", "return", "None" ]
Attempt to compute a root path for a (hopefully importable) name. Based in part on Flask's `root_path` calculation. See: https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
[ "Attempt", "to", "compute", "a", "root", "path", "for", "a", "(", "hopefully", "importable", ")", "name", "." ]
python
train
37.588235
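The lookup itself has no microcosm dependencies, so it can be exercised standalone:

from sys import modules
from os.path import abspath, dirname

def get_root_path(name):
    # same logic as the record: importable module -> its directory, else None
    module = modules.get(name)
    if module is not None and hasattr(module, '__file__'):
        return dirname(abspath(module.__file__))
    return None

import json
print(get_root_path('json'))       # directory of the stdlib json package
print(get_root_path('not_there'))  # None: resource loading unavailable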
samirelanduk/quickplots
quickplots/charts.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/charts.py#L217-L232
def scatter(self, *args, **kwargs): """Adds a :py:class:`.ScatterSeries` to the chart. :param \*data: The data for the series as either (x,y) values or two big\ tuples/lists of x and y values respectively. :param str name: The name to be associated with the series. :param str color: The hex colour of the line. :param Number size: The size of each data point - generally the diameter. :param Number linewidth: The width in pixels of the data points' edge. :raises ValueError: if the size and length of the data doesn't match\ either format.""" if "color" not in kwargs: kwargs["color"] = self.next_color() series = ScatterSeries(*args, **kwargs) self.add_series(series)
[ "def", "scatter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"color\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"color\"", "]", "=", "self", ".", "next_color", "(", ")", "series", "=", "ScatterSeries", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "add_series", "(", "series", ")" ]
Adds a :py:class:`.ScatterSeries` to the chart. :param \*data: The data for the series as either (x,y) values or two big\ tuples/lists of x and y values respectively. :param str name: The name to be associated with the series. :param str color: The hex colour of the line. :param Number size: The size of each data point - generally the diameter. :param Number linewidth: The width in pixels of the data points' edge. :raises ValueError: if the size and length of the data doesn't match\ either format.
[ "Adds", "a", ":", "py", ":", "class", ":", ".", "ScatterSeries", "to", "the", "chart", "." ]
python
train
47.8125
jcrobak/parquet-python
parquet/__init__.py
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L71-L84
def _read_footer(file_obj): """Read the footer from the given file object and returns a FileMetaData object. This method assumes that the fo references a valid parquet file. """ footer_size = _get_footer_size(file_obj) if logger.isEnabledFor(logging.DEBUG): logger.debug("Footer size in bytes: %s", footer_size) file_obj.seek(-(8 + footer_size), 2) # seek to beginning of footer tin = TFileTransport(file_obj) pin = TCompactProtocolFactory().get_protocol(tin) fmd = parquet_thrift.FileMetaData() fmd.read(pin) return fmd
[ "def", "_read_footer", "(", "file_obj", ")", ":", "footer_size", "=", "_get_footer_size", "(", "file_obj", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"Footer size in bytes: %s\"", ",", "footer_size", ")", "file_obj", ".", "seek", "(", "-", "(", "8", "+", "footer_size", ")", ",", "2", ")", "# seek to beginning of footer", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "fmd", "=", "parquet_thrift", ".", "FileMetaData", "(", ")", "fmd", ".", "read", "(", "pin", ")", "return", "fmd" ]
Read the footer from the given file object and returns a FileMetaData object. This method assumes that the fo references a valid parquet file.
[ "Read", "the", "footer", "from", "the", "given", "file", "object", "and", "returns", "a", "FileMetaData", "object", "." ]
python
train
40.071429
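_read_footer leans on a _get_footer_size helper that the record does not include. A hedged sketch of what that helper plausibly does, based on the parquet file layout (a file ends with a 4-byte little-endian footer length followed by the b'PAR1' magic):

import struct

def get_footer_size(file_obj):
    # read the fixed 8-byte tail: <uint32 footer length><b'PAR1' magic>
    file_obj.seek(-8, 2)
    length_bytes = file_obj.read(4)
    assert file_obj.read(4) == b'PAR1', 'not a parquet file'
    return struct.unpack('<i', length_bytes)[0]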
grycap/cpyutils
evaluate.py
https://github.com/grycap/cpyutils/blob/fa966fc6d2ae1e1e799e19941561aa79b617f1b1/evaluate.py#L368-L377
def p_kwl_kwl(self, p): ''' kwl : kwl SEPARATOR kwl ''' _LOGGER.debug("kwl -> kwl ; kwl") if p[3] is not None: p[0] = p[3] elif p[1] is not None: p[0] = p[1] else: p[0] = TypedClass(None, TypedClass.UNKNOWN)
[ "def", "p_kwl_kwl", "(", "self", ",", "p", ")", ":", "_LOGGER", ".", "debug", "(", "\"kwl -> kwl ; kwl\"", ")", "if", "p", "[", "3", "]", "is", "not", "None", ":", "p", "[", "0", "]", "=", "p", "[", "3", "]", "elif", "p", "[", "1", "]", "is", "not", "None", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "else", ":", "p", "[", "0", "]", "=", "TypedClass", "(", "None", ",", "TypedClass", ".", "UNKNOWN", ")" ]
kwl : kwl SEPARATOR kwl
[ "kwl", ":", "kwl", "SEPARATOR", "kwl" ]
python
train
28.3
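The production keeps the rightmost non-None result and otherwise falls back to an UNKNOWN value. The same choice, separated from the PLY parser machinery (merge_kwl and the string default are illustrative stand-ins for p[0]/p[1]/p[3] and TypedClass):

def merge_kwl(left, right, default="UNKNOWN"):
    if right is not None:       # p[3]: the rightmost production wins
        return right
    if left is not None:        # p[1]
        return left
    return default              # TypedClass(None, TypedClass.UNKNOWN)

print(merge_kwl("a", "b"))      # b
print(merge_kwl("a", None))     # a
print(merge_kwl(None, None))    # UNKNOWN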
twilio/twilio-python
twilio/rest/api/v2010/account/sip/domain/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/domain/__init__.py#L369-L382
def auth(self): """ Access the auth :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList """ if self._auth is None: self._auth = AuthTypesList( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['sid'], ) return self._auth
[ "def", "auth", "(", "self", ")", ":", "if", "self", ".", "_auth", "is", "None", ":", "self", ".", "_auth", "=", "AuthTypesList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "domain_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_auth" ]
Access the auth :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
[ "Access", "the", "auth" ]
python
train
33.428571
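This is the usual lazily-initialised sub-resource pattern: build the child object on first access, then reuse the cached instance. A minimal standalone sketch; DomainContext and the list literal are stand-ins for the Twilio classes:

class DomainContext:
    def __init__(self):
        self._auth = None

    @property
    def auth(self):
        if self._auth is None:              # construct only on first access
            print("building AuthTypesList")
            self._auth = ["auth_calls", "auth_registrations"]
        return self._auth

ctx = DomainContext()
ctx.auth
ctx.auth                                    # "building AuthTypesList" prints once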
etcher-be/elib_miz
elib_miz/mission.py
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L648-L659
def get_groups_from_category(self, category) -> typing.Iterator['Group']: """ Args: category: group category Returns: generator over all groups from a specific category in this coalition """ Mission.validator_group_category.validate(category, 'get_groups_from_category') for group in self.groups: if group.group_category == category: yield group
[ "def", "get_groups_from_category", "(", "self", ",", "category", ")", "->", "typing", ".", "Iterator", "[", "'Group'", "]", ":", "Mission", ".", "validator_group_category", ".", "validate", "(", "category", ",", "'get_groups_from_category'", ")", "for", "group", "in", "self", ".", "groups", ":", "if", "group", ".", "group_category", "==", "category", ":", "yield", "group" ]
Args: category: group category Returns: generator over all groups from a specific category in this coalition
[ "Args", ":", "category", ":", "group", "category" ]
python
train
35.333333
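Usage is a plain generator filter, and nothing runs until the generator is consumed. A hedged sketch with dicts standing in for Group objects and without the category validation:

def groups_from_category(groups, category):
    for group in groups:
        if group["category"] == category:
            yield group

groups = [{"name": "CAP flight", "category": "plane"},
          {"name": "CVN-74", "category": "ship"}]
print([g["name"] for g in groups_from_category(groups, "plane")])  # ['CAP flight']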
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L678-L690
def run_cmd_unit(self, sentry_unit, cmd): """Run a command on a unit, return the output and exit code.""" output, code = sentry_unit.run(cmd) if code == 0: self.log.debug('{} `{}` command returned {} ' '(OK)'.format(sentry_unit.info['unit_name'], cmd, code)) else: msg = ('{} `{}` command returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) return str(output), code
[ "def", "run_cmd_unit", "(", "self", ",", "sentry_unit", ",", "cmd", ")", ":", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "==", "0", ":", "self", ".", "log", ".", "debug", "(", "'{} `{}` command returned {} '", "'(OK)'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ")", ")", "else", ":", "msg", "=", "(", "'{} `{}` command returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ",", "output", ")", ")", "amulet", ".", "raise_status", "(", "amulet", ".", "FAIL", ",", "msg", "=", "msg", ")", "return", "str", "(", "output", ")", ",", "code" ]
Run a command on a unit, return the output and exit code.
[ "Run", "a", "command", "on", "a", "unit", "return", "the", "output", "and", "exit", "code", "." ]
python
train
47.230769
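The same output-plus-exit-code contract can be reproduced locally with subprocess; this run_cmd is a stand-in that mimics sentry_unit.run() rather than talking to Juju:

import subprocess

def run_cmd(cmd):
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return proc.stdout.strip(), proc.returncode

output, code = run_cmd("echo hello")
print(output, code)   # hello 0 -- a non-zero code would be treated as a failure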
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L2656-L2701
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
    "Load the format of fields to generate from a CSV spreadsheet"
    # if the file cannot be found, look for it in the default directory:
    if not os.path.exists(archivo):
        archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))

    if DEBUG: print "abriendo archivo ", archivo
    # initialise the list of elements:
    self.elements = []
    for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
        if DEBUG: print "procesando linea ", lno, linea
        args = []
        for i,v in enumerate(linea.split(";")):
            if not v.startswith("'"):
                v = v.replace(",",".")
            else:
                v = v#.decode('latin1')
            if v.strip()=='':
                v = None
            else:
                v = eval(v.strip())
            args.append(v)

        # fix the relative path for images:
        if args[1] == 'I':
            if not os.path.exists(args[14]):
                args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
            if DEBUG: print "NUEVO PATH:", args[14]

        self.AgregarCampoPDF(*args)

    self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1)

    if HOMO:
        self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1)

    # load the elements into the template
    self.template.load_elements(self.elements)
    return True
[ "def", "CargarFormatoPDF", "(", "self", ",", "archivo", "=", "\"liquidacion_form_c1116b_wslpg.csv\"", ")", ":", "# si no encuentro archivo, lo busco en el directorio predeterminado:", "if", "not", "os", ".", "path", ".", "exists", "(", "archivo", ")", ":", "archivo", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "\"plantillas\"", ",", "os", ".", "path", ".", "basename", "(", "archivo", ")", ")", "if", "DEBUG", ":", "print", "\"abriendo archivo \"", ",", "archivo", "# inicializo la lista de los elementos:", "self", ".", "elements", "=", "[", "]", "for", "lno", ",", "linea", "in", "enumerate", "(", "open", "(", "archivo", ".", "encode", "(", "'latin1'", ")", ")", ".", "readlines", "(", ")", ")", ":", "if", "DEBUG", ":", "print", "\"procesando linea \"", ",", "lno", ",", "linea", "args", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "linea", ".", "split", "(", "\";\"", ")", ")", ":", "if", "not", "v", ".", "startswith", "(", "\"'\"", ")", ":", "v", "=", "v", ".", "replace", "(", "\",\"", ",", "\".\"", ")", "else", ":", "v", "=", "v", "#.decode('latin1')", "if", "v", ".", "strip", "(", ")", "==", "''", ":", "v", "=", "None", "else", ":", "v", "=", "eval", "(", "v", ".", "strip", "(", ")", ")", "args", ".", "append", "(", "v", ")", "# corrijo path relativo para las imágenes:", "if", "args", "[", "1", "]", "==", "'I'", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "args", "[", "14", "]", ")", ":", "args", "[", "14", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "InstallDir", ",", "\"plantillas\"", ",", "os", ".", "path", ".", "basename", "(", "args", "[", "14", "]", ")", ")", "if", "DEBUG", ":", "print", "\"NUEVO PATH:\"", ",", "args", "[", "14", "]", "self", ".", "AgregarCampoPDF", "(", "*", "args", ")", "self", ".", "AgregarCampoPDF", "(", "\"anulado\"", ",", "'T'", ",", "150", ",", "250", ",", "0", ",", "0", ",", "size", "=", "70", ",", "rotate", "=", "45", ",", "foreground", "=", "0x808080", ",", "priority", "=", "-", "1", ")", "if", "HOMO", ":", "self", ".", "AgregarCampoPDF", "(", "\"homo\"", ",", "'T'", ",", "100", ",", "250", ",", "0", ",", "0", ",", "size", "=", "70", ",", "rotate", "=", "45", ",", "foreground", "=", "0x808080", ",", "priority", "=", "-", "1", ")", "# cargo los elementos en la plantilla", "self", ".", "template", ".", "load_elements", "(", "self", ".", "elements", ")", "return", "True" ]
Load the format of fields to generate from a CSV spreadsheet
[ "Cargo", "el", "formato", "de", "campos", "a", "generar", "desde", "una", "planilla", "CSV" ]
python
train
38.76087
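The per-cell parsing above calls eval() on every value, which will execute arbitrary expressions coming from the CSV. A safer sketch of the same parsing rules using ast.literal_eval; the sample line is invented:

import ast

linea = "'nombre_campo';'T';10.5;20,0;0;0"
args = []
for v in linea.split(";"):
    v = v.strip()
    if not v.startswith("'"):
        v = v.replace(",", ".")            # decimal comma -> decimal point
    args.append(ast.literal_eval(v) if v else None)
print(args)                                # ['nombre_campo', 'T', 10.5, 20.0, 0, 0]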
manolomartinez/greg
greg/aux_functions.py
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/aux_functions.py#L214-L231
def download_handler(feed, placeholders): import shlex """ Parse and execute the download handler """ value = feed.retrieve_config('downloadhandler', 'greg') if value == 'greg': while os.path.isfile(placeholders.fullpath): placeholders.fullpath = placeholders.fullpath + '_' placeholders.filename = placeholders.filename + '_' urlretrieve(placeholders.link, placeholders.fullpath) else: value_list = shlex.split(value) instruction_list = [substitute_placeholders(part, placeholders) for part in value_list] returncode = subprocess.call(instruction_list) if returncode: raise URLError
[ "def", "download_handler", "(", "feed", ",", "placeholders", ")", ":", "import", "shlex", "value", "=", "feed", ".", "retrieve_config", "(", "'downloadhandler'", ",", "'greg'", ")", "if", "value", "==", "'greg'", ":", "while", "os", ".", "path", ".", "isfile", "(", "placeholders", ".", "fullpath", ")", ":", "placeholders", ".", "fullpath", "=", "placeholders", ".", "fullpath", "+", "'_'", "placeholders", ".", "filename", "=", "placeholders", ".", "filename", "+", "'_'", "urlretrieve", "(", "placeholders", ".", "link", ",", "placeholders", ".", "fullpath", ")", "else", ":", "value_list", "=", "shlex", ".", "split", "(", "value", ")", "instruction_list", "=", "[", "substitute_placeholders", "(", "part", ",", "placeholders", ")", "for", "part", "in", "value_list", "]", "returncode", "=", "subprocess", ".", "call", "(", "instruction_list", ")", "if", "returncode", ":", "raise", "URLError" ]
Parse and execute the download handler
[ "Parse", "and", "execute", "the", "download", "handler" ]
python
train
39.222222
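For a custom handler, the configured command string is shlex-split and each token gets placeholder substitution before subprocess.call runs it. A sketch of just that expansion step; str.format stands in for greg's substitute_placeholders, and the command and values are hypothetical:

import shlex

value = "wget -O {fullpath} {link}"
placeholders = {"fullpath": "/tmp/episode1.mp3",
                "link": "http://example.org/episode1.mp3"}
instruction_list = [part.format(**placeholders) for part in shlex.split(value)]
print(instruction_list)
# ['wget', '-O', '/tmp/episode1.mp3', 'http://example.org/episode1.mp3']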
shoebot/shoebot
shoebot/sbio/shell.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/sbio/shell.py#L108-L142
def print_response(self, input='', keep=False, *args, **kwargs): """ print response, if cookie is set then print that each line :param args: :param keep: if True more output is to come :param cookie: set a custom cookie, if set to 'None' then self.cookie will be used. if set to 'False' disables cookie output entirely :return: """ cookie = kwargs.get('cookie') if cookie is None: cookie = self.cookie or '' status = kwargs.get('status') lines = input.splitlines() if status and not lines: lines = [''] if cookie: output_template = '{cookie} {status}{cookie_char}{line}' else: output_template = '{line}' for i, line in enumerate(lines): if i != len(lines) - 1 or keep is True: cookie_char = '>' else: # last line cookie_char = ':' print(output_template.format( cookie_char=cookie_char, cookie=cookie, status=status or '', line=line.strip()), file=self.stdout)
[ "def", "print_response", "(", "self", ",", "input", "=", "''", ",", "keep", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cookie", "=", "kwargs", ".", "get", "(", "'cookie'", ")", "if", "cookie", "is", "None", ":", "cookie", "=", "self", ".", "cookie", "or", "''", "status", "=", "kwargs", ".", "get", "(", "'status'", ")", "lines", "=", "input", ".", "splitlines", "(", ")", "if", "status", "and", "not", "lines", ":", "lines", "=", "[", "''", "]", "if", "cookie", ":", "output_template", "=", "'{cookie} {status}{cookie_char}{line}'", "else", ":", "output_template", "=", "'{line}'", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "i", "!=", "len", "(", "lines", ")", "-", "1", "or", "keep", "is", "True", ":", "cookie_char", "=", "'>'", "else", ":", "# last line", "cookie_char", "=", "':'", "print", "(", "output_template", ".", "format", "(", "cookie_char", "=", "cookie_char", ",", "cookie", "=", "cookie", ",", "status", "=", "status", "or", "''", ",", "line", "=", "line", ".", "strip", "(", ")", ")", ",", "file", "=", "self", ".", "stdout", ")" ]
print response, if cookie is set then print that each line :param args: :param keep: if True more output is to come :param cookie: set a custom cookie, if set to 'None' then self.cookie will be used. if set to 'False' disables cookie output entirely :return:
[ "print", "response", "if", "cookie", "is", "set", "then", "print", "that", "each", "line", ":", "param", "args", ":", ":", "param", "keep", ":", "if", "True", "more", "output", "is", "to", "come", ":", "param", "cookie", ":", "set", "a", "custom", "cookie", "if", "set", "to", "None", "then", "self", ".", "cookie", "will", "be", "used", ".", "if", "set", "to", "False", "disables", "cookie", "output", "entirely", ":", "return", ":" ]
python
valid
34
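The cookie_char logic marks every line except the last with '>' (more output follows) and the final line with ':'. A compressed sketch of that loop with a made-up cookie and no status:

lines = "first\nsecond\nlast".splitlines()
for i, line in enumerate(lines):
    cookie_char = ">" if i != len(lines) - 1 else ":"
    print("#cookie {}{}".format(cookie_char, line))
# #cookie >first
# #cookie >second
# #cookie :last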
remram44/rpaths
rpaths.py
https://github.com/remram44/rpaths/blob/e4ff55d985c4d643d9fd214539d45af39ae5a7cd/rpaths.py#L534-L552
def tempdir(cls, suffix='', prefix=None, dir=None): """Returns a new temporary directory. Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the `text` argument is not accepted. The directory is readable, writable, and searchable only by the creating user. The caller is responsible for deleting the directory when done with it. """ if prefix is None: prefix = tempfile.template if dir is not None: # Note that this is not safe on Python 2 # There is no work around, apart from not using the tempfile module dir = str(Path(dir)) dirname = tempfile.mkdtemp(suffix, prefix, dir) return cls(dirname).absolute()
[ "def", "tempdir", "(", "cls", ",", "suffix", "=", "''", ",", "prefix", "=", "None", ",", "dir", "=", "None", ")", ":", "if", "prefix", "is", "None", ":", "prefix", "=", "tempfile", ".", "template", "if", "dir", "is", "not", "None", ":", "# Note that this is not safe on Python 2", "# There is no work around, apart from not using the tempfile module", "dir", "=", "str", "(", "Path", "(", "dir", ")", ")", "dirname", "=", "tempfile", ".", "mkdtemp", "(", "suffix", ",", "prefix", ",", "dir", ")", "return", "cls", "(", "dirname", ")", ".", "absolute", "(", ")" ]
Returns a new temporary directory. Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the `text` argument is not accepted. The directory is readable, writable, and searchable only by the creating user. The caller is responsible for deleting the directory when done with it.
[ "Returns", "a", "new", "temporary", "directory", "." ]
python
train
39
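Typical usage mirrors tempfile.mkdtemp, including the caller-owns-cleanup contract. A plain-tempfile sketch with no rpaths dependency; the suffix and prefix are arbitrary:

import os
import tempfile

dirname = tempfile.mkdtemp(suffix=".work", prefix="rpaths-")
print(os.path.isdir(dirname))   # True; the directory is private to the creator
os.rmdir(dirname)               # the caller must delete it when done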
materialsproject/pymatgen
pymatgen/analysis/eos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/eos.py#L380-L386
def fit(self, order=3):
    """
    Overridden since this eos works with volume**(2/3) instead of volume.
    """
    x = self.volumes**(-2./3.)
    self.eos_params = np.polyfit(x, self.energies, order)
    self._set_params()
[ "def", "fit", "(", "self", ",", "order", "=", "3", ")", ":", "x", "=", "self", ".", "volumes", "**", "(", "-", "2.", "/", "3.", ")", "self", ".", "eos_params", "=", "np", ".", "polyfit", "(", "x", ",", "self", ".", "energies", ",", "order", ")", "self", ".", "_set_params", "(", ")" ]
Overridden since this eos works with volume**(2/3) instead of volume.
[ "Overriden", "since", "this", "eos", "works", "with", "volume", "**", "(", "2", "/", "3", ")", "instead", "of", "volume", "." ]
python
train
34.571429
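The change of variable is the whole trick: energies are fit as a polynomial in x = V**(-2/3) rather than in V itself. A standalone sketch with made-up data:

import numpy as np

volumes = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
energies = np.array([-1.00, -1.18, -1.24, -1.21, -1.12])
x = volumes ** (-2.0 / 3.0)            # the transformed variable
eos_params = np.polyfit(x, energies, 3)
print(np.polyval(eos_params, x))       # fitted energies at the input volumes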
ncolony/ncolony
ncolony/service.py
https://github.com/ncolony/ncolony/blob/6ac71bda1de6706fb34244ae4972e36db5f062d3/ncolony/service.py#L54-L92
def get(config, messages, freq, pidDir=None, reactor=None):
    """Return a service which monitors processes based on directory contents

    Construct and return a service that, when started, will run processes
    based on the contents of the 'config' directory, restarting them if file
    contents change and stopping them if the file is removed.

    It also listens for restart and restart-all messages on the 'messages'
    directory.

    :param config: string, location of configuration directory
    :param messages: string, location of messages directory
    :param freq: number, frequency to check for new messages and
                 configuration updates
    :param pidDir: {twisted.python.filepath.FilePath} or None,
                   location to keep pid files
    :param reactor: something implementing the interfaces
                    {twisted.internet.interfaces.IReactorTime} and
                    {twisted.internet.interfaces.IReactorProcess}
    :returns: service, {twisted.application.interfaces.IService}
    """
    ret = taservice.MultiService()
    args = ()
    if reactor is not None:
        args = reactor,
    procmon = procmonlib.ProcessMonitor(*args)
    if pidDir is not None:
        protocols = TransportDirectoryDict(pidDir)
        procmon.protocols = protocols
    procmon.setName('procmon')
    receiver = process_events.Receiver(procmon)
    confcheck = directory_monitor.checker(config, receiver)
    confserv = internet.TimerService(freq, confcheck)
    confserv.setServiceParent(ret)
    messagecheck = directory_monitor.messages(messages, receiver)
    messageserv = internet.TimerService(freq, messagecheck)
    messageserv.setServiceParent(ret)
    procmon.setServiceParent(ret)
    return ret
[ "def", "get", "(", "config", ",", "messages", ",", "freq", ",", "pidDir", "=", "None", ",", "reactor", "=", "None", ")", ":", "ret", "=", "taservice", ".", "MultiService", "(", ")", "args", "=", "(", ")", "if", "reactor", "is", "not", "None", ":", "args", "=", "reactor", ",", "procmon", "=", "procmonlib", ".", "ProcessMonitor", "(", "*", "args", ")", "if", "pidDir", "is", "not", "None", ":", "protocols", "=", "TransportDirectoryDict", "(", "pidDir", ")", "procmon", ".", "protocols", "=", "protocols", "procmon", ".", "setName", "(", "'procmon'", ")", "receiver", "=", "process_events", ".", "Receiver", "(", "procmon", ")", "confcheck", "=", "directory_monitor", ".", "checker", "(", "config", ",", "receiver", ")", "confserv", "=", "internet", ".", "TimerService", "(", "freq", ",", "confcheck", ")", "confserv", ".", "setServiceParent", "(", "ret", ")", "messagecheck", "=", "directory_monitor", ".", "messages", "(", "messages", ",", "receiver", ")", "messageserv", "=", "internet", ".", "TimerService", "(", "freq", ",", "messagecheck", ")", "messageserv", ".", "setServiceParent", "(", "ret", ")", "procmon", ".", "setServiceParent", "(", "ret", ")", "return", "ret" ]
Return a service which monitors processes based on directory contents

Construct and return a service that, when started, will run processes based on the contents of the 'config' directory, restarting them if file contents change and stopping them if the file is removed.

It also listens for restart and restart-all messages on the 'messages' directory.

:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration updates
:param pidDir: {twisted.python.filepath.FilePath} or None, location to keep pid files
:param reactor: something implementing the interfaces {twisted.internet.interfaces.IReactorTime} and {twisted.internet.interfaces.IReactorProcess}
:returns: service, {twisted.application.interfaces.IService}
[ "Return", "a", "service", "which", "monitors", "processes", "based", "on", "directory", "contents" ]
python
test
44.076923
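The wiring pattern is standard Twisted: child services are attached to a MultiService so they start and stop together. A minimal sketch of just that skeleton; the no-op callable stands in for the directory checkers:

from twisted.application import internet
from twisted.application import service as taservice

root = taservice.MultiService()
poller = internet.TimerService(10, lambda: None)   # stands in for confcheck
poller.setServiceParent(root)
# root.startService() would now start every child on a running reactor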
Contraz/demosys-py
demosys/finders/base.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/finders/base.py#L68-L80
def get_finder(import_path): """ Get a finder class from an import path. Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found. This function uses an lru cache. :param import_path: string representing an import path :return: An instance of the finder """ Finder = import_string(import_path) if not issubclass(Finder, BaseFileSystemFinder): raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path)) return Finder()
[ "def", "get_finder", "(", "import_path", ")", ":", "Finder", "=", "import_string", "(", "import_path", ")", "if", "not", "issubclass", "(", "Finder", ",", "BaseFileSystemFinder", ")", ":", "raise", "ImproperlyConfigured", "(", "'Finder {} is not a subclass of core.finders.FileSystemFinder'", ".", "format", "(", "import_path", ")", ")", "return", "Finder", "(", ")" ]
Get a finder class from an import path. Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found. This function uses an lru cache. :param import_path: string representing an import path :return: An instance of the finder
[ "Get", "a", "finder", "class", "from", "an", "import", "path", ".", "Raises", "demosys", ".", "core", ".", "exceptions", ".", "ImproperlyConfigured", "if", "the", "finder", "is", "not", "found", ".", "This", "function", "uses", "an", "lru", "cache", "." ]
python
valid
40.923077
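import_string resolves a dotted path to an attribute; a common way to write such a helper is importlib plus getattr. A sketch (this import_string is a stand-in, not demosys's actual implementation):

import importlib

def import_string(import_path):
    module_path, _, attr = import_path.rpartition(".")
    return getattr(importlib.import_module(module_path), attr)

Decimal = import_string("decimal.Decimal")
print(Decimal("1.5"))   # 1.5 -- the resolved class, ready for issubclass checks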
gwastro/pycbc-glue
pycbc_glue/iterutils.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/iterutils.py#L193-L206
def flatten(sequence, levels = 1): """ Example: >>> nested = [[1,2], [[3]]] >>> list(flatten(nested)) [1, 2, [3]] """ if levels == 0: for x in sequence: yield x else: for x in sequence: for y in flatten(x, levels - 1): yield y
[ "def", "flatten", "(", "sequence", ",", "levels", "=", "1", ")", ":", "if", "levels", "==", "0", ":", "for", "x", "in", "sequence", ":", "yield", "x", "else", ":", "for", "x", "in", "sequence", ":", "for", "y", "in", "flatten", "(", "x", ",", "levels", "-", "1", ")", ":", "yield", "y" ]
Example: >>> nested = [[1,2], [[3]]] >>> list(flatten(nested)) [1, 2, [3]]
[ "Example", ":", ">>>", "nested", "=", "[[", "1", "2", "]", "[[", "3", "]]]", ">>>", "list", "(", "flatten", "(", "nested", "))", "[", "1", "2", "[", "3", "]]" ]
python
train
16.785714
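levels controls how many layers of nesting are peeled. Assuming the flatten above is in scope:

nested = [[1, 2], [[3]]]
print(list(flatten(nested)))             # [1, 2, [3]]

deeper = [[[1], [2]], [[3]]]
print(list(flatten(deeper, levels=2)))   # [1, 2, 3]
# Note: every element must still be iterable at the requested depth;
# flatten(nested, levels=2) would hit the bare ints 1 and 2 and raise TypeError.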
CI-WATER/gsshapy
gsshapy/orm/cif.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cif.py#L863-L1001
def _writeStructureLink(self, link, fileObject, replaceParamFile): """ Write Structure Link to File Method """ fileObject.write('%s\n' % link.type) fileObject.write('NUMSTRUCTS %s\n' % link.numElements) # Retrieve lists of structures weirs = link.weirs culverts = link.culverts # Write weirs to file for weir in weirs: fileObject.write('STRUCTTYPE %s\n' % weir.type) # Check for replacement vars crestLength = vwp(weir.crestLength, replaceParamFile) crestLowElevation = vwp(weir.crestLowElevation, replaceParamFile) dischargeCoeffForward = vwp(weir.dischargeCoeffForward, replaceParamFile) dischargeCoeffReverse = vwp(weir.dischargeCoeffReverse, replaceParamFile) crestLowLocation = vwp(weir.crestLowLocation, replaceParamFile) steepSlope = vwp(weir.steepSlope, replaceParamFile) shallowSlope = vwp(weir.shallowSlope, replaceParamFile) if weir.crestLength != None: try: fileObject.write('CREST_LENGTH %.6f\n' % crestLength) except: fileObject.write('CREST_LENGTH %s\n' % crestLength) if weir.crestLowElevation != None: try: fileObject.write('CREST_LOW_ELEV %.6f\n' % crestLowElevation) except: fileObject.write('CREST_LOW_ELEV %s\n' % crestLowElevation) if weir.dischargeCoeffForward != None: try: fileObject.write('DISCHARGE_COEFF_FORWARD %.6f\n' % dischargeCoeffForward) except: fileObject.write('DISCHARGE_COEFF_FORWARD %s\n' % dischargeCoeffForward) if weir.dischargeCoeffReverse != None: try: fileObject.write('DISCHARGE_COEFF_REVERSE %.6f\n' % dischargeCoeffReverse) except: fileObject.write('DISCHARGE_COEFF_REVERSE %s\n' % dischargeCoeffReverse) if weir.crestLowLocation != None: fileObject.write('CREST_LOW_LOC %s\n' % crestLowLocation) if weir.steepSlope != None: try: fileObject.write('STEEP_SLOPE %.6f\n' % steepSlope) except: fileObject.write('STEEP_SLOPE %s\n' % steepSlope) if weir.shallowSlope != None: try: fileObject.write('SHALLOW_SLOPE %.6f\n' % shallowSlope) except: fileObject.write('SHALLOW_SLOPE %s\n' % shallowSlope) # Write culverts to file for culvert in culverts: fileObject.write('STRUCTTYPE %s\n' % culvert.type) # Check for replacement vars upstreamInvert = vwp(culvert.upstreamInvert, replaceParamFile) downstreamInvert = vwp(culvert.downstreamInvert, replaceParamFile) inletDischargeCoeff = vwp(culvert.inletDischargeCoeff, replaceParamFile) reverseFlowDischargeCoeff = vwp(culvert.reverseFlowDischargeCoeff, replaceParamFile) slope = vwp(culvert.slope, replaceParamFile) length = vwp(culvert.length, replaceParamFile) roughness = vwp(culvert.roughness, replaceParamFile) diameter = vwp(culvert.diameter, replaceParamFile) width = vwp(culvert.width, replaceParamFile) height = vwp(culvert.height, replaceParamFile) if culvert.upstreamInvert != None: try: fileObject.write('UPINVERT %.6f\n' % upstreamInvert) except: fileObject.write('UPINVERT %s\n' % upstreamInvert) if culvert.downstreamInvert != None: try: fileObject.write('DOWNINVERT %.6f\n' % downstreamInvert) except: fileObject.write('DOWNINVERT %s\n' % downstreamInvert) if culvert.inletDischargeCoeff != None: try: fileObject.write('INLET_DISCH_COEFF %.6f\n' % inletDischargeCoeff) except: fileObject.write('INLET_DISCH_COEFF %s\n' % inletDischargeCoeff) if culvert.reverseFlowDischargeCoeff != None: try: fileObject.write('REV_FLOW_DISCH_COEFF %.6f\n' % reverseFlowDischargeCoeff) except: fileObject.write('REV_FLOW_DISCH_COEFF %s\n' % reverseFlowDischargeCoeff) if culvert.slope != None: try: fileObject.write('SLOPE %.6f\n' % slope) except: fileObject.write('SLOPE %s\n' % slope) if culvert.length != None: try: fileObject.write('LENGTH %.6f\n' % length) 
except: fileObject.write('LENGTH %s\n' % length) if culvert.roughness != None: try: fileObject.write('ROUGH_COEFF %.6f\n' % roughness) except: fileObject.write('ROUGH_COEFF %s\n' % roughness) if culvert.diameter != None: try: fileObject.write('DIAMETER %.6f\n' % diameter) except: fileObject.write('DIAMETER %s\n' % diameter) if culvert.width != None: try: fileObject.write('WIDTH %.6f\n' % width) except: fileObject.write('WIDTH %s\n' % width) if culvert.height != None: try: fileObject.write('HEIGHT %.6f\n' % height) except: fileObject.write('HEIGHT %s\n' % height)
[ "def", "_writeStructureLink", "(", "self", ",", "link", ",", "fileObject", ",", "replaceParamFile", ")", ":", "fileObject", ".", "write", "(", "'%s\\n'", "%", "link", ".", "type", ")", "fileObject", ".", "write", "(", "'NUMSTRUCTS %s\\n'", "%", "link", ".", "numElements", ")", "# Retrieve lists of structures", "weirs", "=", "link", ".", "weirs", "culverts", "=", "link", ".", "culverts", "# Write weirs to file", "for", "weir", "in", "weirs", ":", "fileObject", ".", "write", "(", "'STRUCTTYPE %s\\n'", "%", "weir", ".", "type", ")", "# Check for replacement vars", "crestLength", "=", "vwp", "(", "weir", ".", "crestLength", ",", "replaceParamFile", ")", "crestLowElevation", "=", "vwp", "(", "weir", ".", "crestLowElevation", ",", "replaceParamFile", ")", "dischargeCoeffForward", "=", "vwp", "(", "weir", ".", "dischargeCoeffForward", ",", "replaceParamFile", ")", "dischargeCoeffReverse", "=", "vwp", "(", "weir", ".", "dischargeCoeffReverse", ",", "replaceParamFile", ")", "crestLowLocation", "=", "vwp", "(", "weir", ".", "crestLowLocation", ",", "replaceParamFile", ")", "steepSlope", "=", "vwp", "(", "weir", ".", "steepSlope", ",", "replaceParamFile", ")", "shallowSlope", "=", "vwp", "(", "weir", ".", "shallowSlope", ",", "replaceParamFile", ")", "if", "weir", ".", "crestLength", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'CREST_LENGTH %.6f\\n'", "%", "crestLength", ")", "except", ":", "fileObject", ".", "write", "(", "'CREST_LENGTH %s\\n'", "%", "crestLength", ")", "if", "weir", ".", "crestLowElevation", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'CREST_LOW_ELEV %.6f\\n'", "%", "crestLowElevation", ")", "except", ":", "fileObject", ".", "write", "(", "'CREST_LOW_ELEV %s\\n'", "%", "crestLowElevation", ")", "if", "weir", ".", "dischargeCoeffForward", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'DISCHARGE_COEFF_FORWARD %.6f\\n'", "%", "dischargeCoeffForward", ")", "except", ":", "fileObject", ".", "write", "(", "'DISCHARGE_COEFF_FORWARD %s\\n'", "%", "dischargeCoeffForward", ")", "if", "weir", ".", "dischargeCoeffReverse", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'DISCHARGE_COEFF_REVERSE %.6f\\n'", "%", "dischargeCoeffReverse", ")", "except", ":", "fileObject", ".", "write", "(", "'DISCHARGE_COEFF_REVERSE %s\\n'", "%", "dischargeCoeffReverse", ")", "if", "weir", ".", "crestLowLocation", "!=", "None", ":", "fileObject", ".", "write", "(", "'CREST_LOW_LOC %s\\n'", "%", "crestLowLocation", ")", "if", "weir", ".", "steepSlope", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'STEEP_SLOPE %.6f\\n'", "%", "steepSlope", ")", "except", ":", "fileObject", ".", "write", "(", "'STEEP_SLOPE %s\\n'", "%", "steepSlope", ")", "if", "weir", ".", "shallowSlope", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'SHALLOW_SLOPE %.6f\\n'", "%", "shallowSlope", ")", "except", ":", "fileObject", ".", "write", "(", "'SHALLOW_SLOPE %s\\n'", "%", "shallowSlope", ")", "# Write culverts to file", "for", "culvert", "in", "culverts", ":", "fileObject", ".", "write", "(", "'STRUCTTYPE %s\\n'", "%", "culvert", ".", "type", ")", "# Check for replacement vars", "upstreamInvert", "=", "vwp", "(", "culvert", ".", "upstreamInvert", ",", "replaceParamFile", ")", "downstreamInvert", "=", "vwp", "(", "culvert", ".", "downstreamInvert", ",", "replaceParamFile", ")", "inletDischargeCoeff", "=", "vwp", "(", "culvert", ".", "inletDischargeCoeff", ",", "replaceParamFile", ")", "reverseFlowDischargeCoeff", "=", "vwp", 
"(", "culvert", ".", "reverseFlowDischargeCoeff", ",", "replaceParamFile", ")", "slope", "=", "vwp", "(", "culvert", ".", "slope", ",", "replaceParamFile", ")", "length", "=", "vwp", "(", "culvert", ".", "length", ",", "replaceParamFile", ")", "roughness", "=", "vwp", "(", "culvert", ".", "roughness", ",", "replaceParamFile", ")", "diameter", "=", "vwp", "(", "culvert", ".", "diameter", ",", "replaceParamFile", ")", "width", "=", "vwp", "(", "culvert", ".", "width", ",", "replaceParamFile", ")", "height", "=", "vwp", "(", "culvert", ".", "height", ",", "replaceParamFile", ")", "if", "culvert", ".", "upstreamInvert", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'UPINVERT %.6f\\n'", "%", "upstreamInvert", ")", "except", ":", "fileObject", ".", "write", "(", "'UPINVERT %s\\n'", "%", "upstreamInvert", ")", "if", "culvert", ".", "downstreamInvert", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'DOWNINVERT %.6f\\n'", "%", "downstreamInvert", ")", "except", ":", "fileObject", ".", "write", "(", "'DOWNINVERT %s\\n'", "%", "downstreamInvert", ")", "if", "culvert", ".", "inletDischargeCoeff", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'INLET_DISCH_COEFF %.6f\\n'", "%", "inletDischargeCoeff", ")", "except", ":", "fileObject", ".", "write", "(", "'INLET_DISCH_COEFF %s\\n'", "%", "inletDischargeCoeff", ")", "if", "culvert", ".", "reverseFlowDischargeCoeff", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'REV_FLOW_DISCH_COEFF %.6f\\n'", "%", "reverseFlowDischargeCoeff", ")", "except", ":", "fileObject", ".", "write", "(", "'REV_FLOW_DISCH_COEFF %s\\n'", "%", "reverseFlowDischargeCoeff", ")", "if", "culvert", ".", "slope", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'SLOPE %.6f\\n'", "%", "slope", ")", "except", ":", "fileObject", ".", "write", "(", "'SLOPE %s\\n'", "%", "slope", ")", "if", "culvert", ".", "length", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'LENGTH %.6f\\n'", "%", "length", ")", "except", ":", "fileObject", ".", "write", "(", "'LENGTH %s\\n'", "%", "length", ")", "if", "culvert", ".", "roughness", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'ROUGH_COEFF %.6f\\n'", "%", "roughness", ")", "except", ":", "fileObject", ".", "write", "(", "'ROUGH_COEFF %s\\n'", "%", "roughness", ")", "if", "culvert", ".", "diameter", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'DIAMETER %.6f\\n'", "%", "diameter", ")", "except", ":", "fileObject", ".", "write", "(", "'DIAMETER %s\\n'", "%", "diameter", ")", "if", "culvert", ".", "width", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'WIDTH %.6f\\n'", "%", "width", ")", "except", ":", "fileObject", ".", "write", "(", "'WIDTH %s\\n'", "%", "width", ")", "if", "culvert", ".", "height", "!=", "None", ":", "try", ":", "fileObject", ".", "write", "(", "'HEIGHT %.6f\\n'", "%", "height", ")", "except", ":", "fileObject", ".", "write", "(", "'HEIGHT %s\\n'", "%", "height", ")" ]
Write Structure Link to File Method
[ "Write", "Structure", "Link", "to", "File", "Method" ]
python
train
44.129496
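Each field above repeats the same try/except: format the value as a fixed-precision float, and fall back to a plain string when it is a replacement parameter. A small helper showing that one pattern extracted; write_card and the sample values are hypothetical:

import io

def write_card(fileobj, card, value):
    if value is None:
        return
    try:
        fileobj.write("%s %.6f\n" % (card, value))   # numeric value
    except TypeError:
        fileobj.write("%s %s\n" % (card, value))     # e.g. a '[PARAM]' placeholder

buf = io.StringIO()
write_card(buf, "CREST_LENGTH", 12.0)
write_card(buf, "CREST_LENGTH", "[CREST_LEN]")
print(buf.getvalue())   # CREST_LENGTH 12.000000 / CREST_LENGTH [CREST_LEN]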
pantsbuild/pants
src/python/pants/engine/native.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/native.py#L341-L353
def extern_store_dict(self, context_handle, vals_ptr, vals_len): """Given storage and an array of Handles, return a new Handle to represent the dict. Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...). It is assumed that an even number of values were passed. """ c = self._ffi.from_handle(context_handle) tup = tuple(c.from_value(val[0]) for val in self._ffi.unpack(vals_ptr, vals_len)) d = dict() for i in range(0, len(tup), 2): d[tup[i]] = tup[i + 1] return c.to_value(d)
[ "def", "extern_store_dict", "(", "self", ",", "context_handle", ",", "vals_ptr", ",", "vals_len", ")", ":", "c", "=", "self", ".", "_ffi", ".", "from_handle", "(", "context_handle", ")", "tup", "=", "tuple", "(", "c", ".", "from_value", "(", "val", "[", "0", "]", ")", "for", "val", "in", "self", ".", "_ffi", ".", "unpack", "(", "vals_ptr", ",", "vals_len", ")", ")", "d", "=", "dict", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "tup", ")", ",", "2", ")", ":", "d", "[", "tup", "[", "i", "]", "]", "=", "tup", "[", "i", "+", "1", "]", "return", "c", ".", "to_value", "(", "d", ")" ]
Given storage and an array of Handles, return a new Handle to represent the dict. Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...). It is assumed that an even number of values were passed.
[ "Given", "storage", "and", "an", "array", "of", "Handles", "return", "a", "new", "Handle", "to", "represent", "the", "dict", "." ]
python
train
41.307692
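With the FFI plumbing stripped away, the key step is turning a flat, alternating sequence into a dict; as the docstring notes, an even number of values is assumed:

flat = ["host", "localhost", "port", 6379]     # key0, value0, key1, value1
d = {flat[i]: flat[i + 1] for i in range(0, len(flat), 2)}
print(d)                                       # {'host': 'localhost', 'port': 6379}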
ioos/cc-plugin-ncei
cc_plugin_ncei/ncei_base.py
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L459-L567
def check_recommended_global_attributes(self, dataset): ''' Check the global recommended attributes for 1.1 templates. These go an extra step besides just checking that they exist. :param netCDF4.Dataset dataset: An open netCDF dataset Basic "does it exist" checks are done in BaseNCEICheck:check_recommended :title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD) :summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD) :source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF) :platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC) :instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC) :uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC) :sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC) :id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD) :naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD) :time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD) :time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD) :time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD) :geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD) :geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD) :geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD) :geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD) :geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD) :geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. 
(ACDD) :geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. (ACDD) :geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD) :geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD) :institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD) :creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD) :creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD) :creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD) :project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD) :processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD) :references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF) :keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD) :keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD) :acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD) :comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF) :contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD) :contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD) :date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD) :date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD) :publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD) :publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD) :publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD) :history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD) :license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD) :metadata_link = "" ; //............................................. 
RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')

# Do any of the variables define platform ?
variable_defined_platform = any((hasattr(var, 'platform') for var in dataset.variables))
if not variable_defined_platform:
    platform_name = getattr(dataset, 'platform', '')
    recommended_ctx.assert_true(platform_name and platform_name in dataset.variables, 'platform should exist and point to a variable.')

sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
    recommended_ctx.assert_true(
        sea.lower() in sea_names,
        'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
    )

# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
    attr_value = getattr(dataset, attr, '')
    try:
        parse_datetime(attr_value)
        recommended_ctx.assert_true(True, '')  # Score it True!
    except ISO8601Error:
        recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))

units = getattr(dataset, 'geospatial_lat_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))

units = getattr(dataset, 'geospatial_lon_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))

value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))

# Both spellings of acknowledgement are accepted.
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')

contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
names = contributor_name.split(',')
roles = contributor_role.split(',')
recommended_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
recommended_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
recommended_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')

if hasattr(dataset, 'comment'):
    recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')

return recommended_ctx.to_result()
[ "def", "check_recommended_global_attributes", "(", "self", ",", "dataset", ")", ":", "recommended_ctx", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "'Recommended global attributes'", ")", "# Do any of the variables define platform ?", "variable_defined_platform", "=", "any", "(", "(", "hasattr", "(", "var", ",", "'platform'", ")", "for", "var", "in", "dataset", ".", "variables", ")", ")", "if", "not", "variable_defined_platform", ":", "platform_name", "=", "getattr", "(", "dataset", ",", "'platform'", ",", "''", ")", "recommended_ctx", ".", "assert_true", "(", "platform_name", "and", "platform_name", "in", "dataset", ".", "variables", ",", "'platform should exist and point to a variable.'", ")", "sea_names", "=", "[", "sn", ".", "lower", "(", ")", "for", "sn", "in", "util", ".", "get_sea_names", "(", ")", "]", "sea_name", "=", "getattr", "(", "dataset", ",", "'sea_name'", ",", "''", ")", "sea_name", "=", "sea_name", ".", "replace", "(", "', '", ",", "','", ")", "sea_name", "=", "sea_name", ".", "split", "(", "','", ")", "if", "sea_name", "else", "[", "]", "for", "sea", "in", "sea_name", ":", "recommended_ctx", ".", "assert_true", "(", "sea", ".", "lower", "(", ")", "in", "sea_names", ",", "'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'", ".", "format", "(", "sea", ")", ")", "# Parse dates, check for ISO 8601", "for", "attr", "in", "[", "'time_coverage_start'", ",", "'time_coverage_end'", ",", "'date_created'", ",", "'date_modified'", "]", ":", "attr_value", "=", "getattr", "(", "dataset", ",", "attr", ",", "''", ")", "try", ":", "parse_datetime", "(", "attr_value", ")", "recommended_ctx", ".", "assert_true", "(", "True", ",", "''", ")", "# Score it True!", "except", "ISO8601Error", ":", "recommended_ctx", ".", "assert_true", "(", "False", ",", "'{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'", ".", "format", "(", "attr", ",", "attr_value", ")", ")", "units", "=", "getattr", "(", "dataset", ",", "'geospatial_lat_units'", ",", "''", ")", ".", "lower", "(", ")", "recommended_ctx", ".", "assert_true", "(", "units", "==", "'degrees_north'", ",", "'geospatial_lat_units attribute should be degrees_north: {}'", ".", "format", "(", "units", ")", ")", "units", "=", "getattr", "(", "dataset", ",", "'geospatial_lon_units'", ",", "''", ")", ".", "lower", "(", ")", "recommended_ctx", ".", "assert_true", "(", "units", "==", "'degrees_east'", ",", "'geospatial_lon_units attribute should be degrees_east: {}'", ".", "format", "(", "units", ")", ")", "value", "=", "getattr", "(", "dataset", ",", "'geospatial_vertical_positive'", ",", "''", ")", "recommended_ctx", ".", "assert_true", "(", "value", ".", "lower", "(", ")", "in", "[", "'up'", ",", "'down'", "]", ",", "'geospatial_vertical_positive attribute should be up or down: {}'", ".", "format", "(", "value", ")", ")", "# I hate english.", "ack_exists", "=", "any", "(", "(", "getattr", "(", "dataset", ",", "attr", ",", "''", ")", "!=", "''", "for", "attr", "in", "[", "'acknowledgment'", ",", "'acknowledgement'", "]", ")", ")", "recommended_ctx", ".", "assert_true", "(", "ack_exists", ",", "'acknowledgement attribute should exist and not be empty'", ")", "contributor_name", "=", "getattr", "(", "dataset", ",", "'contributor_name'", ",", "''", ")", "contributor_role", "=", "getattr", "(", "dataset", ",", "'contributor_role'", ",", "''", ")", "names", "=", "contributor_role", ".", "split", "(", "','", ")", "roles", "=", "contributor_role", ".", "split", "(", "','", 
")", "recommended_ctx", ".", "assert_true", "(", "contributor_name", "!=", "''", ",", "'contributor_name should exist and not be empty.'", ")", "recommended_ctx", ".", "assert_true", "(", "len", "(", "names", ")", "==", "len", "(", "roles", ")", ",", "'length of contributor names matches length of roles'", ")", "recommended_ctx", ".", "assert_true", "(", "contributor_role", "!=", "''", ",", "'contributor_role should exist and not be empty.'", ")", "recommended_ctx", ".", "assert_true", "(", "len", "(", "names", ")", "==", "len", "(", "roles", ")", ",", "'length of contributor names matches length of roles'", ")", "if", "hasattr", "(", "dataset", ",", "'comment'", ")", ":", "recommended_ctx", ".", "assert_true", "(", "getattr", "(", "dataset", ",", "'comment'", ",", "''", ")", "!=", "''", ",", "'comment attribute should not be empty if specified'", ")", "return", "recommended_ctx", ".", "to_result", "(", ")" ]
Check the global recommended attributes for 1.1 templates. These go an extra step besides just checking that they exist. :param netCDF4.Dataset dataset: An open netCDF dataset Basic "does it exist" checks are done in BaseNCEICheck:check_recommended :title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD) :summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD) :source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF) :platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC) :instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC) :uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC) :sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC) :id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD) :naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD) :time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD) :time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD) :time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD) :geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD) :geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD) :geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD) :geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD) :geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD) :geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD) :geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD) :geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. 
(ACDD) :geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD) :geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD) :institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD) :creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD) :creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD) :creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD) :project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD) :processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD) :references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF) :keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD) :keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD) :acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD) :comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF) :contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD) :contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD) :date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD) :date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD) :publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD) :publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD) :publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD) :history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD) :license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD) :metadata_link = "" ; //............................................. RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD)
[ "Check", "the", "global", "recommended", "attributes", "for", "1", ".", "1", "templates", ".", "These", "go", "an", "extra", "step", "besides", "just", "checking", "that", "they", "exist", "." ]
python
train
98.192661
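The date checks accept any ISO 8601 value and score a failure otherwise. A simplified stand-in using datetime.fromisoformat; the real check uses a full ISO 8601 parser, which also accepts durations such as PT1M30S, while fromisoformat does not:

from datetime import datetime

def looks_iso8601(value):
    try:
        datetime.fromisoformat(value)
        return True
    except (TypeError, ValueError):
        return False

print(looks_iso8601("2021-06-01T12:00:00"))  # True
print(looks_iso8601("June 1st, 2021"))       # False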
gem/oq-engine
openquake/hazardlib/mfd/youngs_coppersmith_1985.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/mfd/youngs_coppersmith_1985.py#L91-L94
def get_min_max_mag(self):
    "Return the minimum and maximum magnitudes"
    mag, num_bins = self._get_min_mag_and_num_bins()
    return mag, mag + self.bin_width * (num_bins - 1)
[ "def", "get_min_max_mag", "(", "self", ")", ":", "mag", ",", "num_bins", "=", "self", ".", "_get_min_mag_and_num_bins", "(", ")", "return", "mag", ",", "mag", "+", "self", ".", "bin_width", "*", "(", "num_bins", "-", "1", ")" ]
Return the minimum and maximum magnitudes
[ "Return", "the", "minimum", "and", "maximum", "magnitudes" ]
python
train
47.75
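With evenly spaced bins, the maximum sits bin_width * (num_bins - 1) above the minimum, i.e. at the last bin's midpoint under the usual bin-centre convention. A worked example with made-up numbers:

min_mag, bin_width, num_bins = 5.0, 0.1, 11
max_mag = min_mag + bin_width * (num_bins - 1)
print(max_mag)   # 6.0 -- ten bin-widths above the first bin centre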
orbingol/NURBS-Python
geomdl/_operations.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_operations.py#L60-L81
def normal_curve_single(obj, u, normalize): """ Evaluates the curve normal vector at the input parameter, u. Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u. The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself. :param obj: input curve :type obj: abstract.Curve :param u: parameter :type u: float :param normalize: if True, the returned vector is converted to a unit vector :type normalize: bool :return: a list containing "point" and "vector" pairs :rtype: tuple """ # 2nd derivative of the curve gives the normal ders = obj.derivatives(u, 2) point = ders[0] vector = linalg.vector_normalize(ders[2]) if normalize else ders[2] return tuple(point), tuple(vector)
[ "def", "normal_curve_single", "(", "obj", ",", "u", ",", "normalize", ")", ":", "# 2nd derivative of the curve gives the normal", "ders", "=", "obj", ".", "derivatives", "(", "u", ",", "2", ")", "point", "=", "ders", "[", "0", "]", "vector", "=", "linalg", ".", "vector_normalize", "(", "ders", "[", "2", "]", ")", "if", "normalize", "else", "ders", "[", "2", "]", "return", "tuple", "(", "point", ")", ",", "tuple", "(", "vector", ")" ]
Evaluates the curve normal vector at the input parameter, u. Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u. The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself. :param obj: input curve :type obj: abstract.Curve :param u: parameter :type u: float :param normalize: if True, the returned vector is converted to a unit vector :type normalize: bool :return: a list containing "point" and "vector" pairs :rtype: tuple
[ "Evaluates", "the", "curve", "normal", "vector", "at", "the", "input", "parameter", "u", "." ]
python
train
36.727273
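A usage sketch, assuming a geomdl B-spline curve built the usual way (degree, control points, knot vector; all values hypothetical). The module is internal, so the import path just mirrors the file shown above:

# Sketch: calling the internal curve-normal helper on a small B-spline curve.
# Control points and the parameter value are hypothetical.
from geomdl import BSpline, utilities
from geomdl import _operations

curve = BSpline.Curve()
curve.degree = 3
curve.ctrlpts = [[0, 0], [1, 2], [2, -1], [3, 1], [4, 0]]
curve.knotvector = utilities.generate_knot_vector(curve.degree, len(curve.ctrlpts))

point, vector = _operations.normal_curve_single(curve, 0.35, True)
print(point, vector)  # origin of the normal, then the normalized vector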
PredixDev/predixpy
predix/admin/cf/apps.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/apps.py#L16-L24
def get_app_guid(self, app_name): """ Returns the GUID for the app instance with the given name. """ summary = self.space.get_space_summary() for app in summary['apps']: if app['name'] == app_name: return app['guid']
[ "def", "get_app_guid", "(", "self", ",", "app_name", ")", ":", "summary", "=", "self", ".", "space", ".", "get_space_summary", "(", ")", "for", "app", "in", "summary", "[", "'apps'", "]", ":", "if", "app", "[", "'name'", "]", "==", "app_name", ":", "return", "app", "[", "'guid'", "]" ]
Returns the GUID for the app instance with the given name.
[ "Returns", "the", "GUID", "for", "the", "app", "instance", "with", "the", "given", "name", "." ]
python
train
31.555556
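A usage sketch (the app name is hypothetical, and the class name App is assumed from the module path; a logged-in Cloud Foundry session with a target space is presumed by the predixpy admin tooling). Note the method implicitly returns None when no app matches:

# Sketch: resolving an app name to its GUID (client setup assumed/hypothetical).
from predix.admin.cf.apps import App

apps = App()  # assumes an authenticated Cloud Foundry session and space
guid = apps.get_app_guid("my-web-app")
if guid is None:
    print("no app named 'my-web-app' in this space")
else:
    print("GUID:", guid)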
Autodesk/aomi
aomi/model/backend.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/backend.py#L165-L167
def unmount(self, client): """Unmounts a backend within Vault""" getattr(client, self.unmount_fun)(mount_point=self.path)
[ "def", "unmount", "(", "self", ",", "client", ")", ":", "getattr", "(", "client", ",", "self", ".", "unmount_fun", ")", "(", "mount_point", "=", "self", ".", "path", ")" ]
Unmounts a backend within Vault
[ "Unmounts", "a", "backend", "within", "Vault" ]
python
train
45
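The body is a one-line dynamic dispatch: resolve the method name stored in unmount_fun on the client and call it with the backend's mount point. A self-contained sketch of that pattern, with hypothetical stand-ins for the real hvac client:

# Minimal sketch of the getattr-based dispatch used by unmount().
# FakeClient and the attribute values are hypothetical stand-ins.
class FakeClient:
    def disable_secret_backend(self, mount_point):
        print("unmounting", mount_point)

class Backend:
    unmount_fun = "disable_secret_backend"
    path = "secret/demo"

    def unmount(self, client):
        """Unmounts a backend within Vault"""
        getattr(client, self.unmount_fun)(mount_point=self.path)

Backend().unmount(FakeClient())  # prints: unmounting secret/demo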
shazow/workerpool
workerpool/pools.py
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/workerpool/pools.py#L89-L103
def map(self, fn, *seq):
    """Perform a map operation distributed among the workers. Will
    block until done.
    """
    results = Queue()
    args = zip(*seq)
    for seq in args:
        j = SimpleJob(results, fn, seq)
        self.put(j)

    # Aggregate results (relies on Python 2 zip() returning a list; a
    # Python 3 zip iterator would already be exhausted by the loop above).
    r = []
    for i in range(len(list(args))):
        r.append(results.get())
    return r
[ "def", "map", "(", "self", ",", "fn", ",", "*", "seq", ")", ":", "\"block until done.\"", "results", "=", "Queue", "(", ")", "args", "=", "zip", "(", "*", "seq", ")", "for", "seq", "in", "args", ":", "j", "=", "SimpleJob", "(", "results", ",", "fn", ",", "seq", ")", "self", ".", "put", "(", "j", ")", "# Aggregate results", "r", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "list", "(", "args", ")", ")", ")", ":", "r", ".", "append", "(", "results", ".", "get", "(", ")", ")", "return", "r" ]
Perform a map operation distributed among the workers. Will block until done.
[ "Perform", "a", "map", "operation", "distributed", "among", "the", "workers", ".", "Will", "block", "until", "done", "." ]
python
train
26.133333
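A usage sketch with the library's WorkerPool (pool size and inputs are hypothetical; the top-level import path is assumed from the package layout). As noted in the comment above, the result aggregation depends on Python 2 list semantics for zip():

# Sketch: distributing a function over a sequence with the pool's map().
from workerpool import WorkerPool

pool = WorkerPool(size=4)
results = pool.map(len, ["alpha", "be", "gamma"])  # lengths computed by workers
pool.shutdown()
print(results)  # e.g. [5, 2, 5]; completion order is not guaranteed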
blockstack/pybitcoin
pybitcoin/rpc/bitcoind_client.py
https://github.com/blockstack/pybitcoin/blob/92c8da63c40f7418594b1ce395990c3f5a4787cc/pybitcoin/rpc/bitcoind_client.py#L138-L146
def broadcast_transaction(self, hex_tx): """ Dispatch a raw transaction to the network. """ resp = self.obj.sendrawtransaction(hex_tx) if len(resp) > 0: return {'transaction_hash': resp, 'success': True} else: return error_reply('Invalid response from bitcoind.')
[ "def", "broadcast_transaction", "(", "self", ",", "hex_tx", ")", ":", "resp", "=", "self", ".", "obj", ".", "sendrawtransaction", "(", "hex_tx", ")", "if", "len", "(", "resp", ")", ">", "0", ":", "return", "{", "'transaction_hash'", ":", "resp", ",", "'success'", ":", "True", "}", "else", ":", "return", "error_reply", "(", "'Invalid response from bitcoind.'", ")" ]
Dispatch a raw transaction to the network.
[ "Dispatch", "a", "raw", "transaction", "to", "the", "network", "." ]
python
train
35.555556
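A usage sketch (connection details and the hex transaction are hypothetical, and the exact constructor signature of the bitcoind client is an assumption, since only this method is shown):

# Sketch: pushing a signed raw transaction through the client.
# Credentials, host, and the hex payload are all hypothetical.
from pybitcoin.rpc.bitcoind_client import BitcoindClient

client = BitcoindClient(server="127.0.0.1", port=8332,
                        user="rpcuser", passwd="rpcpass")
signed_hex_tx = "0100000001..."  # a fully signed raw transaction, abbreviated
resp = client.broadcast_transaction(signed_hex_tx)
if resp.get("success"):
    print("txid:", resp["transaction_hash"])
else:
    print(resp)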
aiogram/aiogram
aiogram/dispatcher/dispatcher.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/dispatcher.py#L987-L1019
def async_task(self, func): """ Execute handler as task and return None. Use this decorator for slow handlers (with timeouts) .. code-block:: python3 @dp.message_handler(commands=['command']) @dp.async_task async def cmd_with_timeout(message: types.Message): await asyncio.sleep(120) return SendMessage(message.chat.id, 'KABOOM').reply(message) :param func: :return: """ def process_response(task): try: response = task.result() except Exception as e: self.loop.create_task( self.errors_handlers.notify(types.Update.get_current(), e)) else: if isinstance(response, BaseResponse): self.loop.create_task(response.execute_response(self.bot)) @functools.wraps(func) async def wrapper(*args, **kwargs): task = self.loop.create_task(func(*args, **kwargs)) task.add_done_callback(process_response) return wrapper
[ "def", "async_task", "(", "self", ",", "func", ")", ":", "def", "process_response", "(", "task", ")", ":", "try", ":", "response", "=", "task", ".", "result", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "loop", ".", "create_task", "(", "self", ".", "errors_handlers", ".", "notify", "(", "types", ".", "Update", ".", "get_current", "(", ")", ",", "e", ")", ")", "else", ":", "if", "isinstance", "(", "response", ",", "BaseResponse", ")", ":", "self", ".", "loop", ".", "create_task", "(", "response", ".", "execute_response", "(", "self", ".", "bot", ")", ")", "@", "functools", ".", "wraps", "(", "func", ")", "async", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "task", "=", "self", ".", "loop", ".", "create_task", "(", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "task", ".", "add_done_callback", "(", "process_response", ")", "return", "wrapper" ]
Execute handler as task and return None. Use this decorator for slow handlers (with timeouts) .. code-block:: python3 @dp.message_handler(commands=['command']) @dp.async_task async def cmd_with_timeout(message: types.Message): await asyncio.sleep(120) return SendMessage(message.chat.id, 'KABOOM').reply(message) :param func: :return:
[ "Execute", "handler", "as", "task", "and", "return", "None", ".", "Use", "this", "decorator", "for", "slow", "handlers", "(", "with", "timeouts", ")" ]
python
train
32.818182
rcbops/rpc_differ
rpc_differ/rpc_differ.py
https://github.com/rcbops/rpc_differ/blob/07c9e645b13f9af15d58bad533753d3a9447b78a/rpc_differ/rpc_differ.py#L51-L186
def create_parser(): """Setup argument Parsing.""" description = """RPC Release Diff Generator -------------------------- Finds changes in OpenStack-Ansible, OpenStack-Ansible roles, and OpenStack projects between two RPC-OpenStack revisions. """ parser = argparse.ArgumentParser( usage='%(prog)s', description=description, epilog='Licensed "Apache 2.0"', formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument( 'old_commit', action='store', nargs=1, help="Git SHA of the older commit", ) parser.add_argument( 'new_commit', action='store', nargs=1, help="Git SHA of the newer commit", ) parser.add_argument( '--debug', action='store_true', default=False, help="Enable debug output", ) parser.add_argument( '--verbose', action='store_true', default=False, help="Enable verbose output", ) parser.add_argument( '-d', '--directory', action='store', default="~/.osa-differ", help="Git repo storage directory (default: ~/.osa-differ)", ) parser.add_argument( '-rroc', '--role-requirements-old-commit', action='store', default=None, help=( "Name of the Ansible role requirements file to read from the old " "commit, defaults to value of `--role-requirements`." ), ) parser.add_argument( '-rr', '--role-requirements', action='store', default=ROLE_REQ_FILE, help="Name of the ansible role requirements file to read", ) parser.add_argument( '-r', '--rpc-repo-url', action='store', default="https://github.com/rcbops/rpc-openstack", help="Github repository for the rpc-openstack project" ) parser.add_argument( '--osa-repo-url', action='store', default="https://git.openstack.org/openstack/openstack-ansible", help="URL of the openstack-ansible git repo" ) parser.add_argument( '-rpoc', '--rpc-product-old-commit', action='store', default=None, help=( "Set the RPC product version for the old commit, defaults to " "value of `--rpc-product`." ) ) parser.add_argument( '-rp', '--rpc-product', action='store', default="master", help="Set the RPC product version" ) parser.add_argument( '-u', '--update', action='store_true', default=False, help="Fetch latest changes to repo", ) parser.add_argument( '--version-mappings', action=osa_differ.VersionMappingsAction, help=( "Map dependency versions in cases where the old version no longer " "exists. The argument should be of the form " "'repo-name;old-version1:new-version1;old-version2:new-version2'." ), ) display_opts = parser.add_argument_group("Limit scope") display_opts.add_argument( "--skip-projects", action="store_true", help="Skip checking for changes in OpenStack projects" ) display_opts.add_argument( "--skip-roles", action="store_true", help="Skip checking for changes in OpenStack-Ansible roles" ) output_desc = ("Output is printed to stdout by default.") output_opts = parser.add_argument_group('Output options', output_desc) output_opts.add_argument( '--quiet', action='store_true', default=False, help="Do not output to stdout", ) output_opts.add_argument( '--gist', action='store_true', default=False, help="Output into a GitHub Gist", ) output_opts.add_argument( '--file', metavar="FILENAME", action='store', help="Output to a file", ) return parser
[ "def", "create_parser", "(", ")", ":", "description", "=", "\"\"\"RPC Release Diff Generator\n--------------------------\n\nFinds changes in OpenStack-Ansible, OpenStack-Ansible roles, and OpenStack\nprojects between two RPC-OpenStack revisions.\n\n\"\"\"", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "'%(prog)s'", ",", "description", "=", "description", ",", "epilog", "=", "'Licensed \"Apache 2.0\"'", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "parser", ".", "add_argument", "(", "'old_commit'", ",", "action", "=", "'store'", ",", "nargs", "=", "1", ",", "help", "=", "\"Git SHA of the older commit\"", ",", ")", "parser", ".", "add_argument", "(", "'new_commit'", ",", "action", "=", "'store'", ",", "nargs", "=", "1", ",", "help", "=", "\"Git SHA of the newer commit\"", ",", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Enable debug output\"", ",", ")", "parser", ".", "add_argument", "(", "'--verbose'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Enable verbose output\"", ",", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--directory'", ",", "action", "=", "'store'", ",", "default", "=", "\"~/.osa-differ\"", ",", "help", "=", "\"Git repo storage directory (default: ~/.osa-differ)\"", ",", ")", "parser", ".", "add_argument", "(", "'-rroc'", ",", "'--role-requirements-old-commit'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "help", "=", "(", "\"Name of the Ansible role requirements file to read from the old \"", "\"commit, defaults to value of `--role-requirements`.\"", ")", ",", ")", "parser", ".", "add_argument", "(", "'-rr'", ",", "'--role-requirements'", ",", "action", "=", "'store'", ",", "default", "=", "ROLE_REQ_FILE", ",", "help", "=", "\"Name of the ansible role requirements file to read\"", ",", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--rpc-repo-url'", ",", "action", "=", "'store'", ",", "default", "=", "\"https://github.com/rcbops/rpc-openstack\"", ",", "help", "=", "\"Github repository for the rpc-openstack project\"", ")", "parser", ".", "add_argument", "(", "'--osa-repo-url'", ",", "action", "=", "'store'", ",", "default", "=", "\"https://git.openstack.org/openstack/openstack-ansible\"", ",", "help", "=", "\"URL of the openstack-ansible git repo\"", ")", "parser", ".", "add_argument", "(", "'-rpoc'", ",", "'--rpc-product-old-commit'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the RPC product version for the old commit, defaults to \"", "\"value of `--rpc-product`.\"", ")", ")", "parser", ".", "add_argument", "(", "'-rp'", ",", "'--rpc-product'", ",", "action", "=", "'store'", ",", "default", "=", "\"master\"", ",", "help", "=", "\"Set the RPC product version\"", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--update'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Fetch latest changes to repo\"", ",", ")", "parser", ".", "add_argument", "(", "'--version-mappings'", ",", "action", "=", "osa_differ", ".", "VersionMappingsAction", ",", "help", "=", "(", "\"Map dependency versions in cases where the old version no longer \"", "\"exists. 
The argument should be of the form \"", "\"'repo-name;old-version1:new-version1;old-version2:new-version2'.\"", ")", ",", ")", "display_opts", "=", "parser", ".", "add_argument_group", "(", "\"Limit scope\"", ")", "display_opts", ".", "add_argument", "(", "\"--skip-projects\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Skip checking for changes in OpenStack projects\"", ")", "display_opts", ".", "add_argument", "(", "\"--skip-roles\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Skip checking for changes in OpenStack-Ansible roles\"", ")", "output_desc", "=", "(", "\"Output is printed to stdout by default.\"", ")", "output_opts", "=", "parser", ".", "add_argument_group", "(", "'Output options'", ",", "output_desc", ")", "output_opts", ".", "add_argument", "(", "'--quiet'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Do not output to stdout\"", ",", ")", "output_opts", ".", "add_argument", "(", "'--gist'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Output into a GitHub Gist\"", ",", ")", "output_opts", ".", "add_argument", "(", "'--file'", ",", "metavar", "=", "\"FILENAME\"", ",", "action", "=", "'store'", ",", "help", "=", "\"Output to a file\"", ",", ")", "return", "parser" ]
Setup argument Parsing.
[ "Setup", "argument", "Parsing", "." ]
python
train
28.345588
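A usage sketch (commit SHAs and the output file name are hypothetical). Because both positional arguments are declared with nargs=1, each arrives as a one-element list:

# Sketch: building the parser and reading back a hypothetical invocation.
from rpc_differ.rpc_differ import create_parser

parser = create_parser()
args = parser.parse_args(
    ["abc1234", "def5678", "--skip-roles", "--file", "diff.rst"]
)
print(args.old_commit[0], "->", args.new_commit[0])  # nargs=1 yields lists
print(args.skip_roles, args.file)                    # True diff.rst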
asascience-open/paegan-transport
paegan/transport/shoreline.py
https://github.com/asascience-open/paegan-transport/blob/99a7f4ea24f0f42d9b34d1fb0e87ab2c49315bd3/paegan/transport/shoreline.py#L104-L173
def intersect(self, **kwargs): """ Intersect a Line or Point Collection and the Shoreline Returns the point of intersection along the coastline Should also return a linestring buffer around the intersection point so we can calculate the direction to bounce a particle. """ ls = None if "linestring" in kwargs: ls = kwargs.pop('linestring') spoint = Point(ls.coords[0]) epoint = Point(ls.coords[-1]) elif "start_point" in kwargs and "end_point" in kwargs: spoint = kwargs.get('start_point') epoint = kwargs.get('end_point') ls = LineString(list(spoint.coords) + list(epoint.coords)) elif "single_point" in kwargs: spoint = kwargs.get('single_point') epoint = None ls = LineString(list(spoint.coords) + list(spoint.coords)) else: raise TypeError( "must provide a LineString geometry object, (2) Point geometry objects, or (1) Point geometry object" ) inter = False # If the current point lies outside of our current shapefile index, # re-query the shapefile in a buffer around this point if self._spatial_query_object is None or (self._spatial_query_object and not ls.within(self._spatial_query_object)): self.index(point=spoint) for element in self._geoms: prepped_element = prep(element) # Test if starting on land if prepped_element.contains(spoint): if epoint is None: # If we only passed in one point, return the intersection is true. return {'point': spoint, 'feature': None} else: # If we are testing a linestring, raise an exception that we started on land. raise Exception('Starting point on land: %s %s %s' % (spoint.envelope, epoint.envelope, element.envelope)) else: # If we are just checking a single point, continue looping. if epoint is None: continue inter = ls.intersection(element) if inter: # Return the first point in the linestring, and the linestring that it hit if isinstance(inter, MultiLineString): inter = inter.geoms[0] inter = Point(inter.coords[0]) smaller_int = inter.buffer(self._spatialbuffer) shorelines = element.exterior.intersection(smaller_int) if isinstance(shorelines, LineString): shorelines = [shorelines] else: shorelines = list(shorelines) for shore_segment in shorelines: # Once we find the linestring in the Polygon that was # intersected, break out and return if ls.touches(shore_segment): break return {'point': Point(inter.x, inter.y, 0), 'feature': shore_segment or None} return None
[ "def", "intersect", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ls", "=", "None", "if", "\"linestring\"", "in", "kwargs", ":", "ls", "=", "kwargs", ".", "pop", "(", "'linestring'", ")", "spoint", "=", "Point", "(", "ls", ".", "coords", "[", "0", "]", ")", "epoint", "=", "Point", "(", "ls", ".", "coords", "[", "-", "1", "]", ")", "elif", "\"start_point\"", "and", "\"end_point\"", "in", "kwargs", ":", "spoint", "=", "kwargs", ".", "get", "(", "'start_point'", ")", "epoint", "=", "kwargs", ".", "get", "(", "'end_point'", ")", "ls", "=", "LineString", "(", "list", "(", "spoint", ".", "coords", ")", "+", "list", "(", "epoint", ".", "coords", ")", ")", "elif", "\"single_point\"", "in", "kwargs", ":", "spoint", "=", "kwargs", ".", "get", "(", "'single_point'", ")", "epoint", "=", "None", "ls", "=", "LineString", "(", "list", "(", "spoint", ".", "coords", ")", "+", "list", "(", "spoint", ".", "coords", ")", ")", "else", ":", "raise", "TypeError", "(", "\"must provide a LineString geometry object, (2) Point geometry objects, or (1) Point geometry object\"", ")", "inter", "=", "False", "# If the current point lies outside of our current shapefile index,", "# re-query the shapefile in a buffer around this point", "if", "self", ".", "_spatial_query_object", "is", "None", "or", "(", "self", ".", "_spatial_query_object", "and", "not", "ls", ".", "within", "(", "self", ".", "_spatial_query_object", ")", ")", ":", "self", ".", "index", "(", "point", "=", "spoint", ")", "for", "element", "in", "self", ".", "_geoms", ":", "prepped_element", "=", "prep", "(", "element", ")", "# Test if starting on land", "if", "prepped_element", ".", "contains", "(", "spoint", ")", ":", "if", "epoint", "is", "None", ":", "# If we only passed in one point, return the intersection is true.", "return", "{", "'point'", ":", "spoint", ",", "'feature'", ":", "None", "}", "else", ":", "# If we are testing a linestring, raise an exception that we started on land.", "raise", "Exception", "(", "'Starting point on land: %s %s %s'", "%", "(", "spoint", ".", "envelope", ",", "epoint", ".", "envelope", ",", "element", ".", "envelope", ")", ")", "else", ":", "# If we are just checking a single point, continue looping.", "if", "epoint", "is", "None", ":", "continue", "inter", "=", "ls", ".", "intersection", "(", "element", ")", "if", "inter", ":", "# Return the first point in the linestring, and the linestring that it hit", "if", "isinstance", "(", "inter", ",", "MultiLineString", ")", ":", "inter", "=", "inter", ".", "geoms", "[", "0", "]", "inter", "=", "Point", "(", "inter", ".", "coords", "[", "0", "]", ")", "smaller_int", "=", "inter", ".", "buffer", "(", "self", ".", "_spatialbuffer", ")", "shorelines", "=", "element", ".", "exterior", ".", "intersection", "(", "smaller_int", ")", "if", "isinstance", "(", "shorelines", ",", "LineString", ")", ":", "shorelines", "=", "[", "shorelines", "]", "else", ":", "shorelines", "=", "list", "(", "shorelines", ")", "for", "shore_segment", "in", "shorelines", ":", "# Once we find the linestring in the Polygon that was", "# intersected, break out and return", "if", "ls", ".", "touches", "(", "shore_segment", ")", ":", "break", "return", "{", "'point'", ":", "Point", "(", "inter", ".", "x", ",", "inter", ".", "y", ",", "0", ")", ",", "'feature'", ":", "shore_segment", "or", "None", "}", "return", "None" ]
Intersect a Line or Point Collection and the Shoreline Returns the point of intersection along the coastline Should also return a linestring buffer around the intersection point so we can calculate the direction to bounce a particle.
[ "Intersect", "a", "Line", "or", "Point", "Collection", "and", "the", "Shoreline" ]
python
train
43.842857
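A usage sketch for the start/end form (the shapefile path, the class construction, and the coordinates are all hypothetical assumptions, not taken from the excerpt):

# Sketch: testing one particle step against the shoreline.
# Shoreline construction and all coordinates are hypothetical.
from shapely.geometry import Point
from paegan.transport.shoreline import Shoreline

shore = Shoreline(file="coastline.shp")
hit = shore.intersect(start_point=Point(-70.00, 42.00),
                      end_point=Point(-70.20, 42.10))
if hit is not None:
    print("crossed the coast at", hit["point"])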
hydpy-dev/hydpy
hydpy/core/filetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/filetools.py#L873-L880
def save_file(self, filename, text): """Save the given text under the given control filename and the current path.""" if not filename.endswith('.py'): filename += '.py' path = os.path.join(self.currentpath, filename) with open(path, 'w', encoding="utf-8") as file_: file_.write(text)
[ "def", "save_file", "(", "self", ",", "filename", ",", "text", ")", ":", "if", "not", "filename", ".", "endswith", "(", "'.py'", ")", ":", "filename", "+=", "'.py'", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "currentpath", ",", "filename", ")", "with", "open", "(", "path", ",", "'w'", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_", ":", "file_", ".", "write", "(", "text", ")" ]
Save the given text under the given control filename and the current path.
[ "Save", "the", "given", "text", "under", "the", "given", "control", "filename", "and", "the", "current", "path", "." ]
python
train
42.5
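The method's one subtlety is the suffix handling: a missing .py extension is appended before the file is written under currentpath. A standalone sketch of that behavior (directory and contents hypothetical):

# Standalone sketch of save_file's suffix handling (paths hypothetical).
import os

currentpath = "/tmp/controlfiles"
os.makedirs(currentpath, exist_ok=True)

filename = "land_dill"            # no ".py" suffix supplied...
if not filename.endswith(".py"):
    filename += ".py"             # ...so one is appended, as in save_file
with open(os.path.join(currentpath, filename), "w", encoding="utf-8") as file_:
    file_.write("x = 1\n")        # writes /tmp/controlfiles/land_dill.py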
spacetelescope/stsci.tools
lib/stsci/tools/configobj.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/configobj.py#L1957-L1982
def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} section[entry]._created = True if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry]
[ "def", "_set_configspec", "(", "self", ",", "section", ",", "copy", ")", ":", "configspec", "=", "section", ".", "configspec", "many", "=", "configspec", ".", "get", "(", "'__many__'", ")", "if", "isinstance", "(", "many", ",", "dict", ")", ":", "for", "entry", "in", "section", ".", "sections", ":", "if", "entry", "not", "in", "configspec", ":", "section", "[", "entry", "]", ".", "configspec", "=", "many", "for", "entry", "in", "configspec", ".", "sections", ":", "if", "entry", "==", "'__many__'", ":", "continue", "if", "entry", "not", "in", "section", ":", "section", "[", "entry", "]", "=", "{", "}", "section", "[", "entry", "]", ".", "_created", "=", "True", "if", "copy", ":", "# copy comments", "section", ".", "comments", "[", "entry", "]", "=", "configspec", ".", "comments", ".", "get", "(", "entry", ",", "[", "]", ")", "section", ".", "inline_comments", "[", "entry", "]", "=", "configspec", ".", "inline_comments", ".", "get", "(", "entry", ",", "''", ")", "# Could be a scalar when we expect a section", "if", "isinstance", "(", "section", "[", "entry", "]", ",", "Section", ")", ":", "section", "[", "entry", "]", ".", "configspec", "=", "configspec", "[", "entry", "]" ]
Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__
[ "Called", "by", "validate", ".", "Handles", "setting", "the", "configspec", "on", "subsections", "including", "sections", "to", "be", "validated", "by", "__many__" ]
python
train
40.730769
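The __many__ handling above is what lets a single spec section validate arbitrarily named subsections. A sketch using the standalone configobj package (the module above is a bundled copy of the same code; the spec and values are hypothetical):

# Sketch: one "__many__" spec section applied to every named subsection.
from configobj import ConfigObj
from validate import Validator

configspec = """
[servers]
[[__many__]]
port = integer(1, 65535)
""".splitlines()

config = ConfigObj("""
[servers]
[[alpha]]
port = 8080
[[beta]]
port = 9090
""".splitlines(), configspec=configspec)

print(config.validate(Validator()))              # True: both subsections checked
print(type(config["servers"]["alpha"]["port"]))  # <class 'int'>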
pyroscope/pyrocore
src/pyrocore/util/metafile.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/util/metafile.py#L611-L671
def create(self, datapath, tracker_urls, comment=None, root_name=None, created_by=None, private=False, no_date=False, progress=None, callback=None): """ Create a metafile with the path given on object creation. Returns the last metafile dict that was written (as an object, not bencoded). """ if datapath: self.datapath = datapath try: tracker_urls = ['' + tracker_urls] except TypeError: tracker_urls = list(tracker_urls) multi_mode = len(tracker_urls) > 1 # TODO add optimization so the hashing happens only once for multiple URLs! for tracker_url in tracker_urls: # Lookup announce URLs from config file try: if urlparse.urlparse(tracker_url).scheme: tracker_alias = urlparse.urlparse(tracker_url).netloc.split(':')[0].split('.') tracker_alias = tracker_alias[-2 if len(tracker_alias) > 1 else 0] else: tracker_alias, tracker_url = config.lookup_announce_alias(tracker_url) tracker_url = tracker_url[0] except (KeyError, IndexError): raise error.UserError("Bad tracker URL %r, or unknown alias!" % (tracker_url,)) # Determine metafile name output_name = self.filename if multi_mode: # Add 2nd level of announce URL domain to metafile name output_name = list(os.path.splitext(output_name)) try: output_name[1:1] = '-' + tracker_alias except (IndexError,): self.LOG.error("Malformed announce URL %r, skipping!" % (tracker_url,)) continue output_name = ''.join(output_name) # Hash the data self.LOG.info("Creating %r for %s %r..." % ( output_name, "filenames read from" if self._fifo else "data in", self.datapath, )) meta, _ = self._make_meta(tracker_url, root_name, private, progress) # Add optional fields if comment: meta["comment"] = comment if created_by: meta["created by"] = created_by if not no_date: meta["creation date"] = int(time.time()) if callback: callback(meta) # Write metafile to disk self.LOG.debug("Writing %r..." % (output_name,)) bencode.bwrite(output_name, meta) return meta
[ "def", "create", "(", "self", ",", "datapath", ",", "tracker_urls", ",", "comment", "=", "None", ",", "root_name", "=", "None", ",", "created_by", "=", "None", ",", "private", "=", "False", ",", "no_date", "=", "False", ",", "progress", "=", "None", ",", "callback", "=", "None", ")", ":", "if", "datapath", ":", "self", ".", "datapath", "=", "datapath", "try", ":", "tracker_urls", "=", "[", "''", "+", "tracker_urls", "]", "except", "TypeError", ":", "tracker_urls", "=", "list", "(", "tracker_urls", ")", "multi_mode", "=", "len", "(", "tracker_urls", ")", ">", "1", "# TODO add optimization so the hashing happens only once for multiple URLs!", "for", "tracker_url", "in", "tracker_urls", ":", "# Lookup announce URLs from config file", "try", ":", "if", "urlparse", ".", "urlparse", "(", "tracker_url", ")", ".", "scheme", ":", "tracker_alias", "=", "urlparse", ".", "urlparse", "(", "tracker_url", ")", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "split", "(", "'.'", ")", "tracker_alias", "=", "tracker_alias", "[", "-", "2", "if", "len", "(", "tracker_alias", ")", ">", "1", "else", "0", "]", "else", ":", "tracker_alias", ",", "tracker_url", "=", "config", ".", "lookup_announce_alias", "(", "tracker_url", ")", "tracker_url", "=", "tracker_url", "[", "0", "]", "except", "(", "KeyError", ",", "IndexError", ")", ":", "raise", "error", ".", "UserError", "(", "\"Bad tracker URL %r, or unknown alias!\"", "%", "(", "tracker_url", ",", ")", ")", "# Determine metafile name", "output_name", "=", "self", ".", "filename", "if", "multi_mode", ":", "# Add 2nd level of announce URL domain to metafile name", "output_name", "=", "list", "(", "os", ".", "path", ".", "splitext", "(", "output_name", ")", ")", "try", ":", "output_name", "[", "1", ":", "1", "]", "=", "'-'", "+", "tracker_alias", "except", "(", "IndexError", ",", ")", ":", "self", ".", "LOG", ".", "error", "(", "\"Malformed announce URL %r, skipping!\"", "%", "(", "tracker_url", ",", ")", ")", "continue", "output_name", "=", "''", ".", "join", "(", "output_name", ")", "# Hash the data", "self", ".", "LOG", ".", "info", "(", "\"Creating %r for %s %r...\"", "%", "(", "output_name", ",", "\"filenames read from\"", "if", "self", ".", "_fifo", "else", "\"data in\"", ",", "self", ".", "datapath", ",", ")", ")", "meta", ",", "_", "=", "self", ".", "_make_meta", "(", "tracker_url", ",", "root_name", ",", "private", ",", "progress", ")", "# Add optional fields", "if", "comment", ":", "meta", "[", "\"comment\"", "]", "=", "comment", "if", "created_by", ":", "meta", "[", "\"created by\"", "]", "=", "created_by", "if", "not", "no_date", ":", "meta", "[", "\"creation date\"", "]", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "callback", ":", "callback", "(", "meta", ")", "# Write metafile to disk", "self", ".", "LOG", ".", "debug", "(", "\"Writing %r...\"", "%", "(", "output_name", ",", ")", ")", "bencode", ".", "bwrite", "(", "output_name", ",", "meta", ")", "return", "meta" ]
Create a metafile with the path given on object creation. Returns the last metafile dict that was written (as an object, not bencoded).
[ "Create", "a", "metafile", "with", "the", "path", "given", "on", "object", "creation", ".", "Returns", "the", "last", "metafile", "dict", "that", "was", "written", "(", "as", "an", "object", "not", "bencoded", ")", "." ]
python
train
42.147541
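A usage sketch (the output name, payload path, and tracker URL are hypothetical, and the constructor call is an assumption, since only create() is shown above):

# Sketch: hashing a payload directory and writing a .torrent metafile.
# All paths, the tracker URL, and the constructor call are hypothetical.
from pyrocore.util.metafile import Metafile

mf = Metafile("backup.torrent")
meta = mf.create(
    "/data/backup",                           # datapath to hash
    ["http://tracker.example.org/announce"],  # one announce URL
    comment="nightly backup",
    private=True,
)
print(sorted(meta.keys()))  # the bencoded dict that was written to disk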
Kortemme-Lab/klab
klab/cloning/cloning.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L48-L85
def make_codon_list(protein_seq, template_dna=None, include_stop=True): """ Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons. """ codon_list = [] if template_dna is None: template_dna = [] # Reverse translate each codon, preferring (in order): # 1. The codon with the most similarity to the template codon. # 2. The codon with the highest natural usage. for i, res in enumerate(protein_seq.upper()): try: template_codon = template_dna[3*i:3*i+3] except IndexError: template_codon = '---' # Already sorted by natural codon usage possible_codons = dna.ecoli_reverse_translate[res] # Sort by similarity. Note that this is a stable sort. possible_codons.sort( key=lambda x: dna.num_mutations(x, template_codon)) # Pick the best codon. codon_list.append(possible_codons[0]) # Make sure the sequence ends with a stop codon. last_codon = codon_list[-1] stop_codons = dna.ecoli_reverse_translate['.'] if include_stop and last_codon not in stop_codons: codon_list.append(stop_codons[0]) return codon_list
[ "def", "make_codon_list", "(", "protein_seq", ",", "template_dna", "=", "None", ",", "include_stop", "=", "True", ")", ":", "codon_list", "=", "[", "]", "if", "template_dna", "is", "None", ":", "template_dna", "=", "[", "]", "# Reverse translate each codon, preferring (in order):", "# 1. The codon with the most similarity to the template codon.", "# 2. The codon with the highest natural usage.", "for", "i", ",", "res", "in", "enumerate", "(", "protein_seq", ".", "upper", "(", ")", ")", ":", "try", ":", "template_codon", "=", "template_dna", "[", "3", "*", "i", ":", "3", "*", "i", "+", "3", "]", "except", "IndexError", ":", "template_codon", "=", "'---'", "# Already sorted by natural codon usage", "possible_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "res", "]", "# Sort by similarity. Note that this is a stable sort.", "possible_codons", ".", "sort", "(", "key", "=", "lambda", "x", ":", "dna", ".", "num_mutations", "(", "x", ",", "template_codon", ")", ")", "# Pick the best codon.", "codon_list", ".", "append", "(", "possible_codons", "[", "0", "]", ")", "# Make sure the sequence ends with a stop codon.", "last_codon", "=", "codon_list", "[", "-", "1", "]", "stop_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "'.'", "]", "if", "include_stop", "and", "last_codon", "not", "in", "stop_codons", ":", "codon_list", ".", "append", "(", "stop_codons", "[", "0", "]", ")", "return", "codon_list" ]
Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons.
[ "Return", "a", "list", "of", "codons", "that", "would", "be", "translated", "to", "the", "given", "protein", "sequence", ".", "Codons", "are", "picked", "first", "to", "minimize", "the", "mutations", "relative", "to", "a", "template", "DNA", "sequence", "and", "second", "to", "prefer", "optimal", "codons", "." ]
python
train
33.868421
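A usage sketch (the peptide and template strings are hypothetical): the template biases codon choice toward minimal mutations, and a stop codon is appended when the sequence does not already end in one:

# Sketch: reverse-translating a short peptide against a template.
# The protein and template strings are hypothetical examples.
from klab.cloning.cloning import make_codon_list

protein = "MKT"
template = "ATGAAAACC"  # existing DNA the output should stay close to

codons = make_codon_list(protein, template_dna=template)
print(codons)           # per-residue codons, plus a trailing stop codon
print("".join(codons))  # the assembled DNA sequence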