Dataset schema (column name, feature type, observed value range):

repo              stringlengths   7 - 54
path              stringlengths   4 - 192
url               stringlengths   87 - 284
code              stringlengths   78 - 104k
code_tokens       sequence
docstring         stringlengths   1 - 46.9k
docstring_tokens  sequence
language          stringclasses   1 value
partition         stringclasses   3 values
iotile/coretools
iotilecore/iotile/core/hw/virtual/base_runnable.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/virtual/base_runnable.py#L16-L45
def create_worker(self, func, interval, *args, **kwargs): """Spawn a worker thread running func. The worker will automatically be started when start() is called and terminated when stop() is called on this object. This must be called only from the main thread, not from a worker thread. create_worker must not be called after stop() has been called. If it is called before start() is called, the thread is started when start() is called, otherwise it is started immediately. Args: func (callable): Either a function that will be called in a loop with a sleep of interval seconds with *args and **kwargs or a generator function that will be called once and expected to yield periodically so that the worker can check if it should be killed. interval (float): The time interval between invocations of func. This should not be 0 so that the thread doesn't peg the CPU and should be short enough so that the worker checks if it should be killed in a timely fashion. *args: Arguments that are passed to func as positional args **kwargs: Arguments that are passed to func as keyword args """ thread = StoppableWorkerThread(func, interval, args, kwargs) self._workers.append(thread) if self._started: thread.start()
[ "def", "create_worker", "(", "self", ",", "func", ",", "interval", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "thread", "=", "StoppableWorkerThread", "(", "func", ",", "interval", ",", "args", ",", "kwargs", ")", "self", ".", "_workers", ".", "append", "(", "thread", ")", "if", "self", ".", "_started", ":", "thread", ".", "start", "(", ")" ]
Spawn a worker thread running func. The worker will automatically be started when start() is called and terminated when stop() is called on this object. This must be called only from the main thread, not from a worker thread. create_worker must not be called after stop() has been called. If it is called before start() is called, the thread is started when start() is called, otherwise it is started immediately. Args: func (callable): Either a function that will be called in a loop with a sleep of interval seconds with *args and **kwargs or a generator function that will be called once and expected to yield periodically so that the worker can check if it should be killed. interval (float): The time interval between invocations of func. This should not be 0 so that the thread doesn't peg the CPU and should be short enough so that the worker checks if it should be killed in a timely fashion. *args: Arguments that are passed to func as positional args **kwargs: Arguments that are passed to func as keyword args
[ "Spawn", "a", "worker", "thread", "running", "func", "." ]
python
train
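A minimal, self-contained sketch of the loop this docstring describes: a thread that calls func every interval seconds and uses the wait between invocations as its kill check. The StoppableWorker class here is a hypothetical stand-in for iotile's StoppableWorkerThread, which is not shown in this row.

import threading
import time

class StoppableWorker(threading.Thread):
    """Hypothetical stand-in for StoppableWorkerThread: call func(*args,
    **kwargs) in a loop with a sleep of `interval` seconds, checking
    between invocations whether the worker should stop."""

    def __init__(self, func, interval, args=(), kwargs=None):
        super().__init__(daemon=True)
        self._func = func
        self._interval = interval
        self._args = args
        self._kwargs = kwargs or {}
        self._stop_event = threading.Event()

    def run(self):
        # wait() doubles as the inter-invocation sleep and the kill
        # check, so stop() takes effect within `interval` seconds.
        while not self._stop_event.wait(self._interval):
            self._func(*self._args, **self._kwargs)

    def stop(self):
        self._stop_event.set()

worker = StoppableWorker(lambda: print("tick"), interval=0.1)
worker.start()
time.sleep(0.35)   # let it tick a few times
worker.stop()
worker.join()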
spyder-ide/spyder
spyder/preferences/languageserver.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/languageserver.py#L512-L516
def selection(self, index): """Update selected row.""" self.update() self.isActiveWindow() self._parent.delete_btn.setEnabled(True)
[ "def", "selection", "(", "self", ",", "index", ")", ":", "self", ".", "update", "(", ")", "self", ".", "isActiveWindow", "(", ")", "self", ".", "_parent", ".", "delete_btn", ".", "setEnabled", "(", "True", ")" ]
Update selected row.
[ "Update", "selected", "row", "." ]
python
train
linkedin/Zopkio
zopkio/deployer.py
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L164-L172
def sleep(self, unique_id, delay, configs=None): """ Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds """ self.pause(unique_id, configs) time.sleep(delay) self.resume(unique_id, configs)
[ "def", "sleep", "(", "self", ",", "unique_id", ",", "delay", ",", "configs", "=", "None", ")", ":", "self", ".", "pause", "(", "unique_id", ",", "configs", ")", "time", ".", "sleep", "(", "delay", ")", "self", ".", "resume", "(", "unique_id", ",", "configs", ")" ]
Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds
[ "Pauses", "the", "process", "for", "the", "specified", "delay", "and", "then", "resumes", "it" ]
python
train
ubccr/pinky
pinky/perception/figueras.py
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L300-L365
def checkEdges(ringSet, lookup, oatoms): """atoms, lookup -> ring atoms must be in the order of traversal around a ring! break an optimal non N2 node and return the largest ring found """ bondedAtoms = map( None, ringSet[:-1], ringSet[1:] ) bondedAtoms += [ (ringSet[-1], ringSet[0]) ] # form a lookup for the ringSet list atomSet = {} for atomID in ringSet: atomSet[atomID] = 1 results = [] # for each bond in the ring, break it and find the smallest # rings starting on either side of the bond # keep the largest but remember to add the bond back at the # end for atom1, atom2 in bondedAtoms: # break a single edge in the ring handle1 = atom1.handle handle2 = atom2.handle oatoms1 = oatoms[handle1] oatoms2 = oatoms[handle2] index1 = oatoms1.index(atom2) index2 = oatoms2.index(atom1) # break the bond del oatoms1[index1] del oatoms2[index2] ring1 = getRing(atom1, atomSet, lookup, oatoms) ring2 = getRing(atom2, atomSet, lookup, oatoms) # keep the larger of the two rings if len(ring1) > len(ring2): results.append((len(ring1), handle1, handle2, ring1)) else: results.append((len(ring2), handle2, handle1, ring2)) # retie the bond oatoms1.insert(index1, atom2) oatoms2.insert(index2, atom1) if not results: return None # find the smallest ring size, incidentHandle, adjacentHandle, smallestRing = min(results) # dereference the handles incident, adjacent = lookup[incidentHandle], lookup[adjacentHandle] # break the bond between the incident and adjacent atoms oatomsI = oatoms[incidentHandle] oatomsA = oatoms[adjacentHandle] assert incident in oatomsA assert adjacent in oatomsI oatomsI.remove(adjacent) oatomsA.remove(incident)
[ "def", "checkEdges", "(", "ringSet", ",", "lookup", ",", "oatoms", ")", ":", "bondedAtoms", "=", "map", "(", "None", ",", "ringSet", "[", ":", "-", "1", "]", ",", "ringSet", "[", "1", ":", "]", ")", "bondedAtoms", "+=", "[", "(", "ringSet", "[", "-", "1", "]", ",", "ringSet", "[", "0", "]", ")", "]", "# form a lookup for the ringSet list", "atomSet", "=", "{", "}", "for", "atomID", "in", "ringSet", ":", "atomSet", "[", "atomID", "]", "=", "1", "results", "=", "[", "]", "# for each bond in the ring, break it and find the smallest", "# rings starting on either side of the bond", "# keep the largest but rememeber to add the bond back at the", "# end", "for", "atom1", ",", "atom2", "in", "bondedAtoms", ":", "# break a single edge in the ring", "handle1", "=", "atom1", ".", "handle", "handle2", "=", "atom2", ".", "handle", "oatoms1", "=", "oatoms", "[", "handle1", "]", "oatoms2", "=", "oatoms", "[", "handle2", "]", "index1", "=", "oatoms1", ".", "index", "(", "atom2", ")", "index2", "=", "oatoms2", ".", "index", "(", "atom1", ")", "# break the bond", "del", "oatoms1", "[", "index1", "]", "del", "oatoms2", "[", "index2", "]", "ring1", "=", "getRing", "(", "atom1", ",", "atomSet", ",", "lookup", ",", "oatoms", ")", "ring2", "=", "getRing", "(", "atom2", ",", "atomSet", ",", "lookup", ",", "oatoms", ")", "# keep the larger of the two rings", "if", "len", "(", "ring1", ")", ">", "len", "(", "ring2", ")", ":", "results", ".", "append", "(", "(", "len", "(", "ring1", ")", ",", "handle1", ",", "handle2", ",", "ring1", ")", ")", "else", ":", "results", ".", "append", "(", "(", "len", "(", "ring2", ")", ",", "handle2", ",", "handle1", ",", "ring2", ")", ")", "# retie the bond", "oatoms1", ".", "insert", "(", "index1", ",", "atom2", ")", "oatoms2", ".", "insert", "(", "index2", ",", "atom1", ")", "if", "not", "results", ":", "return", "None", "# find the smallest ring", "size", ",", "incidentHandle", ",", "adjacentHandle", ",", "smallestRing", "=", "min", "(", "results", ")", "# dereference the handles", "incident", ",", "adjacent", "=", "lookup", "[", "incidentHandle", "]", ",", "lookup", "[", "adjacentHandle", "]", "# break the bond between the incident and adjacent atoms", "oatomsI", "=", "oatoms", "[", "incidentHandle", "]", "oatomsA", "=", "oatoms", "[", "adjacentHandle", "]", "assert", "incident", "in", "oatomsA", "assert", "adjacent", "in", "oatomsI", "oatomsI", ".", "remove", "(", "adjacent", ")", "oatomsA", ".", "remove", "(", "incident", ")" ]
atoms, lookup -> ring atoms must be in the order of traversal around a ring! break an optimal non N2 node and return the largest ring found
[ "atoms", "lookup", "-", ">", "ring", "atoms", "must", "be", "in", "the", "order", "of", "traversal", "around", "a", "ring!", "break", "an", "optimal", "non", "N2", "node", "and", "return", "the", "largest", "ring", "found" ]
python
train
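The core trick in checkEdges is breaking a bond by deleting each atom from the other's neighbour list, then reinserting at the saved indices so neighbour order is preserved. A self-contained sketch of that break-and-restore pattern on plain adjacency lists (the toy graph and helper below are illustrative, not pinky's types):

oatoms = {"a": ["b", "d"], "b": ["a", "c"], "c": ["b", "d"], "d": ["c", "a"]}

def with_broken_bond(oatoms, u, v, work):
    nu, nv = oatoms[u], oatoms[v]
    iu, iv = nu.index(v), nv.index(u)
    del nu[iu]            # break the bond in both directions
    del nv[iv]
    try:
        return work()     # e.g. a ring search on the modified graph
    finally:
        nu.insert(iu, v)  # retie the bond, preserving neighbour order
        nv.insert(iv, u)

snapshot = with_broken_bond(oatoms, "a", "b",
                            lambda: {k: list(v) for k, v in oatoms.items()})
print(snapshot["a"], snapshot["b"])  # ['d'] ['c']  (bond gone during work)
print(oatoms["a"], oatoms["b"])      # ['b', 'd'] ['a', 'c']  (restored)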
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L678-L697
def reset_in_ec(self, ec_index): '''Reset this component in an execution context. @param ec_index The index of the execution context to reset in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs. ''' with self._mutex: if ec_index >= len(self.owned_ecs): ec_index -= len(self.owned_ecs) if ec_index >= len(self.participating_ecs): raise exceptions.BadECIndexError(ec_index) ec = self.participating_ecs[ec_index] else: ec = self.owned_ecs[ec_index] ec.reset_component(self._obj)
[ "def", "reset_in_ec", "(", "self", ",", "ec_index", ")", ":", "with", "self", ".", "_mutex", ":", "if", "ec_index", ">=", "len", "(", "self", ".", "owned_ecs", ")", ":", "ec_index", "-=", "len", "(", "self", ".", "owned_ecs", ")", "if", "ec_index", ">=", "len", "(", "self", ".", "participating_ecs", ")", ":", "raise", "exceptions", ".", "BadECIndexError", "(", "ec_index", ")", "ec", "=", "self", ".", "participating_ecs", "[", "ec_index", "]", "else", ":", "ec", "=", "self", ".", "owned_ecs", "[", "ec_index", "]", "ec", ".", "reset_component", "(", "self", ".", "_obj", ")" ]
Reset this component in an execution context. @param ec_index The index of the execution context to reset in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs.
[ "Reset", "this", "component", "in", "an", "execution", "context", "." ]
python
train
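The index folding the docstring describes (one index that ranges over owned contexts first, then spills into participating ones) in a runnable, dependency-free form; pick_ec and IndexError stand in for the method and BadECIndexError:

def pick_ec(owned, participating, ec_index):
    # Indices past the owned list continue into the participating list.
    if ec_index >= len(owned):
        ec_index -= len(owned)
        if ec_index >= len(participating):
            raise IndexError(ec_index)   # stands in for BadECIndexError
        return participating[ec_index]
    return owned[ec_index]

owned, participating = ["own0", "own1"], ["part0"]
print(pick_ec(owned, participating, 1))  # -> 'own1'
print(pick_ec(owned, participating, 2))  # -> 'part0'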
cltk/cltk
cltk/prosody/latin/scanner.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/scanner.py#L82-L100
def _qu_fix(self, sents_syllables): """ Ensure that 'qu' is not treated as its own syllable. :param sents_syllables: Sentence of words of syllables. :return: syllabified syllables with 'qu' counted as a single consonant :rtype : list """ for sentence in sents_syllables: for word in sentence: for syllable in word: if 'qu' in syllable: qu_syll_index = word.index(syllable) next_syll = qu_syll_index + 1 fixed_syllable = [''.join(word[qu_syll_index: (next_syll + 1)])] word[qu_syll_index:(next_syll + 1)] = fixed_syllable return sents_syllables
[ "def", "_qu_fix", "(", "self", ",", "sents_syllables", ")", ":", "for", "sentence", "in", "sents_syllables", ":", "for", "word", "in", "sentence", ":", "for", "syllable", "in", "word", ":", "if", "'qu'", "in", "syllable", ":", "qu_syll_index", "=", "word", ".", "index", "(", "syllable", ")", "next_syll", "=", "qu_syll_index", "+", "1", "fixed_syllable", "=", "[", "''", ".", "join", "(", "word", "[", "qu_syll_index", ":", "(", "next_syll", "+", "1", ")", "]", ")", "]", "word", "[", "qu_syll_index", ":", "(", "next_syll", "+", "1", ")", "]", "=", "fixed_syllable", "return", "sents_syllables" ]
Ensure that 'qu' is not treated as its own syllable. :param sents_syllables: Sentence of words of syllables. :return: syllabified syllables with 'qu' counted as a single consonant :rtype : list
[ "Ensure", "that", "qu", "is", "not", "treated", "as", "its", "own", "syllable", ".", ":", "param", "sents_syllables", ":", "Sentence", "of", "words", "of", "syllables", ".", ":", "return", ":", "syllabified", "syllables", "with", "qu", "counted", "as", "a", "single", "consonant", ":", "rtype", ":", "list" ]
python
train
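The same merge step as a standalone function with a toy demo; the syllabified input below is invented for illustration:

def qu_fix(sents_syllables):
    # Join a 'qu' syllable with the one after it, so 'qu' is counted
    # as a single consonant rather than its own syllable.
    for sentence in sents_syllables:
        for word in sentence:
            for syllable in word:
                if 'qu' in syllable:
                    i = word.index(syllable)
                    word[i:i + 2] = [''.join(word[i:i + 2])]
    return sents_syllables

print(qu_fix([[['quo', 'que']]]))  # -> [[['quoque']]]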
dpgaspar/Flask-AppBuilder
flask_appbuilder/base.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/base.py#L295-L312
def _add_addon_views(self): """ Registers declared addons """ for addon in self._addon_managers: addon_class = dynamic_class_import(addon) if addon_class: # Instantiate manager with appbuilder (self) addon_class = addon_class(self) try: addon_class.pre_process() addon_class.register_views() addon_class.post_process() self.addon_managers[addon] = addon_class log.info(LOGMSG_INF_FAB_ADDON_ADDED.format(str(addon))) except Exception as e: log.exception(e) log.error(LOGMSG_ERR_FAB_ADDON_PROCESS.format(addon, e))
[ "def", "_add_addon_views", "(", "self", ")", ":", "for", "addon", "in", "self", ".", "_addon_managers", ":", "addon_class", "=", "dynamic_class_import", "(", "addon", ")", "if", "addon_class", ":", "# Instantiate manager with appbuilder (self)", "addon_class", "=", "addon_class", "(", "self", ")", "try", ":", "addon_class", ".", "pre_process", "(", ")", "addon_class", ".", "register_views", "(", ")", "addon_class", ".", "post_process", "(", ")", "self", ".", "addon_managers", "[", "addon", "]", "=", "addon_class", "log", ".", "info", "(", "LOGMSG_INF_FAB_ADDON_ADDED", ".", "format", "(", "str", "(", "addon", ")", ")", ")", "except", "Exception", "as", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "LOGMSG_ERR_FAB_ADDON_PROCESS", ".", "format", "(", "addon", ",", "e", ")", ")" ]
Registers declared addons
[ "Registers", "declared", "addon", "s" ]
python
train
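dynamic_class_import is not shown in this row; below is a plausible sketch of such a helper using importlib, resolving a dotted path to a class object or returning None on failure. This is an assumption for illustration, not Flask-AppBuilder's actual implementation.

import importlib

def dynamic_class_import(dotted_path):
    """Resolve 'package.module.ClassName' to the class, or None."""
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
        return getattr(importlib.import_module(module_path), class_name)
    except (ImportError, AttributeError, ValueError):
        return None

print(dynamic_class_import('collections.OrderedDict'))  # the class object
print(dynamic_class_import('no.such.Thing'))             # None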
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L3495-L3524
def DOM_getSearchResults(self, searchId, fromIndex, toIndex): """ Function path: DOM.getSearchResults Domain: DOM Method name: getSearchResults WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'searchId' (type: string) -> Unique search session identifier. 'fromIndex' (type: integer) -> Start index of the search result to be returned. 'toIndex' (type: integer) -> End index of the search result to be returned. Returns: 'nodeIds' (type: array) -> Ids of the search result nodes. Description: Returns search results from given <code>fromIndex</code> to given <code>toIndex</code> from the search with the given identifier. """ assert isinstance(searchId, (str,) ), "Argument 'searchId' must be of type '['str']'. Received type: '%s'" % type( searchId) assert isinstance(fromIndex, (int,) ), "Argument 'fromIndex' must be of type '['int']'. Received type: '%s'" % type( fromIndex) assert isinstance(toIndex, (int,) ), "Argument 'toIndex' must be of type '['int']'. Received type: '%s'" % type( toIndex) subdom_funcs = self.synchronous_command('DOM.getSearchResults', searchId= searchId, fromIndex=fromIndex, toIndex=toIndex) return subdom_funcs
[ "def", "DOM_getSearchResults", "(", "self", ",", "searchId", ",", "fromIndex", ",", "toIndex", ")", ":", "assert", "isinstance", "(", "searchId", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'searchId' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "searchId", ")", "assert", "isinstance", "(", "fromIndex", ",", "(", "int", ",", ")", ")", ",", "\"Argument 'fromIndex' must be of type '['int']'. Received type: '%s'\"", "%", "type", "(", "fromIndex", ")", "assert", "isinstance", "(", "toIndex", ",", "(", "int", ",", ")", ")", ",", "\"Argument 'toIndex' must be of type '['int']'. Received type: '%s'\"", "%", "type", "(", "toIndex", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'DOM.getSearchResults'", ",", "searchId", "=", "searchId", ",", "fromIndex", "=", "fromIndex", ",", "toIndex", "=", "toIndex", ")", "return", "subdom_funcs" ]
Function path: DOM.getSearchResults Domain: DOM Method name: getSearchResults WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'searchId' (type: string) -> Unique search session identifier. 'fromIndex' (type: integer) -> Start index of the search result to be returned. 'toIndex' (type: integer) -> End index of the search result to be returned. Returns: 'nodeIds' (type: array) -> Ids of the search result nodes. Description: Returns search results from given <code>fromIndex</code> to given <code>toIndex</code> from the search with the given identifier.
[ "Function", "path", ":", "DOM", ".", "getSearchResults", "Domain", ":", "DOM", "Method", "name", ":", "getSearchResults", "WARNING", ":", "This", "function", "is", "marked", "Experimental", "!", "Parameters", ":", "Required", "arguments", ":", "searchId", "(", "type", ":", "string", ")", "-", ">", "Unique", "search", "session", "identifier", ".", "fromIndex", "(", "type", ":", "integer", ")", "-", ">", "Start", "index", "of", "the", "search", "result", "to", "be", "returned", ".", "toIndex", "(", "type", ":", "integer", ")", "-", ">", "End", "index", "of", "the", "search", "result", "to", "be", "returned", ".", "Returns", ":", "nodeIds", "(", "type", ":", "array", ")", "-", ">", "Ids", "of", "the", "search", "result", "nodes", ".", "Description", ":", "Returns", "search", "results", "from", "given", "<code", ">", "fromIndex<", "/", "code", ">", "to", "given", "<code", ">", "toIndex<", "/", "code", ">", "from", "the", "sarch", "with", "the", "given", "identifier", "." ]
python
train
lra/mackup
mackup/config.py
https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/config.py#L196-L221
def _parse_path(self): """ Parse the storage path in the config. Returns: str """ if self.engine == ENGINE_DROPBOX: path = get_dropbox_folder_location() elif self.engine == ENGINE_GDRIVE: path = get_google_drive_folder_location() elif self.engine == ENGINE_COPY: path = get_copy_folder_location() elif self.engine == ENGINE_ICLOUD: path = get_icloud_folder_location() elif self.engine == ENGINE_BOX: path = get_box_folder_location() elif self.engine == ENGINE_FS: if self._parser.has_option('storage', 'path'): cfg_path = self._parser.get('storage', 'path') path = os.path.join(os.environ['HOME'], cfg_path) else: raise ConfigError("The required 'path' can't be found while" " the 'file_system' engine is used.") return str(path)
[ "def", "_parse_path", "(", "self", ")", ":", "if", "self", ".", "engine", "==", "ENGINE_DROPBOX", ":", "path", "=", "get_dropbox_folder_location", "(", ")", "elif", "self", ".", "engine", "==", "ENGINE_GDRIVE", ":", "path", "=", "get_google_drive_folder_location", "(", ")", "elif", "self", ".", "engine", "==", "ENGINE_COPY", ":", "path", "=", "get_copy_folder_location", "(", ")", "elif", "self", ".", "engine", "==", "ENGINE_ICLOUD", ":", "path", "=", "get_icloud_folder_location", "(", ")", "elif", "self", ".", "engine", "==", "ENGINE_BOX", ":", "path", "=", "get_box_folder_location", "(", ")", "elif", "self", ".", "engine", "==", "ENGINE_FS", ":", "if", "self", ".", "_parser", ".", "has_option", "(", "'storage'", ",", "'path'", ")", ":", "cfg_path", "=", "self", ".", "_parser", ".", "get", "(", "'storage'", ",", "'path'", ")", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "cfg_path", ")", "else", ":", "raise", "ConfigError", "(", "\"The required 'path' can't be found while\"", "\" the 'file_system' engine is used.\"", ")", "return", "str", "(", "path", ")" ]
Parse the storage path in the config. Returns: str
[ "Parse", "the", "storage", "path", "in", "the", "config", "." ]
python
train
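For the 'file_system' branch, a small runnable sketch of reading the [storage] path option with the standard-library configparser and joining it to HOME, mirroring the code above; the config text is invented for the demo:

import configparser
import os

cfg = configparser.ConfigParser()
cfg.read_string("[storage]\nengine = file_system\npath = Mackup\n")

if cfg.has_option('storage', 'path'):
    cfg_path = cfg.get('storage', 'path')
    # Resolved relative to the user's home directory, as in _parse_path.
    path = os.path.join(os.environ.get('HOME', '/home/user'), cfg_path)
    print(path)  # e.g. /home/user/Mackup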
aganezov/bg
bg/breakpoint_graph.py
https://github.com/aganezov/bg/blob/1ec758193441e49e7b34e0da09571480f4c24455/bg/breakpoint_graph.py#L453-L503
def __split_bgedge(self, bgedge, guidance=None, sorted_guidance=False, account_for_colors_multiplicity_in_guidance=True, key=None): """ Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance. If no unique identifier for edge to be changed is specified, edge to be split is determined by iterating over all edges between vertices in supplied :class:`bg.edge.BGEdge` instance and the edge with most similarity score to supplied one is chosen. Once the edge to be split is determined, split is performed from the perspective of :class:`bg.multicolor.Multicolor` split. The originally detected edge is deleted, and new edges containing information about multi-colors after splitting, are added to the current :class:`BreakpointGraph`. :param bgedge: an edge to find most "similar to" among existing edges for a split :type bgedge: :class:`bg.edge.BGEdge` :param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split :type guidance: iterable where each entry is iterable with colors entries :param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors :type duplication_splitting: ``Boolean`` :param key: unique identifier of edge to be split :type key: any python object. ``int`` is expected :return: ``None``, performs inplace changes """ candidate_id = None candidate_score = 0 candidate_data = None if key is not None: new_multicolors = Multicolor.split_colors( multicolor=self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"], guidance=guidance, sorted_guidance=sorted_guidance, account_for_color_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance) self.__delete_bgedge(bgedge=BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2, multicolor=self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"]), key=key) for multicolor in new_multicolors: self.__add_bgedge(BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2, multicolor=multicolor), merge=False) else: for v1, v2, key, data in self.bg.edges(nbunch=bgedge.vertex1, data=True, keys=True): if v2 == bgedge.vertex2: score = Multicolor.similarity_score(bgedge.multicolor, data["attr_dict"]["multicolor"]) if score > candidate_score: candidate_id = key candidate_data = data candidate_score = score if candidate_data is not None: new_multicolors = Multicolor.split_colors(multicolor=candidate_data["attr_dict"]["multicolor"], guidance=guidance, sorted_guidance=sorted_guidance, account_for_color_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance) self.__delete_bgedge(bgedge=BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2, multicolor=candidate_data["attr_dict"]["multicolor"]), key=candidate_id) for multicolor in new_multicolors: self.__add_bgedge(BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2, multicolor=multicolor), merge=False)
[ "def", "__split_bgedge", "(", "self", ",", "bgedge", ",", "guidance", "=", "None", ",", "sorted_guidance", "=", "False", ",", "account_for_colors_multiplicity_in_guidance", "=", "True", ",", "key", "=", "None", ")", ":", "candidate_id", "=", "None", "candidate_score", "=", "0", "candidate_data", "=", "None", "if", "key", "is", "not", "None", ":", "new_multicolors", "=", "Multicolor", ".", "split_colors", "(", "multicolor", "=", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "[", "bgedge", ".", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ",", "guidance", "=", "guidance", ",", "sorted_guidance", "=", "sorted_guidance", ",", "account_for_color_multiplicity_in_guidance", "=", "account_for_colors_multiplicity_in_guidance", ")", "self", ".", "__delete_bgedge", "(", "bgedge", "=", "BGEdge", "(", "vertex1", "=", "bgedge", ".", "vertex1", ",", "vertex2", "=", "bgedge", ".", "vertex2", ",", "multicolor", "=", "self", ".", "bg", "[", "bgedge", ".", "vertex1", "]", "[", "bgedge", ".", "vertex2", "]", "[", "key", "]", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ")", ",", "key", "=", "key", ")", "for", "multicolor", "in", "new_multicolors", ":", "self", ".", "__add_bgedge", "(", "BGEdge", "(", "vertex1", "=", "bgedge", ".", "vertex1", ",", "vertex2", "=", "bgedge", ".", "vertex2", ",", "multicolor", "=", "multicolor", ")", ",", "merge", "=", "False", ")", "else", ":", "for", "v1", ",", "v2", ",", "key", ",", "data", "in", "self", ".", "bg", ".", "edges", "(", "nbunch", "=", "bgedge", ".", "vertex1", ",", "data", "=", "True", ",", "keys", "=", "True", ")", ":", "if", "v2", "==", "bgedge", ".", "vertex2", ":", "score", "=", "Multicolor", ".", "similarity_score", "(", "bgedge", ".", "multicolor", ",", "data", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ")", "if", "score", ">", "candidate_score", ":", "candidate_id", "=", "key", "candidate_data", "=", "data", "candidate_score", "=", "score", "if", "candidate_data", "is", "not", "None", ":", "new_multicolors", "=", "Multicolor", ".", "split_colors", "(", "multicolor", "=", "candidate_data", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ",", "guidance", "=", "guidance", ",", "sorted_guidance", "=", "sorted_guidance", ",", "account_for_color_multiplicity_in_guidance", "=", "account_for_colors_multiplicity_in_guidance", ")", "self", ".", "__delete_bgedge", "(", "bgedge", "=", "BGEdge", "(", "vertex1", "=", "bgedge", ".", "vertex1", ",", "vertex2", "=", "bgedge", ".", "vertex2", ",", "multicolor", "=", "candidate_data", "[", "\"attr_dict\"", "]", "[", "\"multicolor\"", "]", ")", ",", "key", "=", "candidate_id", ")", "for", "multicolor", "in", "new_multicolors", ":", "self", ".", "__add_bgedge", "(", "BGEdge", "(", "vertex1", "=", "bgedge", ".", "vertex1", ",", "vertex2", "=", "bgedge", ".", "vertex2", ",", "multicolor", "=", "multicolor", ")", ",", "merge", "=", "False", ")" ]
Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance. If no unique identifier for edge to be changed is specified, edge to be split is determined by iterating over all edges between vertices in supplied :class:`bg.edge.BGEdge` instance and the edge with most similarity score to supplied one is chosen. Once the edge to be split is determined, split is performed from the perspective of :class:`bg.multicolor.Multicolor` split. The originally detected edge is deleted, and new edges containing information about multi-colors after splitting, are added to the current :class:`BreakpointGraph`. :param bgedge: an edge to find most "similar to" among existing edges for a split :type bgedge: :class:`bg.edge.BGEdge` :param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split :type guidance: iterable where each entry is iterable with colors entries :param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors :type duplication_splitting: ``Boolean`` :param key: unique identifier of edge to be split :type key: any python object. ``int`` is expected :return: ``None``, performs inplace changes
[ "Splits", "a", ":", "class", ":", "bg", ".", "edge", ".", "BGEdge", "in", "current", ":", "class", ":", "BreakpointGraph", "most", "similar", "to", "supplied", "one", "(", "if", "no", "unique", "identifier", "key", "is", "provided", ")", "with", "respect", "to", "supplied", "guidance", "." ]
python
train
andreikop/qutepart
qutepart/bookmarks.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/bookmarks.py#L46-L52
def clear(self, startBlock, endBlock): """Clear bookmarks on block range including start and end """ for block in qutepart.iterateBlocksFrom(startBlock): self._setBlockMarked(block, False) if block == endBlock: break
[ "def", "clear", "(", "self", ",", "startBlock", ",", "endBlock", ")", ":", "for", "block", "in", "qutepart", ".", "iterateBlocksFrom", "(", "startBlock", ")", ":", "self", ".", "_setBlockMarked", "(", "block", ",", "False", ")", "if", "block", "==", "endBlock", ":", "break" ]
Clear bookmarks on block range including start and end
[ "Clear", "bookmarks", "on", "block", "range", "including", "start", "and", "end" ]
python
train
dedupeio/dedupe
dedupe/clustering.py
https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/clustering.py#L177-L196
def confidences(cluster, condensed_distances, d): ''' We calculate a per record score that is similar to a standard deviation. The main reason is that these record scores can be used to calculate the standard deviation of an entire cluster, which is a reasonable metric for clusters. ''' scores = dict.fromkeys(cluster, 0.0) squared_distances = condensed_distances ** 2 for i, j in itertools.combinations(cluster, 2): index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1 squared_dist = squared_distances[int(index)] scores[i] += squared_dist scores[j] += squared_dist scores = numpy.array([score for _, score in sorted(scores.items())]) scores /= len(cluster) - 1 scores = numpy.sqrt(scores) scores = 1 - scores return scores
[ "def", "confidences", "(", "cluster", ",", "condensed_distances", ",", "d", ")", ":", "scores", "=", "dict", ".", "fromkeys", "(", "cluster", ",", "0.0", ")", "squared_distances", "=", "condensed_distances", "**", "2", "for", "i", ",", "j", "in", "itertools", ".", "combinations", "(", "cluster", ",", "2", ")", ":", "index", "=", "d", "*", "(", "d", "-", "1", ")", "/", "2", "-", "(", "d", "-", "i", ")", "*", "(", "d", "-", "i", "-", "1", ")", "/", "2", "+", "j", "-", "i", "-", "1", "squared_dist", "=", "squared_distances", "[", "int", "(", "index", ")", "]", "scores", "[", "i", "]", "+=", "squared_dist", "scores", "[", "j", "]", "+=", "squared_dist", "scores", "=", "numpy", ".", "array", "(", "[", "score", "for", "_", ",", "score", "in", "sorted", "(", "scores", ".", "items", "(", ")", ")", "]", ")", "scores", "/=", "len", "(", "cluster", ")", "-", "1", "scores", "=", "numpy", ".", "sqrt", "(", "scores", ")", "scores", "=", "1", "-", "scores", "return", "scores" ]
We calculate a per record score that is similar to a standard deviation. The main reason is that these record scores can be used to calculate the standard deviation of an entire cluster, which is a reasonable metric for clusters.
[ "We", "calculate", "a", "per", "record", "score", "that", "is", "similar", "to", "a", "standard", "deviation", ".", "The", "main", "reason", "is", "that", "these", "record", "scores", "can", "be", "used", "to", "calculate", "the", "standard", "deviation", "of", "an", "entire", "cluster", "which", "is", "a", "reasonable", "metric", "for", "clusters", "." ]
python
train
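The index arithmetic in confidences addresses a condensed (upper-triangular) pairwise distance vector. A quick check of that formula against scipy's squareform layout (requires numpy and scipy):

import numpy as np
from scipy.spatial.distance import pdist, squareform

d = 5
points = np.random.rand(d, 2)
condensed = pdist(points)          # length d*(d-1)/2 vector
square = squareform(condensed)     # d x d symmetric matrix

for i, j in [(0, 1), (1, 4), (2, 3)]:
    # Same formula as in confidences() above, for i < j.
    index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1
    assert condensed[int(index)] == square[i, j]
print("condensed indexing matches squareform")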
h2oai/h2o-3
h2o-py/h2o/utils/typechecks.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/typechecks.py#L461-L473
def assert_matches(v, regex): """ Assert that string variable matches the provided regular expression. :param v: variable to check. :param regex: regular expression to check against (can be either a string, or compiled regexp). """ m = re.match(regex, v) if m is None: vn = _retrieve_assert_arguments()[0] message = "Argument `{var}` (= {val!r}) did not match /{regex}/".format(var=vn, regex=regex, val=v) raise H2OValueError(message, var_name=vn, skip_frames=1) return m
[ "def", "assert_matches", "(", "v", ",", "regex", ")", ":", "m", "=", "re", ".", "match", "(", "regex", ",", "v", ")", "if", "m", "is", "None", ":", "vn", "=", "_retrieve_assert_arguments", "(", ")", "[", "0", "]", "message", "=", "\"Argument `{var}` (= {val!r}) did not match /{regex}/\"", ".", "format", "(", "var", "=", "vn", ",", "regex", "=", "regex", ",", "val", "=", "v", ")", "raise", "H2OValueError", "(", "message", ",", "var_name", "=", "vn", ",", "skip_frames", "=", "1", ")", "return", "m" ]
Assert that string variable matches the provided regular expression. :param v: variable to check. :param regex: regular expression to check against (can be either a string, or compiled regexp).
[ "Assert", "that", "string", "variable", "matches", "the", "provided", "regular", "expression", "." ]
python
test
polyaxon/polyaxon
polyaxon/pipelines/utils.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/pipelines/utils.py#L28-L46
def create_pipeline_run(pipeline, context_by_op): """Create a pipeline run/instance.""" pipeline_run = PipelineRun.objects.create(pipeline=pipeline) dag, ops = pipeline.dag # Go through the operations and create operation runs and the upstreams op_runs = {} runs_by_ops = {} for op_id in dag.keys(): op_run = OperationRun.objects.create( pipeline_run=pipeline_run, operation_id=op_id, celery_task_context=context_by_op.get(op_id)) op_run_id = op_run.id op_runs[op_run_id] = op_run runs_by_ops[op_id] = op_run_id # Create operations upstreams # We set the upstream for the topologically sorted dag set_topological_dag_upstreams(dag=dag, ops=ops, op_runs=op_runs, runs_by_ops=runs_by_ops)
[ "def", "create_pipeline_run", "(", "pipeline", ",", "context_by_op", ")", ":", "pipeline_run", "=", "PipelineRun", ".", "objects", ".", "create", "(", "pipeline", "=", "pipeline", ")", "dag", ",", "ops", "=", "pipeline", ".", "dag", "# Go trough the operation and create operation runs and the upstreams", "op_runs", "=", "{", "}", "runs_by_ops", "=", "{", "}", "for", "op_id", "in", "dag", ".", "keys", "(", ")", ":", "op_run", "=", "OperationRun", ".", "objects", ".", "create", "(", "pipeline_run", "=", "pipeline_run", ",", "operation_id", "=", "op_id", ",", "celery_task_context", "=", "context_by_op", ".", "get", "(", "op_id", ")", ")", "op_run_id", "=", "op_run", ".", "id", "op_runs", "[", "op_run_id", "]", "=", "op_run", "runs_by_ops", "[", "op_id", "]", "=", "op_run_id", "# Create operations upstreams", "# We set the upstream for the topologically sorted dag", "set_topological_dag_upstreams", "(", "dag", "=", "dag", ",", "ops", "=", "ops", ",", "op_runs", "=", "op_runs", ",", "runs_by_ops", "=", "runs_by_ops", ")" ]
Create a pipeline run/instance.
[ "Create", "a", "pipeline", "run", "/", "instance", "." ]
python
train
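set_topological_dag_upstreams is not shown in this row. As a generic illustration only (not polyaxon's helper), here is a topological ordering of a DAG given as {node: downstream_ids} using the standard library's graphlib, which is the kind of traversal such upstream wiring relies on:

from graphlib import TopologicalSorter

dag = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}

# TopologicalSorter expects predecessor sets, so invert the edges first.
preds = {n: set() for n in dag}
for n, downs in dag.items():
    for down in downs:
        preds[down].add(n)

print(list(TopologicalSorter(preds).static_order()))
# a valid order, e.g. ['a', 'b', 'c', 'd']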
hatemile/hatemile-for-python
hatemile/implementation/css.py
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L445-L463
def _is_valid_inherit_element(self, element): """ Check that the children of element can be manipulated to apply the CSS properties. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the children of element can be manipulated to apply the CSS properties or False if the children of element cannot be manipulated to apply the CSS properties. :rtype: bool """ # pylint: disable=no-self-use tag_name = element.get_tag_name() return ( (tag_name in AccessibleCSSImplementation.VALID_INHERIT_TAGS) and (not element.has_attribute(CommonFunctions.DATA_IGNORE)) )
[ "def", "_is_valid_inherit_element", "(", "self", ",", "element", ")", ":", "# pylint: disable=no-self-use", "tag_name", "=", "element", ".", "get_tag_name", "(", ")", "return", "(", "(", "tag_name", "in", "AccessibleCSSImplementation", ".", "VALID_INHERIT_TAGS", ")", "and", "(", "not", "element", ".", "has_attribute", "(", "CommonFunctions", ".", "DATA_IGNORE", ")", ")", ")" ]
Check that the children of element can be manipulated to apply the CSS properties. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the children of element can be manipulated to apply the CSS properties or False if the children of element cannot be manipulated to apply the CSS properties. :rtype: bool
[ "Check", "that", "the", "children", "of", "element", "can", "be", "manipulated", "to", "apply", "the", "CSS", "properties", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L547-L555
def rfc2426(self): """RFC2426-encode the field content. :return: the field in the RFC 2426 format. :returntype: `str`""" return rfc2425encode("adr",u';'.join(quote_semicolon(val) for val in (self.pobox,self.extadr,self.street,self.locality, self.region,self.pcode,self.ctry)), {"type":",".join(self.type)})
[ "def", "rfc2426", "(", "self", ")", ":", "return", "rfc2425encode", "(", "\"adr\"", ",", "u';'", ".", "join", "(", "quote_semicolon", "(", "val", ")", "for", "val", "in", "(", "self", ".", "pobox", ",", "self", ".", "extadr", ",", "self", ".", "street", ",", "self", ".", "locality", ",", "self", ".", "region", ",", "self", ".", "pcode", ",", "self", ".", "ctry", ")", ")", ",", "{", "\"type\"", ":", "\",\"", ".", "join", "(", "self", ".", "type", ")", "}", ")" ]
RFC2426-encode the field content. :return: the field in the RFC 2426 format. :returntype: `str`
[ "RFC2426", "-", "encode", "the", "field", "content", "." ]
python
valid
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_events_handler.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_events_handler.py#L125-L150
def callback(self, timestamp, event_type, payload): """Callback method for processing events in notification queue. :param timestamp: time the message is received. :param event_type: event type in the notification queue such as identity.project.created, identity.project.deleted. :param payload: Contains information of an event """ try: data = (event_type, payload) LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, ' 'payload: %(payload)s\n', ( {'event': event_type, 'payload': payload})) if 'create' in event_type: pri = self._create_pri elif 'delete' in event_type: pri = self._delete_pri elif 'update' in event_type: pri = self._update_pri else: pri = self._delete_pri self._pq.put((pri, timestamp, data)) except Exception as exc: LOG.exception('Error: %(err)s for event %(event)s', {'err': str(exc), 'event': event_type})
[ "def", "callback", "(", "self", ",", "timestamp", ",", "event_type", ",", "payload", ")", ":", "try", ":", "data", "=", "(", "event_type", ",", "payload", ")", "LOG", ".", "debug", "(", "'RX NOTIFICATION ==>\\nevent_type: %(event)s, '", "'payload: %(payload)s\\n'", ",", "(", "{", "'event'", ":", "event_type", ",", "'payload'", ":", "payload", "}", ")", ")", "if", "'create'", "in", "event_type", ":", "pri", "=", "self", ".", "_create_pri", "elif", "'delete'", "in", "event_type", ":", "pri", "=", "self", ".", "_delete_pri", "elif", "'update'", "in", "event_type", ":", "pri", "=", "self", ".", "_update_pri", "else", ":", "pri", "=", "self", ".", "_delete_pri", "self", ".", "_pq", ".", "put", "(", "(", "pri", ",", "timestamp", ",", "data", ")", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "exception", "(", "'Error: %(err)s for event %(event)s'", ",", "{", "'err'", ":", "str", "(", "exc", ")", ",", "'event'", ":", "event_type", "}", ")" ]
Callback method for processing events in notification queue. :param timestamp: time the message is received. :param event_type: event type in the notification queue such as identity.project.created, identity.project.deleted. :param payload: Contains information of an event
[ "Callback", "method", "for", "processing", "events", "in", "notification", "queue", "." ]
python
train
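The handler relies on Python tuples comparing element-wise, so (priority, timestamp, data) items dequeue by priority first and arrival time second. A small demo; the priority values below are illustrative, since the real ones come from attributes like self._create_pri:

import queue
import time

pq = queue.PriorityQueue()
pq.put((2, time.time(), ('identity.project.deleted', {})))
pq.put((1, time.time(), ('identity.project.created', {})))
pq.put((3, time.time(), ('identity.project.updated', {})))

while not pq.empty():
    pri, ts, (event_type, payload) = pq.get()
    print(pri, event_type)
# 1 identity.project.created, then 2 ...deleted, then 3 ...updated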
Xion/taipan
taipan/objective/classes.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/classes.py#L57-L71
def ensure_direct_subclass(class_, of): """Check whether given class is a direct subclass of another. :param class_: Class to check :param of: Superclass to check against :return: ``class_``, if the check succeeds :raise TypeError: When the check fails .. versionadded:: 0.0.4 """ if not is_direct_subclass(class_, of): raise TypeError("expected a direct subclass of %r, got %s instead" % ( of, class_.__name__)) return class_
[ "def", "ensure_direct_subclass", "(", "class_", ",", "of", ")", ":", "if", "not", "is_direct_subclass", "(", "class_", ",", "of", ")", ":", "raise", "TypeError", "(", "\"expected a direct subclass of %r, got %s instead\"", "%", "(", "of", ",", "class_", ".", "__name__", ")", ")", "return", "class_" ]
Check whether given class is a direct subclass of another. :param class_: Class to check :param of: Superclass to check against :return: ``class_``, if the check succeeds :raise TypeError: When the check fails .. versionadded:: 0.0.4
[ "Check", "whether", "given", "class", "is", "a", "direct", "subclass", "of", "another", "." ]
python
train
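is_direct_subclass is not shown in this row; "direct" plausibly means the superclass appears in class_.__bases__ (one level down, not via a longer inheritance chain). A sketch under that assumption:

def is_direct_subclass(class_, of):
    # Direct means `of` is an immediate base, not a distant ancestor.
    return of in class_.__bases__

class A: pass
class B(A): pass
class C(B): pass

print(is_direct_subclass(B, A))  # True
print(is_direct_subclass(C, A))  # False: a subclass, but not direct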
DallasMorningNews/django-datafreezer
datafreezer/views.py
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L1049-L1052
def generate_page_title(self, data_slug): """Generates remainder of page title specific to data_slug (tag).""" tag = Tag.objects.filter(slug=data_slug) return tag[0].word
[ "def", "generate_page_title", "(", "self", ",", "data_slug", ")", ":", "tag", "=", "Tag", ".", "objects", ".", "filter", "(", "slug", "=", "data_slug", ")", "return", "tag", "[", "0", "]", ".", "word" ]
Generates remainder of page title specific to data_slug (tag).
[ "Generates", "remainder", "of", "page", "title", "specific", "to", "data_slug", "(", "tag", ")", "." ]
python
train
riptano/ccm
ccmlib/node.py
https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/node.py#L241-L249
def get_install_dir(self): """ Returns the path to the cassandra source directory used by this node. """ if self.__install_dir is None: return self.cluster.get_install_dir() else: common.validate_install_dir(self.__install_dir) return self.__install_dir
[ "def", "get_install_dir", "(", "self", ")", ":", "if", "self", ".", "__install_dir", "is", "None", ":", "return", "self", ".", "cluster", ".", "get_install_dir", "(", ")", "else", ":", "common", ".", "validate_install_dir", "(", "self", ".", "__install_dir", ")", "return", "self", ".", "__install_dir" ]
Returns the path to the cassandra source directory used by this node.
[ "Returns", "the", "path", "to", "the", "cassandra", "source", "directory", "used", "by", "this", "node", "." ]
python
train
twilio/twilio-python
twilio/rest/serverless/v1/service/environment/variable.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/environment/variable.py#L258-L279
def fetch(self): """ Fetch a VariableInstance :returns: Fetched VariableInstance :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return VariableInstance( self._version, payload, service_sid=self._solution['service_sid'], environment_sid=self._solution['environment_sid'], sid=self._solution['sid'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "VariableInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "environment_sid", "=", "self", ".", "_solution", "[", "'environment_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")" ]
Fetch a VariableInstance :returns: Fetched VariableInstance :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
[ "Fetch", "a", "VariableInstance" ]
python
train
dwavesystems/dwave-cloud-client
dwave/cloud/solver.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/solver.py#L308-L346
def sample_qubo(self, qubo, **params): """Sample from the specified QUBO. Args: qubo (dict of (int, int):float): Coefficients of a quadratic unconstrained binary optimization (QUBO) model. **params: Parameters for the sampling method, specified per solver. Returns: :obj:`Future` Examples: This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and samples 5 times. >>> from dwave.cloud import Client >>> with Client.from_config() as client: # doctest: +SKIP ... solver = client.get_solver() ... u, v = next(iter(solver.edges)) ... Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1} ... computation = solver.sample_qubo(Q, num_reads=5) ... for i in range(5): ... print(computation.samples[i][u], computation.samples[i][v]) ... ... (0, 1) (1, 0) (1, 0) (0, 1) (1, 0) """ # In a QUBO the linear and quadratic terms in the objective are mixed into # a matrix. For the sake of encoding, we will separate them before calling `_sample` linear = {i1: v for (i1, i2), v in uniform_iterator(qubo) if i1 == i2} quadratic = {(i1, i2): v for (i1, i2), v in uniform_iterator(qubo) if i1 != i2} return self._sample('qubo', linear, quadratic, params)
[ "def", "sample_qubo", "(", "self", ",", "qubo", ",", "*", "*", "params", ")", ":", "# In a QUBO the linear and quadratic terms in the objective are mixed into", "# a matrix. For the sake of encoding, we will separate them before calling `_sample`", "linear", "=", "{", "i1", ":", "v", "for", "(", "i1", ",", "i2", ")", ",", "v", "in", "uniform_iterator", "(", "qubo", ")", "if", "i1", "==", "i2", "}", "quadratic", "=", "{", "(", "i1", ",", "i2", ")", ":", "v", "for", "(", "i1", ",", "i2", ")", ",", "v", "in", "uniform_iterator", "(", "qubo", ")", "if", "i1", "!=", "i2", "}", "return", "self", ".", "_sample", "(", "'qubo'", ",", "linear", ",", "quadratic", ",", "params", ")" ]
Sample from the specified QUBO. Args: qubo (dict of (int, int):float): Coefficients of a quadratic unconstrained binary optimization (QUBO) model. **params: Parameters for the sampling method, specified per solver. Returns: :obj:`Future` Examples: This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and samples 5 times. >>> from dwave.cloud import Client >>> with Client.from_config() as client: # doctest: +SKIP ... solver = client.get_solver() ... u, v = next(iter(solver.edges)) ... Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1} ... computation = solver.sample_qubo(Q, num_reads=5) ... for i in range(5): ... print(computation.samples[i][u], computation.samples[i][v]) ... ... (0, 1) (1, 0) (1, 0) (0, 1) (1, 0)
[ "Sample", "from", "the", "specified", "QUBO", "." ]
python
train
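The linear/quadratic separation from the last two lines of sample_qubo, run on a toy QUBO; for plain dicts, uniform_iterator can reasonably be assumed to behave like dict.items() (an assumption, since the helper is not shown here):

Q = {(0, 0): -1, (0, 1): 0, (1, 0): 2, (1, 1): -1}

# Diagonal entries become linear biases, off-diagonal entries couplings.
linear = {i1: v for (i1, i2), v in Q.items() if i1 == i2}
quadratic = {(i1, i2): v for (i1, i2), v in Q.items() if i1 != i2}

print(linear)     # {0: -1, 1: -1}
print(quadratic)  # {(0, 1): 0, (1, 0): 2}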
swift-nav/libsbp
python/sbp/client/drivers/cdc_driver.py
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/cdc_driver.py#L50-L65
def write(self, s): """ Write wrapper. Parameters ---------- s : bytes Bytes to write """ try: return self.handle.write(s) except OSError: print() print("Piksi disconnected") print() raise IOError
[ "def", "write", "(", "self", ",", "s", ")", ":", "try", ":", "return", "self", ".", "handle", ".", "write", "(", "s", ")", "except", "OSError", ":", "print", "(", ")", "print", "(", "\"Piksi disconnected\"", ")", "print", "(", ")", "raise", "IOError" ]
Write wrapper. Parameters ---------- s : bytes Bytes to write
[ "Write", "wrapper", "." ]
python
train
ekmmetering/ekmmeters
ekmmeters.py
https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L964-L982
def initPort(self): """ Required initialization call, wraps pyserial constructor. """ try: self.m_ser = serial.Serial(port=self.m_ttyport, baudrate=self.m_baudrate, timeout=0, parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE, bytesize=serial.SEVENBITS, rtscts=False) ekm_log("Pyserial version = " + serial.VERSION) ekm_log("Port = " + self.m_ttyport) ekm_log("Rate = " + str(self.m_baudrate)) time.sleep(self.m_init_wait) return True except: ekm_log(traceback.format_exc(sys.exc_info())) return False
[ "def", "initPort", "(", "self", ")", ":", "try", ":", "self", ".", "m_ser", "=", "serial", ".", "Serial", "(", "port", "=", "self", ".", "m_ttyport", ",", "baudrate", "=", "self", ".", "m_baudrate", ",", "timeout", "=", "0", ",", "parity", "=", "serial", ".", "PARITY_EVEN", ",", "stopbits", "=", "serial", ".", "STOPBITS_ONE", ",", "bytesize", "=", "serial", ".", "SEVENBITS", ",", "rtscts", "=", "False", ")", "ekm_log", "(", "\"Pyserial version = \"", "+", "serial", ".", "VERSION", ")", "ekm_log", "(", "\"Port = \"", "+", "self", ".", "m_ttyport", ")", "ekm_log", "(", "\"Rate = \"", "+", "str", "(", "self", ".", "m_baudrate", ")", ")", "time", ".", "sleep", "(", "self", ".", "m_init_wait", ")", "return", "True", "except", ":", "ekm_log", "(", "traceback", ".", "format_exc", "(", "sys", ".", "exc_info", "(", ")", ")", ")", "return", "False" ]
Required initialization call, wraps pyserial constructor.
[ "Required", "initialization", "call", "wraps", "pyserial", "constructor", "." ]
python
test
vatlab/SoS
src/sos/eval.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/eval.py#L127-L168
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True) -> None: '''Execute a statement.''' if _dict is None: _dict = env.sos_dict.dict() if not return_result: exec( compile(script, filename=stmtHash.hash(script), mode='exec'), _dict) return None try: stmts = list(ast.iter_child_nodes(ast.parse(script))) if not stmts: return if isinstance(stmts[-1], ast.Expr): # the last one is an expression and we will try to return the results # so we first execute the previous statements if len(stmts) > 1: exec( compile( ast.Module(body=stmts[:-1]), filename=stmtHash.hash(script), mode="exec"), _dict) # then we eval the last one res = eval( compile( ast.Expression(body=stmts[-1].value), filename=stmtHash.hash(script), mode="eval"), _dict) else: # otherwise we just execute the entire code exec( compile(script, filename=stmtHash.hash(script), mode='exec'), _dict) res = None except SyntaxError as e: raise SyntaxError(f"Invalid code {script}: {e}") # if check_readonly: # env.sos_dict.check_readonly_vars() return res
[ "def", "SoS_exec", "(", "script", ":", "str", ",", "_dict", ":", "dict", "=", "None", ",", "return_result", ":", "bool", "=", "True", ")", "->", "None", ":", "if", "_dict", "is", "None", ":", "_dict", "=", "env", ".", "sos_dict", ".", "dict", "(", ")", "if", "not", "return_result", ":", "exec", "(", "compile", "(", "script", ",", "filename", "=", "stmtHash", ".", "hash", "(", "script", ")", ",", "mode", "=", "'exec'", ")", ",", "_dict", ")", "return", "None", "try", ":", "stmts", "=", "list", "(", "ast", ".", "iter_child_nodes", "(", "ast", ".", "parse", "(", "script", ")", ")", ")", "if", "not", "stmts", ":", "return", "if", "isinstance", "(", "stmts", "[", "-", "1", "]", ",", "ast", ".", "Expr", ")", ":", "# the last one is an expression and we will try to return the results", "# so we first execute the previous statements", "if", "len", "(", "stmts", ")", ">", "1", ":", "exec", "(", "compile", "(", "ast", ".", "Module", "(", "body", "=", "stmts", "[", ":", "-", "1", "]", ")", ",", "filename", "=", "stmtHash", ".", "hash", "(", "script", ")", ",", "mode", "=", "\"exec\"", ")", ",", "_dict", ")", "# then we eval the last one", "res", "=", "eval", "(", "compile", "(", "ast", ".", "Expression", "(", "body", "=", "stmts", "[", "-", "1", "]", ".", "value", ")", ",", "filename", "=", "stmtHash", ".", "hash", "(", "script", ")", ",", "mode", "=", "\"eval\"", ")", ",", "_dict", ")", "else", ":", "# otherwise we just execute the entire code", "exec", "(", "compile", "(", "script", ",", "filename", "=", "stmtHash", ".", "hash", "(", "script", ")", ",", "mode", "=", "'exec'", ")", ",", "_dict", ")", "res", "=", "None", "except", "SyntaxError", "as", "e", ":", "raise", "SyntaxError", "(", "f\"Invalid code {script}: {e}\"", ")", "# if check_readonly:", "# env.sos_dict.check_readonly_vars()", "return", "res" ]
Execute a statement.
[ "Execute", "a", "statement", "." ]
python
train
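A minimal sketch of the exec-then-eval-last-expression pattern SoS_exec implements, without the SoS-specific hashing and dictionary plumbing:

import ast

def exec_with_result(script, namespace):
    """Run `script`; if it ends in an expression, evaluate that last
    expression separately and return its value (else return None)."""
    module = ast.parse(script)
    last = None
    if module.body and isinstance(module.body[-1], ast.Expr):
        last = module.body.pop()       # peel off the trailing expression
    exec(compile(module, '<script>', 'exec'), namespace)
    if last is not None:
        return eval(compile(ast.Expression(body=last.value),
                            '<script>', 'eval'), namespace)
    return None

print(exec_with_result("x = 20\nx + 1", {}))  # 21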
decryptus/sonicprobe
sonicprobe/libs/urisup.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/urisup.py#L386-L400
def unsplit_query(query): """ Create a query string using the tuple query with a format as the one returned by split_query() """ def unsplit_assignment((x, y)): if (x is not None) and (y is not None): return x + '=' + y elif x is not None: return x elif y is not None: return '=' + y else: return '' return '&'.join(map(unsplit_assignment, query))
[ "def", "unsplit_query", "(", "query", ")", ":", "def", "unsplit_assignment", "(", "(", "x", ",", "y", ")", ")", ":", "if", "(", "x", "is", "not", "None", ")", "and", "(", "y", "is", "not", "None", ")", ":", "return", "x", "+", "'='", "+", "y", "elif", "x", "is", "not", "None", ":", "return", "x", "elif", "y", "is", "not", "None", ":", "return", "'='", "+", "y", "else", ":", "return", "''", "return", "'&'", ".", "join", "(", "map", "(", "unsplit_assignment", ",", "query", ")", ")" ]
Create a query string using the tuple query with a format as the one returned by split_query()
[ "Create", "a", "query", "string", "using", "the", "tuple", "query", "with", "a", "format", "as", "the", "one", "returned", "by", "split_query", "()" ]
python
train
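unsplit_assignment above uses Python 2 tuple-parameter syntax, which is a SyntaxError on Python 3. An equivalent Python 3 sketch of the same joining rules, with a small demo:

def unsplit_query_py3(query):
    def unsplit_assignment(pair):
        x, y = pair
        if x is not None and y is not None:
            return x + '=' + y
        elif x is not None:
            return x
        elif y is not None:
            return '=' + y
        return ''
    return '&'.join(map(unsplit_assignment, query))

print(unsplit_query_py3([('a', '1'), ('flag', None), (None, 'v')]))
# -> a=1&flag&=v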
src-d/modelforge
modelforge/registry.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/registry.py#L111-L134
def delete_model(args: argparse.Namespace, backend: StorageBackend, log: logging.Logger): """ Delete a model. :param args: :class:`argparse.Namespace` with "input", "backend", "args", "meta", \ "update_default", "username", "password", "remote_repo", \ "template_model", "template_readme" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None """ try: meta = backend.index.remove_model(args.input) template_readme = backend.index.load_template(args.template_readme) backend.index.update_readme(template_readme) except ValueError: return 1 backend.delete_model(meta) log.info("Updating the models index...") try: backend.index.upload("delete", meta) except ValueError: # TODO: replace with PorcelainError return 1 log.info("Successfully deleted.")
[ "def", "delete_model", "(", "args", ":", "argparse", ".", "Namespace", ",", "backend", ":", "StorageBackend", ",", "log", ":", "logging", ".", "Logger", ")", ":", "try", ":", "meta", "=", "backend", ".", "index", ".", "remove_model", "(", "args", ".", "input", ")", "template_readme", "=", "backend", ".", "index", ".", "load_template", "(", "args", ".", "template_readme", ")", "backend", ".", "index", ".", "update_readme", "(", "template_readme", ")", "except", "ValueError", ":", "return", "1", "backend", ".", "delete_model", "(", "meta", ")", "log", ".", "info", "(", "\"Updating the models index...\"", ")", "try", ":", "backend", ".", "index", ".", "upload", "(", "\"delete\"", ",", "meta", ")", "except", "ValueError", ":", "# TODO: replace with PorcelainError", "return", "1", "log", ".", "info", "(", "\"Successfully deleted.\"", ")" ]
Delete a model. :param args: :class:`argparse.Namespace` with "input", "backend", "args", "meta", \ "update_default", "username", "password", "remote_repo", \ "template_model", "template_readme" and "log_level". :param backend: Backend which is responsible for working with model files. :param log: Logger supplied by supply_backend :return: None
[ "Delete", "a", "model", "." ]
python
train
django-extensions/django-extensions
django_extensions/management/commands/export_emails.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/export_emails.py#L117-L138
def vcard(self, qs):
    """VCARD format."""
    try:
        import vobject
    except ImportError:
        print(self.style.ERROR("Please install vobject to use the vcard export format."))
        sys.exit(1)

    out = sys.stdout
    for ent in qs:
        card = vobject.vCard()
        card.add('fn').value = full_name(**ent)
        if not ent['last_name'] and not ent['first_name']:
            # fallback to fullname, if both first and lastname are not declared
            card.add('n').value = vobject.vcard.Name(full_name(**ent))
        else:
            card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
        emailpart = card.add('email')
        emailpart.value = ent['email']
        emailpart.type_param = 'INTERNET'
        out.write(card.serialize())
[ "def", "vcard", "(", "self", ",", "qs", ")", ":", "try", ":", "import", "vobject", "except", "ImportError", ":", "print", "(", "self", ".", "style", ".", "ERROR", "(", "\"Please install vobject to use the vcard export format.\"", ")", ")", "sys", ".", "exit", "(", "1", ")", "out", "=", "sys", ".", "stdout", "for", "ent", "in", "qs", ":", "card", "=", "vobject", ".", "vCard", "(", ")", "card", ".", "add", "(", "'fn'", ")", ".", "value", "=", "full_name", "(", "*", "*", "ent", ")", "if", "not", "ent", "[", "'last_name'", "]", "and", "not", "ent", "[", "'first_name'", "]", ":", "# fallback to fullname, if both first and lastname are not declared", "card", ".", "add", "(", "'n'", ")", ".", "value", "=", "vobject", ".", "vcard", ".", "Name", "(", "full_name", "(", "*", "*", "ent", ")", ")", "else", ":", "card", ".", "add", "(", "'n'", ")", ".", "value", "=", "vobject", ".", "vcard", ".", "Name", "(", "ent", "[", "'last_name'", "]", ",", "ent", "[", "'first_name'", "]", ")", "emailpart", "=", "card", ".", "add", "(", "'email'", ")", "emailpart", ".", "value", "=", "ent", "[", "'email'", "]", "emailpart", ".", "type_param", "=", "'INTERNET'", "out", ".", "write", "(", "card", ".", "serialize", "(", ")", ")" ]
VCARD format.
[ "VCARD", "format", "." ]
python
train
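For reference, a standalone sketch of the per-row vCard construction the method performs; it requires the vobject package, and the name and address values are made up:

import vobject

card = vobject.vCard()
card.add('fn').value = 'Ada Lovelace'                        # formatted name
card.add('n').value = vobject.vcard.Name('Lovelace', 'Ada')  # family, given
emailpart = card.add('email')
emailpart.value = 'ada@example.com'
emailpart.type_param = 'INTERNET'
print(card.serialize())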
Shizmob/pydle
pydle/features/rfc1459/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L802-L813
async def on_raw_312(self, message):
    """ WHOIS server info. """
    target, nickname, server, serverinfo = message.params
    info = {
        'server': server,
        'server_info': serverinfo
    }

    if nickname in self._pending['whois']:
        self._whois_info[nickname].update(info)
    if nickname in self._pending['whowas']:
        self._whowas_info[nickname].update(info)
[ "async", "def", "on_raw_312", "(", "self", ",", "message", ")", ":", "target", ",", "nickname", ",", "server", ",", "serverinfo", "=", "message", ".", "params", "info", "=", "{", "'server'", ":", "server", ",", "'server_info'", ":", "serverinfo", "}", "if", "nickname", "in", "self", ".", "_pending", "[", "'whois'", "]", ":", "self", ".", "_whois_info", "[", "nickname", "]", ".", "update", "(", "info", ")", "if", "nickname", "in", "self", ".", "_pending", "[", "'whowas'", "]", ":", "self", ".", "_whowas_info", "[", "nickname", "]", ".", "update", "(", "info", ")" ]
WHOIS server info.
[ "WHOIS", "server", "info", "." ]
python
train
coala/coala-decorators-USE-cOALA-UTILS-INSTEAD
coala_decorators/__init__.py
https://github.com/coala/coala-decorators-USE-cOALA-UTILS-INSTEAD/blob/b1c4463f364bbcd0ad5138f697a52f11c9afe326/coala_decorators/__init__.py#L6-L29
def yield_once(iterator):
    """
    Decorator to make an iterator returned by a method yield each result only
    once.

    >>> @yield_once
    ... def generate_list(foo):
    ...     return foo
    >>> list(generate_list([1, 2, 1]))
    [1, 2]

    :param iterator: Any method that returns an iterator
    :return:         A method returning an iterator
                     that yields every result only once at most.
    """
    @wraps(iterator)
    def yield_once_generator(*args, **kwargs):
        yielded = set()
        for item in iterator(*args, **kwargs):
            if item not in yielded:
                yielded.add(item)
                yield item

    return yield_once_generator
[ "def", "yield_once", "(", "iterator", ")", ":", "@", "wraps", "(", "iterator", ")", "def", "yield_once_generator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yielded", "=", "set", "(", ")", "for", "item", "in", "iterator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "item", "not", "in", "yielded", ":", "yielded", ".", "add", "(", "item", ")", "yield", "item", "return", "yield_once_generator" ]
Decorator to make an iterator returned by a method yield each result only
once.

>>> @yield_once
... def generate_list(foo):
...     return foo
>>> list(generate_list([1, 2, 1]))
[1, 2]

:param iterator: Any method that returns an iterator
:return:         A method returning an iterator
                 that yields every result only once at most.
[ "Decorator", "to", "make", "an", "iterator", "returned", "by", "a", "method", "yield", "each", "result", "only", "once", "." ]
python
train
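A small usage sketch, assuming the yield_once decorator above is in scope:

@yield_once
def letters(word):
    return iter(word)

print(list(letters('banana')))  # ['b', 'a', 'n'] -- duplicates suppressed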
open-homeautomation/miflora
demo.py
https://github.com/open-homeautomation/miflora/blob/916606e7edc70bdc017dfbe681bc81771e0df7f3/demo.py#L47-L57
def _get_backend(args):
    """Extract the backend class from the command line arguments."""
    if args.backend == 'gatttool':
        backend = GatttoolBackend
    elif args.backend == 'bluepy':
        backend = BluepyBackend
    elif args.backend == 'pygatt':
        backend = PygattBackend
    else:
        raise Exception('unknown backend: {}'.format(args.backend))
    return backend
[ "def", "_get_backend", "(", "args", ")", ":", "if", "args", ".", "backend", "==", "'gatttool'", ":", "backend", "=", "GatttoolBackend", "elif", "args", ".", "backend", "==", "'bluepy'", ":", "backend", "=", "BluepyBackend", "elif", "args", ".", "backend", "==", "'pygatt'", ":", "backend", "=", "PygattBackend", "else", ":", "raise", "Exception", "(", "'unknown backend: {}'", ".", "format", "(", "args", ".", "backend", ")", ")", "return", "backend" ]
Extract the backend class from the command line arguments.
[ "Extract", "the", "backend", "class", "from", "the", "command", "line", "arguments", "." ]
python
train
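A sketch of the argparse namespace this helper expects; the parser setup here is illustrative, not the project's actual CLI definition:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--backend', choices=['gatttool', 'bluepy', 'pygatt'],
                    default='gatttool')
args = parser.parse_args(['--backend', 'bluepy'])
# _get_backend(args) would return the BluepyBackend class here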
codelv/enaml-native-cli
enamlnativecli/main.py
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1466-L1502
def run_tornado(self, args):
    """ Tornado dev server implementation """
    server = self
    import tornado.ioloop
    import tornado.web
    import tornado.websocket
    ioloop = tornado.ioloop.IOLoop.current()

    class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
        def open(self):
            super(DevWebSocketHandler, self).open()
            server.on_open(self)

        def on_message(self, message):
            server.on_message(self, message)

        def on_close(self):
            super(DevWebSocketHandler, self).on_close()
            server.on_close(self)

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write(server.index_page)

    #: Set the call later method
    server.call_later = ioloop.call_later
    server.add_callback = ioloop.add_callback

    app = tornado.web.Application([
        (r"/", MainHandler),
        (r"/dev", DevWebSocketHandler),
    ])
    app.listen(self.port)
    print("Tornado Dev server started on {}".format(self.port))
    ioloop.start()
[ "def", "run_tornado", "(", "self", ",", "args", ")", ":", "server", "=", "self", "import", "tornado", ".", "ioloop", "import", "tornado", ".", "web", "import", "tornado", ".", "websocket", "ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "class", "DevWebSocketHandler", "(", "tornado", ".", "websocket", ".", "WebSocketHandler", ")", ":", "def", "open", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "open", "(", ")", "server", ".", "on_open", "(", "self", ")", "def", "on_message", "(", "self", ",", "message", ")", ":", "server", ".", "on_message", "(", "self", ",", "message", ")", "def", "on_close", "(", "self", ")", ":", "super", "(", "DevWebSocketHandler", ",", "self", ")", ".", "on_close", "(", ")", "server", ".", "on_close", "(", "self", ")", "class", "MainHandler", "(", "tornado", ".", "web", ".", "RequestHandler", ")", ":", "def", "get", "(", "self", ")", ":", "self", ".", "write", "(", "server", ".", "index_page", ")", "#: Set the call later method", "server", ".", "call_later", "=", "ioloop", ".", "call_later", "server", ".", "add_callback", "=", "ioloop", ".", "add_callback", "app", "=", "tornado", ".", "web", ".", "Application", "(", "[", "(", "r\"/\"", ",", "MainHandler", ")", ",", "(", "r\"/dev\"", ",", "DevWebSocketHandler", ")", ",", "]", ")", "app", ".", "listen", "(", "self", ".", "port", ")", "print", "(", "\"Tornado Dev server started on {}\"", ".", "format", "(", "self", ".", "port", ")", ")", "ioloop", ".", "start", "(", ")" ]
Tornado dev server implementation
[ "Tornado", "dev", "server", "implementation" ]
python
train
dmsimard/python-cachetclient
cachetclient/cachet.py
https://github.com/dmsimard/python-cachetclient/blob/31bbc6d17ba5de088846e1ffae259b6755e672a0/cachetclient/cachet.py#L37-L47
def check_required_args(required_args, args):
    """
    Checks if all required_args have a value.
    :param required_args: list of required args
    :param args: kwargs
    :return: True (if an exception isn't raised)
    """
    for arg in required_args:
        if arg not in args:
            raise KeyError('Required argument: %s' % arg)
    return True
[ "def", "check_required_args", "(", "required_args", ",", "args", ")", ":", "for", "arg", "in", "required_args", ":", "if", "arg", "not", "in", "args", ":", "raise", "KeyError", "(", "'Required argument: %s'", "%", "arg", ")", "return", "True" ]
Checks if all required_args have a value.
:param required_args: list of required args
:param args: kwargs
:return: True (if an exception isn't raised)
[ "Checks", "if", "all", "required_args", "have", "a", "value", ".", ":", "param", "required_args", ":", "list", "of", "required", "args", ":", "param", "args", ":", "kwargs", ":", "return", ":", "True", "(", "if", "an", "exception", "isn", "t", "raised", ")" ]
python
train
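A quick sketch, assuming check_required_args above is in scope; the argument names are made up:

params = {'name': 'API', 'status': 1}
check_required_args(['name', 'status'], params)   # -> True
# check_required_args(['name', 'link'], params)   # raises KeyError('Required argument: link')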
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2591-L2602
def addInkAnnot(self, list):
    """Add a 'handwriting' as a list of lists of point-likes.

    Each sublist forms an independent stroke.
    """
    CheckParent(self)
    val = _fitz.Page_addInkAnnot(self, list)
    if not val:
        return
    val.thisown = True
    val.parent = weakref.proxy(self)
    self._annot_refs[id(val)] = val
    return val
[ "def", "addInkAnnot", "(", "self", ",", "list", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_addInkAnnot", "(", "self", ",", "list", ")", "if", "not", "val", ":", "return", "val", ".", "thisown", "=", "True", "val", ".", "parent", "=", "weakref", ".", "proxy", "(", "self", ")", "self", ".", "_annot_refs", "[", "id", "(", "val", ")", "]", "=", "val", "return", "val" ]
Add a 'handwriting' as a list of lists of point-likes. Each sublist forms an independent stroke.
[ "Add", "a", "handwriting", "as", "a", "list", "of", "list", "of", "point", "-", "likes", ".", "Each", "sublist", "forms", "an", "independent", "stroke", "." ]
python
train
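A usage sketch against PyMuPDF of the same vintage as this record (camelCase API); the coordinates and filename are made up:

import fitz  # PyMuPDF

doc = fitz.open()                                 # new, empty PDF
page = doc.newPage()
stroke1 = [(100, 100), (120, 130), (140, 110)]    # one stroke
stroke2 = [(150, 115), (170, 140)]                # another stroke
annot = page.addInkAnnot([stroke1, stroke2])
doc.save('ink.pdf')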
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3076-L3085
def get_process_page_breakpoints(self, dwProcessId):
    """
    @type  dwProcessId: int
    @param dwProcessId: Process global ID.

    @rtype:  list of L{PageBreakpoint}
    @return: All page breakpoints for the given process.
    """
    return [ bp for ((pid, address), bp) in compat.iteritems(self.__pageBP) \
            if pid == dwProcessId ]
[ "def", "get_process_page_breakpoints", "(", "self", ",", "dwProcessId", ")", ":", "return", "[", "bp", "for", "(", "(", "pid", ",", "address", ")", ",", "bp", ")", "in", "compat", ".", "iteritems", "(", "self", ".", "__pageBP", ")", "if", "pid", "==", "dwProcessId", "]" ]
@type  dwProcessId: int
@param dwProcessId: Process global ID.

@rtype:  list of L{PageBreakpoint}
@return: All page breakpoints for the given process.
[ "@type", "dwProcessId", ":", "int", "@param", "dwProcessId", ":", "Process", "global", "ID", "." ]
python
train
bspaans/python-mingus
mingus/extra/tablature.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tablature.py#L399-L422
def from_Suite(suite, maxwidth=80):
    """Convert a mingus.containers.Suite to an ASCII tablature string,
    complete with headers.

    This function makes use of the Suite's title, subtitle, author, email
    and description attributes.
    """
    subtitle = str(len(suite.compositions)) + ' Compositions' if suite.subtitle \
        == '' else suite.subtitle
    result = os.linesep.join(add_headers(
        maxwidth,
        suite.title,
        subtitle,
        suite.author,
        suite.email,
        suite.description,
    ))
    hr = maxwidth * '='
    n = os.linesep
    result = n + hr + n + result + n + hr + n + n
    for comp in suite:
        c = from_Composition(comp, maxwidth)
        result += c + n + hr + n + n
    return result
[ "def", "from_Suite", "(", "suite", ",", "maxwidth", "=", "80", ")", ":", "subtitle", "=", "str", "(", "len", "(", "suite", ".", "compositions", ")", ")", "+", "' Compositions'", "if", "suite", ".", "subtitle", "==", "''", "else", "suite", ".", "subtitle", "result", "=", "os", ".", "linesep", ".", "join", "(", "add_headers", "(", "maxwidth", ",", "suite", ".", "title", ",", "subtitle", ",", "suite", ".", "author", ",", "suite", ".", "email", ",", "suite", ".", "description", ",", ")", ")", "hr", "=", "maxwidth", "*", "'='", "n", "=", "os", ".", "linesep", "result", "=", "n", "+", "hr", "+", "n", "+", "result", "+", "n", "+", "hr", "+", "n", "+", "n", "for", "comp", "in", "suite", ":", "c", "=", "from_Composition", "(", "comp", ",", "maxwidth", ")", "result", "+=", "c", "+", "n", "+", "hr", "+", "n", "+", "n", "return", "result" ]
Convert a mingus.containers.Suite to an ASCII tablature string, complete with headers. This function makes use of the Suite's title, subtitle, author, email and description attributes.
[ "Convert", "a", "mingus", ".", "containers", ".", "Suite", "to", "an", "ASCII", "tablature", "string", "complete", "with", "headers", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/surface_analysis.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/surface_analysis.py#L1749-L1800
def plot_one_stability_map(self, analyzer, max_r, delu_dict=None, label="",
                           increments=50, delu_default=0, plt=None,
                           from_sphere_area=False, e_units="keV",
                           r_units="nanometers", normalize=False,
                           scale_per_atom=False):
    """
    Returns the plot of the formation energy of a particle against its
        effective radius

    Args:
        analyzer (SurfaceEnergyPlotter): Analyzer associated with the
            first polymorph
        max_r (float): The maximum radius of the particle to plot up to.
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        label (str): Label of the plot for legend
        increments (int): Number of plot points
        delu_default (float): Default value for all unset chemical potentials
        plt (pylab): Plot
        from_sphere_area (bool): There are two ways to calculate the bulk
            formation energy. Either by treating the volume and thus surface
            area of the particle as a perfect sphere, or as a Wulff shape.
        r_units (str): Can be nanometers or Angstrom
        e_units (str): Can be keV or eV
        normalize (bool): Whether or not to normalize energy by volume
    """
    plt = plt if plt else pretty_plot(width=8, height=7)
    wulffshape = analyzer.wulff_from_chempot(delu_dict=delu_dict,
                                             delu_default=delu_default,
                                             symprec=self.symprec)

    gform_list, r_list = [], []
    for r in np.linspace(1e-6, max_r, increments):
        gform, r = self.wulff_gform_and_r(wulffshape, analyzer.ucell_entry,
                                          r, from_sphere_area=from_sphere_area,
                                          r_units=r_units, e_units=e_units,
                                          normalize=normalize,
                                          scale_per_atom=scale_per_atom)
        gform_list.append(gform)
        r_list.append(r)

    ru = "nm" if r_units == "nanometers" else "\AA"
    plt.xlabel(r"Particle radius ($%s$)" % (ru))
    eu = "$%s/%s^3$" % (e_units, ru)
    plt.ylabel(r"$G_{form}$ (%s)" % (eu))

    plt.plot(r_list, gform_list, label=label)

    return plt
[ "def", "plot_one_stability_map", "(", "self", ",", "analyzer", ",", "max_r", ",", "delu_dict", "=", "None", ",", "label", "=", "\"\"", ",", "increments", "=", "50", ",", "delu_default", "=", "0", ",", "plt", "=", "None", ",", "from_sphere_area", "=", "False", ",", "e_units", "=", "\"keV\"", ",", "r_units", "=", "\"nanometers\"", ",", "normalize", "=", "False", ",", "scale_per_atom", "=", "False", ")", ":", "plt", "=", "plt", "if", "plt", "else", "pretty_plot", "(", "width", "=", "8", ",", "height", "=", "7", ")", "wulffshape", "=", "analyzer", ".", "wulff_from_chempot", "(", "delu_dict", "=", "delu_dict", ",", "delu_default", "=", "delu_default", ",", "symprec", "=", "self", ".", "symprec", ")", "gform_list", ",", "r_list", "=", "[", "]", ",", "[", "]", "for", "r", "in", "np", ".", "linspace", "(", "1e-6", ",", "max_r", ",", "increments", ")", ":", "gform", ",", "r", "=", "self", ".", "wulff_gform_and_r", "(", "wulffshape", ",", "analyzer", ".", "ucell_entry", ",", "r", ",", "from_sphere_area", "=", "from_sphere_area", ",", "r_units", "=", "r_units", ",", "e_units", "=", "e_units", ",", "normalize", "=", "normalize", ",", "scale_per_atom", "=", "scale_per_atom", ")", "gform_list", ".", "append", "(", "gform", ")", "r_list", ".", "append", "(", "r", ")", "ru", "=", "\"nm\"", "if", "r_units", "==", "\"nanometers\"", "else", "\"\\AA\"", "plt", ".", "xlabel", "(", "r\"Particle radius ($%s$)\"", "%", "(", "ru", ")", ")", "eu", "=", "\"$%s/%s^3$\"", "%", "(", "e_units", ",", "ru", ")", "plt", ".", "ylabel", "(", "r\"$G_{form}$ (%s)\"", "%", "(", "eu", ")", ")", "plt", ".", "plot", "(", "r_list", ",", "gform_list", ",", "label", "=", "label", ")", "return", "plt" ]
Returns the plot of the formation energy of a particle against its
    effective radius

Args:
    analyzer (SurfaceEnergyPlotter): Analyzer associated with the
        first polymorph
    max_r (float): The maximum radius of the particle to plot up to.
    delu_dict (Dict): Dictionary of the chemical potentials to be set as
        constant. Note the key should be a sympy Symbol object of the
        format: Symbol("delu_el") where el is the name of the element.
    label (str): Label of the plot for legend
    increments (int): Number of plot points
    delu_default (float): Default value for all unset chemical potentials
    plt (pylab): Plot
    from_sphere_area (bool): There are two ways to calculate the bulk
        formation energy. Either by treating the volume and thus surface
        area of the particle as a perfect sphere, or as a Wulff shape.
    r_units (str): Can be nanometers or Angstrom
    e_units (str): Can be keV or eV
    normalize (bool): Whether or not to normalize energy by volume
[ "Returns", "the", "plot", "of", "the", "formation", "energy", "of", "a", "particle", "against", "its", "effect", "radius" ]
python
train
openstack/networking-arista
networking_arista/common/db_lib.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L230-L238
def get_segments(segment_id=None):
    """Returns list of all network segments that may be relevant on CVX"""
    session = db.get_reader_session()
    with session.begin():
        model = segment_models.NetworkSegment
        segments = session.query(model).filter_unnecessary_segments()
        if segment_id:
            segments = segments.filter(model.id == segment_id)
        return segments.all()
[ "def", "get_segments", "(", "segment_id", "=", "None", ")", ":", "session", "=", "db", ".", "get_reader_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "model", "=", "segment_models", ".", "NetworkSegment", "segments", "=", "session", ".", "query", "(", "model", ")", ".", "filter_unnecessary_segments", "(", ")", "if", "segment_id", ":", "segments", "=", "segments", ".", "filter", "(", "model", ".", "id", "==", "segment_id", ")", "return", "segments", ".", "all", "(", ")" ]
Returns list of all network segments that may be relevant on CVX
[ "Returns", "list", "of", "all", "network", "segments", "that", "may", "be", "relevant", "on", "CVX" ]
python
train
googledatalab/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_local.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_local.py#L70-L79
def predict(model_dir, image_files, resize, show_image):
    """Predict using a model in a local or GCS directory."""

    from . import _predictor

    images = _util.load_images(image_files, resize=resize)
    labels_and_scores = _predictor.predict(model_dir, images)
    results = zip(image_files, images, labels_and_scores)
    ret = _util.process_prediction_results(results, show_image)
    return ret
[ "def", "predict", "(", "model_dir", ",", "image_files", ",", "resize", ",", "show_image", ")", ":", "from", ".", "import", "_predictor", "images", "=", "_util", ".", "load_images", "(", "image_files", ",", "resize", "=", "resize", ")", "labels_and_scores", "=", "_predictor", ".", "predict", "(", "model_dir", ",", "images", ")", "results", "=", "zip", "(", "image_files", ",", "images", ",", "labels_and_scores", ")", "ret", "=", "_util", ".", "process_prediction_results", "(", "results", ",", "show_image", ")", "return", "ret" ]
Predict using a model in a local or GCS directory.
[ "Predict", "using", "an", "model", "in", "a", "local", "or", "GCS", "directory", "." ]
python
train
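A hypothetical invocation, assuming the predict function above is in scope; the bucket and file paths are made up:

# The model directory may be local or a gs:// URL.
results = predict('gs://my-bucket/image_model',
                  ['photos/cat.jpg', 'photos/dog.jpg'],
                  resize=True, show_image=False)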
yeraydiazdiaz/lunr.py
lunr/match_data.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/match_data.py#L25-L51
def combine(self, other):
    """An instance of lunr.MatchData will be created for every term that
    matches a document.

    However only one instance is required in a lunr.Index~Result. This
    method combines metadata from another instance of MatchData with this
    object's metadata.
    """
    for term in other.metadata.keys():
        if term not in self.metadata:
            self.metadata[term] = {}

        fields = other.metadata[term].keys()
        for field in fields:
            if field not in self.metadata[term]:
                self.metadata[term][field] = {}

            keys = other.metadata[term][field].keys()
            for key in keys:
                if key not in self.metadata[term][field]:
                    self.metadata[term][field][key] = other.metadata[term][field][
                        key
                    ]
                else:
                    self.metadata[term][field][key].extend(
                        other.metadata[term][field][key]
                    )
[ "def", "combine", "(", "self", ",", "other", ")", ":", "for", "term", "in", "other", ".", "metadata", ".", "keys", "(", ")", ":", "if", "term", "not", "in", "self", ".", "metadata", ":", "self", ".", "metadata", "[", "term", "]", "=", "{", "}", "fields", "=", "other", ".", "metadata", "[", "term", "]", ".", "keys", "(", ")", "for", "field", "in", "fields", ":", "if", "field", "not", "in", "self", ".", "metadata", "[", "term", "]", ":", "self", ".", "metadata", "[", "term", "]", "[", "field", "]", "=", "{", "}", "keys", "=", "other", ".", "metadata", "[", "term", "]", "[", "field", "]", ".", "keys", "(", ")", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "self", ".", "metadata", "[", "term", "]", "[", "field", "]", ":", "self", ".", "metadata", "[", "term", "]", "[", "field", "]", "[", "key", "]", "=", "other", ".", "metadata", "[", "term", "]", "[", "field", "]", "[", "key", "]", "else", ":", "self", ".", "metadata", "[", "term", "]", "[", "field", "]", "[", "key", "]", ".", "extend", "(", "other", ".", "metadata", "[", "term", "]", "[", "field", "]", "[", "key", "]", ")" ]
An instance of lunr.MatchData will be created for every term that matches a document. However only one instance is required in a lunr.Index~Result. This method combines metadata from another instance of MatchData with this object's metadata.
[ "An", "instance", "of", "lunr", ".", "MatchData", "will", "be", "created", "for", "every", "term", "that", "matches", "a", "document", "." ]
python
train
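A sketch of the merge semantics using duck-typed stand-ins (only a .metadata attribute is needed), assuming the lunr package is installed; the terms and positions are made up:

from lunr.match_data import MatchData

class _MD(object):
    """Duck-typed stand-in with the same metadata shape."""
    def __init__(self, metadata):
        self.metadata = metadata

a = _MD({'cat': {'title': {'position': [[0, 3]]}}})
b = _MD({'cat': {'title': {'position': [[10, 3]]}}, 'dog': {}})
MatchData.combine(a, b)  # unbound call works on any object with .metadata
print(a.metadata['cat']['title']['position'])  # [[0, 3], [10, 3]]
print('dog' in a.metadata)                     # True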
fermiPy/fermipy
fermipy/jobs/slac_impl.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/slac_impl.py#L242-L262
def get_slac_default_args(job_time=1500):
    """ Create a batch job interface object.

    Parameters
    ----------

    job_time : int
        Expected max length of the job, in seconds.
        This is used to select the batch queue and set
        the job_check_sleep parameter that sets how often
        we check for job completion.
    """
    slac_default_args = dict(
        lsf_args={'W': job_time,
                  'R': '\"select[rhel60&&!fell]\"'},
        max_jobs=500,
        time_per_cycle=15,
        jobs_per_cycle=20,
        max_job_age=90,
        no_batch=False)
    return slac_default_args.copy()
[ "def", "get_slac_default_args", "(", "job_time", "=", "1500", ")", ":", "slac_default_args", "=", "dict", "(", "lsf_args", "=", "{", "'W'", ":", "job_time", ",", "'R'", ":", "'\\\"select[rhel60&&!fell]\\\"'", "}", ",", "max_jobs", "=", "500", ",", "time_per_cycle", "=", "15", ",", "jobs_per_cycle", "=", "20", ",", "max_job_age", "=", "90", ",", "no_batch", "=", "False", ")", "return", "slac_default_args", ".", "copy", "(", ")" ]
Create a batch job interface object.

Parameters
----------

job_time : int
    Expected max length of the job, in seconds.
    This is used to select the batch queue and set
    the job_check_sleep parameter that sets how often
    we check for job completion.
[ "Create", "a", "batch", "job", "interface", "object", "." ]
python
train
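A quick look at the returned mapping, assuming get_slac_default_args above is in scope:

cfg = get_slac_default_args(job_time=3000)
print(cfg['lsf_args'])   # {'W': 3000, 'R': '"select[rhel60&&!fell]"'}
print(cfg['max_jobs'])   # 500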
ulf1/oxyba
oxyba/jackknife_stats.py
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/jackknife_stats.py#L1-L105
def jackknife_stats(theta_subs, theta_full, N=None, d=1):
    """Compute Jackknife Estimates, SE, Bias, t-scores, p-values

    Parameters:
    -----------
    theta_subs : ndarray
        The metrics, estimates, parameters, etc. of the model (see "func")
        for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d)
        subsamples, and M parameters that are returned by the model.

    theta_full : ndarray
        The metrics, estimates, parameters, etc. of the model (see "func")
        for the full sample. It is a <1 x M> vector with the M parameters
        that are returned by the model.

    N : int
        The number of observations in the full sample. Is required for
        Delete-d Jackknife, i.e. d>1. (Default is N=None)

    d : int
        The number of observations to leave out for each Jackknife
        subsample, i.e. the subsample size is N-d. (The default is d=1 for
        the "Delete-1 Jackknife" procedure.)

    Returns:
    --------
    pvalues : ndarray
        The two-sided P-values of the t-Score for each Jackknife estimate.
        (In Social Sciences pval<0.05 is regarded as acceptable but it is
        usually better to look for p-values way closer to Zero. Just remove
        or replace a variable/feature with high pval>=pcritical and run the
        Jackknife again.)

    tscores : ndarray
        The t-Score for each Jackknife estimate. (As a rule of thumb a
        value abs(tscore)>2 indicates a bad model parameter but just check
        the p-value.)

    theta_jack : ndarray
        The bias-corrected Jackknife Estimates (model parameter, metric,
        coefficient, etc.). Use the parameters for prediction.

    se_jack : ndarray
        The Jackknife Standard Error

    theta_biased : ndarray
        The biased Jackknife Estimate.

    Other Variables:
    ----------------
    These variables occur in the source code as intermediate results.

    Q : int
        The Number of independent variables of a model (incl. intercept).

    C : int
        The number of Jackknife subsamples if d>1. There are
        C=binocoeff(N,d) combinations.
    """
    # The biased Jackknife Estimate
    import numpy as np
    theta_biased = np.mean(theta_subs, axis=0)

    # Inflation Factor for the Jackknife Standard Error
    # (use ==, not "is", for integer comparison)
    if d == 1:
        if N is None:
            N = theta_subs.shape[0]
        inflation = (N - 1) / N
    elif d > 1:
        if N is None:
            raise Exception((
                "If d>1 then you must provide N (number of "
                "observations in the full sample)"))
        C = theta_subs.shape[0]
        inflation = ((N - d) / d) / C

    # The Jackknife Standard Error
    se_jack = np.sqrt(
        inflation * np.sum((theta_subs - theta_biased)**2, axis=0))

    # The bias-corrected Jackknife Estimate
    theta_jack = N * theta_full - (N - 1) * theta_biased

    # The Jackknife t-Statistics
    tscores = theta_jack / se_jack

    # Two-sided P-values
    import scipy.stats
    Q = theta_subs.shape[1]
    pvalues = scipy.stats.t.sf(np.abs(tscores), N - Q - d) * 2

    # done
    return pvalues, tscores, theta_jack, se_jack, theta_biased
[ "def", "jackknife_stats", "(", "theta_subs", ",", "theta_full", ",", "N", "=", "None", ",", "d", "=", "1", ")", ":", "# The biased Jackknife Estimate", "import", "numpy", "as", "np", "theta_biased", "=", "np", ".", "mean", "(", "theta_subs", ",", "axis", "=", "0", ")", "# Inflation Factor for the Jackknife Standard Error", "if", "d", "is", "1", ":", "if", "N", "is", "None", ":", "N", "=", "theta_subs", ".", "shape", "[", "0", "]", "inflation", "=", "(", "N", "-", "1", ")", "/", "N", "elif", "d", ">", "1", ":", "if", "N", "is", "None", ":", "raise", "Exception", "(", "(", "\"If d>1 then you must provide N (number of \"", "\"observations in the full sample)\"", ")", ")", "C", "=", "theta_subs", ".", "shape", "[", "0", "]", "inflation", "=", "(", "(", "N", "-", "d", ")", "/", "d", ")", "/", "C", "# The Jackknife Standard Error", "se_jack", "=", "np", ".", "sqrt", "(", "inflation", "*", "np", ".", "sum", "(", "(", "theta_subs", "-", "theta_biased", ")", "**", "2", ",", "axis", "=", "0", ")", ")", "# The bias-corrected Jackknife Estimate", "theta_jack", "=", "N", "*", "theta_full", "-", "(", "N", "-", "1", ")", "*", "theta_biased", "# The Jackknife t-Statistics", "tscores", "=", "theta_jack", "/", "se_jack", "# Two-sided P-values", "import", "scipy", ".", "stats", "Q", "=", "theta_subs", ".", "shape", "[", "1", "]", "pvalues", "=", "scipy", ".", "stats", ".", "t", ".", "sf", "(", "np", ".", "abs", "(", "tscores", ")", ",", "N", "-", "Q", "-", "d", ")", "*", "2", "# done", "return", "pvalues", ",", "tscores", ",", "theta_jack", ",", "se_jack", ",", "theta_biased" ]
Compute Jackknife Estimates, SE, Bias, t-scores, p-values

Parameters:
-----------
theta_subs : ndarray
    The metrics, estimates, parameters, etc. of the model (see "func")
    for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d)
    subsamples, and M parameters that are returned by the model.

theta_full : ndarray
    The metrics, estimates, parameters, etc. of the model (see "func")
    for the full sample. It is a <1 x M> vector with the M parameters
    that are returned by the model.

N : int
    The number of observations in the full sample. Is required for
    Delete-d Jackknife, i.e. d>1. (Default is N=None)

d : int
    The number of observations to leave out for each Jackknife
    subsample, i.e. the subsample size is N-d. (The default is d=1 for
    the "Delete-1 Jackknife" procedure.)

Returns:
--------
pvalues : ndarray
    The two-sided P-values of the t-Score for each Jackknife estimate.
    (In Social Sciences pval<0.05 is regarded as acceptable but it is
    usually better to look for p-values way closer to Zero. Just remove
    or replace a variable/feature with high pval>=pcritical and run the
    Jackknife again.)

tscores : ndarray
    The t-Score for each Jackknife estimate. (As a rule of thumb a value
    abs(tscore)>2 indicates a bad model parameter but just check the
    p-value.)

theta_jack : ndarray
    The bias-corrected Jackknife Estimates (model parameter, metric,
    coefficient, etc.). Use the parameters for prediction.

se_jack : ndarray
    The Jackknife Standard Error

theta_biased : ndarray
    The biased Jackknife Estimate.

Other Variables:
----------------
These variables occur in the source code as intermediate results.

Q : int
    The Number of independent variables of a model (incl. intercept).

C : int
    The number of Jackknife subsamples if d>1. There are
    C=binocoeff(N,d) combinations.
[ "Compute", "Jackknife", "Estimates", "SE", "Bias", "t", "-", "scores", "p", "-", "values" ]
python
train
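A delete-1 jackknife of the sample mean as a worked sketch, assuming jackknife_stats above is in scope; the data are synthetic:

import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(loc=5.0, scale=1.0, size=20)
N = x.size

# Delete-1 subsample estimates of the sample mean, shaped <N x 1>
theta_subs = np.array([[np.delete(x, i).mean()] for i in range(N)])
theta_full = np.array([x.mean()])

pvals, tscores, theta_jack, se_jack, biased = jackknife_stats(
    theta_subs, theta_full, N=N, d=1)
print(theta_jack, se_jack, pvals)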
vanheeringen-lab/gimmemotifs
gimmemotifs/scanner.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L378-L428
def set_background(self, fname=None, genome=None, length=200, nseq=10000):
    """Set the background to use for FPR and z-score calculations.

    Background can be specified either as a genome name or as the
    name of a FASTA file.

    Parameters
    ----------
    fname : str, optional
        Name of FASTA file to use as background.

    genome : str, optional
        Name of genome to use to retrieve random sequences.

    length : int, optional
        Length of genomic sequences to retrieve. The default
        is 200.

    nseq : int, optional
        Number of genomic sequences to retrieve.
    """
    length = int(length)

    if genome and fname:
        raise ValueError("Need either genome or filename for background.")

    if fname:
        if not os.path.exists(fname):
            raise IOError("Background file {} does not exist!".format(fname))

        self.background = Fasta(fname)
        self.background_hash = file_checksum(fname)
        return

    if not genome:
        if self.genome:
            genome = self.genome
            logger.info("Using default background: genome {} with length {}".format(
                genome, length))
        else:
            raise ValueError("Need either genome or filename for background.")

    logger.info("Using background: genome {} with length {}".format(genome, length))

    with Cache(CACHE_DIR) as cache:
        self.background_hash = "{}\{}".format(genome, int(length))
        fa = cache.get(self.background_hash)
        if not fa:
            fa = RandomGenomicFasta(genome, length, nseq)
            cache.set(self.background_hash, fa)
    self.background = fa
[ "def", "set_background", "(", "self", ",", "fname", "=", "None", ",", "genome", "=", "None", ",", "length", "=", "200", ",", "nseq", "=", "10000", ")", ":", "length", "=", "int", "(", "length", ")", "if", "genome", "and", "fname", ":", "raise", "ValueError", "(", "\"Need either genome or filename for background.\"", ")", "if", "fname", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "raise", "IOError", "(", "\"Background file {} does not exist!\"", ".", "format", "(", "fname", ")", ")", "self", ".", "background", "=", "Fasta", "(", "fname", ")", "self", ".", "background_hash", "=", "file_checksum", "(", "fname", ")", "return", "if", "not", "genome", ":", "if", "self", ".", "genome", ":", "genome", "=", "self", ".", "genome", "logger", ".", "info", "(", "\"Using default background: genome {} with length {}\"", ".", "format", "(", "genome", ",", "length", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Need either genome or filename for background.\"", ")", "logger", ".", "info", "(", "\"Using background: genome {} with length {}\"", ".", "format", "(", "genome", ",", "length", ")", ")", "with", "Cache", "(", "CACHE_DIR", ")", "as", "cache", ":", "self", ".", "background_hash", "=", "\"{}\\{}\"", ".", "format", "(", "genome", ",", "int", "(", "length", ")", ")", "fa", "=", "cache", ".", "get", "(", "self", ".", "background_hash", ")", "if", "not", "fa", ":", "fa", "=", "RandomGenomicFasta", "(", "genome", ",", "length", ",", "nseq", ")", "cache", ".", "set", "(", "self", ".", "background_hash", ",", "fa", ")", "self", ".", "background", "=", "fa" ]
Set the background to use for FPR and z-score calculations.

Background can be specified either as a genome name or as the
name of a FASTA file.

Parameters
----------
fname : str, optional
    Name of FASTA file to use as background.

genome : str, optional
    Name of genome to use to retrieve random sequences.

length : int, optional
    Length of genomic sequences to retrieve. The default
    is 200.

nseq : int, optional
    Number of genomic sequences to retrieve.
[ "Set", "the", "background", "to", "use", "for", "FPR", "and", "z", "-", "score", "calculations", "." ]
python
train
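A usage sketch against the gimmemotifs Scanner class; it assumes the package is installed and that the named genome ('hg38' here, an example) is already available locally:

from gimmemotifs.scanner import Scanner

s = Scanner()
s.set_background(genome='hg38', length=200, nseq=10000)
# or, from a FASTA file instead of a genome:
# s.set_background(fname='random_bg.fa')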
apache/incubator-superset
superset/models/helpers.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L99-L184
def import_from_dict(cls, session, dict_rep, parent=None,
                     recursive=True, sync=[]):
    """Import obj from a dictionary"""
    parent_refs = cls._parent_foreign_key_mappings()
    export_fields = set(cls.export_fields) | set(parent_refs.keys())
    new_children = {c: dict_rep.get(c) for c in cls.export_children
                    if c in dict_rep}
    unique_constrains = cls._unique_constrains()
    filters = []  # Using these filters to check if obj already exists

    # Remove fields that should not get imported
    for k in list(dict_rep):
        if k not in export_fields:
            del dict_rep[k]

    if not parent:
        if cls.export_parent:
            for p in parent_refs.keys():
                if p not in dict_rep:
                    raise RuntimeError(
                        '{0}: Missing field {1}'.format(cls.__name__, p))
    else:
        # Set foreign keys to parent obj
        for k, v in parent_refs.items():
            dict_rep[k] = getattr(parent, v)

        # Add filter for parent obj
        filters.extend([getattr(cls, k) == dict_rep.get(k)
                        for k in parent_refs.keys()])

    # Add filter for unique constraints
    ucs = [and_(*[getattr(cls, k) == dict_rep.get(k)
                  for k in cs
                  if dict_rep.get(k) is not None])
           for cs in unique_constrains]
    filters.append(or_(*ucs))

    # Check if object already exists in DB, break if more than one is found
    try:
        obj_query = session.query(cls).filter(and_(*filters))
        obj = obj_query.one_or_none()
    except MultipleResultsFound as e:
        logging.error('Error importing %s \n %s \n %s', cls.__name__,
                      str(obj_query),
                      yaml.safe_dump(dict_rep))
        raise e

    if not obj:
        is_new_obj = True
        # Create new DB object
        obj = cls(**dict_rep)
        logging.info('Importing new %s %s', obj.__tablename__, str(obj))
        if cls.export_parent and parent:
            setattr(obj, cls.export_parent, parent)
        session.add(obj)
    else:
        is_new_obj = False
        logging.info('Updating %s %s', obj.__tablename__, str(obj))
        # Update columns
        for k, v in dict_rep.items():
            setattr(obj, k, v)

    # Recursively create children
    if recursive:
        for c in cls.export_children:
            child_class = cls.__mapper__.relationships[c].argument.class_
            added = []
            for c_obj in new_children.get(c, []):
                added.append(child_class.import_from_dict(session=session,
                                                          dict_rep=c_obj,
                                                          parent=obj,
                                                          sync=sync))
            # If children should get synced, delete the ones that did not
            # get updated.
            if c in sync and not is_new_obj:
                back_refs = child_class._parent_foreign_key_mappings()
                delete_filters = [getattr(child_class, k) ==
                                  getattr(obj, back_refs.get(k))
                                  for k in back_refs.keys()]
                to_delete = set(session.query(child_class).filter(
                    and_(*delete_filters))).difference(set(added))
                for o in to_delete:
                    logging.info('Deleting %s %s', c, str(obj))
                    session.delete(o)

    return obj
[ "def", "import_from_dict", "(", "cls", ",", "session", ",", "dict_rep", ",", "parent", "=", "None", ",", "recursive", "=", "True", ",", "sync", "=", "[", "]", ")", ":", "parent_refs", "=", "cls", ".", "_parent_foreign_key_mappings", "(", ")", "export_fields", "=", "set", "(", "cls", ".", "export_fields", ")", "|", "set", "(", "parent_refs", ".", "keys", "(", ")", ")", "new_children", "=", "{", "c", ":", "dict_rep", ".", "get", "(", "c", ")", "for", "c", "in", "cls", ".", "export_children", "if", "c", "in", "dict_rep", "}", "unique_constrains", "=", "cls", ".", "_unique_constrains", "(", ")", "filters", "=", "[", "]", "# Using these filters to check if obj already exists", "# Remove fields that should not get imported", "for", "k", "in", "list", "(", "dict_rep", ")", ":", "if", "k", "not", "in", "export_fields", ":", "del", "dict_rep", "[", "k", "]", "if", "not", "parent", ":", "if", "cls", ".", "export_parent", ":", "for", "p", "in", "parent_refs", ".", "keys", "(", ")", ":", "if", "p", "not", "in", "dict_rep", ":", "raise", "RuntimeError", "(", "'{0}: Missing field {1}'", ".", "format", "(", "cls", ".", "__name__", ",", "p", ")", ")", "else", ":", "# Set foreign keys to parent obj", "for", "k", ",", "v", "in", "parent_refs", ".", "items", "(", ")", ":", "dict_rep", "[", "k", "]", "=", "getattr", "(", "parent", ",", "v", ")", "# Add filter for parent obj", "filters", ".", "extend", "(", "[", "getattr", "(", "cls", ",", "k", ")", "==", "dict_rep", ".", "get", "(", "k", ")", "for", "k", "in", "parent_refs", ".", "keys", "(", ")", "]", ")", "# Add filter for unique constraints", "ucs", "=", "[", "and_", "(", "*", "[", "getattr", "(", "cls", ",", "k", ")", "==", "dict_rep", ".", "get", "(", "k", ")", "for", "k", "in", "cs", "if", "dict_rep", ".", "get", "(", "k", ")", "is", "not", "None", "]", ")", "for", "cs", "in", "unique_constrains", "]", "filters", ".", "append", "(", "or_", "(", "*", "ucs", ")", ")", "# Check if object already exists in DB, break if more than one is found", "try", ":", "obj_query", "=", "session", ".", "query", "(", "cls", ")", ".", "filter", "(", "and_", "(", "*", "filters", ")", ")", "obj", "=", "obj_query", ".", "one_or_none", "(", ")", "except", "MultipleResultsFound", "as", "e", ":", "logging", ".", "error", "(", "'Error importing %s \\n %s \\n %s'", ",", "cls", ".", "__name__", ",", "str", "(", "obj_query", ")", ",", "yaml", ".", "safe_dump", "(", "dict_rep", ")", ")", "raise", "e", "if", "not", "obj", ":", "is_new_obj", "=", "True", "# Create new DB object", "obj", "=", "cls", "(", "*", "*", "dict_rep", ")", "logging", ".", "info", "(", "'Importing new %s %s'", ",", "obj", ".", "__tablename__", ",", "str", "(", "obj", ")", ")", "if", "cls", ".", "export_parent", "and", "parent", ":", "setattr", "(", "obj", ",", "cls", ".", "export_parent", ",", "parent", ")", "session", ".", "add", "(", "obj", ")", "else", ":", "is_new_obj", "=", "False", "logging", ".", "info", "(", "'Updating %s %s'", ",", "obj", ".", "__tablename__", ",", "str", "(", "obj", ")", ")", "# Update columns", "for", "k", ",", "v", "in", "dict_rep", ".", "items", "(", ")", ":", "setattr", "(", "obj", ",", "k", ",", "v", ")", "# Recursively create children", "if", "recursive", ":", "for", "c", "in", "cls", ".", "export_children", ":", "child_class", "=", "cls", ".", "__mapper__", ".", "relationships", "[", "c", "]", ".", "argument", ".", "class_", "added", "=", "[", "]", "for", "c_obj", "in", "new_children", ".", "get", "(", "c", ",", "[", "]", ")", ":", "added", ".", "append", "(", "child_class", 
".", "import_from_dict", "(", "session", "=", "session", ",", "dict_rep", "=", "c_obj", ",", "parent", "=", "obj", ",", "sync", "=", "sync", ")", ")", "# If children should get synced, delete the ones that did not", "# get updated.", "if", "c", "in", "sync", "and", "not", "is_new_obj", ":", "back_refs", "=", "child_class", ".", "_parent_foreign_key_mappings", "(", ")", "delete_filters", "=", "[", "getattr", "(", "child_class", ",", "k", ")", "==", "getattr", "(", "obj", ",", "back_refs", ".", "get", "(", "k", ")", ")", "for", "k", "in", "back_refs", ".", "keys", "(", ")", "]", "to_delete", "=", "set", "(", "session", ".", "query", "(", "child_class", ")", ".", "filter", "(", "and_", "(", "*", "delete_filters", ")", ")", ")", ".", "difference", "(", "set", "(", "added", ")", ")", "for", "o", "in", "to_delete", ":", "logging", ".", "info", "(", "'Deleting %s %s'", ",", "c", ",", "str", "(", "obj", ")", ")", "session", ".", "delete", "(", "o", ")", "return", "obj" ]
Import obj from a dictionary
[ "Import", "obj", "from", "a", "dictionary" ]
python
train
ponty/confduino
confduino/libinstall.py
https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/libinstall.py#L130-L143
def move_examples(root, lib_dir):
    """find examples not under lib dir, and move into ``examples``"""
    all_pde = files_multi_pattern(root, INO_PATTERNS)
    lib_pde = files_multi_pattern(lib_dir, INO_PATTERNS)
    stray_pde = all_pde.difference(lib_pde)
    if len(stray_pde) and not len(lib_pde):
        log.debug(
            'examples found outside lib dir, moving them: %s', stray_pde)
        examples = lib_dir / EXAMPLES
        examples.makedirs()
        for x in stray_pde:
            d = examples / x.namebase
            d.makedirs()
            x.move(d)
[ "def", "move_examples", "(", "root", ",", "lib_dir", ")", ":", "all_pde", "=", "files_multi_pattern", "(", "root", ",", "INO_PATTERNS", ")", "lib_pde", "=", "files_multi_pattern", "(", "lib_dir", ",", "INO_PATTERNS", ")", "stray_pde", "=", "all_pde", ".", "difference", "(", "lib_pde", ")", "if", "len", "(", "stray_pde", ")", "and", "not", "len", "(", "lib_pde", ")", ":", "log", ".", "debug", "(", "'examples found outside lib dir, moving them: %s'", ",", "stray_pde", ")", "examples", "=", "lib_dir", "/", "EXAMPLES", "examples", ".", "makedirs", "(", ")", "for", "x", "in", "stray_pde", ":", "d", "=", "examples", "/", "x", ".", "namebase", "d", ".", "makedirs", "(", ")", "x", ".", "move", "(", "d", ")" ]
find examples not under lib dir, and move into ``examples``
[ "find", "examples", "not", "under", "lib", "dir", "and", "move", "into", "examples" ]
python
train
adamkerz/django-presentation
django_presentation/forms/fields/TypedChoiceField.py
https://github.com/adamkerz/django-presentation/blob/1e812faa5f682e021fa6580509d8d324cfcc119c/django_presentation/forms/fields/TypedChoiceField.py#L15-L27
def to_python(self, value):
    """
    Validates that the value is in self.choices and can be coerced to the
    right type.
    """
    if value == self.emptyValue or value in EMPTY_VALUES:
        return self.emptyValue
    try:
        value = self.coerce(value)
    except (ValueError, TypeError, ValidationError):
        raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
    return value
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "emptyValue", "or", "value", "in", "EMPTY_VALUES", ":", "return", "self", ".", "emptyValue", "try", ":", "value", "=", "self", ".", "coerce", "(", "value", ")", "except", "(", "ValueError", ",", "TypeError", ",", "ValidationError", ")", ":", "raise", "ValidationError", "(", "self", ".", "error_messages", "[", "'invalid_choice'", "]", "%", "{", "'value'", ":", "value", "}", ")", "return", "value" ]
Validates that the value is in self.choices and can be coerced to the right type.
[ "Validates", "that", "the", "value", "is", "in", "self", ".", "choices", "and", "can", "be", "coerced", "to", "the", "right", "type", "." ]
python
train
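For comparison, the coercion behaviour this subclass mirrors can be seen on Django's stock TypedChoiceField (which spells the hook empty_value rather than this class's emptyValue); run inside a configured Django project:

from django.forms import TypedChoiceField

field = TypedChoiceField(choices=[('1', 'one'), ('2', 'two')],
                         coerce=int, empty_value=None, required=False)
field.clean('2')   # -> 2 (coerced to int)
field.clean('')    # -> None (the empty value)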
avanwyk/cipy
cipy/algorithms/pso/functions.py
https://github.com/avanwyk/cipy/blob/98450dd01767b3615c113e50dc396f135e177b29/cipy/algorithms/pso/functions.py#L168-L194
def update_particle(position_update, velocity_update, state, nbest_topology,
                    idx_particle):
    """ Update function for a particle.

    Calculates and updates the velocity and position of a particle for a
    single iteration of the PSO algorithm. Social best particle is
    determined by the state.params['topology'] function.

    Args:
        state: cipy.algorithms.pso.State: The state of the PSO algorithm.
        nbest_topology: dict: Containing neighbourhood best index for each
            particle index.
        idx_particle: tuple: Tuple of the index of the particle and the
            particle itself.

    Returns:
        cipy.algorithms.pso.Particle: A new particle with the updated
            position and velocity.
    """
    (idx, particle) = idx_particle
    nbest = state.swarm[nbest_topology[idx]].best_position

    velocity = velocity_update(particle, nbest, state)
    position = position_update(particle.position, velocity)

    return particle._replace(position=position, velocity=velocity)
[ "def", "update_particle", "(", "position_update", ",", "velocity_update", ",", "state", ",", "nbest_topology", ",", "idx_particle", ")", ":", "(", "idx", ",", "particle", ")", "=", "idx_particle", "nbest", "=", "state", ".", "swarm", "[", "nbest_topology", "[", "idx", "]", "]", ".", "best_position", "velocity", "=", "velocity_update", "(", "particle", ",", "nbest", ",", "state", ")", "position", "=", "position_update", "(", "particle", ".", "position", ",", "velocity", ")", "return", "particle", ".", "_replace", "(", "position", "=", "position", ",", "velocity", "=", "velocity", ")" ]
Update function for a particle.

Calculates and updates the velocity and position of a particle for a
single iteration of the PSO algorithm. Social best particle is
determined by the state.params['topology'] function.

Args:
    state: cipy.algorithms.pso.State: The state of the PSO algorithm.
    nbest_topology: dict: Containing neighbourhood best index for each
        particle index.
    idx_particle: tuple: Tuple of the index of the particle and the
        particle itself.

Returns:
    cipy.algorithms.pso.Particle: A new particle with the updated
        position and velocity.
[ "Update", "function", "for", "a", "particle", "." ]
python
train
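A self-contained toy sketch of one update step, assuming update_particle above is in scope; the Particle/State namedtuples and update rules here are stand-ins, not cipy's real definitions:

import collections

Particle = collections.namedtuple('Particle',
                                  ['position', 'velocity', 'best_position'])
State = collections.namedtuple('State', ['swarm', 'params'])

def position_update(position, velocity):
    return [x + v for x, v in zip(position, velocity)]

def velocity_update(particle, nbest, state):
    # toy rule: drift half-way towards the neighbourhood best
    return [0.5 * (nb - x) for nb, x in zip(nbest, particle.position)]

p = Particle(position=[0.0, 0.0], velocity=[0.0, 0.0],
             best_position=[0.0, 0.0])
state = State(swarm=[p, Particle([4.0, 4.0], [0.0, 0.0], [4.0, 4.0])],
              params={})
nbest_topology = {0: 1, 1: 0}  # ring of two particles

new_p = update_particle(position_update, velocity_update, state,
                        nbest_topology, (0, p))
print(new_p.position)  # [2.0, 2.0]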
pycontribs/python-crowd
crowd.py
https://github.com/pycontribs/python-crowd/blob/a075e45774dd5baecf0217843cda747084268e32/crowd.py#L321-L380
def add_user(self, username, raise_on_error=False, **kwargs):
    """Add a user to the directory

    Args:
        username: The account username
        raise_on_error: optional (default: False)

        **kwargs: key-value pairs:
                  password: mandatory
                  email: mandatory
                  first_name: optional
                  last_name: optional
                  display_name: optional
                  active: optional (default True)

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    # Check that mandatory elements have been provided
    if 'password' not in kwargs:
        raise ValueError("missing password")
    if 'email' not in kwargs:
        raise ValueError("missing email")

    # Populate data with default and mandatory values.
    # A KeyError means a mandatory value was not provided,
    # so raise a ValueError indicating bad args.
    try:
        data = {
            "name": username,
            "first-name": username,
            "last-name": username,
            "display-name": username,
            "email": kwargs["email"],
            "password": {"value": kwargs["password"]},
            "active": True
        }
    except KeyError:
        # raise (not return) so bad args actually surface as an error
        raise ValueError("missing mandatory argument")

    # Remove special case 'password'
    del(kwargs["password"])

    # Put values from kwargs into data
    for k, v in kwargs.items():
        new_k = k.replace("_", "-")
        if new_k not in data:
            raise ValueError("invalid argument %s" % k)
        data[new_k] = v

    response = self._post(self.rest_url + "/user",
                          data=json.dumps(data))

    if response.status_code == 201:
        return True

    if raise_on_error:
        raise RuntimeError(response.json()['message'])

    return False
[ "def", "add_user", "(", "self", ",", "username", ",", "raise_on_error", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Check that mandatory elements have been provided", "if", "'password'", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "\"missing password\"", ")", "if", "'email'", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "\"missing email\"", ")", "# Populate data with default and mandatory values.", "# A KeyError means a mandatory value was not provided,", "# so raise a ValueError indicating bad args.", "try", ":", "data", "=", "{", "\"name\"", ":", "username", ",", "\"first-name\"", ":", "username", ",", "\"last-name\"", ":", "username", ",", "\"display-name\"", ":", "username", ",", "\"email\"", ":", "kwargs", "[", "\"email\"", "]", ",", "\"password\"", ":", "{", "\"value\"", ":", "kwargs", "[", "\"password\"", "]", "}", ",", "\"active\"", ":", "True", "}", "except", "KeyError", ":", "return", "ValueError", "# Remove special case 'password'", "del", "(", "kwargs", "[", "\"password\"", "]", ")", "# Put values from kwargs into data", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "new_k", "=", "k", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", "if", "new_k", "not", "in", "data", ":", "raise", "ValueError", "(", "\"invalid argument %s\"", "%", "k", ")", "data", "[", "new_k", "]", "=", "v", "response", "=", "self", ".", "_post", "(", "self", ".", "rest_url", "+", "\"/user\"", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "if", "response", ".", "status_code", "==", "201", ":", "return", "True", "if", "raise_on_error", ":", "raise", "RuntimeError", "(", "response", ".", "json", "(", ")", "[", "'message'", "]", ")", "return", "False" ]
Add a user to the directory Args: username: The account username raise_on_error: optional (default: False) **kwargs: key-value pairs: password: mandatory email: mandatory first_name: optional last_name: optional display_name: optional active: optional (default True) Returns: True: Succeeded False: If unsuccessful
[ "Add", "a", "user", "to", "the", "directory" ]
python
train
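A usage sketch; the server URL and credentials are made up, and the CrowdServer constructor arguments shown here (crowd_url, app_name, app_pass) are an assumption about this module's API:

from crowd import CrowdServer

cs = CrowdServer('https://crowd.example.com/crowd', 'app-name', 'app-pass')
ok = cs.add_user('jdoe',
                 password='s3cret',
                 email='jdoe@example.com',
                 first_name='John',
                 last_name='Doe',
                 raise_on_error=True)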
HHammond/PrettyPandas
prettypandas/summarizer.py
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L335-L342
def as_percent(self, precision=2, *args, **kwargs):
    """Format subset as percentages

    :param precision: Decimal precision
    :param subset: Pandas subset
    """
    f = Formatter(as_percent(precision), args, kwargs)
    return self._add_formatter(f)
[ "def", "as_percent", "(", "self", ",", "precision", "=", "2", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "=", "Formatter", "(", "as_percent", "(", "precision", ")", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_add_formatter", "(", "f", ")" ]
Format subset as percentages :param precision: Decimal precision :param subset: Pandas subset
[ "Format", "subset", "as", "percentages" ]
python
train
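A usage sketch, assuming prettypandas and pandas are installed; the column and values are made up:

import pandas as pd
from prettypandas import PrettyPandas

df = pd.DataFrame({'conversion': [0.1234, 0.5678]})
styled = PrettyPandas(df).as_percent(precision=1)  # renders 12.3%, 56.8%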
linode/linode_api4-python
linode_api4/login_client.py
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/login_client.py#L417-L441
def expire_token(self, token):
    """
    Given a token, makes a request to the authentication server to expire
    it immediately.  This is considered a responsible way to log out a
    user.  If you simply remove the session your application has for the
    user without expiring their token, the user is not _really_ logged out.

    :param token: The OAuth token you wish to expire
    :type token: str

    :returns: If the expiration attempt succeeded.
    :rtype: bool

    :raises ApiError: If the expiration attempt failed.
    """
    r = requests.post(self._login_uri("/oauth/token/expire"),
                      data={
                          "client_id": self.client_id,
                          "client_secret": self.client_secret,
                          "token": token,
                      })
    if r.status_code != 200:
        raise ApiError("Failed to expire token!", r)
    return True
[ "def", "expire_token", "(", "self", ",", "token", ")", ":", "r", "=", "requests", ".", "post", "(", "self", ".", "_login_uri", "(", "\"/oauth/token/expire\"", ")", ",", "data", "=", "{", "\"client_id\"", ":", "self", ".", "client_id", ",", "\"client_secret\"", ":", "self", ".", "client_secret", ",", "\"token\"", ":", "token", ",", "}", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "ApiError", "(", "\"Failed to expire token!\"", ",", "r", ")", "return", "True" ]
Given a token, makes a request to the authentication server to expire it immediately. This is considered a responsible way to log out a user. If you simply remove the session your application has for the user without expiring their token, the user is not _really_ logged out. :param token: The OAuth token you wish to expire :type token: str :returns: If the expiration attempt succeeded. :rtype: bool :raises ApiError: If the expiration attempt failed.
[ "Given", "a", "token", "makes", "a", "request", "to", "the", "authentication", "server", "to", "expire", "it", "immediately", ".", "This", "is", "considered", "a", "responsible", "way", "to", "log", "out", "a", "user", ".", "If", "you", "simply", "remove", "the", "session", "your", "application", "has", "for", "the", "user", "without", "expiring", "their", "token", "the", "user", "is", "not", "_really_", "logged", "out", "." ]
python
train
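A usage sketch; the client id, secret, and token are made up, and importing LinodeLoginClient from the package root is an assumption:

from linode_api4 import LinodeLoginClient

client = LinodeLoginClient('my-client-id', 'my-client-secret')
if client.expire_token('token-to-revoke'):
    print('Token revoked; the user is fully logged out.')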
moderngl/moderngl
examples/window/__init__.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/__init__.py#L65-L99
def parse_args(args=None):
    """Parse arguments from sys.argv"""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '-w', '--window',
        default="pyqt5",
        choices=find_window_classes(),
        help='Name for the window type to use',
    )
    parser.add_argument(
        '-fs', '--fullscreen',
        action="store_true",
        help='Open the window in fullscreen mode',
    )
    parser.add_argument(
        '-vs', '--vsync',
        type=str2bool,
        default="1",
        help="Enable or disable vsync",
    )
    parser.add_argument(
        '-s', '--samples',
        type=int,
        default=4,
        help="Specify the desired number of samples to use for multisampling",
    )
    parser.add_argument(
        '-c', '--cursor',
        type=str2bool,
        default="true",
        help="Enable or disable displaying the mouse cursor",
    )

    return parser.parse_args(args or sys.argv[1:])
[ "def", "parse_args", "(", "args", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-w'", ",", "'--window'", ",", "default", "=", "\"pyqt5\"", ",", "choices", "=", "find_window_classes", "(", ")", ",", "help", "=", "'Name for the window type to use'", ",", ")", "parser", ".", "add_argument", "(", "'-fs'", ",", "'--fullscreen'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "'Open the window in fullscreen mode'", ",", ")", "parser", ".", "add_argument", "(", "'-vs'", ",", "'--vsync'", ",", "type", "=", "str2bool", ",", "default", "=", "\"1\"", ",", "help", "=", "\"Enable or disable vsync\"", ",", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--samples'", ",", "type", "=", "int", ",", "default", "=", "4", ",", "help", "=", "\"Specify the desired number of samples to use for multisampling\"", ",", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "'--cursor'", ",", "type", "=", "str2bool", ",", "default", "=", "\"true\"", ",", "help", "=", "\"Enable or disable displaying the mouse cursor\"", ",", ")", "return", "parser", ".", "parse_args", "(", "args", "or", "sys", ".", "argv", "[", "1", ":", "]", ")" ]
Parse arguments from sys.argv
[ "Parse", "arguments", "from", "sys", ".", "argv" ]
python
train
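A quick sketch, assuming parse_args above is in scope and that the module's str2bool helper accepts '0'/'1' strings (its defaults suggest it does):

# Defaults bypass the argparse 'choices' check, so -w can be omitted.
opts = parse_args(['-fs', '-s', '8', '-vs', '0'])
print(opts.fullscreen, opts.samples, opts.vsync)  # True 8 False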
vinci1it2000/schedula
schedula/utils/sol.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L1089-L1127
def _see_remote_link_node(self, node_id, fringe=None, dist=None,
                          check_dsp=lambda x: True):
    """
    See data remote links of the node (set output to remote links).

    :param node_id:
        Node id.
    :type node_id: str

    :param fringe:
        Heapq of closest available nodes.
    :type fringe: list[(float | int, bool, (str, Dispatcher)]

    :param dist:
        Distance from the starting node.
    :type dist: float, int

    :param check_dsp:
        A function to check if the remote dispatcher is ok.
    :type check_dsp: (Dispatcher) -> bool
    """
    # Namespace shortcut.
    node, p_id, c_i = self.nodes[node_id], self.index[:-1], self.index[-1:]

    if node['type'] == 'data' and p_id and check_dsp(p_id):
        sol = self.sub_sol[self.index[:-1]]  # Get parent solution.
        for dsp_id, n in sol.dsp.nodes.items():
            if n['index'] == c_i and node_id in n.get('outputs', {}):
                value = self[node_id]  # Get data output.
                for n_id in stlp(n['outputs'][node_id]):
                    # Node has been visited or inp do not coincide with out.
                    if not (n_id in sol._visited or
                            sol.workflow.has_edge(n_id, dsp_id)):
                        # Donate the result to the child.
                        sol._wf_add_edge(dsp_id, n_id, value=value)
                        if fringe is not None:
                            # See node.
                            sol._see_node(n_id, fringe, dist, w_wait_in=2)
                break
[ "def", "_see_remote_link_node", "(", "self", ",", "node_id", ",", "fringe", "=", "None", ",", "dist", "=", "None", ",", "check_dsp", "=", "lambda", "x", ":", "True", ")", ":", "# Namespace shortcut.", "node", ",", "p_id", ",", "c_i", "=", "self", ".", "nodes", "[", "node_id", "]", ",", "self", ".", "index", "[", ":", "-", "1", "]", ",", "self", ".", "index", "[", "-", "1", ":", "]", "if", "node", "[", "'type'", "]", "==", "'data'", "and", "p_id", "and", "check_dsp", "(", "p_id", ")", ":", "sol", "=", "self", ".", "sub_sol", "[", "self", ".", "index", "[", ":", "-", "1", "]", "]", "# Get parent solution.", "for", "dsp_id", ",", "n", "in", "sol", ".", "dsp", ".", "nodes", ".", "items", "(", ")", ":", "if", "n", "[", "'index'", "]", "==", "c_i", "and", "node_id", "in", "n", ".", "get", "(", "'outputs'", ",", "{", "}", ")", ":", "value", "=", "self", "[", "node_id", "]", "# Get data output.", "for", "n_id", "in", "stlp", "(", "n", "[", "'outputs'", "]", "[", "node_id", "]", ")", ":", "# Node has been visited or inp do not coincide with out.", "if", "not", "(", "n_id", "in", "sol", ".", "_visited", "or", "sol", ".", "workflow", ".", "has_edge", "(", "n_id", ",", "dsp_id", ")", ")", ":", "# Donate the result to the child.", "sol", ".", "_wf_add_edge", "(", "dsp_id", ",", "n_id", ",", "value", "=", "value", ")", "if", "fringe", "is", "not", "None", ":", "# See node.", "sol", ".", "_see_node", "(", "n_id", ",", "fringe", ",", "dist", ",", "w_wait_in", "=", "2", ")", "break" ]
See data remote links of the node (set output to remote links).

:param node_id:
    Node id.
:type node_id: str

:param fringe:
    Heapq of closest available nodes.
:type fringe: list[(float | int, bool, (str, Dispatcher))]

:param dist:
    Distance from the starting node.
:type dist: float, int

:param check_dsp:
    A function to check if the remote dispatcher is ok.
:type check_dsp: (Dispatcher) -> bool
[ "See", "data", "remote", "links", "of", "the", "node", "(", "set", "output", "to", "remote", "links", ")", "." ]
python
train
jazzband/django-analytical
analytical/templatetags/gosquared.py
https://github.com/jazzband/django-analytical/blob/5487fd677bd47bc63fc2cf39597a0adc5d6c9ab3/analytical/templatetags/gosquared.py#L38-L48
def gosquared(parser, token):
    """
    GoSquared tracking template tag.

    Renders Javascript code to track page visits.  You must supply
    your GoSquared site token in the ``GOSQUARED_SITE_TOKEN`` setting.
    """
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return GoSquaredNode()
[ "def", "gosquared", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "GoSquaredNode", "(", ")" ]
GoSquared tracking template tag.

Renders Javascript code to track page visits.  You must supply
your GoSquared site token in the ``GOSQUARED_SITE_TOKEN`` setting.
[ "GoSquared", "tracking", "template", "tag", "." ]
python
valid
spacetelescope/drizzlepac
drizzlepac/runastrodriz.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/runastrodriz.py#L609-L613
def _restoreResults(newdir, origdir):
    """ Move (not copy) all files from newdir back to the original directory """
    for fname in glob.glob(os.path.join(newdir, '*')):
        shutil.move(fname, os.path.join(origdir, os.path.basename(fname)))
[ "def", "_restoreResults", "(", "newdir", ",", "origdir", ")", ":", "for", "fname", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "newdir", ",", "'*'", ")", ")", ":", "shutil", ".", "move", "(", "fname", ",", "os", ".", "path", ".", "join", "(", "origdir", ",", "os", ".", "path", ".", "basename", "(", "fname", ")", ")", ")" ]
Move (not copy) all files from newdir back to the original directory
[ "Move", "(", "not", "copy", ")", "all", "files", "from", "newdir", "back", "to", "the", "original", "directory" ]
python
train
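The glob-and-move loop above can be exercised without touching real pipeline output; the directory names and file name below are made up for the demo:

import glob
import os
import shutil
import tempfile

work = tempfile.mkdtemp()   # stand-in for the processing directory
home = tempfile.mkdtemp()   # stand-in for the original directory
open(os.path.join(work, 'result.fits'), 'w').close()

# Same move-everything-back loop as _restoreResults:
for fname in glob.glob(os.path.join(work, '*')):
    shutil.move(fname, os.path.join(home, os.path.basename(fname)))

print(os.listdir(home))     # ['result.fits']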
saltstack/salt
salt/modules/mac_pkgutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_pkgutil.py#L85-L112
def install(source, package_id):
    '''
    Install a .pkg from a URI or an absolute path.

    :param str source: The path to a package.

    :param str package_id: The package ID

    :return: True if successful, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' pkgutil.install source=/vagrant/build_essentials.pkg package_id=com.apple.pkg.gcc4.2Leo
    '''
    if is_installed(package_id):
        return True

    uri = urllib.parse.urlparse(source)
    if not uri.scheme == '':
        msg = 'Unsupported scheme for source uri: {0}'.format(uri.scheme)
        raise SaltInvocationError(msg)

    _install_from_path(source)

    return is_installed(package_id)
[ "def", "install", "(", "source", ",", "package_id", ")", ":", "if", "is_installed", "(", "package_id", ")", ":", "return", "True", "uri", "=", "urllib", ".", "parse", ".", "urlparse", "(", "source", ")", "if", "not", "uri", ".", "scheme", "==", "''", ":", "msg", "=", "'Unsupported scheme for source uri: {0}'", ".", "format", "(", "uri", ".", "scheme", ")", "raise", "SaltInvocationError", "(", "msg", ")", "_install_from_path", "(", "source", ")", "return", "is_installed", "(", "package_id", ")" ]
Install a .pkg from a URI or an absolute path.

:param str source: The path to a package.

:param str package_id: The package ID

:return: True if successful, otherwise False
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' pkgutil.install source=/vagrant/build_essentials.pkg package_id=com.apple.pkg.gcc4.2Leo
[ "Install", "a", ".", "pkg", "from", "an", "URI", "or", "an", "absolute", "path", "." ]
python
train
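Note that the scheme check in this record rejects any source carrying a URL scheme, so in practice only plain filesystem paths get past it. A small standalone illustration of that urlparse gate (not Salt code; the function name is hypothetical):

from urllib.parse import urlparse

def check_source(source):
    uri = urlparse(source)
    if uri.scheme != '':
        # Mirrors the SaltInvocationError branch above.
        raise ValueError('Unsupported scheme for source uri: {0}'.format(uri.scheme))
    return source

check_source('/vagrant/build_essentials.pkg')   # ok: empty scheme
# check_source('http://example.com/pkg.pkg')    # raises: scheme is 'http'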
timClicks/slate
src/slate/classes.py
https://github.com/timClicks/slate/blob/e796bbb09ea5ab473aa33ce2984bf9fc2bebb64b/src/slate/classes.py#L92-L103
def text(self, clean=True):
    """
    Returns the text of the PDF as a single string.
    Options:

      :clean:
        Removes misc cruft, like lots of whitespace.
    """
    if clean:
        return utils.normalise_whitespace(''.join(self).replace('\n', ' '))
    else:
        return ''.join(self)
[ "def", "text", "(", "self", ",", "clean", "=", "True", ")", ":", "if", "clean", ":", "return", "utils", ".", "normalise_whitespace", "(", "''", ".", "join", "(", "self", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ")", "else", ":", "return", "''", ".", "join", "(", "self", ")" ]
Returns the text of the PDF as a single string.
Options:

  :clean:
    Removes misc cruft, like lots of whitespace.
[ "Returns", "the", "text", "of", "the", "PDF", "as", "a", "single", "string", ".", "Options", ":" ]
python
train
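The clean=True branch depends on utils.normalise_whitespace, which is not shown in this record. A reasonable stand-in is the usual split/join idiom (my sketch, not slate's implementation):

def normalise_whitespace(text):
    # Collapse runs of whitespace (including newlines) to single spaces.
    return ' '.join(text.split())

pages = ['first  page\ntext ', ' second   page']
print(normalise_whitespace(''.join(pages).replace('\n', ' ')))
# 'first page text second page'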
Alignak-monitoring/alignak
alignak/daemons/arbiterdaemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/arbiterdaemon.py#L1801-L1826
def get_monitoring_problems(self):
    """Get the scheduler satellites' problems list

    :return: problems dictionary
    :rtype: dict
    """
    res = self.get_id()
    res['problems'] = {}

    # Report our schedulers information, but only if a dispatcher exists
    if getattr(self, 'dispatcher', None) is None:
        return res

    for satellite in self.dispatcher.all_daemons_links:
        if satellite.type not in ['scheduler']:
            continue
        if not satellite.active:
            continue

        if satellite.statistics and 'problems' in satellite.statistics:
            res['problems'][satellite.name] = {
                '_freshness': satellite.statistics['_freshness'],
                'problems': satellite.statistics['problems']
            }

    return res
[ "def", "get_monitoring_problems", "(", "self", ")", ":", "res", "=", "self", ".", "get_id", "(", ")", "res", "[", "'problems'", "]", "=", "{", "}", "# Report our schedulers information, but only if a dispatcher exists", "if", "getattr", "(", "self", ",", "'dispatcher'", ",", "None", ")", "is", "None", ":", "return", "res", "for", "satellite", "in", "self", ".", "dispatcher", ".", "all_daemons_links", ":", "if", "satellite", ".", "type", "not", "in", "[", "'scheduler'", "]", ":", "continue", "if", "not", "satellite", ".", "active", ":", "continue", "if", "satellite", ".", "statistics", "and", "'problems'", "in", "satellite", ".", "statistics", ":", "res", "[", "'problems'", "]", "[", "satellite", ".", "name", "]", "=", "{", "'_freshness'", ":", "satellite", ".", "statistics", "[", "'_freshness'", "]", ",", "'problems'", ":", "satellite", ".", "statistics", "[", "'problems'", "]", "}", "return", "res" ]
Get the scheduler satellites' problems list

:return: problems dictionary
:rtype: dict
[ "Get", "the", "schedulers", "satellites", "problems", "list" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L271-L283
def confd_state_webui_listen_tcp_ip(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    confd_state = ET.SubElement(config, "confd-state",
                                xmlns="http://tail-f.com/yang/confd-monitoring")
    webui = ET.SubElement(confd_state, "webui")
    listen = ET.SubElement(webui, "listen")
    tcp = ET.SubElement(listen, "tcp")
    ip = ET.SubElement(tcp, "ip")
    ip.text = kwargs.pop('ip')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "confd_state_webui_listen_tcp_ip", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "xmlns", "=", "\"http://tail-f.com/yang/confd-monitoring\"", ")", "webui", "=", "ET", ".", "SubElement", "(", "confd_state", ",", "\"webui\"", ")", "listen", "=", "ET", ".", "SubElement", "(", "webui", ",", "\"listen\"", ")", "tcp", "=", "ET", ".", "SubElement", "(", "listen", ",", "\"tcp\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "tcp", ",", "\"ip\"", ")", "ip", ".", "text", "=", "kwargs", ".", "pop", "(", "'ip'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
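To see what the element builder above actually emits, the same SubElement chain can be run standalone and serialized, skipping the NETCONF callback (the IP value is made up):

import xml.etree.ElementTree as ET

config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state",
                            xmlns="http://tail-f.com/yang/confd-monitoring")
webui = ET.SubElement(confd_state, "webui")
listen = ET.SubElement(webui, "listen")
tcp = ET.SubElement(listen, "tcp")
ip = ET.SubElement(tcp, "ip")
ip.text = "10.0.0.1"

print(ET.tostring(config).decode())
# one line: <config><confd-state xmlns="http://tail-f.com/yang/confd-monitoring">
#           <webui><listen><tcp><ip>10.0.0.1</ip></tcp></listen></webui></confd-state></config>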
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L4031-L4034
def register_postcmd_hook(self, func: Callable[[plugin.PostcommandData], plugin.PostcommandData]) -> None:
    """Register a hook to be called after the command function."""
    self._validate_prepostcmd_hook(func, plugin.PostcommandData)
    self._postcmd_hooks.append(func)
[ "def", "register_postcmd_hook", "(", "self", ",", "func", ":", "Callable", "[", "[", "plugin", ".", "PostcommandData", "]", ",", "plugin", ".", "PostcommandData", "]", ")", "->", "None", ":", "self", ".", "_validate_prepostcmd_hook", "(", "func", ",", "plugin", ".", "PostcommandData", ")", "self", ".", "_postcmd_hooks", ".", "append", "(", "func", ")" ]
Register a hook to be called after the command function.
[ "Register", "a", "hook", "to", "be", "called", "after", "the", "command", "function", "." ]
python
train
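A stripped-down sketch of the validate-then-append hook registry this method implements; the PostcommandData plumbing is simplified to a plain callable check, which is not cmd2's actual validation:

class App:
    def __init__(self):
        self._postcmd_hooks = []

    def register_postcmd_hook(self, func):
        if not callable(func):               # stand-in for signature validation
            raise TypeError('hook must be callable')
        self._postcmd_hooks.append(func)

    def run_command(self, data):
        for hook in self._postcmd_hooks:     # each hook sees and may replace data
            data = hook(data)
        return data

app = App()
app.register_postcmd_hook(lambda d: d + ' [logged]')
print(app.run_command('ok'))                 # 'ok [logged]'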
chrislit/abydos
abydos/fingerprint/_occurrence_halved.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/fingerprint/_occurrence_halved.py#L42-L99
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
    """Return the occurrence halved fingerprint.

    Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.

    Parameters
    ----------
    word : str
        The word to fingerprint
    n_bits : int
        Number of bits in the fingerprint returned
    most_common : list
        The most common tokens in the target language, ordered by frequency

    Returns
    -------
    int
        The occurrence halved fingerprint

    Examples
    --------
    >>> ohf = OccurrenceHalved()
    >>> bin(ohf.fingerprint('hat'))
    '0b1010000000010'
    >>> bin(ohf.fingerprint('niall'))
    '0b10010100000'
    >>> bin(ohf.fingerprint('colin'))
    '0b1001010000'
    >>> bin(ohf.fingerprint('atcg'))
    '0b10100000000000'
    >>> bin(ohf.fingerprint('entreatment'))
    '0b1111010000110000'

    """
    if n_bits % 2:
        n_bits += 1

    w_len = len(word) // 2
    w_1 = set(word[:w_len])
    w_2 = set(word[w_len:])
    fingerprint = 0

    for letter in most_common:
        if n_bits:
            fingerprint <<= 1
            if letter in w_1:
                fingerprint += 1
            fingerprint <<= 1
            if letter in w_2:
                fingerprint += 1
            n_bits -= 2
        else:
            break

    if n_bits > 0:
        fingerprint <<= n_bits

    return fingerprint
[ "def", "fingerprint", "(", "self", ",", "word", ",", "n_bits", "=", "16", ",", "most_common", "=", "MOST_COMMON_LETTERS_CG", ")", ":", "if", "n_bits", "%", "2", ":", "n_bits", "+=", "1", "w_len", "=", "len", "(", "word", ")", "//", "2", "w_1", "=", "set", "(", "word", "[", ":", "w_len", "]", ")", "w_2", "=", "set", "(", "word", "[", "w_len", ":", "]", ")", "fingerprint", "=", "0", "for", "letter", "in", "most_common", ":", "if", "n_bits", ":", "fingerprint", "<<=", "1", "if", "letter", "in", "w_1", ":", "fingerprint", "+=", "1", "fingerprint", "<<=", "1", "if", "letter", "in", "w_2", ":", "fingerprint", "+=", "1", "n_bits", "-=", "2", "else", ":", "break", "if", "n_bits", ">", "0", ":", "fingerprint", "<<=", "n_bits", "return", "fingerprint" ]
Return the occurrence halved fingerprint.

Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.

Parameters
----------
word : str
    The word to fingerprint
n_bits : int
    Number of bits in the fingerprint returned
most_common : list
    The most common tokens in the target language, ordered by frequency

Returns
-------
int
    The occurrence halved fingerprint

Examples
--------
>>> ohf = OccurrenceHalved()
>>> bin(ohf.fingerprint('hat'))
'0b1010000000010'
>>> bin(ohf.fingerprint('niall'))
'0b10010100000'
>>> bin(ohf.fingerprint('colin'))
'0b1001010000'
>>> bin(ohf.fingerprint('atcg'))
'0b10100000000000'
>>> bin(ohf.fingerprint('entreatment'))
'0b1111010000110000'
[ "Return", "the", "occurrence", "halved", "fingerprint", "." ]
python
valid
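The bit layout above interleaves two flags per reference letter: one for the first half of the word, one for the second. A standalone run of the same packing logic, using a short made-up most_common list in place of MOST_COMMON_LETTERS_CG:

def occurrence_halved(word, n_bits=8, most_common='etaoinsh'):
    if n_bits % 2:
        n_bits += 1
    half = len(word) // 2
    w_1, w_2 = set(word[:half]), set(word[half:])
    fp = 0
    for letter in most_common:
        if not n_bits:
            break
        fp = (fp << 1) | (letter in w_1)   # bit for the first half
        fp = (fp << 1) | (letter in w_2)   # bit for the second half
        n_bits -= 2
    return fp << n_bits                    # pad if we ran out of letters

print(bin(occurrence_halved('hat')))       # 0b10100 with this toy letter list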
kejbaly2/metrique
metrique/utils.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/utils.py#L270-L320
def daemonize(pid_file=None, cwd=None):
    """
    Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Modified version of:
    code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/

    author = "Chad J. Schroeder"
    copyright = "Copyright (C) 2005 Chad J. Schroeder"
    """
    cwd = cwd or '/'
    try:
        pid = os.fork()
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))

    if (pid == 0):  # The first child.
        os.setsid()
        try:
            pid = os.fork()  # Fork a second child.
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))

        if (pid == 0):  # The second child.
            os.chdir(cwd)
            os.umask(0)
        else:
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        os._exit(0)  # Exit parent of the first child.

    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = 1024

    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass

    os.open('/dev/null', os.O_RDWR)  # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)

    pid_file = pid_file or '%s.pid' % os.getpid()
    write_file(pid_file, os.getpid())
    return 0
[ "def", "daemonize", "(", "pid_file", "=", "None", ",", "cwd", "=", "None", ")", ":", "cwd", "=", "cwd", "or", "'/'", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "except", "OSError", "as", "e", ":", "raise", "Exception", "(", "\"%s [%d]\"", "%", "(", "e", ".", "strerror", ",", "e", ".", "errno", ")", ")", "if", "(", "pid", "==", "0", ")", ":", "# The first child.", "os", ".", "setsid", "(", ")", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "# Fork a second child.", "except", "OSError", "as", "e", ":", "raise", "Exception", "(", "\"%s [%d]\"", "%", "(", "e", ".", "strerror", ",", "e", ".", "errno", ")", ")", "if", "(", "pid", "==", "0", ")", ":", "# The second child.", "os", ".", "chdir", "(", "cwd", ")", "os", ".", "umask", "(", "0", ")", "else", ":", "os", ".", "_exit", "(", "0", ")", "# Exit parent (the first child) of the second child.", "else", ":", "os", ".", "_exit", "(", "0", ")", "# Exit parent of the first child.", "maxfd", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "[", "1", "]", "if", "(", "maxfd", "==", "resource", ".", "RLIM_INFINITY", ")", ":", "maxfd", "=", "1024", "# Iterate through and close all file descriptors.", "for", "fd", "in", "range", "(", "0", ",", "maxfd", ")", ":", "try", ":", "os", ".", "close", "(", "fd", ")", "except", "OSError", ":", "# ERROR, fd wasn't open to begin with (ignored)", "pass", "os", ".", "open", "(", "'/dev/null'", ",", "os", ".", "O_RDWR", ")", "# standard input (0)", "# Duplicate standard input to standard output and standard error.", "os", ".", "dup2", "(", "0", ",", "1", ")", "# standard output (1)", "os", ".", "dup2", "(", "0", ",", "2", ")", "# standard error (2)", "pid_file", "=", "pid_file", "or", "'%s.pid'", "%", "os", ".", "getpid", "(", ")", "write_file", "(", "pid_file", ",", "os", ".", "getpid", "(", ")", ")", "return", "0" ]
Detach a process from the controlling terminal and run it in the
background as a daemon.

Modified version of:
code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/

author = "Chad J. Schroeder"
copyright = "Copyright (C) 2005 Chad J. Schroeder"
[ "Detach", "a", "process", "from", "the", "controlling", "terminal", "and", "run", "it", "in", "the", "background", "as", "a", "daemon", "." ]
python
train
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1766-L1775
def get_font_face(self):
    """Return the current font face.

    :returns:
        A new :class:`FontFace` object
        wrapping an existing cairo object.

    """
    return FontFace._from_pointer(
        cairo.cairo_get_font_face(self._pointer), incref=True)
[ "def", "get_font_face", "(", "self", ")", ":", "return", "FontFace", ".", "_from_pointer", "(", "cairo", ".", "cairo_get_font_face", "(", "self", ".", "_pointer", ")", ",", "incref", "=", "True", ")" ]
Return the current font face.

:returns:
    A new :class:`FontFace` object
    wrapping an existing cairo object.
[ "Return", "the", "current", "font", "face", "." ]
python
train
jobovy/galpy
galpy/df/diskdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/diskdf.py#L1012-L1055
def oortC(self, R, romberg=False, nsigma=None, phi=0.):
    """
    NAME:

       oortC

    PURPOSE:

       calculate the Oort function C

    INPUT:

       R - radius at which to calculate C (can be Quantity)

    OPTIONAL INPUT:

       nsigma - number of sigma to integrate the velocities over

    KEYWORDS:

       romberg - if True, use a romberg integrator (default: False)

    OUTPUT:

       Oort C at R

    HISTORY:

       2011-04-19 - Written - Bovy (NYU)

    BUGS:

       could be made more efficient, e.g., surfacemass is calculated
       multiple times; we know this is zero, but it is calculated anyway
       (bug or feature?)
    """
    # 2C = -meanvR/R - dmeanvphi/R/dphi + dmeanvR/dR
    meanvr = self.meanvR(R, romberg=romberg, nsigma=nsigma, phi=phi,
                         use_physical=False)
    dmeanvphiRdphi = 0.  # We know this, since the DF does not depend on phi
    surfmass = self._vmomentsurfacemass(R, 0, 0, phi=phi, romberg=romberg,
                                        nsigma=nsigma)
    dmeanvRdR = self._vmomentsurfacemass(R, 1, 0, deriv='R', phi=phi,
                                         romberg=romberg, nsigma=nsigma) / \
        surfmass  # the other term is zero because f is even in vR
    return 0.5 * (-meanvr / R - dmeanvphiRdphi / R + dmeanvRdR)
[ "def", "oortC", "(", "self", ",", "R", ",", "romberg", "=", "False", ",", "nsigma", "=", "None", ",", "phi", "=", "0.", ")", ":", "#2C= -meanvR/R-dmeanvphi/R/dphi+dmeanvR/dR", "meanvr", "=", "self", ".", "meanvR", "(", "R", ",", "romberg", "=", "romberg", ",", "nsigma", "=", "nsigma", ",", "phi", "=", "phi", ",", "use_physical", "=", "False", ")", "dmeanvphiRdphi", "=", "0.", "#We know this, since the DF does not depend on phi", "surfmass", "=", "self", ".", "_vmomentsurfacemass", "(", "R", ",", "0", ",", "0", ",", "phi", "=", "phi", ",", "romberg", "=", "romberg", ",", "nsigma", "=", "nsigma", ")", "dmeanvRdR", "=", "self", ".", "_vmomentsurfacemass", "(", "R", ",", "1", ",", "0", ",", "deriv", "=", "'R'", ",", "phi", "=", "phi", ",", "romberg", "=", "romberg", ",", "nsigma", "=", "nsigma", ")", "/", "surfmass", "#other terms is zero because f is even in vR", "return", "0.5", "*", "(", "-", "meanvr", "/", "R", "-", "dmeanvphiRdphi", "/", "R", "+", "dmeanvRdR", ")" ]
NAME:

   oortC

PURPOSE:

   calculate the Oort function C

INPUT:

   R - radius at which to calculate C (can be Quantity)

OPTIONAL INPUT:

   nsigma - number of sigma to integrate the velocities over

KEYWORDS:

   romberg - if True, use a romberg integrator (default: False)

OUTPUT:

   Oort C at R

HISTORY:

   2011-04-19 - Written - Bovy (NYU)

BUGS:

   could be made more efficient, e.g., surfacemass is calculated
   multiple times; we know this is zero, but it is calculated anyway
   (bug or feature?)
[ "NAME", ":" ]
python
train
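The inline comment `2C = -meanvR/R - dmeanvphi/R/dphi + dmeanvR/dR` encodes the Oort C definition the method evaluates; written out (my transcription of that comment, not taken from the galpy docs):

C = \frac{1}{2}\left(-\frac{\langle v_R \rangle}{R}
    - \frac{1}{R}\frac{\partial \langle v_\phi \rangle}{\partial \phi}
    + \frac{\partial \langle v_R \rangle}{\partial R}\right)

The middle term is set to zero in the code because the distribution function is axisymmetric, and the radial-velocity moment is normalised by the surface mass returned by _vmomentsurfacemass.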
CityOfZion/neo-python
neo/Core/State/AssetState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/AssetState.py#L97-L129
def Deserialize(self, reader):
    """
    Deserialize full object.

    Args:
        reader (neocore.IO.BinaryReader):
    """
    super(AssetState, self).Deserialize(reader)
    self.AssetId = reader.ReadUInt256()
    self.AssetType = reader.ReadByte()
    self.Name = reader.ReadVarString()

    position = reader.stream.tell()

    try:
        self.Amount = reader.ReadFixed8()
    except Exception as e:
        reader.stream.seek(position)
        self.Amount = reader.ReadFixed8()

    self.Available = reader.ReadFixed8()
    self.Precision = reader.ReadByte()

    # fee mode
    reader.ReadByte()

    self.Fee = reader.ReadFixed8()
    self.FeeAddress = reader.ReadUInt160()
    self.Owner = ECDSA.Deserialize_Secp256r1(reader)
    self.Admin = reader.ReadUInt160()
    self.Issuer = reader.ReadUInt160()
    self.Expiration = reader.ReadUInt32()
    self.IsFrozen = reader.ReadBool()
[ "def", "Deserialize", "(", "self", ",", "reader", ")", ":", "super", "(", "AssetState", ",", "self", ")", ".", "Deserialize", "(", "reader", ")", "self", ".", "AssetId", "=", "reader", ".", "ReadUInt256", "(", ")", "self", ".", "AssetType", "=", "reader", ".", "ReadByte", "(", ")", "self", ".", "Name", "=", "reader", ".", "ReadVarString", "(", ")", "position", "=", "reader", ".", "stream", ".", "tell", "(", ")", "try", ":", "self", ".", "Amount", "=", "reader", ".", "ReadFixed8", "(", ")", "except", "Exception", "as", "e", ":", "reader", ".", "stream", ".", "seek", "(", "position", ")", "self", ".", "Amount", "=", "reader", ".", "ReadFixed8", "(", ")", "self", ".", "Available", "=", "reader", ".", "ReadFixed8", "(", ")", "self", ".", "Precision", "=", "reader", ".", "ReadByte", "(", ")", "# fee mode", "reader", ".", "ReadByte", "(", ")", "self", ".", "Fee", "=", "reader", ".", "ReadFixed8", "(", ")", "self", ".", "FeeAddress", "=", "reader", ".", "ReadUInt160", "(", ")", "self", ".", "Owner", "=", "ECDSA", ".", "Deserialize_Secp256r1", "(", "reader", ")", "self", ".", "Admin", "=", "reader", ".", "ReadUInt160", "(", ")", "self", ".", "Issuer", "=", "reader", ".", "ReadUInt160", "(", ")", "self", ".", "Expiration", "=", "reader", ".", "ReadUInt32", "(", ")", "self", ".", "IsFrozen", "=", "reader", ".", "ReadBool", "(", ")" ]
Deserialize full object.

Args:
    reader (neocore.IO.BinaryReader):
[ "Deserialize", "full", "object", "." ]
python
train
MillionIntegrals/vel
vel/storage/backend/mongodb.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/backend/mongodb.py#L13-L15
def clean(self, initial_epoch):
    """ Remove entries from database that would get overwritten """
    self.db.metrics.delete_many({'run_name': self.model_config.run_name,
                                 'epoch_idx': {'$gt': initial_epoch}})
[ "def", "clean", "(", "self", ",", "initial_epoch", ")", ":", "self", ".", "db", ".", "metrics", ".", "delete_many", "(", "{", "'run_name'", ":", "self", ".", "model_config", ".", "run_name", ",", "'epoch_idx'", ":", "{", "'$gt'", ":", "initial_epoch", "}", "}", ")" ]
Remove entries from database that would get overwritten
[ "Remove", "entries", "from", "database", "that", "would", "get", "overwritten" ]
python
train
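The $gt filter keeps metrics up to and including initial_epoch and deletes everything after it, so a resumed run can overwrite those epochs cleanly. The same filter shape with pymongo directly (the database name and values here are made up for the demo and assume a local mongod):

from pymongo import MongoClient

db = MongoClient()['vel_metrics']        # database name is an assumption
db.metrics.delete_many({
    'run_name': 'my_run',                # limit the delete to one run
    'epoch_idx': {'$gt': 5},             # only epochs strictly after epoch 5
})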
Autodesk/aomi
aomi/filez.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/filez.py#L135-L152
def thaw(vault_client, src_file, opt):
    """Given the combination of a Secretfile and the output of a freeze
    operation, will restore secrets to usable locations"""
    if not os.path.exists(src_file):
        raise aomi.exceptions.AomiFile("%s does not exist" % src_file)

    tmp_dir = ensure_tmpdir()
    zip_file = thaw_decrypt(vault_client, src_file, tmp_dir, opt)
    archive = zipfile.ZipFile(zip_file, 'r')
    for archive_file in archive.namelist():
        archive.extract(archive_file, tmp_dir)
        os.chmod("%s/%s" % (tmp_dir, archive_file), 0o640)
        LOG.debug("Extracted %s from archive", archive_file)

    LOG.info("Thawing secrets into %s", opt.secrets)
    config = get_secretfile(opt)
    Context.load(config, opt) \
           .thaw(tmp_dir)
[ "def", "thaw", "(", "vault_client", ",", "src_file", ",", "opt", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "src_file", ")", ":", "raise", "aomi", ".", "exceptions", ".", "AomiFile", "(", "\"%s does not exist\"", "%", "src_file", ")", "tmp_dir", "=", "ensure_tmpdir", "(", ")", "zip_file", "=", "thaw_decrypt", "(", "vault_client", ",", "src_file", ",", "tmp_dir", ",", "opt", ")", "archive", "=", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'r'", ")", "for", "archive_file", "in", "archive", ".", "namelist", "(", ")", ":", "archive", ".", "extract", "(", "archive_file", ",", "tmp_dir", ")", "os", ".", "chmod", "(", "\"%s/%s\"", "%", "(", "tmp_dir", ",", "archive_file", ")", ",", "0o640", ")", "LOG", ".", "debug", "(", "\"Extracted %s from archive\"", ",", "archive_file", ")", "LOG", ".", "info", "(", "\"Thawing secrets into %s\"", ",", "opt", ".", "secrets", ")", "config", "=", "get_secretfile", "(", "opt", ")", "Context", ".", "load", "(", "config", ",", "opt", ")", ".", "thaw", "(", "tmp_dir", ")" ]
Given the combination of a Secretfile and the output of a freeze operation, will restore secrets to usable locations
[ "Given", "the", "combination", "of", "a", "Secretfile", "and", "the", "output", "of", "a", "freeze", "operation", "will", "restore", "secrets", "to", "usable", "locations" ]
python
train
9seconds/pep3134
pep3134/utils.py
https://github.com/9seconds/pep3134/blob/6b6fae903bb63cb2ac24004bb2c18ebc6a7d41d0/pep3134/utils.py#L9-L44
def construct_exc_class(cls):
    """Constructs proxy class for the exception."""

    class ProxyException(cls, BaseException):
        __pep3134__ = True

        @property
        def __traceback__(self):
            if self.__fixed_traceback__:
                return self.__fixed_traceback__

            current_exc, current_tb = sys.exc_info()[1:]
            if current_exc is self:
                return current_tb

        def __init__(self, instance=None):  # pylint: disable=W0231
            self.__original_exception__ = instance
            self.__fixed_traceback__ = None

        def __getattr__(self, item):
            return getattr(self.__original_exception__, item)

        def __repr__(self):
            return repr(self.__original_exception__)

        def __str__(self):
            return str(self.__original_exception__)

        def with_traceback(self, traceback):
            instance = copy.copy(self)
            instance.__fixed_traceback__ = traceback
            return instance

    ProxyException.__name__ = cls.__name__

    return ProxyException
[ "def", "construct_exc_class", "(", "cls", ")", ":", "class", "ProxyException", "(", "cls", ",", "BaseException", ")", ":", "__pep3134__", "=", "True", "@", "property", "def", "__traceback__", "(", "self", ")", ":", "if", "self", ".", "__fixed_traceback__", ":", "return", "self", ".", "__fixed_traceback__", "current_exc", ",", "current_tb", "=", "sys", ".", "exc_info", "(", ")", "[", "1", ":", "]", "if", "current_exc", "is", "self", ":", "return", "current_tb", "def", "__init__", "(", "self", ",", "instance", "=", "None", ")", ":", "# pylint: disable=W0231", "self", ".", "__original_exception__", "=", "instance", "self", ".", "__fixed_traceback__", "=", "None", "def", "__getattr__", "(", "self", ",", "item", ")", ":", "return", "getattr", "(", "self", ".", "__original_exception__", ",", "item", ")", "def", "__repr__", "(", "self", ")", ":", "return", "repr", "(", "self", ".", "__original_exception__", ")", "def", "__str__", "(", "self", ")", ":", "return", "str", "(", "self", ".", "__original_exception__", ")", "def", "with_traceback", "(", "self", ",", "traceback", ")", ":", "instance", "=", "copy", ".", "copy", "(", "self", ")", "instance", ".", "__fixed_traceback__", "=", "traceback", "return", "instance", "ProxyException", ".", "__name__", "=", "cls", ".", "__name__", "return", "ProxyException" ]
Constructs proxy class for the exception.
[ "Constructs", "proxy", "class", "for", "the", "exception", "." ]
python
train
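Rough usage of the proxy-class factory above: wrap an existing exception instance so attribute access is forwarded, and note that with_traceback returns a copy rather than mutating in place (the hint attribute is invented for the demo):

ValueErrorProxy = construct_exc_class(ValueError)

original = ValueError('bad input')
original.hint = 'check the units'        # arbitrary attribute on the wrapped instance

proxy = ValueErrorProxy(original)
print(repr(proxy))                       # ValueError('bad input') -- forwarded __repr__
print(proxy.hint)                        # 'check the units' -- forwarded via __getattr__

pinned = proxy.with_traceback(None)
print(pinned is proxy)                   # False: with_traceback returns a copy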
HewlettPackard/python-hpOneView
hpOneView/resources/storage/volumes.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/storage/volumes.py#L153-L175
def create_from_snapshot(self, data, timeout=-1):
    """
    Creates a new volume on the storage system from a snapshot of a volume.
    A volume template must also be specified when creating a volume from a snapshot.

    The global setting "StorageVolumeTemplateRequired" controls whether or
    not root volume templates can be used to provision volumes.
    The value of this setting defaults to "false".
    If the value is set to "true", then only templates with an "isRoot"
    value of "false" can be used to provision a volume.

    Args:
        data (dict):
            Object to create.
        timeout:
            Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just stop
            waiting for its completion.

    Returns:
        dict: Created data.
    """
    uri = self.URI + "/from-snapshot"
    return self._client.create(data, uri=uri, timeout=timeout)
[ "def", "create_from_snapshot", "(", "self", ",", "data", ",", "timeout", "=", "-", "1", ")", ":", "uri", "=", "self", ".", "URI", "+", "\"/from-snapshot\"", "return", "self", ".", "_client", ".", "create", "(", "data", ",", "uri", "=", "uri", ",", "timeout", "=", "timeout", ")" ]
Creates a new volume on the storage system from a snapshot of a volume.
A volume template must also be specified when creating a volume from a snapshot.

The global setting "StorageVolumeTemplateRequired" controls whether or
not root volume templates can be used to provision volumes.
The value of this setting defaults to "false".
If the value is set to "true", then only templates with an "isRoot"
value of "false" can be used to provision a volume.

Args:
    data (dict):
        Object to create.
    timeout:
        Timeout in seconds. Wait for task completion by default.
        The timeout does not abort the operation in OneView, just stop
        waiting for its completion.

Returns:
    dict: Created data.
[ "Creates", "a", "new", "volume", "on", "the", "storage", "system", "from", "a", "snapshot", "of", "a", "volume", ".", "A", "volume", "template", "must", "also", "be", "specified", "when", "creating", "a", "volume", "from", "a", "snapshot", "." ]
python
train
piface/pifacecommon
pifacecommon/interrupts.py
https://github.com/piface/pifacecommon/blob/006bca14c18d43ba2d9eafaa84ef83b512c51cf6/pifacecommon/interrupts.py#L229-L238
def gpio_interrupts_enable(self):
    """Enables GPIO interrupts."""
    try:
        bring_gpio_interrupt_into_userspace()
        set_gpio_interrupt_edge()
    except Timeout as e:
        raise InterruptEnableException(
            "There was an error bringing gpio%d into userspace. %s" %
            (GPIO_INTERRUPT_PIN, e.message)
        )
[ "def", "gpio_interrupts_enable", "(", "self", ")", ":", "try", ":", "bring_gpio_interrupt_into_userspace", "(", ")", "set_gpio_interrupt_edge", "(", ")", "except", "Timeout", "as", "e", ":", "raise", "InterruptEnableException", "(", "\"There was an error bringing gpio%d into userspace. %s\"", "%", "(", "GPIO_INTERRUPT_PIN", ",", "e", ".", "message", ")", ")" ]
Enables GPIO interrupts.
[ "Enables", "GPIO", "interrupts", "." ]
python
test
manns/pyspread
pyspread/src/model/model.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L1214-L1230
def _get_updated_environment(self, env_dict=None):
    """Returns globals environment with 'magic' variable

    Parameters
    ----------
    env_dict: Dict, defaults to {'S': self}
    \tDict that maps global variable name to value

    """
    if env_dict is None:
        env_dict = {'S': self}

    env = globals().copy()
    env.update(env_dict)

    return env
[ "def", "_get_updated_environment", "(", "self", ",", "env_dict", "=", "None", ")", ":", "if", "env_dict", "is", "None", ":", "env_dict", "=", "{", "'S'", ":", "self", "}", "env", "=", "globals", "(", ")", ".", "copy", "(", ")", "env", ".", "update", "(", "env_dict", ")", "return", "env" ]
Returns globals environment with 'magic' variable

Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value
[ "Returns", "globals", "environment", "with", "magic", "variable" ]
python
train
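The effect of the method above is to evaluate user code against the module globals plus a magic S binding. A condensed standalone illustration of the same globals-copy-and-update idiom (not pyspread's code; the S value is a placeholder):

def updated_environment(env_dict=None):
    env = globals().copy()                  # start from this module's global namespace
    env.update(env_dict or {'S': 'the-grid-object'})
    return env

# eval sees the overlaid names as globals:
print(eval("S", updated_environment()))     # 'the-grid-object'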
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L1507-L1542
def search_hexa(self, hexa, minAddr=None, maxAddr=None):
    """
    Search for the given hexadecimal pattern within the process memory.

    Hex patterns must be in this form::

        "68 65 6c 6c 6f 20 77 6f 72 6c 64"  # "hello world"

    Spaces are optional. Capitalization of hex digits doesn't matter.
    This is exactly equivalent to the previous example::

        "68656C6C6F20776F726C64"            # "hello world"

    Wildcards are allowed, in the form of a C{?} sign in any hex digit::

        "5? 5? c3"          # pop register / pop register / ret
        "b8 ?? ?? ?? ??"    # mov eax, immediate value

    @type  hexa: str
    @param hexa: Pattern to search for.

    @type  minAddr: int
    @param minAddr: (Optional) Start the search at this memory address.

    @type  maxAddr: int
    @param maxAddr: (Optional) Stop the search at this memory address.

    @rtype:  iterator of tuple( int, str )
    @return: An iterator of tuples. Each tuple contains the following:
        - The memory address where the pattern was found.
        - The bytes that match the pattern.

    @raise WindowsError: An error occurred when querying or reading the
        process memory.
    """
    pattern = HexPattern(hexa)
    matches = Search.search_process(self, pattern, minAddr, maxAddr)
    for addr, size, data in matches:
        yield addr, data
[ "def", "search_hexa", "(", "self", ",", "hexa", ",", "minAddr", "=", "None", ",", "maxAddr", "=", "None", ")", ":", "pattern", "=", "HexPattern", "(", "hexa", ")", "matches", "=", "Search", ".", "search_process", "(", "self", ",", "pattern", ",", "minAddr", ",", "maxAddr", ")", "for", "addr", ",", "size", ",", "data", "in", "matches", ":", "yield", "addr", ",", "data" ]
Search for the given hexadecimal pattern within the process memory.

Hex patterns must be in this form::

    "68 65 6c 6c 6f 20 77 6f 72 6c 64"  # "hello world"

Spaces are optional. Capitalization of hex digits doesn't matter.
This is exactly equivalent to the previous example::

    "68656C6C6F20776F726C64"            # "hello world"

Wildcards are allowed, in the form of a C{?} sign in any hex digit::

    "5? 5? c3"          # pop register / pop register / ret
    "b8 ?? ?? ?? ??"    # mov eax, immediate value

@type  hexa: str
@param hexa: Pattern to search for.

@type  minAddr: int
@param minAddr: (Optional) Start the search at this memory address.

@type  maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.

@rtype:  iterator of tuple( int, str )
@return: An iterator of tuples. Each tuple contains the following:
    - The memory address where the pattern was found.
    - The bytes that match the pattern.

@raise WindowsError: An error occurred when querying or reading the
    process memory.
[ "Search", "for", "the", "given", "hexadecimal", "pattern", "within", "the", "process", "memory", "." ]
python
train
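HexPattern itself is not shown in this record; a plausible reading of its wildcard syntax is a byte-regex translation like the following (my sketch, not winappdbg's implementation):

import re

def hex_pattern_to_regex(hexa):
    """Translate '5? 5? c3' into a regex over bytes; '?' is a nibble wildcard."""
    digits = hexa.replace(' ', '').lower()
    pattern = b''
    for i in range(0, len(digits), 2):
        hi, lo = digits[i], digits[i + 1]
        if hi == '?' and lo == '?':
            pattern += b'.'                          # any byte at all
        else:
            his = range(16) if hi == '?' else [int(hi, 16)]
            los = range(16) if lo == '?' else [int(lo, 16)]
            options = bytes(h * 16 + l for h in his for l in los)
            pattern += b'[' + re.escape(options) + b']'
    return re.compile(pattern, re.DOTALL)

rx = hex_pattern_to_regex('5? 5? c3')                # pop reg / pop reg / ret
m = rx.search(b'\x12\x55\x5b\xc3\x90')
print(m.start(), m.group())                          # 1 b'U[\xc3'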
celiao/tmdbsimple
tmdbsimple/search.py
https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/search.py#L81-L105
def tv(self, **kwargs):
    """
    Search for TV shows by title.

    Args:
        query: CGI escaped string.
        page: (optional) Minimum value of 1.  Expected value is an integer.
        language: (optional) ISO 639-1 code.
        first_air_date_year: (optional) Filter the results to only match
                             shows that have an air date with this value.
        search_type: (optional) By default, the search type is 'phrase'.
                     This is almost guaranteed to be the option you will
                     want.  It's a great all-purpose search type and by far
                     the most tuned for everyday querying.  For those
                     wanting more of an "autocomplete" type search, set
                     this option to 'ngram'.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('tv')

    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
[ "def", "tv", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'tv'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Search for TV shows by title.

Args:
    query: CGI escaped string.
    page: (optional) Minimum value of 1.  Expected value is an integer.
    language: (optional) ISO 639-1 code.
    first_air_date_year: (optional) Filter the results to only match
                         shows that have an air date with this value.
    search_type: (optional) By default, the search type is 'phrase'.
                 This is almost guaranteed to be the option you will
                 want.  It's a great all-purpose search type and by far
                 the most tuned for everyday querying.  For those
                 wanting more of an "autocomplete" type search, set
                 this option to 'ngram'.

Returns:
    A dict representation of the JSON returned from the API.
[ "Search", "for", "TV", "shows", "by", "title", "." ]
python
test
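Typical use of this method, assuming a valid TMDb API key has been configured; the key and query values below are placeholders:

import tmdbsimple as tmdb

tmdb.API_KEY = 'your-api-key'            # placeholder; a real TMDb key is required

search = tmdb.Search()
response = search.tv(query='The Expanse', page=1, language='en')
for show in response.get('results', []):
    print(show['name'], show.get('first_air_date'))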
edx/XBlock
xblock/fields.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/fields.py#L983-L991
def to_json(self, value):
    """
    Serialize the date as an ISO-formatted date string, or None.
    """
    if isinstance(value, datetime.datetime):
        return value.strftime(self.DATETIME_FORMAT)
    if value is None:
        return None

    raise TypeError("Value stored must be a datetime object, not {}".format(type(value)))
[ "def", "to_json", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", ".", "strftime", "(", "self", ".", "DATETIME_FORMAT", ")", "if", "value", "is", "None", ":", "return", "None", "raise", "TypeError", "(", "\"Value stored must be a datetime object, not {}\"", ".", "format", "(", "type", "(", "value", ")", ")", ")" ]
Serialize the date as an ISO-formatted date string, or None.
[ "Serialize", "the", "date", "as", "an", "ISO", "-", "formatted", "date", "string", "or", "None", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/berge_thierry_2003.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/berge_thierry_2003.py#L77-L88
def _get_stddevs(self, C, stddev_types, num_sites, mag_conversion_sigma):
    """
    Return total standard deviation.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)

    sigma = np.zeros(num_sites) + C['sigma'] * np.log(10)
    sigma = np.sqrt(sigma ** 2 + (C['a'] ** 2) * (mag_conversion_sigma ** 2))
    stddevs = [sigma for _ in stddev_types]

    return stddevs
[ "def", "_get_stddevs", "(", "self", ",", "C", ",", "stddev_types", ",", "num_sites", ",", "mag_conversion_sigma", ")", ":", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "for", "stddev_type", "in", "stddev_types", ")", "sigma", "=", "np", ".", "zeros", "(", "num_sites", ")", "+", "C", "[", "'sigma'", "]", "*", "np", ".", "log", "(", "10", ")", "sigma", "=", "np", ".", "sqrt", "(", "sigma", "**", "2", "+", "(", "C", "[", "'a'", "]", "**", "2", ")", "*", "(", "mag_conversion_sigma", "**", "2", ")", ")", "stddevs", "=", "[", "sigma", "for", "_", "in", "stddev_types", "]", "return", "stddevs" ]
Return total standard deviation.
[ "Return", "total", "standard", "deviation", "." ]
python
train
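Reading the two lines of arithmetic back out: the tabulated sigma is a base-10 value converted to natural log, then combined in quadrature with the magnitude-conversion term (my transcription of the code, not of the underlying GMPE paper):

\sigma_{\ln} = \sigma_{\log_{10}} \cdot \ln 10, \qquad
\sigma_{\mathrm{total}} = \sqrt{\sigma_{\ln}^{2} + a^{2}\,\sigma_{M}^{2}}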
cltk/cltk
cltk/prosody/latin/hexameter_scanner.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/hexameter_scanner.py#L54-L273
def scan(self, original_line: str, optional_transform: bool = False,
         dactyl_smoothing: bool = False) -> Verse:
    """
    Scan a line of Latin hexameter and produce a scansion pattern, and other data.

    :param original_line: the original line of Latin verse
    :param optional_transform: whether or not to perform i to j transform for syllabification
    :param dactyl_smoothing: whether or not to perform dactyl smoothing
    :return: a Verse object

    >>> scanner = HexameterScanner()
    >>> print(scanner.scan("impulerit. Tantaene animis caelestibus irae?"))
    Verse(original='impulerit. Tantaene animis caelestibus irae?', scansion='- U U - - - U U - - - U U - - ', meter='hexameter', valid=True, syllable_count=15, accented='īmpulerīt. Tāntaene animīs caelēstibus īrae?', scansion_notes=['Valid by positional stresses.'], syllables = ['īm', 'pu', 'le', 'rīt', 'Tān', 'taen', 'a', 'ni', 'mīs', 'cae', 'lēs', 'ti', 'bus', 'i', 'rae'])
    >>> print(scanner.scan(
    ... "Arma virumque cano, Troiae qui prīmus ab ōrīs").scansion) # doctest: +NORMALIZE_WHITESPACE
    - U U - U U - - - - - U U - -
    >>> # some hexameters need the optional transformations:
    >>> optional_transform_scanner = HexameterScanner(optional_transform=True)
    >>> print(optional_transform_scanner.scan(
    ... "Ītaliam, fāto profugus, Lāvīniaque vēnit").scansion) # doctest: +NORMALIZE_WHITESPACE
    - - - - - U U - - - U U - U
    >>> print(HexameterScanner().scan(
    ... "lītora, multum ille et terrīs iactātus et alto").scansion) # doctest: +NORMALIZE_WHITESPACE
    - U U - - - - - - - U U - U
    >>> print(HexameterScanner().scan(
    ... "vī superum saevae memorem Iūnōnis ob īram;").scansion) # doctest: +NORMALIZE_WHITESPACE
    - U U - - - U U - - - U U - U
    >>> # handle multiple elisions
    >>> print(scanner.scan("monstrum horrendum, informe, ingens, cui lumen ademptum").scansion) # doctest: +NORMALIZE_WHITESPACE
    - - - - - - - - - U U - U
    >>> # if we have 17 syllables, create a chain of all dactyls
    >>> print(scanner.scan("quadrupedante putrem sonitu quatit ungula campum"
    ... ).scansion) # doctest: +NORMALIZE_WHITESPACE
    - U U - U U - U U - U U - U U - U
    >>> # if we have 13 syllables exactly, we'll create a spondaic hexameter
    >>> print(HexameterScanner().scan(
    ... "illi inter sese multa vi bracchia tollunt").scansion) # doctest: +NORMALIZE_WHITESPACE
    - - - - - - - - - UU - -
    >>> print(HexameterScanner().scan(
    ... "dat latus; insequitur cumulo praeruptus aquae mons").scansion) # doctest: +NORMALIZE_WHITESPACE
    - U U - U U - U U - - - U U - -
    >>> print(optional_transform_scanner.scan(
    ... "Non quivis videt inmodulata poëmata iudex").scansion) # doctest: +NORMALIZE_WHITESPACE
    - - - U U - U U - U U- U U - -
    >>> print(HexameterScanner().scan(
    ... "certabant urbem Romam Remoramne vocarent").scansion) # doctest: +NORMALIZE_WHITESPACE
    - - - - - - - U U - U U - -
    >>> # advanced smoothing is available via keyword flags: dactyl_smoothing
    >>> # print(HexameterScanner().scan(
    #... "his verbis: 'o gnata, tibi sunt ante ferendae",
    #... dactyl_smoothing=True).scansion) # doctest: +NORMALIZE_WHITESPACE
    #- - - - - U U - - - U U - -
    """
    verse = Verse(original_line, meter='hexameter')
    # replace punctuation with spaces
    line = original_line.translate(self.punctuation_substitutions)
    # conservative i to j
    line = self.transform_i_to_j(line)
    working_line = self.elide_all(line)
    working_line = self.accent_by_position(working_line)
    syllables = self.syllabifier.syllabify(working_line)
    if optional_transform:
        working_line = self.transform_i_to_j_optional(line)
        working_line = self.elide_all(working_line)
        working_line = self.accent_by_position(working_line)
        syllables = self.syllabifier.syllabify(working_line)
        verse.scansion_notes += [self.constants.NOTE_MAP["optional i to j"]]
    verse.working_line = working_line
    verse.syllable_count = self.syllabifier.get_syllable_count(syllables)
    verse.syllables = syllables
    if verse.syllable_count < 12:
        verse.valid = False
        verse.scansion_notes += [self.constants.NOTE_MAP["< 12"]]
        return verse
    stresses = self.flag_dipthongs(syllables)
    syllables_wspaces = string_utils.to_syllables_with_trailing_spaces(working_line, syllables)
    offset_map = self.calc_offset(syllables_wspaces)
    for idx, syl in enumerate(syllables):
        for accented in self.constants.ACCENTED_VOWELS:
            if accented in syl:
                stresses.append(idx)
    # first syllable is always long in hexameter
    stresses.append(0)
    # second to last syllable is always long
    stresses.append(verse.syllable_count - 2)
    verse.scansion = self.produce_scansion(stresses, syllables_wspaces, offset_map)
    if len(string_utils.stress_positions(self.constants.STRESSED, verse.scansion)) != \
            len(set(stresses)):
        verse.valid = False
        verse.scansion_notes += [self.constants.NOTE_MAP["invalid syllables"]]
        return verse
    if self.metrical_validator.is_valid_hexameter(verse.scansion):
        verse.scansion_notes += [self.constants.NOTE_MAP["positionally"]]
        return self.assign_candidate(verse, verse.scansion)
    # identify some obvious and probable choices based on number of syllables
    if verse.syllable_count == 17:  # produce all dactyls
        candidate = self.produce_scansion(
            self.metrical_validator.hexameter_known_stresses(),
            syllables_wspaces, offset_map)
        verse.scansion_notes += [self.constants.NOTE_MAP["17"]]
        if self.metrical_validator.is_valid_hexameter(candidate):
            return self.assign_candidate(verse, candidate)
    if verse.syllable_count == 12:  # create all spondee hexameter
        candidate = self.produce_scansion(list(range(12)), syllables_wspaces, offset_map)
        if self.metrical_validator.is_valid_hexameter(verse.scansion):
            verse.scansion_notes += [self.constants.NOTE_MAP["12"]]
            return self.assign_candidate(verse, candidate)
    if verse.syllable_count == 13:  # create spondee hexameter with a dactyl at 5th foot
        known_unaccents = [9, 10]
        last_syllable_accented = False
        for vowel in self.constants.ACCENTED_VOWELS:
            if vowel in verse.syllables[12]:
                last_syllable_accented = True
        if not last_syllable_accented:
            known_unaccents.append(12)
        if set(known_unaccents) - set(stresses) != len(known_unaccents):
            verse.scansion = self.produce_scansion(
                [x for x in range(13) if x not in known_unaccents],
                syllables_wspaces, offset_map)
            verse.scansion_notes += [self.constants.NOTE_MAP["5th dactyl"]]
            if self.metrical_validator.is_valid_hexameter(verse.scansion):
                return self.assign_candidate(verse, verse.scansion)
    if verse.syllable_count > 17:
        verse.valid = False
        verse.scansion_notes += [self.constants.NOTE_MAP["> 17"]]
        return verse
    smoothed = self.correct_inverted_amphibrachs(verse.scansion)
    if distance(verse.scansion, smoothed) > 0:
        verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
        verse.scansion = smoothed
        stresses += string_utils.differences(verse.scansion, smoothed)
    if self.metrical_validator.is_valid_hexameter(verse.scansion):
        return self.assign_candidate(verse, verse.scansion)
    smoothed = self.correct_first_two_dactyls(verse.scansion)
    if distance(verse.scansion, smoothed) > 0:
        verse.scansion_notes += [self.constants.NOTE_MAP["invalid start"]]
        verse.scansion = smoothed
        stresses += string_utils.differences(verse.scansion, smoothed)
    if self.metrical_validator.is_valid_hexameter(verse.scansion):
        return self.assign_candidate(verse, verse.scansion)
    smoothed = self.correct_invalid_fifth_foot(verse.scansion)
    if distance(verse.scansion, smoothed) > 0:
        verse.scansion_notes += [self.constants.NOTE_MAP["invalid 5th"]]
        verse.scansion = smoothed
        stresses += string_utils.differences(verse.scansion, smoothed)
    if self.metrical_validator.is_valid_hexameter(verse.scansion):
        return self.assign_candidate(verse, verse.scansion)
    feet = self.metrical_validator.hexameter_feet(verse.scansion.replace(" ", ""))
    if feet:
        #  Normal good citizens are unwelcome in the house of hexameter
        invalid_feet_in_hexameter = [self.constants.IAMB, self.constants.TROCHEE]
        current_foot = 0
        ending = feet.pop()  # don't process the ending, a possible trochee, add it back after
        scanned_line = ""
        for foot in feet:
            if foot.replace(" ", "") in invalid_feet_in_hexameter:
                scanned_line = self.invalid_foot_to_spondee(feet, foot, current_foot)
                scanned_line = scanned_line + ending
            current_foot += 1
        smoothed = self.produce_scansion(
            stresses + string_utils.stress_positions(self.constants.STRESSED, scanned_line),
            syllables_wspaces, offset_map)
        if self.metrical_validator.is_valid_hexameter(smoothed):
            verse.scansion_notes += [self.constants.NOTE_MAP["invalid foot"]]
            return self.assign_candidate(verse, smoothed)
    # need to do this again, since the scansion has changed
    smoothed = self.correct_inverted_amphibrachs(verse.scansion)
    if distance(verse.scansion, smoothed) > 0:
        verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
        verse.scansion = smoothed
        stresses += string_utils.differences(verse.scansion, smoothed)
    if self.metrical_validator.is_valid_hexameter(verse.scansion):
        return self.assign_candidate(verse, verse.scansion)
    candidates = self.metrical_validator.closest_hexameter_patterns(verse.scansion)
    if candidates is not None:
        if len(candidates) == 1 \
                and len(verse.scansion.replace(" ", "")) == len(candidates[0]) \
                and len(string_utils.differences(verse.scansion, candidates[0])) == 1:
            tmp_scansion = self.produce_scansion(
                string_utils.differences(verse.scansion, candidates[0]),
                syllables_wspaces, offset_map)
            if self.metrical_validator.is_valid_hexameter(tmp_scansion):
                verse.scansion_notes += [self.constants.NOTE_MAP["closest match"]]
                return self.assign_candidate(verse, tmp_scansion)
    # need to do this again, since the scansion has changed
    smoothed = self.correct_inverted_amphibrachs(smoothed)
    if self.metrical_validator.is_valid_hexameter(smoothed):
        verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
        return self.assign_candidate(verse, smoothed)
    if dactyl_smoothing:
        smoothed = self.correct_dactyl_chain(smoothed)
        if distance(verse.scansion, smoothed) > 0:
            verse.scansion_notes += [self.constants.NOTE_MAP["dactyl smoothing"]]
            verse.scansion = smoothed
        if self.metrical_validator.is_valid_hexameter(verse.scansion):
            return self.assign_candidate(verse, verse.scansion)
    # if the line doesn't scan "as is", it may scan if the optional i to j
    # transformations are made, so here we set them and try again.
    if self.optional_transform and not verse.valid:
        return self.scan(original_line, optional_transform=True, dactyl_smoothing=True)
    return verse
[ "def", "scan", "(", "self", ",", "original_line", ":", "str", ",", "optional_transform", ":", "bool", "=", "False", ",", "dactyl_smoothing", ":", "bool", "=", "False", ")", "->", "Verse", ":", "verse", "=", "Verse", "(", "original_line", ",", "meter", "=", "'hexameter'", ")", "# replace punctuation with spaces", "line", "=", "original_line", ".", "translate", "(", "self", ".", "punctuation_substitutions", ")", "# conservative i to j", "line", "=", "self", ".", "transform_i_to_j", "(", "line", ")", "working_line", "=", "self", ".", "elide_all", "(", "line", ")", "working_line", "=", "self", ".", "accent_by_position", "(", "working_line", ")", "syllables", "=", "self", ".", "syllabifier", ".", "syllabify", "(", "working_line", ")", "if", "optional_transform", ":", "working_line", "=", "self", ".", "transform_i_to_j_optional", "(", "line", ")", "working_line", "=", "self", ".", "elide_all", "(", "working_line", ")", "working_line", "=", "self", ".", "accent_by_position", "(", "working_line", ")", "syllables", "=", "self", ".", "syllabifier", ".", "syllabify", "(", "working_line", ")", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"optional i to j\"", "]", "]", "verse", ".", "working_line", "=", "working_line", "verse", ".", "syllable_count", "=", "self", ".", "syllabifier", ".", "get_syllable_count", "(", "syllables", ")", "verse", ".", "syllables", "=", "syllables", "if", "verse", ".", "syllable_count", "<", "12", ":", "verse", ".", "valid", "=", "False", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"< 12\"", "]", "]", "return", "verse", "stresses", "=", "self", ".", "flag_dipthongs", "(", "syllables", ")", "syllables_wspaces", "=", "string_utils", ".", "to_syllables_with_trailing_spaces", "(", "working_line", ",", "syllables", ")", "offset_map", "=", "self", ".", "calc_offset", "(", "syllables_wspaces", ")", "for", "idx", ",", "syl", "in", "enumerate", "(", "syllables", ")", ":", "for", "accented", "in", "self", ".", "constants", ".", "ACCENTED_VOWELS", ":", "if", "accented", "in", "syl", ":", "stresses", ".", "append", "(", "idx", ")", "# first syllable is always long in hexameter", "stresses", ".", "append", "(", "0", ")", "# second to last syllable is always long", "stresses", ".", "append", "(", "verse", ".", "syllable_count", "-", "2", ")", "verse", ".", "scansion", "=", "self", ".", "produce_scansion", "(", "stresses", ",", "syllables_wspaces", ",", "offset_map", ")", "if", "len", "(", "string_utils", ".", "stress_positions", "(", "self", ".", "constants", ".", "STRESSED", ",", "verse", ".", "scansion", ")", ")", "!=", "len", "(", "set", "(", "stresses", ")", ")", ":", "verse", ".", "valid", "=", "False", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"invalid syllables\"", "]", "]", "return", "verse", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"positionally\"", "]", "]", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "# identify some obvious and probably choices based on number of syllables", "if", "verse", ".", "syllable_count", "==", "17", ":", "# produce all dactyls", "candidate", "=", "self", ".", "produce_scansion", "(", "self", ".", "metrical_validator", ".", "hexameter_known_stresses", "(", ")", ",", "syllables_wspaces", ",", 
"offset_map", ")", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"17\"", "]", "]", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "candidate", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "candidate", ")", "if", "verse", ".", "syllable_count", "==", "12", ":", "# create all spondee hexameter", "candidate", "=", "self", ".", "produce_scansion", "(", "list", "(", "range", "(", "12", ")", ")", ",", "syllables_wspaces", ",", "offset_map", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"12\"", "]", "]", "return", "self", ".", "assign_candidate", "(", "verse", ",", "candidate", ")", "if", "verse", ".", "syllable_count", "==", "13", ":", "# create spondee hexameter with a dactyl at 5th foot", "known_unaccents", "=", "[", "9", ",", "10", "]", "last_syllable_accented", "=", "False", "for", "vowel", "in", "self", ".", "constants", ".", "ACCENTED_VOWELS", ":", "if", "vowel", "in", "verse", ".", "syllables", "[", "12", "]", ":", "last_syllable_accented", "=", "True", "if", "not", "last_syllable_accented", ":", "known_unaccents", ".", "append", "(", "12", ")", "if", "set", "(", "known_unaccents", ")", "-", "set", "(", "stresses", ")", "!=", "len", "(", "known_unaccents", ")", ":", "verse", ".", "scansion", "=", "self", ".", "produce_scansion", "(", "[", "x", "for", "x", "in", "range", "(", "13", ")", "if", "x", "not", "in", "known_unaccents", "]", ",", "syllables_wspaces", ",", "offset_map", ")", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"5th dactyl\"", "]", "]", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "if", "verse", ".", "syllable_count", ">", "17", ":", "verse", ".", "valid", "=", "False", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"> 17\"", "]", "]", "return", "verse", "smoothed", "=", "self", ".", "correct_inverted_amphibrachs", "(", "verse", ".", "scansion", ")", "if", "distance", "(", "verse", ".", "scansion", ",", "smoothed", ")", ">", "0", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"inverted\"", "]", "]", "verse", ".", "scansion", "=", "smoothed", "stresses", "+=", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "smoothed", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "smoothed", "=", "self", ".", "correct_first_two_dactyls", "(", "verse", ".", "scansion", ")", "if", "distance", "(", "verse", ".", "scansion", ",", "smoothed", ")", ">", "0", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"invalid start\"", "]", "]", "verse", ".", "scansion", "=", "smoothed", "stresses", "+=", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "smoothed", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "smoothed", "=", "self", ".", 
"correct_invalid_fifth_foot", "(", "verse", ".", "scansion", ")", "if", "distance", "(", "verse", ".", "scansion", ",", "smoothed", ")", ">", "0", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"invalid 5th\"", "]", "]", "verse", ".", "scansion", "=", "smoothed", "stresses", "+=", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "smoothed", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "feet", "=", "self", ".", "metrical_validator", ".", "hexameter_feet", "(", "verse", ".", "scansion", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ")", "if", "feet", ":", "# Normal good citizens are unwelcome in the house of hexameter", "invalid_feet_in_hexameter", "=", "[", "self", ".", "constants", ".", "IAMB", ",", "self", ".", "constants", ".", "TROCHEE", "]", "current_foot", "=", "0", "ending", "=", "feet", ".", "pop", "(", ")", "# don't process the ending, a possible trochee, add it back after", "scanned_line", "=", "\"\"", "for", "foot", "in", "feet", ":", "if", "foot", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "in", "invalid_feet_in_hexameter", ":", "scanned_line", "=", "self", ".", "invalid_foot_to_spondee", "(", "feet", ",", "foot", ",", "current_foot", ")", "scanned_line", "=", "scanned_line", "+", "ending", "current_foot", "+=", "1", "smoothed", "=", "self", ".", "produce_scansion", "(", "stresses", "+", "string_utils", ".", "stress_positions", "(", "self", ".", "constants", ".", "STRESSED", ",", "scanned_line", ")", ",", "syllables_wspaces", ",", "offset_map", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "smoothed", ")", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"invalid foot\"", "]", "]", "return", "self", ".", "assign_candidate", "(", "verse", ",", "smoothed", ")", "# need to do this again, since the scansion has changed", "smoothed", "=", "self", ".", "correct_inverted_amphibrachs", "(", "verse", ".", "scansion", ")", "if", "distance", "(", "verse", ".", "scansion", ",", "smoothed", ")", ">", "0", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"inverted\"", "]", "]", "verse", ".", "scansion", "=", "smoothed", "stresses", "+=", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "smoothed", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "candidates", "=", "self", ".", "metrical_validator", ".", "closest_hexameter_patterns", "(", "verse", ".", "scansion", ")", "if", "candidates", "is", "not", "None", ":", "if", "len", "(", "candidates", ")", "==", "1", "and", "len", "(", "verse", ".", "scansion", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ")", "==", "len", "(", "candidates", "[", "0", "]", ")", "and", "len", "(", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "candidates", "[", "0", "]", ")", ")", "==", "1", ":", "tmp_scansion", "=", "self", ".", "produce_scansion", "(", "string_utils", ".", "differences", "(", "verse", ".", "scansion", ",", "candidates", "[", "0", "]", ")", ",", "syllables_wspaces", ",", "offset_map", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "tmp_scansion", 
")", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"closest match\"", "]", "]", "return", "self", ".", "assign_candidate", "(", "verse", ",", "tmp_scansion", ")", "# need to do this again, since the scansion has changed", "smoothed", "=", "self", ".", "correct_inverted_amphibrachs", "(", "smoothed", ")", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "smoothed", ")", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"inverted\"", "]", "]", "return", "self", ".", "assign_candidate", "(", "verse", ",", "smoothed", ")", "if", "dactyl_smoothing", ":", "smoothed", "=", "self", ".", "correct_dactyl_chain", "(", "smoothed", ")", "if", "distance", "(", "verse", ".", "scansion", ",", "smoothed", ")", ">", "0", ":", "verse", ".", "scansion_notes", "+=", "[", "self", ".", "constants", ".", "NOTE_MAP", "[", "\"dactyl smoothing\"", "]", "]", "verse", ".", "scansion", "=", "smoothed", "if", "self", ".", "metrical_validator", ".", "is_valid_hexameter", "(", "verse", ".", "scansion", ")", ":", "return", "self", ".", "assign_candidate", "(", "verse", ",", "verse", ".", "scansion", ")", "# if the line doesn't scan \"as is\", if may scan if the optional i to j transformations", "# are made, so here we set them and try again.", "if", "self", ".", "optional_transform", "and", "not", "verse", ".", "valid", ":", "return", "self", ".", "scan", "(", "original_line", ",", "optional_transform", "=", "True", ",", "dactyl_smoothing", "=", "True", ")", "return", "verse" ]
Scan a line of Latin hexameter and produce a scansion pattern, and other data.

:param original_line: the original line of Latin verse
:param optional_transform: whether or not to perform i to j transform for syllabification
:param dactyl_smoothing: whether or not to perform dactyl smoothing
:return: a Verse object

>>> scanner = HexameterScanner()

>>> print(scanner.scan("impulerit. Tantaene animis caelestibus irae?"))
Verse(original='impulerit. Tantaene animis caelestibus irae?', scansion='- U U - - - U U - - - U U - - ', meter='hexameter', valid=True, syllable_count=15, accented='īmpulerīt. Tāntaene animīs caelēstibus īrae?', scansion_notes=['Valid by positional stresses.'], syllables = ['īm', 'pu', 'le', 'rīt', 'Tān', 'taen', 'a', 'ni', 'mīs', 'cae', 'lēs', 'ti', 'bus', 'i', 'rae'])

>>> print(scanner.scan(
... "Arma virumque cano, Troiae qui prīmus ab ōrīs").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - - - - - U U - -

>>> # some hexameters need the optional transformations:
>>> optional_transform_scanner = HexameterScanner(optional_transform=True)

>>> print(optional_transform_scanner.scan(
... "Ītaliam, fāto profugus, Lāvīniaque vēnit").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - U U - - - U U - U

>>> print(HexameterScanner().scan(
... "lītora, multum ille et terrīs iactātus et alto").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - - - - - - - U U - U

>>> print(HexameterScanner().scan(
... "vī superum saevae memorem Iūnōnis ob īram;").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - - - U U - - - U U - U

>>> # handle multiple elisions
>>> print(scanner.scan("monstrum horrendum, informe, ingens, cui lumen ademptum").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - - - U U - U

>>> # if we have 17 syllables, create a chain of all dactyls
>>> print(scanner.scan("quadrupedante putrem sonitu quatit ungula campum"
... ).scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - U U - U U - U U - U

>>> # if we have 13 syllables exactly, we'll create a spondaic hexameter
>>> print(HexameterScanner().scan(
... "illi inter sese multa vi bracchia tollunt").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - - - UU - -

>>> print(HexameterScanner().scan(
... "dat latus; insequitur cumulo praeruptus aquae mons").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - U U - - - U U - -

>>> print(optional_transform_scanner.scan(
... "Non quivis videt inmodulata poëmata iudex").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - U U - U U - U U- U U - -

>>> print(HexameterScanner().scan(
... "certabant urbem Romam Remoramne vocarent").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - U U - U U - -

>>> # advanced smoothing is available via keyword flags: dactyl_smoothing
>>> # print(HexameterScanner().scan(
#... "his verbis: 'o gnata, tibi sunt ante ferendae",
#... dactyl_smoothing=True).scansion) # doctest: +NORMALIZE_WHITESPACE
#- - - - - U U - - - U U - -
[ "Scan", "a", "line", "of", "Latin", "hexameter", "and", "produce", "a", "scansion", "pattern", "and", "other", "data", "." ]
python
train
sirfoga/pyhal
hal/files/models/system.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/system.py#L291-L300
def rename(self, new_path):
        """Renames to new path

        :param new_path: new path to use
        """
        rename_path = fix_raw_path(new_path)
        if is_folder(self.path):
            os.rename(self.path, rename_path)
        else:
            os.renames(self.path, rename_path)
[ "def", "rename", "(", "self", ",", "new_path", ")", ":", "rename_path", "=", "fix_raw_path", "(", "new_path", ")", "if", "is_folder", "(", "self", ".", "path", ")", ":", "os", ".", "rename", "(", "self", ".", "path", ",", "rename_path", ")", "else", ":", "os", ".", "renames", "(", "self", ".", "path", ",", "rename_path", ")" ]
Renames to new path

:param new_path: new path to use
[ "Renames", "to", "new", "path" ]
python
train
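A minimal standalone sketch of the rename logic above; fix_raw_path and is_folder are stand-ins for pyhal helpers defined elsewhere (their exact behavior is assumed here). The point it illustrates is the split between os.rename and os.renames: the latter creates missing intermediate directories on the way to the target.

import os
import tempfile

# Hypothetical stand-ins for pyhal's fix_raw_path / is_folder helpers.
def fix_raw_path(path):
    return os.path.normpath(path)

def is_folder(path):
    return os.path.isdir(path)

root = tempfile.mkdtemp()
src = os.path.join(root, 'a.txt')
open(src, 'w').close()

dst = fix_raw_path(os.path.join(root, 'nested', 'dir', 'b.txt'))
if is_folder(src):
    os.rename(src, dst)       # plain rename for folders
else:
    os.renames(src, dst)      # creates 'nested/dir' on the way
print(os.path.exists(dst))    # True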
lablup/backend.ai-client-py
src/ai/backend/client/cli/admin/images.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/admin/images.py#L72-L83
def dealias_image(alias):
    '''Remove an image alias.'''
    with Session() as session:
        try:
            result = session.Image.dealiasImage(alias)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if result['ok']:
            print("alias {0} removed.".format(alias))
        else:
            print(result['msg'])
[ "def", "dealias_image", "(", "alias", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "result", "=", "session", ".", "Image", ".", "dealiasImage", "(", "alias", ")", "except", "Exception", "as", "e", ":", "print_error", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "if", "result", "[", "'ok'", "]", ":", "print", "(", "\"alias {0} removed.\"", ".", "format", "(", "alias", ")", ")", "else", ":", "print", "(", "result", "[", "'msg'", "]", ")" ]
Remove an image alias.
[ "Remove", "an", "image", "alias", "." ]
python
train
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3443-L3470
def isNonNull(requestContext, seriesList):
    """
    Takes a metric or wild card seriesList and counts up how many
    non-null values are specified. This is useful for understanding which
    metrics have data at a given point in time (ie, to count which servers
    are alive).

    Example::

        &target=isNonNull(webapp.pages.*.views)

    Returns a seriesList where 1 is specified for non-null values, and
    0 is specified for null values.
    """

    def transform(v):
        if v is None:
            return 0
        else:
            return 1

    for series in seriesList:
        series.name = "isNonNull(%s)" % (series.name)
        series.pathExpression = series.name
        values = [transform(v) for v in series]
        series.extend(values)
        del series[:len(values)]
    return seriesList
[ "def", "isNonNull", "(", "requestContext", ",", "seriesList", ")", ":", "def", "transform", "(", "v", ")", ":", "if", "v", "is", "None", ":", "return", "0", "else", ":", "return", "1", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"isNonNull(%s)\"", "%", "(", "series", ".", "name", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "values", "=", "[", "transform", "(", "v", ")", "for", "v", "in", "series", "]", "series", ".", "extend", "(", "values", ")", "del", "series", "[", ":", "len", "(", "values", ")", "]", "return", "seriesList" ]
Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding which
metrics have data at a given point in time (ie, to count which servers
are alive).

Example::

    &target=isNonNull(webapp.pages.*.views)

Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
[ "Takes", "a", "metric", "or", "wild", "card", "seriesList", "and", "counts", "up", "how", "many", "non", "-", "null", "values", "are", "specified", ".", "This", "is", "useful", "for", "understanding", "which", "metrics", "have", "data", "at", "a", "given", "point", "in", "time", "(", "ie", "to", "count", "which", "servers", "are", "alive", ")", "." ]
python
train
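A quick way to see what isNonNull does to the data, with a plain list standing in for a graphite-api TimeSeries (which behaves like a list of values); the names here are illustrative only.

# Stand-in for a TimeSeries: just the values, None marking missing points.
series_values = [12.0, None, 7.5, None, None, 3.1]

transformed = [0 if v is None else 1 for v in series_values]
print(transformed)  # [1, 0, 1, 0, 0, 1]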
saltstack/salt
salt/states/pcs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L984-L1021
def resource_present(name, resource_id, resource_type, resource_options=None, cibname=None):
    '''
    Ensure that a resource is created

    Should be run on one cluster node only
    (there may be races)
    Can only be run on a node with a functional pacemaker/corosync

    name
        Irrelevant, not used (recommended: {{formulaname}}__resource_present_{{resource_id}})
    resource_id
        name for the resource
    resource_type
        resource type (e.g. ocf:heartbeat:IPaddr2 or VirtualIP)
    resource_options
        additional options for creating the resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        mysql_pcs__resource_present_galera:
            pcs.resource_present:
                - resource_id: galera
                - resource_type: "ocf:heartbeat:galera"
                - resource_options:
                    - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
                    - '--master'
                - cibname: cib_for_galera
    '''
    return _item_present(name=name,
                         item='resource',
                         item_id=resource_id,
                         item_type=resource_type,
                         extra_args=resource_options,
                         cibname=cibname)
[ "def", "resource_present", "(", "name", ",", "resource_id", ",", "resource_type", ",", "resource_options", "=", "None", ",", "cibname", "=", "None", ")", ":", "return", "_item_present", "(", "name", "=", "name", ",", "item", "=", "'resource'", ",", "item_id", "=", "resource_id", ",", "item_type", "=", "resource_type", ",", "extra_args", "=", "resource_options", ",", "cibname", "=", "cibname", ")" ]
Ensure that a resource is created

Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync

name
    Irrelevant, not used (recommended: {{formulaname}}__resource_present_{{resource_id}})
resource_id
    name for the resource
resource_type
    resource type (e.g. ocf:heartbeat:IPaddr2 or VirtualIP)
resource_options
    additional options for creating the resource
cibname
    use a cached CIB-file named like cibname instead of the live CIB

Example:

.. code-block:: yaml

    mysql_pcs__resource_present_galera:
        pcs.resource_present:
            - resource_id: galera
            - resource_type: "ocf:heartbeat:galera"
            - resource_options:
                - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
                - '--master'
            - cibname: cib_for_galera
[ "Ensure", "that", "a", "resource", "is", "created" ]
python
train
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py#L1338-L1436
def configure_switch_entries(self, switch_ip, port_bindings):
        """Create a nexus switch entry in Nexus.

        The port_bindings are sorted by port_id, vlan_id, vni. When
        there is a change in port_id, the vlans gathered so far are
        written to the previous port as trunk config; duplicate rows
        are skipped and vlan creation is batched after the loop.

        Called during switch replay event.
        """
        prev_vlan = -1
        prev_vni = -1
        prev_port = None
        prev_native_vlan = 0
        starttime = time.time()
        port_bindings.sort(key=lambda x: (x.port_id, x.vlan_id, x.vni))
        self.driver.capture_and_print_timeshot(
            starttime, "replay_t2_aft_sort", switch=switch_ip)

        # Let's make these lists a set to exclude duplicates
        vlans = set()
        pvlans = set()

        interface_count = 0
        duplicate_port = 0
        vlan_count = 0
        for port in port_bindings:
            if nxos_db.is_reserved_binding(port):
                continue
            auto_create, auto_trunk = self._gather_config_parms(
                nxos_db.is_provider_vlan(port.vlan_id), port.vlan_id)
            if port.port_id == prev_port:
                if port.vlan_id == prev_vlan and port.vni == prev_vni:
                    # Same port/Same Vlan - skip duplicate
                    duplicate_port += 1
                    continue
                else:
                    # Same port/different Vlan - track it
                    vlan_count += 1
                    if auto_create:
                        vlans.add((port.vlan_id, port.vni))
                    if auto_trunk:
                        pvlans.add(port.vlan_id)
                    if port.is_native:
                        prev_native_vlan = port.vlan_id
            else:
                # Different port - write out interface trunk on previous port
                if prev_port:
                    interface_count += 1
                    LOG.debug("Switch %s port %s replay summary: unique vlan "
                              "count %d, duplicate port entries %d",
                              switch_ip, prev_port, vlan_count, duplicate_port)
                    duplicate_port = 0
                    vlan_count = 0
                    if pvlans:
                        self._restore_port_binding(
                            switch_ip, pvlans, prev_port, prev_native_vlan)
                        pvlans.clear()
                        prev_native_vlan = 0
                # Start tracking new port
                if auto_create:
                    vlans.add((port.vlan_id, port.vni))
                if auto_trunk:
                    pvlans.add(port.vlan_id)
                prev_port = port.port_id
                if port.is_native:
                    prev_native_vlan = port.vlan_id

        if pvlans:
            LOG.debug("Switch %s port %s replay summary: unique vlan "
                      "count %d, duplicate port entries %d",
                      switch_ip, port.port_id, vlan_count, duplicate_port)
            self._restore_port_binding(
                switch_ip, pvlans, prev_port, prev_native_vlan)
        LOG.debug("Replayed total %d ports for Switch %s",
                  interface_count + 1, switch_ip)
        self.driver.capture_and_print_timeshot(
            starttime, "replay_part_1", switch=switch_ip)

        vlans = list(vlans)
        if vlans:
            vlans.sort()
            vlan, vni = vlans[0]
            if vni == 0:
                self._save_switch_vlan_range(switch_ip, vlans)
            else:
                self._save_switch_vxlan_range(switch_ip, vlans)
        self.set_switch_ip_and_active_state(
            switch_ip, const.SWITCH_RESTORE_S2)
        self.configure_next_batch_of_vlans(switch_ip)
        self.driver.capture_and_print_timeshot(
            starttime, "replay_part_2", switch=switch_ip)
[ "def", "configure_switch_entries", "(", "self", ",", "switch_ip", ",", "port_bindings", ")", ":", "prev_vlan", "=", "-", "1", "prev_vni", "=", "-", "1", "prev_port", "=", "None", "prev_native_vlan", "=", "0", "starttime", "=", "time", ".", "time", "(", ")", "port_bindings", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "x", ".", "port_id", ",", "x", ".", "vlan_id", ",", "x", ".", "vni", ")", ")", "self", ".", "driver", ".", "capture_and_print_timeshot", "(", "starttime", ",", "\"replay_t2_aft_sort\"", ",", "switch", "=", "switch_ip", ")", "# Let's make these lists a set to exclude duplicates", "vlans", "=", "set", "(", ")", "pvlans", "=", "set", "(", ")", "interface_count", "=", "0", "duplicate_port", "=", "0", "vlan_count", "=", "0", "for", "port", "in", "port_bindings", ":", "if", "nxos_db", ".", "is_reserved_binding", "(", "port", ")", ":", "continue", "auto_create", ",", "auto_trunk", "=", "self", ".", "_gather_config_parms", "(", "nxos_db", ".", "is_provider_vlan", "(", "port", ".", "vlan_id", ")", ",", "port", ".", "vlan_id", ")", "if", "port", ".", "port_id", "==", "prev_port", ":", "if", "port", ".", "vlan_id", "==", "prev_vlan", "and", "port", ".", "vni", "==", "prev_vni", ":", "# Same port/Same Vlan - skip duplicate", "duplicate_port", "+=", "1", "continue", "else", ":", "# Same port/different Vlan - track it", "vlan_count", "+=", "1", "if", "auto_create", ":", "vlans", ".", "add", "(", "(", "port", ".", "vlan_id", ",", "port", ".", "vni", ")", ")", "if", "auto_trunk", ":", "pvlans", ".", "add", "(", "port", ".", "vlan_id", ")", "if", "port", ".", "is_native", ":", "prev_native_vlan", "=", "port", ".", "vlan_id", "else", ":", "# Different port - write out interface trunk on previous port", "if", "prev_port", ":", "interface_count", "+=", "1", "LOG", ".", "debug", "(", "\"Switch %s port %s replay summary: unique vlan \"", "\"count %d, duplicate port entries %d\"", ",", "switch_ip", ",", "prev_port", ",", "vlan_count", ",", "duplicate_port", ")", "duplicate_port", "=", "0", "vlan_count", "=", "0", "if", "pvlans", ":", "self", ".", "_restore_port_binding", "(", "switch_ip", ",", "pvlans", ",", "prev_port", ",", "prev_native_vlan", ")", "pvlans", ".", "clear", "(", ")", "prev_native_vlan", "=", "0", "# Start tracking new port", "if", "auto_create", ":", "vlans", ".", "add", "(", "(", "port", ".", "vlan_id", ",", "port", ".", "vni", ")", ")", "if", "auto_trunk", ":", "pvlans", ".", "add", "(", "port", ".", "vlan_id", ")", "prev_port", "=", "port", ".", "port_id", "if", "port", ".", "is_native", ":", "prev_native_vlan", "=", "port", ".", "vlan_id", "if", "pvlans", ":", "LOG", ".", "debug", "(", "\"Switch %s port %s replay summary: unique vlan \"", "\"count %d, duplicate port entries %d\"", ",", "switch_ip", ",", "port", ".", "port_id", ",", "vlan_count", ",", "duplicate_port", ")", "self", ".", "_restore_port_binding", "(", "switch_ip", ",", "pvlans", ",", "prev_port", ",", "prev_native_vlan", ")", "LOG", ".", "debug", "(", "\"Replayed total %d ports for Switch %s\"", ",", "interface_count", "+", "1", ",", "switch_ip", ")", "self", ".", "driver", ".", "capture_and_print_timeshot", "(", "starttime", ",", "\"replay_part_1\"", ",", "switch", "=", "switch_ip", ")", "vlans", "=", "list", "(", "vlans", ")", "if", "vlans", ":", "vlans", ".", "sort", "(", ")", "vlan", ",", "vni", "=", "vlans", "[", "0", "]", "if", "vni", "==", "0", ":", "self", ".", "_save_switch_vlan_range", "(", "switch_ip", ",", "vlans", ")", "else", ":", "self", ".", "_save_switch_vxlan_range", "(", "switch_ip", 
",", "vlans", ")", "self", ".", "set_switch_ip_and_active_state", "(", "switch_ip", ",", "const", ".", "SWITCH_RESTORE_S2", ")", "self", ".", "configure_next_batch_of_vlans", "(", "switch_ip", ")", "self", ".", "driver", ".", "capture_and_print_timeshot", "(", "starttime", ",", "\"replay_part_2\"", ",", "switch", "=", "switch_ip", ")" ]
Create a nexus switch entry in Nexus.

The port_bindings are sorted by port_id, vlan_id, vni. When
there is a change in port_id, the vlans gathered so far are
written to the previous port as trunk config; duplicate rows
are skipped and vlan creation is batched after the loop.

Called during switch replay event.
[ "Create", "a", "nexus", "switch", "entry", "in", "Nexus", "." ]
python
train
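The heart of the replay loop in configure_switch_entries above is a sort followed by per-port grouping with duplicate suppression. A simplified, self-contained sketch of that pattern, with plain tuples standing in for Nexus port-binding rows:

# (port_id, vlan_id, vni) tuples standing in for port-binding rows.
bindings = [('eth1/1', 101, 0), ('eth1/2', 100, 0), ('eth1/1', 100, 0),
            ('eth1/1', 100, 0), ('eth1/2', 101, 0)]

bindings.sort()  # port-major ordering, as in the sort key above
prev = (None, None, None)
per_port_vlans = {}
for port_id, vlan_id, vni in bindings:
    if (port_id, vlan_id, vni) == prev:
        continue  # same port / same vlan: skip the duplicate row
    per_port_vlans.setdefault(port_id, set()).add(vlan_id)
    prev = (port_id, vlan_id, vni)

print(per_port_vlans)  # {'eth1/1': {100, 101}, 'eth1/2': {100, 101}}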
rushter/heamy
heamy/feature.py
https://github.com/rushter/heamy/blob/c330854cee3c547417eb353a4a4a23331b40b4bc/heamy/feature.py#L126-L152
def mean_target(df, feature_name, target_name, C=None):
    """Mean target.

    Original idea: Stanislav Semenov

    Parameters
    ----------
    C : float, default None
        Regularization coefficient. The higher, the more conservative
        the result. The optimal value lies between 10 and 50 depending
        on the data.
    feature_name : str
    target_name : str
    df: DataFrame

    Returns
    -------
    Series
    """

    def group_mean(group):
        group_size = float(group.shape[0])
        if C is None:
            return (group.mean() * group_size + global_mean) / group_size
        else:
            return (group.mean() * group_size + global_mean * C) / (group_size + C)

    global_mean = df[target_name].mean()
    return df.groupby(feature_name)[target_name].transform(group_mean)
[ "def", "mean_target", "(", "df", ",", "feature_name", ",", "target_name", ",", "C", "=", "None", ")", ":", "def", "group_mean", "(", "group", ")", ":", "group_size", "=", "float", "(", "group", ".", "shape", "[", "0", "]", ")", "if", "C", "is", "None", ":", "return", "(", "group", ".", "mean", "(", ")", "*", "group_size", "+", "global_mean", ")", "/", "group_size", "else", ":", "return", "(", "group", ".", "mean", "(", ")", "*", "group_size", "+", "global_mean", "*", "C", ")", "/", "(", "group_size", "+", "C", ")", "global_mean", "=", "df", "[", "target_name", "]", ".", "mean", "(", ")", "return", "df", ".", "groupby", "(", "feature_name", ")", "[", "target_name", "]", ".", "transform", "(", "group_mean", ")" ]
Mean target.

Original idea: Stanislav Semenov

Parameters
----------
C : float, default None
    Regularization coefficient. The higher, the more conservative
    the result. The optimal value lies between 10 and 50 depending
    on the data.
feature_name : str
target_name : str
df: DataFrame

Returns
-------
Series
[ "Mean", "target", ".", "Original", "idea", ":", "Stanislav", "Semenov" ]
python
train
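mean_target is a smoothed target (mean) encoding: with the regularization term C, rare categories are pulled toward the global mean. A runnable pandas demo of the same formula on a tiny frame (the data and C=10 are arbitrary choices for illustration):

import pandas as pd

df = pd.DataFrame({'city': ['a', 'a', 'a', 'b'],
                   'price': [1.0, 2.0, 3.0, 10.0]})

global_mean = df['price'].mean()  # 4.0

def group_mean(group, C=10):
    n = float(group.shape[0])
    return (group.mean() * n + global_mean * C) / (n + C)

encoded = df.groupby('city')['price'].transform(group_mean)
print(encoded.tolist())
# city 'b' has one sample, so its encoding sits close to the global mean:
# [~3.54, ~3.54, ~3.54, ~4.55]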
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L71-L82
def _stamped_deps(stamp_directory, func, dependencies, *args, **kwargs):
    """Run func, assumed to have dependencies as its first argument."""
    if not isinstance(dependencies, list):
        jobstamps_dependencies = [dependencies]
    else:
        jobstamps_dependencies = dependencies

    kwargs.update({
        "jobstamps_cache_output_directory": stamp_directory,
        "jobstamps_dependencies": jobstamps_dependencies
    })
    return jobstamp.run(func, dependencies, *args, **kwargs)
[ "def", "_stamped_deps", "(", "stamp_directory", ",", "func", ",", "dependencies", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "dependencies", ",", "list", ")", ":", "jobstamps_dependencies", "=", "[", "dependencies", "]", "else", ":", "jobstamps_dependencies", "=", "dependencies", "kwargs", ".", "update", "(", "{", "\"jobstamps_cache_output_directory\"", ":", "stamp_directory", ",", "\"jobstamps_dependencies\"", ":", "jobstamps_dependencies", "}", ")", "return", "jobstamp", ".", "run", "(", "func", ",", "dependencies", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run func, assumed to have dependencies as its first argument.
[ "Run", "func", "assumed", "to", "have", "dependencies", "as", "its", "first", "argument", "." ]
python
train
rigetti/quantumflow
quantumflow/forest/__init__.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/forest/__init__.py#L171-L185
def circuit_to_pyquil(circuit: Circuit) -> pyquil.Program:
    """Convert a QuantumFlow circuit to a pyQuil program"""
    prog = pyquil.Program()

    for elem in circuit.elements:
        if isinstance(elem, Gate) and elem.name in QUIL_GATES:
            params = list(elem.params.values()) if elem.params else []
            prog.gate(elem.name, params, elem.qubits)
        elif isinstance(elem, Measure):
            prog.measure(elem.qubit, elem.cbit)
        else:
            # FIXME: more informative error message
            raise ValueError('Cannot convert operation to pyquil')

    return prog
[ "def", "circuit_to_pyquil", "(", "circuit", ":", "Circuit", ")", "->", "pyquil", ".", "Program", ":", "prog", "=", "pyquil", ".", "Program", "(", ")", "for", "elem", "in", "circuit", ".", "elements", ":", "if", "isinstance", "(", "elem", ",", "Gate", ")", "and", "elem", ".", "name", "in", "QUIL_GATES", ":", "params", "=", "list", "(", "elem", ".", "params", ".", "values", "(", ")", ")", "if", "elem", ".", "params", "else", "[", "]", "prog", ".", "gate", "(", "elem", ".", "name", ",", "params", ",", "elem", ".", "qubits", ")", "elif", "isinstance", "(", "elem", ",", "Measure", ")", ":", "prog", ".", "measure", "(", "elem", ".", "qubit", ",", "elem", ".", "cbit", ")", "else", ":", "# FIXME: more informative error message", "raise", "ValueError", "(", "'Cannot convert operation to pyquil'", ")", "return", "prog" ]
Convert a QuantumFlow circuit to a pyQuil program
[ "Convert", "a", "QuantumFlow", "circuit", "to", "a", "pyQuil", "program" ]
python
train
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/21d46be48fd453500ea49de699bc9eabc427bdf7/rectpack/skyline.py#L192-L208
def fitness(self, width, height):
        """Search for the best fitness
        """
        assert(width > 0 and height > 0)
        if width > max(self.width, self.height) or\
            height > max(self.height, self.width):
            return None

        # If there is room in wasted space, FREE PACKING!!
        if self._waste_management:
            if self._waste.fitness(width, height) is not None:
                return 0

        # Get best fitness segment, for normal rectangle, and for
        # rotated rectangle if rotation is enabled.
        rect, fitness = self._select_position(width, height)
        return fitness
[ "def", "fitness", "(", "self", ",", "width", ",", "height", ")", ":", "assert", "(", "width", ">", "0", "and", "height", ">", "0", ")", "if", "width", ">", "max", "(", "self", ".", "width", ",", "self", ".", "height", ")", "or", "height", ">", "max", "(", "self", ".", "height", ",", "self", ".", "width", ")", ":", "return", "None", "# If there is room in wasted space, FREE PACKING!!", "if", "self", ".", "_waste_management", ":", "if", "self", ".", "_waste", ".", "fitness", "(", "width", ",", "height", ")", "is", "not", "None", ":", "return", "0", "# Get best fitness segment, for normal rectangle, and for", "# rotated rectangle if rotation is enabled.", "rect", ",", "fitness", "=", "self", ".", "_select_position", "(", "width", ",", "height", ")", "return", "fitness" ]
Search for the best fitness
[ "Search", "for", "the", "best", "fitness" ]
python
train
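fitness is called internally by rectpack's packing loop to rank candidate placements (None means the rectangle cannot fit, 0 means it fits into wasted space for free). End users normally go through the high-level packer API instead; a sketch of typical usage, per the rectpack README, where the top-level import of SkylineBl is an assumption about the package exports:

from rectpack import newPacker, SkylineBl  # SkylineBl uses this fitness logic

packer = newPacker(pack_algo=SkylineBl, rotation=True)
for w, h in [(30, 40), (50, 20), (10, 10)]:
    packer.add_rect(w, h)
packer.add_bin(100, 100)
packer.pack()

for bin_index, x, y, w, h, rid in packer.rect_list():
    print(bin_index, x, y, w, h)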
apache/incubator-superset
superset/views/core.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1517-L1532
def checkbox(self, model_view, id_, attr, value):
        """endpoint for checking/unchecking any boolean in a sqla model"""
        modelview_to_model = {
            '{}ColumnInlineView'.format(name.capitalize()): source.column_class
            for name, source in ConnectorRegistry.sources.items()
        }
        model = modelview_to_model[model_view]
        col = db.session.query(model).filter_by(id=id_).first()
        checked = value == 'true'
        if col:
            setattr(col, attr, checked)
            if checked:
                metrics = col.get_metrics().values()
                col.datasource.add_missing_metrics(metrics)
            db.session.commit()
        return json_success('OK')
[ "def", "checkbox", "(", "self", ",", "model_view", ",", "id_", ",", "attr", ",", "value", ")", ":", "modelview_to_model", "=", "{", "'{}ColumnInlineView'", ".", "format", "(", "name", ".", "capitalize", "(", ")", ")", ":", "source", ".", "column_class", "for", "name", ",", "source", "in", "ConnectorRegistry", ".", "sources", ".", "items", "(", ")", "}", "model", "=", "modelview_to_model", "[", "model_view", "]", "col", "=", "db", ".", "session", ".", "query", "(", "model", ")", ".", "filter_by", "(", "id", "=", "id_", ")", ".", "first", "(", ")", "checked", "=", "value", "==", "'true'", "if", "col", ":", "setattr", "(", "col", ",", "attr", ",", "checked", ")", "if", "checked", ":", "metrics", "=", "col", ".", "get_metrics", "(", ")", ".", "values", "(", ")", "col", ".", "datasource", ".", "add_missing_metrics", "(", "metrics", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "json_success", "(", "'OK'", ")" ]
endpoint for checking/unchecking any boolean in a sqla model
[ "endpoint", "for", "checking", "/", "unchecking", "any", "boolean", "in", "a", "sqla", "model" ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/control/GyroBalancer.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L475-L481
def _move(self, speed=0, steering=0, seconds=None):
        """Move robot."""
        self.drive_queue.put((speed, steering))
        if seconds is not None:
            time.sleep(seconds)
            self.drive_queue.put((0, 0))
            self.drive_queue.join()
[ "def", "_move", "(", "self", ",", "speed", "=", "0", ",", "steering", "=", "0", ",", "seconds", "=", "None", ")", ":", "self", ".", "drive_queue", ".", "put", "(", "(", "speed", ",", "steering", ")", ")", "if", "seconds", "is", "not", "None", ":", "time", ".", "sleep", "(", "seconds", ")", "self", ".", "drive_queue", ".", "put", "(", "(", "0", ",", "0", ")", ")", "self", ".", "drive_queue", ".", "join", "(", ")" ]
Move robot.
[ "Move", "robot", "." ]
python
train
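The _move helper relies on Queue.join() to block until a worker thread has drained the commands. A stripped-down, self-contained version of that producer/consumer handshake, with no EV3 hardware involved (the worker body is a stand-in):

import queue
import threading
import time

drive_queue = queue.Queue()

def worker():
    while True:
        speed, steering = drive_queue.get()
        # ... would drive the motors here ...
        drive_queue.task_done()   # lets join() in the producer return

threading.Thread(target=worker, daemon=True).start()

def move(speed=0, steering=0, seconds=None):
    drive_queue.put((speed, steering))
    if seconds is not None:
        time.sleep(seconds)
        drive_queue.put((0, 0))   # stop command
        drive_queue.join()        # block until the worker has consumed it

move(speed=20, steering=0, seconds=0.1)
print('done')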
its-rigs/Trolly
trolly/client.py
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L195-L207
def create_member(self, member_json):
        '''
        Create a Member object from JSON object

        Returns:
            Member: The member from the given `member_json`.
        '''
        return trolly.member.Member(
            trello_client=self,
            member_id=member_json['id'],
            name=member_json['fullName'],
            data=member_json,
        )
[ "def", "create_member", "(", "self", ",", "member_json", ")", ":", "return", "trolly", ".", "member", ".", "Member", "(", "trello_client", "=", "self", ",", "member_id", "=", "member_json", "[", "'id'", "]", ",", "name", "=", "member_json", "[", "'fullName'", "]", ",", "data", "=", "member_json", ",", ")" ]
Create a Member object from JSON object

Returns:
    Member: The member from the given `member_json`.
[ "Create", "a", "Member", "object", "from", "JSON", "object" ]
python
test
spyder-ide/spyder
spyder/plugins/console/widgets/console.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/console.py#L72-L115
def set_style(self):
        """
        Set font style with the following attributes:
        'foreground_color', 'background_color', 'italic',
        'bold' and 'underline'
        """
        if self.current_format is None:
            assert self.base_format is not None
            self.current_format = QTextCharFormat(self.base_format)
        # Foreground color
        if self.foreground_color is None:
            qcolor = self.base_format.foreground()
        else:
            cstr = self.ANSI_COLORS[self.foreground_color-30][self.intensity]
            qcolor = QColor(cstr)
        self.current_format.setForeground(qcolor)
        # Background color
        if self.background_color is None:
            qcolor = self.base_format.background()
        else:
            cstr = self.ANSI_COLORS[self.background_color-40][self.intensity]
            qcolor = QColor(cstr)
        self.current_format.setBackground(qcolor)
        font = self.current_format.font()
        # Italic
        if self.italic is None:
            italic = self.base_format.fontItalic()
        else:
            italic = self.italic
        font.setItalic(italic)
        # Bold
        if self.bold is None:
            bold = self.base_format.font().bold()
        else:
            bold = self.bold
        font.setBold(bold)
        # Underline
        if self.underline is None:
            underline = self.base_format.font().underline()
        else:
            underline = self.underline
        font.setUnderline(underline)
        self.current_format.setFont(font)
[ "def", "set_style", "(", "self", ")", ":", "if", "self", ".", "current_format", "is", "None", ":", "assert", "self", ".", "base_format", "is", "not", "None", "self", ".", "current_format", "=", "QTextCharFormat", "(", "self", ".", "base_format", ")", "# Foreground color", "if", "self", ".", "foreground_color", "is", "None", ":", "qcolor", "=", "self", ".", "base_format", ".", "foreground", "(", ")", "else", ":", "cstr", "=", "self", ".", "ANSI_COLORS", "[", "self", ".", "foreground_color", "-", "30", "]", "[", "self", ".", "intensity", "]", "qcolor", "=", "QColor", "(", "cstr", ")", "self", ".", "current_format", ".", "setForeground", "(", "qcolor", ")", "# Background color", "if", "self", ".", "background_color", "is", "None", ":", "qcolor", "=", "self", ".", "base_format", ".", "background", "(", ")", "else", ":", "cstr", "=", "self", ".", "ANSI_COLORS", "[", "self", ".", "background_color", "-", "40", "]", "[", "self", ".", "intensity", "]", "qcolor", "=", "QColor", "(", "cstr", ")", "self", ".", "current_format", ".", "setBackground", "(", "qcolor", ")", "font", "=", "self", ".", "current_format", ".", "font", "(", ")", "# Italic", "if", "self", ".", "italic", "is", "None", ":", "italic", "=", "self", ".", "base_format", ".", "fontItalic", "(", ")", "else", ":", "italic", "=", "self", ".", "italic", "font", ".", "setItalic", "(", "italic", ")", "# Bold", "if", "self", ".", "bold", "is", "None", ":", "bold", "=", "self", ".", "base_format", ".", "font", "(", ")", ".", "bold", "(", ")", "else", ":", "bold", "=", "self", ".", "bold", "font", ".", "setBold", "(", "bold", ")", "# Underline", "if", "self", ".", "underline", "is", "None", ":", "underline", "=", "self", ".", "base_format", ".", "font", "(", ")", ".", "underline", "(", ")", "else", ":", "underline", "=", "self", ".", "underline", "font", ".", "setUnderline", "(", "underline", ")", "self", ".", "current_format", ".", "setFont", "(", "font", ")" ]
Set font style with the following attributes: 'foreground_color', 'background_color', 'italic', 'bold' and 'underline'
[ "Set", "font", "style", "with", "the", "following", "attributes", ":", "foreground_color", "background_color", "italic", "bold", "and", "underline" ]
python
train
AlpacaDB/selectivesearch
selectivesearch/selectivesearch.py
https://github.com/AlpacaDB/selectivesearch/blob/52f7f83bb247b1ed941b099c6a610da1b0e30451/selectivesearch/selectivesearch.py#L58-L66
def _sim_fill(r1, r2, imsize):
    """
        calculate the fill similarity over the image
    """
    bbsize = (
        (max(r1["max_x"], r2["max_x"]) - min(r1["min_x"], r2["min_x"]))
        * (max(r1["max_y"], r2["max_y"]) - min(r1["min_y"], r2["min_y"]))
    )
    return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize
[ "def", "_sim_fill", "(", "r1", ",", "r2", ",", "imsize", ")", ":", "bbsize", "=", "(", "(", "max", "(", "r1", "[", "\"max_x\"", "]", ",", "r2", "[", "\"max_x\"", "]", ")", "-", "min", "(", "r1", "[", "\"min_x\"", "]", ",", "r2", "[", "\"min_x\"", "]", ")", ")", "*", "(", "max", "(", "r1", "[", "\"max_y\"", "]", ",", "r2", "[", "\"max_y\"", "]", ")", "-", "min", "(", "r1", "[", "\"min_y\"", "]", ",", "r2", "[", "\"min_y\"", "]", ")", ")", ")", "return", "1.0", "-", "(", "bbsize", "-", "r1", "[", "\"size\"", "]", "-", "r2", "[", "\"size\"", "]", ")", "/", "imsize" ]
calculate the fill similarity over the image
[ "calculate", "the", "fill", "similarity", "over", "the", "image" ]
python
train
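A numeric check of the fill similarity (the function is copied verbatim so the snippet runs standalone): two adjacent regions that tile their bounding box exactly give a similarity of 1.0, and a gap between them lowers it.

def _sim_fill(r1, r2, imsize):
    bbsize = ((max(r1["max_x"], r2["max_x"]) - min(r1["min_x"], r2["min_x"]))
              * (max(r1["max_y"], r2["max_y"]) - min(r1["min_y"], r2["min_y"])))
    return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize

# Two 10x10 regions sitting side by side fill their 20x10 bounding box:
r1 = {"min_x": 0, "max_x": 10, "min_y": 0, "max_y": 10, "size": 100}
r2 = {"min_x": 10, "max_x": 20, "min_y": 0, "max_y": 10, "size": 100}
print(_sim_fill(r1, r2, imsize=400.0))  # 1.0

# Move r2 away; the empty space in the bounding box is penalized:
r2["min_x"], r2["max_x"] = 20, 30
print(_sim_fill(r1, r2, imsize=400.0))  # 0.75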
nickoala/telepot
telepot/aio/helper.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/aio/helper.py#L74-L129
def answer(self, inline_query, compute_fn, *compute_args, **compute_kwargs):
        """
        Create a task that calls ``compute_fn`` (along with additional
        arguments ``*compute_args`` and ``**compute_kwargs``), then applies
        the returned value to :meth:`.Bot.answerInlineQuery` to answer the
        inline query. If a preceding task is already working for a user,
        that task is cancelled, thus ensuring at most one active task per
        user id.

        :param inline_query:
            The inline query to be processed. The originating user is
            inferred from ``msg['from']['id']``.

        :param compute_fn:
            A function whose returned value is given to
            :meth:`.Bot.answerInlineQuery` to send. May return:

            - a *list* of `InlineQueryResult
              <https://core.telegram.org/bots/api#inlinequeryresult>`_
            - a *tuple* whose first element is a list of `InlineQueryResult
              <https://core.telegram.org/bots/api#inlinequeryresult>`_,
              followed by positional arguments to be supplied to
              :meth:`.Bot.answerInlineQuery`
            - a *dictionary* representing keyword arguments to be supplied
              to :meth:`.Bot.answerInlineQuery`

        :param \*compute_args: positional arguments to ``compute_fn``
        :param \*\*compute_kwargs: keyword arguments to ``compute_fn``
        """
        from_id = inline_query['from']['id']

        async def compute_and_answer():
            try:
                query_id = inline_query['id']

                ans = await _invoke(compute_fn, *compute_args, **compute_kwargs)

                if isinstance(ans, list):
                    await self._bot.answerInlineQuery(query_id, ans)
                elif isinstance(ans, tuple):
                    await self._bot.answerInlineQuery(query_id, *ans)
                elif isinstance(ans, dict):
                    await self._bot.answerInlineQuery(query_id, **ans)
                else:
                    raise ValueError('Invalid answer format')
            except CancelledError:
                # Cancelled. Record has been occupied by new task. Don't touch.
                raise
            except:
                # Die accidentally. Remove myself from record.
                del self._working_tasks[from_id]
                raise
            else:
                # Die naturally. Remove myself from record.
                del self._working_tasks[from_id]

        if from_id in self._working_tasks:
            self._working_tasks[from_id].cancel()

        t = self._loop.create_task(compute_and_answer())
        self._working_tasks[from_id] = t
[ "def", "answer", "(", "self", ",", "inline_query", ",", "compute_fn", ",", "*", "compute_args", ",", "*", "*", "compute_kwargs", ")", ":", "from_id", "=", "inline_query", "[", "'from'", "]", "[", "'id'", "]", "async", "def", "compute_and_answer", "(", ")", ":", "try", ":", "query_id", "=", "inline_query", "[", "'id'", "]", "ans", "=", "await", "_invoke", "(", "compute_fn", ",", "*", "compute_args", ",", "*", "*", "compute_kwargs", ")", "if", "isinstance", "(", "ans", ",", "list", ")", ":", "await", "self", ".", "_bot", ".", "answerInlineQuery", "(", "query_id", ",", "ans", ")", "elif", "isinstance", "(", "ans", ",", "tuple", ")", ":", "await", "self", ".", "_bot", ".", "answerInlineQuery", "(", "query_id", ",", "*", "ans", ")", "elif", "isinstance", "(", "ans", ",", "dict", ")", ":", "await", "self", ".", "_bot", ".", "answerInlineQuery", "(", "query_id", ",", "*", "*", "ans", ")", "else", ":", "raise", "ValueError", "(", "'Invalid answer format'", ")", "except", "CancelledError", ":", "# Cancelled. Record has been occupied by new task. Don't touch.", "raise", "except", ":", "# Die accidentally. Remove myself from record.", "del", "self", ".", "_working_tasks", "[", "from_id", "]", "raise", "else", ":", "# Die naturally. Remove myself from record.", "del", "self", ".", "_working_tasks", "[", "from_id", "]", "if", "from_id", "in", "self", ".", "_working_tasks", ":", "self", ".", "_working_tasks", "[", "from_id", "]", ".", "cancel", "(", ")", "t", "=", "self", ".", "_loop", ".", "create_task", "(", "compute_and_answer", "(", ")", ")", "self", ".", "_working_tasks", "[", "from_id", "]", "=", "t" ]
Create a task that calls ``compute_fn`` (along with additional arguments
``*compute_args`` and ``**compute_kwargs``), then applies the returned
value to :meth:`.Bot.answerInlineQuery` to answer the inline query.
If a preceding task is already working for a user, that task is cancelled,
thus ensuring at most one active task per user id.

:param inline_query:
    The inline query to be processed. The originating user is inferred
    from ``msg['from']['id']``.

:param compute_fn:
    A function whose returned value is given to
    :meth:`.Bot.answerInlineQuery` to send. May return:

    - a *list* of `InlineQueryResult
      <https://core.telegram.org/bots/api#inlinequeryresult>`_
    - a *tuple* whose first element is a list of `InlineQueryResult
      <https://core.telegram.org/bots/api#inlinequeryresult>`_, followed
      by positional arguments to be supplied to
      :meth:`.Bot.answerInlineQuery`
    - a *dictionary* representing keyword arguments to be supplied to
      :meth:`.Bot.answerInlineQuery`

:param \*compute_args: positional arguments to ``compute_fn``
:param \*\*compute_kwargs: keyword arguments to ``compute_fn``
[ "Create", "a", "task", "that", "calls", "compute", "fn", "(", "along", "with", "additional", "arguments", "*", "compute_args", "and", "**", "compute_kwargs", ")", "then", "applies", "the", "returned", "value", "to", ":", "meth", ":", ".", "Bot", ".", "answerInlineQuery", "to", "answer", "the", "inline", "query", ".", "If", "a", "preceding", "task", "is", "already", "working", "for", "a", "user", "that", "task", "is", "cancelled", "thus", "ensuring", "at", "most", "one", "active", "task", "per", "user", "id", "." ]
python
train
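The cancellation bookkeeping above is a general asyncio pattern: at most one in-flight task per key, with newer work preempting older. A minimal sketch of just that pattern, detached from telepot (the sleep stands in for the slow compute_fn):

import asyncio

working_tasks = {}

async def compute_and_answer(user_id, query):
    try:
        await asyncio.sleep(0.1)          # stand-in for the slow compute_fn
        print('answered', user_id, query)
    finally:
        # only clean up if the record still points at *this* task
        if working_tasks.get(user_id) is asyncio.current_task():
            del working_tasks[user_id]

def answer(user_id, query):
    if user_id in working_tasks:
        working_tasks[user_id].cancel()   # preempt the older query
    working_tasks[user_id] = asyncio.create_task(
        compute_and_answer(user_id, query))

async def main():
    answer(42, 'first')
    answer(42, 'second')                  # cancels 'first'
    await asyncio.sleep(0.2)

asyncio.run(main())                       # prints: answered 42 second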
RI-imaging/qpimage
qpimage/core.py
https://github.com/RI-imaging/qpimage/blob/863c0fce5735b4c0ae369f75c0df9a33411b2bb2/qpimage/core.py#L597-L655
def copyh5(inh5, outh5):
    """Recursively copy all hdf5 data from one group to another

    Data from links is copied.

    Parameters
    ----------
    inh5: str, h5py.File, or h5py.Group
        The input hdf5 data. This can be either a file name or
        an hdf5 object.
    outh5: str, h5py.File, h5py.Group, or None
        The output hdf5 data. This can be either a file name or
        an hdf5 object. If set to `None`, a new hdf5 object is
        created in memory.

    Notes
    -----
    All data in outh5 are overridden by the inh5 data.
    """
    if not isinstance(inh5, h5py.Group):
        inh5 = h5py.File(inh5, mode="r")

    if outh5 is None:
        # create file in memory
        h5kwargs = {"name": "qpimage{}.h5".format(QPImage._instances),
                    "driver": "core",
                    "backing_store": False,
                    "mode": "a"}
        outh5 = h5py.File(**h5kwargs)
        return_h5obj = True
        QPImage._instances += 1
    elif not isinstance(outh5, h5py.Group):
        # create new file
        outh5 = h5py.File(outh5, mode="w")
        return_h5obj = False
    else:
        return_h5obj = True

    # begin iteration
    for key in inh5:
        if key in outh5:
            del outh5[key]
        if isinstance(inh5[key], h5py.Group):
            outh5.create_group(key)
            copyh5(inh5[key], outh5[key])
        else:
            dset = write_image_dataset(group=outh5,
                                       key=key,
                                       data=inh5[key][:],
                                       h5dtype=inh5[key].dtype)
            dset.attrs.update(inh5[key].attrs)

    outh5.attrs.update(inh5.attrs)

    if return_h5obj:
        # in-memory or previously created instance of h5py.File
        return outh5
    else:
        # properly close the file and return its name
        fn = outh5.filename
        outh5.flush()
        outh5.close()
        return fn
[ "def", "copyh5", "(", "inh5", ",", "outh5", ")", ":", "if", "not", "isinstance", "(", "inh5", ",", "h5py", ".", "Group", ")", ":", "inh5", "=", "h5py", ".", "File", "(", "inh5", ",", "mode", "=", "\"r\"", ")", "if", "outh5", "is", "None", ":", "# create file in memory", "h5kwargs", "=", "{", "\"name\"", ":", "\"qpimage{}.h5\"", ".", "format", "(", "QPImage", ".", "_instances", ")", ",", "\"driver\"", ":", "\"core\"", ",", "\"backing_store\"", ":", "False", ",", "\"mode\"", ":", "\"a\"", "}", "outh5", "=", "h5py", ".", "File", "(", "*", "*", "h5kwargs", ")", "return_h5obj", "=", "True", "QPImage", ".", "_instances", "+=", "1", "elif", "not", "isinstance", "(", "outh5", ",", "h5py", ".", "Group", ")", ":", "# create new file", "outh5", "=", "h5py", ".", "File", "(", "outh5", ",", "mode", "=", "\"w\"", ")", "return_h5obj", "=", "False", "else", ":", "return_h5obj", "=", "True", "# begin iteration", "for", "key", "in", "inh5", ":", "if", "key", "in", "outh5", ":", "del", "outh5", "[", "key", "]", "if", "isinstance", "(", "inh5", "[", "key", "]", ",", "h5py", ".", "Group", ")", ":", "outh5", ".", "create_group", "(", "key", ")", "copyh5", "(", "inh5", "[", "key", "]", ",", "outh5", "[", "key", "]", ")", "else", ":", "dset", "=", "write_image_dataset", "(", "group", "=", "outh5", ",", "key", "=", "key", ",", "data", "=", "inh5", "[", "key", "]", "[", ":", "]", ",", "h5dtype", "=", "inh5", "[", "key", "]", ".", "dtype", ")", "dset", ".", "attrs", ".", "update", "(", "inh5", "[", "key", "]", ".", "attrs", ")", "outh5", ".", "attrs", ".", "update", "(", "inh5", ".", "attrs", ")", "if", "return_h5obj", ":", "# in-memory or previously created instance of h5py.File", "return", "outh5", "else", ":", "# properly close the file and return its name", "fn", "=", "outh5", ".", "filename", "outh5", ".", "flush", "(", ")", "outh5", ".", "close", "(", ")", "return", "fn" ]
Recursively copy all hdf5 data from one group to another

Data from links is copied.

Parameters
----------
inh5: str, h5py.File, or h5py.Group
    The input hdf5 data. This can be either a file name or
    an hdf5 object.
outh5: str, h5py.File, h5py.Group, or None
    The output hdf5 data. This can be either a file name or
    an hdf5 object. If set to `None`, a new hdf5 object is
    created in memory.

Notes
-----
All data in outh5 are overridden by the inh5 data.
[ "Recursively", "copy", "all", "hdf5", "data", "from", "one", "group", "to", "another" ]
python
train
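The core of copyh5, recursive group walking plus attribute propagation, can be reproduced with plain h5py and in-memory files; write_image_dataset is qpimage-specific, so a bare create_dataset stands in for it here.

import h5py

def copy_group(src, dst):
    for key in src:
        if isinstance(src[key], h5py.Group):
            copy_group(src[key], dst.create_group(key))
        else:
            d = dst.create_dataset(key, data=src[key][:])
            d.attrs.update(src[key].attrs)
    dst.attrs.update(src.attrs)

# two in-memory HDF5 files (the 'core' driver avoids touching disk)
a = h5py.File('a.h5', 'a', driver='core', backing_store=False)
b = h5py.File('b.h5', 'a', driver='core', backing_store=False)

a.create_group('phase')
a['phase'].create_dataset('raw', data=[1.0, 2.0, 3.0])
a['phase'].attrs['wavelength'] = 550e-9

copy_group(a, b)
print(b['phase/raw'][:], dict(b['phase'].attrs))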
log2timeline/plaso
plaso/parsers/winreg_plugins/mrulist.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg_plugins/mrulist.py#L77-L99
def _ParseMRUListValue(self, registry_key):
    """Parses the MRUList value in a given Registry key.

    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
          the MRUList value.

    Returns:
      mrulist_entries: MRUList entries or None if not available.
    """
    mrulist_value = registry_key.GetValueByName('MRUList')

    # The key exists but does not contain a value named "MRUList".
    if not mrulist_value:
      return None

    mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')

    context = dtfabric_data_maps.DataTypeMapContext(values={
        'data_size': len(mrulist_value.data)})

    return self._ReadStructureFromByteStream(
        mrulist_value.data, 0, mrulist_entries_map, context=context)
[ "def", "_ParseMRUListValue", "(", "self", ",", "registry_key", ")", ":", "mrulist_value", "=", "registry_key", ".", "GetValueByName", "(", "'MRUList'", ")", "# The key exists but does not contain a value named \"MRUList\".", "if", "not", "mrulist_value", ":", "return", "None", "mrulist_entries_map", "=", "self", ".", "_GetDataTypeMap", "(", "'mrulist_entries'", ")", "context", "=", "dtfabric_data_maps", ".", "DataTypeMapContext", "(", "values", "=", "{", "'data_size'", ":", "len", "(", "mrulist_value", ".", "data", ")", "}", ")", "return", "self", ".", "_ReadStructureFromByteStream", "(", "mrulist_value", ".", "data", ",", "0", ",", "mrulist_entries_map", ",", "context", "=", "context", ")" ]
Parses the MRUList value in a given Registry key.

Args:
  registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
      the MRUList value.

Returns:
  mrulist_entries: MRUList entries or None if not available.
[ "Parses", "the", "MRUList", "value", "in", "a", "given", "Registry", "key", "." ]
python
train
jim-easterbrook/pywws
src/pywws/weatherstation.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/weatherstation.py#L731-L741
def get_fixed_block(self, keys=[], unbuffered=False):
        """Get the decoded "fixed block" of settings and min/max data.

        A subset of the entire block can be selected by keys."""
        if unbuffered or not self._fixed_block:
            self._fixed_block = self._read_fixed_block()
        format = self.fixed_format
        # navigate down list of keys to get to wanted data
        for key in keys:
            format = format[key]
        return _decode(self._fixed_block, format)
[ "def", "get_fixed_block", "(", "self", ",", "keys", "=", "[", "]", ",", "unbuffered", "=", "False", ")", ":", "if", "unbuffered", "or", "not", "self", ".", "_fixed_block", ":", "self", ".", "_fixed_block", "=", "self", ".", "_read_fixed_block", "(", ")", "format", "=", "self", ".", "fixed_format", "# navigate down list of keys to get to wanted data", "for", "key", "in", "keys", ":", "format", "=", "format", "[", "key", "]", "return", "_decode", "(", "self", ".", "_fixed_block", ",", "format", ")" ]
Get the decoded "fixed block" of settings and min/max data. A subset of the entire block can be selected by keys.
[ "Get", "the", "decoded", "fixed", "block", "of", "settings", "and", "min", "/", "max", "data", "." ]
python
train
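The keys argument above simply walks down the nested fixed_format mapping before decoding. A toy illustration of that descent; the field layout below is invented, not pywws's real format table:

# Hypothetical fragment of a pywws-style fixed_format mapping.
fixed_format = {
    'rel_pressure': (32, 'us', 0.1),
    'min': {'temp_out': {'val': (98, 'ss', 0.1)}},
}

def navigate(fmt, keys):
    for key in keys:
        fmt = fmt[key]
    return fmt

print(navigate(fixed_format, ['min', 'temp_out', 'val']))  # (98, 'ss', 0.1)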
aiogram/aiogram
aiogram/types/chat.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/chat.py#L291-L306
async def pin_message(self, message_id: int, disable_notification: bool = False):
        """
        Use this method to pin a message in a supergroup.
        The bot must be an administrator in the chat for this to work
        and must have the appropriate admin rights.

        Source: https://core.telegram.org/bots/api#pinchatmessage

        :param message_id: Identifier of a message to pin
        :type message_id: :obj:`base.Integer`
        :param disable_notification: Pass True, if it is not necessary to send
            a notification to all group members about the new pinned message
        :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
        :return: Returns True on success.
        :rtype: :obj:`base.Boolean`
        """
        return await self.bot.pin_chat_message(self.id, message_id, disable_notification)
[ "async", "def", "pin_message", "(", "self", ",", "message_id", ":", "int", ",", "disable_notification", ":", "bool", "=", "False", ")", ":", "return", "await", "self", ".", "bot", ".", "pin_chat_message", "(", "self", ".", "id", ",", "message_id", ",", "disable_notification", ")" ]
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work
and must have the appropriate admin rights.

Source: https://core.telegram.org/bots/api#pinchatmessage

:param message_id: Identifier of a message to pin
:type message_id: :obj:`base.Integer`
:param disable_notification: Pass True, if it is not necessary to send
    a notification to all group members about the new pinned message
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
[ "Use", "this", "method", "to", "pin", "a", "message", "in", "a", "supergroup", ".", "The", "bot", "must", "be", "an", "administrator", "in", "the", "chat", "for", "this", "to", "work", "and", "must", "have", "the", "appropriate", "admin", "rights", "." ]
python
train
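Inside an aiogram (v2) handler the shortcut reads naturally; a hedged sketch, with bot setup, token, and the admin-rights requirement assumed:

from aiogram import Bot, Dispatcher, types

bot = Bot(token='TOKEN')            # placeholder token
dp = Dispatcher(bot)

@dp.message_handler(commands=['pin'])
async def pin_this(message: types.Message):
    # the bot must be an admin with pin rights in this chat
    await message.chat.pin_message(message.message_id,
                                   disable_notification=True)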
raiden-network/raiden
raiden/api/rest.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/api/rest.py#L540-L548
def unhandled_exception(self, exception: Exception):
        """ Flask.errorhandler when an exception wasn't correctly handled """
        log.critical(
            'Unhandled exception when processing endpoint request',
            exc_info=True,
            node=pex(self.rest_api.raiden_api.address),
        )
        self.greenlet.kill(exception)
        return api_error([str(exception)], HTTPStatus.INTERNAL_SERVER_ERROR)
[ "def", "unhandled_exception", "(", "self", ",", "exception", ":", "Exception", ")", ":", "log", ".", "critical", "(", "'Unhandled exception when processing endpoint request'", ",", "exc_info", "=", "True", ",", "node", "=", "pex", "(", "self", ".", "rest_api", ".", "raiden_api", ".", "address", ")", ",", ")", "self", ".", "greenlet", ".", "kill", "(", "exception", ")", "return", "api_error", "(", "[", "str", "(", "exception", ")", "]", ",", "HTTPStatus", ".", "INTERNAL_SERVER_ERROR", ")" ]
Flask.errorhandler when an exception wasn't correctly handled
[ "Flask", ".", "errorhandler", "when", "an", "exception", "wasn", "t", "correctly", "handled" ]
python
train
thespacedoctor/sherlock
sherlock/transient_classifier.py
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L542-L621
def _remove_previous_ned_queries(
            self,
            coordinateList):
        """iterate through the transient locations to see if we have recent
        local NED coverage of that area already

        **Key Arguments:**
            - ``coordinateList`` -- set of coordinates to check for previous queries

        **Return:**
            - ``updatedCoordinateList`` -- coordinate list with previous queries removed

        .. todo ::

            - update key arguments values and definitions with defaults
            - update return values and definitions
            - update usage examples and text
            - update docstring text
            - check sublime snippet exists
            - clip any useful text to docs mindmap
            - regenerate the docs and check rendering of this docstring
        """
        self.log.debug('starting the ``_remove_previous_ned_queries`` method')

        # 1 DEGREE QUERY RADIUS
        radius = 60. * 60.
        updatedCoordinateList = []
        keepers = []

        # CALCULATE THE OLDEST RESULTS LIMIT
        now = datetime.now()
        td = timedelta(
            days=self.settings["ned stream refresh rate in days"])
        refreshLimit = now - td
        refreshLimit = refreshLimit.strftime("%Y-%m-%d %H:%M:%S")

        raList = []
        raList[:] = [c[0] for c in coordinateList]
        decList = []
        decList[:] = [c[1] for c in coordinateList]

        # MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES
        cs = conesearch(
            log=self.log,
            dbConn=self.cataloguesDbConn,
            tableName="tcs_helper_ned_query_history",
            columns="*",
            ra=raList,
            dec=decList,
            radiusArcsec=radius,
            separations=True,
            distinct=True,
            sqlWhere="dateQueried > '%(refreshLimit)s'" % locals(),
            closest=False
        )
        matchIndies, matches = cs.search()

        # DETERMINE WHICH COORDINATES REQUIRE A NED QUERY
        curatedMatchIndices = []
        curatedMatches = []
        for i, m in zip(matchIndies, matches.list):
            match = False
            row = m
            row["separationArcsec"] = row["cmSepArcsec"]
            raStream = row["raDeg"]
            decStream = row["decDeg"]
            radiusStream = row["arcsecRadius"]
            dateStream = row["dateQueried"]
            angularSeparation = row["separationArcsec"]

            if angularSeparation + self.settings["first pass ned search radius arcec"] < radiusStream:
                curatedMatchIndices.append(i)
                curatedMatches.append(m)

        # NON MATCHES
        for i, v in enumerate(coordinateList):
            if i not in curatedMatchIndices:
                updatedCoordinateList.append(v)

        self.log.debug('completed the ``_remove_previous_ned_queries`` method')
        return updatedCoordinateList
[ "def", "_remove_previous_ned_queries", "(", "self", ",", "coordinateList", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_remove_previous_ned_queries`` method'", ")", "# 1 DEGREE QUERY RADIUS", "radius", "=", "60.", "*", "60.", "updatedCoordinateList", "=", "[", "]", "keepers", "=", "[", "]", "# CALCULATE THE OLDEST RESULTS LIMIT", "now", "=", "datetime", ".", "now", "(", ")", "td", "=", "timedelta", "(", "days", "=", "self", ".", "settings", "[", "\"ned stream refresh rate in days\"", "]", ")", "refreshLimit", "=", "now", "-", "td", "refreshLimit", "=", "refreshLimit", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "raList", "=", "[", "]", "raList", "[", ":", "]", "=", "[", "c", "[", "0", "]", "for", "c", "in", "coordinateList", "]", "decList", "=", "[", "]", "decList", "[", ":", "]", "=", "[", "c", "[", "1", "]", "for", "c", "in", "coordinateList", "]", "# MATCH COORDINATES AGAINST PREVIOUS NED SEARCHES", "cs", "=", "conesearch", "(", "log", "=", "self", ".", "log", ",", "dbConn", "=", "self", ".", "cataloguesDbConn", ",", "tableName", "=", "\"tcs_helper_ned_query_history\"", ",", "columns", "=", "\"*\"", ",", "ra", "=", "raList", ",", "dec", "=", "decList", ",", "radiusArcsec", "=", "radius", ",", "separations", "=", "True", ",", "distinct", "=", "True", ",", "sqlWhere", "=", "\"dateQueried > '%(refreshLimit)s'\"", "%", "locals", "(", ")", ",", "closest", "=", "False", ")", "matchIndies", ",", "matches", "=", "cs", ".", "search", "(", ")", "# DETERMINE WHICH COORDINATES REQUIRE A NED QUERY", "curatedMatchIndices", "=", "[", "]", "curatedMatches", "=", "[", "]", "for", "i", ",", "m", "in", "zip", "(", "matchIndies", ",", "matches", ".", "list", ")", ":", "match", "=", "False", "row", "=", "m", "row", "[", "\"separationArcsec\"", "]", "=", "row", "[", "\"cmSepArcsec\"", "]", "raStream", "=", "row", "[", "\"raDeg\"", "]", "decStream", "=", "row", "[", "\"decDeg\"", "]", "radiusStream", "=", "row", "[", "\"arcsecRadius\"", "]", "dateStream", "=", "row", "[", "\"dateQueried\"", "]", "angularSeparation", "=", "row", "[", "\"separationArcsec\"", "]", "if", "angularSeparation", "+", "self", ".", "settings", "[", "\"first pass ned search radius arcec\"", "]", "<", "radiusStream", ":", "curatedMatchIndices", ".", "append", "(", "i", ")", "curatedMatches", ".", "append", "(", "m", ")", "# NON MATCHES", "for", "i", ",", "v", "in", "enumerate", "(", "coordinateList", ")", ":", "if", "i", "not", "in", "curatedMatchIndices", ":", "updatedCoordinateList", ".", "append", "(", "v", ")", "self", ".", "log", ".", "debug", "(", "'completed the ``_remove_previous_ned_queries`` method'", ")", "return", "updatedCoordinateList" ]
iterate through the transient locations to see if we have recent local NED coverage of that area already **Key Arguments:** - ``coordinateList`` -- set of coordinates to check for previous queries **Return:** - ``updatedCoordinateList`` -- coordinate list with previous queries removed .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check rendering of this docstring
[ "iterate", "through", "the", "transient", "locations", "to", "see", "if", "we", "have", "recent", "local", "NED", "coverage", "of", "that", "area", "already" ]
python
train
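For context, here is a minimal, self-contained sketch of the two-part test the method applies to each cached NED query: is the cached query recent enough, and does its search radius still cover the new position. The settings value, cached rows, and first-pass radius below are illustrative assumptions, not data taken from the record above.

from datetime import datetime, timedelta

# Hedged, self-contained sketch of the freshness/coverage test; all values
# below are illustrative, not from a real sherlock configuration.
settings = {"ned stream refresh rate in days": 30}

now = datetime.now()
refreshLimit = now - timedelta(days=settings["ned stream refresh rate in days"])

# A cached NED query only suppresses a new one if it was run after
# refreshLimit AND its radius fully covers the new position
# (separation + first-pass radius < cached radius).
cachedQueries = [
    {"dateQueried": now - timedelta(days=10), "arcsecRadius": 3600.0, "sepArcsec": 100.0},
    {"dateQueried": now - timedelta(days=90), "arcsecRadius": 3600.0, "sepArcsec": 100.0},
]
firstPassRadius = 300.0  # illustrative stand-in for the first-pass search radius setting

for row in cachedQueries:
    fresh = row["dateQueried"] > refreshLimit
    covered = row["sepArcsec"] + firstPassRadius < row["arcsecRadius"]
    print("skip new query" if fresh and covered else "re-query NED")

The first row prints "skip new query" (10 days old, well inside the cached radius); the second prints "re-query NED" because it is older than the refresh window.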
juju/python-libjuju
juju/user.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/user.py#L82-L86
async def enable(self): """Re-enable this user. """ await self.controller.enable_user(self.username) self._user_info.disabled = False
[ "async", "def", "enable", "(", "self", ")", ":", "await", "self", ".", "controller", ".", "enable_user", "(", "self", ".", "username", ")", "self", ".", "_user_info", ".", "disabled", "=", "False" ]
Re-enable this user.
[ "Re", "-", "enable", "this", "user", "." ]
python
train
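A short usage sketch for the method above, assuming a bootstrapped Juju controller reachable from the client; Controller.get_user and the user's disabled property come from python-libjuju's public API, while the username is purely illustrative.

import asyncio
from juju.controller import Controller

# Hedged sketch: assumes a bootstrapped controller is available and that
# the user 'alice' exists; both are illustrative assumptions.
async def main():
    controller = Controller()
    await controller.connect()
    user = await controller.get_user('alice')
    await user.enable()            # re-enables the account server-side
    assert not user.disabled       # the cached user info is updated in place
    await controller.disconnect()

asyncio.run(main())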
litters/shrew
shrew/cli.py
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L222-L230
def add_username_password(self, use_store=False): """ Add --username and --password options :param bool use_store: If True, remember the credentials in the password keystore """ self.argparser.add_argument('--username', default=None, help='Username') self.argparser.add_argument('--password', default=None, help='Password') self.argparser.add_argument('--clear-store', action='store_true', default=False, help='Clear password keystore') self.use_username_password_store = use_store
[ "def", "add_username_password", "(", "self", ",", "use_store", "=", "False", ")", ":", "self", ".", "argparser", ".", "add_argument", "(", "'--username'", ",", "default", "=", "None", ",", "help", "=", "'Username'", ")", "self", ".", "argparser", ".", "add_argument", "(", "'--password'", ",", "default", "=", "None", ",", "help", "=", "'Password'", ")", "self", ".", "argparser", ".", "add_argument", "(", "'--clear-store'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Clear password keystore'", ")", "self", ".", "use_username_password_store", "=", "use_store" ]
Add --username and --password options :param bool use_store: If True, remember the credentials in the password keystore
[ "Add", "--", "username", "and", "--", "password", "options", ":", "param", "bool", "use_store", ":", "Name", "of", "the", "section", "(", "concept", "command", "line", "options", "API", "reference", ")" ]
python
train
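A runnable sketch of the three flags this helper wires up; because the record does not show shrew's host class, a plain argparse.ArgumentParser stands in for self.argparser here.

import argparse

# Self-contained stand-in for the parser this helper configures; the sample
# argv list is illustrative.
parser = argparse.ArgumentParser()
parser.add_argument('--username', default=None, help='Username')
parser.add_argument('--password', default=None, help='Password')
parser.add_argument('--clear-store', action='store_true', default=False,
                    help='Clear password keystore')

args = parser.parse_args(['--username', 'alice', '--clear-store'])
print(args.username, args.password, args.clear_store)   # alice None True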
3DLIRIOUS/MeshLabXML
meshlabxml/create.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L510-L583
def plane_hires_edges(script, size=1.0, x_segments=1, y_segments=1, center=False, color=None): """ Creates a plane with a specified number of vertices on its sides, but no vertices on the interior. Currently used to create a simpler bottom for cube_hires. """ size = util.make_list(size, 2) grid(script, size=[x_segments + y_segments - 1, 1], x_segments=(x_segments + y_segments - 1), y_segments=1) if script.ml_version == '1.3.4BETA': and_val = 'and' else: and_val = '&&' if script.ml_version == '1.3.4BETA': # muparser version: 1.3.2 # Deform left side transform.vert_function( script, x_func='if((y>0) and (x<%s),0,x)' % (y_segments), y_func='if((y>0) and (x<%s),(x+1)*%s,y)' % ( y_segments, size[1] / y_segments)) # Deform top transform.vert_function( script, x_func='if((y>0) and (x>=%s),(x-%s+1)*%s,x)' % ( y_segments, y_segments, size[0] / x_segments), y_func='if((y>0) and (x>=%s),%s,y)' % (y_segments, size[1])) # Deform right side transform.vert_function( script, x_func='if((y<.00001) and (x>%s),%s,x)' % ( x_segments, size[0]), y_func='if((y<.00001) and (x>%s),(x-%s)*%s,y)' % ( x_segments, x_segments, size[1] / y_segments)) # Deform bottom transform.vert_function( script, x_func='if((y<.00001) and (x<=%s) and (x>0),(x)*%s,x)' % ( x_segments, size[0] / x_segments), y_func='if((y<.00001) and (x<=%s) and (x>0),0,y)' % (x_segments)) else: # muparser version: 2.2.5 # Deform left side transform.vert_function( script, x_func='((y>0) && (x<{yseg}) ? 0 : x)'.format(yseg=y_segments), y_func='((y>0) && (x<%s) ? (x+1)*%s : y)' % ( y_segments, size[1] / y_segments)) # Deform top transform.vert_function( script, x_func='((y>0) && (x>=%s) ? (x-%s+1)*%s : x)' % ( y_segments, y_segments, size[0] / x_segments), y_func='((y>0) && (x>=%s) ? %s : y)' % (y_segments, size[1])) # Deform right side transform.vert_function( script, x_func='((y<.00001) && (x>%s) ? %s : x)' % ( x_segments, size[0]), y_func='((y<.00001) && (x>%s) ? (x-%s)*%s : y)' % ( x_segments, x_segments, size[1] / y_segments)) # Deform bottom transform.vert_function( script, x_func='((y<.00001) && (x<=%s) && (x>0) ? (x)*%s : x)' % ( x_segments, size[0] / x_segments), y_func='((y<.00001) && (x<=%s) && (x>0) ? 0 : y)' % (x_segments)) if center: transform.translate(script, [-size[0] / 2, -size[1] / 2]) if color is not None: vert_color.function(script, color=color) return None
[ "def", "plane_hires_edges", "(", "script", ",", "size", "=", "1.0", ",", "x_segments", "=", "1", ",", "y_segments", "=", "1", ",", "center", "=", "False", ",", "color", "=", "None", ")", ":", "size", "=", "util", ".", "make_list", "(", "size", ",", "2", ")", "grid", "(", "script", ",", "size", "=", "[", "x_segments", "+", "y_segments", "-", "1", ",", "1", "]", ",", "x_segments", "=", "(", "x_segments", "+", "y_segments", "-", "1", ")", ",", "y_segments", "=", "1", ")", "if", "ml_script1", ".", "ml_version", "==", "'1.3.4BETA'", ":", "and_val", "=", "'and'", "else", ":", "and_val", "=", "'&&'", "if", "script", ".", "ml_version", "==", "'1.3.4BETA'", ":", "# muparser version: 1.3.2", "# Deform left side", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'if((y>0) and (x<%s),0,x)'", "%", "(", "y_segments", ")", ",", "y_func", "=", "'if((y>0) and (x<%s),(x+1)*%s,y)'", "%", "(", "y_segments", ",", "size", "[", "1", "]", "/", "y_segments", ")", ")", "# Deform top", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'if((y>0) and (x>=%s),(x-%s+1)*%s,x)'", "%", "(", "y_segments", ",", "y_segments", ",", "size", "[", "0", "]", "/", "x_segments", ")", ",", "y_func", "=", "'if((y>0) and (x>=%s),%s,y)'", "%", "(", "y_segments", ",", "size", "[", "1", "]", ")", ")", "# Deform right side", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'if((y<.00001) and (x>%s),%s,x)'", "%", "(", "x_segments", ",", "size", "[", "0", "]", ")", ",", "y_func", "=", "'if((y<.00001) and (x>%s),(x-%s)*%s,y)'", "%", "(", "x_segments", ",", "x_segments", ",", "size", "[", "1", "]", "/", "y_segments", ")", ")", "# Deform bottom", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'if((y<.00001) and (x<=%s) and (x>0),(x)*%s,x)'", "%", "(", "x_segments", ",", "size", "[", "0", "]", "/", "x_segments", ")", ",", "y_func", "=", "'if((y<.00001) and (x<=%s) and (x>0),0,y)'", "%", "(", "x_segments", ")", ")", "else", ":", "# muparser version: 2.2.5", "# Deform left side", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'((y>0) && (x<{yseg}) ? 0 : x)'", ".", "format", "(", "yseg", "=", "y_segments", ")", ",", "y_func", "=", "'((y>0) && (x<%s) ? (x+1)*%s : y)'", "%", "(", "y_segments", ",", "size", "[", "1", "]", "/", "y_segments", ")", ")", "# Deform top", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'((y>0) && (x>=%s) ? (x-%s+1)*%s : x)'", "%", "(", "y_segments", ",", "y_segments", ",", "size", "[", "0", "]", "/", "x_segments", ")", ",", "y_func", "=", "'((y>0) && (x>=%s) ? %s : y)'", "%", "(", "y_segments", ",", "size", "[", "1", "]", ")", ")", "# Deform right side", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'((y<.00001) && (x>%s) ? %s : x)'", "%", "(", "x_segments", ",", "size", "[", "0", "]", ")", ",", "y_func", "=", "'((y<.00001) && (x>%s) ? (x-%s)*%s : y)'", "%", "(", "x_segments", ",", "x_segments", ",", "size", "[", "1", "]", "/", "y_segments", ")", ")", "# Deform bottom", "transform", ".", "vert_function", "(", "script", ",", "x_func", "=", "'((y<.00001) && (x<=%s) && (x>0) ? (x)*%s : x)'", "%", "(", "x_segments", ",", "size", "[", "0", "]", "/", "x_segments", ")", ",", "y_func", "=", "'((y<.00001) && (x<=%s) && (x>0) ? 
0 : y)'", "%", "(", "x_segments", ")", ")", "if", "center", ":", "transform", ".", "translate", "(", "script", ",", "[", "-", "size", "[", "0", "]", "/", "2", ",", "-", "size", "[", "1", "]", "/", "2", "]", ")", "if", "color", "is", "not", "None", ":", "vert_color", ".", "function", "(", "script", ",", "color", "=", "color", ")", "return", "None" ]
Creates a plane with a specified number of vertices on its sides, but no vertices on the interior. Currently used to create a simpler bottom for cube_hires.
[ "Creates", "a", "plane", "with", "a", "specified", "number", "of", "vertices", "on", "it", "sides", "but", "no", "vertices", "on", "the", "interior", "." ]
python
test
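A hedged usage sketch for the function above, following MeshLabXML's usual FilterScript workflow; the constructor arguments, output filename, and segment counts are assumptions for illustration rather than values from this record.

import meshlabxml as mlx

# Hedged sketch: FilterScript and run_script() follow MeshLabXML's typical
# pattern, but their exact signatures are assumed here.
script = mlx.FilterScript(file_out='plane.ply', ml_version='2016.12')
mlx.create.plane_hires_edges(script, size=[2.0, 1.0],
                             x_segments=8, y_segments=4, center=True)
script.run_script()   # writes the edge-subdivided plane to plane.ply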
joelfrederico/SciSalt
scisalt/matplotlib/less_labels.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/matplotlib/less_labels.py#L7-L15
def less_labels(ax, x_fraction=0.5, y_fraction=0.5): """ Scale the number of tick labels in x and y by *x_fraction* and *y_fraction* respectively. """ nbins = _np.size(ax.get_xticklabels()) ax.locator_params(nbins=_np.floor(nbins*x_fraction), axis='x') nbins = _np.size(ax.get_yticklabels()) ax.locator_params(nbins=_np.floor(nbins*y_fraction), axis='y')
[ "def", "less_labels", "(", "ax", ",", "x_fraction", "=", "0.5", ",", "y_fraction", "=", "0.5", ")", ":", "nbins", "=", "_np", ".", "size", "(", "ax", ".", "get_xticklabels", "(", ")", ")", "ax", ".", "locator_params", "(", "nbins", "=", "_np", ".", "floor", "(", "nbins", "*", "x_fraction", ")", ",", "axis", "=", "'x'", ")", "nbins", "=", "_np", ".", "size", "(", "ax", ".", "get_yticklabels", "(", ")", ")", "ax", ".", "locator_params", "(", "nbins", "=", "_np", ".", "floor", "(", "nbins", "*", "y_fraction", ")", ",", "axis", "=", "'y'", ")" ]
Scale the number of tick labels in x and y by *x_fraction* and *y_fraction* respectively.
[ "Scale", "the", "number", "of", "tick", "labels", "in", "x", "and", "y", "by", "*", "x_fraction", "*", "and", "*", "y_fraction", "*", "respectively", "." ]
python
valid
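A brief usage sketch, assuming less_labels is importable from scisalt.matplotlib (the import path is inferred from the record's file location); the figure is drawn once before calling the helper, since it counts the tick labels that currently exist on the axes.

import numpy as np
import matplotlib.pyplot as plt
from scisalt.matplotlib import less_labels   # assumed public import path

# Hedged sketch: thin out tick labels on a dense plot by half in each axis.
x = np.linspace(0, 10, 200)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x))
fig.canvas.draw()    # tick labels must exist before they can be counted
less_labels(ax, x_fraction=0.5, y_fraction=0.5)
plt.show()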