Dataset schema:

    column            type           range / classes
    ----------------  -------------  -------------------
    repo              string         lengths 7 - 55
    path              string         lengths 4 - 223
    url               string         lengths 87 - 315
    code              string         lengths 75 - 104k
    code_tokens       list
    docstring         string         lengths 1 - 46.9k
    docstring_tokens  list
    language          stringclasses  1 value
    partition         stringclasses  3 values
    avg_line_len      float64        7.91 - 980
estnltk/estnltk
estnltk/mw_verbs/verbchain_detector.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/verbchain_detector.py#L37-L85
def removeRedundantVerbChains( foundChains, removeOverlapping = True, removeSingleAraAndEi = False ):
    ''' Removes redundant verb chains: chains that partially or fully overlap other
        chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains
        (if removeSingleAraAndEi == True);
        In general there should not be many overlaps, because phrase expansion always
        tries to check that an expansion does not overlap an existing phrase;
        Overlaps mostly arise when wrong finite-verb analyses have slipped into the
        morphological analysis (or the analyses remained ambiguous), so more finite
        verbs are detected in a clause than necessary.
        Heuristic: of two overlapping chains we keep the phrase that starts earlier
        and set its OTHER_VERBS value to True, marking that there is some confusion
        with other verbs in the context.
    '''
    toDelete = []
    for i in range(len(foundChains)):
        matchObj1 = foundChains[i]
        if removeOverlapping:
            for j in range(i+1, len(foundChains)):
                matchObj2 = foundChains[j]
                if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX] == matchObj2[CLAUSE_IDX]:
                    phrase1 = set(matchObj1[PHRASE])
                    phrase2 = set(matchObj2[PHRASE])
                    intersect = phrase1.intersection(phrase2)
                    if len(intersect) > 0:
                        # In general there should be no overlaps, because phrase expansion
                        # always tries to check that an expansion does not overlap an
                        # existing phrase; overlaps mostly arise when finite-verb analyses
                        # remained ambiguous (or wrong analyses slipped in), so more finite
                        # verbs are detected in a clause than necessary.
                        # Heuristic: keep the phrase that starts earlier and append a
                        # question mark to it (since we are not sure everything is fine).
                        minWid1 = min(matchObj1[PHRASE])
                        minWid2 = min(matchObj2[PHRASE])
                        if minWid1 < minWid2:
                            matchObj1[OTHER_VERBS] = True
                            toDelete.append(j)
                        else:
                            matchObj2[OTHER_VERBS] = True
                            toDelete.append(i)
        if removeSingleAraAndEi:
            if ( len(matchObj1[PATTERN])==1 and re.match('^(ei|ära)$', matchObj1[PATTERN][0]) ):
                toDelete.append(i)
    if toDelete:
        if len(set(toDelete)) != len(toDelete):
            toDelete = list(set(toDelete))  # Remove duplicates
        toDelete = [ foundChains[i] for i in toDelete ]
        for verbObj in toDelete:
            foundChains.remove(verbObj)
Removes redundant verb chains: chains that partially or fully overlap other chains (removeOverlapping == True), and single-word 'ei' and 'ära' chains (if removeSingleAraAndEi == True); In general there should not be many overlaps, because phrase expansion always tries to check that an expansion does not overlap an existing phrase; Overlaps mostly arise when wrong finite-verb analyses have slipped into the morphological analysis (or the analyses remained ambiguous), so more finite verbs are detected in a clause than necessary. Heuristic: of two overlapping chains we keep the phrase that starts earlier and set its OTHER_VERBS value to True, marking that there is some confusion with other verbs in the context.
python
train
60.632653
thunder-project/thunder
thunder/series/series.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L233-L237
def sum(self):
    """
    Compute the sum across records.
    """
    return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True))
Compute the sum across records.
python
train
31.8
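The `sum` method above leans entirely on numpy's keepdims semantics; a minimal standalone sketch of that reduction in plain numpy (the Series wrapper, `baseaxes`, and `_constructor` are thunder internals and are left out):

    import numpy as np

    values = np.arange(12, dtype='float32').reshape(3, 4)  # 3 records of length 4
    total = values.sum(axis=0, keepdims=True)              # reduce across records
    print(total.shape)  # (1, 4) -- keepdims preserves the reduced axis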
DataONEorg/d1_python
lib_client/src/d1_client/baseclient_1_1.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/baseclient_1_1.py#L88-L109
def query(
    self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
    """See Also: queryResponse()

    Args:
      queryEngine:
      query_str:
      vendorSpecific:
      do_post:
      **kwargs:

    Returns:

    """
    response = self.queryResponse(
        queryEngine, query_str, vendorSpecific, do_post, **kwargs
    )
    if self._content_type_is_json(response):
        return self._read_json_response(response)
    else:
        return self._read_stream_response(response)
See Also: queryResponse() Args: queryEngine: query_str: vendorSpecific: do_post: **kwargs: Returns:
python
train
25.590909
Capitains/MyCapytain
MyCapytain/common/utils/xml.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/utils/xml.py#L202-L304
def passageLoop(parent, new_tree, xpath1, xpath2=None, preceding_siblings=False, following_siblings=False):
    """ Loop over passages to construct and increment new tree given a parent and XPaths

    :param parent: Parent on which to perform xpath
    :param new_tree: Parent on which to add nodes
    :param xpath1: List of xpath elements
    :type xpath1: [str]
    :param xpath2: List of xpath elements
    :type xpath2: [str]
    :param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
    :param following_siblings: Append following siblings of XPath 1/2 match to the tree
    :return: Newly incremented tree
    """
    current_1, queue_1 = __formatXpath__(xpath1)

    if xpath2 is None:
        # In case we need what is following or preceding our node
        result_1, loop = performXpath(parent, current_1)
        if loop is True:
            queue_1 = xpath1

        central = None
        has_no_queue = len(queue_1) == 0
        # For each sibling, when we need them in the context of a range
        if preceding_siblings or following_siblings:
            for sibling in xmliter(parent):
                if sibling == result_1:
                    central = True
                    # We copy the node we looked for (Result_1)
                    child = copyNode(result_1, children=has_no_queue, parent=new_tree)
                    # if we don't have children
                    # we loop over the passage child
                    if not has_no_queue:
                        passageLoop(
                            result_1,
                            child,
                            queue_1,
                            None,
                            preceding_siblings=preceding_siblings,
                            following_siblings=following_siblings
                        )
                    # If we were waiting for preceding_siblings, we break it off
                    # As we don't need to go further
                    if preceding_siblings:
                        break
                elif not central and preceding_siblings:
                    copyNode(sibling, parent=new_tree, children=True)
                elif central and following_siblings:
                    copyNode(sibling, parent=new_tree, children=True)
    else:
        result_1, loop = performXpath(parent, current_1)
        if loop is True:
            queue_1 = xpath1
            if xpath2 == xpath1:
                current_2, queue_2 = current_1, queue_1
            else:
                current_2, queue_2 = __formatXpath__(xpath2)
        else:
            current_2, queue_2 = __formatXpath__(xpath2)

        if xpath1 != xpath2:
            result_2, loop = performXpath(parent, current_2)
            if loop is True:
                queue_2 = xpath2
        else:
            result_2 = result_1

        if result_1 == result_2:
            has_no_queue = len(queue_1) == 0
            child = copyNode(result_1, children=has_no_queue, parent=new_tree)
            if not has_no_queue:
                passageLoop(
                    result_1,
                    child,
                    queue_1,
                    queue_2
                )
        else:
            start = False
            # For each sibling
            for sibling in xmliter(parent):
                # If we have found start
                # We copy the node because we are between start and end
                if start:
                    # If we are at the end
                    # We break the copy
                    if sibling == result_2:
                        break
                    else:
                        copyNode(sibling, parent=new_tree, children=True)
                # If this is start
                # Then we copy it and initiate star
                elif sibling == result_1:
                    start = True
                    has_no_queue_1 = len(queue_1) == 0
                    node = copyNode(sibling, children=has_no_queue_1, parent=new_tree)
                    if not has_no_queue_1:
                        passageLoop(sibling, node, queue_1, None, following_siblings=True)

            continue_loop = len(queue_2) == 0
            node = copyNode(result_2, children=continue_loop, parent=new_tree)
            if not continue_loop:
                passageLoop(result_2, node, queue_2, None, preceding_siblings=True)

    return new_tree
Loop over passages to construct and increment new tree given a parent and XPaths

:param parent: Parent on which to perform xpath
:param new_tree: Parent on which to add nodes
:param xpath1: List of xpath elements
:type xpath1: [str]
:param xpath2: List of xpath elements
:type xpath2: [str]
:param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
:param following_siblings: Append following siblings of XPath 1/2 match to the tree
:return: Newly incremented tree
python
train
41.893204
DataBiosphere/dsub
dsub/providers/google_v2.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_v2.py#L405-L416
def _map(self, event):
    """Extract elements from an operation event and map to a named event."""
    description = event.get('description', '')
    start_time = google_base.parse_rfc3339_utc_string(
        event.get('timestamp', ''))

    for name, regex in _EVENT_REGEX_MAP.items():
        match = regex.match(description)
        if match:
            return {'name': name, 'start-time': start_time}, match

    return {'name': description, 'start-time': start_time}, None
Extract elements from an operation event and map to a named event.
python
valid
38.333333
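`_EVENT_REGEX_MAP` is not shown in this record; a self-contained sketch of the same first-match-wins dispatch, with hypothetical event regexes standing in for the real map:

    import re

    EVENT_REGEX_MAP = {  # hypothetical stand-in for dsub's _EVENT_REGEX_MAP
        'pulling-image': re.compile(r'^Started pulling "(.+)"'),
        'localizing-files': re.compile(r'^Started copying files'),
    }

    def map_event(description):
        # Return the first named event whose regex matches, else the raw text.
        for name, regex in EVENT_REGEX_MAP.items():
            match = regex.match(description)
            if match:
                return {'name': name}, match
        return {'name': description}, None

    print(map_event('Started pulling "ubuntu"')[0])  # {'name': 'pulling-image'}
    print(map_event('worker released')[0])           # {'name': 'worker released'}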
saltstack/salt
salt/modules/inspectlib/fsdb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/fsdb.py#L142-L154
def open(self, dbname=None):
    '''
    Open database from the path with the name or latest.
    If there are no databases yet, create a new one implicitly.

    :return:
    '''
    databases = self.list()
    if self.is_closed():
        self.db_path = os.path.join(self.path, dbname or (databases and databases[0] or self.new()))
        if not self._opened:
            self.list_tables()
            self._opened = True
Open database from the path with the name or latest. If there are no databases yet, create a new one implicitly. :return:
python
train
34.846154
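The interesting line in `open` is the fallback chain for the database name; a small sketch of just that idiom (explicit name wins, else the first existing database, else a freshly created one):

    import os

    def resolve_db_path(path, dbname, databases, new):
        # dbname, else first existing database, else create a new one
        return os.path.join(path, dbname or (databases and databases[0] or new()))

    print(resolve_db_path('/tmp/db', 'mydb', [], lambda: 'fresh'))     # /tmp/db/mydb
    print(resolve_db_path('/tmp/db', None, ['old'], lambda: 'fresh'))  # /tmp/db/old
    print(resolve_db_path('/tmp/db', None, [], lambda: 'fresh'))       # /tmp/db/fresh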
mota/i3-cycle
i3_cycle.py
https://github.com/mota/i3-cycle/blob/58947cccb1060c0543a6d9c1f974ee80069110e1/i3_cycle.py#L76-L98
def main():
    """ Entry point """
    parser = ArgumentParser()
    parser.add_argument("direction", choices=(
        "up", "down", "left", "right",
        "next", "prev"
    ), help="Direction to put the focus on")
    args = parser.parse_args()

    tree = i3Tree()
    con = None

    if args.direction in ("next", "prev"):
        con = cycle_outputs(tree, args.direction)
    else:
        con = cycle_windows(tree, args.direction)

    if con:
        i3.focus(con_id=con.id)
Entry point
python
train
25.173913
ClearcodeHQ/mirakuru
src/mirakuru/base.py
https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L416-L432
def start(self):
    """
    Start executor with additional checks.

    Checks if a previous executor isn't running, then starts the process
    (executor) and waits until it's started.

    :returns: itself
    :rtype: Executor
    """
    if self.pre_start_check():
        # Some other executor (or process) is running with same config:
        raise AlreadyRunning(self)

    super(Executor, self).start()
    self.wait_for(self.check_subprocess)
    return self
Start executor with additional checks. Checks if a previous executor isn't running, then starts the process (executor) and waits until it's started. :returns: itself :rtype: Executor
python
train
29.058824
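A usage sketch for the executor lifecycle above, assuming mirakuru's TCPExecutor and a locally installed redis-server (any command that opens the given port would do):

    from mirakuru import TCPExecutor

    executor = TCPExecutor('redis-server --port 6379', host='localhost', port=6379)
    executor.start()  # raises AlreadyRunning if a matching process already runs
    try:
        pass  # talk to the now-ready service here
    finally:
        executor.stop()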
ofa/django-bouncy
django_bouncy/views.py
https://github.com/ofa/django-bouncy/blob/a386dfa8c4ce59bd18978a3537c03cd6ad07bf06/django_bouncy/views.py#L235-L272
def process_delivery(message, notification):
    """Function to process a delivery notification"""
    mail = message['mail']
    delivery = message['delivery']

    if 'timestamp' in delivery:
        delivered_datetime = clean_time(delivery['timestamp'])
    else:
        delivered_datetime = None

    deliveries = []
    for eachrecipient in delivery['recipients']:
        # Create each delivery
        deliveries += [Delivery.objects.create(
            sns_topic=notification['TopicArn'],
            sns_messageid=notification['MessageId'],
            mail_timestamp=clean_time(mail['timestamp']),
            mail_id=mail['messageId'],
            mail_from=mail['source'],
            address=eachrecipient,
            # delivery
            delivered_time=delivered_datetime,
            processing_time=int(delivery['processingTimeMillis']),
            smtp_response=delivery['smtpResponse']
        )]

    # Send signals for each delivery.
    for eachdelivery in deliveries:
        signals.feedback.send(
            sender=Delivery,
            instance=eachdelivery,
            message=message,
            notification=notification
        )

    logger.info('Logged %s Deliveries(s)', str(len(deliveries)))

    return HttpResponse('Delivery Processed')
Function to process a delivery notification
python
train
32.710526
dw/mitogen
ansible_mitogen/runner.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/runner.py#L195-L216
def check(self):
    """
    Compare the :func:`os.stat` for the pam_env style environment file
    `path` with the previous result `old_st`, which may be :data:`None` if
    the previous stat attempt failed. Reload its contents if the file has
    changed or appeared since last attempt.

    :returns:
        New :func:`os.stat` result. The new call to :func:`reload_env`
        should pass it as the value of `old_st`.
    """
    st = self._stat()
    if self._st == st:
        return

    self._st = st
    self._remove_existing()

    if st is None:
        LOG.debug('%r: file has disappeared', self)
    else:
        self._on_file_changed()
Compare the :func:`os.stat` for the pam_env style environment file `path` with the previous result `old_st`, which may be :data:`None` if the previous stat attempt failed. Reload its contents if the file has changed or appeared since last attempt. :returns: New :func:`os.stat` result. The new call to :func:`reload_env` should pass it as the value of `old_st`.
python
train
31.909091
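A standalone sketch of the stat-compare-reload pattern `check` implements, where a `None` stat result models the missing-file case:

    import os

    def stat_or_none(path):
        try:
            return os.stat(path)
        except OSError:
            return None  # file absent: mirrors the failed-stat case

    old_st = None
    st = stat_or_none('/etc/environment')
    if st != old_st:
        old_st = st
        if st is None:
            print('file has disappeared')
        else:
            print('file changed or appeared; reload its contents')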
jsvine/tinyapi
tinyapi/draft.py
https://github.com/jsvine/tinyapi/blob/ac2cf0400b2a9b22bd0b1f43b36be99f5d1a787c/tinyapi/draft.py#L83-L87
def delete(self):
    """Delete the draft."""
    response = self.session.request("delete:Message", [ self.message_id ])
    self.data = response
    return self
Delete the draft.
python
train
34.6
deep-compute/logagg
logagg/formatters.py
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L162-L196
def mongodb(line):
    '''
    >>> import pprint

    >>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
    >>> output_line1 = mongodb(input_line1)
    >>> pprint.pprint(output_line1)
    {'data': {'component': 'REPL',
              'context': '[signalProcessingThread]',
              'message': 'shutting down replication subsystems',
              'severity': 'I',
              'timestamp': '2017-08-17T07:56:33.489+0200'},
     'timestamp': '2017-08-17T07:56:33.489+0200',
     'type': 'log'}

    >>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
    >>> output_line2 = mongodb(input_line2)
    >>> pprint.pprint(output_line2)
    {'data': {'component': 'NETWORK',
              'context': '[initandlisten]',
              'message': 'No primary detected for set confsvr_repl1',
              'severity': 'W',
              'timestamp': '2017-08-17T07:56:33.515+0200'},
     'timestamp': '2017-08-17T07:56:33.515+0200',
     'type': 'log'}
    '''
    keys = ['timestamp', 'severity', 'component', 'context', 'message']
    values = re.split(r'\s+', line, maxsplit=4)
    mongodb_log = dict(zip(keys, values))

    return dict(
        timestamp=values[0],
        data=mongodb_log,
        type='log',
    )
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
          'context': '[signalProcessingThread]',
          'message': 'shutting down replication subsystems',
          'severity': 'I',
          'timestamp': '2017-08-17T07:56:33.489+0200'},
 'timestamp': '2017-08-17T07:56:33.489+0200',
 'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
          'context': '[initandlisten]',
          'message': 'No primary detected for set confsvr_repl1',
          'severity': 'W',
          'timestamp': '2017-08-17T07:56:33.515+0200'},
 'timestamp': '2017-08-17T07:56:33.515+0200',
 'type': 'log'}
python
train
37.685714
bitesofcode/projex
projex/dataset.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/dataset.py#L90-L112
def fromXml(cls, xparent):
    """
    Loads the settings for this dataset to the inputted parent xml.

    :param      xparent | <xml.etree.ElementTree.Element>
    """
    output = cls()

    for xentry in xparent:
        key = xentry.get('key')
        if not key:
            continue

        typ = xentry.get('type', 'str')

        if typ in DataSet._xmlTypes:
            value = DataSet._xmlTypes[typ][1](xentry)
        else:
            value = xentry.get('value', '')

        output.define(key, value)

    return output
Loads the settings for this dataset to the inputted parent xml. :param xparent | <xml.etree.ElementTree.Element>
python
train
25.391304
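A minimal sketch of the key/type/value layout `fromXml` walks, using only the standard library; the `DataSet._xmlTypes` deserializer table is a library internal and is faked here with a one-entry dict:

    from xml.etree import ElementTree

    xml_types = {'int': lambda xentry: int(xentry.get('value', '0'))}  # stand-in

    xparent = ElementTree.fromstring(
        '<settings>'
        '<entry key="retries" type="int" value="3"/>'
        '<entry key="title" value="demo"/>'
        '</settings>'
    )

    settings = {}
    for xentry in xparent:
        key = xentry.get('key')
        if not key:
            continue
        typ = xentry.get('type', 'str')
        settings[key] = xml_types[typ](xentry) if typ in xml_types else xentry.get('value', '')

    print(settings)  # {'retries': 3, 'title': 'demo'}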
saltstack/salt
salt/cli/support/collector.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L409-L419
def _get_action_type(self, action):
    '''
    Get action type.

    :param action:
    :return:
    '''
    action_name = next(iter(action or {'': None}))
    if ':' not in action_name:
        action_name = '{}:{}'.format(self.CALL_TYPE, action_name)

    return action_name.split(':')[0] or None
Get action type. :param action: :return:
python
train
29.454545
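The `next(iter(action or {'': None}))` line is the whole trick: it grabs the first key of the mapping and tolerates a missing action. A standalone sketch, with a hypothetical `call_type` default in place of `self.CALL_TYPE`:

    def get_action_type(action, call_type='call'):
        action_name = next(iter(action or {'': None}))  # first key, or ''
        if ':' not in action_name:
            action_name = '{}:{}'.format(call_type, action_name)
        return action_name.split(':')[0] or None

    print(get_action_type({'run:network.test': None}))  # run
    print(get_action_type({'network.test': None}))      # call
    print(get_action_type(None))                        # call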
kevinconway/daemons
daemons/startstop/simple.py
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L24-L49
def start(self):
    """Start the process with daemonization.

    If the process is already started this call should exit with code
    ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and
    then call 'run'.
    """
    if self.pid is not None:
        LOG.error(
            "The process is already running with pid {0}.".format(self.pid)
        )
        sys.exit(exit.ALREADY_RUNNING)

    self.daemonize()

    LOG.info("Beginning run loop for process.")

    try:
        self.run()
    except Exception:
        LOG.exception("Uncaught exception in the daemon run() method.")
        self.stop()
        sys.exit(exit.RUN_FAILURE)
Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'.
python
train
27.076923
apple/turicreate
src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py#L14-L19
def _string_hash(s):
    """String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
    h = 5381
    for c in s:
        h = h * 33 + ord(c)
    return h
String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`).
python
train
32.166667
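A quick check of the property the docstring claims: djb2 is deterministic run to run, while the builtin `hash` of a str varies with per-process hash randomization:

    def string_hash(s):
        h = 5381
        for c in s:
            h = h * 33 + ord(c)
        return h

    print(string_hash('boat'))  # same integer in every run, py2 and py3
    # hash('boat'), by contrast, changes between runs unless PYTHONHASHSEED is pinned.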
Riparo/nougat
nougat/utils.py
https://github.com/Riparo/nougat/blob/8453bc37e0b782f296952f0a418532ebbbcd74f3/nougat/utils.py#L16-L38
def is_middleware(func) -> bool:
    """
    test whether it is a middleware
    :return: Boolean
    """
    if inspect.isfunction(func):
        _check = func
        _name = func.__name__
    else:
        _check = func.__call__
        _name = func.__class__.__name__

    if not inspect.iscoroutinefunction(_check):
        raise UnknownMiddlewareException("Middleware {} should be async function".format(_name))

    args = list(inspect.signature(_check).parameters.keys())

    if set(args) - MIDDLEWARE_PARAMETER_BOUNDARY:
        raise UnknownMiddlewareException("Parameters of middleware {} "
                                         "must be in list ['app', 'request', 'response', 'next']".format(_name))

    return True
test whether it is a middleware :return: Boolean
python
train
30.913043
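A self-contained sketch of the same signature check, with a plain set and TypeError standing in for nougat's MIDDLEWARE_PARAMETER_BOUNDARY and UnknownMiddlewareException:

    import inspect

    ALLOWED = {'app', 'request', 'response', 'next'}  # stand-in boundary set

    def validate(func):
        check = func if inspect.isfunction(func) else func.__call__
        if not inspect.iscoroutinefunction(check):
            raise TypeError('middleware must be an async function')
        extra = set(inspect.signature(check).parameters) - ALLOWED
        if extra:
            raise TypeError('unexpected parameters: {}'.format(sorted(extra)))
        return True

    async def logger_middleware(request, next):
        return await next()

    print(validate(logger_middleware))  # True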
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1516-L1561
def stage_tc_batch(self, owner, staging_data):
    """Stage data in ThreatConnect Platform using batch API.

    Args:
        owner (str): The ThreatConnect owner to submit batch job.
        staging_data (dict): A dict of ThreatConnect batch data.
    """
    batch = self.tcex.batch(owner)
    for group in staging_data.get('group') or []:
        # add to redis
        variable = group.pop('variable', None)
        path = group.pop('path', None)
        data = self.path_data(group, path)

        # update group data
        if group.get('xid') is None:
            # add xid if one doesn't exist
            group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)

        # add owner name
        group['ownerName'] = owner

        # add to batch
        batch.add_group(group)

        # create tcentity
        if variable is not None and data is not None:
            self.stage_redis(variable, self.stage_tc_group_entity(data))

    for indicator in staging_data.get('indicator') or []:
        # add to redis
        variable = indicator.pop('variable', None)
        path = indicator.pop('path', None)
        if indicator.get('xid') is None:
            indicator['xid'] = self.stage_tc_batch_xid(
                indicator.get('type'), indicator.get('summary'), owner
            )
        indicator['ownerName'] = owner

        # add to batch after extra data has been popped
        batch.add_indicator(indicator)

        data = self.path_data(dict(indicator), path)
        if variable is not None and data is not None:
            # if isinstance(data, (dict)):
            #     tcentity uses value as the name
            #     data['value'] = data.pop('summary')
            self.stage_redis(variable, self.stage_tc_indicator_entity(data))

    # submit batch
    batch_results = batch.submit()
    self.log.debug('[stage] Batch Results: {}'.format(batch_results))
    for error in batch_results.get('errors') or []:
        self.log.error('[stage] {}'.format(error))
Stage data in ThreatConnect Platform using batch API. Args: owner (str): The ThreatConnect owner to submit batch job. staging_data (dict): A dict of ThreatConnect batch data.
python
train
46.543478
quantopian/pyfolio
pyfolio/risk.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/risk.py#L328-L354
def plot_cap_exposures_longshort(long_exposures, short_exposures, ax=None):
    """
    Plots outputs of compute_cap_exposures as area charts

    Parameters
    ----------
    long_exposures, short_exposures : arrays
        Arrays of long and short market cap exposures (output of
        compute_cap_exposures).
    """
    if ax is None:
        ax = plt.gca()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))

    ax.stackplot(long_exposures[0].index, long_exposures,
                 labels=CAP_BUCKETS.keys(), colors=color_list, alpha=0.8,
                 baseline='zero')
    ax.stackplot(long_exposures[0].index, short_exposures,
                 colors=color_list, alpha=0.8, baseline='zero')
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Long and short exposures to market caps',
           ylabel='Proportion of long/short exposure in market cap buckets')
    ax.legend(loc='upper left', frameon=True, framealpha=0.5)

    return ax
Plots outputs of compute_cap_exposures as area charts

Parameters
----------
long_exposures, short_exposures : arrays
    Arrays of long and short market cap exposures (output of
    compute_cap_exposures).
python
valid
35.074074
boriel/zxbasic
asmparse.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L826-L832
def p_namespace(p):
    """ asm : NAMESPACE ID
    """
    global NAMESPACE
    NAMESPACE = normalize_namespace(p[2])
    __DEBUG__('Setting namespace to ' + (NAMESPACE.rstrip(DOT) or DOT), level=1)
asm : NAMESPACE ID
python
train
27.571429
ergo/ziggurat_foundations
ziggurat_foundations/models/base.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/base.py#L45-L51
def get_appstruct(self):
    """ return a list of (key, value) tuples corresponding to this model's data """
    result = []
    for k in self._get_keys():
        result.append((k, getattr(self, k)))
    return result
return a list of (key, value) tuples corresponding to this model's data
python
train
34.428571
learningequality/morango
morango/controller.py
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/controller.py#L5-L13
def _self_referential_fk(klass_model):
    """
    Return whether this model has a self ref FK, and the name for the field
    """
    for f in klass_model._meta.concrete_fields:
        if f.related_model:
            if issubclass(klass_model, f.related_model):
                return f.attname
    return None
Return whether this model has a self ref FK, and the name for the field
python
valid
33.777778
miyakogi/wdom
wdom/options.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/options.py#L133-L145
def parse_command_line() -> Namespace:
    """Parse command line options and set them to ``config``.

    This function skips unknown command line options. After parsing
    options, set log level and set options in ``tornado.options``.
    """
    import tornado.options
    parser.parse_known_args(namespace=config)
    set_loglevel()  # set new log level based on command line option
    for k, v in vars(config).items():
        if k.startswith('log'):
            tornado.options.options.__setattr__(k, v)
    return config
Parse command line options and set them to ``config``. This function skips unknown command line options. After parsing options, set log level and set options in ``tornado.options``.
python
train
39.538462
peterwittek/somoclu
src/Python/somoclu/train.py
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L183-L231
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
          radiuscooling="linear", scale0=0.1, scaleN=0.01,
          scalecooling="linear"):
    """Train the map on the current data in the Somoclu object.

    :param data: Optional parameter to provide training data. It is not
                 necessary if the data was added via the method
                 `update_data`.
    :type data: 2D numpy.array of float32.
    :param epochs: The number of epochs to train the map for.
    :type epochs: int.
    :param radius0: The initial radius on the map where the update happens
                    around a best matching unit. Default value of 0 will
                    trigger a value of min(n_columns, n_rows)/2.
    :type radius0: float.
    :param radiusN: The radius on the map where the update happens around a
                    best matching unit in the final epoch. Default: 1.
    :type radiusN: float.
    :param radiuscooling: The cooling strategy between radius0 and radiusN:

                          * "linear": Linear interpolation (default)
                          * "exponential": Exponential decay

    :param scale0: The initial learning scale. Default value: 0.1.
    :type scale0: float.
    :param scaleN: The learning scale in the final epoch. Default: 0.01.
    :type scaleN: float.
    :param scalecooling: The cooling strategy between scale0 and scaleN:

                         * "linear": Linear interpolation (default)
                         * "exponential": Exponential decay

    :type scalecooling: str.
    """
    _check_cooling_parameters(radiuscooling, scalecooling)
    if self._data is None and data is None:
        raise Exception("No data was provided!")
    elif data is not None:
        self.update_data(data)
    self._init_codebook()
    self.umatrix.shape = (self._n_rows * self._n_columns, )
    self.bmus.shape = (self.n_vectors * 2, )
    wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
               self.n_dim, self.n_vectors, radius0, radiusN,
               radiuscooling, scale0, scaleN, scalecooling,
               self._kernel_type, self._map_type, self._grid_type,
               self._compact_support, self._neighborhood == "gaussian",
               self._std_coeff, self._verbose, self.codebook, self.bmus,
               self.umatrix)
    self.umatrix.shape = (self._n_rows, self._n_columns)
    self.bmus.shape = (self.n_vectors, 2)
    self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
Train the map on the current data in the Somoclu object.

:param data: Optional parameter to provide training data. It is not
             necessary if the data was added via the method `update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
                around a best matching unit. Default value of 0 will
                trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
                best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
                      * "linear": Linear interpolation (default)
                      * "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
                     * "linear": Linear interpolation (default)
                     * "exponential": Exponential decay
:type scalecooling: str.
python
train
54.408163
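A usage sketch based on somoclu's documented API (map size and data here are arbitrary):

    import numpy as np
    import somoclu

    data = np.random.rand(50, 3).astype(np.float32)  # 50 random 3-D vectors
    som = somoclu.Somoclu(20, 15)          # n_columns, n_rows
    som.train(data, epochs=10, radius0=0)  # radius0=0 -> min(n_columns, n_rows)/2
    print(som.bmus.shape)     # (50, 2): best matching unit per vector
    print(som.umatrix.shape)  # (15, 20)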
jborean93/smbprotocol
smbprotocol/tree.py
https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/tree.py#L250-L275
def disconnect(self):
    """
    Disconnects the tree connection.
    """
    if not self._connected:
        return

    log.info("Session: %s, Tree: %s - Disconnecting from Tree Connect"
             % (self.session.username, self.share_name))
    req = SMB2TreeDisconnect()
    log.info("Session: %s, Tree: %s - Sending Tree Disconnect message"
             % (self.session.username, self.share_name))
    log.debug(str(req))
    request = self.session.connection.send(req,
                                           sid=self.session.session_id,
                                           tid=self.tree_connect_id)

    log.info("Session: %s, Tree: %s - Receiving Tree Disconnect response"
             % (self.session.username, self.share_name))
    res = self.session.connection.receive(request)
    res_disconnect = SMB2TreeDisconnect()
    res_disconnect.unpack(res['data'].get_value())
    log.debug(str(res_disconnect))
    self._connected = False
    del self.session.tree_connect_table[self.tree_connect_id]
Disconnects the tree connection.
[ "Disconnects", "the", "tree", "connection", "." ]
python
train
41.653846
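A hedged usage sketch for the disconnect method above. The Connection/Session/TreeConnect construction mirrors smbprotocol's documented flow, but treat the exact constructor arguments as assumptions rather than verified calls:

import uuid
from smbprotocol.connection import Connection
from smbprotocol.session import Session
from smbprotocol.tree import TreeConnect

conn = Connection(uuid.uuid4(), "server", 445)
conn.connect()
session = Session(conn, "user", "password")
session.connect()
tree = TreeConnect(session, r"\\server\share")
tree.connect()
try:
    pass  # operate on the share here
finally:
    tree.disconnect()  # returns early if the tree is already disconnected
    conn.disconnect()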
log2timeline/plaso
plaso/analyzers/hashers/manager.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analyzers/hashers/manager.py#L149-L166
def RegisterHasher(cls, hasher_class): """Registers a hasher class. The hasher classes are identified based on their lower case name. Args: hasher_class (type): class object of the hasher. Raises: KeyError: if hasher class is already set for the corresponding name. """ hasher_name = hasher_class.NAME.lower() if hasher_name in cls._hasher_classes: raise KeyError(( 'hasher class already set for name: {0:s}.').format( hasher_class.NAME)) cls._hasher_classes[hasher_name] = hasher_class
[ "def", "RegisterHasher", "(", "cls", ",", "hasher_class", ")", ":", "hasher_name", "=", "hasher_class", ".", "NAME", ".", "lower", "(", ")", "if", "hasher_name", "in", "cls", ".", "_hasher_classes", ":", "raise", "KeyError", "(", "(", "'hasher class already set for name: {0:s}.'", ")", ".", "format", "(", "hasher_class", ".", "NAME", ")", ")", "cls", ".", "_hasher_classes", "[", "hasher_name", "]", "=", "hasher_class" ]
Registers a hasher class. The hasher classes are identified based on their lower case name. Args: hasher_class (type): class object of the hasher. Raises: KeyError: if hasher class is already set for the corresponding name.
[ "Registers", "a", "hasher", "class", "." ]
python
train
30.388889
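A minimal sketch of the duplicate-name guard in RegisterHasher; the FakeHasher class is a hypothetical stand-in and the import path is assumed from the file's location:

from plaso.analyzers.hashers import manager

class FakeHasher(object):
    NAME = 'fake'

manager.HashersManager.RegisterHasher(FakeHasher)   # stored under 'fake'
try:
    manager.HashersManager.RegisterHasher(FakeHasher)
except KeyError:
    pass  # a second registration under the same lower-cased name raises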
mabuchilab/QNET
src/qnet/printing/treeprinting.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/treeprinting.py#L11-L22
def _shorten_render(renderer, max_len): """Return a modified renderer that returns the representation of expr, or '...' if that representation is longer than `max_len`""" def short_renderer(expr): res = renderer(expr) if len(res) > max_len: return '...' else: return res return short_renderer
[ "def", "_shorten_render", "(", "renderer", ",", "max_len", ")", ":", "def", "short_renderer", "(", "expr", ")", ":", "res", "=", "renderer", "(", "expr", ")", "if", "len", "(", "res", ")", ">", "max_len", ":", "return", "'...'", "else", ":", "return", "res", "return", "short_renderer" ]
Return a modified renderer that returns the representation of expr, or '...' if that representation is longer than `max_len`
[ "Return", "a", "modified", "that", "returns", "the", "representation", "of", "expr", "or", "...", "if", "that", "representation", "is", "longer", "than", "max_len" ]
python
train
28.166667
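A quick illustration of the wrapper above; repr serves as the renderer here purely for demonstration:

short_repr = _shorten_render(repr, max_len=10)
print(short_repr([1, 2]))           # '[1, 2]' -- fits within max_len
print(short_repr(list(range(50))))  # '...'    -- representation too long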
saltstack/salt
salt/modules/dracr.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dracr.py#L1285-L1314
def set_chassis_location(location, host=None, admin_username=None, admin_password=None): ''' Set the location of the chassis. location The name of the location to be set on the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_location location-name host=111.222.333.444 admin_username=root admin_password=secret ''' return __execute_cmd('setsysinfo -c chassislocation {0}'.format(location), host=host, admin_username=admin_username, admin_password=admin_password)
[ "def", "set_chassis_location", "(", "location", ",", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ")", ":", "return", "__execute_cmd", "(", "'setsysinfo -c chassislocation {0}'", ".", "format", "(", "location", ")", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ")" ]
Set the location of the chassis. location The name of the location to be set on the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_location location-name host=111.222.333.444 admin_username=root admin_password=secret
[ "Set", "the", "location", "of", "the", "chassis", "." ]
python
train
27.166667
tensorflow/probability
tensorflow_probability/python/mcmc/replica_exchange_mc.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/replica_exchange_mc.py#L519-L556
def bootstrap_results(self, init_state): """Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. This includes replica states. """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'), values=[init_state]): replica_results = [ self.replica_kernels[i].bootstrap_results(init_state) for i in range(self.num_replica) ] init_state_parts = ( list(init_state) if mcmc_util.is_list_like(init_state) else [init_state]) # Convert all state parts to tensors... replica_states = [[ tf.convert_to_tensor(value=s) for s in init_state_parts ] for i in range(self.num_replica)] if not mcmc_util.is_list_like(init_state): replica_states = [s[0] for s in replica_states] return ReplicaExchangeMCKernelResults( replica_states=replica_states, replica_results=replica_results, sampled_replica_states=replica_states, sampled_replica_results=replica_results, )
[ "def", "bootstrap_results", "(", "self", ",", "init_state", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", "=", "mcmc_util", ".", "make_name", "(", "self", ".", "name", ",", "'remc'", ",", "'bootstrap_results'", ")", ",", "values", "=", "[", "init_state", "]", ")", ":", "replica_results", "=", "[", "self", ".", "replica_kernels", "[", "i", "]", ".", "bootstrap_results", "(", "init_state", ")", "for", "i", "in", "range", "(", "self", ".", "num_replica", ")", "]", "init_state_parts", "=", "(", "list", "(", "init_state", ")", "if", "mcmc_util", ".", "is_list_like", "(", "init_state", ")", "else", "[", "init_state", "]", ")", "# Convert all states parts to tensor...", "replica_states", "=", "[", "[", "tf", ".", "convert_to_tensor", "(", "value", "=", "s", ")", "for", "s", "in", "init_state_parts", "]", "for", "i", "in", "range", "(", "self", ".", "num_replica", ")", "]", "if", "not", "mcmc_util", ".", "is_list_like", "(", "init_state", ")", ":", "replica_states", "=", "[", "s", "[", "0", "]", "for", "s", "in", "replica_states", "]", "return", "ReplicaExchangeMCKernelResults", "(", "replica_states", "=", "replica_states", ",", "replica_results", "=", "replica_results", ",", "sampled_replica_states", "=", "replica_states", ",", "sampled_replica_results", "=", "replica_results", ",", ")" ]
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. This includes replica states.
[ "Returns", "an", "object", "with", "the", "same", "type", "as", "returned", "by", "one_step", "." ]
python
test
35.552632
marcinmiklitz/pywindow
pywindow/trajectory.py
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/trajectory.py#L1262-L1297
def _map_trajectory(self): """ Map the byte offsets of each frame in the trajectory file and store them as a class attribute""" self.trajectory_map = {} with open(self.filepath, 'r') as trajectory_file: with closing( mmap( trajectory_file.fileno(), 0, access=ACCESS_READ)) as mapped_file: progress = 0 line = 0 frame = -1 frame_start = 0 while progress <= len(mapped_file): line = line + 1 # We read binary data from the mapped file. bline = mapped_file.readline() # If the bline length equals zero we terminate. # We reached end of the file but still add the last frame! if len(bline) == 0: frame = frame + 1 if progress - frame_start > 10: self.trajectory_map[frame] = [ frame_start, progress ] break # We need to decode the byte line into a utf-8 string. sline = bline.decode("utf-8").strip('\n').split() # We extract map's byte coordinates for each frame if len(sline) == 1 and sline[0] == 'END': frame = frame + 1 self.trajectory_map[frame] = [frame_start, progress] frame_start = progress # Here we extract the map's byte coordinates for the header # And also the periodic system type needed for later. progress = progress + len(bline) self.no_of_frames = frame
[ "def", "_map_trajectory", "(", "self", ")", ":", "self", ".", "trajectory_map", "=", "{", "}", "with", "open", "(", "self", ".", "filepath", ",", "'r'", ")", "as", "trajectory_file", ":", "with", "closing", "(", "mmap", "(", "trajectory_file", ".", "fileno", "(", ")", ",", "0", ",", "access", "=", "ACCESS_READ", ")", ")", "as", "mapped_file", ":", "progress", "=", "0", "line", "=", "0", "frame", "=", "-", "1", "frame_start", "=", "0", "while", "progress", "<=", "len", "(", "mapped_file", ")", ":", "line", "=", "line", "+", "1", "# We read a binary data from a mapped file.", "bline", "=", "mapped_file", ".", "readline", "(", ")", "# If the bline length equals zero we terminate.", "# We reached end of the file but still add the last frame!", "if", "len", "(", "bline", ")", "==", "0", ":", "frame", "=", "frame", "+", "1", "if", "progress", "-", "frame_start", ">", "10", ":", "self", ".", "trajectory_map", "[", "frame", "]", "=", "[", "frame_start", ",", "progress", "]", "break", "# We need to decode byte line into an utf-8 string.", "sline", "=", "bline", ".", "decode", "(", "\"utf-8\"", ")", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", ")", "# We extract map's byte coordinates for each frame", "if", "len", "(", "sline", ")", "==", "1", "and", "sline", "[", "0", "]", "==", "'END'", ":", "frame", "=", "frame", "+", "1", "self", ".", "trajectory_map", "[", "frame", "]", "=", "[", "frame_start", ",", "progress", "]", "frame_start", "=", "progress", "# Here we extract the map's byte coordinates for the header", "# And also the periodic system type needed for later.", "progress", "=", "progress", "+", "len", "(", "bline", ")", "self", ".", "no_of_frames", "=", "frame" ]
Map the byte offsets of each frame in the trajectory file and store them as a class attribute
[ "Return", "filepath", "as", "a", "class", "attribute" ]
python
train
48.888889
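Once _map_trajectory has run, trajectory_map holds [start, stop) byte offsets per frame, which allows random access without re-reading the whole file. A hedged sketch, assuming traj is an already-loaded trajectory object from this module:

start, stop = traj.trajectory_map[5]  # byte range of frame 5
with open(traj.filepath, 'rb') as trajectory_file:
    trajectory_file.seek(start)
    frame_text = trajectory_file.read(stop - start).decode('utf-8')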
tanghaibao/jcvi
jcvi/apps/grid.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/grid.py#L490-L545
def kill(args): """ %prog kill [options] JOBNAMEPAT/JOBIDs Kill jobs based on JOBNAME pattern matching (case-sensitive) or list of JOBIDs (comma separated) Examples: %prog kill "pyth*" # Use regex %prog kill 160253,160245,160252 # Use list of job ids %prog kill all # Everything """ import shlex from jcvi.apps.base import sh, getusername from subprocess import check_output, CalledProcessError import xml.etree.ElementTree as ET valid_methods = ("pattern", "jobid") p = OptionParser(kill.__doc__) p.add_option("--method", choices=valid_methods, help="Identify jobs based on [default: guess]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) username = getusername() tag, = args tag = tag.strip() if tag == "all": sh("qdel -u {0}".format(username)) return valid_jobids = set() method = opts.method or guess_method(tag) if method == "jobid": jobids = tag.split(",") valid_jobids |= set(jobids) elif method == "pattern": qsxmlcmd = 'qstat -u "{0}" -j "{1}" -nenv -njd -xml'.\ format(username, tag) try: qsxml = check_output(shlex.split(qsxmlcmd)).strip() except CalledProcessError as e: qsxml = None logging.debug('No jobs matching the pattern "{0}"'.format(tag)) if qsxml is not None: for job in ET.fromstring(qsxml).findall("djob_info"): for elem in job.findall("element"): jobid = elem.find("JB_job_number").text valid_jobids.add(jobid) if valid_jobids: sh("qdel {0}".format(",".join(valid_jobids)))
[ "def", "kill", "(", "args", ")", ":", "import", "shlex", "from", "jcvi", ".", "apps", ".", "base", "import", "sh", ",", "getusername", "from", "subprocess", "import", "check_output", ",", "CalledProcessError", "import", "xml", ".", "etree", ".", "ElementTree", "as", "ET", "valid_methods", "=", "(", "\"pattern\"", ",", "\"jobid\"", ")", "p", "=", "OptionParser", "(", "kill", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--method\"", ",", "choices", "=", "valid_methods", ",", "help", "=", "\"Identify jobs based on [default: guess]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "username", "=", "getusername", "(", ")", "tag", ",", "=", "args", "tag", "=", "tag", ".", "strip", "(", ")", "if", "tag", "==", "\"all\"", ":", "sh", "(", "\"qdel -u {0}\"", ".", "format", "(", "username", ")", ")", "return", "valid_jobids", "=", "set", "(", ")", "method", "=", "opts", ".", "method", "or", "guess_method", "(", "tag", ")", "if", "method", "==", "\"jobid\"", ":", "jobids", "=", "tag", ".", "split", "(", "\",\"", ")", "valid_jobids", "|=", "set", "(", "jobids", ")", "elif", "method", "==", "\"pattern\"", ":", "qsxmlcmd", "=", "'qstat -u \"{0}\" -j \"{1}\" -nenv -njd -xml'", ".", "format", "(", "username", ",", "tag", ")", "try", ":", "qsxml", "=", "check_output", "(", "shlex", ".", "split", "(", "qsxmlcmd", ")", ")", ".", "strip", "(", ")", "except", "CalledProcessError", "as", "e", ":", "qsxml", "=", "None", "logging", ".", "debug", "(", "'No jobs matching the pattern \"{0}\"'", ".", "format", "(", "tag", ")", ")", "if", "qsxml", "is", "not", "None", ":", "for", "job", "in", "ET", ".", "fromstring", "(", "qsxml", ")", ".", "findall", "(", "\"djob_info\"", ")", ":", "for", "elem", "in", "job", ".", "findall", "(", "\"element\"", ")", ":", "jobid", "=", "elem", ".", "find", "(", "\"JB_job_number\"", ")", ".", "text", "valid_jobids", ".", "add", "(", "jobid", ")", "if", "valid_jobids", ":", "sh", "(", "\"qdel {0}\"", ".", "format", "(", "\",\"", ".", "join", "(", "valid_jobids", ")", ")", ")" ]
%prog kill [options] JOBNAMEPAT/JOBIDs Kill jobs based on JOBNAME pattern matching (case-sensitive) or list of JOBIDs (comma separated) Examples: %prog kill "pyth*" # Use regex %prog kill 160253,160245,160252 # Use list of job ids %prog kill all # Everything
[ "%prog", "kill", "[", "options", "]", "JOBNAMEPAT", "/", "JOBIDs" ]
python
train
31.535714
JohnVinyard/featureflow
featureflow/feature.py
https://github.com/JohnVinyard/featureflow/blob/7731487b00e38fa4f58c88b7881870fda2d69fdb/featureflow/feature.py#L107-L130
def copy( self, extractor=None, needs=None, store=None, data_writer=None, persistence=None, extractor_args=None): """ Use self as a template to build a new feature, replacing values in kwargs """ f = Feature( extractor or self.extractor, needs=needs, store=self.store if store is None else store, encoder=self.encoder, decoder=self.decoder, key=self.key, data_writer=data_writer, persistence=persistence, **(extractor_args or self.extractor_args)) f._fixup_needs() return f
[ "def", "copy", "(", "self", ",", "extractor", "=", "None", ",", "needs", "=", "None", ",", "store", "=", "None", ",", "data_writer", "=", "None", ",", "persistence", "=", "None", ",", "extractor_args", "=", "None", ")", ":", "f", "=", "Feature", "(", "extractor", "or", "self", ".", "extractor", ",", "needs", "=", "needs", ",", "store", "=", "self", ".", "store", "if", "store", "is", "None", "else", "store", ",", "encoder", "=", "self", ".", "encoder", ",", "decoder", "=", "self", ".", "decoder", ",", "key", "=", "self", ".", "key", ",", "data_writer", "=", "data_writer", ",", "persistence", "=", "persistence", ",", "*", "*", "(", "extractor_args", "or", "self", ".", "extractor_args", ")", ")", "f", ".", "_fixup_needs", "(", ")", "return", "f" ]
Use self as a template to build a new feature, replacing values in kwargs
[ "Use", "self", "as", "a", "template", "to", "build", "a", "new", "feature", "replacing", "values", "in", "kwargs" ]
python
train
29.083333
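A small sketch of the template pattern copy enables; base_feature is a hypothetical existing Feature instance:

stored_variant = base_feature.copy(store=True)  # same extractor graph, now persisted
assert stored_variant.key == base_feature.key   # key is carried over verbatim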
RudolfCardinal/pythonlib
cardinal_pythonlib/ui.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/ui.py#L63-L81
def get_save_as_filename(defaultfilename: str, defaultextension: str, title: str = "Save As") -> str: """ Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the filename. """ root = tkinter.Tk() # create and get Tk topmost window # (don't do this too early; the command prompt loses focus) root.withdraw() # won't need this; this gets rid of a blank Tk window root.attributes('-topmost', True) # makes the tk window topmost filename = filedialog.asksaveasfilename( initialfile=defaultfilename, defaultextension=defaultextension, parent=root, title=title ) root.attributes('-topmost', False) # stop the tk window being topmost return filename
[ "def", "get_save_as_filename", "(", "defaultfilename", ":", "str", ",", "defaultextension", ":", "str", ",", "title", ":", "str", "=", "\"Save As\"", ")", "->", "str", ":", "root", "=", "tkinter", ".", "Tk", "(", ")", "# create and get Tk topmost window", "# (don't do this too early; the command prompt loses focus)", "root", ".", "withdraw", "(", ")", "# won't need this; this gets rid of a blank Tk window", "root", ".", "attributes", "(", "'-topmost'", ",", "True", ")", "# makes the tk window topmost", "filename", "=", "filedialog", ".", "asksaveasfilename", "(", "initialfile", "=", "defaultfilename", ",", "defaultextension", "=", "defaultextension", ",", "parent", "=", "root", ",", "title", "=", "title", ")", "root", ".", "attributes", "(", "'-topmost'", ",", "False", ")", "# stop the tk window being topmost", "return", "filename" ]
Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the filename.
[ "Provides", "a", "GUI", "Save", "As", "dialogue", "(", "via", "tkinter", ")", "and", "returns", "the", "filename", "." ]
python
train
40.526316
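Typical call site for the dialogue above; tkinter's asksaveasfilename returns an empty string when the user cancels, so the result should be checked before use:

filename = get_save_as_filename('report.txt', '.txt', title='Save report as')
if filename:  # empty string means the user cancelled the dialogue
    with open(filename, 'w') as f:
        f.write('report body')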
PMEAL/OpenPNM
openpnm/models/geometry/throat_surface_area.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/geometry/throat_surface_area.py#L54-L78
def extrusion(target, throat_perimeter='throat.perimeter', throat_length='throat.length'): r""" Calculate surface area for an arbitrarily shaped throat given the perimeter and length. Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_perimeter : string Dictionary key to the throat perimeter array. Default is 'throat.perimeter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'. """ P = target[throat_perimeter] L = target[throat_length] value = P*L return value
[ "def", "extrusion", "(", "target", ",", "throat_perimeter", "=", "'throat.perimeter'", ",", "throat_length", "=", "'throat.length'", ")", ":", "P", "=", "target", "[", "throat_perimeter", "]", "L", "=", "target", "[", "throat_length", "]", "value", "=", "P", "*", "L", "return", "value" ]
r""" Calculate surface area for an arbitrary shaped throat give the perimeter and length. Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_perimeter : string Dictionary key to the throat perimeter array. Default is 'throat.perimeter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'.
[ "r", "Calculate", "surface", "area", "for", "an", "arbitrary", "shaped", "throat", "give", "the", "perimeter", "and", "length", "." ]
python
train
30.28
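Because the model only indexes two arrays from its target, any dict-like object carrying the source properties will do for a quick check; this sketch deliberately bypasses a full OpenPNM geometry:

import numpy as np

target = {'throat.perimeter': np.array([1.0, 2.0]),
          'throat.length': np.array([0.5, 0.5])}
print(extrusion(target))  # [0.5 1.0] -- elementwise perimeter * length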
ensime/ensime-vim
ensime_shared/ensime.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/ensime.py#L67-L79
def client_status(self, config_path): """Get status of client for a project, given path to its config.""" c = self.client_for(config_path) status = "stopped" if not c or not c.ensime: status = 'unloaded' elif c.ensime.is_ready(): status = 'ready' elif c.ensime.is_running(): status = 'startup' elif c.ensime.aborted(): status = 'aborted' return status
[ "def", "client_status", "(", "self", ",", "config_path", ")", ":", "c", "=", "self", ".", "client_for", "(", "config_path", ")", "status", "=", "\"stopped\"", "if", "not", "c", "or", "not", "c", ".", "ensime", ":", "status", "=", "'unloaded'", "elif", "c", ".", "ensime", ".", "is_ready", "(", ")", ":", "status", "=", "'ready'", "elif", "c", ".", "ensime", ".", "is_running", "(", ")", ":", "status", "=", "'startup'", "elif", "c", ".", "ensime", ".", "aborted", "(", ")", ":", "status", "=", "'aborted'", "return", "status" ]
Get status of client for a project, given path to its config.
[ "Get", "status", "of", "client", "for", "a", "project", "given", "path", "to", "its", "config", "." ]
python
train
34.692308
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L2429-L2453
def crop(self, top=None, bottom=None, right=None, left=None): """Crop image. :param float top: fraction to crop from the top margin :param float bottom: fraction to crop from the bottom margin :param float left: fraction to crop from the left margin :param float right: fraction to crop from the right margin """ extractVOI = vtk.vtkExtractVOI() extractVOI.SetInputData(self.GetInput()) extractVOI.IncludeBoundaryOn() d = self.GetInput().GetDimensions() bx0, bx1, by0, by1 = 0, d[0]-1, 0, d[1]-1 if left is not None: bx0 = int((d[0]-1)*left) if right is not None: bx1 = int((d[0]-1)*(1-right)) if bottom is not None: by0 = int((d[1]-1)*bottom) if top is not None: by1 = int((d[1]-1)*(1-top)) extractVOI.SetVOI(bx0, bx1, by0, by1, 0, 0) extractVOI.Update() img = extractVOI.GetOutput() #img.SetOrigin(-bx0, -by0, 0) self.GetMapper().SetInputData(img) self.GetMapper().Modified() return self
[ "def", "crop", "(", "self", ",", "top", "=", "None", ",", "bottom", "=", "None", ",", "right", "=", "None", ",", "left", "=", "None", ")", ":", "extractVOI", "=", "vtk", ".", "vtkExtractVOI", "(", ")", "extractVOI", ".", "SetInputData", "(", "self", ".", "GetInput", "(", ")", ")", "extractVOI", ".", "IncludeBoundaryOn", "(", ")", "d", "=", "self", ".", "GetInput", "(", ")", ".", "GetDimensions", "(", ")", "bx0", ",", "bx1", ",", "by0", ",", "by1", "=", "0", ",", "d", "[", "0", "]", "-", "1", ",", "0", ",", "d", "[", "1", "]", "-", "1", "if", "left", "is", "not", "None", ":", "bx0", "=", "int", "(", "(", "d", "[", "0", "]", "-", "1", ")", "*", "left", ")", "if", "right", "is", "not", "None", ":", "bx1", "=", "int", "(", "(", "d", "[", "0", "]", "-", "1", ")", "*", "(", "1", "-", "right", ")", ")", "if", "bottom", "is", "not", "None", ":", "by0", "=", "int", "(", "(", "d", "[", "1", "]", "-", "1", ")", "*", "bottom", ")", "if", "top", "is", "not", "None", ":", "by1", "=", "int", "(", "(", "d", "[", "1", "]", "-", "1", ")", "*", "(", "1", "-", "top", ")", ")", "extractVOI", ".", "SetVOI", "(", "bx0", ",", "bx1", ",", "by0", ",", "by1", ",", "0", ",", "0", ")", "extractVOI", ".", "Update", "(", ")", "img", "=", "extractVOI", ".", "GetOutput", "(", ")", "#img.SetOrigin(-bx0, -by0, 0)", "self", ".", "GetMapper", "(", ")", ".", "SetInputData", "(", "img", ")", "self", ".", "GetMapper", "(", ")", ".", "Modified", "(", ")", "return", "self" ]
Crop image. :param float top: fraction to crop from the top margin :param float bottom: fraction to crop from the bottom margin :param float left: fraction to crop from the left margin :param float right: fraction to crop from the right margin
[ "Crop", "image", ".", ":", "param", "float", "top", ":", "fraction", "to", "crop", "from", "the", "top", "margin", ":", "param", "float", "bottom", ":", "fraction", "to", "crop", "from", "the", "bottom", "margin", ":", "param", "float", "left", ":", "fraction", "to", "crop", "from", "the", "left", "margin", ":", "param", "float", "right", ":", "fraction", "to", "crop", "from", "the", "right", "margin" ]
python
train
42.28
ray-project/ray
python/ray/log_monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L210-L223
def run(self): """Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis. """ while True: self.update_log_filenames() self.open_closed_files() anything_published = self.check_log_files_and_publish_updates() # If nothing was published, then wait a little bit before checking # for logs to avoid using too much CPU. if not anything_published: time.sleep(0.05)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "self", ".", "update_log_filenames", "(", ")", "self", ".", "open_closed_files", "(", ")", "anything_published", "=", "self", ".", "check_log_files_and_publish_updates", "(", ")", "# If nothing was published, then wait a little bit before checking", "# for logs to avoid using too much CPU.", "if", "not", "anything_published", ":", "time", ".", "sleep", "(", "0.05", ")" ]
Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis.
[ "Run", "the", "log", "monitor", "." ]
python
train
40.785714
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L880-L896
def get_interface_detail_output_interface_line_protocol_exception_info(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') line_protocol_exception_info = ET.SubElement(interface, "line-protocol-exception-info") line_protocol_exception_info.text = kwargs.pop('line_protocol_exception_info') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_interface_detail_output_interface_line_protocol_exception_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "config", "=", "get_interface_detail", "output", "=", "ET", ".", "SubElement", "(", "get_interface_detail", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-name\"", ")", "interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "line_protocol_exception_info", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"line-protocol-exception-info\"", ")", "line_protocol_exception_info", ".", "text", "=", "kwargs", ".", "pop", "(", "'line_protocol_exception_info'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
54
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2859-L2871
def apply_statusbar_settings(self): """Update status bar widgets settings""" show_status_bar = CONF.get('main', 'show_status_bar') self.statusBar().setVisible(show_status_bar) if show_status_bar: for widget, name in ((self.mem_status, 'memory_usage'), (self.cpu_status, 'cpu_usage')): if widget is not None: widget.setVisible(CONF.get('main', '%s/enable' % name)) widget.set_interval(CONF.get('main', '%s/timeout' % name)) else: return
[ "def", "apply_statusbar_settings", "(", "self", ")", ":", "show_status_bar", "=", "CONF", ".", "get", "(", "'main'", ",", "'show_status_bar'", ")", "self", ".", "statusBar", "(", ")", ".", "setVisible", "(", "show_status_bar", ")", "if", "show_status_bar", ":", "for", "widget", ",", "name", "in", "(", "(", "self", ".", "mem_status", ",", "'memory_usage'", ")", ",", "(", "self", ".", "cpu_status", ",", "'cpu_usage'", ")", ")", ":", "if", "widget", "is", "not", "None", ":", "widget", ".", "setVisible", "(", "CONF", ".", "get", "(", "'main'", ",", "'%s/enable'", "%", "name", ")", ")", "widget", ".", "set_interval", "(", "CONF", ".", "get", "(", "'main'", ",", "'%s/timeout'", "%", "name", ")", ")", "else", ":", "return" ]
Update status bar widgets settings
[ "Update", "status", "bar", "widgets", "settings" ]
python
train
45.307692
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodeconnection.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodeconnection.py#L1438-L1449
def setDirty(self, state=True): """ Flags the connection as being dirty and needing a rebuild. :param state | <bool> """ self._dirty = state # set if this connection should be visible if self._inputNode and self._outputNode: vis = self._inputNode.isVisible() and self._outputNode.isVisible() self.setVisible(vis)
[ "def", "setDirty", "(", "self", ",", "state", "=", "True", ")", ":", "self", ".", "_dirty", "=", "state", "# set if this connection should be visible", "if", "self", ".", "_inputNode", "and", "self", ".", "_outputNode", ":", "vis", "=", "self", ".", "_inputNode", ".", "isVisible", "(", ")", "and", "self", ".", "_outputNode", ".", "isVisible", "(", ")", "self", ".", "setVisible", "(", "vis", ")" ]
Flags the connection as being dirty and needing a rebuild. :param state | <bool>
[ "Flags", "the", "connection", "as", "being", "dirty", "and", "needing", "a", "rebuild", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
33.166667
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L756-L770
def insert_tabs(self, tab, no_tabs=1): """Adds no_tabs tabs before table, appends if tab > maxtabs and marks grid as changed """ # Mark content as changed post_command_event(self.main_window, self.ContentChangedMsg) self.code_array.insert(tab, no_tabs, axis=2) # Update TableChoiceIntCtrl shape = self.grid.code_array.shape post_command_event(self.main_window, self.ResizeGridMsg, shape=shape)
[ "def", "insert_tabs", "(", "self", ",", "tab", ",", "no_tabs", "=", "1", ")", ":", "# Mark content as changed", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "ContentChangedMsg", ")", "self", ".", "code_array", ".", "insert", "(", "tab", ",", "no_tabs", ",", "axis", "=", "2", ")", "# Update TableChoiceIntCtrl", "shape", "=", "self", ".", "grid", ".", "code_array", ".", "shape", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "ResizeGridMsg", ",", "shape", "=", "shape", ")" ]
Adds no_tabs tabs before table, appends if tab > maxtabs and marks grid as changed
[ "Adds", "no_tabs", "tabs", "before", "table", "appends", "if", "tab", ">", "maxtabs" ]
python
train
30.4
biolink/biolink-model
metamodel/utils/generator.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L95-L106
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]: """ Return the list of slots directly included in the class definition. Includes slots whose domain is cls -- as declared in slot.domain or class.slots Does not include slots declared in mixins, apply_to or is_a links @param cls: class name or class definition name @return: all direct class slots """ if not isinstance(cls, ClassDefinition): cls = self.schema.classes[cls] return [self.schema.slots[s] for s in cls.slots]
[ "def", "cls_slots", "(", "self", ",", "cls", ":", "CLASS_OR_CLASSNAME", ")", "->", "List", "[", "SlotDefinition", "]", ":", "if", "not", "isinstance", "(", "cls", ",", "ClassDefinition", ")", ":", "cls", "=", "self", ".", "schema", ".", "classes", "[", "cls", "]", "return", "[", "self", ".", "schema", ".", "slots", "[", "s", "]", "for", "s", "in", "cls", ".", "slots", "]" ]
Return the list of slots directly included in the class definition. Includes slots whose domain is cls -- as declared in slot.domain or class.slots Does not include slots declared in mixins, apply_to or is_a links @param cls: class name or class definition name @return: all direct class slots
[ "Return", "the", "list", "of", "slots", "directly", "included", "in", "the", "class", "definition", ".", "Includes", "slots", "whose", "domain", "is", "cls", "--", "as", "declared", "in", "slot", ".", "domain", "or", "class", ".", "slots" ]
python
train
46.666667
apache/spark
python/pyspark/mllib/evaluation.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/evaluation.py#L504-L511
def precision(self, label=None): """ Returns precision or precision for a given label (category) if specified. """ if label is None: return self.call("precision") else: return self.call("precision", float(label))
[ "def", "precision", "(", "self", ",", "label", "=", "None", ")", ":", "if", "label", "is", "None", ":", "return", "self", ".", "call", "(", "\"precision\"", ")", "else", ":", "return", "self", ".", "call", "(", "\"precision\"", ",", "float", "(", "label", ")", ")" ]
Returns precision or precision for a given label (category) if specified.
[ "Returns", "precision", "or", "precision", "for", "a", "given", "label", "(", "category", ")", "if", "specified", "." ]
python
train
33.625
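Usage against pyspark's MulticlassMetrics, which this method belongs to; predictions_and_labels is an assumed RDD of (prediction, label) float pairs:

from pyspark.mllib.evaluation import MulticlassMetrics

metrics = MulticlassMetrics(predictions_and_labels)
overall = metrics.precision()     # aggregate precision over all labels
label_1 = metrics.precision(1.0)  # precision restricted to label 1.0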
davidfokkema/artist
artist/multi_plot.py
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L86-L95
def set_empty_for_all(self, row_column_list): """Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ for row, column in row_column_list: self.set_empty(row, column)
[ "def", "set_empty_for_all", "(", "self", ",", "row_column_list", ")", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "set_empty", "(", "row", ",", "column", ")" ]
Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
[ "Keep", "all", "specified", "subplots", "completely", "empty", "." ]
python
train
37.9
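A usage sketch for the helper above. Note that, as written, the body iterates the argument directly, so passing None raises a TypeError even though the docstring advertises it; pass an explicit list of (row, column) tuples:

plot.set_empty_for_all([(0, 1), (1, 0)])  # blank out two specific subplots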
fabioz/PyDev.Debugger
_pydev_imps/_pydev_SimpleXMLRPCServer.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_SimpleXMLRPCServer.py#L544-L553
def handle_xmlrpc(self, request_text): """Handle a single XML-RPC request""" response = self._marshaled_dispatch(request_text) sys.stdout.write('Content-Type: text/xml\n') sys.stdout.write('Content-Length: %d\n' % len(response)) sys.stdout.write('\n') sys.stdout.write(response)
[ "def", "handle_xmlrpc", "(", "self", ",", "request_text", ")", ":", "response", "=", "self", ".", "_marshaled_dispatch", "(", "request_text", ")", "sys", ".", "stdout", ".", "write", "(", "'Content-Type: text/xml\\n'", ")", "sys", ".", "stdout", ".", "write", "(", "'Content-Length: %d\\n'", "%", "len", "(", "response", ")", ")", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")", "sys", ".", "stdout", ".", "write", "(", "response", ")" ]
Handle a single XML-RPC request
[ "Handle", "a", "single", "XML", "-", "RPC", "request" ]
python
train
32
zhanglab/psamm
psamm/fastcore.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fastcore.py#L89-L114
def lp10(self, subset_k, subset_p, weights={}): """Force reactions in K above epsilon while minimizing support of P. This program forces reactions in subset K to attain flux > epsilon while minimizing the sum of absolute flux values for reactions in subset P (L1-regularization). """ if self._z is None: self._add_minimization_vars() positive = set(subset_k) - self._flipped negative = set(subset_k) & self._flipped v = self._v.set(positive) cs = self._prob.add_linear_constraints(v >= self._epsilon) self._temp_constr.extend(cs) v = self._v.set(negative) cs = self._prob.add_linear_constraints(v <= -self._epsilon) self._temp_constr.extend(cs) self._prob.set_objective(self._z.expr( (rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p)) self._solve()
[ "def", "lp10", "(", "self", ",", "subset_k", ",", "subset_p", ",", "weights", "=", "{", "}", ")", ":", "if", "self", ".", "_z", "is", "None", ":", "self", ".", "_add_minimization_vars", "(", ")", "positive", "=", "set", "(", "subset_k", ")", "-", "self", ".", "_flipped", "negative", "=", "set", "(", "subset_k", ")", "&", "self", ".", "_flipped", "v", "=", "self", ".", "_v", ".", "set", "(", "positive", ")", "cs", "=", "self", ".", "_prob", ".", "add_linear_constraints", "(", "v", ">=", "self", ".", "_epsilon", ")", "self", ".", "_temp_constr", ".", "extend", "(", "cs", ")", "v", "=", "self", ".", "_v", ".", "set", "(", "negative", ")", "cs", "=", "self", ".", "_prob", ".", "add_linear_constraints", "(", "v", "<=", "-", "self", ".", "_epsilon", ")", "self", ".", "_temp_constr", ".", "extend", "(", "cs", ")", "self", ".", "_prob", ".", "set_objective", "(", "self", ".", "_z", ".", "expr", "(", "(", "rxnid", ",", "-", "weights", ".", "get", "(", "rxnid", ",", "1", ")", ")", "for", "rxnid", "in", "subset_p", ")", ")", "self", ".", "_solve", "(", ")" ]
Force reactions in K above epsilon while minimizing support of P. This program forces reactions in subset K to attain flux > epsilon while minimizing the sum of absolute flux values for reactions in subset P (L1-regularization).
[ "Force", "reactions", "in", "K", "above", "epsilon", "while", "minimizing", "support", "of", "P", "." ]
python
train
34.115385
googledatalab/pydatalab
google/datalab/storage/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L124-L146
def object_download(self, bucket, key, start_offset=0, byte_count=None): """Reads the contents of an object as text. Args: bucket: the name of the bucket containing the object. key: the key of the object to be read. start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if the object could not be read from. """ args = {'alt': 'media'} headers = {} if start_offset > 0 or byte_count is not None: header = 'bytes=%d-' % start_offset if byte_count is not None: header += '%d' % byte_count headers['Range'] = header url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))) return google.datalab.utils.Http.request(url, args=args, headers=headers, credentials=self._credentials, raw_response=True)
[ "def", "object_download", "(", "self", ",", "bucket", ",", "key", ",", "start_offset", "=", "0", ",", "byte_count", "=", "None", ")", ":", "args", "=", "{", "'alt'", ":", "'media'", "}", "headers", "=", "{", "}", "if", "start_offset", ">", "0", "or", "byte_count", "is", "not", "None", ":", "header", "=", "'bytes=%d-'", "%", "start_offset", "if", "byte_count", "is", "not", "None", ":", "header", "+=", "'%d'", "%", "byte_count", "headers", "[", "'Range'", "]", "=", "header", "url", "=", "Api", ".", "_DOWNLOAD_ENDPOINT", "+", "(", "Api", ".", "_OBJECT_PATH", "%", "(", "bucket", ",", "Api", ".", "_escape_key", "(", "key", ")", ")", ")", "return", "google", ".", "datalab", ".", "utils", ".", "Http", ".", "request", "(", "url", ",", "args", "=", "args", ",", "headers", "=", "headers", ",", "credentials", "=", "self", ".", "_credentials", ",", "raw_response", "=", "True", ")" ]
Reads the contents of an object as text. Args: bucket: the name of the bucket containing the object. key: the key of the object to be read. start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if the object could not be read from.
[ "Reads", "the", "contents", "of", "an", "object", "as", "text", "." ]
python
train
42.347826
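A hedged call sketch; api stands for a constructed Api instance. One caveat worth knowing: the code appends byte_count directly after 'bytes=%d-', so the second number in the Range header is effectively an end offset rather than a count of bytes from start_offset:

text = api.object_download('my-bucket', 'logs/app.txt',
                           start_offset=0, byte_count=1023)
# sends 'Range: bytes=0-1023', i.e. the first 1024 bytes of the object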
romanz/trezor-agent
libagent/gpg/protocol.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L209-L217
def data(self): """Data for packet creation.""" header = struct.pack('>BLB', 4, # version self.created, # creation self.algo_id) # public key algorithm ID oid = util.prefix_len('>B', self.curve_info['oid']) blob = self.curve_info['serialize'](self.verifying_key) return header + oid + blob + self.ecdh_packet
[ "def", "data", "(", "self", ")", ":", "header", "=", "struct", ".", "pack", "(", "'>BLB'", ",", "4", ",", "# version", "self", ".", "created", ",", "# creation", "self", ".", "algo_id", ")", "# public key algorithm ID", "oid", "=", "util", ".", "prefix_len", "(", "'>B'", ",", "self", ".", "curve_info", "[", "'oid'", "]", ")", "blob", "=", "self", ".", "curve_info", "[", "'serialize'", "]", "(", "self", ".", "verifying_key", ")", "return", "header", "+", "oid", "+", "blob", "+", "self", ".", "ecdh_packet" ]
Data for packet creation.
[ "Data", "for", "packet", "creation", "." ]
python
train
49
jobovy/galpy
galpy/potential/MiyamotoNagaiPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/MiyamotoNagaiPotential.py#L106-L129
def _zforce(self,R,z,phi=0.,t=0.): """ NAME: _zforce PURPOSE: evaluate the vertical force for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the vertical force HISTORY: 2010-07-09 - Written - Bovy (NYU) """ sqrtbz= nu.sqrt(self._b2+z**2.) asqrtbz= self._a+sqrtbz if isinstance(R,float) and sqrtbz == asqrtbz: return (-z/ (R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.)) else: return (-z*asqrtbz/sqrtbz/ (R**2.+(self._a+nu.sqrt(z**2.+self._b2))**2.)**(3./2.))
[ "def", "_zforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "sqrtbz", "=", "nu", ".", "sqrt", "(", "self", ".", "_b2", "+", "z", "**", "2.", ")", "asqrtbz", "=", "self", ".", "_a", "+", "sqrtbz", "if", "isinstance", "(", "R", ",", "float", ")", "and", "sqrtbz", "==", "asqrtbz", ":", "return", "(", "-", "z", "/", "(", "R", "**", "2.", "+", "(", "self", ".", "_a", "+", "nu", ".", "sqrt", "(", "z", "**", "2.", "+", "self", ".", "_b2", ")", ")", "**", "2.", ")", "**", "(", "3.", "/", "2.", ")", ")", "else", ":", "return", "(", "-", "z", "*", "asqrtbz", "/", "sqrtbz", "/", "(", "R", "**", "2.", "+", "(", "self", ".", "_a", "+", "nu", ".", "sqrt", "(", "z", "**", "2.", "+", "self", ".", "_b2", ")", ")", "**", "2.", ")", "**", "(", "3.", "/", "2.", ")", ")" ]
NAME: _zforce PURPOSE: evaluate the vertical force for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the vertical force HISTORY: 2010-07-09 - Written - Bovy (NYU)
[ "NAME", ":", "_zforce", "PURPOSE", ":", "evaluate", "the", "vertical", "force", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "vertical", "force", "HISTORY", ":", "2010", "-", "07", "-", "09", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
31.166667
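For reference, the closed form implemented by the branches above (with the potential's amplitude factored out) is the Miyamoto-Nagai vertical force; the special-cased first branch covers inputs where adding self._a leaves sqrtbz unchanged (notably a = 0), so the asqrtbz/sqrtbz ratio drops out:

F_z(R,z) = -\frac{z\,\bigl(a + \sqrt{b^2 + z^2}\bigr)}{\sqrt{b^2 + z^2}\,\bigl[R^2 + \bigl(a + \sqrt{b^2 + z^2}\bigr)^2\bigr]^{3/2}}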
quodlibet/mutagen
mutagen/ogg.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/ogg.py#L243-L276
def to_packets(pages, strict=False): """Construct a list of packet data from a list of Ogg pages. If strict is true, the first page must start a new packet, and the last page must end the last packet. """ serial = pages[0].serial sequence = pages[0].sequence packets = [] if strict: if pages[0].continued: raise ValueError("first packet is continued") if not pages[-1].complete: raise ValueError("last packet does not complete") elif pages and pages[0].continued: packets.append([b""]) for page in pages: if serial != page.serial: raise ValueError("invalid serial number in %r" % page) elif sequence != page.sequence: raise ValueError("bad sequence number in %r" % page) else: sequence += 1 if page.continued: packets[-1].append(page.packets[0]) else: packets.append([page.packets[0]]) packets.extend([p] for p in page.packets[1:]) return [b"".join(p) for p in packets]
[ "def", "to_packets", "(", "pages", ",", "strict", "=", "False", ")", ":", "serial", "=", "pages", "[", "0", "]", ".", "serial", "sequence", "=", "pages", "[", "0", "]", ".", "sequence", "packets", "=", "[", "]", "if", "strict", ":", "if", "pages", "[", "0", "]", ".", "continued", ":", "raise", "ValueError", "(", "\"first packet is continued\"", ")", "if", "not", "pages", "[", "-", "1", "]", ".", "complete", ":", "raise", "ValueError", "(", "\"last packet does not complete\"", ")", "elif", "pages", "and", "pages", "[", "0", "]", ".", "continued", ":", "packets", ".", "append", "(", "[", "b\"\"", "]", ")", "for", "page", "in", "pages", ":", "if", "serial", "!=", "page", ".", "serial", ":", "raise", "ValueError", "(", "\"invalid serial number in %r\"", "%", "page", ")", "elif", "sequence", "!=", "page", ".", "sequence", ":", "raise", "ValueError", "(", "\"bad sequence number in %r\"", "%", "page", ")", "else", ":", "sequence", "+=", "1", "if", "page", ".", "continued", ":", "packets", "[", "-", "1", "]", ".", "append", "(", "page", ".", "packets", "[", "0", "]", ")", "else", ":", "packets", ".", "append", "(", "[", "page", ".", "packets", "[", "0", "]", "]", ")", "packets", ".", "extend", "(", "[", "p", "]", "for", "p", "in", "page", ".", "packets", "[", "1", ":", "]", ")", "return", "[", "b\"\"", ".", "join", "(", "p", ")", "for", "p", "in", "packets", "]" ]
Construct a list of packet data from a list of Ogg pages. If strict is true, the first page must start a new packet, and the last page must end the last packet.
[ "Construct", "a", "list", "of", "packet", "data", "from", "a", "list", "of", "Ogg", "pages", "." ]
python
train
33.823529
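A hedged reassembly sketch around the helper above; in mutagen, OggPage reads one page per construction and raises EOFError at end of file, though treat that loop condition as an assumption:

with open('audio.ogg', 'rb') as fileobj:
    pages = []
    while True:
        try:
            pages.append(OggPage(fileobj))
        except EOFError:
            break

first_serial = pages[0].serial
# to_packets requires a single logical stream, so filter by serial number
packets = OggPage.to_packets([p for p in pages if p.serial == first_serial])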
saltstack/salt
salt/cloud/clouds/gce.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L452-L466
def __get_host(node, vm_): ''' Return public IP, private IP, or hostname for the libcloud 'node' object ''' if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None: ip_address = node.private_ips[0] log.info('Salt node data. Private_ip: %s', ip_address) else: ip_address = node.public_ips[0] log.info('Salt node data. Public_ip: %s', ip_address) if ip_address: return ip_address return node.name
[ "def", "__get_host", "(", "node", ",", "vm_", ")", ":", "if", "__get_ssh_interface", "(", "vm_", ")", "==", "'private_ips'", "or", "vm_", "[", "'external_ip'", "]", "is", "None", ":", "ip_address", "=", "node", ".", "private_ips", "[", "0", "]", "log", ".", "info", "(", "'Salt node data. Private_ip: %s'", ",", "ip_address", ")", "else", ":", "ip_address", "=", "node", ".", "public_ips", "[", "0", "]", "log", ".", "info", "(", "'Salt node data. Public_ip: %s'", ",", "ip_address", ")", "if", "ip_address", ":", "return", "ip_address", "return", "node", ".", "name" ]
Return public IP, private IP, or hostname for the libcloud 'node' object
[ "Return", "public", "IP", "private", "IP", "or", "hostname", "for", "the", "libcloud", "node", "object" ]
python
train
31.266667
recurly/recurly-client-python
recurly/__init__.py
https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/__init__.py#L346-L360
def update_billing_info(self, billing_info): """Change this account's billing information to the given `BillingInfo`.""" url = urljoin(self._url, '/billing_info') response = billing_info.http_request(url, 'PUT', billing_info, {'Content-Type': 'application/xml; charset=utf-8'}) if response.status == 200: pass elif response.status == 201: billing_info._url = response.getheader('Location') else: billing_info.raise_http_error(response) response_xml = response.read() logging.getLogger('recurly.http.response').debug(response_xml) billing_info.update_from_element(ElementTree.fromstring(response_xml))
[ "def", "update_billing_info", "(", "self", ",", "billing_info", ")", ":", "url", "=", "urljoin", "(", "self", ".", "_url", ",", "'/billing_info'", ")", "response", "=", "billing_info", ".", "http_request", "(", "url", ",", "'PUT'", ",", "billing_info", ",", "{", "'Content-Type'", ":", "'application/xml; charset=utf-8'", "}", ")", "if", "response", ".", "status", "==", "200", ":", "pass", "elif", "response", ".", "status", "==", "201", ":", "billing_info", ".", "_url", "=", "response", ".", "getheader", "(", "'Location'", ")", "else", ":", "billing_info", ".", "raise_http_error", "(", "response", ")", "response_xml", "=", "response", ".", "read", "(", ")", "logging", ".", "getLogger", "(", "'recurly.http.response'", ")", ".", "debug", "(", "response_xml", ")", "billing_info", ".", "update_from_element", "(", "ElementTree", ".", "fromstring", "(", "response_xml", ")", ")" ]
Change this account's billing information to the given `BillingInfo`.
[ "Change", "this", "account", "s", "billing", "information", "to", "the", "given", "BillingInfo", "." ]
python
train
47.133333
UCL-INGI/INGInious
inginious/frontend/accessible_time.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/accessible_time.py#L11-L25
def parse_date(date, default=None): """ Parse a valid date """ if date == "": if default is not None: return default else: raise Exception("Unknown format for " + date) for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y"]: try: return datetime.strptime(date, format_type) except ValueError: pass raise Exception("Unknown format for " + date)
[ "def", "parse_date", "(", "date", ",", "default", "=", "None", ")", ":", "if", "date", "==", "\"\"", ":", "if", "default", "is", "not", "None", ":", "return", "default", "else", ":", "raise", "Exception", "(", "\"Unknown format for \"", "+", "date", ")", "for", "format_type", "in", "[", "\"%Y-%m-%d %H:%M:%S\"", ",", "\"%Y-%m-%d %H:%M\"", ",", "\"%Y-%m-%d %H\"", ",", "\"%Y-%m-%d\"", ",", "\"%d/%m/%Y %H:%M:%S\"", ",", "\"%d/%m/%Y %H:%M\"", ",", "\"%d/%m/%Y %H\"", ",", "\"%d/%m/%Y\"", "]", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "date", ",", "format_type", ")", "except", "ValueError", ":", "pass", "raise", "Exception", "(", "\"Unknown format for \"", "+", "date", ")" ]
Parse a valid date
[ "Parse", "a", "valid", "date" ]
python
train
36.466667
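Behaviour of the fallback chain above, shown on the three interesting inputs:

from datetime import datetime

parse_date('2020-01-31 14:30')                # datetime(2020, 1, 31, 14, 30)
parse_date('', default=datetime(1970, 1, 1))  # empty string -> the default
parse_date('31.01.2020')                      # no format matches -> Exception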
etcher-be/epab
epab/utils/_repo.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L54-L71
def list_tags(self, pattern: str = None) -> typing.List[str]: """ Returns list of tags, optionally matching "pattern" :param pattern: optional pattern to filter results :type pattern: str :return: existing tags :rtype: list of str """ tags: typing.List[str] = [str(tag) for tag in self.repo.tags] if not pattern: LOGGER.debug('tags found in repo: %s', tags) return tags LOGGER.debug('filtering tags with pattern: %s', pattern) filtered_tags: typing.List[str] = [tag for tag in tags if pattern in tag] LOGGER.debug('filtered tags: %s', filtered_tags) return filtered_tags
[ "def", "list_tags", "(", "self", ",", "pattern", ":", "str", "=", "None", ")", "->", "typing", ".", "List", "[", "str", "]", ":", "tags", ":", "typing", ".", "List", "[", "str", "]", "=", "[", "str", "(", "tag", ")", "for", "tag", "in", "self", ".", "repo", ".", "tags", "]", "if", "not", "pattern", ":", "LOGGER", ".", "debug", "(", "'tags found in repo: %s'", ",", "tags", ")", "return", "tags", "LOGGER", ".", "debug", "(", "'filtering tags with pattern: %s'", ",", "pattern", ")", "filtered_tags", ":", "typing", ".", "List", "[", "str", "]", "=", "[", "tag", "for", "tag", "in", "tags", "if", "pattern", "in", "tag", "]", "LOGGER", ".", "debug", "(", "'filtered tags: %s'", ",", "filtered_tags", ")", "return", "filtered_tags" ]
Returns list of tags, optionally matching "pattern" :param pattern: optional pattern to filter results :type pattern: str :return: existing tags :rtype: list of str
[ "Returns", "list", "of", "tags", "optionally", "matching", "pattern" ]
python
train
37.944444
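Note that pattern is plain substring containment, not a glob or regex. A short sketch, assuming repo is an instance of this wrapper:

all_tags = repo.list_tags()      # every tag in the repository
v1_tags = repo.list_tags('v1.')  # only tags whose name contains 'v1.'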
jhuapl-boss/intern
intern/service/boss/project.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/project.py#L376-L386
def delete_user(self, user): """Delete the given user. Args: user (string): User name. Raises: requests.HTTPError on failure. """ self.service.delete_user( user, self.url_prefix, self.auth, self.session, self.session_send_opts)
[ "def", "delete_user", "(", "self", ",", "user", ")", ":", "self", ".", "service", ".", "delete_user", "(", "user", ",", "self", ".", "url_prefix", ",", "self", ".", "auth", ",", "self", ".", "session", ",", "self", ".", "session_send_opts", ")" ]
Delete the given user. Args: user (string): User name. Raises: requests.HTTPError on failure.
[ "Delete", "the", "given", "user", "." ]
python
train
26.818182
pysathq/pysat
pysat/card.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/card.py#L220-L282
def atleast(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter): """ This method can be used for creating a CNF encoding of an AtLeastK constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method takes 1 mandatory argument ``lits`` and 3 default arguments can be specified: ``bound``, ``top_id``, and ``encoding``. :param lits: a list of literals in the sum. :param bound: the value of bound :math:`k`. :param top_id: top variable identifier used so far. :param encoding: identifier of the encoding to use. :type lits: iterable(int) :type bound: int :type top_id: integer or None :type encoding: integer Parameter ``top_id`` serves to increase integer identifiers of auxiliary variables introduced during the encoding process. This is helpful when augmenting an existing CNF formula with the new cardinality encoding to make sure there is no collision between identifiers of the variables. If specified the identifiers of the first auxiliary variable will be ``top_id+1``. The default value of ``encoding`` is :attr:`Enctype.seqcounter`. The method *translates* the AtLeast constraint into an AtMost constraint by *negating* the literals of ``lits``, creating a new bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the modified list of literals and the new bound. :raises CardEnc.NoSuchEncodingError: if encoding does not exist. :rtype: a :class:`.CNFPlus` object where the new \ clauses (or the new native atmost constraint) are stored. """ if encoding < 0 or encoding > 9: raise(NoSuchEncodingError(encoding)) if not top_id: top_id = max(map(lambda x: abs(x), lits)) # we are going to return this formula ret = CNFPlus() # Minicard's native representation is handled separately if encoding == 9: ret.atmosts, ret.nv = [([-l for l in lits], len(lits) - bound)], top_id return ret # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) res = pycard.encode_atleast(lits, bound, top_id, encoding) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if res: ret.clauses, ret.nv = res return ret
[ "def", "atleast", "(", "cls", ",", "lits", ",", "bound", "=", "1", ",", "top_id", "=", "None", ",", "encoding", "=", "EncType", ".", "seqcounter", ")", ":", "if", "encoding", "<", "0", "or", "encoding", ">", "9", ":", "raise", "(", "NoSuchEncodingError", "(", "encoding", ")", ")", "if", "not", "top_id", ":", "top_id", "=", "max", "(", "map", "(", "lambda", "x", ":", "abs", "(", "x", ")", ",", "lits", ")", ")", "# we are going to return this formula", "ret", "=", "CNFPlus", "(", ")", "# Minicard's native representation is handled separately", "if", "encoding", "==", "9", ":", "ret", ".", "atmosts", ",", "ret", ".", "nv", "=", "[", "(", "[", "-", "l", "for", "l", "in", "lits", "]", ",", "len", "(", "lits", ")", "-", "bound", ")", "]", ",", "top_id", "return", "ret", "# saving default SIGINT handler", "def_sigint_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_DFL", ")", "res", "=", "pycard", ".", "encode_atleast", "(", "lits", ",", "bound", ",", "top_id", ",", "encoding", ")", "# recovering default SIGINT handler", "def_sigint_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "def_sigint_handler", ")", "if", "res", ":", "ret", ".", "clauses", ",", "ret", ".", "nv", "=", "res", "return", "ret" ]
This method can be used for creating a CNF encoding of an AtLeastK constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method takes 1 mandatory argument ``lits``, and 3 optional arguments can be specified: ``bound``, ``top_id``, and ``encoding``. :param lits: a list of literals in the sum. :param bound: the value of bound :math:`k`. :param top_id: top variable identifier used so far. :param encoding: identifier of the encoding to use. :type lits: iterable(int) :type bound: int :type top_id: integer or None :type encoding: integer Parameter ``top_id`` serves to increase integer identifiers of auxiliary variables introduced during the encoding process. This is helpful when augmenting an existing CNF formula with the new cardinality encoding to make sure there is no collision between identifiers of the variables. If specified, the identifier of the first auxiliary variable will be ``top_id+1``. The default value of ``encoding`` is :attr:`EncType.seqcounter`. The method *translates* the AtLeast constraint into an AtMost constraint by *negating* the literals of ``lits``, creating a new bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the modified list of literals and the new bound. :raises CardEnc.NoSuchEncodingError: if encoding does not exist. :rtype: a :class:`.CNFPlus` object where the new \ clauses (or the new native atmost constraint) are stored.
[ "This", "method", "can", "be", "used", "for", "creating", "a", "CNF", "encoding", "of", "an", "AtLeastK", "constraint", "i", ".", "e", ".", "of", ":", "math", ":", "\\", "sum_", "{", "i", "=", "1", "}", "^", "{", "n", "}", "{", "x_i", "}", "\\", "geq", "k", ".", "The", "method", "takes", "1", "mandatory", "argument", "lits", "and", "3", "default", "arguments", "can", "be", "specified", ":", "bound", "top_id", "and", "encoding", "." ]
python
train
40.634921
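A quick usage sketch of the method above, assuming a local PySAT installation; the literals and bound are made-up illustration values:

from pysat.card import CardEnc, EncType

# Encode "at least 2 of the literals 1, 2, 3 are true" into clauses.
cnf = CardEnc.atleast(lits=[1, 2, 3], bound=2, encoding=EncType.seqcounter)
print(cnf.clauses)  # CNF clauses over the original and auxiliary variables
print(cnf.nv)       # highest variable identifier used by the encoding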
rwl/pylon
pylon/io/psat.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psat.py#L281-L316
def _get_supply_array_construct(self): """ Returns a construct for an array of power supply data. """ bus_no = integer.setResultsName("bus_no") s_rating = real.setResultsName("s_rating") # MVA p_direction = real.setResultsName("p_direction") # CPF p_bid_max = real.setResultsName("p_bid_max") # p.u. p_bid_min = real.setResultsName("p_bid_min") # p.u. p_bid_actual = real.setResultsName("p_bid_actual") # p.u. p_fixed = real.setResultsName("p_fixed") # $/hr p_proportional = real.setResultsName("p_proportional") # $/MWh p_quadratic = real.setResultsName("p_quadratic") # $/MW^2h q_fixed = real.setResultsName("q_fixed") # $/hr q_proportional = real.setResultsName("q_proportional") # $/MVArh q_quadratic = real.setResultsName("q_quadratic") # $/MVAr^2h commitment = boolean.setResultsName("commitment") cost_tie_break = real.setResultsName("cost_tie_break") # $/MWh lp_factor = real.setResultsName("lp_factor")# Loss participation factor q_max = real.setResultsName("q_max") # p.u. q_min = real.setResultsName("q_min") # p.u. cost_cong_up = real.setResultsName("cost_cong_up") # $/h cost_cong_down = real.setResultsName("cost_cong_down") # $/h status = Optional(boolean).setResultsName("status") supply_data = bus_no + s_rating + p_direction + p_bid_max + \ p_bid_min + p_bid_actual + p_fixed + p_proportional + \ p_quadratic + q_fixed + q_proportional + q_quadratic + \ commitment + cost_tie_break + lp_factor + q_max + q_min + \ cost_cong_up + cost_cong_down + status + scolon supply_data.setParseAction(self.push_supply) supply_array = Literal("Supply.con") + "=" + "[" + "..." + \ ZeroOrMore(supply_data + Optional("]" + scolon)) return supply_array
[ "def", "_get_supply_array_construct", "(", "self", ")", ":", "bus_no", "=", "integer", ".", "setResultsName", "(", "\"bus_no\"", ")", "s_rating", "=", "real", ".", "setResultsName", "(", "\"s_rating\"", ")", "# MVA", "p_direction", "=", "real", ".", "setResultsName", "(", "\"p_direction\"", ")", "# CPF", "p_bid_max", "=", "real", ".", "setResultsName", "(", "\"p_bid_max\"", ")", "# p.u.", "p_bid_min", "=", "real", ".", "setResultsName", "(", "\"p_bid_min\"", ")", "# p.u.", "p_bid_actual", "=", "real", ".", "setResultsName", "(", "\"p_bid_actual\"", ")", "# p.u.", "p_fixed", "=", "real", ".", "setResultsName", "(", "\"p_fixed\"", ")", "# $/hr", "p_proportional", "=", "real", ".", "setResultsName", "(", "\"p_proportional\"", ")", "# $/MWh", "p_quadratic", "=", "real", ".", "setResultsName", "(", "\"p_quadratic\"", ")", "# $/MW^2h", "q_fixed", "=", "real", ".", "setResultsName", "(", "\"q_fixed\"", ")", "# $/hr", "q_proportional", "=", "real", ".", "setResultsName", "(", "\"q_proportional\"", ")", "# $/MVArh", "q_quadratic", "=", "real", ".", "setResultsName", "(", "\"q_quadratic\"", ")", "# $/MVAr^2h", "commitment", "=", "boolean", ".", "setResultsName", "(", "\"commitment\"", ")", "cost_tie_break", "=", "real", ".", "setResultsName", "(", "\"cost_tie_break\"", ")", "# $/MWh", "lp_factor", "=", "real", ".", "setResultsName", "(", "\"lp_factor\"", ")", "# Loss participation factor", "q_max", "=", "real", ".", "setResultsName", "(", "\"q_max\"", ")", "# p.u.", "q_min", "=", "real", ".", "setResultsName", "(", "\"q_min\"", ")", "# p.u.", "cost_cong_up", "=", "real", ".", "setResultsName", "(", "\"cost_cong_up\"", ")", "# $/h", "cost_cong_down", "=", "real", ".", "setResultsName", "(", "\"cost_cong_down\"", ")", "# $/h", "status", "=", "Optional", "(", "boolean", ")", ".", "setResultsName", "(", "\"status\"", ")", "supply_data", "=", "bus_no", "+", "s_rating", "+", "p_direction", "+", "p_bid_max", "+", "p_bid_min", "+", "p_bid_actual", "+", "p_fixed", "+", "p_proportional", "+", "p_quadratic", "+", "q_fixed", "+", "q_proportional", "+", "q_quadratic", "+", "commitment", "+", "cost_tie_break", "+", "lp_factor", "+", "q_max", "+", "q_min", "+", "cost_cong_up", "+", "cost_cong_down", "+", "status", "+", "scolon", "supply_data", ".", "setParseAction", "(", "self", ".", "push_supply", ")", "supply_array", "=", "Literal", "(", "\"Supply.con\"", ")", "+", "\"=\"", "+", "\"[\"", "+", "\"...\"", "+", "ZeroOrMore", "(", "supply_data", "+", "Optional", "(", "\"]\"", "+", "scolon", ")", ")", "return", "supply_array" ]
Returns a construct for an array of power supply data.
[ "Returns", "a", "construct", "for", "an", "array", "of", "power", "supply", "data", "." ]
python
train
52.527778
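The method above leans on module-level pyparsing helpers (``integer``, ``real``, ``scolon``) defined elsewhere in pylon. A self-contained sketch of the same named-field pattern, with hypothetical stand-ins for those helpers:

from pyparsing import Literal, Optional, Regex, ZeroOrMore

# Stand-ins for pylon's shared grammar elements (assumptions, not pylon's code).
integer = Regex(r'[+-]?\d+')
real = Regex(r'[+-]?\d+(\.\d*)?([eE][+-]?\d+)?')
scolon = Literal(';')

row = integer.setResultsName('bus_no') + real.setResultsName('s_rating') + scolon
array = Literal('Demo.con') + '=' + '[' + ZeroOrMore(row + Optional(']' + scolon))

print(array.parseString('Demo.con = [ 1 100.0; 2 250.5; ];'))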
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L2464-L2528
def processRequest(self, request: Request, frm: str): """ Handle a REQUEST from the client. If the request has already been executed, the node re-sends the reply to the client. Otherwise, the node acknowledges the client request, adds it to its list of client requests, and sends a PROPAGATE to the remaining nodes. :param request: the REQUEST from the client :param frm: the name of the client that sent this REQUEST """ logger.debug("{} received client request: {} from {}". format(self.name, request, frm)) self.nodeRequestSpikeMonitorData['accum'] += 1 # TODO: What if client sends requests with same request id quickly so # before reply for one is generated, the other comes. In that # case we need to keep track of what requests ids node has seen # in-memory and once request with a particular request id is processed, # it should be removed from that in-memory DS. # If request is already processed(there is a reply for the # request in # the node's transaction store then return the reply from the # transaction store) # TODO: What if the reply was a REQNACK? Its not gonna be found in the # replies. txn_type = request.operation[TXN_TYPE] if self.is_action(txn_type): self.process_action(request, frm) elif txn_type == GET_TXN: self.handle_get_txn_req(request, frm) self.total_read_request_number += 1 elif self.is_query(txn_type): self.process_query(request, frm) self.total_read_request_number += 1 elif self.can_write_txn(txn_type): reply = self.getReplyFromLedgerForRequest(request) if reply: logger.debug("{} returning reply from already processed " "REQUEST: {}".format(self, request)) self.transmitToClient(reply, frm) return # If the node is not already processing the request if not self.isProcessingReq(request.key): self.startedProcessingReq(request.key, frm) # forced request should be processed before consensus self.handle_request_if_forced(request) # If not already got the propagate request(PROPAGATE) for the # corresponding client request(REQUEST) self.recordAndPropagate(request, frm) self.send_ack_to_client((request.identifier, request.reqId), frm) else: raise InvalidClientRequest( request.identifier, request.reqId, 'Pool is in readonly mode, try again in 60 seconds')
[ "def", "processRequest", "(", "self", ",", "request", ":", "Request", ",", "frm", ":", "str", ")", ":", "logger", ".", "debug", "(", "\"{} received client request: {} from {}\"", ".", "format", "(", "self", ".", "name", ",", "request", ",", "frm", ")", ")", "self", ".", "nodeRequestSpikeMonitorData", "[", "'accum'", "]", "+=", "1", "# TODO: What if client sends requests with same request id quickly so", "# before reply for one is generated, the other comes. In that", "# case we need to keep track of what requests ids node has seen", "# in-memory and once request with a particular request id is processed,", "# it should be removed from that in-memory DS.", "# If request is already processed(there is a reply for the", "# request in", "# the node's transaction store then return the reply from the", "# transaction store)", "# TODO: What if the reply was a REQNACK? Its not gonna be found in the", "# replies.", "txn_type", "=", "request", ".", "operation", "[", "TXN_TYPE", "]", "if", "self", ".", "is_action", "(", "txn_type", ")", ":", "self", ".", "process_action", "(", "request", ",", "frm", ")", "elif", "txn_type", "==", "GET_TXN", ":", "self", ".", "handle_get_txn_req", "(", "request", ",", "frm", ")", "self", ".", "total_read_request_number", "+=", "1", "elif", "self", ".", "is_query", "(", "txn_type", ")", ":", "self", ".", "process_query", "(", "request", ",", "frm", ")", "self", ".", "total_read_request_number", "+=", "1", "elif", "self", ".", "can_write_txn", "(", "txn_type", ")", ":", "reply", "=", "self", ".", "getReplyFromLedgerForRequest", "(", "request", ")", "if", "reply", ":", "logger", ".", "debug", "(", "\"{} returning reply from already processed \"", "\"REQUEST: {}\"", ".", "format", "(", "self", ",", "request", ")", ")", "self", ".", "transmitToClient", "(", "reply", ",", "frm", ")", "return", "# If the node is not already processing the request", "if", "not", "self", ".", "isProcessingReq", "(", "request", ".", "key", ")", ":", "self", ".", "startedProcessingReq", "(", "request", ".", "key", ",", "frm", ")", "# forced request should be processed before consensus", "self", ".", "handle_request_if_forced", "(", "request", ")", "# If not already got the propagate request(PROPAGATE) for the", "# corresponding client request(REQUEST)", "self", ".", "recordAndPropagate", "(", "request", ",", "frm", ")", "self", ".", "send_ack_to_client", "(", "(", "request", ".", "identifier", ",", "request", ".", "reqId", ")", ",", "frm", ")", "else", ":", "raise", "InvalidClientRequest", "(", "request", ".", "identifier", ",", "request", ".", "reqId", ",", "'Pool is in readonly mode, try again in 60 seconds'", ")" ]
Handle a REQUEST from the client. If the request has already been executed, the node re-sends the reply to the client. Otherwise, the node acknowledges the client request, adds it to its list of client requests, and sends a PROPAGATE to the remaining nodes. :param request: the REQUEST from the client :param frm: the name of the client that sent this REQUEST
[ "Handle", "a", "REQUEST", "from", "the", "client", ".", "If", "the", "request", "has", "already", "been", "executed", "the", "node", "re", "-", "sends", "the", "reply", "to", "the", "client", ".", "Otherwise", "the", "node", "acknowledges", "the", "client", "request", "adds", "it", "to", "its", "list", "of", "client", "requests", "and", "sends", "a", "PROPAGATE", "to", "the", "remaining", "nodes", "." ]
python
train
42.046154
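A generic sketch of the routing idiom in processRequest — dispatch on transaction type, count reads, and reject unknown types; the names here are hypothetical, not plenum's API:

def route_request(txn_type, request, handlers, counters):
    # handlers: dict mapping txn type -> callable; counters: mutable stats
    if txn_type in handlers:
        if txn_type in ('GET_TXN', 'QUERY'):
            counters['reads'] = counters.get('reads', 0) + 1
        return handlers[txn_type](request)
    raise ValueError('unsupported transaction type: %r' % txn_type)

stats = {}
print(route_request('QUERY', {'reqId': 1},
                    {'QUERY': lambda r: ('read', r)}, stats), stats)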
datascopeanalytics/scrubadub
scrubadub/scrubbers.py
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L44-L62
def clean(self, text, **kwargs): """This is the master method that cleans all of the filth out of the dirty dirty ``text``. All keyword arguments to this function are passed through to the ``Filth.replace_with`` method to fine-tune how the ``Filth`` is cleaned. """ if sys.version_info < (3, 0): # Only in Python 2. In 3 every string is a Python 2 unicode if not isinstance(text, unicode): raise exceptions.UnicodeRequired clean_chunks = [] filth = Filth() for next_filth in self.iter_filth(text): clean_chunks.append(text[filth.end:next_filth.beg]) clean_chunks.append(next_filth.replace_with(**kwargs)) filth = next_filth clean_chunks.append(text[filth.end:]) return u''.join(clean_chunks)
[ "def", "clean", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "# Only in Python 2. In 3 every string is a Python 2 unicode", "if", "not", "isinstance", "(", "text", ",", "unicode", ")", ":", "raise", "exceptions", ".", "UnicodeRequired", "clean_chunks", "=", "[", "]", "filth", "=", "Filth", "(", ")", "for", "next_filth", "in", "self", ".", "iter_filth", "(", "text", ")", ":", "clean_chunks", ".", "append", "(", "text", "[", "filth", ".", "end", ":", "next_filth", ".", "beg", "]", ")", "clean_chunks", ".", "append", "(", "next_filth", ".", "replace_with", "(", "*", "*", "kwargs", ")", ")", "filth", "=", "next_filth", "clean_chunks", ".", "append", "(", "text", "[", "filth", ".", "end", ":", "]", ")", "return", "u''", ".", "join", "(", "clean_chunks", ")" ]
This is the master method that cleans all of the filth out of the dirty dirty ``text``. All keyword arguments to this function are passed through to the ``Filth.replace_with`` method to fine-tune how the ``Filth`` is cleaned.
[ "This", "is", "the", "master", "method", "that", "cleans", "all", "of", "the", "filth", "out", "of", "the", "dirty", "dirty", "text", ".", "All", "keyword", "arguments", "to", "this", "function", "are", "passed", "through", "to", "the", "Filth", ".", "replace_with", "method", "to", "fine", "-", "tune", "how", "the", "Filth", "is", "cleaned", "." ]
python
train
44.157895
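Typical end-user call for the scrubber above, through the module-level convenience wrapper; the exact placeholder text depends on the installed detectors:

import scrubadub

dirty = 'contact John at john@example.com'
print(scrubadub.clean(dirty))  # e.g. 'contact {{NAME}} at {{EMAIL}}'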
saltstack/salt
salt/modules/status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L1254-L1431
def netdev(): ''' .. versionchanged:: 2016.3.2 Return the network device stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' status.netdev ''' def linux_netdev(): ''' linux specific implementation of netdev ''' ret = {} try: with salt.utils.files.fopen('/proc/net/dev', 'r') as fp_: stats = salt.utils.stringutils.to_unicode(fp_.read()) except IOError: pass else: for line in stats.splitlines(): if not line: continue if line.find(':') < 0: continue comps = line.split() # Fix lines like eth0:9999..' comps[0] = line.split(':')[0].strip() # Support lines both like eth0:999 and eth0: 9999 comps.insert(1, line.split(':')[1].strip().split()[0]) ret[comps[0]] = {'iface': comps[0], 'rx_bytes': _number(comps[2]), 'rx_compressed': _number(comps[8]), 'rx_drop': _number(comps[5]), 'rx_errs': _number(comps[4]), 'rx_fifo': _number(comps[6]), 'rx_frame': _number(comps[7]), 'rx_multicast': _number(comps[9]), 'rx_packets': _number(comps[3]), 'tx_bytes': _number(comps[10]), 'tx_carrier': _number(comps[16]), 'tx_colls': _number(comps[15]), 'tx_compressed': _number(comps[17]), 'tx_drop': _number(comps[13]), 'tx_errs': _number(comps[12]), 'tx_fifo': _number(comps[14]), 'tx_packets': _number(comps[11])} return ret def freebsd_netdev(): ''' freebsd specific implementation of netdev ''' _dict_tree = lambda: collections.defaultdict(_dict_tree) ret = _dict_tree() netstat = __salt__['cmd.run']('netstat -i -n -4 -b -d').splitlines() netstat += __salt__['cmd.run']('netstat -i -n -6 -b -d').splitlines()[1:] header = netstat[0].split() for line in netstat[1:]: comps = line.split() for i in range(4, 13): # The columns we want ret[comps[0]][comps[2]][comps[3]][header[i]] = _number(comps[i]) return ret def sunos_netdev(): ''' sunos specific implementation of netdev ''' ret = {} ##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6 for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces']: # fetch device info netstat_ipv4 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet'.format(dev=dev)).splitlines() netstat_ipv6 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet6'.format(dev=dev)).splitlines() # prepare data netstat_ipv4[0] = netstat_ipv4[0].split() netstat_ipv4[1] = netstat_ipv4[1].split() netstat_ipv6[0] = netstat_ipv6[0].split() netstat_ipv6[1] = netstat_ipv6[1].split() # add data ret[dev] = {} for i in range(len(netstat_ipv4[0])-1): if netstat_ipv4[0][i] == 'Name': continue if netstat_ipv4[0][i] in ['Address', 'Net/Dest']: ret[dev]['IPv4 {field}'.format(field=netstat_ipv4[0][i])] = netstat_ipv4[1][i] else: ret[dev][netstat_ipv4[0][i]] = _number(netstat_ipv4[1][i]) for i in range(len(netstat_ipv6[0])-1): if netstat_ipv6[0][i] == 'Name': continue if netstat_ipv6[0][i] in ['Address', 'Net/Dest']: ret[dev]['IPv6 {field}'.format(field=netstat_ipv6[0][i])] = netstat_ipv6[1][i] else: ret[dev][netstat_ipv6[0][i]] = _number(netstat_ipv6[1][i]) return ret def aix_netdev(): ''' AIX specific implementation of netdev ''' ret = {} fields = [] procn = None for dev in __grains__['ip4_interfaces'].keys() + __grains__['ip6_interfaces'].keys(): # fetch device info #root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6 #Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll #en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0 #en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0 #root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6 
#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll #en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0 netstat_ipv4 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet'.format(dev=dev)).splitlines() netstat_ipv6 = __salt__['cmd.run']('netstat -i -n -I {dev} -f inet6'.format(dev=dev)).splitlines() # add data ret[dev] = [] for line in netstat_ipv4: if line.startswith('Name'): fields = line.split() continue comps = line.split() if len(comps) < 3: raise CommandExecutionError('Insufficent data returned by command to process \'{0}\''.format(line)) if comps[2].startswith('link'): continue procn = len(ret[dev]) ret[dev].append({}) ret[dev][procn]['ipv4'] = {} for i in range(1, len(fields)): if len(comps) > i: ret[dev][procn]['ipv4'][fields[i]] = comps[i] for line in netstat_ipv6: if line.startswith('Name'): fields = line.split() continue comps = line.split() if len(comps) < 3: raise CommandExecutionError('Insufficent data returned by command to process \'{0}\''.format(line)) if comps[2].startswith('link'): continue procn = len(ret[dev]) ret[dev].append({}) ret[dev][procn]['ipv6'] = {} for i in range(1, len(fields)): if len(comps) > i: ret[dev][procn]['ipv6'][fields[i]] = comps[i] return ret # dict that returns a function that does the right thing per platform get_version = { 'Linux': linux_netdev, 'FreeBSD': freebsd_netdev, 'SunOS': sunos_netdev, 'AIX': aix_netdev, } errmsg = 'This method is unsupported on the current operating system!' return get_version.get(__grains__['kernel'], lambda: errmsg)()
[ "def", "netdev", "(", ")", ":", "def", "linux_netdev", "(", ")", ":", "'''\n linux specific implementation of netdev\n '''", "ret", "=", "{", "}", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/proc/net/dev'", ",", "'r'", ")", "as", "fp_", ":", "stats", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fp_", ".", "read", "(", ")", ")", "except", "IOError", ":", "pass", "else", ":", "for", "line", "in", "stats", ".", "splitlines", "(", ")", ":", "if", "not", "line", ":", "continue", "if", "line", ".", "find", "(", "':'", ")", "<", "0", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "# Fix lines like eth0:9999..'", "comps", "[", "0", "]", "=", "line", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "strip", "(", ")", "# Support lines both like eth0:999 and eth0: 9999", "comps", ".", "insert", "(", "1", ",", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "0", "]", ")", "ret", "[", "comps", "[", "0", "]", "]", "=", "{", "'iface'", ":", "comps", "[", "0", "]", ",", "'rx_bytes'", ":", "_number", "(", "comps", "[", "2", "]", ")", ",", "'rx_compressed'", ":", "_number", "(", "comps", "[", "8", "]", ")", ",", "'rx_drop'", ":", "_number", "(", "comps", "[", "5", "]", ")", ",", "'rx_errs'", ":", "_number", "(", "comps", "[", "4", "]", ")", ",", "'rx_fifo'", ":", "_number", "(", "comps", "[", "6", "]", ")", ",", "'rx_frame'", ":", "_number", "(", "comps", "[", "7", "]", ")", ",", "'rx_multicast'", ":", "_number", "(", "comps", "[", "9", "]", ")", ",", "'rx_packets'", ":", "_number", "(", "comps", "[", "3", "]", ")", ",", "'tx_bytes'", ":", "_number", "(", "comps", "[", "10", "]", ")", ",", "'tx_carrier'", ":", "_number", "(", "comps", "[", "16", "]", ")", ",", "'tx_colls'", ":", "_number", "(", "comps", "[", "15", "]", ")", ",", "'tx_compressed'", ":", "_number", "(", "comps", "[", "17", "]", ")", ",", "'tx_drop'", ":", "_number", "(", "comps", "[", "13", "]", ")", ",", "'tx_errs'", ":", "_number", "(", "comps", "[", "12", "]", ")", ",", "'tx_fifo'", ":", "_number", "(", "comps", "[", "14", "]", ")", ",", "'tx_packets'", ":", "_number", "(", "comps", "[", "11", "]", ")", "}", "return", "ret", "def", "freebsd_netdev", "(", ")", ":", "'''\n freebsd specific implementation of netdev\n '''", "_dict_tree", "=", "lambda", ":", "collections", ".", "defaultdict", "(", "_dict_tree", ")", "ret", "=", "_dict_tree", "(", ")", "netstat", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -n -4 -b -d'", ")", ".", "splitlines", "(", ")", "netstat", "+=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -n -6 -b -d'", ")", ".", "splitlines", "(", ")", "[", "1", ":", "]", "header", "=", "netstat", "[", "0", "]", ".", "split", "(", ")", "for", "line", "in", "netstat", "[", "1", ":", "]", ":", "comps", "=", "line", ".", "split", "(", ")", "for", "i", "in", "range", "(", "4", ",", "13", ")", ":", "# The columns we want", "ret", "[", "comps", "[", "0", "]", "]", "[", "comps", "[", "2", "]", "]", "[", "comps", "[", "3", "]", "]", "[", "header", "[", "i", "]", "]", "=", "_number", "(", "comps", "[", "i", "]", ")", "return", "ret", "def", "sunos_netdev", "(", ")", ":", "'''\n sunos specific implementation of netdev\n '''", "ret", "=", "{", "}", "##NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6", "for", "dev", "in", "__grains__", "[", "'ip4_interfaces'", "]", ".", "keys", "(", ")", "+", "__grains__", "[", "'ip6_interfaces'", "]", ":", "# fetch device info", 
"netstat_ipv4", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -I {dev} -n -f inet'", ".", "format", "(", "dev", "=", "dev", ")", ")", ".", "splitlines", "(", ")", "netstat_ipv6", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -I {dev} -n -f inet6'", ".", "format", "(", "dev", "=", "dev", ")", ")", ".", "splitlines", "(", ")", "# prepare data", "netstat_ipv4", "[", "0", "]", "=", "netstat_ipv4", "[", "0", "]", ".", "split", "(", ")", "netstat_ipv4", "[", "1", "]", "=", "netstat_ipv4", "[", "1", "]", ".", "split", "(", ")", "netstat_ipv6", "[", "0", "]", "=", "netstat_ipv6", "[", "0", "]", ".", "split", "(", ")", "netstat_ipv6", "[", "1", "]", "=", "netstat_ipv6", "[", "1", "]", ".", "split", "(", ")", "# add data", "ret", "[", "dev", "]", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "netstat_ipv4", "[", "0", "]", ")", "-", "1", ")", ":", "if", "netstat_ipv4", "[", "0", "]", "[", "i", "]", "==", "'Name'", ":", "continue", "if", "netstat_ipv4", "[", "0", "]", "[", "i", "]", "in", "[", "'Address'", ",", "'Net/Dest'", "]", ":", "ret", "[", "dev", "]", "[", "'IPv4 {field}'", ".", "format", "(", "field", "=", "netstat_ipv4", "[", "0", "]", "[", "i", "]", ")", "]", "=", "netstat_ipv4", "[", "1", "]", "[", "i", "]", "else", ":", "ret", "[", "dev", "]", "[", "netstat_ipv4", "[", "0", "]", "[", "i", "]", "]", "=", "_number", "(", "netstat_ipv4", "[", "1", "]", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "netstat_ipv6", "[", "0", "]", ")", "-", "1", ")", ":", "if", "netstat_ipv6", "[", "0", "]", "[", "i", "]", "==", "'Name'", ":", "continue", "if", "netstat_ipv6", "[", "0", "]", "[", "i", "]", "in", "[", "'Address'", ",", "'Net/Dest'", "]", ":", "ret", "[", "dev", "]", "[", "'IPv6 {field}'", ".", "format", "(", "field", "=", "netstat_ipv6", "[", "0", "]", "[", "i", "]", ")", "]", "=", "netstat_ipv6", "[", "1", "]", "[", "i", "]", "else", ":", "ret", "[", "dev", "]", "[", "netstat_ipv6", "[", "0", "]", "[", "i", "]", "]", "=", "_number", "(", "netstat_ipv6", "[", "1", "]", "[", "i", "]", ")", "return", "ret", "def", "aix_netdev", "(", ")", ":", "'''\n AIX specific implementation of netdev\n '''", "ret", "=", "{", "}", "fields", "=", "[", "]", "procn", "=", "None", "for", "dev", "in", "__grains__", "[", "'ip4_interfaces'", "]", ".", "keys", "(", ")", "+", "__grains__", "[", "'ip6_interfaces'", "]", ".", "keys", "(", ")", ":", "# fetch device info", "#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6", "#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll", "#en0 1500 link#3 e2.eb.32.42.84.c 10029668 0 446490 0 0", "#en0 1500 172.29.128 172.29.149.95 10029668 0 446490 0 0", "#root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -i -n -I en0 -f inet6", "#Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll", "#en0 1500 link#3 e2.eb.32.42.84.c 10029731 0 446499 0 0", "netstat_ipv4", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -n -I {dev} -f inet'", ".", "format", "(", "dev", "=", "dev", ")", ")", ".", "splitlines", "(", ")", "netstat_ipv6", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'netstat -i -n -I {dev} -f inet6'", ".", "format", "(", "dev", "=", "dev", ")", ")", ".", "splitlines", "(", ")", "# add data", "ret", "[", "dev", "]", "=", "[", "]", "for", "line", "in", "netstat_ipv4", ":", "if", "line", ".", "startswith", "(", "'Name'", ")", ":", "fields", "=", "line", ".", "split", "(", ")", "continue", "comps", "=", "line", ".", "split", "(", ")", "if", 
"len", "(", "comps", ")", "<", "3", ":", "raise", "CommandExecutionError", "(", "'Insufficent data returned by command to process \\'{0}\\''", ".", "format", "(", "line", ")", ")", "if", "comps", "[", "2", "]", ".", "startswith", "(", "'link'", ")", ":", "continue", "procn", "=", "len", "(", "ret", "[", "dev", "]", ")", "ret", "[", "dev", "]", ".", "append", "(", "{", "}", ")", "ret", "[", "dev", "]", "[", "procn", "]", "[", "'ipv4'", "]", "=", "{", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "fields", ")", ")", ":", "if", "len", "(", "comps", ")", ">", "i", ":", "ret", "[", "dev", "]", "[", "procn", "]", "[", "'ipv4'", "]", "[", "fields", "[", "i", "]", "]", "=", "comps", "[", "i", "]", "for", "line", "in", "netstat_ipv6", ":", "if", "line", ".", "startswith", "(", "'Name'", ")", ":", "fields", "=", "line", ".", "split", "(", ")", "continue", "comps", "=", "line", ".", "split", "(", ")", "if", "len", "(", "comps", ")", "<", "3", ":", "raise", "CommandExecutionError", "(", "'Insufficent data returned by command to process \\'{0}\\''", ".", "format", "(", "line", ")", ")", "if", "comps", "[", "2", "]", ".", "startswith", "(", "'link'", ")", ":", "continue", "procn", "=", "len", "(", "ret", "[", "dev", "]", ")", "ret", "[", "dev", "]", ".", "append", "(", "{", "}", ")", "ret", "[", "dev", "]", "[", "procn", "]", "[", "'ipv6'", "]", "=", "{", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "fields", ")", ")", ":", "if", "len", "(", "comps", ")", ">", "i", ":", "ret", "[", "dev", "]", "[", "procn", "]", "[", "'ipv6'", "]", "[", "fields", "[", "i", "]", "]", "=", "comps", "[", "i", "]", "return", "ret", "# dict that returns a function that does the right thing per platform", "get_version", "=", "{", "'Linux'", ":", "linux_netdev", ",", "'FreeBSD'", ":", "freebsd_netdev", ",", "'SunOS'", ":", "sunos_netdev", ",", "'AIX'", ":", "aix_netdev", ",", "}", "errmsg", "=", "'This method is unsupported on the current operating system!'", "return", "get_version", ".", "get", "(", "__grains__", "[", "'kernel'", "]", ",", "lambda", ":", "errmsg", ")", "(", ")" ]
.. versionchanged:: 2016.3.2 Return the network device stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' status.netdev
[ "..", "versionchanged", "::", "2016", ".", "3", ".", "2", "Return", "the", "network", "device", "stats", "for", "this", "minion" ]
python
train
40.646067
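The closing dispatch in netdev is a reusable idiom: map kernel names to implementations and fall back to an error message. A minimal standalone sketch; platform.system() stands in for salt's __grains__['kernel']:

import platform

def linux_impl():
    return {'kernel': 'Linux'}

errmsg = 'This method is unsupported on the current operating system!'
impls = {'Linux': linux_impl}

print(impls.get(platform.system(), lambda: errmsg)())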
django-leonardo/django-leonardo
leonardo/utils/settings.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/utils/settings.py#L144-L162
def get_conf_from_module(mod): """Return configuration from a module, with defaults, so there is no worry about None type """ conf = ModuleConfig(CONF_SPEC) # get imported module mod = _get_correct_module(mod) conf.set_module(mod) # extract from default object or from module if hasattr(mod, 'default'): default = mod.default conf = extract_conf_from(default, conf) else: conf = extract_conf_from(mod, conf) return conf
[ "def", "get_conf_from_module", "(", "mod", ")", ":", "conf", "=", "ModuleConfig", "(", "CONF_SPEC", ")", "# get imported module", "mod", "=", "_get_correct_module", "(", "mod", ")", "conf", ".", "set_module", "(", "mod", ")", "# extract from default object or from module", "if", "hasattr", "(", "mod", ",", "'default'", ")", ":", "default", "=", "mod", ".", "default", "conf", "=", "extract_conf_from", "(", "default", ",", "conf", ")", "else", ":", "conf", "=", "extract_conf_from", "(", "mod", ",", "conf", ")", "return", "conf" ]
Return configuration from a module, with defaults, so there is no worry about None type
[ "Return", "configuration", "from", "a", "module", "with", "defaults", "so", "there", "is", "no", "worry", "about", "None", "type" ]
python
train
23.894737
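A tiny sketch of the "default object or module" fallback used above, with a hypothetical module object:

import types

mod = types.SimpleNamespace(apps=['web'],
                            default=types.SimpleNamespace(apps=['web', 'api']))
# Prefer mod.default when present, otherwise read from the module itself.
source = getattr(mod, 'default', mod)
print(getattr(source, 'apps', []))  # ['web', 'api']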
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1105-L1121
def read_from(self, provider, **options): """ All :class:`Pointer` fields in the `Sequence` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointers are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Sequence` read their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`. """ for item in iter(self): # Container or Pointer if is_mixin(item): item.read_from(provider, **options)
[ "def", "read_from", "(", "self", ",", "provider", ",", "*", "*", "options", ")", ":", "for", "item", "in", "iter", "(", "self", ")", ":", "# Container or Pointer", "if", "is_mixin", "(", "item", ")", ":", "item", ".", "read_from", "(", "provider", ",", "*", "*", "options", ")" ]
All :class:`Pointer` fields in the `Sequence` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointers are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Sequence` read their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
[ "All", ":", "class", ":", "Pointer", "fields", "in", "the", "Sequence", "read", "the", "necessary", "number", "of", "bytes", "from", "the", "data", ":", "class", ":", "Provider", "for", "their", "referenced", ":", "attr", ":", "~Pointer", ".", "data", "object", ".", "Null", "pointers", "are", "ignored", "." ]
python
train
52.470588
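A generic sketch of the chained read pattern above: walk a sequence and forward read_from() only to items that support it; hasattr stands in for KonFoo's is_mixin check:

class Leaf:
    pass

class Node:
    def __init__(self, items):
        self.items = items

    def read_from(self, provider, **options):
        for item in self.items:
            if hasattr(item, 'read_from'):  # stand-in for is_mixin(item)
                item.read_from(provider, **options)

Node([Leaf(), Node([Leaf()])]).read_from(provider=None, nested=True)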
python-bugzilla/python-bugzilla
bugzilla/_cli.py
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/_cli.py#L100-L111
def get_default_url(): """ Grab a default URL from bugzillarc [DEFAULT] url=X """ from bugzilla.base import _open_bugzillarc cfg = _open_bugzillarc() if cfg: cfgurl = cfg.defaults().get("url", None) if cfgurl is not None: log.debug("bugzillarc: found cli url=%s", cfgurl) return cfgurl return DEFAULT_BZ
[ "def", "get_default_url", "(", ")", ":", "from", "bugzilla", ".", "base", "import", "_open_bugzillarc", "cfg", "=", "_open_bugzillarc", "(", ")", "if", "cfg", ":", "cfgurl", "=", "cfg", ".", "defaults", "(", ")", ".", "get", "(", "\"url\"", ",", "None", ")", "if", "cfgurl", "is", "not", "None", ":", "log", ".", "debug", "(", "\"bugzillarc: found cli url=%s\"", ",", "cfgurl", ")", "return", "cfgurl", "return", "DEFAULT_BZ" ]
Grab a default URL from bugzillarc [DEFAULT] url=X
[ "Grab", "a", "default", "URL", "from", "bugzillarc", "[", "DEFAULT", "]", "url", "=", "X" ]
python
train
30
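The lookup above boils down to configparser's DEFAULT-section handling. A standalone sketch against an in-memory config (the real file is opened by _open_bugzillarc):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string('[DEFAULT]\nurl = https://bugzilla.example.com\n')
print(cfg.defaults().get('url'))  # https://bugzilla.example.com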
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QATdx.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L1331-L1351
def QA_fetch_get_hkfund_list(ip=None, port=None): """[summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) # Hong Kong stocks (HKMARKET) 27 5 Hong Kong indices FH 31 2 Hong Kong Main Board KH 48 2 Hong Kong GEM KG 49 2 Hong Kong funds KT 43 1 B-share to H-share HB """ global extension_market_list extension_market_list = QA_fetch_get_extensionmarket_list( ) if extension_market_list is None else extension_market_list return extension_market_list.query('market==49')
[ "def", "QA_fetch_get_hkfund_list", "(", "ip", "=", "None", ",", "port", "=", "None", ")", ":", "global", "extension_market_list", "extension_market_list", "=", "QA_fetch_get_extensionmarket_list", "(", ")", "if", "extension_market_list", "is", "None", "else", "extension_market_list", "return", "extension_market_list", ".", "query", "(", "'market==49'", ")" ]
[summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) # Hong Kong stocks (HKMARKET) 27 5 Hong Kong indices FH 31 2 Hong Kong Main Board KH 48 2 Hong Kong GEM KG 49 2 Hong Kong funds KT 43 1 B-share to H-share HB
[ "[", "summary", "]" ]
python
train
30.333333
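The filtering step is plain pandas DataFrame.query over the cached listing. A sketch with a stand-in frame (the real one comes from the TDX extension-market call):

import pandas as pd

markets = pd.DataFrame({'code': ['FH', 'KH', 'KG', 'KT', 'HB'],
                        'market': [27, 31, 48, 49, 43]})
print(markets.query('market==49'))  # only the Hong Kong fund row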
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L678-L680
def p_expr_new(p): 'expr : NEW class_name_reference ctor_arguments' p[0] = ast.New(p[2], p[3], lineno=p.lineno(1))
[ "def", "p_expr_new", "(", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "New", "(", "p", "[", "2", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
expr : NEW class_name_reference ctor_arguments
[ "expr", ":", "NEW", "class_name_reference", "ctor_arguments" ]
python
train
40
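In PLY, the docstring of a p_* function is the grammar production itself. A minimal, self-contained sketch of the same pattern with a toy grammar (not phply's):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NEW', 'NAME')
t_ignore = ' '

def t_NEW(t):
    r'new'
    return t

t_NAME = r'[A-Za-z_]\w+'

def t_error(t):
    t.lexer.skip(1)

def p_expr_new(p):
    'expr : NEW NAME'
    p[0] = ('new', p[2], p.lineno(1))

def p_error(p):
    pass

parser = yacc.yacc()
print(parser.parse('new Foo', lexer=lex.lex()))  # ('new', 'Foo', 1)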
F483/btctxstore
btctxstore/api.py
https://github.com/F483/btctxstore/blob/5790ace3a3d4c9bcc759e7c931fc4a57d40b6c25/btctxstore/api.py#L259-L263
def get_data_blob(self, rawtx): """TODO add docstring""" tx = deserialize.tx(rawtx) data = control.get_data_blob(tx) return serialize.data(data)
[ "def", "get_data_blob", "(", "self", ",", "rawtx", ")", ":", "tx", "=", "deserialize", ".", "tx", "(", "rawtx", ")", "data", "=", "control", ".", "get_data_blob", "(", "tx", ")", "return", "serialize", ".", "data", "(", "data", ")" ]
TODO add docstring
[ "TODO", "add", "docstring" ]
python
train
34.4
esterhui/pypu
scripts/build_json_from_gps.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/scripts/build_json_from_gps.py#L169-L194
def lookupGeoInfo(positions): """Looks up lat/lon info with Google given a list of positions as parsed by parsePositionFile. Returns Google results in the form of a dictionary """ list_data=[] oldlat=0 oldlon=0 d={} for pos in positions: # Only lookup point if it is above threshold diff_lat=abs(float(pos['lat'])-oldlat) diff_lon=abs(float(pos['lon'])-oldlon) if (diff_lat>POS_THRESHOLD_DEG) or\ (diff_lon>POS_THRESHOLD_DEG): d=lookup_by_latlon(pos['lat'],pos['lon']) oldlat=float(pos['lat']) oldlon=float(pos['lon']) else: logger.debug("Skipping %s/%s, close to prev"%(pos['lat'],pos['lon'])) # Use fresh lookup value or old value list_data.append(d) logger.info('looked up %d positions'%(len(list_data))) return list_data
[ "def", "lookupGeoInfo", "(", "positions", ")", ":", "list_data", "=", "[", "]", "oldlat", "=", "0", "oldlon", "=", "0", "d", "=", "{", "}", "for", "pos", "in", "positions", ":", "# Only lookup point if it is above threshold", "diff_lat", "=", "abs", "(", "float", "(", "pos", "[", "'lat'", "]", ")", "-", "oldlat", ")", "diff_lon", "=", "abs", "(", "float", "(", "pos", "[", "'lon'", "]", ")", "-", "oldlon", ")", "if", "(", "diff_lat", ">", "POS_THRESHOLD_DEG", ")", "or", "(", "diff_lon", ">", "POS_THRESHOLD_DEG", ")", ":", "d", "=", "lookup_by_latlon", "(", "pos", "[", "'lat'", "]", ",", "pos", "[", "'lon'", "]", ")", "oldlat", "=", "float", "(", "pos", "[", "'lat'", "]", ")", "oldlon", "=", "float", "(", "pos", "[", "'lon'", "]", ")", "else", ":", "logger", ".", "debug", "(", "\"Skipping %s/%s, close to prev\"", "%", "(", "pos", "[", "'lat'", "]", ",", "pos", "[", "'lon'", "]", ")", ")", "# Use fresh lookup value or old value", "list_data", ".", "append", "(", "d", ")", "logger", ".", "info", "(", "'looked up %d positions'", "%", "(", "len", "(", "list_data", ")", ")", ")", "return", "list_data" ]
Looks up lat/lon info with Google given a list of positions as parsed by parsePositionFile. Returns Google results in the form of a dictionary
[ "Looks", "up", "lat", "/", "lon", "info", "with", "Google", "given", "a", "list", "of", "positions", "as", "parsed", "by", "parsePositionFile", ".", "Returns", "Google", "results", "in", "the", "form", "of", "a", "dictionary" ]
python
train
32.769231
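A standalone sketch of the movement threshold in lookupGeoInfo: a point is only looked up when it differs from the previous looked-up point by more than the threshold in either coordinate (the threshold value is illustrative):

POS_THRESHOLD_DEG = 0.01  # illustrative value

def positions_to_lookup(points):
    kept, oldlat, oldlon = [], 0.0, 0.0
    for lat, lon in points:
        if abs(lat - oldlat) > POS_THRESHOLD_DEG or \
           abs(lon - oldlon) > POS_THRESHOLD_DEG:
            kept.append((lat, lon))
            oldlat, oldlon = lat, lon
    return kept

print(positions_to_lookup([(52.0, 4.0), (52.0001, 4.0001), (52.1, 4.1)]))
# [(52.0, 4.0), (52.1, 4.1)]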
google/grr
grr/client/grr_response_client/client_actions/tempfiles.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/tempfiles.py#L228-L264
def DeleteGRRTempFile(path): """Delete a GRR temp file. To limit possible damage the path must be absolute and either the file must be within any of the Client.tempdir_roots or the file name must begin with Client.tempfile_prefix. Args: path: path string to file to be deleted. Raises: OSError: Permission denied, or file not found. ErrorBadPath: Path must be absolute. ErrorNotTempFile: Filename must start with Client.tempfile_prefix. ErrorNotAFile: File to delete does not exist. """ precondition.AssertType(path, Text) if not os.path.isabs(path): raise ErrorBadPath("Path must be absolute") prefix = config.CONFIG["Client.tempfile_prefix"] directories = [ GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"] ] if not _CheckIfPathIsValidForDeletion( path, prefix=prefix, directories=directories): msg = ("Can't delete temp file %s. Filename must start with %s " "or lie within any of %s.") raise ErrorNotTempFile(msg % (path, prefix, ";".join(directories))) if os.path.exists(path): # Clear our file handle cache so the file can be deleted. files.FILE_HANDLE_CACHE.Flush() os.remove(path) else: raise ErrorNotAFile("%s does not exist." % path)
[ "def", "DeleteGRRTempFile", "(", "path", ")", ":", "precondition", ".", "AssertType", "(", "path", ",", "Text", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "raise", "ErrorBadPath", "(", "\"Path must be absolute\"", ")", "prefix", "=", "config", ".", "CONFIG", "[", "\"Client.tempfile_prefix\"", "]", "directories", "=", "[", "GetTempDirForRoot", "(", "root", ")", "for", "root", "in", "config", ".", "CONFIG", "[", "\"Client.tempdir_roots\"", "]", "]", "if", "not", "_CheckIfPathIsValidForDeletion", "(", "path", ",", "prefix", "=", "prefix", ",", "directories", "=", "directories", ")", ":", "msg", "=", "(", "\"Can't delete temp file %s. Filename must start with %s \"", "\"or lie within any of %s.\"", ")", "raise", "ErrorNotTempFile", "(", "msg", "%", "(", "path", ",", "prefix", ",", "\";\"", ".", "join", "(", "directories", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "# Clear our file handle cache so the file can be deleted.", "files", ".", "FILE_HANDLE_CACHE", ".", "Flush", "(", ")", "os", ".", "remove", "(", "path", ")", "else", ":", "raise", "ErrorNotAFile", "(", "\"%s does not exist.\"", "%", "path", ")" ]
Delete a GRR temp file. To limit possible damage the path must be absolute and either the file must be within any of the Client.tempdir_roots or the file name must begin with Client.tempfile_prefix. Args: path: path string to file to be deleted. Raises: OSError: Permission denied, or file not found. ErrorBadPath: Path must be absolute. ErrorNotTempFile: Filename must start with Client.tempfile_prefix. ErrorNotAFile: File to delete does not exist.
[ "Delete", "a", "GRR", "temp", "file", "." ]
python
train
33.486486
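A simplified version of the deletion guard above — an absolute path is required, and the target must carry the temp prefix or live under an allowed directory; the prefix and directory values are illustrative:

import os

def is_deletable(path, prefix='tmp', directories=('/var/tmp/grr',)):
    if not os.path.isabs(path):
        raise ValueError('Path must be absolute')
    in_allowed_dir = any(path.startswith(d + os.sep) for d in directories)
    has_prefix = os.path.basename(path).startswith(prefix)
    return in_allowed_dir or has_prefix

print(is_deletable('/var/tmp/grr/tmpabc123'))  # True
print(is_deletable('/etc/passwd'))             # False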
nsqio/pynsq
nsq/reader.py
https://github.com/nsqio/pynsq/blob/48bf62d65ea63cddaa401efb23187b95511dbc84/nsq/reader.py#L446-L484
def connect_to_nsqd(self, host, port): """ Adds a connection to ``nsqd`` at the specified address. :param host: the address to connect to :param port: the port to connect to """ assert isinstance(host, string_types) assert isinstance(port, int) conn = AsyncConn(host, port, **self.conn_kwargs) conn.on('identify', self._on_connection_identify) conn.on('identify_response', self._on_connection_identify_response) conn.on('auth', self._on_connection_auth) conn.on('auth_response', self._on_connection_auth_response) conn.on('error', self._on_connection_error) conn.on('close', self._on_connection_close) conn.on('ready', self._on_connection_ready) conn.on('message', self._on_message) conn.on('heartbeat', self._on_heartbeat) conn.on('backoff', functools.partial(self._on_backoff_resume, success=False)) conn.on('resume', functools.partial(self._on_backoff_resume, success=True)) conn.on('continue', functools.partial(self._on_backoff_resume, success=None)) if conn.id in self.conns: return # only attempt to re-connect once every 10s per destination # this throttles reconnects to failed endpoints now = time.time() last_connect_attempt = self.connection_attempts.get(conn.id) if last_connect_attempt and last_connect_attempt > now - 10: return self.connection_attempts[conn.id] = now logger.info('[%s:%s] connecting to nsqd', conn.id, self.name) conn.connect() return conn
[ "def", "connect_to_nsqd", "(", "self", ",", "host", ",", "port", ")", ":", "assert", "isinstance", "(", "host", ",", "string_types", ")", "assert", "isinstance", "(", "port", ",", "int", ")", "conn", "=", "AsyncConn", "(", "host", ",", "port", ",", "*", "*", "self", ".", "conn_kwargs", ")", "conn", ".", "on", "(", "'identify'", ",", "self", ".", "_on_connection_identify", ")", "conn", ".", "on", "(", "'identify_response'", ",", "self", ".", "_on_connection_identify_response", ")", "conn", ".", "on", "(", "'auth'", ",", "self", ".", "_on_connection_auth", ")", "conn", ".", "on", "(", "'auth_response'", ",", "self", ".", "_on_connection_auth_response", ")", "conn", ".", "on", "(", "'error'", ",", "self", ".", "_on_connection_error", ")", "conn", ".", "on", "(", "'close'", ",", "self", ".", "_on_connection_close", ")", "conn", ".", "on", "(", "'ready'", ",", "self", ".", "_on_connection_ready", ")", "conn", ".", "on", "(", "'message'", ",", "self", ".", "_on_message", ")", "conn", ".", "on", "(", "'heartbeat'", ",", "self", ".", "_on_heartbeat", ")", "conn", ".", "on", "(", "'backoff'", ",", "functools", ".", "partial", "(", "self", ".", "_on_backoff_resume", ",", "success", "=", "False", ")", ")", "conn", ".", "on", "(", "'resume'", ",", "functools", ".", "partial", "(", "self", ".", "_on_backoff_resume", ",", "success", "=", "True", ")", ")", "conn", ".", "on", "(", "'continue'", ",", "functools", ".", "partial", "(", "self", ".", "_on_backoff_resume", ",", "success", "=", "None", ")", ")", "if", "conn", ".", "id", "in", "self", ".", "conns", ":", "return", "# only attempt to re-connect once every 10s per destination", "# this throttles reconnects to failed endpoints", "now", "=", "time", ".", "time", "(", ")", "last_connect_attempt", "=", "self", ".", "connection_attempts", ".", "get", "(", "conn", ".", "id", ")", "if", "last_connect_attempt", "and", "last_connect_attempt", ">", "now", "-", "10", ":", "return", "self", ".", "connection_attempts", "[", "conn", ".", "id", "]", "=", "now", "logger", ".", "info", "(", "'[%s:%s] connecting to nsqd'", ",", "conn", ".", "id", ",", "self", ".", "name", ")", "conn", ".", "connect", "(", ")", "return", "conn" ]
Adds a connection to ``nsqd`` at the specified address. :param host: the address to connect to :param port: the port to connect to
[ "Adds", "a", "connection", "to", "nsqd", "at", "the", "specified", "address", "." ]
python
test
41.128205
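Reader wiring in user code, close to the pynsq README; this assumes an nsqd instance on 127.0.0.1:4150, and connect_to_nsqd() is invoked internally once the IOLoop starts:

import nsq

def handler(message):
    print(message.body)
    return True  # finishes the message

r = nsq.Reader(message_handler=handler,
               nsqd_tcp_addresses=['127.0.0.1:4150'],
               topic='test_topic', channel='test_channel')
nsq.run()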
glomex/gcdt
gcdt/kumo_core.py
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/kumo_core.py#L632-L645
def delete_change_set(awsclient, change_set_name, stack_name): """Delete specified change set. Currently we only use this during automated regression testing. But we have plans, so let's locate this functionality here :param awsclient: :param change_set_name: :param stack_name: """ client = awsclient.get_client('cloudformation') response = client.delete_change_set( ChangeSetName=change_set_name, StackName=stack_name)
[ "def", "delete_change_set", "(", "awsclient", ",", "change_set_name", ",", "stack_name", ")", ":", "client", "=", "awsclient", ".", "get_client", "(", "'cloudformation'", ")", "response", "=", "client", ".", "delete_change_set", "(", "ChangeSetName", "=", "change_set_name", ",", "StackName", "=", "stack_name", ")" ]
Delete specified change set. Currently we only use this during automated regression testing. But we have plans, so let's locate this functionality here :param awsclient: :param change_set_name: :param stack_name:
[ "Delete", "specified", "change", "set", ".", "Currently", "we", "only", "use", "this", "during", "automated", "regression", "testing", ".", "But", "we", "have", "plans", "so", "let's", "locate", "this", "functionality", "here" ]
python
train
32.785714
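The same call through plain boto3, without gcdt's awsclient wrapper; the stack and change-set names are placeholders:

import boto3

client = boto3.client('cloudformation')
client.delete_change_set(ChangeSetName='my-change-set',
                         StackName='my-stack')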
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L5088-L5107
def IsDerivedFunction(clean_lines, linenum): """Check if current line contains an inherited function. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains a function with "override" virt-specifier. """ # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) if match: # Look for "override" after the matching closing parenthesis line, _, closing_paren = CloseExpression( clean_lines, i, len(match.group(1))) return (closing_paren >= 0 and Search(r'\boverride\b', line[closing_paren:])) return False
[ "def", "IsDerivedFunction", "(", "clean_lines", ",", "linenum", ")", ":", "# Scan back a few lines for start of current function", "for", "i", "in", "xrange", "(", "linenum", ",", "max", "(", "-", "1", ",", "linenum", "-", "10", ")", ",", "-", "1", ")", ":", "match", "=", "Match", "(", "r'^([^()]*\\w+)\\('", ",", "clean_lines", ".", "elided", "[", "i", "]", ")", "if", "match", ":", "# Look for \"override\" after the matching closing parenthesis", "line", ",", "_", ",", "closing_paren", "=", "CloseExpression", "(", "clean_lines", ",", "i", ",", "len", "(", "match", ".", "group", "(", "1", ")", ")", ")", "return", "(", "closing_paren", ">=", "0", "and", "Search", "(", "r'\\boverride\\b'", ",", "line", "[", "closing_paren", ":", "]", ")", ")", "return", "False" ]
Check if current line contains an inherited function. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains a function with "override" virt-specifier.
[ "Check", "if", "current", "line", "contains", "an", "inherited", "function", "." ]
python
valid
38
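A standalone sketch of the scan-back idiom above: walk up to 10 lines backwards from the current line until a function signature opens:

import re

def find_signature(lines, linenum, window=10):
    for i in range(linenum, max(-1, linenum - window), -1):
        if re.match(r'^([^()]*\w+)\(', lines[i]):
            return i
    return -1

lines = ['void Base::Method()', '    override {', '  // body']
print(find_signature(lines, 1))  # 0: the line where the signature starts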
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L874-L883
def down_ec2(instance_id, region, access_key_id, secret_access_key): """ shutdown of an existing EC2 instance """ conn = connect_to_ec2(region, access_key_id, secret_access_key) # get the instance_id from the state file, and stop the instance instance = conn.stop_instances(instance_ids=instance_id)[0] while instance.state != "stopped": log_yellow("Instance state: %s" % instance.state) sleep(10) instance.update() log_green('Instance state: %s' % instance.state)
[ "def", "down_ec2", "(", "instance_id", ",", "region", ",", "access_key_id", ",", "secret_access_key", ")", ":", "conn", "=", "connect_to_ec2", "(", "region", ",", "access_key_id", ",", "secret_access_key", ")", "# get the instance_id from the state file, and stop the instance", "instance", "=", "conn", ".", "stop_instances", "(", "instance_ids", "=", "instance_id", ")", "[", "0", "]", "while", "instance", ".", "state", "!=", "\"stopped\"", ":", "log_yellow", "(", "\"Instance state: %s\"", "%", "instance", ".", "state", ")", "sleep", "(", "10", ")", "instance", ".", "update", "(", ")", "log_green", "(", "'Instance state: %s'", "%", "instance", ".", "state", ")" ]
shutdown of an existing EC2 instance
[ "shutdown", "of", "an", "existing", "EC2", "instance" ]
python
train
50.3
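A boto3 equivalent of the boto2 flow above — issue the stop and block on the built-in waiter instead of polling manually; the region and instance id are placeholders:

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
ec2.stop_instances(InstanceIds=['i-0123456789abcdef0'])
ec2.get_waiter('instance_stopped').wait(InstanceIds=['i-0123456789abcdef0'])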
tanghaibao/jcvi
jcvi/projects/synfind.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L731-L812
def cartoon(args): """ %prog synteny.py Generate cartoon illustration of SynFind. """ p = OptionParser(cartoon.__doc__) opts, args, iopts = p.set_image_options(args, figsize="10x7") fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) # Panel A A = CartoonRegion(41) A.draw(root, .35, .85, strip=False, color=False) x1, x2 = A.x1, A.x2 lsg = "lightslategray" pad = .01 xc, yc = .35, .88 arrowlen = x2 - xc - pad arrowprops = dict(length_includes_head=True, width=.01, fc=lsg, lw=0, head_length=arrowlen * .15, head_width=.03) p = FancyArrow(xc - pad, yc, -arrowlen, 0, shape="left", **arrowprops) root.add_patch(p) p = FancyArrow(xc + pad, yc, arrowlen, 0, shape="right", **arrowprops) root.add_patch(p) yt = yc + 4 * pad root.text((x1 + xc) / 2, yt, "20 genes upstream", ha="center") root.text((x2 + xc) / 2, yt, "20 genes downstream", ha="center") root.plot((xc,), (yc,), "o", mfc='w', mec=lsg, mew=2, lw=2, color=lsg) root.text(xc, yt, "Query gene", ha="center") # Panel B A.draw(root, .35, .7, strip=False) RoundRect(root, (.07, .49), .56, .14, fc='y', alpha=.2) a = deepcopy(A) a.evolve(mode='S', target=10) a.draw(root, .35, .6) b = deepcopy(A) b.evolve(mode='F', target=8) b.draw(root, .35, .56) c = deepcopy(A) c.evolve(mode='G', target=6) c.draw(root, .35, .52) for x in (a, b, c): root.text(.64, x.y, "Score={0}".format(x.nonwhites), va="center") # Panel C A.truncate_between_flankers() a.truncate_between_flankers() b.truncate_between_flankers() c.truncate_between_flankers(target=6) plot_diagram(root, .14, .2, A, a, "S", "syntenic") plot_diagram(root, .37, .2, A, b, "F", "missing, with both flankers") plot_diagram(root, .6, .2, A, c, "G", "missing, with one flanker") labels = ((.04, .95, 'A'), (.04, .75, 'B'), (.04, .4, 'C')) panel_labels(root, labels) # Descriptions xt = .85 desc = ("Extract neighborhood", "of *window* size", "Count gene pairs within *window*", "Find regions above *score* cutoff", "Identify flankers", "Annotate syntelog class" ) for yt, t in zip((.88, .84, .64, .6, .3, .26), desc): root.text(xt, yt, markup(t), ha="center", va="center") root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "cartoon" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "cartoon", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "cartoon", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"10x7\"", ")", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "# Panel A", "A", "=", "CartoonRegion", "(", "41", ")", "A", ".", "draw", "(", "root", ",", ".35", ",", ".85", ",", "strip", "=", "False", ",", "color", "=", "False", ")", "x1", ",", "x2", "=", "A", ".", "x1", ",", "A", ".", "x2", "lsg", "=", "\"lightslategray\"", "pad", "=", ".01", "xc", ",", "yc", "=", ".35", ",", ".88", "arrowlen", "=", "x2", "-", "xc", "-", "pad", "arrowprops", "=", "dict", "(", "length_includes_head", "=", "True", ",", "width", "=", ".01", ",", "fc", "=", "lsg", ",", "lw", "=", "0", ",", "head_length", "=", "arrowlen", "*", ".15", ",", "head_width", "=", ".03", ")", "p", "=", "FancyArrow", "(", "xc", "-", "pad", ",", "yc", ",", "-", "arrowlen", ",", "0", ",", "shape", "=", "\"left\"", ",", "*", "*", "arrowprops", ")", "root", ".", "add_patch", "(", "p", ")", "p", "=", "FancyArrow", "(", "xc", "+", "pad", ",", "yc", ",", "arrowlen", ",", "0", ",", "shape", "=", "\"right\"", ",", "*", "*", "arrowprops", ")", "root", ".", "add_patch", "(", "p", ")", "yt", "=", "yc", "+", "4", "*", "pad", "root", ".", "text", "(", "(", "x1", "+", "xc", ")", "/", "2", ",", "yt", ",", "\"20 genes upstream\"", ",", "ha", "=", "\"center\"", ")", "root", ".", "text", "(", "(", "x2", "+", "xc", ")", "/", "2", ",", "yt", ",", "\"20 genes downstream\"", ",", "ha", "=", "\"center\"", ")", "root", ".", "plot", "(", "(", "xc", ",", ")", ",", "(", "yc", ",", ")", ",", "\"o\"", ",", "mfc", "=", "'w'", ",", "mec", "=", "lsg", ",", "mew", "=", "2", ",", "lw", "=", "2", ",", "color", "=", "lsg", ")", "root", ".", "text", "(", "xc", ",", "yt", ",", "\"Query gene\"", ",", "ha", "=", "\"center\"", ")", "# Panel B", "A", ".", "draw", "(", "root", ",", ".35", ",", ".7", ",", "strip", "=", "False", ")", "RoundRect", "(", "root", ",", "(", ".07", ",", ".49", ")", ",", ".56", ",", ".14", ",", "fc", "=", "'y'", ",", "alpha", "=", ".2", ")", "a", "=", "deepcopy", "(", "A", ")", "a", ".", "evolve", "(", "mode", "=", "'S'", ",", "target", "=", "10", ")", "a", ".", "draw", "(", "root", ",", ".35", ",", ".6", ")", "b", "=", "deepcopy", "(", "A", ")", "b", ".", "evolve", "(", "mode", "=", "'F'", ",", "target", "=", "8", ")", "b", ".", "draw", "(", "root", ",", ".35", ",", ".56", ")", "c", "=", "deepcopy", "(", "A", ")", "c", ".", "evolve", "(", "mode", "=", "'G'", ",", "target", "=", "6", ")", "c", ".", "draw", "(", "root", ",", ".35", ",", ".52", ")", "for", "x", "in", "(", "a", ",", "b", ",", "c", ")", ":", "root", ".", "text", "(", ".64", ",", "x", ".", "y", ",", "\"Score={0}\"", ".", "format", "(", "x", ".", "nonwhites", ")", ",", "va", "=", "\"center\"", ")", "# Panel C", "A", ".", "truncate_between_flankers", "(", ")", "a", ".", "truncate_between_flankers", "(", ")", "b", ".", "truncate_between_flankers", "(", ")", "c", ".", "truncate_between_flankers", "(", "target", "=", "6", ")", "plot_diagram", "(", "root", ",", ".14", ",", ".2", ",", "A", ",", "a", ",", "\"S\"", ",", "\"syntenic\"", ")", "plot_diagram", "(", "root", ",", ".37", ",", ".2", ",", "A", ",", "b", ",", "\"F\"", ",", "\"missing, with both flankers\"", ")", "plot_diagram", "(", "root", ",", ".6", ",", ".2", ",", "A", ",", "c", 
",", "\"G\"", ",", "\"missing, with one flanker\"", ")", "labels", "=", "(", "(", ".04", ",", ".95", ",", "'A'", ")", ",", "(", ".04", ",", ".75", ",", "'B'", ")", ",", "(", ".04", ",", ".4", ",", "'C'", ")", ")", "panel_labels", "(", "root", ",", "labels", ")", "# Descriptions", "xt", "=", ".85", "desc", "=", "(", "\"Extract neighborhood\"", ",", "\"of *window* size\"", ",", "\"Count gene pairs within *window*\"", ",", "\"Find regions above *score* cutoff\"", ",", "\"Identify flankers\"", ",", "\"Annotate syntelog class\"", ")", "for", "yt", ",", "t", "in", "zip", "(", "(", ".88", ",", ".84", ",", ".64", ",", ".6", ",", ".3", ",", ".26", ")", ",", "desc", ")", ":", "root", ".", "text", "(", "xt", ",", "yt", ",", "markup", "(", "t", ")", ",", "ha", "=", "\"center\"", ",", "va", "=", "\"center\"", ")", "root", ".", "set_xlim", "(", "0", ",", "1", ")", "root", ".", "set_ylim", "(", "0", ",", "1", ")", "root", ".", "set_axis_off", "(", ")", "pf", "=", "\"cartoon\"", "image_name", "=", "pf", "+", "\".\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")" ]
%prog synteny.py Generate cartoon illustration of SynFind.
[ "%prog", "synteny", ".", "py" ]
python
train
30.914634
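Usage note (not part of the record): the panel-A arrows above come from matplotlib's FancyArrow with shape="left"/"right" halves mirrored around the query-gene marker. A minimal, self-contained sketch of that idiom, with all coordinates made up rather than taken from jcvi:

    # Two mirrored half-arrows around a central marker, as in panel A.
    import matplotlib.pyplot as plt
    from matplotlib.patches import FancyArrow

    fig = plt.figure(1, (10, 7))
    root = fig.add_axes([0, 0, 1, 1])
    xc, yc, pad, arrowlen = .35, .88, .01, .2   # illustrative values
    arrowprops = dict(length_includes_head=True, width=.01, lw=0,
                      fc="lightslategray", head_length=arrowlen * .15,
                      head_width=.03)
    # shape="left"/"right" draws only one half of the arrow body, which
    # gives the mirrored upstream/downstream look around the query gene.
    root.add_patch(FancyArrow(xc - pad, yc, -arrowlen, 0, shape="left", **arrowprops))
    root.add_patch(FancyArrow(xc + pad, yc, arrowlen, 0, shape="right", **arrowprops))
    root.plot((xc,), (yc,), "o", mfc="w", mec="lightslategray", mew=2)
    root.set_axis_off()
    fig.savefig("arrows.png", dpi=150)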
senaite/senaite.core
bika/lims/browser/fields/remarksfield.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/fields/remarksfield.py#L82-L89
def getRaw(self, instance, **kwargs): """Returns raw field value (possible wrapped in BaseUnit) """ value = ObjectField.get(self, instance, **kwargs) # getattr(instance, "Remarks") returns a BaseUnit if callable(value): value = value() return value
[ "def", "getRaw", "(", "self", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "value", "=", "ObjectField", ".", "get", "(", "self", ",", "instance", ",", "*", "*", "kwargs", ")", "# getattr(instance, \"Remarks\") returns a BaseUnit", "if", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "return", "value" ]
Returns raw field value (possibly wrapped in BaseUnit)
[ "Returns", "raw", "field", "value", "(", "possibly", "wrapped", "in", "BaseUnit", ")" ]
python
train
37.625
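The method's only subtlety is the call-if-callable unwrap. A hedged sketch of that pattern in isolation, with a plain lambda standing in for the Zope BaseUnit wrapper:

    # Values stored as callables (e.g. BaseUnit) are invoked; plain
    # values pass through unchanged.
    def unwrap(value):
        if callable(value):
            value = value()
        return value

    assert unwrap(lambda: "remark text") == "remark text"  # wrapped value
    assert unwrap("remark text") == "remark text"          # already raw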
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2109-L2145
def _try_coerce_args(self, values, other): """ Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other """ values = values.view('i8') if isinstance(other, bool): raise TypeError elif is_null_datetimelike(other): other = tslibs.iNaT elif isinstance(other, (datetime, np.datetime64, date)): other = self._box_func(other) if getattr(other, 'tz') is not None: raise TypeError("cannot coerce a Timestamp with a tz on a " "naive Block") other = other.asm8.view('i8') elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other = other.astype('i8', copy=False).view('i8') else: # coercion issues # let higher levels handle raise TypeError(other) return values, other
[ "def", "_try_coerce_args", "(", "self", ",", "values", ",", "other", ")", ":", "values", "=", "values", ".", "view", "(", "'i8'", ")", "if", "isinstance", "(", "other", ",", "bool", ")", ":", "raise", "TypeError", "elif", "is_null_datetimelike", "(", "other", ")", ":", "other", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ",", "date", ")", ")", ":", "other", "=", "self", ".", "_box_func", "(", "other", ")", "if", "getattr", "(", "other", ",", "'tz'", ")", "is", "not", "None", ":", "raise", "TypeError", "(", "\"cannot coerce a Timestamp with a tz on a \"", "\"naive Block\"", ")", "other", "=", "other", ".", "asm8", ".", "view", "(", "'i8'", ")", "elif", "hasattr", "(", "other", ",", "'dtype'", ")", "and", "is_datetime64_dtype", "(", "other", ")", ":", "other", "=", "other", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", ".", "view", "(", "'i8'", ")", "else", ":", "# coercion issues", "# let higher levels handle", "raise", "TypeError", "(", "other", ")", "return", "values", ",", "other" ]
Coerce values and other to dtype 'i8'. NaN and NaT convert to the smallest i8, and will correctly round-trip to NaT if converted back in _try_coerce_result. values is always ndarray-like, other may not be Parameters ---------- values : ndarray-like other : ndarray-like or scalar Returns ------- base-type values, base-type other
[ "Coerce", "values", "and", "other", "to", "dtype", "i8", ".", "NaN", "and", "NaT", "convert", "to", "the", "smallest", "i8", "and", "will", "correctly", "round", "-", "trip", "to", "NaT", "if", "converted", "back", "in", "_try_coerce_result", ".", "values", "is", "always", "ndarray", "-", "like", "other", "may", "not", "be" ]
python
train
33.108108
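A numpy-only sketch of the i8 round-trip the docstring promises; pandas' tslibs.iNaT is the smallest int64, which is also how numpy stores NaT internally:

    import numpy as np

    iNaT = np.iinfo(np.int64).min  # equals pandas' tslibs.iNaT
    values = np.array(["2021-01-01", "NaT"], dtype="datetime64[ns]")
    as_i8 = values.view("i8")          # zero-copy reinterpretation
    assert as_i8[1] == iNaT            # NaT maps to the smallest i8 ...
    roundtrip = as_i8.view("datetime64[ns]")
    assert np.isnat(roundtrip[1])      # ... and converts back to NaT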
woolfson-group/isambard
isambard/optimisation/base_evo_opt.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/base_evo_opt.py#L341-L381
def log_results(self, output_path=None, run_id=None): """Saves files for the minimization. Notes ----- Currently saves a logfile with best individual and a pdb of the best model. """ best_ind = self.halloffame[0] model_params = self.parse_individual( best_ind) # need to change name of 'params' if output_path is None: output_path = os.getcwd() if run_id is None: run_id = '{:%Y%m%d-%H%M%S}'.format( datetime.datetime.now()) with open('{0}/{1}_opt_log.txt'.format( output_path, run_id), 'w') as log_file: log_file.write('\nEvaluated {0} models in total\n'.format( self._model_count)) log_file.write('Run ID is {0}\n'.format(run_id)) log_file.write('Best fitness is {0}\n'.format( self.halloffame[0].fitness)) log_file.write( 'Parameters of best model are {0}\n'.format(model_params)) log_file.write( 'Best individual is {0}\n'.format(self.halloffame[0])) for i, entry in enumerate(self.halloffame[0]): if entry > 0.95: log_file.write( "Warning! Parameter {0} is at or near maximum allowed " "value\n".format(i + 1)) elif entry < -0.95: log_file.write( "Warning! Parameter {0} is at or near minimum allowed " "value\n".format(i + 1)) log_file.write('Minimization history: \n{0}'.format(self.logbook)) with open('{0}/{1}_opt_best_model.pdb'.format( output_path, run_id), 'w') as output_file: output_file.write(self.best_model.pdb) return
[ "def", "log_results", "(", "self", ",", "output_path", "=", "None", ",", "run_id", "=", "None", ")", ":", "best_ind", "=", "self", ".", "halloffame", "[", "0", "]", "model_params", "=", "self", ".", "parse_individual", "(", "best_ind", ")", "# need to change name of 'params'", "if", "output_path", "is", "None", ":", "output_path", "=", "os", ".", "getcwd", "(", ")", "if", "run_id", "is", "None", ":", "run_id", "=", "'{:%Y%m%d-%H%M%S}'", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "with", "open", "(", "'{0}/{1}_opt_log.txt'", ".", "format", "(", "output_path", ",", "run_id", ")", ",", "'w'", ")", "as", "log_file", ":", "log_file", ".", "write", "(", "'\\nEvaluated {0} models in total\\n'", ".", "format", "(", "self", ".", "_model_count", ")", ")", "log_file", ".", "write", "(", "'Run ID is {0}\\n'", ".", "format", "(", "run_id", ")", ")", "log_file", ".", "write", "(", "'Best fitness is {0}\\n'", ".", "format", "(", "self", ".", "halloffame", "[", "0", "]", ".", "fitness", ")", ")", "log_file", ".", "write", "(", "'Parameters of best model are {0}\\n'", ".", "format", "(", "model_params", ")", ")", "log_file", ".", "write", "(", "'Best individual is {0}\\n'", ".", "format", "(", "self", ".", "halloffame", "[", "0", "]", ")", ")", "for", "i", ",", "entry", "in", "enumerate", "(", "self", ".", "halloffame", "[", "0", "]", ")", ":", "if", "entry", ">", "0.95", ":", "log_file", ".", "write", "(", "\"Warning! Parameter {0} is at or near maximum allowed \"", "\"value\\n\"", ".", "format", "(", "i", "+", "1", ")", ")", "elif", "entry", "<", "-", "0.95", ":", "log_file", ".", "write", "(", "\"Warning! Parameter {0} is at or near minimum allowed \"", "\"value\\n\"", ".", "format", "(", "i", "+", "1", ")", ")", "log_file", ".", "write", "(", "'Minimization history: \\n{0}'", ".", "format", "(", "self", ".", "logbook", ")", ")", "with", "open", "(", "'{0}/{1}_opt_best_model.pdb'", ".", "format", "(", "output_path", ",", "run_id", ")", ",", "'w'", ")", "as", "output_file", ":", "output_file", ".", "write", "(", "self", ".", "best_model", ".", "pdb", ")", "return" ]
Saves files for the minimization. Notes ----- Currently saves a logfile with best individual and a pdb of the best model.
[ "Saves", "files", "for", "the", "minimization", "." ]
python
train
44.219512
molmod/molmod
molmod/pairff.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L129-L162
def hessian_component(self, index1, index2): """Compute the hessian of the energy for one atom pair""" result = np.zeros((3, 3), float) if index1 == index2: for index3 in range(self.numc): if self.scaling[index1, index3] > 0: d_1 = 1/self.distances[index1, index3] for (se, ve), (sg, vg), (sh, vh) in zip( self.yield_pair_energies(index1, index3), self.yield_pair_gradients(index1, index3), self.yield_pair_hessians(index1, index3) ): result += ( +sh*self.dirouters[index1, index3]*ve +sg*(np.identity(3, float) - self.dirouters[index1, index3])*ve*d_1 +sg*np.outer(self.directions[index1, index3], vg) +sg*np.outer(vg, self.directions[index1, index3]) +se*vh )*self.scaling[index1, index3] elif self.scaling[index1, index2] > 0: d_1 = 1/self.distances[index1, index2] for (se, ve), (sg, vg), (sh, vh) in zip( self.yield_pair_energies(index1, index2), self.yield_pair_gradients(index1, index2), self.yield_pair_hessians(index1, index2) ): result -= ( +sh*self.dirouters[index1, index2]*ve +sg*(np.identity(3, float) - self.dirouters[index1, index2])*ve*d_1 +sg*np.outer(self.directions[index1, index2], vg) +sg*np.outer(vg, self.directions[index1, index2]) +se*vh )*self.scaling[index1, index2] return result
[ "def", "hessian_component", "(", "self", ",", "index1", ",", "index2", ")", ":", "result", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ",", "float", ")", "if", "index1", "==", "index2", ":", "for", "index3", "in", "range", "(", "self", ".", "numc", ")", ":", "if", "self", ".", "scaling", "[", "index1", ",", "index3", "]", ">", "0", ":", "d_1", "=", "1", "/", "self", ".", "distances", "[", "index1", ",", "index3", "]", "for", "(", "se", ",", "ve", ")", ",", "(", "sg", ",", "vg", ")", ",", "(", "sh", ",", "vh", ")", "in", "zip", "(", "self", ".", "yield_pair_energies", "(", "index1", ",", "index3", ")", ",", "self", ".", "yield_pair_gradients", "(", "index1", ",", "index3", ")", ",", "self", ".", "yield_pair_hessians", "(", "index1", ",", "index3", ")", ")", ":", "result", "+=", "(", "+", "sh", "*", "self", ".", "dirouters", "[", "index1", ",", "index3", "]", "*", "ve", "+", "sg", "*", "(", "np", ".", "identity", "(", "3", ",", "float", ")", "-", "self", ".", "dirouters", "[", "index1", ",", "index3", "]", ")", "*", "ve", "*", "d_1", "+", "sg", "*", "np", ".", "outer", "(", "self", ".", "directions", "[", "index1", ",", "index3", "]", ",", "vg", ")", "+", "sg", "*", "np", ".", "outer", "(", "vg", ",", "self", ".", "directions", "[", "index1", ",", "index3", "]", ")", "+", "se", "*", "vh", ")", "*", "self", ".", "scaling", "[", "index1", ",", "index3", "]", "elif", "self", ".", "scaling", "[", "index1", ",", "index2", "]", ">", "0", ":", "d_1", "=", "1", "/", "self", ".", "distances", "[", "index1", ",", "index2", "]", "for", "(", "se", ",", "ve", ")", ",", "(", "sg", ",", "vg", ")", ",", "(", "sh", ",", "vh", ")", "in", "zip", "(", "self", ".", "yield_pair_energies", "(", "index1", ",", "index2", ")", ",", "self", ".", "yield_pair_gradients", "(", "index1", ",", "index2", ")", ",", "self", ".", "yield_pair_hessians", "(", "index1", ",", "index2", ")", ")", ":", "result", "-=", "(", "+", "sh", "*", "self", ".", "dirouters", "[", "index1", ",", "index2", "]", "*", "ve", "+", "sg", "*", "(", "np", ".", "identity", "(", "3", ",", "float", ")", "-", "self", ".", "dirouters", "[", "index1", ",", "index2", "]", ")", "*", "ve", "*", "d_1", "+", "sg", "*", "np", ".", "outer", "(", "self", ".", "directions", "[", "index1", ",", "index2", "]", ",", "vg", ")", "+", "sg", "*", "np", ".", "outer", "(", "vg", ",", "self", ".", "directions", "[", "index1", ",", "index2", "]", ")", "+", "se", "*", "vh", ")", "*", "self", ".", "scaling", "[", "index1", ",", "index2", "]", "return", "result" ]
Compute the hessian of the energy for one atom pair
[ "Compute", "the", "hessian", "of", "the", "energy", "for", "one", "atom", "pair" ]
python
train
52.382353
lucapinello/Haystack
haystack/external.py
https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L1090-L1096
def copy(self): """ m.copy() -- Return a 'deep' copy of the motif """ a = Motif() a.__dict__ = self.__dict__.copy() return a
[ "def", "copy", "(", "self", ")", ":", "a", "=", "Motif", "(", ")", "a", ".", "__dict__", "=", "self", ".", "__dict__", ".", "copy", "(", ")", "return", "a" ]
m.copy() -- Return a 'deep' copy of the motif
[ "m", ".", "copy", "()", "--", "Return", "a", "deep", "copy", "of", "the", "motif" ]
python
train
23.714286
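The quotes around 'deep' are doing real work: copying __dict__ duplicates the attribute table, not the objects it references. A quick demonstration with a stand-in class instead of the real Motif:

    # __dict__.copy() gives each instance its own attribute dict, but
    # mutable attribute values remain shared between the two objects.
    class Thing:
        def copy(self):
            a = Thing()
            a.__dict__ = self.__dict__.copy()
            return a

    t = Thing()
    t.counts = [1, 2, 3]
    c = t.copy()
    c.name = "copy"        # new key: does not appear on t
    c.counts.append(4)     # shared list: the change *does* appear on t
    assert not hasattr(t, "name") and t.counts == [1, 2, 3, 4]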
desbma/sacad
sacad/http_helpers.py
https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/http_helpers.py#L189-L198
async def fastStreamedQuery(self, url, *, headers=None, verify=True): """ Send a GET request with short timeout, do not retry, and return streamed response. """ response = await self.session.get(url, headers=self._buildHeaders(headers), timeout=HTTP_SHORT_TIMEOUT, ssl=verify) response.raise_for_status() return response
[ "async", "def", "fastStreamedQuery", "(", "self", ",", "url", ",", "*", ",", "headers", "=", "None", ",", "verify", "=", "True", ")", ":", "response", "=", "await", "self", ".", "session", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_buildHeaders", "(", "headers", ")", ",", "timeout", "=", "HTTP_SHORT_TIMEOUT", ",", "ssl", "=", "verify", ")", "response", ".", "raise_for_status", "(", ")", "return", "response" ]
Send a GET request with short timeout, do not retry, and return streamed response.
[ "Send", "a", "GET", "request", "with", "short", "timeout", "do", "not", "retry", "and", "return", "streamed", "response", "." ]
python
train
44.3
adrn/gala
gala/dynamics/_genfunc/solver.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/_genfunc/solver.py#L35-L78
def solver(AA, N_max, symNx = 2, throw_out_modes=False): """ Constructs the matrix A and the vector b from a timeseries of toy action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where x contains all Fourier components of the generating function with |n|<N_max """ # Find all integer component n_vectors which lie within sphere of radius N_max # Here we have assumed that the potential is symmetric x->-x, y->-y, z->-z # This can be relaxed by changing symN to 1 # Additionally due to time reversal symmetry S_n = -S_-n so we only consider # "half" of the n-vector-space angs = unroll_angles(AA.T[3:].T,np.ones(3)) symNz = 2 NNx = range(-N_max, N_max+1, symNx) NNy = range(-N_max, N_max+1, symNz) NNz = range(-N_max, N_max+1, symNz) n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz) if(not(i==0 and j==0 and k==0) # exclude zero vector and (k>0 # northern hemisphere or (k==0 and j>0) # half of x-y plane or (k==0 and j==0 and i>0)) # half of x axis and np.sqrt(i*i+j*j+k*k)<=N_max)]) # inside sphere xxx = check_each_direction(n_vectors,angs) if(throw_out_modes): n_vectors = np.delete(n_vectors,check_each_direction(n_vectors,angs),axis=0) n = len(n_vectors)+3 b = np.zeros(shape=(n, )) a = np.zeros(shape=(n,n)) a[:3,:3]=len(AA)*np.identity(3) for i in AA: a[:3,3:]+=2.*n_vectors.T[:3]*np.cos(np.dot(n_vectors,i[3:])) a[3:,3:]+=4.*np.dot(n_vectors,n_vectors.T)*np.outer(np.cos(np.dot(n_vectors,i[3:])),np.cos(np.dot(n_vectors,i[3:]))) b[:3]+=i[:3] b[3:]+=2.*np.dot(n_vectors,i[:3])*np.cos(np.dot(n_vectors,i[3:])) a[3:,:3]=a[:3,3:].T return np.array(solve(a,b)), n_vectors
[ "def", "solver", "(", "AA", ",", "N_max", ",", "symNx", "=", "2", ",", "throw_out_modes", "=", "False", ")", ":", "# Find all integer component n_vectors which lie within sphere of radius N_max", "# Here we have assumed that the potential is symmetric x->-x, y->-y, z->-z", "# This can be relaxed by changing symN to 1", "# Additionally due to time reversal symmetry S_n = -S_-n so we only consider", "# \"half\" of the n-vector-space", "angs", "=", "unroll_angles", "(", "AA", ".", "T", "[", "3", ":", "]", ".", "T", ",", "np", ".", "ones", "(", "3", ")", ")", "symNz", "=", "2", "NNx", "=", "range", "(", "-", "N_max", ",", "N_max", "+", "1", ",", "symNx", ")", "NNy", "=", "range", "(", "-", "N_max", ",", "N_max", "+", "1", ",", "symNz", ")", "NNz", "=", "range", "(", "-", "N_max", ",", "N_max", "+", "1", ",", "symNz", ")", "n_vectors", "=", "np", ".", "array", "(", "[", "[", "i", ",", "j", ",", "k", "]", "for", "(", "i", ",", "j", ",", "k", ")", "in", "product", "(", "NNx", ",", "NNy", ",", "NNz", ")", "if", "(", "not", "(", "i", "==", "0", "and", "j", "==", "0", "and", "k", "==", "0", ")", "# exclude zero vector", "and", "(", "k", ">", "0", "# northern hemisphere", "or", "(", "k", "==", "0", "and", "j", ">", "0", ")", "# half of x-y plane", "or", "(", "k", "==", "0", "and", "j", "==", "0", "and", "i", ">", "0", ")", ")", "# half of x axis", "and", "np", ".", "sqrt", "(", "i", "*", "i", "+", "j", "*", "j", "+", "k", "*", "k", ")", "<=", "N_max", ")", "]", ")", "# inside sphere", "xxx", "=", "check_each_direction", "(", "n_vectors", ",", "angs", ")", "if", "(", "throw_out_modes", ")", ":", "n_vectors", "=", "np", ".", "delete", "(", "n_vectors", ",", "check_each_direction", "(", "n_vectors", ",", "angs", ")", ",", "axis", "=", "0", ")", "n", "=", "len", "(", "n_vectors", ")", "+", "3", "b", "=", "np", ".", "zeros", "(", "shape", "=", "(", "n", ",", ")", ")", "a", "=", "np", ".", "zeros", "(", "shape", "=", "(", "n", ",", "n", ")", ")", "a", "[", ":", "3", ",", ":", "3", "]", "=", "len", "(", "AA", ")", "*", "np", ".", "identity", "(", "3", ")", "for", "i", "in", "AA", ":", "a", "[", ":", "3", ",", "3", ":", "]", "+=", "2.", "*", "n_vectors", ".", "T", "[", ":", "3", "]", "*", "np", ".", "cos", "(", "np", ".", "dot", "(", "n_vectors", ",", "i", "[", "3", ":", "]", ")", ")", "a", "[", "3", ":", ",", "3", ":", "]", "+=", "4.", "*", "np", ".", "dot", "(", "n_vectors", ",", "n_vectors", ".", "T", ")", "*", "np", ".", "outer", "(", "np", ".", "cos", "(", "np", ".", "dot", "(", "n_vectors", ",", "i", "[", "3", ":", "]", ")", ")", ",", "np", ".", "cos", "(", "np", ".", "dot", "(", "n_vectors", ",", "i", "[", "3", ":", "]", ")", ")", ")", "b", "[", ":", "3", "]", "+=", "i", "[", ":", "3", "]", "b", "[", "3", ":", "]", "+=", "2.", "*", "np", ".", "dot", "(", "n_vectors", ",", "i", "[", ":", "3", "]", ")", "*", "np", ".", "cos", "(", "np", ".", "dot", "(", "n_vectors", ",", "i", "[", "3", ":", "]", ")", ")", "a", "[", "3", ":", ",", ":", "3", "]", "=", "a", "[", ":", "3", ",", "3", ":", "]", ".", "T", "return", "np", ".", "array", "(", "solve", "(", "a", ",", "b", ")", ")", ",", "n_vectors" ]
Constructs the matrix A and the vector b from a timeseries of toy action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where x contains all Fourier components of the generating function with |n|<N_max
[ "Constructs", "the", "matrix", "A", "and", "the", "vector", "b", "from", "a", "timeseries", "of", "toy", "action", "-", "angles", "AA", "to", "solve", "for", "the", "vector", "x", "=", "(", "J_0", "J_1", "J_2", "S", "...", ")", "where", "x", "contains", "all", "Fourier", "components", "of", "the", "generating", "function", "with", "|n|<N_max" ]
python
train
43.659091
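The subtle part of the routine is enumerating only half of the integer n-vector space. A standalone sketch of that filter, mirroring the defaults above (N_max and the step sizes are the only assumptions):

    from itertools import product
    import numpy as np

    N_max, symNx, symNz = 4, 2, 2   # defaults mirrored from solver()
    rng = lambda step: range(-N_max, N_max + 1, step)
    n_vectors = np.array([
        [i, j, k] for i, j, k in product(rng(symNx), rng(symNz), rng(symNz))
        if not (i == 0 and j == 0 and k == 0)       # exclude zero vector
        and (k > 0                                  # northern hemisphere
             or (k == 0 and j > 0)                  # half of the x-y plane
             or (k == 0 and j == 0 and i > 0))      # half of the x axis
        and i * i + j * j + k * k <= N_max ** 2     # inside the sphere
    ])
    # Time-reversal symmetry S_n = -S_{-n}: no vector and its negation
    # may both be present.
    assert not any((n_vectors == -v).all(axis=1).any() for v in n_vectors)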
zero-os/zerotier_client
zerotier/client_support.py
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/client_support.py#L75-L90
def val_factory(val, datatypes): """ return an instance of `val` that is of type `datatype`. keep track of exceptions so we can produce meaningful error messages. """ exceptions = [] for dt in datatypes: try: if isinstance(val, dt): return val return type_handler_object(val, dt) except Exception as e: exceptions.append(str(e)) # if we get here, we never found a valid value. raise an error raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'. format(val=val, types=datatypes, excs=exceptions))
[ "def", "val_factory", "(", "val", ",", "datatypes", ")", ":", "exceptions", "=", "[", "]", "for", "dt", "in", "datatypes", ":", "try", ":", "if", "isinstance", "(", "val", ",", "dt", ")", ":", "return", "val", "return", "type_handler_object", "(", "val", ",", "dt", ")", "except", "Exception", "as", "e", ":", "exceptions", ".", "append", "(", "str", "(", "e", ")", ")", "# if we get here, we never found a valid value. raise an error", "raise", "ValueError", "(", "'val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'", ".", "format", "(", "val", "=", "val", ",", "types", "=", "datatypes", ",", "excs", "=", "exceptions", ")", ")" ]
return an instance of `val` that is of type `datatype`. keep track of exceptions so we can produce meaningful error messages.
[ "return", "an", "instance", "of", "val", "that", "is", "of", "type", "datatype", ".", "keep", "track", "of", "exceptions", "so", "we", "can", "produce", "meaningful", "error", "messages", "." ]
python
train
40.5
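The same try-each-type pattern in miniature; direct construction stands in for the library's type_handler_object, so this is a sketch of the control flow rather than the real coercion:

    # Accept the value if it already has one of the types; otherwise try
    # each constructor and keep the failure messages so the final error
    # explains every attempt.
    def coerce(val, datatypes):
        errors = []
        for dt in datatypes:
            try:
                if isinstance(val, dt):
                    return val
                return dt(val)   # stand-in for type_handler_object
            except Exception as e:
                errors.append(str(e))
        raise ValueError('Unable to instantiate {!r} from types {}. '
                         'Exceptions: {}'.format(val, datatypes, errors))

    assert coerce("7", (int,)) == 7
    assert coerce(7, (int, str)) == 7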
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L3956-L3968
def _getMemoryBit(cpu, bitbase, bitoffset): """ Calculate address and bit offset given a base address and a bit offset relative to that address (in the form of asm operands) """ assert bitbase.type == 'memory' assert bitbase.size >= bitoffset.size addr = bitbase.address() offt = Operators.SEXTEND(bitoffset.read(), bitoffset.size, bitbase.size) offt_is_neg = offt >= (1 << (bitbase.size - 1)) offt_in_bytes = offt // 8 bitpos = offt % 8 new_addr = addr + Operators.ITEBV(bitbase.size, offt_is_neg, -offt_in_bytes, offt_in_bytes) return (new_addr, bitpos)
[ "def", "_getMemoryBit", "(", "cpu", ",", "bitbase", ",", "bitoffset", ")", ":", "assert", "bitbase", ".", "type", "==", "'memory'", "assert", "bitbase", ".", "size", ">=", "bitoffset", ".", "size", "addr", "=", "bitbase", ".", "address", "(", ")", "offt", "=", "Operators", ".", "SEXTEND", "(", "bitoffset", ".", "read", "(", ")", ",", "bitoffset", ".", "size", ",", "bitbase", ".", "size", ")", "offt_is_neg", "=", "offt", ">=", "(", "1", "<<", "(", "bitbase", ".", "size", "-", "1", ")", ")", "offt_in_bytes", "=", "offt", "//", "8", "bitpos", "=", "offt", "%", "8", "new_addr", "=", "addr", "+", "Operators", ".", "ITEBV", "(", "bitbase", ".", "size", ",", "offt_is_neg", ",", "-", "offt_in_bytes", ",", "offt_in_bytes", ")", "return", "(", "new_addr", ",", "bitpos", ")" ]
Calculate address and bit offset given a base address and a bit offset relative to that address (in the form of asm operands)
[ "Calculate", "address", "and", "bit", "offset", "given", "a", "base", "address", "and", "a", "bit", "offset", "relative", "to", "that", "address", "(", "in", "the", "form", "of", "asm", "operands", ")" ]
python
valid
48.923077
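A concrete-integer sketch of the same BT-style bit addressing: a signed bit offset splits into a byte displacement and a bit position. Plain Python replaces Operators.SEXTEND/ITEBV here since nothing in the sketch is symbolic:

    # Split a sign-extended bit offset into (byte address, bit-in-byte),
    # matching the Intel convention that negative offsets address bytes
    # below the base (floor division, non-negative remainder).
    def memory_bit(base_addr, bitoffset, size=64):
        offt = bitoffset - (1 << size) if bitoffset >> (size - 1) else bitoffset
        return base_addr + (offt >> 3), offt & 7

    assert memory_bit(0x1000, 13) == (0x1001, 5)            # +13 bits
    assert memory_bit(0x1000, (1 << 64) - 3) == (0xFFF, 5)  # -3 bits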
ralphje/imagemounter
imagemounter/volume.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L342-L359
def detect_volume_shadow_copies(self): """Method to call vshadowmount and mount NTFS volume shadow copies. :return: iterable with the :class:`Volume` objects of the VSS :raises CommandNotFoundError: if the underlying command does not exist :raises SubSystemError: if the underlying command fails :raises NoMountpointAvailableError: if there is no mountpoint available """ self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True) try: _util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']]) except Exception as e: logger.exception("Failed mounting the volume shadow copies.") raise SubsystemError(e) else: return self.volumes.detect_volumes(vstype='vss')
[ "def", "detect_volume_shadow_copies", "(", "self", ")", ":", "self", ".", "_make_mountpoint", "(", "var_name", "=", "'vss'", ",", "suffix", "=", "\"vss\"", ",", "in_paths", "=", "True", ")", "try", ":", "_util", ".", "check_call_", "(", "[", "\"vshadowmount\"", ",", "\"-o\"", ",", "str", "(", "self", ".", "offset", ")", ",", "self", ".", "get_raw_path", "(", ")", ",", "self", ".", "_paths", "[", "'vss'", "]", "]", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "\"Failed mounting the volume shadow copies.\"", ")", "raise", "SubsystemError", "(", "e", ")", "else", ":", "return", "self", ".", "volumes", ".", "detect_volumes", "(", "vstype", "=", "'vss'", ")" ]
Method to call vshadowmount and mount NTFS volume shadow copies. :return: iterable with the :class:`Volume` objects of the VSS :raises CommandNotFoundError: if the underlying command does not exist :raises SubSystemError: if the underlying command fails :raises NoMountpointAvailableError: if there is no mountpoint available
[ "Method", "to", "call", "vshadowmount", "and", "mount", "NTFS", "volume", "shadow", "copies", "." ]
python
train
45.666667
takaakiaoki/ofblockmeshdicthelper
ofblockmeshdicthelper/__init__.py
https://github.com/takaakiaoki/ofblockmeshdicthelper/blob/df99e6b0e4f0334c9afe075b4f3ceaccb5bac9fd/ofblockmeshdicthelper/__init__.py#L193-L201
def format(self, vertices): """Format instance to dump vertices is dict of name to Vertex """ index = ' '.join(str(vertices[vn].index) for vn in self.vnames) vcom = ' '.join(self.vnames) # for comment return 'hex ({0:s}) {2:s} ({1[0]:d} {1[1]:d} {1[2]:d}) '\ '{4:s} // {2:s} ({3:s})'.format( index, self.cells, self.name, vcom, self.grading.format())
[ "def", "format", "(", "self", ",", "vertices", ")", ":", "index", "=", "' '", ".", "join", "(", "str", "(", "vertices", "[", "vn", "]", ".", "index", ")", "for", "vn", "in", "self", ".", "vnames", ")", "vcom", "=", "' '", ".", "join", "(", "self", ".", "vnames", ")", "# for comment", "return", "'hex ({0:s}) {2:s} ({1[0]:d} {1[1]:d} {1[2]:d}) '", "'{4:s} // {2:s} ({3:s})'", ".", "format", "(", "index", ",", "self", ".", "cells", ",", "self", ".", "name", ",", "vcom", ",", "self", ".", "grading", ".", "format", "(", ")", ")" ]
Format instance to dump vertices is dict of name to Vertex
[ "Format", "instance", "to", "dump", "vertices", "is", "dict", "of", "name", "to", "Vertex" ]
python
train
47.444444
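A standalone rendering of that format string, mainly to show the {1[0]:d} item-indexing inside a positional field; every value below is made up:

    # {0:s} -> joined vertex indices, {1[0]:d} -> first entry of `cells`,
    # {2:s}/{3:s}/{4:s} -> name, vertex-name comment, grading spec.
    index = "0 1 2 3 4 5 6 7"
    cells = (10, 20, 30)
    name, vcom = "b0", "v0 v1 v2 v3 v4 v5 v6 v7"
    grading = "simpleGrading (1 1 1)"
    line = ('hex ({0:s}) {2:s} ({1[0]:d} {1[1]:d} {1[2]:d}) '
            '{4:s} // {2:s} ({3:s})').format(index, cells, name, vcom, grading)
    assert line.startswith("hex (0 1 2 3 4 5 6 7) b0 (10 20 30)")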
ellmetha/django-machina
machina/apps/forum_moderation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L357-L385
def get_context_data(self, **kwargs): """ Returns the context data to provide to the template. """ context = super().get_context_data(**kwargs) post = self.object topic = post.topic # Handles the case when a poll is associated to the topic. try: if hasattr(topic, 'poll') and topic.poll.options.exists(): poll = topic.poll context['poll'] = poll context['poll_options'] = poll.options.all() except ObjectDoesNotExist: # pragma: no cover pass if not post.is_topic_head: # Add the topic review previous_posts = ( topic.posts .filter(approved=True, created__lte=post.created) .select_related('poster', 'updated_by') .prefetch_related('attachments', 'poster__forum_profile') .order_by('-created') ) previous_posts = previous_posts[:machina_settings.TOPIC_REVIEW_POSTS_NUMBER] context['previous_posts'] = previous_posts return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "post", "=", "self", ".", "object", "topic", "=", "post", ".", "topic", "# Handles the case when a poll is associated to the topic.", "try", ":", "if", "hasattr", "(", "topic", ",", "'poll'", ")", "and", "topic", ".", "poll", ".", "options", ".", "exists", "(", ")", ":", "poll", "=", "topic", ".", "poll", "context", "[", "'poll'", "]", "=", "poll", "context", "[", "'poll_options'", "]", "=", "poll", ".", "options", ".", "all", "(", ")", "except", "ObjectDoesNotExist", ":", "# pragma: no cover", "pass", "if", "not", "post", ".", "is_topic_head", ":", "# Add the topic review", "previous_posts", "=", "(", "topic", ".", "posts", ".", "filter", "(", "approved", "=", "True", ",", "created__lte", "=", "post", ".", "created", ")", ".", "select_related", "(", "'poster'", ",", "'updated_by'", ")", ".", "prefetch_related", "(", "'attachments'", ",", "'poster__forum_profile'", ")", ".", "order_by", "(", "'-created'", ")", ")", "previous_posts", "=", "previous_posts", "[", ":", "machina_settings", ".", "TOPIC_REVIEW_POSTS_NUMBER", "]", "context", "[", "'previous_posts'", "]", "=", "previous_posts", "return", "context" ]
Returns the context data to provide to the template.
[ "Returns", "the", "context", "data", "to", "provide", "to", "the", "template", "." ]
python
train
37.586207
JMSwag/dsdev-utils
dsdev_utils/paths.py
https://github.com/JMSwag/dsdev-utils/blob/5adbf9b3fd9fff92d1dd714423b08e26a5038e14/dsdev_utils/paths.py#L36-L47
def get_mac_dot_app_dir(directory): """Returns parent directory of mac .app Args: directory (str): Current directory Returns: (str): Parent directory of mac .app """ return os.path.dirname(os.path.dirname(os.path.dirname(directory)))
[ "def", "get_mac_dot_app_dir", "(", "directory", ")", ":", "return", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "directory", ")", ")", ")" ]
Returns parent directory of mac .app Args: directory (str): Current directory Returns: (str): Parent directory of mac .app
[ "Returns", "parent", "directory", "of", "mac", ".", "app" ]
python
train
21.666667
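Three dirname calls walk up out of the bundle's Contents/MacOS directory; a quick check with a typical (hypothetical) .app layout:

    import os

    # Executables live at <parent>/Example.app/Contents/MacOS, so three
    # dirname hops land on the directory containing the bundle.
    directory = "/Applications/Example.app/Contents/MacOS"
    parent = os.path.dirname(os.path.dirname(os.path.dirname(directory)))
    assert parent == "/Applications"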
linkhub-sdk/popbill.py
popbill/closedownService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/closedownService.py#L73-L88
def checkCorpNums(self, MemberCorpNum, CorpNumList): """ 휴폐업조회 대량 확인, 최대 1000건 args MemberCorpNum : 팝빌회원 사업자번호 CorpNumList : 조회할 사업자번호 배열 return 휴폐업정보 Object as List raise PopbillException """ if CorpNumList == None or len(CorpNumList) < 1: raise PopbillException(-99999999,"조죄할 사업자번호 목록이 입력되지 않았습니다.") postData = self._stringtify(CorpNumList) return self._httppost('/CloseDown',postData,MemberCorpNum)
[ "def", "checkCorpNums", "(", "self", ",", "MemberCorpNum", ",", "CorpNumList", ")", ":", "if", "CorpNumList", "==", "None", "or", "len", "(", "CorpNumList", ")", "<", "1", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"조죄할 사업자번호 목록이 입력되지 않았습니다.\")", "", "postData", "=", "self", ".", "_stringtify", "(", "CorpNumList", ")", "return", "self", ".", "_httppost", "(", "'/CloseDown'", ",", "postData", ",", "MemberCorpNum", ")" ]
Bulk closedown status check, up to 1000 records args MemberCorpNum : Popbill member business registration number CorpNumList : array of business registration numbers to query return closedown status info Object as List raise PopbillException
[ "Bulk", "closedown", "status", "check", "up", "to", "1000", "records", "args", "MemberCorpNum", ":", "Popbill", "member", "business", "registration", "number", "CorpNumList", ":", "array", "of", "business", "registration", "numbers", "to", "query", "return", "closedown", "status", "info", "Object", "as", "List", "raise", "PopbillException" ]
python
train
33.8125
mabuchilab/QNET
src/qnet/convert/to_qutip.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/convert/to_qutip.py#L121-L178
def SLH_to_qutip(slh, full_space=None, time_symbol=None, convert_as='pyfunc'): """Generate and return QuTiP representation matrices for the Hamiltonian and the collapse operators. Any inhomogeneities in the Lindblad operators (resulting from coherent drives) will be moved into the Hamiltonian, cf. :func:`~qnet.algebra.circuit_algebra.move_drive_to_H`. Args: slh (SLH): The SLH object from which to generate the qutip data full_space (HilbertSpace or None): The Hilbert space in which to represent the operators. If None, the space of `shl` will be used time_symbol (:class:`sympy.Symbol` or None): The symbol (if any) expressing time dependence (usually 't') convert_as (str): How to express time dependencies to qutip. Must be 'pyfunc' or 'str' Returns: tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations, where ``H`` and each ``L`` may be a nested list to express time dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and ``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string (``convert_as='str'``) or a function (``convert_as='pyfunc'``) Raises: AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is invalid for numerical conversion """ if full_space: if not full_space >= slh.space: raise AlgebraError("full_space="+str(full_space)+" needs to " "at least include slh.space = "+str(slh.space)) else: full_space = slh.space if full_space == TrivialSpace: raise AlgebraError( "Cannot convert SLH object in TrivialSpace. " "You may pass a non-trivial `full_space`") slh = move_drive_to_H(slh) if time_symbol is None: H = convert_to_qutip(slh.H, full_space=full_space) Ls = [] for L in slh.Ls: if is_scalar(L): L = L * IdentityOperator L_qutip = convert_to_qutip(L, full_space=full_space) if L_qutip.norm('max') > 0: Ls.append(L_qutip) else: H = _time_dependent_to_qutip(slh.H, full_space, time_symbol, convert_as) Ls = [] for L in slh.Ls: if is_scalar(L): L = L * IdentityOperator L_qutip = _time_dependent_to_qutip(L, full_space, time_symbol, convert_as) Ls.append(L_qutip) return H, Ls
[ "def", "SLH_to_qutip", "(", "slh", ",", "full_space", "=", "None", ",", "time_symbol", "=", "None", ",", "convert_as", "=", "'pyfunc'", ")", ":", "if", "full_space", ":", "if", "not", "full_space", ">=", "slh", ".", "space", ":", "raise", "AlgebraError", "(", "\"full_space=\"", "+", "str", "(", "full_space", ")", "+", "\" needs to \"", "\"at least include slh.space = \"", "+", "str", "(", "slh", ".", "space", ")", ")", "else", ":", "full_space", "=", "slh", ".", "space", "if", "full_space", "==", "TrivialSpace", ":", "raise", "AlgebraError", "(", "\"Cannot convert SLH object in TrivialSpace. \"", "\"You may pass a non-trivial `full_space`\"", ")", "slh", "=", "move_drive_to_H", "(", "slh", ")", "if", "time_symbol", "is", "None", ":", "H", "=", "convert_to_qutip", "(", "slh", ".", "H", ",", "full_space", "=", "full_space", ")", "Ls", "=", "[", "]", "for", "L", "in", "slh", ".", "Ls", ":", "if", "is_scalar", "(", "L", ")", ":", "L", "=", "L", "*", "IdentityOperator", "L_qutip", "=", "convert_to_qutip", "(", "L", ",", "full_space", "=", "full_space", ")", "if", "L_qutip", ".", "norm", "(", "'max'", ")", ">", "0", ":", "Ls", ".", "append", "(", "L_qutip", ")", "else", ":", "H", "=", "_time_dependent_to_qutip", "(", "slh", ".", "H", ",", "full_space", ",", "time_symbol", ",", "convert_as", ")", "Ls", "=", "[", "]", "for", "L", "in", "slh", ".", "Ls", ":", "if", "is_scalar", "(", "L", ")", ":", "L", "=", "L", "*", "IdentityOperator", "L_qutip", "=", "_time_dependent_to_qutip", "(", "L", ",", "full_space", ",", "time_symbol", ",", "convert_as", ")", "Ls", ".", "append", "(", "L_qutip", ")", "return", "H", ",", "Ls" ]
Generate and return QuTiP representation matrices for the Hamiltonian and the collapse operators. Any inhomogeneities in the Lindblad operators (resulting from coherent drives) will be moved into the Hamiltonian, cf. :func:`~qnet.algebra.circuit_algebra.move_drive_to_H`. Args: slh (SLH): The SLH object from which to generate the qutip data full_space (HilbertSpace or None): The Hilbert space in which to represent the operators. If None, the space of `slh` will be used time_symbol (:class:`sympy.Symbol` or None): The symbol (if any) expressing time dependence (usually 't') convert_as (str): How to express time dependencies to qutip. Must be 'pyfunc' or 'str' Returns: tuple ``(H, [L1, L2, ...])`` as numerical `qutip.Qobj` representations, where ``H`` and each ``L`` may be a nested list to express time dependence, e.g. ``H = [H0, [H1, eps_t]]``, where ``H0`` and ``H1`` are of type `qutip.Qobj`, and ``eps_t`` is either a string (``convert_as='str'``) or a function (``convert_as='pyfunc'``) Raises: AlgebraError: If the Hilbert space (`slh.space` or `full_space`) is invalid for numerical conversion
[ "Generate", "and", "return", "QuTiP", "representation", "matrices", "for", "the", "Hamiltonian", "and", "the", "collapse", "operators", ".", "Any", "inhomogeneities", "in", "the", "Lindblad", "operators", "(", "resulting", "from", "coherent", "drives", ")", "will", "be", "moved", "into", "the", "Hamiltonian", "cf", ".", ":", "func", ":", "~qnet", ".", "algebra", ".", "circuit_algebra", ".", "move_drive_to_H", "." ]
python
train
43.758621
kevinconway/confpy
confpy/core/namespace.py
https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/core/namespace.py#L81-L100
def register(self, name, option): """Register a new option with the namespace. Args: name (str): The name to register the option under. option (option.Option): The option object to register. Raises: TypeError: If the option is not an option.Option object. ValueError: If the name is already registered. """ if name in self._options: raise ValueError("Option {0} already exists.".format(name)) if not isinstance(option, opt.Option): raise TypeError("Options must be of type Option.") self._options[name] = option
[ "def", "register", "(", "self", ",", "name", ",", "option", ")", ":", "if", "name", "in", "self", ".", "_options", ":", "raise", "ValueError", "(", "\"Option {0} already exists.\"", ".", "format", "(", "name", ")", ")", "if", "not", "isinstance", "(", "option", ",", "opt", ".", "Option", ")", ":", "raise", "TypeError", "(", "\"Options must be of type Option.\"", ")", "self", ".", "_options", "[", "name", "]", "=", "option" ]
Register a new option with the namespace. Args: name (str): The name to register the option under. option (option.Option): The option object to register. Raises: TypeError: If the option is not an option.Option object. ValueError: If the name is already registered.
[ "Register", "a", "new", "option", "with", "the", "namespace", ".", "Args", ":", "name", "(", "str", ")", ":", "The", "name", "to", "register", "the", "option", "under", ".", "option", "(", "option", ".", "Option", ")", ":", "The", "option", "object", "to", "register", ".", "Raises", ":", "TypeError", ":", "If", "the", "option", "is", "not", "an", "option", ".", "Option", "object", ".", "ValueError", ":", "If", "the", "name", "is", "already", "registered", "." ]
python
train
32.25
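A minimal registry honoring the same contract (ValueError on duplicates, TypeError on wrong types), with a stand-in Option base so the snippet is self-contained:

    class Option:
        pass

    class Namespace:
        def __init__(self):
            self._options = {}

        def register(self, name, option):
            # Duplicate names are rejected before the type check, matching
            # the order of the guards above.
            if name in self._options:
                raise ValueError("Option {0} already exists.".format(name))
            if not isinstance(option, Option):
                raise TypeError("Options must be of type Option.")
            self._options[name] = option

    ns = Namespace()
    ns.register("port", Option())
    try:
        ns.register("port", Option())
    except ValueError:
        pass  # duplicate correctly rejected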
Groundworkstech/pybfd
pybfd/section.py
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/section.py#L477-L479
def get_content(self, offset, size): """Return the specified number of bytes from the current section.""" return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
[ "def", "get_content", "(", "self", ",", "offset", ",", "size", ")", ":", "return", "_bfd", ".", "section_get_content", "(", "self", ".", "bfd", ",", "self", ".", "_ptr", ",", "offset", ",", "size", ")" ]
Return the specified number of bytes from the current section.
[ "Return", "the", "specified", "number", "of", "bytes", "from", "the", "current", "section", "." ]
python
train
62
tensorpack/tensorpack
tensorpack/tfutils/varmanip.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/varmanip.py#L196-L211
def load_chkpt_vars(model_path): """ Load all variables from a checkpoint to a dict. Args: model_path(str): path to a checkpoint. Returns: dict: a name:value dict """ model_path = get_checkpoint_path(model_path) reader = tfv1.train.NewCheckpointReader(model_path) var_names = reader.get_variable_to_shape_map().keys() result = {} for n in var_names: result[n] = reader.get_tensor(n) return result
[ "def", "load_chkpt_vars", "(", "model_path", ")", ":", "model_path", "=", "get_checkpoint_path", "(", "model_path", ")", "reader", "=", "tfv1", ".", "train", ".", "NewCheckpointReader", "(", "model_path", ")", "var_names", "=", "reader", ".", "get_variable_to_shape_map", "(", ")", ".", "keys", "(", ")", "result", "=", "{", "}", "for", "n", "in", "var_names", ":", "result", "[", "n", "]", "=", "reader", ".", "get_tensor", "(", "n", ")", "return", "result" ]
Load all variables from a checkpoint to a dict. Args: model_path(str): path to a checkpoint. Returns: dict: a name:value dict
[ "Load", "all", "variables", "from", "a", "checkpoint", "to", "a", "dict", "." ]
python
train
27.9375
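A hedged usage sketch: tfv1 above is TensorFlow's v1 namespace, so under TF2 the same reader is reachable via tf.compat.v1 (the checkpoint path is hypothetical):

    import tensorflow as tf

    reader = tf.compat.v1.train.NewCheckpointReader("/path/to/model.ckpt")
    for name, shape in reader.get_variable_to_shape_map().items():
        value = reader.get_tensor(name)   # numpy array per variable
        print(name, shape, value.dtype)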
upsight/doctor
doctor/docs/base.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/base.py#L365-L448
def get_resource_object_doc_lines() -> List[str]: """Generate documentation lines for all collected resource objects. As API documentation is generated we keep a running list of objects used in request parameters and responses. This section will generate documentation for each object and provide an inline reference in the API documentation. :returns: A list of lines required to generate the documentation. """ # First loop through all resources and make sure to add any properties that # are objects and not already in `ALL_RESOURCES`. We iterate over a copy # since we will be modifying the dict during the loop. for resource_name, a_type in ALL_RESOURCES.copy().items(): for prop_a_type in a_type.properties.values(): if issubclass(prop_a_type, Object): resource_name = prop_a_type.title if resource_name is None: class_name = prop_a_type.__name__ resource_name = class_name_to_resource_name(class_name) ALL_RESOURCES[resource_name] = prop_a_type elif (issubclass(prop_a_type, Array) and prop_a_type.items is not None and not isinstance(prop_a_type.items, list) and issubclass(prop_a_type.items, Object)): # This means the type is an array of objects, so we want to # collect the object as a resource we can document later. resource_name = prop_a_type.items.title if resource_name is None: class_name = prop_a_type.items.__name__ resource_name = class_name_to_resource_name(class_name) ALL_RESOURCES[resource_name] = prop_a_type.items # If we don't have any resources to document, just return. if not ALL_RESOURCES: return [] lines = ['Resource Objects', '----------------'] for resource_name in sorted(ALL_RESOURCES.keys()): a_type = ALL_RESOURCES[resource_name] # First add a reference to the resource resource_ref = '_resource-{}'.format( '-'.join(resource_name.lower().split(' '))) lines.extend(['.. {}:'.format(resource_ref), '']) # Add resource name heading lines.extend([resource_name, '#' * len(resource_name)]) # Add resource description lines.extend([a_type.description, '']) # Only document attributes if it has properties defined. if a_type.properties: # Add attributes documentation. lines.extend(['Attributes', '**********']) for prop in a_type.properties: prop_a_type = a_type.properties[prop] description = a_type.properties[prop].description.strip() # Add any object reference if the property is an object or # an array of objects. obj_ref = '' if issubclass(prop_a_type, Object): obj_ref = get_object_reference(prop_a_type) elif (issubclass(prop_a_type, Array) and prop_a_type.items is not None and not isinstance(prop_a_type.items, list) and issubclass(prop_a_type.items, Object)): # This means the type is an array of objects. obj_ref = get_object_reference(prop_a_type.items) elif (issubclass(prop_a_type, Array) and prop_a_type.items is not None): description += get_array_items_description(prop_a_type) native_type = a_type.properties[prop].native_type.__name__ if prop in a_type.required: description = '**Required**. ' + description lines.append('* **{}** (*{}*) - {}{}'.format( prop, native_type, description, obj_ref).strip()) lines.append('') # Add example of object. lines.extend(['Example', '*******']) example = a_type.get_example() pretty_json = json.dumps(example, separators=(',', ': '), indent=4, sort_keys=True) pretty_json_lines = prefix_lines(pretty_json, ' ') lines.extend(['.. code-block:: json', '']) lines.extend(pretty_json_lines) return lines
[ "def", "get_resource_object_doc_lines", "(", ")", "->", "List", "[", "str", "]", ":", "# First loop through all resources and make sure to add any properties that", "# are objects and not already in `ALL_RESOURCES`. We iterate over a copy", "# since we will be modifying the dict during the loop.", "for", "resource_name", ",", "a_type", "in", "ALL_RESOURCES", ".", "copy", "(", ")", ".", "items", "(", ")", ":", "for", "prop_a_type", "in", "a_type", ".", "properties", ".", "values", "(", ")", ":", "if", "issubclass", "(", "prop_a_type", ",", "Object", ")", ":", "resource_name", "=", "prop_a_type", ".", "title", "if", "resource_name", "is", "None", ":", "class_name", "=", "prop_a_type", ".", "__name__", "resource_name", "=", "class_name_to_resource_name", "(", "class_name", ")", "ALL_RESOURCES", "[", "resource_name", "]", "=", "prop_a_type", "elif", "(", "issubclass", "(", "prop_a_type", ",", "Array", ")", "and", "prop_a_type", ".", "items", "is", "not", "None", "and", "not", "isinstance", "(", "prop_a_type", ".", "items", ",", "list", ")", "and", "issubclass", "(", "prop_a_type", ".", "items", ",", "Object", ")", ")", ":", "# This means the type is an array of objects, so we want to", "# collect the object as a resource we can document later.", "resource_name", "=", "prop_a_type", ".", "items", ".", "title", "if", "resource_name", "is", "None", ":", "class_name", "=", "prop_a_type", ".", "items", ".", "__name__", "resource_name", "=", "class_name_to_resource_name", "(", "class_name", ")", "ALL_RESOURCES", "[", "resource_name", "]", "=", "prop_a_type", ".", "items", "# If we don't have any resources to document, just return.", "if", "not", "ALL_RESOURCES", ":", "return", "[", "]", "lines", "=", "[", "'Resource Objects'", ",", "'----------------'", "]", "for", "resource_name", "in", "sorted", "(", "ALL_RESOURCES", ".", "keys", "(", ")", ")", ":", "a_type", "=", "ALL_RESOURCES", "[", "resource_name", "]", "# First add a reference to the resource", "resource_ref", "=", "'_resource-{}'", ".", "format", "(", "'-'", ".", "join", "(", "resource_name", ".", "lower", "(", ")", ".", "split", "(", "' '", ")", ")", ")", "lines", ".", "extend", "(", "[", "'.. 
{}:'", ".", "format", "(", "resource_ref", ")", ",", "''", "]", ")", "# Add resource name heading", "lines", ".", "extend", "(", "[", "resource_name", ",", "'#'", "*", "len", "(", "resource_name", ")", "]", ")", "# Add resource description", "lines", ".", "extend", "(", "[", "a_type", ".", "description", ",", "''", "]", ")", "# Only document attributes if it has properties defined.", "if", "a_type", ".", "properties", ":", "# Add attributes documentation.", "lines", ".", "extend", "(", "[", "'Attributes'", ",", "'**********'", "]", ")", "for", "prop", "in", "a_type", ".", "properties", ":", "prop_a_type", "=", "a_type", ".", "properties", "[", "prop", "]", "description", "=", "a_type", ".", "properties", "[", "prop", "]", ".", "description", ".", "strip", "(", ")", "# Add any object reference if the property is an object or", "# an array of objects.", "obj_ref", "=", "''", "if", "issubclass", "(", "prop_a_type", ",", "Object", ")", ":", "obj_ref", "=", "get_object_reference", "(", "prop_a_type", ")", "elif", "(", "issubclass", "(", "prop_a_type", ",", "Array", ")", "and", "prop_a_type", ".", "items", "is", "not", "None", "and", "not", "isinstance", "(", "prop_a_type", ".", "items", ",", "list", ")", "and", "issubclass", "(", "prop_a_type", ".", "items", ",", "Object", ")", ")", ":", "# This means the type is an array of objects.", "obj_ref", "=", "get_object_reference", "(", "prop_a_type", ".", "items", ")", "elif", "(", "issubclass", "(", "prop_a_type", ",", "Array", ")", "and", "prop_a_type", ".", "items", "is", "not", "None", ")", ":", "description", "+=", "get_array_items_description", "(", "prop_a_type", ")", "native_type", "=", "a_type", ".", "properties", "[", "prop", "]", ".", "native_type", ".", "__name__", "if", "prop", "in", "a_type", ".", "required", ":", "description", "=", "'**Required**. '", "+", "description", "lines", ".", "append", "(", "'* **{}** (*{}*) - {}{}'", ".", "format", "(", "prop", ",", "native_type", ",", "description", ",", "obj_ref", ")", ".", "strip", "(", ")", ")", "lines", ".", "append", "(", "''", ")", "# Add example of object.", "lines", ".", "extend", "(", "[", "'Example'", ",", "'*******'", "]", ")", "example", "=", "a_type", ".", "get_example", "(", ")", "pretty_json", "=", "json", ".", "dumps", "(", "example", ",", "separators", "=", "(", "','", ",", "': '", ")", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", "pretty_json_lines", "=", "prefix_lines", "(", "pretty_json", ",", "' '", ")", "lines", ".", "extend", "(", "[", "'.. code-block:: json'", ",", "''", "]", ")", "lines", ".", "extend", "(", "pretty_json_lines", ")", "return", "lines" ]
Generate documentation lines for all collected resource objects. As API documentation is generated we keep a running list of objects used in request parameters and responses. This section will generate documentation for each object and provide an inline reference in the API documentation. :returns: A list of lines required to generate the documentation.
[ "Generate", "documentation", "lines", "for", "all", "collected", "resource", "objects", "." ]
python
train
51.321429
F-Secure/see
plugins/agent.py
https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/plugins/agent.py#L97-L105
def store_file(self, folder, name): """Stores the uploaded file in the given path.""" path = os.path.join(folder, name) length = self.headers['content-length'] with open(path, 'wb') as sample: sample.write(self.rfile.read(int(length))) return path
[ "def", "store_file", "(", "self", ",", "folder", ",", "name", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "name", ")", "length", "=", "self", ".", "headers", "[", "'content-length'", "]", "with", "open", "(", "path", ",", "'wb'", ")", "as", "sample", ":", "sample", ".", "write", "(", "self", ".", "rfile", ".", "read", "(", "int", "(", "length", ")", ")", ")", "return", "path" ]
Stores the uploaded file in the given path.
[ "Stores", "the", "uploaded", "file", "in", "the", "given", "path", "." ]
python
train
32.555556
praekeltfoundation/molo.commenting
molo/commenting/managers.py
https://github.com/praekeltfoundation/molo.commenting/blob/94549bd75e4a5c5b3db43149e32d636330b3969c/molo/commenting/managers.py#L9-L18
def for_model(self, model): """ QuerySet for all comments for a particular model (either an instance or a class). """ ct = ContentType.objects.get_for_model(model) qs = self.get_queryset().filter(content_type=ct) if isinstance(model, models.Model): qs = qs.filter(object_pk=force_text(model._get_pk_val())) return qs
[ "def", "for_model", "(", "self", ",", "model", ")", ":", "ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "model", ")", "qs", "=", "self", ".", "get_queryset", "(", ")", ".", "filter", "(", "content_type", "=", "ct", ")", "if", "isinstance", "(", "model", ",", "models", ".", "Model", ")", ":", "qs", "=", "qs", ".", "filter", "(", "object_pk", "=", "force_text", "(", "model", ".", "_get_pk_val", "(", ")", ")", ")", "return", "qs" ]
QuerySet for all comments for a particular model (either an instance or a class).
[ "QuerySet", "for", "all", "comments", "for", "a", "particular", "model", "(", "either", "an", "instance", "or", "a", "class", ")", "." ]
python
train
38.3
SwissDataScienceCenter/renku-python
renku/models/projects.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/models/projects.py#L74-L82
def create(self, name=None, **kwargs): """Create a new project. :param name: The name of the project. :returns: An instance of the newly create project. :rtype: renku.models.projects.Project """ data = self._client.api.create_project({'name': name}) return self.Meta.model(data, client=self._client, collection=self)
[ "def", "create", "(", "self", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "_client", ".", "api", ".", "create_project", "(", "{", "'name'", ":", "name", "}", ")", "return", "self", ".", "Meta", ".", "model", "(", "data", ",", "client", "=", "self", ".", "_client", ",", "collection", "=", "self", ")" ]
Create a new project. :param name: The name of the project. :returns: An instance of the newly created project. :rtype: renku.models.projects.Project
[ "Create", "a", "new", "project", "." ]
python
train
40.555556
basho/riak-python-client
riak/client/operations.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1034-L1064
def get_counter(self, transport, bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None): """get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\ notfound_ok=None) Gets the value of a counter. .. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are deprecated in favor of the :class:`~riak.datatypes.Counter` datatype. .. note:: This request is automatically retried :attr:`retries` times if it fails due to network error. :param bucket: the bucket of the counter :type bucket: RiakBucket :param key: the key of the counter :type key: string :param r: the read quorum :type r: integer, string, None :param pr: the primary read quorum :type pr: integer, string, None :param basic_quorum: whether to use the "basic quorum" policy for not-founds :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool :rtype: integer """ return transport.get_counter(bucket, key, r=r, pr=pr)
[ "def", "get_counter", "(", "self", ",", "transport", ",", "bucket", ",", "key", ",", "r", "=", "None", ",", "pr", "=", "None", ",", "basic_quorum", "=", "None", ",", "notfound_ok", "=", "None", ")", ":", "return", "transport", ".", "get_counter", "(", "bucket", ",", "key", ",", "r", "=", "r", ",", "pr", "=", "pr", ")" ]
get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\ notfound_ok=None) Gets the value of a counter. .. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are deprecated in favor of the :class:`~riak.datatypes.Counter` datatype. .. note:: This request is automatically retried :attr:`retries` times if it fails due to network error. :param bucket: the bucket of the counter :type bucket: RiakBucket :param key: the key of the counter :type key: string :param r: the read quorum :type r: integer, string, None :param pr: the primary read quorum :type pr: integer, string, None :param basic_quorum: whether to use the "basic quorum" policy for not-founds :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool :rtype: integer
[ "get_counter", "(", "bucket", "key", "r", "=", "None", "pr", "=", "None", "basic_quorum", "=", "None", "\\", "notfound_ok", "=", "None", ")" ]
python
train
37.903226
Robpol86/terminaltables
terminaltables/width_and_alignment.py
https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/terminaltables/width_and_alignment.py#L11-L39
def visible_width(string): """Get the visible width of a unicode string. Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters. From: https://github.com/Robpol86/terminaltables/pull/9 :param str string: String to measure. :return: String's width. :rtype: int """ if '\033' in string: string = RE_COLOR_ANSI.sub('', string) # Convert to unicode. try: string = string.decode('u8') except (AttributeError, UnicodeEncodeError): pass width = 0 for char in string: if unicodedata.east_asian_width(char) in ('F', 'W'): width += 2 else: width += 1 return width
[ "def", "visible_width", "(", "string", ")", ":", "if", "'\\033'", "in", "string", ":", "string", "=", "RE_COLOR_ANSI", ".", "sub", "(", "''", ",", "string", ")", "# Convert to unicode.", "try", ":", "string", "=", "string", ".", "decode", "(", "'u8'", ")", "except", "(", "AttributeError", ",", "UnicodeEncodeError", ")", ":", "pass", "width", "=", "0", "for", "char", "in", "string", ":", "if", "unicodedata", ".", "east_asian_width", "(", "char", ")", "in", "(", "'F'", ",", "'W'", ")", ":", "width", "+=", "2", "else", ":", "width", "+=", "1", "return", "width" ]
Get the visible width of a unicode string. Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters. From: https://github.com/Robpol86/terminaltables/pull/9 :param str string: String to measure. :return: String's width. :rtype: int
[ "Get", "the", "visible", "width", "of", "a", "unicode", "string", "." ]
python
train
23.793103
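The east-asian-width rule in isolation: code points classified 'F' (fullwidth) or 'W' (wide) occupy two terminal cells, everything else one:

    import unicodedata

    def cell_width(char):
        # Fullwidth/Wide code points take two columns in a terminal.
        return 2 if unicodedata.east_asian_width(char) in ("F", "W") else 1

    assert sum(map(cell_width, "abc")) == 3    # latin: one cell each
    assert sum(map(cell_width, "蓝色")) == 4   # CJK: two cells each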
mjirik/io3d
io3d/datareader.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datareader.py#L276-L288
def _fix_sitk_bug(path, metadata): """There is a bug in simple ITK for Z axis in 3D images. This is a fix. :param path: path to dicom file to read :param metadata: metadata to correct :return: corrected metadata """ ds = dicom.read_file(path) try: metadata["voxelsize_mm"][0] = ds.SpacingBetweenSlices except Exception as e: logger.warning("Read dicom 'SpacingBetweenSlices' failed: ", e) return metadata
[ "def", "_fix_sitk_bug", "(", "path", ",", "metadata", ")", ":", "ds", "=", "dicom", ".", "read_file", "(", "path", ")", "try", ":", "metadata", "[", "\"voxelsize_mm\"", "]", "[", "0", "]", "=", "ds", ".", "SpacingBetweenSlices", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "\"Read dicom 'SpacingBetweenSlices' failed: \"", ",", "e", ")", "return", "metadata" ]
There is a bug in simple ITK for Z axis in 3D images. This is a fix.

:param path: path to dicom file to read
:param metadata: metadata to correct
:return: corrected metadata
[ "There", "is", "a", "bug", "in", "simple", "ITK", "for", "Z", "axis", "in", "3D", "images", ".", "This", "is", "a", "fix", "." ]
python
train
37.615385
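For context, the same correction can be sketched outside the reader with pydicom; the file name and initial voxel sizes below are made up, and ``pydicom.dcmread`` is the modern spelling of the ``dicom.read_file`` call used in the helper.

import pydicom

metadata = {"voxelsize_mm": [1.0, 0.5, 0.5]}  # index 0 is the slice (Z) axis
ds = pydicom.dcmread("slice0.dcm")  # hypothetical file from the series

# Overwrite the Z spacing with SpacingBetweenSlices when the tag is present,
# mirroring what _fix_sitk_bug does for SimpleITK-read volumes.
if hasattr(ds, "SpacingBetweenSlices"):
    metadata["voxelsize_mm"][0] = float(ds.SpacingBetweenSlices)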
cni/MRS
MRS/data.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/data.py#L87-L105
def check_md5(filename, stored_md5):
    """
    Computes the md5 of filename and check if it matches with the supplied
    string md5

    Input
    -----
    filename : string
        Path to a file.
    md5 : string
        Known md5 of filename to check against.

    """
    computed_md5 = _get_file_md5(filename)
    if stored_md5 != computed_md5:
        print ("MD5 checksum of filename", filename,
               "failed. Expected MD5 was", stored_md5,
               "but computed MD5 was", computed_md5, '\n',
               "Please check if the data has been downloaded correctly or "
               "if the upstream data has changed.")
[ "def", "check_md5", "(", "filename", ",", "stored_md5", ")", ":", "computed_md5", "=", "_get_file_md5", "(", "filename", ")", "if", "stored_md5", "!=", "computed_md5", ":", "print", "(", "\"MD5 checksum of filename\"", ",", "filename", ",", "\"failed. Expected MD5 was\"", ",", "stored_md5", ",", "\"but computed MD5 was\"", ",", "computed_md5", ",", "'\\n'", ",", "\"Please check if the data has been downloaded correctly or if the upstream data has changed.\"", ")" ]
Computes the md5 of filename and check if it matches with the supplied
string md5

Input
-----
filename : string
    Path to a file.
md5 : string
    Known md5 of filename to check against.
[ "Computes", "the", "md5", "of", "filename", "and", "check", "if", "it", "matches", "with", "the", "supplied", "string", "md5" ]
python
train
32.263158
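``_get_file_md5`` is not shown in this record; a standard-library stand-in for the digest step might look like the following, with the chunked read keeping memory use constant on large files. The function name and chunk size are this sketch's own choices, not the project's.

import hashlib

def file_md5(path, chunk_size=2 ** 20):
    """Hypothetical equivalent of _get_file_md5 using only hashlib."""
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        # Read in 1 MiB chunks until the iterator yields the empty sentinel.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()

check_md5 would then compare this hex digest against the stored one and print the warning above on mismatch.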
saltstack/salt
salt/modules/mysql.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mysql.py#L1022-L1046
def db_get(name, **connection_args):
    '''
    Return a list of databases of a MySQL server using the output
    from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME
    FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.db_get test
    '''
    dbc = _connect(**connection_args)
    if dbc is None:
        return []
    cur = dbc.cursor()
    qry = ('SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM '
           'INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;')
    args = {"dbname": name}
    _execute(cur, qry, args)
    if cur.rowcount:
        rows = cur.fetchall()
        return {'character_set': rows[0][0], 'collate': rows[0][1]}
    return {}
[ "def", "db_get", "(", "name", ",", "*", "*", "connection_args", ")", ":", "dbc", "=", "_connect", "(", "*", "*", "connection_args", ")", "if", "dbc", "is", "None", ":", "return", "[", "]", "cur", "=", "dbc", ".", "cursor", "(", ")", "qry", "=", "(", "'SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM '", "'INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;'", ")", "args", "=", "{", "\"dbname\"", ":", "name", "}", "_execute", "(", "cur", ",", "qry", ",", "args", ")", "if", "cur", ".", "rowcount", ":", "rows", "=", "cur", ".", "fetchall", "(", ")", "return", "{", "'character_set'", ":", "rows", "[", "0", "]", "[", "0", "]", ",", "'collate'", ":", "rows", "[", "0", "]", "[", "1", "]", "}", "return", "{", "}" ]
Return a list of databases of a MySQL server using the output
from the ``SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME
FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='dbname';`` query.

CLI Example:

.. code-block:: bash

    salt '*' mysql.db_get test
[ "Return", "a", "list", "of", "databases", "of", "a", "MySQL", "server", "using", "the", "output", "from", "the", "SELECT", "DEFAULT_CHARACTER_SET_NAME", "DEFAULT_COLLATION_NAME", "FROM", "INFORMATION_SCHEMA", ".", "SCHEMATA", "WHERE", "SCHEMA_NAME", "=", "dbname", ";", "query", "." ]
python
train
31
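The query that ``db_get`` wraps can be reproduced directly with MySQLdb, roughly what the module's private ``_connect``/``_execute`` helpers do; every connection parameter below is a placeholder, and the pyformat ``%(dbname)s`` binding is the same one the salt code uses.

import MySQLdb

# Placeholder credentials; point these at a real server to run the sketch.
conn = MySQLdb.connect(host='localhost', user='root',
                       passwd='hunter2', db='mysql')
cur = conn.cursor()
cur.execute(
    'SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME '
    'FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;',
    {'dbname': 'test'},
)
row = cur.fetchone()
if row:
    print({'character_set': row[0], 'collate': row[1]})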