Dataset schema (column, dtype, observed range):

| column           | dtype          | range / distinct values |
|------------------|----------------|-------------------------|
| repo             | string         | lengths 7–55            |
| path             | string         | lengths 4–223           |
| url              | string         | lengths 87–315          |
| code             | string         | lengths 75–104k         |
| code_tokens      | list           |                         |
| docstring        | string         | lengths 1–46.9k         |
| docstring_tokens | list           |                         |
| language         | string (class) | 1 value                 |
| partition        | string (class) | 3 values                |
| avg_line_len     | float64        | 7.91–980                |
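The records below follow this schema, one function per row. As a minimal sketch of how a dump with these columns could be loaded and inspected (assuming a CodeSearchNet-style dataset on the Hugging Face Hub; the dataset id below is hypothetical):

```python
# Minimal sketch: load a dataset with the schema above and inspect a few rows.
# "user/python-code-docstrings" is a hypothetical id; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/python-code-docstrings", split="train")

# Each row mirrors the columns above: repo, path, url, code, code_tokens,
# docstring, docstring_tokens, language, partition, avg_line_len.
for row in ds.select(range(3)):
    print(row["repo"], row["path"], row["partition"])
    print(row["docstring"].splitlines()[0] if row["docstring"] else "")

# Example filter: keep only rows whose average line length is under 40 chars.
short = ds.filter(lambda r: r["avg_line_len"] < 40)
print(len(short))
```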
karimbahgat/PyCRS
pycrs/parse.py
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L24-L41
```python
def from_epsg_code(code):
    """
    Load crs object from epsg code, via spatialreference.org.
    Parses based on the proj4 representation.

    Arguments:

    - *code*: The EPSG code as an integer.

    Returns:

    - A CS instance of the indicated type.
    """
    # must go online (or look up local table) to get crs details
    code = str(code)
    proj4 = utils.crscode_to_string("epsg", code, "proj4")
    crs = from_proj4(proj4)
    return crs
```
[ "def", "from_epsg_code", "(", "code", ")", ":", "# must go online (or look up local table) to get crs details", "code", "=", "str", "(", "code", ")", "proj4", "=", "utils", ".", "crscode_to_string", "(", "\"epsg\"", ",", "code", ",", "\"proj4\"", ")", "crs", "=", "from_proj4", "(", "proj4", ")", "return", "crs" ]
Load crs object from epsg code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The EPSG code as an integer. Returns: - A CS instance of the indicated type.
[ "Load", "crs", "object", "from", "epsg", "code", "via", "spatialreference", ".", "org", ".", "Parses", "based", "on", "the", "proj4", "representation", "." ]
python
test
24.388889
zetaops/zengine
zengine/models/workflow_manager.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L111-L127
```python
def get_name(self):
    """
    Tries to get WF name from 'process' or 'collobration' or 'pariticipant'

    Returns:
        str. WF name.
    """
    paths = ['bpmn:process',
             'bpmn:collaboration/bpmn:participant/',
             'bpmn:collaboration',
             ]
    for path in paths:
        tag = self.root.find(path, NS)
        if tag is not None and len(tag):
            name = tag.get('name')
            if name:
                return name
```
[ "def", "get_name", "(", "self", ")", ":", "paths", "=", "[", "'bpmn:process'", ",", "'bpmn:collaboration/bpmn:participant/'", ",", "'bpmn:collaboration'", ",", "]", "for", "path", "in", "paths", ":", "tag", "=", "self", ".", "root", ".", "find", "(", "path", ",", "NS", ")", "if", "tag", "is", "not", "None", "and", "len", "(", "tag", ")", ":", "name", "=", "tag", ".", "get", "(", "'name'", ")", "if", "name", ":", "return", "name" ]
Tries to get WF name from 'process' or 'collobration' or 'pariticipant' Returns: str. WF name.
[ "Tries", "to", "get", "WF", "name", "from", "process", "or", "collobration", "or", "pariticipant" ]
python
train
30
knipknap/exscript
Exscript/util/mail.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/mail.py#L462-L488
```python
def send(mail, server='localhost'):
    """
    Sends the given mail.

    :type  mail: Mail
    :param mail: The mail object.
    :type  server: string
    :param server: The address of the mailserver.
    """
    sender = mail.get_sender()
    rcpt = mail.get_receipients()
    session = smtplib.SMTP(server)
    message = MIMEMultipart()
    message['Subject'] = mail.get_subject()
    message['From'] = mail.get_sender()
    message['To'] = ', '.join(mail.get_to())
    message['Cc'] = ', '.join(mail.get_cc())
    message.preamble = 'Your mail client is not MIME aware.'

    body = MIMEText(mail.get_body().encode("utf-8"), "plain", "utf-8")
    body.add_header('Content-Disposition', 'inline')
    message.attach(body)

    for filename in mail.get_attachments():
        message.attach(_get_mime_object(filename))

    session.sendmail(sender, rcpt, message.as_string())
```
[ "def", "send", "(", "mail", ",", "server", "=", "'localhost'", ")", ":", "sender", "=", "mail", ".", "get_sender", "(", ")", "rcpt", "=", "mail", ".", "get_receipients", "(", ")", "session", "=", "smtplib", ".", "SMTP", "(", "server", ")", "message", "=", "MIMEMultipart", "(", ")", "message", "[", "'Subject'", "]", "=", "mail", ".", "get_subject", "(", ")", "message", "[", "'From'", "]", "=", "mail", ".", "get_sender", "(", ")", "message", "[", "'To'", "]", "=", "', '", ".", "join", "(", "mail", ".", "get_to", "(", ")", ")", "message", "[", "'Cc'", "]", "=", "', '", ".", "join", "(", "mail", ".", "get_cc", "(", ")", ")", "message", ".", "preamble", "=", "'Your mail client is not MIME aware.'", "body", "=", "MIMEText", "(", "mail", ".", "get_body", "(", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"plain\"", ",", "\"utf-8\"", ")", "body", ".", "add_header", "(", "'Content-Disposition'", ",", "'inline'", ")", "message", ".", "attach", "(", "body", ")", "for", "filename", "in", "mail", ".", "get_attachments", "(", ")", ":", "message", ".", "attach", "(", "_get_mime_object", "(", "filename", ")", ")", "session", ".", "sendmail", "(", "sender", ",", "rcpt", ",", "message", ".", "as_string", "(", ")", ")" ]
Sends the given mail. :type mail: Mail :param mail: The mail object. :type server: string :param server: The address of the mailserver.
[ "Sends", "the", "given", "mail", "." ]
python
train
31.555556
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1533-L1549
```python
def _play(self):
    """Send play command to receiver command via HTTP post."""
    # Use pause command only for sources which support NETAUDIO
    if self._input_func in self._netaudio_func_list:
        body = {"cmd0": "PutNetAudioCommand/CurEnter",
                "cmd1": "aspMainZone_WebUpdateStatus/",
                "ZoneName": "MAIN ZONE"}
        try:
            if self.send_post_command(
                    self._urls.command_netaudio_post, body):
                self._state = STATE_PLAYING
                return True
            else:
                return False
        except requests.exceptions.RequestException:
            _LOGGER.error("Connection error: play command not sent.")
            return False
```
[ "def", "_play", "(", "self", ")", ":", "# Use pause command only for sources which support NETAUDIO", "if", "self", ".", "_input_func", "in", "self", ".", "_netaudio_func_list", ":", "body", "=", "{", "\"cmd0\"", ":", "\"PutNetAudioCommand/CurEnter\"", ",", "\"cmd1\"", ":", "\"aspMainZone_WebUpdateStatus/\"", ",", "\"ZoneName\"", ":", "\"MAIN ZONE\"", "}", "try", ":", "if", "self", ".", "send_post_command", "(", "self", ".", "_urls", ".", "command_netaudio_post", ",", "body", ")", ":", "self", ".", "_state", "=", "STATE_PLAYING", "return", "True", "else", ":", "return", "False", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "_LOGGER", ".", "error", "(", "\"Connection error: play command not sent.\"", ")", "return", "False" ]
Send play command to receiver command via HTTP post.
[ "Send", "play", "command", "to", "receiver", "command", "via", "HTTP", "post", "." ]
python
train
45.647059
bigchaindb/bigchaindb
bigchaindb/lib.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/lib.py#L352-L366
```python
def get_block_containing_tx(self, txid):
    """Retrieve the list of blocks (block ids) containing a
       transaction with transaction id `txid`

    Args:
        txid (str): transaction id of the transaction to query

    Returns:
        Block id list (list(int))
    """
    blocks = list(backend.query.get_block_with_transaction(self.connection, txid))
    if len(blocks) > 1:
        logger.critical('Transaction id %s exists in multiple blocks', txid)

    return [block['height'] for block in blocks]
```
[ "def", "get_block_containing_tx", "(", "self", ",", "txid", ")", ":", "blocks", "=", "list", "(", "backend", ".", "query", ".", "get_block_with_transaction", "(", "self", ".", "connection", ",", "txid", ")", ")", "if", "len", "(", "blocks", ")", ">", "1", ":", "logger", ".", "critical", "(", "'Transaction id %s exists in multiple blocks'", ",", "txid", ")", "return", "[", "block", "[", "'height'", "]", "for", "block", "in", "blocks", "]" ]
Retrieve the list of blocks (block ids) containing a transaction with transaction id `txid` Args: txid (str): transaction id of the transaction to query Returns: Block id list (list(int))
[ "Retrieve", "the", "list", "of", "blocks", "(", "block", "ids", ")", "containing", "a", "transaction", "with", "transaction", "id", "txid" ]
python
train
36
chriso/gauged
gauged/structures/sparse_map.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L73-L77
```python
def append(self, position, array):
    """Append an array to the end of the map. The position must be
    greater than any positions in the map"""
    if not Gauged.map_append(self.ptr, position, array.ptr):
        raise MemoryError
```
[ "def", "append", "(", "self", ",", "position", ",", "array", ")", ":", "if", "not", "Gauged", ".", "map_append", "(", "self", ".", "ptr", ",", "position", ",", "array", ".", "ptr", ")", ":", "raise", "MemoryError" ]
Append an array to the end of the map. The position must be greater than any positions in the map
[ "Append", "an", "array", "to", "the", "end", "of", "the", "map", ".", "The", "position", "must", "be", "greater", "than", "any", "positions", "in", "the", "map" ]
python
train
49
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3901-L3905
```python
def setWorkingPlayAreaSize(self, sizeX, sizeZ):
    """Sets the Play Area in the working copy."""
    fn = self.function_table.setWorkingPlayAreaSize
    fn(sizeX, sizeZ)
```
[ "def", "setWorkingPlayAreaSize", "(", "self", ",", "sizeX", ",", "sizeZ", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setWorkingPlayAreaSize", "fn", "(", "sizeX", ",", "sizeZ", ")" ]
Sets the Play Area in the working copy.
[ "Sets", "the", "Play", "Area", "in", "the", "working", "copy", "." ]
python
train
35.8
FNNDSC/pfdicom
pfdicom/pfdicom.py
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L266-L348
```python
def DICOMfile_read(self, *args, **kwargs):
    """
    Read a DICOM file and perform some initial parsing of tags.

    NB!
    For thread safety, class member variables should not be assigned
    since other threads might override/change these variables in mid-
    flight!
    """
    b_status = False
    l_tags = []
    l_tagsToUse = []
    d_tagsInString = {}
    str_file = ""
    d_DICOM = {
        'dcm':              None,
        'd_dcm':            {},
        'strRaw':           '',
        'l_tagRaw':         [],
        'd_json':           {},
        'd_dicom':          {},
        'd_dicomSimple':    {}
    }

    for k, v in kwargs.items():
        if k == 'file':
            str_file = v
        if k == 'l_tagsToUse':
            l_tags = v

    if len(args):
        l_file = args[0]
        str_file = l_file[0]

    str_localFile = os.path.basename(str_file)
    str_path = os.path.dirname(str_file)

    # self.dp.qprint("%s: In input base directory: %s" % (threading.currentThread().getName(), self.str_inputDir))
    # self.dp.qprint("%s: Reading DICOM file in path: %s" % (threading.currentThread().getName(),str_path))
    # self.dp.qprint("%s: Analysing tags on DICOM file: %s" % (threading.currentThread().getName(),str_localFile))
    # self.dp.qprint("%s: Loading: %s" % (threading.currentThread().getName(),str_file))
    try:
        # self.dcm = dicom.read_file(str_file)
        d_DICOM['dcm'] = dicom.read_file(str_file)
        b_status = True
    except:
        self.dp.qprint('In directory: %s' % os.getcwd(), comms='error')
        self.dp.qprint('Failed to read %s' % str_file, comms='error')
        b_status = False
    d_DICOM['d_dcm'] = dict(d_DICOM['dcm'])
    d_DICOM['strRaw'] = str(d_DICOM['dcm'])
    d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()

    if len(l_tags):
        l_tagsToUse = l_tags
    else:
        l_tagsToUse = d_DICOM['l_tagRaw']

    if 'PixelData' in l_tagsToUse:
        l_tagsToUse.remove('PixelData')

    for key in l_tagsToUse:
        d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
        try:
            d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key)
        except:
            d_DICOM['d_dicomSimple'][key] = "no attribute"
        d_DICOM['d_json'][key] = str(d_DICOM['d_dicomSimple'][key])

    # pudb.set_trace()
    d_tagsInString = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
    str_outputFile = d_tagsInString['str_result']

    return {
        'status':           b_status,
        'inputPath':        str_path,
        'inputFilename':    str_localFile,
        'outputFileStem':   str_outputFile,
        'd_DICOM':          d_DICOM,
        'l_tagsToUse':      l_tagsToUse
    }
```
[ "def", "DICOMfile_read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_status", "=", "False", "l_tags", "=", "[", "]", "l_tagsToUse", "=", "[", "]", "d_tagsInString", "=", "{", "}", "str_file", "=", "\"\"", "d_DICOM", "=", "{", "'dcm'", ":", "None", ",", "'d_dcm'", ":", "{", "}", ",", "'strRaw'", ":", "''", ",", "'l_tagRaw'", ":", "[", "]", ",", "'d_json'", ":", "{", "}", ",", "'d_dicom'", ":", "{", "}", ",", "'d_dicomSimple'", ":", "{", "}", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "==", "'file'", ":", "str_file", "=", "v", "if", "k", "==", "'l_tagsToUse'", ":", "l_tags", "=", "v", "if", "len", "(", "args", ")", ":", "l_file", "=", "args", "[", "0", "]", "str_file", "=", "l_file", "[", "0", "]", "str_localFile", "=", "os", ".", "path", ".", "basename", "(", "str_file", ")", "str_path", "=", "os", ".", "path", ".", "dirname", "(", "str_file", ")", "# self.dp.qprint(\"%s: In input base directory: %s\" % (threading.currentThread().getName(), self.str_inputDir))", "# self.dp.qprint(\"%s: Reading DICOM file in path: %s\" % (threading.currentThread().getName(),str_path))", "# self.dp.qprint(\"%s: Analysing tags on DICOM file: %s\" % (threading.currentThread().getName(),str_localFile)) ", "# self.dp.qprint(\"%s: Loading: %s\" % (threading.currentThread().getName(),str_file))", "try", ":", "# self.dcm = dicom.read_file(str_file)", "d_DICOM", "[", "'dcm'", "]", "=", "dicom", ".", "read_file", "(", "str_file", ")", "b_status", "=", "True", "except", ":", "self", ".", "dp", ".", "qprint", "(", "'In directory: %s'", "%", "os", ".", "getcwd", "(", ")", ",", "comms", "=", "'error'", ")", "self", ".", "dp", ".", "qprint", "(", "'Failed to read %s'", "%", "str_file", ",", "comms", "=", "'error'", ")", "b_status", "=", "False", "d_DICOM", "[", "'d_dcm'", "]", "=", "dict", "(", "d_DICOM", "[", "'dcm'", "]", ")", "d_DICOM", "[", "'strRaw'", "]", "=", "str", "(", "d_DICOM", "[", "'dcm'", "]", ")", "d_DICOM", "[", "'l_tagRaw'", "]", "=", "d_DICOM", "[", "'dcm'", "]", ".", "dir", "(", ")", "if", "len", "(", "l_tags", ")", ":", "l_tagsToUse", "=", "l_tags", "else", ":", "l_tagsToUse", "=", "d_DICOM", "[", "'l_tagRaw'", "]", "if", "'PixelData'", "in", "l_tagsToUse", ":", "l_tagsToUse", ".", "remove", "(", "'PixelData'", ")", "for", "key", "in", "l_tagsToUse", ":", "d_DICOM", "[", "'d_dicom'", "]", "[", "key", "]", "=", "d_DICOM", "[", "'dcm'", "]", ".", "data_element", "(", "key", ")", "try", ":", "d_DICOM", "[", "'d_dicomSimple'", "]", "[", "key", "]", "=", "getattr", "(", "d_DICOM", "[", "'dcm'", "]", ",", "key", ")", "except", ":", "d_DICOM", "[", "'d_dicomSimple'", "]", "[", "key", "]", "=", "\"no attribute\"", "d_DICOM", "[", "'d_json'", "]", "[", "key", "]", "=", "str", "(", "d_DICOM", "[", "'d_dicomSimple'", "]", "[", "key", "]", ")", "# pudb.set_trace()", "d_tagsInString", "=", "self", ".", "tagsInString_process", "(", "d_DICOM", ",", "self", ".", "str_outputFileStem", ")", "str_outputFile", "=", "d_tagsInString", "[", "'str_result'", "]", "return", "{", "'status'", ":", "b_status", ",", "'inputPath'", ":", "str_path", ",", "'inputFilename'", ":", "str_localFile", ",", "'outputFileStem'", ":", "str_outputFile", ",", "'d_DICOM'", ":", "d_DICOM", ",", "'l_tagsToUse'", ":", "l_tagsToUse", "}" ]
Read a DICOM file and perform some initial parsing of tags. NB! For thread safety, class member variables should not be assigned since other threads might override/change these variables in mid- flight!
[ "Read", "a", "DICOM", "file", "and", "perform", "some", "initial", "parsing", "of", "tags", "." ]
python
train
37.036145
nkmathew/yasi-sexp-indenter
yasi.py
https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L394-L407
```python
def tabify(text, options):
    """ tabify(text : str, options : argparse.Namespace|str) -> str

    >>> tabify(' (println "hello world")', '--tab=3')
    '\t\t (println "hello world")'

    Replace spaces with tabs
    """
    opts = parse_options(options)
    if opts.tab_size < 1:
        return text
    else:
        tab_equiv = ' ' * opts.tab_size
        return text.replace(tab_equiv, '\t')
```
[ "def", "tabify", "(", "text", ",", "options", ")", ":", "opts", "=", "parse_options", "(", "options", ")", "if", "opts", ".", "tab_size", "<", "1", ":", "return", "text", "else", ":", "tab_equiv", "=", "' '", "*", "opts", ".", "tab_size", "return", "text", ".", "replace", "(", "tab_equiv", ",", "'\\t'", ")" ]
tabify(text : str, options : argparse.Namespace|str) -> str >>> tabify(' (println "hello world")', '--tab=3') '\t\t (println "hello world")' Replace spaces with tabs
[ "tabify", "(", "text", ":", "str", "options", ":", "argparse", ".", "Namespace|str", ")", "-", ">", "str" ]
python
train
28.071429
fogleman/pg
pg/glfw.py
https://github.com/fogleman/pg/blob/124ea3803c788b2c98c4f3a428e5d26842a67b58/pg/glfw.py#L84-L135
```python
def _glfw_get_version(filename):
    '''
    Queries and returns the library version tuple or None by using a
    subprocess.
    '''
    version_checker_source = """
        import sys
        import ctypes

        def get_version(library_handle):
            '''
            Queries and returns the library version tuple or None.
            '''
            major_value = ctypes.c_int(0)
            major = ctypes.pointer(major_value)
            minor_value = ctypes.c_int(0)
            minor = ctypes.pointer(minor_value)
            rev_value = ctypes.c_int(0)
            rev = ctypes.pointer(rev_value)
            if hasattr(library_handle, 'glfwGetVersion'):
                library_handle.glfwGetVersion(major, minor, rev)
                version = (major_value.value,
                           minor_value.value,
                           rev_value.value)
                return version
            else:
                return None

        try:
            input_func = raw_input
        except NameError:
            input_func = input
        filename = input_func().strip()

        try:
            library_handle = ctypes.CDLL(filename)
        except OSError:
            pass
        else:
            version = get_version(library_handle)
            print(version)
        """

    args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
    process = subprocess.Popen(args, universal_newlines=True,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out = process.communicate(_to_char_p(filename))[0]
    out = out.strip()
    if out:
        return eval(out)
    else:
        return None
```
[ "def", "_glfw_get_version", "(", "filename", ")", ":", "version_checker_source", "=", "\"\"\"\n import sys\n import ctypes\n\n def get_version(library_handle):\n '''\n Queries and returns the library version tuple or None.\n '''\n major_value = ctypes.c_int(0)\n major = ctypes.pointer(major_value)\n minor_value = ctypes.c_int(0)\n minor = ctypes.pointer(minor_value)\n rev_value = ctypes.c_int(0)\n rev = ctypes.pointer(rev_value)\n if hasattr(library_handle, 'glfwGetVersion'):\n library_handle.glfwGetVersion(major, minor, rev)\n version = (major_value.value,\n minor_value.value,\n rev_value.value)\n return version\n else:\n return None\n\n try:\n input_func = raw_input\n except NameError:\n input_func = input\n filename = input_func().strip()\n\n try:\n library_handle = ctypes.CDLL(filename)\n except OSError:\n pass\n else:\n version = get_version(library_handle)\n print(version)\n \"\"\"", "args", "=", "[", "sys", ".", "executable", ",", "'-c'", ",", "textwrap", ".", "dedent", "(", "version_checker_source", ")", "]", "process", "=", "subprocess", ".", "Popen", "(", "args", ",", "universal_newlines", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", "=", "process", ".", "communicate", "(", "_to_char_p", "(", "filename", ")", ")", "[", "0", "]", "out", "=", "out", ".", "strip", "(", ")", "if", "out", ":", "return", "eval", "(", "out", ")", "else", ":", "return", "None" ]
Queries and returns the library version tuple or None by using a subprocess.
[ "Queries", "and", "returns", "the", "library", "version", "tuple", "or", "None", "by", "using", "a", "subprocess", "." ]
python
train
30.923077
inveniosoftware-contrib/invenio-classifier
invenio_classifier/reader.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/reader.py#L914-L941
```python
def _get_regex_pattern(label):
    """Return a regular expression of the label.

    This takes care of plural and different kinds of separators.
    """
    parts = _split_by_punctuation.split(label)
    for index, part in enumerate(parts):
        if index % 2 == 0:
            # Word
            if not parts[index].isdigit() and len(parts[index]) > 1:
                parts[index] = _convert_word(parts[index])
        else:
            # Punctuation
            if not parts[index + 1]:
                # The separator is not followed by another word. Treat
                # it as a symbol.
                parts[index] = _convert_punctuation(
                    parts[index],
                    current_app.config["CLASSIFIER_SYMBOLS"]
                )
            else:
                parts[index] = _convert_punctuation(
                    parts[index],
                    current_app.config["CLASSIFIER_SEPARATORS"]
                )
    return "".join(parts)
```
[ "def", "_get_regex_pattern", "(", "label", ")", ":", "parts", "=", "_split_by_punctuation", ".", "split", "(", "label", ")", "for", "index", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "if", "index", "%", "2", "==", "0", ":", "# Word", "if", "not", "parts", "[", "index", "]", ".", "isdigit", "(", ")", "and", "len", "(", "parts", "[", "index", "]", ")", ">", "1", ":", "parts", "[", "index", "]", "=", "_convert_word", "(", "parts", "[", "index", "]", ")", "else", ":", "# Punctuation", "if", "not", "parts", "[", "index", "+", "1", "]", ":", "# The separator is not followed by another word. Treat", "# it as a symbol.", "parts", "[", "index", "]", "=", "_convert_punctuation", "(", "parts", "[", "index", "]", ",", "current_app", ".", "config", "[", "\"CLASSIFIER_SYMBOLS\"", "]", ")", "else", ":", "parts", "[", "index", "]", "=", "_convert_punctuation", "(", "parts", "[", "index", "]", ",", "current_app", ".", "config", "[", "\"CLASSIFIER_SEPARATORS\"", "]", ")", "return", "\"\"", ".", "join", "(", "parts", ")" ]
Return a regular expression of the label. This takes care of plural and different kinds of separators.
[ "Return", "a", "regular", "expression", "of", "the", "label", "." ]
python
train
33.964286
tarbell-project/tarbell
tarbell/cli.py
https://github.com/tarbell-project/tarbell/blob/818b3d3623dcda5a08a5bf45550219719b0f0365/tarbell/cli.py#L741-L751
```python
def _list_templates(settings):
    """
    List templates from settings.
    """
    for idx, option in enumerate(settings.config.get("project_templates"), start=1):
        puts(" {0!s:5} {1!s:36}".format(
            colored.yellow("[{0}]".format(idx)),
            colored.cyan(option.get("name"))
        ))
        if option.get("url"):
            puts(" {0}\n".format(option.get("url")))
```
[ "def", "_list_templates", "(", "settings", ")", ":", "for", "idx", ",", "option", "in", "enumerate", "(", "settings", ".", "config", ".", "get", "(", "\"project_templates\"", ")", ",", "start", "=", "1", ")", ":", "puts", "(", "\" {0!s:5} {1!s:36}\"", ".", "format", "(", "colored", ".", "yellow", "(", "\"[{0}]\"", ".", "format", "(", "idx", ")", ")", ",", "colored", ".", "cyan", "(", "option", ".", "get", "(", "\"name\"", ")", ")", ")", ")", "if", "option", ".", "get", "(", "\"url\"", ")", ":", "puts", "(", "\" {0}\\n\"", ".", "format", "(", "option", ".", "get", "(", "\"url\"", ")", ")", ")" ]
List templates from settings.
[ "List", "templates", "from", "settings", "." ]
python
train
35.454545
quantumlib/Cirq
dev_tools/auto_merge.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/dev_tools/auto_merge.py#L446-L460
```python
def delete_comment(repo: GithubRepository, comment_id: int) -> None:
    """
    References:
        https://developer.github.com/v3/issues/comments/#delete-a-comment
    """
    url = ("https://api.github.com/repos/{}/{}/issues/comments/{}"
           "?access_token={}".format(repo.organization,
                                     repo.name,
                                     comment_id,
                                     repo.access_token))
    response = requests.delete(url)
    if response.status_code != 204:
        raise RuntimeError(
            'Comment delete failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))
```
[ "def", "delete_comment", "(", "repo", ":", "GithubRepository", ",", "comment_id", ":", "int", ")", "->", "None", ":", "url", "=", "(", "\"https://api.github.com/repos/{}/{}/issues/comments/{}\"", "\"?access_token={}\"", ".", "format", "(", "repo", ".", "organization", ",", "repo", ".", "name", ",", "comment_id", ",", "repo", ".", "access_token", ")", ")", "response", "=", "requests", ".", "delete", "(", "url", ")", "if", "response", ".", "status_code", "!=", "204", ":", "raise", "RuntimeError", "(", "'Comment delete failed. Code: {}. Content: {}.'", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "content", ")", ")" ]
References: https://developer.github.com/v3/issues/comments/#delete-a-comment
[ "References", ":", "https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "issues", "/", "comments", "/", "#delete", "-", "a", "-", "comment" ]
python
train
44.133333
MrYsLab/pymata-aio
pymata_aio/pymata3.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata3.py#L623-L630
```python
def shutdown(self):
    """
    Shutdown the application and exit

    :returns: No return value
    """
    task = asyncio.ensure_future(self.core.shutdown())
    self.loop.run_until_complete(task)
```
[ "def", "shutdown", "(", "self", ")", ":", "task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "core", ".", "shutdown", "(", ")", ")", "self", ".", "loop", ".", "run_until_complete", "(", "task", ")" ]
Shutdown the application and exit :returns: No return value
[ "Shutdown", "the", "application", "and", "exit" ]
python
train
26.875
trailofbits/manticore
manticore/core/smtlib/solver.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/solver.py#L93-L102
```python
def min(self, constraints, X: BitVec, M=10000):
    """
    Iteratively finds the minimum value for a symbol within given constraints.

    :param constraints: constraints that the expression must fulfil
    :param X: a symbol or expression
    :param M: maximum number of iterations allowed
    """
    assert isinstance(X, BitVec)
    return self.optimize(constraints, X, 'minimize', M)
```
[ "def", "min", "(", "self", ",", "constraints", ",", "X", ":", "BitVec", ",", "M", "=", "10000", ")", ":", "assert", "isinstance", "(", "X", ",", "BitVec", ")", "return", "self", ".", "optimize", "(", "constraints", ",", "X", ",", "'minimize'", ",", "M", ")" ]
Iteratively finds the minimum value for a symbol within given constraints. :param constraints: constraints that the expression must fulfil :param X: a symbol or expression :param M: maximum number of iterations allowed
[ "Iteratively", "finds", "the", "minimum", "value", "for", "a", "symbol", "within", "given", "constraints", "." ]
python
valid
41.1
openstack/quark
quark/drivers/ironic_driver.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L165-L211
```python
def select_ipam_strategy(self, network_id, network_strategy, **kwargs):
    """Return relevant IPAM strategy name.

    :param network_id: neutron network id.
    :param network_strategy: default strategy for the network.

    NOTE(morgabra) This feels like a hack but I can't think of a better
    idea. The root problem is we can now attach ports to networks with
    a different backend driver/ipam strategy than the network speficies.

    We handle the the backend driver part with allowing network_plugin
    to be specified for port objects. This works pretty well because
    nova or whatever knows when we are hooking up an Ironic node so it
    can pass along that key during port_create().

    IPAM is a little trickier, especially in Ironic's case, because we
    *must* use a specific IPAM for provider networks. There isn't really
    much of an option other than involve the backend driver when
    selecting the IPAM strategy.
    """
    LOG.info("Selecting IPAM strategy for network_id:%s "
             "network_strategy:%s" % (network_id, network_strategy))

    net_type = "tenant"
    if STRATEGY.is_provider_network(network_id):
        net_type = "provider"

    strategy = self._ipam_strategies.get(net_type, {})
    default = strategy.get("default")
    overrides = strategy.get("overrides", {})

    # If we override a particular strategy explicitly, we use it.
    if network_strategy in overrides:
        LOG.info("Selected overridden IPAM strategy: %s"
                 % (overrides[network_strategy]))
        return overrides[network_strategy]

    # Otherwise, we are free to use an explicit default.
    if default:
        LOG.info("Selected default IPAM strategy for tenant "
                 "network: %s" % (default))
        return default

    # Fallback to the network-specified IPAM strategy
    LOG.info("Selected network strategy for tenant "
             "network: %s" % (network_strategy))
    return network_strategy
```
[ "def", "select_ipam_strategy", "(", "self", ",", "network_id", ",", "network_strategy", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Selecting IPAM strategy for network_id:%s \"", "\"network_strategy:%s\"", "%", "(", "network_id", ",", "network_strategy", ")", ")", "net_type", "=", "\"tenant\"", "if", "STRATEGY", ".", "is_provider_network", "(", "network_id", ")", ":", "net_type", "=", "\"provider\"", "strategy", "=", "self", ".", "_ipam_strategies", ".", "get", "(", "net_type", ",", "{", "}", ")", "default", "=", "strategy", ".", "get", "(", "\"default\"", ")", "overrides", "=", "strategy", ".", "get", "(", "\"overrides\"", ",", "{", "}", ")", "# If we override a particular strategy explicitly, we use it.", "if", "network_strategy", "in", "overrides", ":", "LOG", ".", "info", "(", "\"Selected overridden IPAM strategy: %s\"", "%", "(", "overrides", "[", "network_strategy", "]", ")", ")", "return", "overrides", "[", "network_strategy", "]", "# Otherwise, we are free to use an explicit default.", "if", "default", ":", "LOG", ".", "info", "(", "\"Selected default IPAM strategy for tenant \"", "\"network: %s\"", "%", "(", "default", ")", ")", "return", "default", "# Fallback to the network-specified IPAM strategy", "LOG", ".", "info", "(", "\"Selected network strategy for tenant \"", "\"network: %s\"", "%", "(", "network_strategy", ")", ")", "return", "network_strategy" ]
Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy.
[ "Return", "relevant", "IPAM", "strategy", "name", "." ]
python
valid
43.978723
getpelican/pelican-plugins
events/events.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/events/events.py#L159-L168
```python
def generate_events_list(generator):
    """Populate the event_list variable to be used in jinja templates"""
    if not localized_events:
        generator.context['events_list'] = sorted(events, reverse=True,
                                                  key=lambda ev: (ev.dtstart, ev.dtend))
    else:
        generator.context['events_list'] = {k: sorted(v, reverse=True,
                                                      key=lambda ev: (ev.dtstart, ev.dtend))
                                            for k, v in localized_events.items()}
```
[ "def", "generate_events_list", "(", "generator", ")", ":", "if", "not", "localized_events", ":", "generator", ".", "context", "[", "'events_list'", "]", "=", "sorted", "(", "events", ",", "reverse", "=", "True", ",", "key", "=", "lambda", "ev", ":", "(", "ev", ".", "dtstart", ",", "ev", ".", "dtend", ")", ")", "else", ":", "generator", ".", "context", "[", "'events_list'", "]", "=", "{", "k", ":", "sorted", "(", "v", ",", "reverse", "=", "True", ",", "key", "=", "lambda", "ev", ":", "(", "ev", ".", "dtstart", ",", "ev", ".", "dtend", ")", ")", "for", "k", ",", "v", "in", "localized_events", ".", "items", "(", ")", "}" ]
Populate the event_list variable to be used in jinja templates
[ "Populate", "the", "event_list", "variable", "to", "be", "used", "in", "jinja", "templates" ]
python
train
55.1
fhamborg/news-please
newsplease/crawler/commoncrawl_crawler.py
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/commoncrawl_crawler.py#L33-L53
```python
def __setup(local_download_dir_warc, log_level):
    """
    Setup
    :return:
    """
    if not os.path.exists(local_download_dir_warc):
        os.makedirs(local_download_dir_warc)

    # make loggers quite
    configure_logging({"LOG_LEVEL": "ERROR"})
    logging.getLogger('requests').setLevel(logging.CRITICAL)
    logging.getLogger('readability').setLevel(logging.CRITICAL)
    logging.getLogger('PIL').setLevel(logging.CRITICAL)
    logging.getLogger('newspaper').setLevel(logging.CRITICAL)
    logging.getLogger('newsplease').setLevel(logging.CRITICAL)
    logging.getLogger('urllib3').setLevel(logging.CRITICAL)

    # set own logger
    logging.basicConfig(level=log_level)
    __logger = logging.getLogger(__name__)
    __logger.setLevel(log_level)
```
[ "def", "__setup", "(", "local_download_dir_warc", ",", "log_level", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "local_download_dir_warc", ")", ":", "os", ".", "makedirs", "(", "local_download_dir_warc", ")", "# make loggers quite", "configure_logging", "(", "{", "\"LOG_LEVEL\"", ":", "\"ERROR\"", "}", ")", "logging", ".", "getLogger", "(", "'requests'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "logging", ".", "getLogger", "(", "'readability'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "logging", ".", "getLogger", "(", "'PIL'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "logging", ".", "getLogger", "(", "'newspaper'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "logging", ".", "getLogger", "(", "'newsplease'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "logging", ".", "getLogger", "(", "'urllib3'", ")", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "# set own logger", "logging", ".", "basicConfig", "(", "level", "=", "log_level", ")", "__logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "__logger", ".", "setLevel", "(", "log_level", ")" ]
Setup :return:
[ "Setup", ":", "return", ":" ]
python
train
35.285714
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L340-L352
```python
def asset(self, id):
    """Returns a single Asset.

    :param int id: (required), id of the asset
    :returns: :class:`Asset <github3.repos.release.Asset>`
    """
    data = None
    if int(id) > 0:
        url = self._build_url('releases', 'assets', str(id),
                              base_url=self._api)
        data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS),
                          200)
    return Asset(data, self) if data else None
```
[ "def", "asset", "(", "self", ",", "id", ")", ":", "data", "=", "None", "if", "int", "(", "id", ")", ">", "0", ":", "url", "=", "self", ".", "_build_url", "(", "'releases'", ",", "'assets'", ",", "str", "(", "id", ")", ",", "base_url", "=", "self", ".", "_api", ")", "data", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "url", ",", "headers", "=", "Release", ".", "CUSTOM_HEADERS", ")", ",", "200", ")", "return", "Asset", "(", "data", ",", "self", ")", "if", "data", "else", "None" ]
Returns a single Asset. :param int id: (required), id of the asset :returns: :class:`Asset <github3.repos.release.Asset>`
[ "Returns", "a", "single", "Asset", "." ]
python
train
38.230769
hangyan/shaw
shaw/types/d.py
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/types/d.py#L63-L69
```python
def deepcp(data):
    """Use ujson to do deep_copy"""
    import ujson
    try:
        return ujson.loads(ujson.dumps(data))
    except Exception:
        return copy.deepcopy(data)
```
[ "def", "deepcp", "(", "data", ")", ":", "import", "ujson", "try", ":", "return", "ujson", ".", "loads", "(", "ujson", ".", "dumps", "(", "data", ")", ")", "except", "Exception", ":", "return", "copy", ".", "deepcopy", "(", "data", ")" ]
Use ujson to do deep_copy
[ "Use", "ujson", "to", "do", "deep_copy" ]
python
train
25.142857
polyaxon/polyaxon-cli
polyaxon_cli/cli/build.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/build.py#L217-L242
````python
def bookmark(ctx):
    """Bookmark build job.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Examples:

    \b
    ```bash
    $ polyaxon build bookmark
    ```

    \b
    ```bash
    $ polyaxon build -b 2 bookmark
    ```
    """
    user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
    try:
        PolyaxonClient().build_job.bookmark(user, project_name, _build)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not bookmark build job `{}`.'.format(_build))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    Printer.print_success("Build job bookmarked.")
````
[ "def", "bookmark", "(", "ctx", ")", ":", "user", ",", "project_name", ",", "_build", "=", "get_build_or_local", "(", "ctx", ".", "obj", ".", "get", "(", "'project'", ")", ",", "ctx", ".", "obj", ".", "get", "(", "'build'", ")", ")", "try", ":", "PolyaxonClient", "(", ")", ".", "build_job", ".", "bookmark", "(", "user", ",", "project_name", ",", "_build", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not bookmark build job `{}`.'", ".", "format", "(", "_build", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "Printer", ".", "print_success", "(", "\"Build job bookmarked.\"", ")" ]
Bookmark build job. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon build bookmark ``` \b ```bash $ polyaxon build -b 2 bookmark ```
[ "Bookmark", "build", "job", "." ]
python
valid
26.884615
pescadores/pescador
pescador/maps.py
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/maps.py#L87-L117
```python
def tuples(stream, *keys):
    """Reformat data as tuples.

    Parameters
    ----------
    stream : iterable
        Stream of data objects.

    *keys : strings
        Keys to use for ordering data.

    Yields
    ------
    items : tuple of np.ndarrays
        Data object reformated as a tuple.

    Raises
    ------
    DataError
        If the stream contains items that are not data-like.

    KeyError
        If a data object does not contain the requested key.
    """
    if not keys:
        raise PescadorError('Unable to generate tuples from '
                            'an empty item set')
    for data in stream:
        try:
            yield tuple(data[key] for key in keys)
        except TypeError:
            raise DataError("Malformed data stream: {}".format(data))
```
[ "def", "tuples", "(", "stream", ",", "*", "keys", ")", ":", "if", "not", "keys", ":", "raise", "PescadorError", "(", "'Unable to generate tuples from '", "'an empty item set'", ")", "for", "data", "in", "stream", ":", "try", ":", "yield", "tuple", "(", "data", "[", "key", "]", "for", "key", "in", "keys", ")", "except", "TypeError", ":", "raise", "DataError", "(", "\"Malformed data stream: {}\"", ".", "format", "(", "data", ")", ")" ]
Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformated as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key.
[ "Reformat", "data", "as", "tuples", "." ]
python
train
24.645161
matthewdeanmartin/jiggle_version
jiggle_version/package_info_finder.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/package_info_finder.py#L229-L268
```python
def find_malformed_single_file_project(self):
    # type: () -> List[str]
    """
    Take first non-setup.py python file. What a mess.
    :return:
    """
    files = [f for f in os.listdir(".") if os.path.isfile(f)]
    candidates = []
    # project misnamed & not in setup.py
    for file in files:
        if file.endswith("setup.py") or not file.endswith(".py"):
            continue  # duh
        candidate = file.replace(".py", "")
        if candidate != "setup":
            candidates.append(candidate)
    # return first
    return candidates

    # files with shebang
    for file in files:
        if file.endswith("setup.py"):
            continue  # duh
        if "." not in file:
            candidate = files
            try:
                firstline = self.file_opener.open_this(file, "r").readline()
                if (
                    firstline.startswith("#")
                    and "python" in firstline
                    and candidate in self.setup_py_source()
                ):
                    candidates.append(candidate)
                    return candidates
            except:
                pass
    # default.
    return candidates
```
[ "def", "find_malformed_single_file_project", "(", "self", ")", ":", "# type: () -> List[str]", "files", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "\".\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", "]", "candidates", "=", "[", "]", "# project misnamed & not in setup.py", "for", "file", "in", "files", ":", "if", "file", ".", "endswith", "(", "\"setup.py\"", ")", "or", "not", "file", ".", "endswith", "(", "\".py\"", ")", ":", "continue", "# duh", "candidate", "=", "file", ".", "replace", "(", "\".py\"", ",", "\"\"", ")", "if", "candidate", "!=", "\"setup\"", ":", "candidates", ".", "append", "(", "candidate", ")", "# return first", "return", "candidates", "# files with shebang", "for", "file", "in", "files", ":", "if", "file", ".", "endswith", "(", "\"setup.py\"", ")", ":", "continue", "# duh", "if", "\".\"", "not", "in", "file", ":", "candidate", "=", "files", "try", ":", "firstline", "=", "self", ".", "file_opener", ".", "open_this", "(", "file", ",", "\"r\"", ")", ".", "readline", "(", ")", "if", "(", "firstline", ".", "startswith", "(", "\"#\"", ")", "and", "\"python\"", "in", "firstline", "and", "candidate", "in", "self", ".", "setup_py_source", "(", ")", ")", ":", "candidates", ".", "append", "(", "candidate", ")", "return", "candidates", "except", ":", "pass", "# default.", "return", "candidates" ]
Take first non-setup.py python file. What a mess. :return:
[ "Take", "first", "non", "-", "setup", ".", "py", "python", "file", ".", "What", "a", "mess", ".", ":", "return", ":" ]
python
train
32.325
sorgerlab/indra
indra/tools/reading/util/reporter.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/reporter.py#L31-L47
```python
def add_section(self, section_name):
    """Create a section of the report, to be headed by section_name

    Text and images can be added by using the `section` argument of the
    `add_text` and `add_image` methods. Sections can also be ordered by
    using the `set_section_order` method.

    By default, text and images that have no section will be placed after
    all the sections, in the order they were added. This behavior may be
    altered using the `sections_first` attribute of the `make_report`
    method.
    """
    self.section_headings.append(section_name)
    if section_name in self.sections:
        raise ValueError("Section %s already exists." % section_name)
    self.sections[section_name] = []
    return
```
[ "def", "add_section", "(", "self", ",", "section_name", ")", ":", "self", ".", "section_headings", ".", "append", "(", "section_name", ")", "if", "section_name", "in", "self", ".", "sections", ":", "raise", "ValueError", "(", "\"Section %s already exists.\"", "%", "section_name", ")", "self", ".", "sections", "[", "section_name", "]", "=", "[", "]", "return" ]
Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method.
[ "Create", "a", "section", "of", "the", "report", "to", "be", "headed", "by", "section_name" ]
python
train
45.411765
klahnakoski/pyLibrary
mo_threads/queues.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/queues.py#L111-L119
```python
def pop_message(self, till=None):
    """
    RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING
    message.delete() WHEN DONE
    DUMMY IMPLEMENTATION FOR DEBUGGING
    """
    if till is not None and not isinstance(till, Signal):
        Log.error("Expecting a signal")
    return Null, self.pop(till=till)
```
[ "def", "pop_message", "(", "self", ",", "till", "=", "None", ")", ":", "if", "till", "is", "not", "None", "and", "not", "isinstance", "(", "till", ",", "Signal", ")", ":", "Log", ".", "error", "(", "\"Expecting a signal\"", ")", "return", "Null", ",", "self", ".", "pop", "(", "till", "=", "till", ")" ]
RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING message.delete() WHEN DONE DUMMY IMPLEMENTATION FOR DEBUGGING
[ "RETURN", "TUPLE", "(", "message", "payload", ")", "CALLER", "IS", "RESPONSIBLE", "FOR", "CALLING", "message", ".", "delete", "()", "WHEN", "DONE", "DUMMY", "IMPLEMENTATION", "FOR", "DEBUGGING" ]
python
train
37.888889
numenta/nupic
src/nupic/support/__init__.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/__init__.py#L442-L478
```python
def aggregationDivide(dividend, divisor):
    """
    Return the result from dividing two dicts that represent date and time.

    Both dividend and divisor are dicts that contain one or more of the
    following keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes',
    seconds', 'milliseconds', 'microseconds'.

    For example:

    ::

        aggregationDivide({'hours': 4}, {'minutes': 15}) == 16

    :param dividend: (dict) The numerator, as a dict representing a date
                     and time
    :param divisor: (dict) the denominator, as a dict representing a date
                    and time
    :returns: (float) number of times divisor goes into dividend
    """
    # Convert each into microseconds
    dividendMonthSec = aggregationToMonthsSeconds(dividend)
    divisorMonthSec = aggregationToMonthsSeconds(divisor)

    # It is a usage error to mix both months and seconds in the same operation
    if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
            or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
        raise RuntimeError("Aggregation dicts with months/years can only be "
                           "inter-operated with other aggregation dicts that contain "
                           "months/years")

    if dividendMonthSec['months'] > 0:
        return float(dividendMonthSec['months']) / divisor['months']
    else:
        return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
```
[ "def", "aggregationDivide", "(", "dividend", ",", "divisor", ")", ":", "# Convert each into microseconds", "dividendMonthSec", "=", "aggregationToMonthsSeconds", "(", "dividend", ")", "divisorMonthSec", "=", "aggregationToMonthsSeconds", "(", "divisor", ")", "# It is a usage error to mix both months and seconds in the same operation", "if", "(", "dividendMonthSec", "[", "'months'", "]", "!=", "0", "and", "divisorMonthSec", "[", "'seconds'", "]", "!=", "0", ")", "or", "(", "dividendMonthSec", "[", "'seconds'", "]", "!=", "0", "and", "divisorMonthSec", "[", "'months'", "]", "!=", "0", ")", ":", "raise", "RuntimeError", "(", "\"Aggregation dicts with months/years can only be \"", "\"inter-operated with other aggregation dicts that contain \"", "\"months/years\"", ")", "if", "dividendMonthSec", "[", "'months'", "]", ">", "0", ":", "return", "float", "(", "dividendMonthSec", "[", "'months'", "]", ")", "/", "divisor", "[", "'months'", "]", "else", ":", "return", "float", "(", "dividendMonthSec", "[", "'seconds'", "]", ")", "/", "divisorMonthSec", "[", "'seconds'", "]" ]
Return the result from dividing two dicts that represent date and time. Both dividend and divisor are dicts that contain one or more of the following keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds', 'microseconds'. For example: :: aggregationDivide({'hours': 4}, {'minutes': 15}) == 16 :param dividend: (dict) The numerator, as a dict representing a date and time :param divisor: (dict) the denominator, as a dict representing a date and time :returns: (float) number of times divisor goes into dividend
[ "Return", "the", "result", "from", "dividing", "two", "dicts", "that", "represent", "date", "and", "time", "." ]
python
valid
35.702703
Tanganelli/CoAPthon3
coapthon/reverse_proxy/coap.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/reverse_proxy/coap.py#L130-L139
```python
def parse_config(self):
    """
    Parse the xml file with remote servers and discover resources on
    each found server.
    """
    tree = ElementTree.parse(self.file_xml)
    root = tree.getroot()
    for server in root.findall('server'):
        destination = server.text
        name = server.get("name")
        self.discover_remote(destination, name)
```
[ "def", "parse_config", "(", "self", ")", ":", "tree", "=", "ElementTree", ".", "parse", "(", "self", ".", "file_xml", ")", "root", "=", "tree", ".", "getroot", "(", ")", "for", "server", "in", "root", ".", "findall", "(", "'server'", ")", ":", "destination", "=", "server", ".", "text", "name", "=", "server", ".", "get", "(", "\"name\"", ")", "self", ".", "discover_remote", "(", "destination", ",", "name", ")" ]
Parse the xml file with remote servers and discover resources on each found server.
[ "Parse", "the", "xml", "file", "with", "remote", "servers", "and", "discover", "resources", "on", "each", "found", "server", "." ]
python
train
38.2
capitalone/giraffez
giraffez/encrypt.py
https://github.com/capitalone/giraffez/blob/6b4d27eb1a1eaf188c6885c7364ef27e92b1b957/giraffez/encrypt.py#L30-L41
```python
def create_key_file(path):
    """
    Creates a new encryption key in the path provided and sets the file
    permissions. Setting the file permissions currently does not work
    on Windows platforms because of the differences in how file
    permissions are read and modified.
    """
    iv = "{}{}".format(os.urandom(32), time.time())
    new_key = generate_key(ensure_bytes(iv))
    with open(path, "wb") as f:
        f.write(base64.b64encode(new_key))
    os.chmod(path, 0o400)
```
[ "def", "create_key_file", "(", "path", ")", ":", "iv", "=", "\"{}{}\"", ".", "format", "(", "os", ".", "urandom", "(", "32", ")", ",", "time", ".", "time", "(", ")", ")", "new_key", "=", "generate_key", "(", "ensure_bytes", "(", "iv", ")", ")", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "base64", ".", "b64encode", "(", "new_key", ")", ")", "os", ".", "chmod", "(", "path", ",", "0o400", ")" ]
Creates a new encryption key in the path provided and sets the file permissions. Setting the file permissions currently does not work on Windows platforms because of the differences in how file permissions are read and modified.
[ "Creates", "a", "new", "encryption", "key", "in", "the", "path", "provided", "and", "sets", "the", "file", "permissions", ".", "Setting", "the", "file", "permissions", "currently", "does", "not", "work", "on", "Windows", "platforms", "because", "of", "the", "differences", "in", "how", "file", "permissions", "are", "read", "and", "modified", "." ]
python
test
39.583333
Esri/ArcREST
src/arcrest/common/symbology.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/symbology.py#L150-L154
```python
def outlineColor(self, value):
    """gets/sets the outlineColor"""
    if isinstance(value, Color) and \
       not self._outline is None:
        self._outline['color'] = value
```
[ "def", "outlineColor", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Color", ")", "and", "not", "self", ".", "_outline", "is", "None", ":", "self", ".", "_outline", "[", "'color'", "]", "=", "value" ]
gets/sets the outlineColor
[ "gets", "/", "sets", "the", "outlineColor" ]
python
train
38
ellethee/argparseinator
argparseinator/__init__.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L599-L602
```python
def extend_with(func):
    """Extends with class or function"""
    if not func.__name__ in ArgParseInator._plugins:
        ArgParseInator._plugins[func.__name__] = func
```
[ "def", "extend_with", "(", "func", ")", ":", "if", "not", "func", ".", "__name__", "in", "ArgParseInator", ".", "_plugins", ":", "ArgParseInator", ".", "_plugins", "[", "func", ".", "__name__", "]", "=", "func" ]
Extends with class or function
[ "Extends", "with", "class", "or", "function" ]
python
train
41.75
tcalmant/ipopo
pelix/remote/discovery/zookeeper.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/remote/discovery/zookeeper.py#L170-L176
```python
def stop(self):
    """
    Stops the connection
    """
    self.__stop = True
    self._queue.stop()
    self._zk.stop()
```
[ "def", "stop", "(", "self", ")", ":", "self", ".", "__stop", "=", "True", "self", ".", "_queue", ".", "stop", "(", ")", "self", ".", "_zk", ".", "stop", "(", ")" ]
Stops the connection
[ "Stops", "the", "connection" ]
python
train
20
Yelp/py_zipkin
py_zipkin/thrift/__init__.py
https://github.com/Yelp/py_zipkin/blob/0944d9a3fb1f1798dbb276694aeed99f2b4283ba/py_zipkin/thrift/__init__.py#L24-L34
```python
def create_annotation(timestamp, value, host):
    """
    Create a zipkin annotation object

    :param timestamp: timestamp of when the annotation occured in microseconds
    :param value: name of the annotation, such as 'sr'
    :param host: zipkin endpoint object
    :returns: zipkin annotation object
    """
    return zipkin_core.Annotation(timestamp=timestamp, value=value, host=host)
```
[ "def", "create_annotation", "(", "timestamp", ",", "value", ",", "host", ")", ":", "return", "zipkin_core", ".", "Annotation", "(", "timestamp", "=", "timestamp", ",", "value", "=", "value", ",", "host", "=", "host", ")" ]
Create a zipkin annotation object :param timestamp: timestamp of when the annotation occured in microseconds :param value: name of the annotation, such as 'sr' :param host: zipkin endpoint object :returns: zipkin annotation object
[ "Create", "a", "zipkin", "annotation", "object" ]
python
test
34.909091
openstack/horizon
horizon/tables/formset.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/formset.py#L110-L120
```python
def get_formset(self):
    """Provide the formset corresponding to this DataTable.

    Use this to validate the formset and to get the submitted data back.
    """
    if self._formset is None:
        self._formset = self.formset_class(
            self.request.POST or None,
            initial=self._get_formset_data(),
            prefix=self._meta.name)
    return self._formset
```
[ "def", "get_formset", "(", "self", ")", ":", "if", "self", ".", "_formset", "is", "None", ":", "self", ".", "_formset", "=", "self", ".", "formset_class", "(", "self", ".", "request", ".", "POST", "or", "None", ",", "initial", "=", "self", ".", "_get_formset_data", "(", ")", ",", "prefix", "=", "self", ".", "_meta", ".", "name", ")", "return", "self", ".", "_formset" ]
Provide the formset corresponding to this DataTable. Use this to validate the formset and to get the submitted data back.
[ "Provide", "the", "formset", "corresponding", "to", "this", "DataTable", "." ]
python
train
37.272727
acorg/dark-matter
dark/reads.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/reads.py#L1374-L1394
```python
def summarizePosition(self, index):
    """
    Compute residue counts at a specific sequence index.

    @param index: an C{int} index into the sequence.
    @return: A C{dict} with the count of too-short (excluded) sequences,
        and a Counter instance giving the residue counts.
    """
    countAtPosition = Counter()
    excludedCount = 0

    for read in self:
        try:
            countAtPosition[read.sequence[index]] += 1
        except IndexError:
            excludedCount += 1

    return {
        'excludedCount': excludedCount,
        'countAtPosition': countAtPosition
    }
```
[ "def", "summarizePosition", "(", "self", ",", "index", ")", ":", "countAtPosition", "=", "Counter", "(", ")", "excludedCount", "=", "0", "for", "read", "in", "self", ":", "try", ":", "countAtPosition", "[", "read", ".", "sequence", "[", "index", "]", "]", "+=", "1", "except", "IndexError", ":", "excludedCount", "+=", "1", "return", "{", "'excludedCount'", ":", "excludedCount", ",", "'countAtPosition'", ":", "countAtPosition", "}" ]
Compute residue counts at a specific sequence index. @param index: an C{int} index into the sequence. @return: A C{dict} with the count of too-short (excluded) sequences, and a Counter instance giving the residue counts.
[ "Compute", "residue", "counts", "at", "a", "specific", "sequence", "index", "." ]
python
train
30.809524
abusque/qng
qng/generator.py
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L204-L222
```python
def _snakify_name(self, name):
    """Snakify a name string.

    In this context, "to snakify" means to strip a name of all
    diacritics, convert it to lower case, and replace any spaces inside
    the name with hyphens. This way the name is made "machine-friendly",
    and ready to be combined with a second name component into a full
    "snake_case" name.

    :param str name: A name to snakify.

    :return str: A snakified name.
    """
    name = self._strip_diacritics(name)
    name = name.lower()
    name = name.replace(' ', '-')

    return name
```
[ "def", "_snakify_name", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "_strip_diacritics", "(", "name", ")", "name", "=", "name", ".", "lower", "(", ")", "name", "=", "name", ".", "replace", "(", "' '", ",", "'-'", ")", "return", "name" ]
Snakify a name string.

        In this context, "to snakify" means to strip a name of all
        diacritics, convert it to lower case, and replace any spaces
        inside the name with hyphens. This way the name is made
        "machine-friendly", and ready to be combined with a second name
        component into a full "snake_case" name.

        :param str name: A name to snakify.
        :return str: A snakified name.
[ "Snakify", "a", "name", "string", "." ]
python
train
31.789474
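For reference, the transformation above can be reproduced standalone. This is a minimal sketch, assuming diacritics are stripped by dropping combining marks after NFKD decomposition (the generator's own _strip_diacritics may differ in detail):

import unicodedata

def snakify(name):
    # Approximate _strip_diacritics: drop combining marks after NFKD decomposition.
    name = ''.join(c for c in unicodedata.normalize('NFKD', name)
                   if not unicodedata.combining(c))
    # Lower-case and replace inner spaces with hyphens, as in _snakify_name.
    return name.lower().replace(' ', '-')

print(snakify('Jean Luc'))  # jean-luc
print(snakify('Héloïse'))   # heloise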
ceph/ceph-deploy
ceph_deploy/install.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/install.py#L445-L622
def make(parser):
    """
    Install Ceph packages on remote hosts.
    """
    version = parser.add_mutually_exclusive_group()

    # XXX deprecated in favor of release
    version.add_argument(
        '--stable',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='[DEPRECATED] install a release known as CODENAME\
            (done by default) (default: %(default)s)',
    )

    version.add_argument(
        '--release',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='install a release known as CODENAME\
            (done by default) (default: %(default)s)',
    )

    version.add_argument(
        '--testing',
        nargs=0,
        action=StoreVersion,
        help='install the latest development release',
    )

    version.add_argument(
        '--dev',
        nargs='?',
        action=StoreVersion,
        const='master',
        metavar='BRANCH_OR_TAG',
        help='install a bleeding edge build from Git branch\
            or tag (default: %(default)s)',
    )

    parser.add_argument(
        '--dev-commit',
        nargs='?',
        action=StoreVersion,
        metavar='COMMIT',
        help='install a bleeding edge build from Git commit (defaults to master branch)',
    )

    version.set_defaults(
        stable=None,  # XXX deprecated in favor of release
        release=None,  # Set the default release in sanitize_args()
        dev='master',
        version_kind='stable',
    )

    parser.add_argument(
        '--mon',
        dest='install_mon',
        action='store_true',
        help='install the mon component only',
    )

    parser.add_argument(
        '--mgr',
        dest='install_mgr',
        action='store_true',
        help='install the mgr component only',
    )

    parser.add_argument(
        '--mds',
        dest='install_mds',
        action='store_true',
        help='install the mds component only',
    )

    parser.add_argument(
        '--rgw',
        dest='install_rgw',
        action='store_true',
        help='install the rgw component only',
    )

    parser.add_argument(
        '--osd',
        dest='install_osd',
        action='store_true',
        help='install the osd component only',
    )

    parser.add_argument(
        '--tests',
        dest='install_tests',
        action='store_true',
        help='install the testing components',
    )

    parser.add_argument(
        '--cli', '--common',
        dest='install_common',
        action='store_true',
        help='install the common component only',
    )

    parser.add_argument(
        '--all',
        dest='install_all',
        action='store_true',
        help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
    )

    repo = parser.add_mutually_exclusive_group()

    repo.add_argument(
        '--adjust-repos',
        dest='adjust_repos',
        action='store_true',
        help='install packages modifying source repos',
    )

    repo.add_argument(
        '--no-adjust-repos',
        dest='adjust_repos',
        action='store_false',
        help='install packages without modifying source repos',
    )

    repo.add_argument(
        '--repo',
        action='store_true',
        help='install repo files only (skips package installation)',
    )

    repo.set_defaults(
        adjust_repos=True,
    )

    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to install on',
    )

    parser.add_argument(
        '--local-mirror',
        nargs='?',
        const='PATH',
        default=None,
        help='Fetch packages and push them to hosts for a local repo mirror',
    )

    parser.add_argument(
        '--repo-url',
        nargs='?',
        dest='repo_url',
        help='specify a repo URL that mirrors/contains Ceph packages',
    )

    parser.add_argument(
        '--gpg-url',
        nargs='?',
        dest='gpg_url',
        help='specify a GPG key URL to be used with custom repos\
            (defaults to ceph.com)'
    )

    parser.add_argument(
        '--nogpgcheck',
        action='store_true',
        help='install packages without gpgcheck',
    )

    parser.set_defaults(
        func=install,
    )
[ "def", "make", "(", "parser", ")", ":", "version", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "# XXX deprecated in favor of release", "version", ".", "add_argument", "(", "'--stable'", ",", "nargs", "=", "'?'", ",", "action", "=", "StoreVersion", ",", "metavar", "=", "'CODENAME'", ",", "help", "=", "'[DEPRECATED] install a release known as CODENAME\\\n (done by default) (default: %(default)s)'", ",", ")", "version", ".", "add_argument", "(", "'--release'", ",", "nargs", "=", "'?'", ",", "action", "=", "StoreVersion", ",", "metavar", "=", "'CODENAME'", ",", "help", "=", "'install a release known as CODENAME\\\n (done by default) (default: %(default)s)'", ",", ")", "version", ".", "add_argument", "(", "'--testing'", ",", "nargs", "=", "0", ",", "action", "=", "StoreVersion", ",", "help", "=", "'install the latest development release'", ",", ")", "version", ".", "add_argument", "(", "'--dev'", ",", "nargs", "=", "'?'", ",", "action", "=", "StoreVersion", ",", "const", "=", "'master'", ",", "metavar", "=", "'BRANCH_OR_TAG'", ",", "help", "=", "'install a bleeding edge build from Git branch\\\n or tag (default: %(default)s)'", ",", ")", "parser", ".", "add_argument", "(", "'--dev-commit'", ",", "nargs", "=", "'?'", ",", "action", "=", "StoreVersion", ",", "metavar", "=", "'COMMIT'", ",", "help", "=", "'install a bleeding edge build from Git commit (defaults to master branch)'", ",", ")", "version", ".", "set_defaults", "(", "stable", "=", "None", ",", "# XXX deprecated in favor of release", "release", "=", "None", ",", "# Set the default release in sanitize_args()", "dev", "=", "'master'", ",", "version_kind", "=", "'stable'", ",", ")", "parser", ".", "add_argument", "(", "'--mon'", ",", "dest", "=", "'install_mon'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the mon component only'", ",", ")", "parser", ".", "add_argument", "(", "'--mgr'", ",", "dest", "=", "'install_mgr'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the mgr component only'", ",", ")", "parser", ".", "add_argument", "(", "'--mds'", ",", "dest", "=", "'install_mds'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the mds component only'", ",", ")", "parser", ".", "add_argument", "(", "'--rgw'", ",", "dest", "=", "'install_rgw'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the rgw component only'", ",", ")", "parser", ".", "add_argument", "(", "'--osd'", ",", "dest", "=", "'install_osd'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the osd component only'", ",", ")", "parser", ".", "add_argument", "(", "'--tests'", ",", "dest", "=", "'install_tests'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the testing components'", ",", ")", "parser", ".", "add_argument", "(", "'--cli'", ",", "'--common'", ",", "dest", "=", "'install_common'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install the common component only'", ",", ")", "parser", ".", "add_argument", "(", "'--all'", ",", "dest", "=", "'install_all'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install all Ceph components (mon, osd, mds, rgw) except tests. This is the default'", ",", ")", "repo", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "repo", ".", "add_argument", "(", "'--adjust-repos'", ",", "dest", "=", "'adjust_repos'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install packages modifying source repos'", ",", ")", "repo", ".", "add_argument", "(", "'--no-adjust-repos'", ",", "dest", "=", "'adjust_repos'", ",", "action", "=", "'store_false'", ",", "help", "=", "'install packages without modifying source repos'", ",", ")", "repo", ".", "add_argument", "(", "'--repo'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install repo files only (skips package installation)'", ",", ")", "repo", ".", "set_defaults", "(", "adjust_repos", "=", "True", ",", ")", "parser", ".", "add_argument", "(", "'host'", ",", "metavar", "=", "'HOST'", ",", "nargs", "=", "'+'", ",", "help", "=", "'hosts to install on'", ",", ")", "parser", ".", "add_argument", "(", "'--local-mirror'", ",", "nargs", "=", "'?'", ",", "const", "=", "'PATH'", ",", "default", "=", "None", ",", "help", "=", "'Fetch packages and push them to hosts for a local repo mirror'", ",", ")", "parser", ".", "add_argument", "(", "'--repo-url'", ",", "nargs", "=", "'?'", ",", "dest", "=", "'repo_url'", ",", "help", "=", "'specify a repo URL that mirrors/contains Ceph packages'", ",", ")", "parser", ".", "add_argument", "(", "'--gpg-url'", ",", "nargs", "=", "'?'", ",", "dest", "=", "'gpg_url'", ",", "help", "=", "'specify a GPG key URL to be used with custom repos\\\n (defaults to ceph.com)'", ")", "parser", ".", "add_argument", "(", "'--nogpgcheck'", ",", "action", "=", "'store_true'", ",", "help", "=", "'install packages without gpgcheck'", ",", ")", "parser", ".", "set_defaults", "(", "func", "=", "install", ",", ")" ]
Install Ceph packages on remote hosts.
[ "Install", "Ceph", "packages", "on", "remote", "hosts", "." ]
python
train
23.174157
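Since make() only registers options on the parser it is given, the resulting CLI can be exercised with a bare argparse parser. A hedged sketch, assuming ceph-deploy is importable (StoreVersion and install are module globals in ceph_deploy.install):

import argparse
from ceph_deploy.install import make  # assumes ceph-deploy is installed

parser = argparse.ArgumentParser(prog='ceph-deploy install')
make(parser)

# Positional HOST args plus one of the mutually exclusive version flags.
args = parser.parse_args(['--release', 'nautilus', 'node1', 'node2'])
print(args.release)  # nautilus
print(args.host)     # ['node1', 'node2']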
saltstack/salt
salt/utils/gitfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L467-L503
def _get_envs_from_ref_paths(self, refs):
        '''
        Return the names of remote refs (stripped of the remote name) and
        tags which map to the branches and tags.
        '''
        def _check_ref(env_set, rname):
            '''
            Add the appropriate saltenv(s) to the set
            '''
            if rname in self.saltenv_revmap:
                env_set.update(self.saltenv_revmap[rname])
            else:
                if rname == self.base:
                    env_set.add('base')
                elif not self.disable_saltenv_mapping:
                    env_set.add(rname)

        use_branches = 'branch' in self.ref_types
        use_tags = 'tag' in self.ref_types

        ret = set()
        if salt.utils.stringutils.is_hex(self.base):
            # gitfs_base or per-saltenv 'base' may point to a commit ID, which
            # would not show up in the refs. Make sure we include it.
            ret.add('base')
        for ref in salt.utils.data.decode(refs):
            if ref.startswith('refs/'):
                ref = ref[5:]
            rtype, rname = ref.split('/', 1)
            if rtype == 'remotes' and use_branches:
                parted = rname.partition('/')
                rname = parted[2] if parted[2] else parted[0]
                _check_ref(ret, rname)
            elif rtype == 'tags' and use_tags:
                _check_ref(ret, rname)
        return ret
[ "def", "_get_envs_from_ref_paths", "(", "self", ",", "refs", ")", ":", "def", "_check_ref", "(", "env_set", ",", "rname", ")", ":", "'''\n Add the appropriate saltenv(s) to the set\n '''", "if", "rname", "in", "self", ".", "saltenv_revmap", ":", "env_set", ".", "update", "(", "self", ".", "saltenv_revmap", "[", "rname", "]", ")", "else", ":", "if", "rname", "==", "self", ".", "base", ":", "env_set", ".", "add", "(", "'base'", ")", "elif", "not", "self", ".", "disable_saltenv_mapping", ":", "env_set", ".", "add", "(", "rname", ")", "use_branches", "=", "'branch'", "in", "self", ".", "ref_types", "use_tags", "=", "'tag'", "in", "self", ".", "ref_types", "ret", "=", "set", "(", ")", "if", "salt", ".", "utils", ".", "stringutils", ".", "is_hex", "(", "self", ".", "base", ")", ":", "# gitfs_base or per-saltenv 'base' may point to a commit ID, which", "# would not show up in the refs. Make sure we include it.", "ret", ".", "add", "(", "'base'", ")", "for", "ref", "in", "salt", ".", "utils", ".", "data", ".", "decode", "(", "refs", ")", ":", "if", "ref", ".", "startswith", "(", "'refs/'", ")", ":", "ref", "=", "ref", "[", "5", ":", "]", "rtype", ",", "rname", "=", "ref", ".", "split", "(", "'/'", ",", "1", ")", "if", "rtype", "==", "'remotes'", "and", "use_branches", ":", "parted", "=", "rname", ".", "partition", "(", "'/'", ")", "rname", "=", "parted", "[", "2", "]", "if", "parted", "[", "2", "]", "else", "parted", "[", "0", "]", "_check_ref", "(", "ret", ",", "rname", ")", "elif", "rtype", "==", "'tags'", "and", "use_tags", ":", "_check_ref", "(", "ret", ",", "rname", ")", "return", "ret" ]
Return the names of remote refs (stripped of the remote name) and
        tags which map to the branches and tags.
[ "Return", "the", "names", "of", "remote", "refs", "(", "stripped", "of", "the", "remote", "name", ")", "and", "tags", "which", "map", "to", "the", "branches", "and", "tags", "." ]
python
train
37.594595
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L2053-L2074
def get_account_invitation(self, account_id, invitation_id, **kwargs):  # noqa: E501
        """Details of a user invitation. # noqa: E501

        An endpoint for retrieving the details of an active user invitation
        sent for a new or an existing user to join the account.
        **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param str account_id: Account ID. (required)
        :param str invitation_id: The ID of the invitation to be retrieved. (required)
        :return: UserInvitationResp
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('asynchronous'):
            return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs)  # noqa: E501
            return data
[ "def", "get_account_invitation", "(", "self", ",", "account_id", ",", "invitation_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_account_invitation_with_http_info", "(", "account_id", ",", "invitation_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_account_invitation_with_http_info", "(", "account_id", ",", "invitation_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Details of a user invitation. # noqa: E501

        An endpoint for retrieving the details of an active user invitation
        sent for a new or an existing user to join the account.
        **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass asynchronous=True
        >>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
        >>> result = thread.get()

        :param asynchronous bool
        :param str account_id: Account ID. (required)
        :param str invitation_id: The ID of the invitation to be retrieved. (required)
        :return: UserInvitationResp
                 If the method is called asynchronously,
                 returns the request thread.
[ "Details", "of", "a", "user", "invitation", ".", "#", "noqa", ":", "E501" ]
python
train
62.227273
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1139-L1169
def get_path_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None):
        """GetPathContents.

        [Preview API] Gets the contents of a directory in the given source code repository.
        :param str project: Project ID or project name
        :param str provider_name: The name of the source provider.
        :param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
        :param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
        :param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
        :param str path: The path contents to list, relative to the root of the repository.
        :rtype: [SourceRepositoryItem]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if provider_name is not None:
            route_values['providerName'] = self._serialize.url('provider_name', provider_name, 'str')
        query_parameters = {}
        if service_endpoint_id is not None:
            query_parameters['serviceEndpointId'] = self._serialize.query('service_endpoint_id', service_endpoint_id, 'str')
        if repository is not None:
            query_parameters['repository'] = self._serialize.query('repository', repository, 'str')
        if commit_or_branch is not None:
            query_parameters['commitOrBranch'] = self._serialize.query('commit_or_branch', commit_or_branch, 'str')
        if path is not None:
            query_parameters['path'] = self._serialize.query('path', path, 'str')
        response = self._send(http_method='GET',
                              location_id='7944d6fb-df01-4709-920a-7a189aa34037',
                              version='5.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[SourceRepositoryItem]', self._unwrap_collection(response))
[ "def", "get_path_contents", "(", "self", ",", "project", ",", "provider_name", ",", "service_endpoint_id", "=", "None", ",", "repository", "=", "None", ",", "commit_or_branch", "=", "None", ",", "path", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "provider_name", "is", "not", "None", ":", "route_values", "[", "'providerName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'provider_name'", ",", "provider_name", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "service_endpoint_id", "is", "not", "None", ":", "query_parameters", "[", "'serviceEndpointId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'service_endpoint_id'", ",", "service_endpoint_id", ",", "'str'", ")", "if", "repository", "is", "not", "None", ":", "query_parameters", "[", "'repository'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'repository'", ",", "repository", ",", "'str'", ")", "if", "commit_or_branch", "is", "not", "None", ":", "query_parameters", "[", "'commitOrBranch'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'commit_or_branch'", ",", "commit_or_branch", ",", "'str'", ")", "if", "path", "is", "not", "None", ":", "query_parameters", "[", "'path'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'path'", ",", "path", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'7944d6fb-df01-4709-920a-7a189aa34037'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[SourceRepositoryItem]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetPathContents.
        [Preview API] Gets the contents of a directory in the given source code repository.
        :param str project: Project ID or project name
        :param str provider_name: The name of the source provider.
        :param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
        :param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
        :param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
        :param str path: The path contents to list, relative to the root of the repository.
        :rtype: [SourceRepositoryItem]
[ "GetPathContents", ".", "[", "Preview", "API", "]", "Gets", "the", "contents", "of", "a", "directory", "in", "the", "given", "source", "code", "repository", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "provider_name", ":", "The", "name", "of", "the", "source", "provider", ".", ":", "param", "str", "service_endpoint_id", ":", "If", "specified", "the", "ID", "of", "the", "service", "endpoint", "to", "query", ".", "Can", "only", "be", "omitted", "for", "providers", "that", "do", "not", "use", "service", "endpoints", "e", ".", "g", ".", "TFVC", "or", "TFGit", ".", ":", "param", "str", "repository", ":", "If", "specified", "the", "vendor", "-", "specific", "identifier", "or", "the", "name", "of", "the", "repository", "to", "get", "branches", ".", "Can", "only", "be", "omitted", "for", "providers", "that", "do", "not", "support", "multiple", "repositories", ".", ":", "param", "str", "commit_or_branch", ":", "The", "identifier", "of", "the", "commit", "or", "branch", "from", "which", "a", "file", "s", "contents", "are", "retrieved", ".", ":", "param", "str", "path", ":", "The", "path", "contents", "to", "list", "relative", "to", "the", "root", "of", "the", "repository", ".", ":", "rtype", ":", "[", "SourceRepositoryItem", "]" ]
python
train
73.387097
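A hypothetical call site for the method above, assuming the azure-devops package and a valid personal access token; the organization URL, project name, and repository name are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', 'MY_PAT')  # placeholder token
connection = Connection(base_url='https://dev.azure.com/myorg', creds=credentials)
build_client = connection.clients.get_build_client()

# List the repository root for a TFGit source provider.
items = build_client.get_path_contents(
    project='MyProject',
    provider_name='tfsgit',
    repository='my-repo',
    commit_or_branch='master',
    path='/')
for item in items:
    print(item.path)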
mayfield/shellish
shellish/command/command.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L135-L149
def get_pager_spec(self):
        """ Find the best pager settings for this command.  If the user has
        specified overrides in the INI config file we prefer those. """
        self_config = self.get_config()
        pagercmd = self_config.get('pager')
        istty = self_config.getboolean('pager_istty')
        core_config = self.get_config('core')
        if pagercmd is None:
            pagercmd = core_config.get('pager')
        if istty is None:
            istty = core_config.get('pager_istty')
        return {
            "pagercmd": pagercmd,
            "istty": istty
        }
[ "def", "get_pager_spec", "(", "self", ")", ":", "self_config", "=", "self", ".", "get_config", "(", ")", "pagercmd", "=", "self_config", ".", "get", "(", "'pager'", ")", "istty", "=", "self_config", ".", "getboolean", "(", "'pager_istty'", ")", "core_config", "=", "self", ".", "get_config", "(", "'core'", ")", "if", "pagercmd", "is", "None", ":", "pagercmd", "=", "core_config", ".", "get", "(", "'pager'", ")", "if", "istty", "is", "None", ":", "istty", "=", "core_config", ".", "get", "(", "'pager_istty'", ")", "return", "{", "\"pagercmd\"", ":", "pagercmd", ",", "\"istty\"", ":", "istty", "}" ]
Find the best pager settings for this command. If the user has specified overrides in the INI config file we prefer those.
[ "Find", "the", "best", "pager", "settings", "for", "this", "command", ".", "If", "the", "user", "has", "specified", "overrides", "in", "the", "INI", "config", "file", "we", "prefer", "those", "." ]
python
train
39
klmitch/policies
policies/policy.py
https://github.com/klmitch/policies/blob/edf26c5707a5a0cc8e9f59a209a64dee7f79b7a4/policies/policy.py#L467-L497
def resolve(self, symbol):
        """
        Resolve a symbol using the entrypoint group.

        :param symbol: The symbol being resolved.

        :returns: The value of that symbol.  If the symbol cannot be
                  found, or if no entrypoint group was passed to the
                  constructor, will return ``None``.
        """
        # Search for a corresponding symbol
        if symbol not in self._resolve_cache:
            result = None

            # Search through entrypoints only if we have a group
            if self._group is not None:
                for ep in pkg_resources.iter_entry_points(self._group, symbol):
                    try:
                        result = ep.load()
                    except (ImportError, AttributeError,
                            pkg_resources.UnknownExtra):
                        continue

                    # We found the result we were looking for
                    break

            # Cache the result
            self._resolve_cache[symbol] = result

        return self._resolve_cache[symbol]
[ "def", "resolve", "(", "self", ",", "symbol", ")", ":", "# Search for a corresponding symbol", "if", "symbol", "not", "in", "self", ".", "_resolve_cache", ":", "result", "=", "None", "# Search through entrypoints only if we have a group", "if", "self", ".", "_group", "is", "not", "None", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "self", ".", "_group", ",", "symbol", ")", ":", "try", ":", "result", "=", "ep", ".", "load", "(", ")", "except", "(", "ImportError", ",", "AttributeError", ",", "pkg_resources", ".", "UnknownExtra", ")", ":", "continue", "# We found the result we were looking for", "break", "# Cache the result", "self", ".", "_resolve_cache", "[", "symbol", "]", "=", "result", "return", "self", ".", "_resolve_cache", "[", "symbol", "]" ]
Resolve a symbol using the entrypoint group.

        :param symbol: The symbol being resolved.

        :returns: The value of that symbol.  If the symbol cannot be
                  found, or if no entrypoint group was passed to the
                  constructor, will return ``None``.
[ "Resolve", "a", "symbol", "using", "the", "entrypoint", "group", "." ]
python
train
33.806452
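The same lookup can be reproduced outside the class. A minimal sketch, assuming setuptools' pkg_resources is available and that some installed package advertises an entry point named 'upper' under the hypothetical group 'my.rules':

import pkg_resources

def resolve(group, symbol):
    # Return the first loadable entry point named `symbol` in `group`, else None.
    for ep in pkg_resources.iter_entry_points(group, symbol):
        try:
            return ep.load()
        except (ImportError, AttributeError, pkg_resources.UnknownExtra):
            continue
    return None

func = resolve('my.rules', 'upper')  # hypothetical group/name
print(func if func is None else func('hello'))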
ralphje/imagemounter
imagemounter/unmounter.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/unmounter.py#L207-L214
def unmount_loopbacks(self):
        """Unmounts all loopback devices as identified by :func:`find_loopbacks`"""

        # re-index loopback devices
        self._index_loopbacks()

        for dev in self.find_loopbacks():
            _util.check_output_(['losetup', '-d', dev])
[ "def", "unmount_loopbacks", "(", "self", ")", ":", "# re-index loopback devices", "self", ".", "_index_loopbacks", "(", ")", "for", "dev", "in", "self", ".", "find_loopbacks", "(", ")", ":", "_util", ".", "check_output_", "(", "[", "'losetup'", ",", "'-d'", ",", "dev", "]", ")" ]
Unmounts all loopback devices as identified by :func:`find_loopbacks`
[ "Unmounts", "all", "loopback", "devices", "as", "identified", "by", ":", "func", ":", "find_loopbacks" ]
python
train
34.125
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L446-L462
def get_exit_code(self):
        """
        @rtype:  int
        @return: Process exit code, or C{STILL_ACTIVE} if it's still alive.

        @warning: If a process returns C{STILL_ACTIVE} as its exit code,
            you may not be able to determine if it's active or not with this
            method. Use L{is_alive} to check if the process is still active.
            Alternatively you can call L{get_handle} to get the handle object
            and then L{ProcessHandle.wait} on it to wait until the process
            finishes running.
        """
        if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
            dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
        else:
            dwAccess = win32.PROCESS_QUERY_INFORMATION
        return win32.GetExitCodeProcess( self.get_handle(dwAccess) )
[ "def", "get_exit_code", "(", "self", ")", ":", "if", "win32", ".", "PROCESS_ALL_ACCESS", "==", "win32", ".", "PROCESS_ALL_ACCESS_VISTA", ":", "dwAccess", "=", "win32", ".", "PROCESS_QUERY_LIMITED_INFORMATION", "else", ":", "dwAccess", "=", "win32", ".", "PROCESS_QUERY_INFORMATION", "return", "win32", ".", "GetExitCodeProcess", "(", "self", ".", "get_handle", "(", "dwAccess", ")", ")" ]
@rtype:  int
        @return: Process exit code, or C{STILL_ACTIVE} if it's still alive.

        @warning: If a process returns C{STILL_ACTIVE} as its exit code,
            you may not be able to determine if it's active or not with this
            method. Use L{is_alive} to check if the process is still active.
            Alternatively you can call L{get_handle} to get the handle object
            and then L{ProcessHandle.wait} on it to wait until the process
            finishes running.
[ "@rtype", ":", "int", "@return", ":", "Process", "exit", "code", "or", "C", "{", "STILL_ACTIVE", "}", "if", "it", "s", "still", "alive", "." ]
python
train
47.823529
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py#L1700-L1757
def create_port_postcommit(self, context):
        """Create port non-database commit event."""

        # No new events are handled until replay
        # thread has put the switch in active state.
        # If a switch is in active state, verify
        # the switch is still in active state
        # before accepting this new event.
        #
        # If create_port_postcommit fails, it causes
        # other openstack dbs to be cleared and
        # retries for new VMs will stop.  Subnet
        # transactions will continue to be retried.

        vlan_segment, vxlan_segment = self._get_segments(
            context.top_bound_segment,
            context.bottom_bound_segment)

        # Verify segment.
        if not self._is_valid_segment(vlan_segment):
            return

        port = context.current
        if self._is_supported_deviceowner(port):
            if nexus_help.is_baremetal(context.current):
                all_switches, active_switches = (
                    self._get_baremetal_switches(context.current))
            else:
                host_id = context.current.get(bc.portbindings.HOST_ID)
                all_switches, active_switches = (
                    self._get_host_switches(host_id))

            # Verify switch is still up before replay
            # thread checks.
            verified_active_switches = []
            for switch_ip in active_switches:
                try:
                    self.driver.get_nexus_type(switch_ip)
                    verified_active_switches.append(switch_ip)
                except Exception as e:
                    LOG.error("Failed to ping "
                              "switch ip %(switch_ip)s error %(exp_err)s",
                              {'switch_ip': switch_ip, 'exp_err': e})

            LOG.debug("Create Stats: thread %(thid)d, "
                      "all_switches %(all)d, "
                      "active %(active)d, verified %(verify)d",
                      {'thid': threading.current_thread().ident,
                       'all': len(all_switches),
                       'active': len(active_switches),
                       'verify': len(verified_active_switches)})

            # if host_id is valid and there is no active
            # switches remaining
            if all_switches and not verified_active_switches:
                raise excep.NexusConnectFailed(
                    nexus_host=all_switches[0], config="None",
                    exc="Create Failed: Port event can not "
                        "be processed at this time.")
[ "def", "create_port_postcommit", "(", "self", ",", "context", ")", ":", "# No new events are handled until replay", "# thread has put the switch in active state.", "# If a switch is in active state, verify", "# the switch is still in active state", "# before accepting this new event.", "#", "# If create_port_postcommit fails, it causes", "# other openstack dbs to be cleared and", "# retries for new VMs will stop. Subnet", "# transactions will continue to be retried.", "vlan_segment", ",", "vxlan_segment", "=", "self", ".", "_get_segments", "(", "context", ".", "top_bound_segment", ",", "context", ".", "bottom_bound_segment", ")", "# Verify segment.", "if", "not", "self", ".", "_is_valid_segment", "(", "vlan_segment", ")", ":", "return", "port", "=", "context", ".", "current", "if", "self", ".", "_is_supported_deviceowner", "(", "port", ")", ":", "if", "nexus_help", ".", "is_baremetal", "(", "context", ".", "current", ")", ":", "all_switches", ",", "active_switches", "=", "(", "self", ".", "_get_baremetal_switches", "(", "context", ".", "current", ")", ")", "else", ":", "host_id", "=", "context", ".", "current", ".", "get", "(", "bc", ".", "portbindings", ".", "HOST_ID", ")", "all_switches", ",", "active_switches", "=", "(", "self", ".", "_get_host_switches", "(", "host_id", ")", ")", "# Verify switch is still up before replay", "# thread checks.", "verified_active_switches", "=", "[", "]", "for", "switch_ip", "in", "active_switches", ":", "try", ":", "self", ".", "driver", ".", "get_nexus_type", "(", "switch_ip", ")", "verified_active_switches", ".", "append", "(", "switch_ip", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Failed to ping \"", "\"switch ip %(switch_ip)s error %(exp_err)s\"", ",", "{", "'switch_ip'", ":", "switch_ip", ",", "'exp_err'", ":", "e", "}", ")", "LOG", ".", "debug", "(", "\"Create Stats: thread %(thid)d, \"", "\"all_switches %(all)d, \"", "\"active %(active)d, verified %(verify)d\"", ",", "{", "'thid'", ":", "threading", ".", "current_thread", "(", ")", ".", "ident", ",", "'all'", ":", "len", "(", "all_switches", ")", ",", "'active'", ":", "len", "(", "active_switches", ")", ",", "'verify'", ":", "len", "(", "verified_active_switches", ")", "}", ")", "# if host_id is valid and there is no active", "# switches remaining", "if", "all_switches", "and", "not", "verified_active_switches", ":", "raise", "excep", ".", "NexusConnectFailed", "(", "nexus_host", "=", "all_switches", "[", "0", "]", ",", "config", "=", "\"None\"", ",", "exc", "=", "\"Create Failed: Port event can not \"", "\"be processed at this time.\"", ")" ]
Create port non-database commit event.
[ "Create", "port", "non", "-", "database", "commit", "event", "." ]
python
train
43.827586
assemblerflow/flowcraft
flowcraft/generator/process_details.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process_details.py#L110-L169
def proc_collector(process_map, args, pipeline_string):
    """
    Function that collects all processes available and stores a dictionary
    of the required arguments of each process class to be passed to
    procs_dict_parser

    Parameters
    ----------
    process_map: dict
        The dictionary with the Processes currently available in flowcraft
        and their corresponding classes as values
    args: argparse.Namespace
        The arguments passed through argparser that will be accessed to
        check the type of list to be printed
    pipeline_string: str
        the pipeline string
    """
    arguments_list = []
    # prints a detailed list of the process class arguments
    if args.detailed_list:
        # list of attributes to be passed to proc_collector
        arguments_list += [
            "input_type",
            "output_type",
            "description",
            "dependencies",
            "conflicts",
            "directives"
        ]
    # prints a short list with each process and the corresponding description
    if args.short_list:
        arguments_list += [
            "description"
        ]

    if arguments_list:
        # dict to store only the required entries
        procs_dict = {}
        # loops between all process_map Processes
        for name, cls in process_map.items():

            # instantiates each Process class
            cls_inst = cls(template=name)

            # checks if recipe is provided
            if pipeline_string:
                if name not in pipeline_string:
                    continue

            d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
                 vars(cls_inst) if arg_key in arguments_list}
            procs_dict[name] = d

        procs_dict_parser(procs_dict)
        sys.exit(0)
[ "def", "proc_collector", "(", "process_map", ",", "args", ",", "pipeline_string", ")", ":", "arguments_list", "=", "[", "]", "# prints a detailed list of the process class arguments", "if", "args", ".", "detailed_list", ":", "# list of attributes to be passed to proc_collector", "arguments_list", "+=", "[", "\"input_type\"", ",", "\"output_type\"", ",", "\"description\"", ",", "\"dependencies\"", ",", "\"conflicts\"", ",", "\"directives\"", "]", "# prints a short list with each process and the corresponding description", "if", "args", ".", "short_list", ":", "arguments_list", "+=", "[", "\"description\"", "]", "if", "arguments_list", ":", "# dict to store only the required entries", "procs_dict", "=", "{", "}", "# loops between all process_map Processes", "for", "name", ",", "cls", "in", "process_map", ".", "items", "(", ")", ":", "# instantiates each Process class", "cls_inst", "=", "cls", "(", "template", "=", "name", ")", "# checks if recipe is provided", "if", "pipeline_string", ":", "if", "name", "not", "in", "pipeline_string", ":", "continue", "d", "=", "{", "arg_key", ":", "vars", "(", "cls_inst", ")", "[", "arg_key", "]", "for", "arg_key", "in", "vars", "(", "cls_inst", ")", "if", "arg_key", "in", "arguments_list", "}", "procs_dict", "[", "name", "]", "=", "d", "procs_dict_parser", "(", "procs_dict", ")", "sys", ".", "exit", "(", "0", ")" ]
Function that collects all processes available and stores a dictionary
    of the required arguments of each process class to be passed to
    procs_dict_parser

    Parameters
    ----------
    process_map: dict
        The dictionary with the Processes currently available in flowcraft
        and their corresponding classes as values
    args: argparse.Namespace
        The arguments passed through argparser that will be accessed to
        check the type of list to be printed
    pipeline_string: str
        the pipeline string
[ "Function", "that", "collects", "all", "processes", "available", "and", "stores", "a", "dictionary", "of", "the", "required", "arguments", "of", "each", "process", "class", "to", "be", "passed", "to", "procs_dict_parser" ]
python
test
29.1
rochacbruno/dynaconf
dynaconf/loaders/redis_loader.py
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/loaders/redis_loader.py#L63-L87
def write(obj, data=None, **kwargs):
    """Write a value into loader source

    :param obj: settings object
    :param data: vars to be stored
    :param kwargs: vars to be stored
    :return:
    """
    if obj.REDIS_ENABLED_FOR_DYNACONF is False:
        raise RuntimeError(
            "Redis is not configured \n"
            "export REDIS_ENABLED_FOR_DYNACONF=true\n"
            "and configure the REDIS_FOR_DYNACONF_* variables"
        )
    client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
    holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
    data = data or {}
    data.update(kwargs)
    if not data:
        raise AttributeError("Data must be provided")
    redis_data = {
        key.upper(): unparse_conf_data(value) for key, value in data.items()
    }
    client.hmset(holder.upper(), redis_data)
    load(obj)
[ "def", "write", "(", "obj", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "obj", ".", "REDIS_ENABLED_FOR_DYNACONF", "is", "False", ":", "raise", "RuntimeError", "(", "\"Redis is not configured \\n\"", "\"export REDIS_ENABLED_FOR_DYNACONF=true\\n\"", "\"and configure the REDIS_FOR_DYNACONF_* variables\"", ")", "client", "=", "StrictRedis", "(", "*", "*", "obj", ".", "REDIS_FOR_DYNACONF", ")", "holder", "=", "obj", ".", "get", "(", "\"ENVVAR_PREFIX_FOR_DYNACONF\"", ")", "data", "=", "data", "or", "{", "}", "data", ".", "update", "(", "kwargs", ")", "if", "not", "data", ":", "raise", "AttributeError", "(", "\"Data must be provided\"", ")", "redis_data", "=", "{", "key", ".", "upper", "(", ")", ":", "unparse_conf_data", "(", "value", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", "}", "client", ".", "hmset", "(", "holder", ".", "upper", "(", ")", ",", "redis_data", ")", "load", "(", "obj", ")" ]
Write a value into loader source

    :param obj: settings object
    :param data: vars to be stored
    :param kwargs: vars to be stored
    :return:
[ "Write", "a", "value", "into", "loader", "source" ]
python
train
32.16
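A hedged usage sketch: assuming a Redis server is running and the REDIS_ENABLED_FOR_DYNACONF / REDIS_FOR_DYNACONF settings are configured, writing and then reading back might look like this (the key names are illustrative):

from dynaconf import settings
from dynaconf.loaders import redis_loader

# Persist two values under the settings holder hash, then reload them.
redis_loader.write(settings, {"database_url": "redis://localhost", "debug": True})
print(settings.DATABASE_URL)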
google/grr
grr/client/grr_response_client/client_actions/file_finder_utils/uploading.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/file_finder_utils/uploading.py#L78-L95
def _UploadChunk(self, chunk):
    """Uploads a single chunk to the transfer store flow.

    Args:
      chunk: A chunk to upload.

    Returns:
      A `BlobImageChunkDescriptor` object.
    """
    blob = _CompressedDataBlob(chunk)

    self._action.ChargeBytesToSession(len(chunk.data))
    self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)

    return rdf_client_fs.BlobImageChunkDescriptor(
        digest=hashlib.sha256(chunk.data).digest(),
        offset=chunk.offset,
        length=len(chunk.data))
[ "def", "_UploadChunk", "(", "self", ",", "chunk", ")", ":", "blob", "=", "_CompressedDataBlob", "(", "chunk", ")", "self", ".", "_action", ".", "ChargeBytesToSession", "(", "len", "(", "chunk", ".", "data", ")", ")", "self", ".", "_action", ".", "SendReply", "(", "blob", ",", "session_id", "=", "self", ".", "_TRANSFER_STORE_SESSION_ID", ")", "return", "rdf_client_fs", ".", "BlobImageChunkDescriptor", "(", "digest", "=", "hashlib", ".", "sha256", "(", "chunk", ".", "data", ")", ".", "digest", "(", ")", ",", "offset", "=", "chunk", ".", "offset", ",", "length", "=", "len", "(", "chunk", ".", "data", ")", ")" ]
Uploads a single chunk to the transfer store flow.

    Args:
      chunk: A chunk to upload.

    Returns:
      A `BlobImageChunkDescriptor` object.
[ "Uploads", "a", "single", "chunk", "to", "the", "transfer", "store", "flow", "." ]
python
train
28.611111
linkedin/pyexchange
pyexchange/exchange2010/soap_request.py
https://github.com/linkedin/pyexchange/blob/d568f4edd326adb451b915ddf66cf1a37820e3ca/pyexchange/exchange2010/soap_request.py#L62-L76
def delete_field(field_uri):
  """
      Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of appending.

      <t:DeleteItemField>
        <t:FieldURI FieldURI="calendar:Resources"/>
      </t:DeleteItemField>
  """
  root = T.DeleteItemField(
    T.FieldURI(FieldURI=field_uri)
  )
  return root
[ "def", "delete_field", "(", "field_uri", ")", ":", "root", "=", "T", ".", "DeleteItemField", "(", "T", ".", "FieldURI", "(", "FieldURI", "=", "field_uri", ")", ")", "return", "root" ]
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of appending.

      <t:DeleteItemField>
        <t:FieldURI FieldURI="calendar:Resources"/>
      </t:DeleteItemField>
[ "Helper", "function", "to", "request", "deletion", "of", "a", "field", ".", "This", "is", "necessary", "when", "you", "want", "to", "overwrite", "values", "instead", "of", "appending", "." ]
python
train
23.266667
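To see the element this helper builds, one can serialize it with lxml; a sketch, assuming T is an lxml ElementMaker bound to the EWS types namespace (as elsewhere in soap_request.py):

from lxml import etree
from pyexchange.exchange2010.soap_request import delete_field

node = delete_field(u'calendar:Resources')
# Should print a <t:DeleteItemField> wrapping a <t:FieldURI .../> element.
print(etree.tostring(node, pretty_print=True).decode())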
gem/oq-engine
openquake/hmtk/seismicity/max_magnitude/kijko_sellevol_bayes.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/max_magnitude/kijko_sellevol_bayes.py#L60-L84
def check_config(config, data):
    '''Check config file inputs

    :param dict config:
        Configuration settings for the function
    '''
    essential_keys = ['input_mmin', 'b-value', 'sigma-b']
    for key in essential_keys:
        if not key in config.keys():
            raise ValueError('For KijkoSellevolBayes the key %s needs to '
                             'be set in the configuration' % key)
    if 'tolerance' not in config.keys() or not config['tolerance']:
        config['tolerance'] = 1E-5

    if not config.get('maximum_iterations', False):
        config['maximum_iterations'] = 1000

    if config['input_mmin'] < np.min(data['magnitude']):
        config['input_mmin'] = np.min(data['magnitude'])

    # reject a b-value uncertainty that is (effectively) zero
    if fabs(config['sigma-b']) < 1E-15:
        raise ValueError('Sigma-b must be greater than zero!')
    return config
[ "def", "check_config", "(", "config", ",", "data", ")", ":", "essential_keys", "=", "[", "'input_mmin'", ",", "'b-value'", ",", "'sigma-b'", "]", "for", "key", "in", "essential_keys", ":", "if", "not", "key", "in", "config", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'For KijkoSellevolBayes the key %s needs to '", "'be set in the configuration'", "%", "key", ")", "if", "'tolerance'", "not", "in", "config", ".", "keys", "(", ")", "or", "not", "config", "[", "'tolerance'", "]", ":", "config", "[", "'tolerance'", "]", "=", "1E-5", "if", "not", "config", ".", "get", "(", "'maximum_iterations'", ",", "False", ")", ":", "config", "[", "'maximum_iterations'", "]", "=", "1000", "if", "config", "[", "'input_mmin'", "]", "<", "np", ".", "min", "(", "data", "[", "'magnitude'", "]", ")", ":", "config", "[", "'input_mmin'", "]", "=", "np", ".", "min", "(", "data", "[", "'magnitude'", "]", ")", "if", "fabs", "(", "config", "[", "'sigma-b'", "]", ")", "<", "1E-15", ":", "raise", "ValueError", "(", "'Sigma-b must be greater than zero!'", ")", "return", "config" ]
Check config file inputs

    :param dict config:
        Configuration settings for the function
[ "Check", "config", "file", "inputs" ]
python
train
33.04
csurfer/rake-nltk
rake_nltk/rake.py
https://github.com/csurfer/rake-nltk/blob/e36116d6074c5ddfbc69bce4440f0342355ceb2e/rake_nltk/rake.py#L178-L192
def _generate_phrases(self, sentences):
        """Method to generate contender phrases given the sentences of the text
        document.

        :param sentences: List of strings where each string represents a
                          sentence which forms the text.
        :return: Set of string tuples where each tuple is a collection
                 of words forming a contender phrase.
        """
        phrase_list = set()
        # Create contender phrases from sentences.
        for sentence in sentences:
            word_list = [word.lower() for word in wordpunct_tokenize(sentence)]
            phrase_list.update(self._get_phrase_list_from_words(word_list))
        return phrase_list
[ "def", "_generate_phrases", "(", "self", ",", "sentences", ")", ":", "phrase_list", "=", "set", "(", ")", "# Create contender phrases from sentences.", "for", "sentence", "in", "sentences", ":", "word_list", "=", "[", "word", ".", "lower", "(", ")", "for", "word", "in", "wordpunct_tokenize", "(", "sentence", ")", "]", "phrase_list", ".", "update", "(", "self", ".", "_get_phrase_list_from_words", "(", "word_list", ")", ")", "return", "phrase_list" ]
Method to generate contender phrases given the sentences of the text
        document.

        :param sentences: List of strings where each string represents a
                          sentence which forms the text.
        :return: Set of string tuples where each tuple is a collection
                 of words forming a contender phrase.
[ "Method", "to", "generate", "contender", "phrases", "given", "the", "sentences", "of", "the", "text", "document", "." ]
python
train
45.866667
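_generate_phrases is internal, but its effect is visible through the public rake-nltk API; assuming the package is installed (with NLTK stopwords data downloaded), a typical run looks like:

from rake_nltk import Rake

r = Rake()  # uses NLTK English stopwords and punctuation by default
r.extract_keywords_from_text(
    'Compatibility of systems of linear constraints over the set of '
    'natural numbers.')
# Phrases are ranked by the RAKE degree/frequency score.
print(r.get_ranked_phrases())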
metagriffin/fso
fso/filesystemoverlay.py
https://github.com/metagriffin/fso/blob/c37701fbfdfde359a2044eb9420abe569a7b35e4/fso/filesystemoverlay.py#L374-L379
def _lexists(self, path):
    '''IMPORTANT: expects `path` to already be deref()'erenced.'''
    try:
      return bool(self._lstat(path))
    except os.error:
      return False
[ "def", "_lexists", "(", "self", ",", "path", ")", ":", "try", ":", "return", "bool", "(", "self", ".", "_lstat", "(", "path", ")", ")", "except", "os", ".", "error", ":", "return", "False" ]
IMPORTANT: expects `path` to already be deref()'erenced.
[ "IMPORTANT", ":", "expects", "path", "to", "already", "be", "deref", "()", "erenced", "." ]
python
valid
28.833333
jpscaletti/solution
solution/fields/color.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/color.py#L56-L71
def normalize_hex(hex_color):
    """Transform a xxx hex color to xxxxxx.
    """
    hex_color = hex_color.replace('#', '').lower()
    length = len(hex_color)
    if length in (6, 8):
        return '#' + hex_color
    if length not in (3, 4):
        return None
    strhex = u'#%s%s%s' % (
        hex_color[0] * 2,
        hex_color[1] * 2,
        hex_color[2] * 2)
    if length == 4:
        strhex += hex_color[3] * 2
    return strhex
[ "def", "normalize_hex", "(", "hex_color", ")", ":", "hex_color", "=", "hex_color", ".", "replace", "(", "'#'", ",", "''", ")", ".", "lower", "(", ")", "length", "=", "len", "(", "hex_color", ")", "if", "length", "in", "(", "6", ",", "8", ")", ":", "return", "'#'", "+", "hex_color", "if", "length", "not", "in", "(", "3", ",", "4", ")", ":", "return", "None", "strhex", "=", "u'#%s%s%s'", "%", "(", "hex_color", "[", "0", "]", "*", "2", ",", "hex_color", "[", "1", "]", "*", "2", ",", "hex_color", "[", "2", "]", "*", "2", ")", "if", "length", "==", "4", ":", "strhex", "+=", "hex_color", "[", "3", "]", "*", "2", "return", "strhex" ]
Transform a xxx hex color to xxxxxx.
[ "Transform", "a", "xxx", "hex", "color", "to", "xxxxxx", "." ]
python
train
26.8125
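Given the branching above, the expected behaviour is easy to pin down with a quick sanity check (not part of the library's own test suite):

from solution.fields.color import normalize_hex  # module path per the record above

assert normalize_hex('#abc') == '#aabbcc'      # 3 digits expand to 6
assert normalize_hex('abcf') == '#aabbccff'    # 4 digits expand to 8 (alpha)
assert normalize_hex('#AABBCC') == '#aabbcc'   # 6 digits pass through, lowered
assert normalize_hex('#ab') is None            # unsupported lengths yield None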
PyCQA/pylint
pylint/checkers/imports.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/imports.py#L930-L939
def _filter_dependencies_graph(self, internal):
        """build the internal or the external dependency graph"""
        graph = collections.defaultdict(set)
        for importee, importers in self.stats["dependencies"].items():
            for importer in importers:
                package = self._module_pkg.get(importer, importer)
                is_inside = importee.startswith(package)
                if is_inside and internal or not is_inside and not internal:
                    graph[importee].add(importer)
        return graph
[ "def", "_filter_dependencies_graph", "(", "self", ",", "internal", ")", ":", "graph", "=", "collections", ".", "defaultdict", "(", "set", ")", "for", "importee", ",", "importers", "in", "self", ".", "stats", "[", "\"dependencies\"", "]", ".", "items", "(", ")", ":", "for", "importer", "in", "importers", ":", "package", "=", "self", ".", "_module_pkg", ".", "get", "(", "importer", ",", "importer", ")", "is_inside", "=", "importee", ".", "startswith", "(", "package", ")", "if", "is_inside", "and", "internal", "or", "not", "is_inside", "and", "not", "internal", ":", "graph", "[", "importee", "]", ".", "add", "(", "importer", ")", "return", "graph" ]
build the internal or the external dependency graph
[ "build", "the", "internal", "or", "the", "external", "dependency", "graph" ]
python
test
53
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L2119-L2127
def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
    """
    Parameters:
     - db_patterns
     - tbl_patterns
     - tbl_types
    """
    self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
    return self.recv_get_table_meta()
[ "def", "get_table_meta", "(", "self", ",", "db_patterns", ",", "tbl_patterns", ",", "tbl_types", ")", ":", "self", ".", "send_get_table_meta", "(", "db_patterns", ",", "tbl_patterns", ",", "tbl_types", ")", "return", "self", ".", "recv_get_table_meta", "(", ")" ]
Parameters:
     - db_patterns
     - tbl_patterns
     - tbl_types
[ "Parameters", ":", "-", "db_patterns", "-", "tbl_patterns", "-", "tbl_types" ]
python
train
27.555556
rgs1/zk_shell
zk_shell/xclient.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L147-L157
def get(self, *args, **kwargs):
        """ wraps the default get() and deals with encoding """
        value, stat = super(XClient, self).get(*args, **kwargs)

        try:
            if value is not None:
                value = value.decode(encoding="utf-8")
        except UnicodeDecodeError:
            pass

        return (value, stat)
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", ",", "stat", "=", "super", "(", "XClient", ",", "self", ")", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "if", "value", "is", "not", "None", ":", "value", "=", "value", ".", "decode", "(", "encoding", "=", "\"utf-8\"", ")", "except", "UnicodeDecodeError", ":", "pass", "return", "(", "value", ",", "stat", ")" ]
wraps the default get() and deals with encoding
[ "wraps", "the", "default", "get", "()", "and", "deals", "with", "encoding" ]
python
train
30.363636
helixyte/everest
everest/views/base.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/views/base.py#L219-L230
def _get_response_body_mime_type(self):
        """
        Returns the response body MIME type. This might differ from the
        overall response mime type e.g. in ATOM responses where the body
        MIME type is XML.
        """
        mime_type = self._get_response_mime_type()
        if mime_type is AtomMime:
            # FIXME: This cements using XML as the representation to use in
            #        ATOM bodies (which is perhaps not too worrisome).
            mime_type = XmlMime
        return mime_type
[ "def", "_get_response_body_mime_type", "(", "self", ")", ":", "mime_type", "=", "self", ".", "_get_response_mime_type", "(", ")", "if", "mime_type", "is", "AtomMime", ":", "# FIXME: This cements using XML as the representation to use in", "# ATOM bodies (which is perhaps not too worrisome).", "mime_type", "=", "XmlMime", "return", "mime_type" ]
Returns the response body MIME type. This might differ from the overall response mime type e.g. in ATOM responses where the body MIME type is XML.
[ "Returns", "the", "response", "body", "MIME", "type", ".", "This", "might", "differ", "from", "the", "overall", "response", "mime", "type", "e", ".", "g", ".", "in", "ATOM", "responses", "where", "the", "body", "MIME", "type", "is", "XML", "." ]
python
train
42.666667
uogbuji/versa
tools/py/pipeline/core_actions.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/core_actions.py#L17-L64
def link(origin=None, rel=None, value=None, attributes=None, source=None):
    '''
    Action function generator to create a link based on the context's current link, or on provided parameters

    :param origin: IRI/string, or list of same; origins for the created relationships.
        If None, the action context provides the parameter.
    :param rel: IRI/string, or list of same; IDs for the created relationships.
        If None, the action context provides the parameter.
    :param value: IRI/string, or list of same; values/targets for the created relationships.
        If None, the action context provides the parameter.
    :param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
    :return: Versa action function to do the actual work
    '''
    attributes = attributes or {}
    #rel = I(iri.absolutize(rel, ctx.base))
    def _link(ctx):
        if source:
            if not callable(source):
                raise ValueError('Link source must be a pattern action function')
            contexts = source(ctx)
            for ctx in contexts:
                ctx.output_model.add(ctx.current_link[ORIGIN], ctx.current_link[RELATIONSHIP], ctx.current_link[TARGET], attributes)
            return

        (o, r, v, a) = ctx.current_link
        _origin = origin(ctx) if callable(origin) else origin
        o_list = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin])
        #_origin = _origin if isinstance(_origin, set) else set([_origin])
        _rel = rel(ctx) if callable(rel) else rel
        r_list = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel])
        #_rel = _rel if isinstance(_rel, set) else set([_rel])
        _value = value(ctx) if callable(value) else value
        v_list = [v] if _value is None else (_value if isinstance(_value, list) else [_value])
        #_target = _target if isinstance(_target, set) else set([_target])
        _attributes = attributes(ctx) if callable(attributes) else attributes

        #(ctx_o, ctx_r, ctx_t, ctx_a) = ctx.current_link
        #FIXME: Add test for IRI output via wrapper action function
        for (o, r, v, a) in [ (o, r, v, a) for o in o_list for r in r_list for v in v_list ]:
            ctx.output_model.add(o, r, v, attributes)
        return
    return _link
[ "def", "link", "(", "origin", "=", "None", ",", "rel", "=", "None", ",", "value", "=", "None", ",", "attributes", "=", "None", ",", "source", "=", "None", ")", ":", "attributes", "=", "attributes", "or", "{", "}", "#rel = I(iri.absolutize(rel, ctx.base))", "def", "_link", "(", "ctx", ")", ":", "if", "source", ":", "if", "not", "callable", "(", "source", ")", ":", "raise", "ValueError", "(", "'Link source must be a pattern action function'", ")", "contexts", "=", "source", "(", "ctx", ")", "for", "ctx", "in", "contexts", ":", "ctx", ".", "output_model", ".", "add", "(", "ctx", ".", "current_link", "[", "ORIGIN", "]", ",", "ctx", ".", "current_link", "[", "RELATIONSHIP", "]", ",", "ctx", ".", "current_link", "[", "TARGET", "]", ",", "attributes", ")", "return", "(", "o", ",", "r", ",", "v", ",", "a", ")", "=", "ctx", ".", "current_link", "_origin", "=", "origin", "(", "ctx", ")", "if", "callable", "(", "origin", ")", "else", "origin", "o_list", "=", "[", "o", "]", "if", "_origin", "is", "None", "else", "(", "_origin", "if", "isinstance", "(", "_origin", ",", "list", ")", "else", "[", "_origin", "]", ")", "#_origin = _origin if isinstance(_origin, set) else set([_origin])", "_rel", "=", "rel", "(", "ctx", ")", "if", "callable", "(", "rel", ")", "else", "rel", "r_list", "=", "[", "r", "]", "if", "_rel", "is", "None", "else", "(", "_rel", "if", "isinstance", "(", "_rel", ",", "list", ")", "else", "[", "_rel", "]", ")", "#_rel = _rel if isinstance(_rel, set) else set([_rel])", "_value", "=", "value", "(", "ctx", ")", "if", "callable", "(", "value", ")", "else", "value", "v_list", "=", "[", "v", "]", "if", "_value", "is", "None", "else", "(", "_value", "if", "isinstance", "(", "_value", ",", "list", ")", "else", "[", "_value", "]", ")", "#_target = _target if isinstance(_target, set) else set([_target])", "_attributes", "=", "attributes", "(", "ctx", ")", "if", "callable", "(", "attributes", ")", "else", "attributes", "#(ctx_o, ctx_r, ctx_t, ctx_a) = ctx.current_link", "#FIXME: Add test for IRI output via wrapper action function", "for", "(", "o", ",", "r", ",", "v", ",", "a", ")", "in", "[", "(", "o", ",", "r", ",", "v", ",", "a", ")", "for", "o", "in", "o_list", "for", "r", "in", "r_list", "for", "v", "in", "v_list", "]", ":", "ctx", ".", "output_model", ".", "add", "(", "o", ",", "r", ",", "v", ",", "attributes", ")", "return", "return", "_link" ]
Action function generator to create a link based on the context's current link, or on provided parameters

    :param origin: IRI/string, or list of same; origins for the created relationships.
        If None, the action context provides the parameter.
    :param rel: IRI/string, or list of same; IDs for the created relationships.
        If None, the action context provides the parameter.
    :param value: IRI/string, or list of same; values/targets for the created relationships.
        If None, the action context provides the parameter.
    :param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params
    :return: Versa action function to do the actual work
[ "Action", "function", "generator", "to", "create", "a", "link", "based", "on", "the", "context", "s", "current", "link", "or", "on", "provided", "parameters" ]
python
train
49.1875
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/mpl_plot/_mpl_to_vispy.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/mpl_plot/_mpl_to_vispy.py#L179-L198
def show(block=False): """Show current figures using vispy Parameters ---------- block : bool If True, blocking mode will be used. If False, then non-blocking / interactive mode will be used. Returns ------- canvases : list List of the vispy canvases that were created. """ if not has_matplotlib(): raise ImportError('Requires matplotlib version >= 1.2') cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()] if block and len(cs) > 0: cs[0].app.run() return cs
[ "def", "show", "(", "block", "=", "False", ")", ":", "if", "not", "has_matplotlib", "(", ")", ":", "raise", "ImportError", "(", "'Requires matplotlib version >= 1.2'", ")", "cs", "=", "[", "_mpl_to_vispy", "(", "plt", ".", "figure", "(", "ii", ")", ")", "for", "ii", "in", "plt", ".", "get_fignums", "(", ")", "]", "if", "block", "and", "len", "(", "cs", ")", ">", "0", ":", "cs", "[", "0", "]", ".", "app", ".", "run", "(", ")", "return", "cs" ]
Show current figures using vispy Parameters ---------- block : bool If True, blocking mode will be used. If False, then non-blocking / interactive mode will be used. Returns ------- canvases : list List of the vispy canvases that were created.
[ "Show", "current", "figures", "using", "vispy" ]
python
train
27.1
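A sketch of the intended round trip; the vispy.mpl_plot import path is inferred from the file location above and is an assumption:

import matplotlib.pyplot as plt
from vispy import mpl_plot  # assumed re-export of this module's show()

plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])
canvases = mpl_plot.show(block=True)  # one vispy canvas per open mpl figure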
pandas-dev/pandas
pandas/core/groupby/groupby.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1938-L1947
def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. """ nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna']) if axis != 0: return self.apply(lambda x: x.cumprod(axis=axis, **kwargs)) return self._cython_transform('cumprod', **kwargs)
[ "def", "cumprod", "(", "self", ",", "axis", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_groupby_func", "(", "'cumprod'", ",", "args", ",", "kwargs", ",", "[", "'numeric_only'", ",", "'skipna'", "]", ")", "if", "axis", "!=", "0", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "x", ".", "cumprod", "(", "axis", "=", "axis", ",", "*", "*", "kwargs", ")", ")", "return", "self", ".", "_cython_transform", "(", "'cumprod'", ",", "*", "*", "kwargs", ")" ]
Cumulative product for each group.
[ "Cumulative", "product", "for", "each", "group", "." ]
python
train
37.4
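A quick worked example of the groupby usage (standard pandas API):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [2, 3, 4]})
print(df.groupby('key')['val'].cumprod())
# 0    2    first element of the 'a' group
# 1    6    2 * 3 within the 'a' group
# 2    4    the 'b' group restarts the running product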
DataONEorg/d1_python
dev_tools/src/d1_dev/src-cleanup.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/src-cleanup.py#L332-L340
def _remove_init_all(r): """Remove any __all__ in __init__.py file.""" new_r = redbaron.NodeList() for n in r.node_list: if n.type == 'assignment' and n.target.value == '__all__': pass else: new_r.append(n) return new_r
[ "def", "_remove_init_all", "(", "r", ")", ":", "new_r", "=", "redbaron", ".", "NodeList", "(", ")", "for", "n", "in", "r", ".", "node_list", ":", "if", "n", ".", "type", "==", "'assignment'", "and", "n", ".", "target", ".", "value", "==", "'__all__'", ":", "pass", "else", ":", "new_r", ".", "append", "(", "n", ")", "return", "new_r" ]
Remove any __all__ in __init__.py file.
[ "Remove", "any", "__all__", "in", "__init__", ".", "py", "file", "." ]
python
train
29.666667
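A hedged sketch of calling the helper, assuming the redbaron package is installed; RedBaron parses source into the node list the function walks:

import redbaron  # assumption: redbaron is available

red = redbaron.RedBaron("__all__ = ['a', 'b']\nx = 1\n")
cleaned = _remove_init_all(red)
# 'cleaned' is a NodeList that keeps 'x = 1' and drops the __all__ assignment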
cltk/cltk
cltk/corpus/arabic/utils/pyarabic/araby.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/arabic/utils/pyarabic/araby.py#L948-L992
def shaddalike(partial, fully):
    """
    If the two words have the same letters and the same harakats, this function returns True.
    The first word is partially vocalized, the second is fully vocalized;
    if the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word
    @param partial: the partially vocalized word
    @type partial: unicode
    @param fully: the fully vocalized word
    @type fully: unicode
    @return: True if the shadda placement matches
    @rtype: Boolean
    """
    # المدخل ليس به شدة، لا داعي للبحث
    if not has_shadda(partial):
        return True
    # المدخل به شدة، والنتيجة ليس بها شدة، خاطئ
    elif not has_shadda(fully) and has_shadda(partial):
        return False

    # المدخل والمخرج بهما شدة، نتأكد من موقعهما
    partial = strip_harakat(partial)
    fully = strip_harakat(fully)
    pstack = stack.Stack(partial)
    vstack = stack.Stack(fully)
    plast = pstack.pop()
    vlast = vstack.pop()
    # if debug: print "+0", Pstack, Vstack
    while plast != None and vlast != None:
        if plast == vlast:
            plast = pstack.pop()
            vlast = vstack.pop()
        elif plast == SHADDA and vlast != SHADDA:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
        elif plast != SHADDA and vlast == SHADDA:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            vlast = vstack.pop()
        else:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
    if not (pstack.is_empty() and vstack.is_empty()):
        return False
    else:
        return True
[ "def", "shaddalike", "(", "partial", ",", "fully", ")", ":", "# المدخل ليس به شدة، لا داعي للبحث", "if", "not", "has_shadda", "(", "partial", ")", ":", "return", "True", "# المدخل به شدة، والنتيجة ليس بها شدة، خاطئ", "elif", "not", "has_shadda", "(", "fully", ")", "and", "has_shadda", "(", "partial", ")", ":", "return", "False", "# المدخل والمخرج بهما شدة، نتأكد من موقعهما", "partial", "=", "strip_harakat", "(", "partial", ")", "fully", "=", "strip_harakat", "(", "fully", ")", "pstack", "=", "stack", ".", "Stack", "(", "partial", ")", "vstack", "=", "stack", ".", "Stack", "(", "fully", ")", "plast", "=", "pstack", ".", "pop", "(", ")", "vlast", "=", "vstack", ".", "pop", "(", ")", "# if debug: print \"+0\", Pstack, Vstack", "while", "plast", "!=", "None", "and", "vlast", "!=", "None", ":", "if", "plast", "==", "vlast", ":", "plast", "=", "pstack", ".", "pop", "(", ")", "vlast", "=", "vstack", ".", "pop", "(", ")", "elif", "plast", "==", "SHADDA", "and", "vlast", "!=", "SHADDA", ":", "# if debug: print \"+2\", Pstack.items, Plast, Vstack.items, Vlast", "break", "elif", "plast", "!=", "SHADDA", "and", "vlast", "==", "SHADDA", ":", "# if debug: print \"+2\", Pstack.items, Plast, Vstack.items, Vlast", "vlast", "=", "vstack", ".", "pop", "(", ")", "else", ":", "# if debug: print \"+2\", Pstack.items, Plast, Vstack.items, Vlast", "break", "if", "not", "(", "pstack", ".", "is_empty", "(", ")", "and", "vstack", ".", "is_empty", "(", ")", ")", ":", "return", "False", "else", ":", "return", "True" ]
If the two words have the same letters and the same harakats, this function returns True.
The first word is partially vocalized, the second is fully vocalized;
if the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word
@param partial: the partially vocalized word
@type partial: unicode
@param fully: the fully vocalized word
@type fully: unicode
@return: True if the shadda placement matches
@rtype: Boolean
[ "If", "the", "two", "words", "has", "the", "same", "letters", "and", "the", "same", "harakats", "this", "fuction", "return", "True", ".", "The", "first", "word", "is", "partially", "vocalized", "the", "second", "is", "fully", "if", "the", "partially", "contians", "a", "shadda", "it", "must", "be", "at", "the", "same", "place", "in", "the", "fully" ]
python
train
35.511111
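An illustrative call, with the import path taken from the record's own path field; the Arabic strings are made-up examples rather than test vectors from the library:

from cltk.corpus.arabic.utils.pyarabic import araby

partial = u"كتّب"    # partially vocalized, shadda on the taa
fully = u"كَتَّبَ"   # fully vocalized, shadda in the same slot
print(araby.shaddalike(partial, fully))  # expected: True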
phfaist/pylatexenc
pylatexenc/latexwalker.py
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1269-L1281
def get_latex_expression(s, pos, **parse_flags):
    """
    Reads a latex expression, e.g. a macro argument. This may be a single char, an escape
    sequence, or an expression placed in braces.

    Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
    expression, and `len` is its length.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_expression(pos=pos)
[ "def", "get_latex_expression", "(", "s", ",", "pos", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_expression", "(", "pos", "=", "pos", ")" ]
Reads a latex expression, e.g. a macro argument. This may be a single char, an escape
sequence, or an expression placed in braces.

Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the
expression, and `len` is its length.

.. deprecated:: 1.0
   Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
[ "Reads", "a", "latex", "expression", "e", ".", "g", ".", "macro", "argument", ".", "This", "may", "be", "a", "single", "char", "an", "escape", "sequence", "or", "a", "expression", "placed", "in", "braces", "." ]
python
test
37.461538
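Since the module-level helper is deprecated, the recommended equivalent on LatexWalker looks like this sketch:

from pylatexenc.latexwalker import LatexWalker

walker = LatexWalker(r"\textbf{hello} world")
node, pos, length = walker.get_latex_expression(pos=0)
# node describes the first expression (here the \textbf macro);
# pos and length locate it inside the input string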
tornadoweb/tornado
tornado/web.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L1182-L1218
def send_error(self, status_code: int = 500, **kwargs: Any) -> None: """Sends the given HTTP error code to the browser. If `flush()` has already been called, it is not possible to send an error, so this method will simply terminate the response. If output has been written but not yet flushed, it will be discarded and replaced with the error page. Override `write_error()` to customize the error page that is returned. Additional keyword arguments are passed through to `write_error`. """ if self._headers_written: gen_log.error("Cannot send error response after headers written") if not self._finished: # If we get an error between writing headers and finishing, # we are unlikely to be able to finish due to a # Content-Length mismatch. Try anyway to release the # socket. try: self.finish() except Exception: gen_log.error("Failed to flush partial response", exc_info=True) return self.clear() reason = kwargs.get("reason") if "exc_info" in kwargs: exception = kwargs["exc_info"][1] if isinstance(exception, HTTPError) and exception.reason: reason = exception.reason self.set_status(status_code, reason=reason) try: self.write_error(status_code, **kwargs) except Exception: app_log.error("Uncaught exception in write_error", exc_info=True) if not self._finished: self.finish()
[ "def", "send_error", "(", "self", ",", "status_code", ":", "int", "=", "500", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "None", ":", "if", "self", ".", "_headers_written", ":", "gen_log", ".", "error", "(", "\"Cannot send error response after headers written\"", ")", "if", "not", "self", ".", "_finished", ":", "# If we get an error between writing headers and finishing,", "# we are unlikely to be able to finish due to a", "# Content-Length mismatch. Try anyway to release the", "# socket.", "try", ":", "self", ".", "finish", "(", ")", "except", "Exception", ":", "gen_log", ".", "error", "(", "\"Failed to flush partial response\"", ",", "exc_info", "=", "True", ")", "return", "self", ".", "clear", "(", ")", "reason", "=", "kwargs", ".", "get", "(", "\"reason\"", ")", "if", "\"exc_info\"", "in", "kwargs", ":", "exception", "=", "kwargs", "[", "\"exc_info\"", "]", "[", "1", "]", "if", "isinstance", "(", "exception", ",", "HTTPError", ")", "and", "exception", ".", "reason", ":", "reason", "=", "exception", ".", "reason", "self", ".", "set_status", "(", "status_code", ",", "reason", "=", "reason", ")", "try", ":", "self", ".", "write_error", "(", "status_code", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "app_log", ".", "error", "(", "\"Uncaught exception in write_error\"", ",", "exc_info", "=", "True", ")", "if", "not", "self", ".", "_finished", ":", "self", ".", "finish", "(", ")" ]
Sends the given HTTP error code to the browser. If `flush()` has already been called, it is not possible to send an error, so this method will simply terminate the response. If output has been written but not yet flushed, it will be discarded and replaced with the error page. Override `write_error()` to customize the error page that is returned. Additional keyword arguments are passed through to `write_error`.
[ "Sends", "the", "given", "HTTP", "error", "code", "to", "the", "browser", "." ]
python
train
43.864865
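In a handler, send_error() is typically paired with a write_error() override; a small sketch using only documented Tornado APIs:

import tornado.web

class WidgetHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("partial output")   # discarded: nothing has been flushed yet
        self.send_error(404, reason="no such widget")

    def write_error(self, status_code, **kwargs):
        self.finish({"error": status_code})  # custom JSON error body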
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py#L79-L88
def add(self, original_index, operation): """Add an operation to this Run instance. :Parameters: - `original_index`: The original index of this operation within a larger bulk operation. - `operation`: The operation document. """ self.index_map.append(original_index) self.ops.append(operation)
[ "def", "add", "(", "self", ",", "original_index", ",", "operation", ")", ":", "self", ".", "index_map", ".", "append", "(", "original_index", ")", "self", ".", "ops", ".", "append", "(", "operation", ")" ]
Add an operation to this Run instance. :Parameters: - `original_index`: The original index of this operation within a larger bulk operation. - `operation`: The operation document.
[ "Add", "an", "operation", "to", "this", "Run", "instance", "." ]
python
train
35.7
apache/incubator-mxnet
example/ssd/evaluate/eval_voc.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_voc.py#L30-L49
def parse_voc_rec(filename): """ parse pascal voc record into a dictionary :param filename: xml file path :return: list of dict """ import xml.etree.ElementTree as ET tree = ET.parse(filename) objects = [] for obj in tree.findall('object'): obj_dict = dict() obj_dict['name'] = obj.find('name').text obj_dict['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_dict['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)] objects.append(obj_dict) return objects
[ "def", "parse_voc_rec", "(", "filename", ")", ":", "import", "xml", ".", "etree", ".", "ElementTree", "as", "ET", "tree", "=", "ET", ".", "parse", "(", "filename", ")", "objects", "=", "[", "]", "for", "obj", "in", "tree", ".", "findall", "(", "'object'", ")", ":", "obj_dict", "=", "dict", "(", ")", "obj_dict", "[", "'name'", "]", "=", "obj", ".", "find", "(", "'name'", ")", ".", "text", "obj_dict", "[", "'difficult'", "]", "=", "int", "(", "obj", ".", "find", "(", "'difficult'", ")", ".", "text", ")", "bbox", "=", "obj", ".", "find", "(", "'bndbox'", ")", "obj_dict", "[", "'bbox'", "]", "=", "[", "int", "(", "bbox", ".", "find", "(", "'xmin'", ")", ".", "text", ")", ",", "int", "(", "bbox", ".", "find", "(", "'ymin'", ")", ".", "text", ")", ",", "int", "(", "bbox", ".", "find", "(", "'xmax'", ")", ".", "text", ")", ",", "int", "(", "bbox", ".", "find", "(", "'ymax'", ")", ".", "text", ")", "]", "objects", ".", "append", "(", "obj_dict", ")", "return", "objects" ]
parse pascal voc record into a dictionary :param filename: xml file path :return: list of dict
[ "parse", "pascal", "voc", "record", "into", "a", "dictionary", ":", "param", "filename", ":", "xml", "file", "path", ":", "return", ":", "list", "of", "dict" ]
python
train
35.5
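Typical use, with a hypothetical annotation path:

recs = parse_voc_rec('VOCdevkit/VOC2007/Annotations/000001.xml')  # path is illustrative
for obj in recs:
    print(obj['name'], obj['bbox'], obj['difficult'])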
sporteasy/python-poeditor
poeditor/client.py
https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L200-L219
def update_project(self, project_id, name=None, description=None, reference_language=None): """ Updates project settings (name, description, reference language) If optional parameters are not sent, their respective fields are not updated. """ kwargs = {} if name is not None: kwargs['name'] = name if description is not None: kwargs['description'] = description if reference_language is not None: kwargs['reference_language'] = reference_language data = self._run( url_path="projects/update", id=project_id, **kwargs ) return data['result']['project']['id']
[ "def", "update_project", "(", "self", ",", "project_id", ",", "name", "=", "None", ",", "description", "=", "None", ",", "reference_language", "=", "None", ")", ":", "kwargs", "=", "{", "}", "if", "name", "is", "not", "None", ":", "kwargs", "[", "'name'", "]", "=", "name", "if", "description", "is", "not", "None", ":", "kwargs", "[", "'description'", "]", "=", "description", "if", "reference_language", "is", "not", "None", ":", "kwargs", "[", "'reference_language'", "]", "=", "reference_language", "data", "=", "self", ".", "_run", "(", "url_path", "=", "\"projects/update\"", ",", "id", "=", "project_id", ",", "*", "*", "kwargs", ")", "return", "data", "[", "'result'", "]", "[", "'project'", "]", "[", "'id'", "]" ]
Updates project settings (name, description, reference language) If optional parameters are not sent, their respective fields are not updated.
[ "Updates", "project", "settings", "(", "name", "description", "reference", "language", ")", "If", "optional", "parameters", "are", "not", "sent", "their", "respective", "fields", "are", "not", "updated", "." ]
python
train
36.1
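A usage sketch; POEditorAPI is assumed to be the client class exported by the poeditor package, and the token is a placeholder:

from poeditor import POEditorAPI  # assumed client class name

client = POEditorAPI(api_token='YOUR_API_TOKEN')  # placeholder token
project_id = client.update_project('12345', name='Website v2')
# description and reference_language are left untouched when omitted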
richardcornish/django-pygmentify
pygmentify/utils/pygmentify.py
https://github.com/richardcornish/django-pygmentify/blob/a2d3f6b3c3019d810d46f6ff6beb4e9f53190e7b/pygmentify/utils/pygmentify.py#L12-L33
def bits_to_dict(bits): """Convert a Django template tag's kwargs into a dictionary of Python types. The only necessary types are number, boolean, list, and string. http://pygments.org/docs/formatters/#HtmlFormatter from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"] to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],} """ # Strip any trailing commas cleaned_bits = [bit[:-1] if bit.endswith(',') else bit for bit in bits] # Create dictionary by splitting on equal signs options = dict(bit.split('=') for bit in cleaned_bits) # Coerce strings of types to Python types for key in options: if options[key] == "'true'" or options[key] == "'false'": options[key] = options[key].title() options[key] = ast.literal_eval(options[key]) return options
[ "def", "bits_to_dict", "(", "bits", ")", ":", "# Strip any trailing commas", "cleaned_bits", "=", "[", "bit", "[", ":", "-", "1", "]", "if", "bit", ".", "endswith", "(", "','", ")", "else", "bit", "for", "bit", "in", "bits", "]", "# Create dictionary by splitting on equal signs", "options", "=", "dict", "(", "bit", ".", "split", "(", "'='", ")", "for", "bit", "in", "cleaned_bits", ")", "# Coerce strings of types to Python types", "for", "key", "in", "options", ":", "if", "options", "[", "key", "]", "==", "\"'true'\"", "or", "options", "[", "key", "]", "==", "\"'false'\"", ":", "options", "[", "key", "]", "=", "options", "[", "key", "]", ".", "title", "(", ")", "options", "[", "key", "]", "=", "ast", ".", "literal_eval", "(", "options", "[", "key", "]", ")", "return", "options" ]
Convert a Django template tag's kwargs into a dictionary of Python types. The only necessary types are number, boolean, list, and string. http://pygments.org/docs/formatters/#HtmlFormatter from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"] to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
[ "Convert", "a", "Django", "template", "tag", "s", "kwargs", "into", "a", "dictionary", "of", "Python", "types", "." ]
python
train
40.272727
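A quick check of the conversion on inputs whose behavior is unambiguous (ast.literal_eval strips the inner quotes from plain strings):

bits = ["num=0,", "style='monokai'"]
print(bits_to_dict(bits))  # assumes the module-level `import ast` next to the function
# {'num': 0, 'style': 'monokai'}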
nicfit/nicfit.py
nicfit/command.py
https://github.com/nicfit/nicfit.py/blob/8313f8edbc5e7361ddad496d6d818324b5236c7a/nicfit/command.py#L118-L135
def loadCommandMap(Class, subparsers=None, instantiate=True, **cmd_kwargs):
    """Instantiate each registered command to a dict mapping name/alias to
    instance. Due to aliases, the returned length may be greater than the
    number of commands, but the unique instance count will match.
    """
    if not Class._registered_commands:
        raise ValueError("No commands have been registered with {}"
                         .format(Class))

    all = {}
    for Cmd in set(Class._registered_commands[Class].values()):
        cmd = Cmd(subparsers=subparsers, **cmd_kwargs) \
                  if instantiate else Cmd

        for name in [Cmd.name()] + Cmd.aliases():
            all[name] = cmd

    return all
[ "def", "loadCommandMap", "(", "Class", ",", "subparsers", "=", "None", ",", "instantiate", "=", "True", ",", "*", "*", "cmd_kwargs", ")", ":", "if", "not", "Class", ".", "_registered_commands", ":", "raise", "ValueError", "(", "\"No commands have been registered with {}\"", ".", "format", "(", "Class", ")", ")", "all", "=", "{", "}", "for", "Cmd", "in", "set", "(", "Class", ".", "_registered_commands", "[", "Class", "]", ".", "values", "(", ")", ")", ":", "cmd", "=", "Cmd", "(", "subparsers", "=", "subparsers", ",", "*", "*", "cmd_kwargs", ")", "if", "instantiate", "else", "Cmd", "for", "name", "in", "[", "Cmd", ".", "name", "(", ")", "]", "+", "Cmd", ".", "aliases", "(", ")", ":", "all", "[", "name", "]", "=", "cmd", "return", "all" ]
Instantiate each registered command to a dict mapping name/alias to
instance. Due to aliases, the returned length may be greater than the
number of commands, but the unique instance count will match.
[ "Instantiate", "each", "registered", "command", "to", "a", "dict", "mapping", "name", "/", "alias", "to", "instance", "." ]
python
test
42.611111
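A hedged sketch of the calling convention; MyCommand is a hypothetical base class whose subclasses have already been registered through the framework:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
cmd_map = MyCommand.loadCommandMap(subparsers=subparsers)
# cmd_map maps every name and alias to one shared instance per command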
MIT-LCP/wfdb-python
wfdb/io/annotation.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1057-L1168
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
          num=None, aux_note=None, label_store=None, fs=None,
          custom_labels=None, write_dir=''):
    """Write a WFDB annotation file.

    Specify at least the following:

    - The record name of the WFDB record (record_name)
    - The annotation file extension (extension)
    - The annotation locations in samples relative to the beginning of
      the record (sample)
    - Either the numerical values used to store the labels (`label_store`),
      or more commonly, the display symbols of each label (`symbol`).

    Parameters
    ----------
    record_name : str
        The string name of the WFDB record to be written (without any file
        extensions).
    extension : str
        The string annotation file extension.
    sample : numpy array
        A numpy array containing the annotation locations in samples relative
        to the beginning of the record.
    symbol : list, or numpy array, optional
        The symbols used to display the annotation labels. List or numpy
        array. If this field is present, `label_store` must not be present.
    subtype : numpy array, optional
        A numpy array containing the marked class/category of each
        annotation.
    chan : numpy array, optional
        A numpy array containing the signal channel associated with each
        annotation.
    num : numpy array, optional
        A numpy array containing the labelled annotation number for each
        annotation.
    aux_note : list, optional
        A list containing the auxiliary information string (or None for
        annotations without notes) for each annotation.
    label_store : numpy array, optional
        A numpy array containing the integer values used to store the
        annotation labels. If this field is present, `symbol` must not be
        present.
    fs : int, or float, optional
        The numerical sampling frequency of the record to be written to the
        file.
    custom_labels : pandas dataframe, optional
        The map of custom defined annotation labels used for this annotation,
        in addition to the standard WFDB annotation labels. Custom labels are
        defined by two or three fields:

        - The integer values used to store custom annotation labels in the
          file (optional)
        - Their short display symbols
        - Their long descriptions.

        This input argument may come in four formats:

        1. A pandas.DataFrame object with columns:
           ['label_store', 'symbol', 'description']
        2. A pandas.DataFrame object with columns: ['symbol', 'description']
           If this option is chosen, label_store values are automatically
           chosen.
        3. A list or tuple of tuple triplets, with triplet elements
           representing: (label_store, symbol, description).
        4. A list or tuple of tuple pairs, with pair elements representing:
           (symbol, description). If this option is chosen, label_store
           values are automatically chosen.

        If the `label_store` field is given for this function, and
        `custom_labels` is defined, `custom_labels` must contain
        `label_store` in its mapping. i.e. it must come in format 1 or 3
        above.
    write_dir : str, optional
        The directory in which to write the annotation file

    Notes
    -----
    This is a gateway function, written as a simple way to write WFDB
    annotation files without needing to explicitly create an Annotation
    object. You may also create an Annotation object, manually set its
    attributes, and call its `wrann` instance method.

    Each annotation stored in a WFDB annotation file contains a sample
    field and a label field. All other fields may or may not be present.

    Examples
    --------
    >>> # Read an annotation as an Annotation object
    >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
    >>> # Write a copy of the annotation file
    >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)

    """
    # Create Annotation object
    annotation = Annotation(record_name=record_name, extension=extension,
                            sample=sample, symbol=symbol, subtype=subtype,
                            chan=chan, num=num, aux_note=aux_note,
                            label_store=label_store, fs=fs,
                            custom_labels=custom_labels)

    # Find out which input field describes the labels
    if symbol is None:
        if label_store is None:
            raise Exception("Either the 'symbol' field or the 'label_store' field must be set")
    else:
        if label_store is None:
            annotation.sym_to_aux()
        else:
            raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels")

    # Perform field checks and write the annotation file
    annotation.wrann(write_fs=True, write_dir=write_dir)
Examples -------- >>> # Read an annotation as an Annotation object >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb') >>> # Write a copy of the annotation file >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol) """ # Create Annotation object annotation = Annotation(record_name=record_name, extension=extension, sample=sample, symbol=symbol, subtype=subtype, chan=chan, num=num, aux_note=aux_note, label_store=label_store, fs=fs, custom_labels=custom_labels) # Find out which input field describes the labels if symbol is None: if label_store is None: raise Exception("Either the 'symbol' field or the 'label_store' field must be set") else: if label_store is None: annotation.sym_to_aux() else: raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels") # Perform field checks and write the annotation file annotation.wrann(write_fs=True, write_dir=write_dir)
[ "def", "wrann", "(", "record_name", ",", "extension", ",", "sample", ",", "symbol", "=", "None", ",", "subtype", "=", "None", ",", "chan", "=", "None", ",", "num", "=", "None", ",", "aux_note", "=", "None", ",", "label_store", "=", "None", ",", "fs", "=", "None", ",", "custom_labels", "=", "None", ",", "write_dir", "=", "''", ")", ":", "# Create Annotation object", "annotation", "=", "Annotation", "(", "record_name", "=", "record_name", ",", "extension", "=", "extension", ",", "sample", "=", "sample", ",", "symbol", "=", "symbol", ",", "subtype", "=", "subtype", ",", "chan", "=", "chan", ",", "num", "=", "num", ",", "aux_note", "=", "aux_note", ",", "label_store", "=", "label_store", ",", "fs", "=", "fs", ",", "custom_labels", "=", "custom_labels", ")", "# Find out which input field describes the labels", "if", "symbol", "is", "None", ":", "if", "label_store", "is", "None", ":", "raise", "Exception", "(", "\"Either the 'symbol' field or the 'label_store' field must be set\"", ")", "else", ":", "if", "label_store", "is", "None", ":", "annotation", ".", "sym_to_aux", "(", ")", "else", ":", "raise", "Exception", "(", "\"Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels\"", ")", "# Perform field checks and write the annotation file", "annotation", ".", "wrann", "(", "write_fs", "=", "True", ",", "write_dir", "=", "write_dir", ")" ]
Write a WFDB annotation file.

Specify at least the following:

- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning of
  the record (sample)
- Either the numerical values used to store the labels (`label_store`),
  or more commonly, the display symbols of each label (`symbol`).

Parameters
----------
record_name : str
    The string name of the WFDB record to be written (without any file
    extensions).
extension : str
    The string annotation file extension.
sample : numpy array
    A numpy array containing the annotation locations in samples relative
    to the beginning of the record.
symbol : list, or numpy array, optional
    The symbols used to display the annotation labels. List or numpy
    array. If this field is present, `label_store` must not be present.
subtype : numpy array, optional
    A numpy array containing the marked class/category of each annotation.
chan : numpy array, optional
    A numpy array containing the signal channel associated with each
    annotation.
num : numpy array, optional
    A numpy array containing the labelled annotation number for each
    annotation.
aux_note : list, optional
    A list containing the auxiliary information string (or None for
    annotations without notes) for each annotation.
label_store : numpy array, optional
    A numpy array containing the integer values used to store the
    annotation labels. If this field is present, `symbol` must not be
    present.
fs : int, or float, optional
    The numerical sampling frequency of the record to be written to the
    file.
custom_labels : pandas dataframe, optional
    The map of custom defined annotation labels used for this annotation,
    in addition to the standard WFDB annotation labels. Custom labels are
    defined by two or three fields:

    - The integer values used to store custom annotation labels in the
      file (optional)
    - Their short display symbols
    - Their long descriptions.

    This input argument may come in four formats:

    1. A pandas.DataFrame object with columns:
       ['label_store', 'symbol', 'description']
    2. A pandas.DataFrame object with columns: ['symbol', 'description']
       If this option is chosen, label_store values are automatically
       chosen.
    3. A list or tuple of tuple triplets, with triplet elements
       representing: (label_store, symbol, description).
    4. A list or tuple of tuple pairs, with pair elements representing:
       (symbol, description). If this option is chosen, label_store values
       are automatically chosen.

    If the `label_store` field is given for this function, and
    `custom_labels` is defined, `custom_labels` must contain `label_store`
    in its mapping. i.e. it must come in format 1 or 3 above.
write_dir : str, optional
    The directory in which to write the annotation file

Notes
-----
This is a gateway function, written as a simple way to write WFDB
annotation files without needing to explicitly create an Annotation
object. You may also create an Annotation object, manually set its
attributes, and call its `wrann` instance method.

Each annotation stored in a WFDB annotation file contains a sample
field and a label field. All other fields may or may not be present.

Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
[ "Write", "a", "WFDB", "annotation", "file", "." ]
python
train
43.446429
pandas-dev/pandas
pandas/core/internals/managers.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L120-L131
def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]] # preserve dtype if possible if self.ndim == 1: blocks = np.array([], dtype=self.array_dtype) else: blocks = [] return self.__class__(blocks, axes)
[ "def", "make_empty", "(", "self", ",", "axes", "=", "None", ")", ":", "if", "axes", "is", "None", ":", "axes", "=", "[", "ensure_index", "(", "[", "]", ")", "]", "+", "[", "ensure_index", "(", "a", ")", "for", "a", "in", "self", ".", "axes", "[", "1", ":", "]", "]", "# preserve dtype if possible", "if", "self", ".", "ndim", "==", "1", ":", "blocks", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "self", ".", "array_dtype", ")", "else", ":", "blocks", "=", "[", "]", "return", "self", ".", "__class__", "(", "blocks", ",", "axes", ")" ]
return an empty BlockManager with the items axis of len 0
[ "return", "an", "empty", "BlockManager", "with", "the", "items", "axis", "of", "len", "0" ]
python
train
37.25
SethMMorton/natsort
natsort/natsort.py
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L222-L267
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5'] """ key = natsort_keygen(key, alg) return sorted(seq, reverse=reverse, key=key)
[ "def", "natsorted", "(", "seq", ",", "key", "=", "None", ",", "reverse", "=", "False", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "key", "=", "natsort_keygen", "(", "key", ",", "alg", ")", "return", "sorted", "(", "seq", ",", "reverse", "=", "reverse", ",", "key", "=", "key", ")" ]
Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5']
[ "Sorts", "an", "iterable", "naturally", "." ]
python
train
29.456522
mozilla-services/axe-selenium-python
axe_selenium_python/axe.py
https://github.com/mozilla-services/axe-selenium-python/blob/475c9f4eb771587aea73897bee356284d0361d77/axe_selenium_python/axe.py#L96-L115
def write_results(self, data, name=None):
    """
    Write JSON to file with the specified name.

    :param data: JSON object.
    :param name: Path to the file to be written to. If no path is passed
        a new JSON file "results.json" will be created in the current
        working directory.
    """

    if name:
        filepath = os.path.abspath(name)
    else:
        filepath = os.path.join(os.getcwd(), "results.json")

    with open(filepath, "w", encoding="utf8") as f:
        try:
            f.write(unicode(json.dumps(data, indent=4)))
        except NameError:
            f.write(json.dumps(data, indent=4))
[ "def", "write_results", "(", "self", ",", "data", ",", "name", "=", "None", ")", ":", "if", "name", ":", "filepath", "=", "os", ".", "path", ".", "abspath", "(", "name", ")", "else", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "getcwd", "(", ")", ",", "\"results.json\"", ")", "with", "open", "(", "filepath", ",", "\"w\"", ",", "encoding", "=", "\"utf8\"", ")", "as", "f", ":", "try", ":", "f", ".", "write", "(", "unicode", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ")", ")", ")", "except", "NameError", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "4", ")", ")" ]
Write JSON to file with the specified name.

:param data: JSON object.
:param name: Path to the file to be written to. If no path is passed
    a new JSON file "results.json" will be created in the current
    working directory.
[ "Write", "JSON", "to", "file", "with", "the", "specified", "name", "." ]
python
train
34.95
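In context, write_results() is the last step of the Axe workflow; a sketch assuming an existing selenium WebDriver named driver:

from axe_selenium_python import Axe

axe = Axe(driver)            # driver: an existing selenium WebDriver (assumed)
axe.inject()                 # load axe-core into the current page
results = axe.run()
axe.write_results(results, 'a11y.json')  # omit the name to get ./results.json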
DataONEorg/d1_python
gmn/src/d1_gmn/app/management/commands/audit-sync.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/management/commands/audit-sync.py#L125-L138
async def is_object_synced_to_cn(self, client, pid): """Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists. """ try: await client.describe(pid) except d1_common.types.exceptions.DataONEException: return False return True
[ "async", "def", "is_object_synced_to_cn", "(", "self", ",", "client", ",", "pid", ")", ":", "try", ":", "await", "client", ".", "describe", "(", "pid", ")", "except", "d1_common", ".", "types", ".", "exceptions", ".", "DataONEException", ":", "return", "False", "return", "True" ]
Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists.
[ "Check", "if", "object", "with", "{", "pid", "}", "has", "successfully", "synced", "to", "the", "CN", "." ]
python
train
37.642857
JoelBender/bacpypes
samples/TCPClient.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/samples/TCPClient.py#L107-L182
def main():
    """
    Main function, called when run as an application.
    """
    global args, server_address

    # parse the command line arguments
    parser = ArgumentParser(description=__doc__)
    parser.add_argument(
        "host", nargs='?',
        help="address of host (default %r)" % (SERVER_HOST,),
        default=SERVER_HOST,
        )
    parser.add_argument(
        "port", nargs='?', type=int,
        help="server port (default %r)" % (SERVER_PORT,),
        default=SERVER_PORT,
        )
    parser.add_argument(
        "--hello", action="store_true",
        default=False,
        help="send a hello message",
        )
    parser.add_argument(
        "--connect-timeout", nargs='?', type=int,
        help="connect timeout",
        default=CONNECT_TIMEOUT,
        )
    parser.add_argument(
        "--idle-timeout", nargs='?', type=int,
        help="idle connection timeout",
        default=IDLE_TIMEOUT,
        )
    args = parser.parse_args()

    if _debug: _log.debug("initialization")
    if _debug: _log.debug("    - args: %r", args)

    # extract the server address and port
    host = args.host
    port = args.port
    server_address = (host, port)
    if _debug: _log.debug("    - server_address: %r", server_address)

    # build the stack
    this_console = ConsoleClient()
    if _debug: _log.debug("    - this_console: %r", this_console)

    this_middle_man = MiddleMan()
    if _debug: _log.debug("    - this_middle_man: %r", this_middle_man)

    this_director = TCPClientDirector(
        connect_timeout=args.connect_timeout,
        idle_timeout=args.idle_timeout,
        )
    if _debug: _log.debug("    - this_director: %r", this_director)

    bind(this_console, this_middle_man, this_director)
    bind(MiddleManASE(), this_director)

    # create a task manager for scheduled functions
    task_manager = TaskManager()
    if _debug: _log.debug("    - task_manager: %r", task_manager)

    # don't wait to connect
    deferred(this_director.connect, server_address)

    # send hello maybe
    if args.hello:
        deferred(this_middle_man.indication, PDU(b'Hello, world!\n'))

    if _debug: _log.debug("running")

    run()

    if _debug: _log.debug("fini")
[ "def", "main", "(", ")", ":", "global", "args", ",", "server_address", "# parse the command line arguments", "parser", "=", "ArgumentParser", "(", "description", "=", "__doc__", ")", "parser", ".", "add_argument", "(", "\"host\"", ",", "nargs", "=", "'?'", ",", "help", "=", "\"address of host (default %r)\"", "%", "(", "SERVER_HOST", ",", ")", ",", "default", "=", "SERVER_HOST", ",", ")", "parser", ".", "add_argument", "(", "\"port\"", ",", "nargs", "=", "'?'", ",", "type", "=", "int", ",", "help", "=", "\"server port (default %r)\"", "%", "(", "SERVER_PORT", ",", ")", ",", "default", "=", "SERVER_PORT", ",", ")", "parser", ".", "add_argument", "(", "\"--hello\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"send a hello message\"", ",", ")", "parser", ".", "add_argument", "(", "\"--connect-timeout\"", ",", "nargs", "=", "'?'", ",", "type", "=", "int", ",", "help", "=", "\"idle connection timeout\"", ",", "default", "=", "CONNECT_TIMEOUT", ",", ")", "parser", ".", "add_argument", "(", "\"--idle-timeout\"", ",", "nargs", "=", "'?'", ",", "type", "=", "int", ",", "help", "=", "\"idle connection timeout\"", ",", "default", "=", "IDLE_TIMEOUT", ",", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\"initialization\"", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - args: %r\"", ",", "args", ")", "# extract the server address and port", "host", "=", "args", ".", "host", "port", "=", "args", ".", "port", "server_address", "=", "(", "host", ",", "port", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - server_address: %r\"", ",", "server_address", ")", "# build the stack", "this_console", "=", "ConsoleClient", "(", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - this_console: %r\"", ",", "this_console", ")", "this_middle_man", "=", "MiddleMan", "(", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - this_middle_man: %r\"", ",", "this_middle_man", ")", "this_director", "=", "TCPClientDirector", "(", "connect_timeout", "=", "args", ".", "connect_timeout", ",", "idle_timeout", "=", "args", ".", "idle_timeout", ",", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - this_director: %r\"", ",", "this_director", ")", "bind", "(", "this_console", ",", "this_middle_man", ",", "this_director", ")", "bind", "(", "MiddleManASE", "(", ")", ",", "this_director", ")", "# create a task manager for scheduled functions", "task_manager", "=", "TaskManager", "(", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\" - task_manager: %r\"", ",", "task_manager", ")", "# don't wait to connect", "deferred", "(", "this_director", ".", "connect", ",", "server_address", ")", "# send hello maybe", "if", "args", ".", "hello", ":", "deferred", "(", "this_middle_man", ".", "indication", ",", "PDU", "(", "b'Hello, world!\\n'", ")", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\"running\"", ")", "run", "(", ")", "if", "_debug", ":", "_log", ".", "debug", "(", "\"fini\"", ")" ]
Main function, called when run as an application.
[ "Main", "function", "called", "when", "run", "as", "an", "application", "." ]
python
train
28.421053
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L4264-L4274
def get_root_objective_bank_ids(self, alias): """Gets the root objective bank Ids in this hierarchy. return: (osid.id.IdList) - the root objective bank Ids raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented. """ url_path = self._urls.roots(alias) return self._get_request(url_path)
[ "def", "get_root_objective_bank_ids", "(", "self", ",", "alias", ")", ":", "url_path", "=", "self", ".", "_urls", ".", "roots", "(", "alias", ")", "return", "self", ".", "_get_request", "(", "url_path", ")" ]
Gets the root objective bank Ids in this hierarchy. return: (osid.id.IdList) - the root objective bank Ids raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented.
[ "Gets", "the", "root", "objective", "bank", "Ids", "in", "this", "hierarchy", "." ]
python
train
40.363636
fedora-infra/fedmsg
fedmsg/utils.py
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/utils.py#L120-L136
def load_class(location): """ Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class. """ mod_name, cls_name = location = location.strip().split(':') tokens = mod_name.split('.') fromlist = '[]' if len(tokens) > 1: fromlist = '.'.join(tokens[:-1]) module = __import__(mod_name, fromlist=fromlist) try: return getattr(module, cls_name) except AttributeError: raise ImportError("%r not found in %r" % (cls_name, mod_name))
[ "def", "load_class", "(", "location", ")", ":", "mod_name", ",", "cls_name", "=", "location", "=", "location", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "tokens", "=", "mod_name", ".", "split", "(", "'.'", ")", "fromlist", "=", "'[]'", "if", "len", "(", "tokens", ")", ">", "1", ":", "fromlist", "=", "'.'", ".", "join", "(", "tokens", "[", ":", "-", "1", "]", ")", "module", "=", "__import__", "(", "mod_name", ",", "fromlist", "=", "fromlist", ")", "try", ":", "return", "getattr", "(", "module", ",", "cls_name", ")", "except", "AttributeError", ":", "raise", "ImportError", "(", "\"%r not found in %r\"", "%", "(", "cls_name", ",", "mod_name", ")", ")" ]
Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class.
[ "Take", "a", "string", "of", "the", "form", "fedmsg", ".", "consumers", ".", "ircbot", ":", "IRCBotConsumer", "and", "return", "the", "IRCBotConsumer", "class", "." ]
python
train
30.529412
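The 'module:Class' form works for any importable dotted path; a stdlib example:

handler_cls = load_class('logging.handlers:RotatingFileHandler')
handler = handler_cls('app.log', maxBytes=1024, backupCount=2)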
pkgw/pwkit
pwkit/environments/casa/closures.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/casa/closures.py#L753-L780
def _process_sample (self, ap1, ap2, ap3, triple, tflags): """We have computed one independent phase closure triple in one timeslot. """ # Frequency-resolved: np.divide (triple, np.abs (triple), triple) phase = np.angle (triple) self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.) self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.) self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.) # Frequency-averaged: triple = np.dot (triple, tflags) / tflags.sum () phase = np.angle (triple) self.global_stats_by_time.accum (self.cur_time, phase) self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase) self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase) self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase) self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase) self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase) self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase) self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase) self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase) self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase)
[ "def", "_process_sample", "(", "self", ",", "ap1", ",", "ap2", ",", "ap3", ",", "triple", ",", "tflags", ")", ":", "# Frequency-resolved:", "np", ".", "divide", "(", "triple", ",", "np", ".", "abs", "(", "triple", ")", ",", "triple", ")", "phase", "=", "np", ".", "angle", "(", "triple", ")", "self", ".", "ap_spec_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap1", ",", "phase", ",", "tflags", "+", "0.", ")", "self", ".", "ap_spec_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap2", ",", "phase", ",", "tflags", "+", "0.", ")", "self", ".", "ap_spec_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap3", ",", "phase", ",", "tflags", "+", "0.", ")", "# Frequency-averaged:", "triple", "=", "np", ".", "dot", "(", "triple", ",", "tflags", ")", "/", "tflags", ".", "sum", "(", ")", "phase", "=", "np", ".", "angle", "(", "triple", ")", "self", ".", "global_stats_by_time", ".", "accum", "(", "self", ".", "cur_time", ",", "phase", ")", "self", ".", "ap_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap1", ",", "phase", ")", "self", ".", "ap_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap2", ",", "phase", ")", "self", ".", "ap_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "ap3", ",", "phase", ")", "self", ".", "bp_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "(", "ap1", ",", "ap2", ")", ",", "phase", ")", "self", ".", "bp_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "(", "ap1", ",", "ap3", ")", ",", "phase", ")", "self", ".", "bp_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "(", "ap2", ",", "ap3", ")", ",", "phase", ")", "self", ".", "ap_time_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "self", ".", "cur_time", ",", "ap1", ",", "phase", ")", "self", ".", "ap_time_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "self", ".", "cur_time", ",", "ap2", ",", "phase", ")", "self", ".", "ap_time_stats_by_ddid", "[", "self", ".", "cur_ddid", "]", ".", "accum", "(", "self", ".", "cur_time", ",", "ap3", ",", "phase", ")" ]
We have computed one independent phase closure triple in one timeslot.
[ "We", "have", "computed", "one", "independent", "phase", "closure", "triple", "in", "one", "timeslot", "." ]
python
train
47.642857
AustralianSynchrotron/lightflow
lightflow/models/datastore.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/datastore.py#L248-L270
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data): """ Return the field specified by its key from the specified section. This method access the specified section of the workflow document and returns the value for the given key. Args: key (str): The key pointing to the value that should be retrieved. It supports MongoDB's dot notation for nested fields. default: The default value that is returned if the key does not exist. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: object: The value from the field that the specified key is pointing to. If the key does not exist, the default value is returned. If no default value is provided and the key does not exist ``None`` is returned. """ key_notation = '.'.join([section, key]) try: return self._decode_value(self._data_from_dotnotation(key_notation, default)) except KeyError: return None
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ",", "*", ",", "section", "=", "DataStoreDocumentSection", ".", "Data", ")", ":", "key_notation", "=", "'.'", ".", "join", "(", "[", "section", ",", "key", "]", ")", "try", ":", "return", "self", ".", "_decode_value", "(", "self", ".", "_data_from_dotnotation", "(", "key_notation", ",", "default", ")", ")", "except", "KeyError", ":", "return", "None" ]
Return the field specified by its key from the specified section.

This method accesses the specified section of the workflow document and
returns the value for the given key.

Args:
    key (str): The key pointing to the value that should be retrieved. It supports
               MongoDB's dot notation for nested fields.
    default: The default value that is returned if the key
             does not exist.
    section (DataStoreDocumentSection): The section from which the data should
                                        be retrieved.

Returns:
    object: The value from the field that the specified key is pointing to. If the
            key does not exist, the default value is returned. If no default value
            is provided and the key does not exist ``None`` is returned.
[ "Return", "the", "field", "specified", "by", "its", "key", "from", "the", "specified", "section", "." ]
python
train
48
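A sketch of the dot-notation lookup, where doc stands in for a DataStoreDocument obtained from a running workflow and the keys are hypothetical:

count = doc.get('samples.count', default=0,
                section=DataStoreDocumentSection.Data)  # nested field via dots
missing = doc.get('no.such.key', default='n/a')         # falls back to the default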
mottosso/be
be/vendor/requests/utils.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/utils.py#L276-L285
def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. """ cj2 = cookiejar_from_dict(cookie_dict) cj.update(cj2) return cj
[ "def", "add_dict_to_cookiejar", "(", "cj", ",", "cookie_dict", ")", ":", "cj2", "=", "cookiejar_from_dict", "(", "cookie_dict", ")", "cj", ".", "update", "(", "cj2", ")", "return", "cj" ]
Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar.
[ "Returns", "a", "CookieJar", "from", "a", "key", "/", "value", "dictionary", "." ]
python
train
29.4
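Typical use with a requests session, whose RequestsCookieJar supports the update() call the helper relies on:

import requests
from requests.utils import add_dict_to_cookiejar

s = requests.Session()
add_dict_to_cookiejar(s.cookies, {'sessionid': 'abc123'})
print(s.cookies.get('sessionid'))  # 'abc123'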
thanethomson/statik
statik/errors.py
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/errors.py#L87-L96
def render(self, context=None): """Renders the error message, optionally using the given context (which, if specified, will override the internal context).""" ctx = context.render() if context else self.get_error_context().render() return "%s: %s%s%s" % ( self.get_error_kind(), self.get_error_message(), (" (%s)." % ctx) if ctx else "", self.get_additional_error_detail() )
[ "def", "render", "(", "self", ",", "context", "=", "None", ")", ":", "ctx", "=", "context", ".", "render", "(", ")", "if", "context", "else", "self", ".", "get_error_context", "(", ")", ".", "render", "(", ")", "return", "\"%s: %s%s%s\"", "%", "(", "self", ".", "get_error_kind", "(", ")", ",", "self", ".", "get_error_message", "(", ")", ",", "(", "\" (%s).\"", "%", "ctx", ")", "if", "ctx", "else", "\"\"", ",", "self", ".", "get_additional_error_detail", "(", ")", ")" ]
Renders the error message, optionally using the given context (which, if specified, will override the internal context).
[ "Renders", "the", "error", "message", "optionally", "using", "the", "given", "context", "(", "which", "if", "specified", "will", "override", "the", "internal", "context", ")", "." ]
python
train
45.3
wandb/client
wandb/vendor/prompt_toolkit/terminal/vt100_output.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/terminal/vt100_output.py#L344-L368
def _get_size(fileno): # Thanks to fabric (fabfile.org), and # http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/ """ Get the size of this pseudo terminal. :param fileno: stdout.fileno() :returns: A (rows, cols) tuple. """ # Inline imports, because these modules are not available on Windows. # (This file is used by ConEmuOutput, which is used on Windows.) import fcntl import termios # Buffer for the C call buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0]) # Do TIOCGWINSZ (Get) # Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True # is the default.) This causes segmentation faults on some systems. # See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364 fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf) # Return rows, cols return buf[0], buf[1]
[ "def", "_get_size", "(", "fileno", ")", ":", "# Thanks to fabric (fabfile.org), and", "# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/", "# Inline imports, because these modules are not available on Windows.", "# (This file is used by ConEmuOutput, which is used on Windows.)", "import", "fcntl", "import", "termios", "# Buffer for the C call", "buf", "=", "array", ".", "array", "(", "b'h'", "if", "six", ".", "PY2", "else", "u'h'", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ")", "# Do TIOCGWINSZ (Get)", "# Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True", "# is the default.) This causes segmentation faults on some systems.", "# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364", "fcntl", ".", "ioctl", "(", "fileno", ",", "termios", ".", "TIOCGWINSZ", ",", "buf", ")", "# Return rows, cols", "return", "buf", "[", "0", "]", ",", "buf", "[", "1", "]" ]
Get the size of this pseudo terminal. :param fileno: stdout.fileno() :returns: A (rows, cols) tuple.
[ "Get", "the", "size", "of", "this", "pseudo", "terminal", "." ]
python
train
35.32
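Because the helper relies on fcntl/termios, it is POSIX-only and needs a real terminal; a guarded sketch:

import sys

if sys.stdout.isatty():  # only meaningful on a real tty (POSIX)
    rows, cols = _get_size(sys.stdout.fileno())
    print(rows, cols)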
stitchfix/pyxley
pyxley/charts/nvd3/pie_chart.py
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/nvd3/pie_chart.py#L57-L71
def to_json(df, values): """Format output for the json response.""" records = [] if df.empty: return {"data": []} sum_ = float(np.sum([df[c].iloc[0] for c in values])) for c in values: records.append({ "label": values[c], "value": "%.2f"%np.around(df[c].iloc[0] / sum_, decimals=2) }) return { "data" : records }
[ "def", "to_json", "(", "df", ",", "values", ")", ":", "records", "=", "[", "]", "if", "df", ".", "empty", ":", "return", "{", "\"data\"", ":", "[", "]", "}", "sum_", "=", "float", "(", "np", ".", "sum", "(", "[", "df", "[", "c", "]", ".", "iloc", "[", "0", "]", "for", "c", "in", "values", "]", ")", ")", "for", "c", "in", "values", ":", "records", ".", "append", "(", "{", "\"label\"", ":", "values", "[", "c", "]", ",", "\"value\"", ":", "\"%.2f\"", "%", "np", ".", "around", "(", "df", "[", "c", "]", ".", "iloc", "[", "0", "]", "/", "sum_", ",", "decimals", "=", "2", ")", "}", ")", "return", "{", "\"data\"", ":", "records", "}" ]
Format output for the json response.
[ "Format", "output", "for", "the", "json", "response", "." ]
python
train
29.266667
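A worked example: the single row is normalized to shares of its total (numpy must be imported as np alongside the function):

import numpy as np
import pandas as pd

df = pd.DataFrame({'apples': [3.0], 'pears': [1.0]})
print(to_json(df, {'apples': 'Apples', 'pears': 'Pears'}))
# {'data': [{'label': 'Apples', 'value': '0.75'},
#           {'label': 'Pears', 'value': '0.25'}]}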
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L1804-L1841
def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
    r'''Method to derive and set coefficients for chebyshev polynomial function
    approximation of the height-volume and volume-height relationship.

    A single set of chebyshev coefficients is used for the entire height-
    volume and volume-height relationships respectively.

    The forward relationship, `V_from_h`, requires far fewer coefficients in its
    fit than the reverse to obtain the same relative accuracy.

    Optionally, deg_forward or deg_backwards can be set to None to try to
    automatically fit the series to machine precision.

    Parameters
    ----------
    deg_forward : int, optional
        The degree of the chebyshev polynomial to be created for the
        `V_from_h` curve, [-]
    deg_backwards : int, optional
        The degree of the chebyshev polynomial to be created for the
        `h_from_V` curve, [-]
    '''
    from fluids.optional.pychebfun import Chebfun
    to_fit = lambda h: self.V_from_h(h, 'full')
    # These high-degree polynomials cannot safely be evaluated using Horner's method;
    # chebval is 2.5x as slow but 100% required; around 40 coefficients the results are junk
    self.c_forward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.h_max], N=deg_forward).coefficients().tolist()
    self.V_from_h_cheb = lambda x : chebval((2.0*x-self.h_max)/(self.h_max), self.c_forward)

    to_fit = lambda h: self.h_from_V(h, 'brenth')
    self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
    self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
    self.chebyshev = True
[ "def", "set_chebyshev_approximators", "(", "self", ",", "deg_forward", "=", "50", ",", "deg_backwards", "=", "200", ")", ":", "from", "fluids", ".", "optional", ".", "pychebfun", "import", "Chebfun", "to_fit", "=", "lambda", "h", ":", "self", ".", "V_from_h", "(", "h", ",", "'full'", ")", "# These high-degree polynomials cannot safely be evaluated using Horner's method;", "# chebval is 2.5x as slow but 100% required; around 40 coefficients the results are junk", "self", ".", "c_forward", "=", "Chebfun", ".", "from_function", "(", "np", ".", "vectorize", "(", "to_fit", ")", ",", "[", "0.0", ",", "self", ".", "h_max", "]", ",", "N", "=", "deg_forward", ")", ".", "coefficients", "(", ")", ".", "tolist", "(", ")", "self", ".", "V_from_h_cheb", "=", "lambda", "x", ":", "chebval", "(", "(", "2.0", "*", "x", "-", "self", ".", "h_max", ")", "/", "(", "self", ".", "h_max", ")", ",", "self", ".", "c_forward", ")", "to_fit", "=", "lambda", "h", ":", "self", ".", "h_from_V", "(", "h", ",", "'brenth'", ")", "self", ".", "c_backward", "=", "Chebfun", ".", "from_function", "(", "np", ".", "vectorize", "(", "to_fit", ")", ",", "[", "0.0", ",", "self", ".", "V_total", "]", ",", "N", "=", "deg_backwards", ")", ".", "coefficients", "(", ")", ".", "tolist", "(", ")", "self", ".", "h_from_V_cheb", "=", "lambda", "x", ":", "chebval", "(", "(", "2.0", "*", "x", "-", "self", ".", "V_total", ")", "/", "(", "self", ".", "V_total", ")", ",", "self", ".", "c_backward", ")", "self", ".", "chebyshev", "=", "True" ]
r'''Method to derive and set coefficients for chebyshev polynomial function approximation of the height-volume and volume-height relationship. A single set of chebyshev coefficients is used for the entire height- volume and volume-height relationships respectively. The forward relationship, `V_from_h`, requires far fewer coefficients in its fit than the reverse to obtain the same relative accuracy. Optionally, deg_forward or deg_backwards can be set to None to try to automatically fit the series to machine precision. Parameters ---------- deg_forward : int, optional The degree of the chebyshev polynomial to be created for the `V_from_h` curve, [-] deg_backwards : int, optional The degree of the chebyshev polynomial to be created for the `h_from_V` curve, [-]
[ "r", "Method", "to", "derive", "and", "set", "coefficients", "for", "chebyshev", "polynomial", "function", "approximation", "of", "the", "height", "-", "volume", "and", "volume", "-", "height", "relationship", ".", "A", "single", "set", "of", "chebyshev", "coefficients", "is", "used", "for", "the", "entire", "height", "-", "volume", "and", "volume", "-", "height", "relationships", "respectively", ".", "The", "forward", "relationship", "V_from_h", "requires", "far", "fewer", "coefficients", "in", "its", "fit", "than", "the", "reverse", "to", "obtain", "the", "same", "relative", "accuracy", ".", "Optionally", "deg_forward", "or", "deg_backwards", "can", "be", "set", "to", "None", "to", "try", "to", "automatically", "fit", "the", "series", "to", "machine", "precision", ".", "Parameters", "----------", "deg_forward", ":", "int", "optional", "The", "degree", "of", "the", "chebyshev", "polynomial", "to", "be", "created", "for", "the", "V_from_h", "curve", "[", "-", "]", "deg_backwards", ":", "int", "optional", "The", "degree", "of", "the", "chebyshev", "polynomial", "to", "be", "created", "for", "the", "h_from_V", "curve", "[", "-", "]" ]
python
train
50.763158
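A sketch of the same fit-then-evaluate idea using numpy's chebyshev module in place of the vendored pychebfun (an assumption; the cubic stand-in replaces the real height-to-volume curve):

import numpy as np
from numpy.polynomial import chebyshev

h_max = 2.0
V_from_h = lambda h: h ** 3  # stand-in for the tank's height -> volume curve

# Fit on [0, h_max] after mapping to Chebyshev's natural domain [-1, 1].
h = np.linspace(0.0, h_max, 400)
c_forward = chebyshev.chebfit(2.0 * h / h_max - 1.0, V_from_h(h), deg=20)

# Evaluate with the same domain mapping used above: x -> (2x - h_max)/h_max.
V_from_h_cheb = lambda x: chebyshev.chebval((2.0 * x - h_max) / h_max, c_forward)
print(V_from_h_cheb(1.3), V_from_h(1.3))  # the two values should agree closely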
bernieke/python-magento
magento/magento_api.py
https://github.com/bernieke/python-magento/blob/bfd23e233905b1b1491a7c07b9d833dfebd70456/magento/magento_api.py#L118-L127
def keep_session_alive(self): """If the session expired, logs back in.""" try: self.resources() except xmlrpclib.Fault as fault: if fault.faultCode == 5: self.login() else: raise
[ "def", "keep_session_alive", "(", "self", ")", ":", "try", ":", "self", ".", "resources", "(", ")", "except", "xmlrpclib", ".", "Fault", "as", "fault", ":", "if", "fault", ".", "faultCode", "==", "5", ":", "self", ".", "login", "(", ")", "else", ":", "raise" ]
If the session expired, logs back in.
[ "If", "the", "session", "expired", "logs", "back", "in", "." ]
python
train
26.2
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Parser.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L940-L969
def createElementsFromHTML(cls, html, encoding='utf-8'):
    '''
        createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements
            children of these root-level nodes are accessible via the usual means.

            @param html <str> - Some html data

            @param encoding <str> - Encoding to use for document

            @return list<AdvancedTag> - The root (top-level) tags from parsed html.

            NOTE: If there is text outside the tags, they will be lost in this. Use createBlocksFromHTML instead
              if you need to retain both text and tags.

              Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
    '''
    # TODO: If text is present outside a tag, it will be lost.
    parser = cls(encoding=encoding)
    parser.parseStr(html)

    rootNode = parser.getRoot()

    rootNode.remove() # Detach from temp document

    if isInvisibleRootTag(rootNode):
        return rootNode.children

    return [rootNode]
[ "def", "createElementsFromHTML", "(", "cls", ",", "html", ",", "encoding", "=", "'utf-8'", ")", ":", "# TODO: If text is present outside a tag, it will be lost.", "parser", "=", "cls", "(", "encoding", "=", "encoding", ")", "parser", ".", "parseStr", "(", "html", ")", "rootNode", "=", "parser", ".", "getRoot", "(", ")", "rootNode", ".", "remove", "(", ")", "# Detach from temp document", "if", "isInvisibleRootTag", "(", "rootNode", ")", ":", "return", "rootNode", ".", "children", "return", "[", "rootNode", "]" ]
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements children of these root-level nodes are accessible via the usual means. @param html <str> - Some html data @param encoding <str> - Encoding to use for document @return list<AdvancedTag> - The root (top-level) tags from parsed html. NOTE: If there is text outside the tags, they will be lost in this. Use createBlocksFromHTML instead if you need to retain both text and tags. Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
[ "createElementsFromHTML", "-", "Creates", "elements", "from", "provided", "html", "and", "returns", "a", "list", "of", "the", "root", "-", "level", "elements", "children", "of", "these", "root", "-", "level", "nodes", "are", "accessible", "via", "the", "usual", "means", "." ]
python
train
35.466667
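A hedged usage sketch, assuming the AdvancedHTMLParser package is installed and exports the AdvancedHTMLParser class at the top level as its docs describe:

from AdvancedHTMLParser import AdvancedHTMLParser

tags = AdvancedHTMLParser.createElementsFromHTML(
    '<div id="a">one</div><div id="b">two</div>')
print([tag.getAttribute('id') for tag in tags])  # expected: ['a', 'b']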
coghost/izen
izen/helper.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/helper.py#L670-L673
def l_endian(v):
    """Little-endian byte order"""
    w = struct.pack('<H', v)
    return str(binascii.hexlify(w), encoding='gbk')
[ "def", "l_endian", "(", "v", ")", ":", "w", "=", "struct", ".", "pack", "(", "'<H'", ",", "v", ")", "return", "str", "(", "binascii", ".", "hexlify", "(", "w", ")", ",", "encoding", "=", "'gbk'", ")" ]
Little-endian byte order
[ "Little", "-", "endian", "byte", "order" ]
python
train
27.5
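A self-contained check of the function above (stdlib only; the 'gbk' encoding is kept as in the source, though the hex digits are plain ASCII):

import binascii
import struct

def l_endian(v):
    """Little-endian byte order"""
    w = struct.pack('<H', v)
    return str(binascii.hexlify(w), encoding='gbk')

print(l_endian(1))       # '0100' - low byte first
print(l_endian(0x1234))  # '3412'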
coldfix/udiskie
udiskie/async_.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/async_.py#L82-L87
def set_exception(self, exception): """Signal unsuccessful completion.""" was_handled = self._finish(self.errbacks, exception) if not was_handled: traceback.print_exception( type(exception), exception, exception.__traceback__)
[ "def", "set_exception", "(", "self", ",", "exception", ")", ":", "was_handled", "=", "self", ".", "_finish", "(", "self", ".", "errbacks", ",", "exception", ")", "if", "not", "was_handled", ":", "traceback", ".", "print_exception", "(", "type", "(", "exception", ")", ",", "exception", ",", "exception", ".", "__traceback__", ")" ]
Signal unsuccessful completion.
[ "Signal", "unsuccessful", "completion", "." ]
python
train
45.5
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10698-L10723
def set_position_target_global_int_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False): ''' Sets a desired vehicle position, velocity, and/or acceleration in a global coordinate system (WGS84). Used by an external controller to command the vehicle (manual controller or other system). time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return self.send(self.set_position_target_global_int_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
[ "def", "set_position_target_global_int_send", "(", "self", ",", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "set_position_target_global_int_encode", "(", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Sets a desired vehicle position, velocity, and/or acceleration in a global coordinate system (WGS84). Used by an external controller to command the vehicle (manual controller or other system). time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Sets", "a", "desired", "vehicle", "position", "velocity", "and", "/", "or", "acceleration", "in", "a", "global", "coordinate", "system", "(", "WGS84", ")", ".", "Used", "by", "an", "external", "controller", "to", "command", "the", "vehicle", "(", "manual", "controller", "or", "other", "system", ")", "." ]
python
train
116.192308
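A sketch of building the type_mask described in the docstring, where a set bit means "ignore this field"; bit numbers follow the mapping above with bit 1 as the least significant bit (the constant names are illustrative):

IGNORE_VX, IGNORE_VY, IGNORE_VZ = 1 << 3, 1 << 4, 1 << 5
IGNORE_AX, IGNORE_AY, IGNORE_AZ = 1 << 6, 1 << 7, 1 << 8
IGNORE_YAW, IGNORE_YAW_RATE = 1 << 10, 1 << 11

# Keep only lat/lon/alt active: ignore velocity, acceleration, yaw, yaw rate.
position_only = (IGNORE_VX | IGNORE_VY | IGNORE_VZ |
                 IGNORE_AX | IGNORE_AY | IGNORE_AZ |
                 IGNORE_YAW | IGNORE_YAW_RATE)
print(bin(position_only))  # 0b110111111000, i.e. 3576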
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L140-L156
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop('remote_interface_name') remote_interface_mac = ET.SubElement(lldp_neighbor_detail, "remote-interface-mac") remote_interface_mac.text = kwargs.pop('remote_interface_mac') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "remote_interface_mac", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-mac\"", ")", "remote_interface_mac", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_mac'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
59.764706
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/generators/acyclic_graph_generator.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/generators/acyclic_graph_generator.py#L77-L97
def init_variables(self, verbose=False): """Redefine the causes of the graph.""" for j in range(1, self.nodes): nb_parents = np.random.randint(0, min([self.parents_max, j])+1) for i in np.random.choice(range(0, j), nb_parents, replace=False): self.adjacency_matrix[i, j] = 1 try: self.g = nx.DiGraph(self.adjacency_matrix) assert not list(nx.simple_cycles(self.g)) except AssertionError: if verbose: print("Regenerating, graph non valid...") self.init_variables() # Mechanisms self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) if sum(self.adjacency_matrix[:, i]) else self.initial_generator for i in range(self.nodes)]
[ "def", "init_variables", "(", "self", ",", "verbose", "=", "False", ")", ":", "for", "j", "in", "range", "(", "1", ",", "self", ".", "nodes", ")", ":", "nb_parents", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "min", "(", "[", "self", ".", "parents_max", ",", "j", "]", ")", "+", "1", ")", "for", "i", "in", "np", ".", "random", ".", "choice", "(", "range", "(", "0", ",", "j", ")", ",", "nb_parents", ",", "replace", "=", "False", ")", ":", "self", ".", "adjacency_matrix", "[", "i", ",", "j", "]", "=", "1", "try", ":", "self", ".", "g", "=", "nx", ".", "DiGraph", "(", "self", ".", "adjacency_matrix", ")", "assert", "not", "list", "(", "nx", ".", "simple_cycles", "(", "self", ".", "g", ")", ")", "except", "AssertionError", ":", "if", "verbose", ":", "print", "(", "\"Regenerating, graph non valid...\"", ")", "self", ".", "init_variables", "(", ")", "# Mechanisms", "self", ".", "cfunctions", "=", "[", "self", ".", "mechanism", "(", "int", "(", "sum", "(", "self", ".", "adjacency_matrix", "[", ":", ",", "i", "]", ")", ")", ",", "self", ".", "points", ",", "self", ".", "noise", ",", "noise_coeff", "=", "self", ".", "noise_coeff", ")", "if", "sum", "(", "self", ".", "adjacency_matrix", "[", ":", ",", "i", "]", ")", "else", "self", ".", "initial_generator", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", "]" ]
Redefine the causes of the graph.
[ "Redefine", "the", "causes", "of", "the", "graph", "." ]
python
valid
44.142857
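A small sketch of the acyclicity test used above (numpy and networkx assumed): an adjacency matrix is kept only if the resulting directed graph has no cycles.

import numpy as np
import networkx as nx

adj = np.zeros((4, 4))
adj[0, 1] = adj[1, 2] = adj[2, 3] = 1  # a simple chain 0 -> 1 -> 2 -> 3
g = nx.DiGraph(adj)
assert not list(nx.simple_cycles(g))     # the same check as in init_variables
print(nx.is_directed_acyclic_graph(g))   # True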
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L1012-L1154
def path_complete(self, text: str, line: str, begidx: int, endidx: int,
                  path_filter: Optional[Callable[[str], bool]] = None) -> List[str]:
    """Performs completion of local file system paths

    :param text: the string prefix we are attempting to match (all returned matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param path_filter: optional filter function that determines if a path belongs in the results
                        this function takes a path as its argument and returns True if the path should
                        be kept in the results
    :return: a list of possible tab completions
    """
    # Used to complete ~ and ~user strings
    def complete_users() -> List[str]:

        # We are returning ~user strings that resolve to directories,
        # so don't append a space or quote in the case of a single result.
        self.allow_appended_space = False
        self.allow_closing_quote = False

        users = []

        # Windows lacks the pwd module so we can't get a list of users.
        # Instead we will return a result once the user enters text that
        # resolves to an existing home directory.
        if sys.platform.startswith('win'):
            expanded_path = os.path.expanduser(text)
            if os.path.isdir(expanded_path):
                user = text
                if add_trailing_sep_if_dir:
                    user += os.path.sep
                users.append(user)
        else:
            import pwd

            # Iterate through a list of users from the password database
            for cur_pw in pwd.getpwall():

                # Check if the user has an existing home dir
                if os.path.isdir(cur_pw.pw_dir):

                    # Add a ~ to the user to match against text
                    cur_user = '~' + cur_pw.pw_name
                    if cur_user.startswith(text):
                        if add_trailing_sep_if_dir:
                            cur_user += os.path.sep
                        users.append(cur_user)

        return users

    # Determine if a trailing separator should be appended to directory completions
    add_trailing_sep_if_dir = False
    if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
        add_trailing_sep_if_dir = True

    # Used to replace cwd in the final results
    cwd = os.getcwd()
    cwd_added = False

    # Used to replace expanded user path in final result
    orig_tilde_path = ''
    expanded_tilde_path = ''

    # If the search text is blank, then search in the CWD for *
    if not text:
        search_str = os.path.join(os.getcwd(), '*')
        cwd_added = True
    else:
        # Purposely don't match any path containing wildcards
        wildcards = ['*', '?']
        for wildcard in wildcards:
            if wildcard in text:
                return []

        # Start the search string
        search_str = text + '*'

        # Handle tilde expansion and completion
        if text.startswith('~'):
            sep_index = text.find(os.path.sep, 1)

            # If there is no slash, then the user is still completing the user after the tilde
            if sep_index == -1:
                return complete_users()

            # Otherwise expand the user dir
            else:
                search_str = os.path.expanduser(search_str)

                # Get what we need to restore the original tilde path later
                orig_tilde_path = text[:sep_index]
                expanded_tilde_path = os.path.expanduser(orig_tilde_path)

        # If the search text does not have a directory, then use the cwd
        elif not os.path.dirname(text):
            search_str = os.path.join(os.getcwd(), search_str)
            cwd_added = True

    # Set this to True for proper quoting of paths with spaces
    self.matches_delimited = True

    # Find all matching path completions
    matches = glob.glob(search_str)

    # Filter out results that don't belong
    if path_filter is not None:
        matches = [c for c in matches if path_filter(c)]

    # Don't append a space or closing quote to directory
    if len(matches) == 1 and os.path.isdir(matches[0]):
        self.allow_appended_space = False
        self.allow_closing_quote = False

    # Sort the matches before any trailing slashes are added
    matches.sort(key=self.matches_sort_key)
    self.matches_sorted = True

    # Build display_matches and add a slash to directories
    for index, cur_match in enumerate(matches):

        # Display only the basename of this path in the tab-completion suggestions
        self.display_matches.append(os.path.basename(cur_match))

        # Add a separator after directories if the next character isn't already a separator
        if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
            matches[index] += os.path.sep
            self.display_matches[index] += os.path.sep

    # Remove cwd if it was added to match the text readline expects
    if cwd_added:
        if cwd == os.path.sep:
            to_replace = cwd
        else:
            to_replace = cwd + os.path.sep
        matches = [cur_path.replace(to_replace, '', 1) for cur_path in matches]

    # Restore the tilde string if we expanded one to match the text readline expects
    if expanded_tilde_path:
        matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]

    return matches
[ "def", "path_complete", "(", "self", ",", "text", ":", "str", ",", "line", ":", "str", ",", "begidx", ":", "int", ",", "endidx", ":", "int", ",", "path_filter", ":", "Optional", "[", "Callable", "[", "[", "str", "]", ",", "bool", "]", "]", "=", "None", ")", "->", "List", "[", "str", "]", ":", "# Used to complete ~ and ~user strings", "def", "complete_users", "(", ")", "->", "List", "[", "str", "]", ":", "# We are returning ~user strings that resolve to directories,", "# so don't append a space or quote in the case of a single result.", "self", ".", "allow_appended_space", "=", "False", "self", ".", "allow_closing_quote", "=", "False", "users", "=", "[", "]", "# Windows lacks the pwd module so we can't get a list of users.", "# Instead we will return a result once the user enters text that", "# resolves to an existing home directory.", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "expanded_path", "=", "os", ".", "path", ".", "expanduser", "(", "text", ")", "if", "os", ".", "path", ".", "isdir", "(", "expanded_path", ")", ":", "user", "=", "text", "if", "add_trailing_sep_if_dir", ":", "user", "+=", "os", ".", "path", ".", "sep", "users", ".", "append", "(", "user", ")", "else", ":", "import", "pwd", "# Iterate through a list of users from the password database", "for", "cur_pw", "in", "pwd", ".", "getpwall", "(", ")", ":", "# Check if the user has an existing home dir", "if", "os", ".", "path", ".", "isdir", "(", "cur_pw", ".", "pw_dir", ")", ":", "# Add a ~ to the user to match against text", "cur_user", "=", "'~'", "+", "cur_pw", ".", "pw_name", "if", "cur_user", ".", "startswith", "(", "text", ")", ":", "if", "add_trailing_sep_if_dir", ":", "cur_user", "+=", "os", ".", "path", ".", "sep", "users", ".", "append", "(", "cur_user", ")", "return", "users", "# Determine if a trailing separator should be appended to directory completions", "add_trailing_sep_if_dir", "=", "False", "if", "endidx", "==", "len", "(", "line", ")", "or", "(", "endidx", "<", "len", "(", "line", ")", "and", "line", "[", "endidx", "]", "!=", "os", ".", "path", ".", "sep", ")", ":", "add_trailing_sep_if_dir", "=", "True", "# Used to replace cwd in the final results", "cwd", "=", "os", ".", "getcwd", "(", ")", "cwd_added", "=", "False", "# Used to replace expanded user path in final result", "orig_tilde_path", "=", "''", "expanded_tilde_path", "=", "''", "# If the search text is blank, then search in the CWD for *", "if", "not", "text", ":", "search_str", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'*'", ")", "cwd_added", "=", "True", "else", ":", "# Purposely don't match any path containing wildcards", "wildcards", "=", "[", "'*'", ",", "'?'", "]", "for", "wildcard", "in", "wildcards", ":", "if", "wildcard", "in", "text", ":", "return", "[", "]", "# Start the search string", "search_str", "=", "text", "+", "'*'", "# Handle tilde expansion and completion", "if", "text", ".", "startswith", "(", "'~'", ")", ":", "sep_index", "=", "text", ".", "find", "(", "os", ".", "path", ".", "sep", ",", "1", ")", "# If there is no slash, then the user is still completing the user after the tilde", "if", "sep_index", "==", "-", "1", ":", "return", "complete_users", "(", ")", "# Otherwise expand the user dir", "else", ":", "search_str", "=", "os", ".", "path", ".", "expanduser", "(", "search_str", ")", "# Get what we need to restore the original tilde path later", "orig_tilde_path", "=", "text", "[", ":", "sep_index", "]", "expanded_tilde_path", "=", "os", ".", "path", ".", 
"expanduser", "(", "orig_tilde_path", ")", "# If the search text does not have a directory, then use the cwd", "elif", "not", "os", ".", "path", ".", "dirname", "(", "text", ")", ":", "search_str", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "search_str", ")", "cwd_added", "=", "True", "# Set this to True for proper quoting of paths with spaces", "self", ".", "matches_delimited", "=", "True", "# Find all matching path completions", "matches", "=", "glob", ".", "glob", "(", "search_str", ")", "# Filter out results that don't belong", "if", "path_filter", "is", "not", "None", ":", "matches", "=", "[", "c", "for", "c", "in", "matches", "if", "path_filter", "(", "c", ")", "]", "# Don't append a space or closing quote to directory", "if", "len", "(", "matches", ")", "==", "1", "and", "os", ".", "path", ".", "isdir", "(", "matches", "[", "0", "]", ")", ":", "self", ".", "allow_appended_space", "=", "False", "self", ".", "allow_closing_quote", "=", "False", "# Sort the matches before any trailing slashes are added", "matches", ".", "sort", "(", "key", "=", "self", ".", "matches_sort_key", ")", "self", ".", "matches_sorted", "=", "True", "# Build display_matches and add a slash to directories", "for", "index", ",", "cur_match", "in", "enumerate", "(", "matches", ")", ":", "# Display only the basename of this path in the tab-completion suggestions", "self", ".", "display_matches", ".", "append", "(", "os", ".", "path", ".", "basename", "(", "cur_match", ")", ")", "# Add a separator after directories if the next character isn't already a separator", "if", "os", ".", "path", ".", "isdir", "(", "cur_match", ")", "and", "add_trailing_sep_if_dir", ":", "matches", "[", "index", "]", "+=", "os", ".", "path", ".", "sep", "self", ".", "display_matches", "[", "index", "]", "+=", "os", ".", "path", ".", "sep", "# Remove cwd if it was added to match the text readline expects", "if", "cwd_added", ":", "if", "cwd", "==", "os", ".", "path", ".", "sep", ":", "to_replace", "=", "cwd", "else", ":", "to_replace", "=", "cwd", "+", "os", ".", "path", ".", "sep", "matches", "=", "[", "cur_path", ".", "replace", "(", "to_replace", ",", "''", ",", "1", ")", "for", "cur_path", "in", "matches", "]", "# Restore the tilde string if we expanded one to match the text readline expects", "if", "expanded_tilde_path", ":", "matches", "=", "[", "cur_path", ".", "replace", "(", "expanded_tilde_path", ",", "orig_tilde_path", ",", "1", ")", "for", "cur_path", "in", "matches", "]", "return", "matches" ]
Performs completion of local file system paths :param text: the string prefix we are attempting to match (all returned matches must begin with it) :param line: the current input line with leading whitespace removed :param begidx: the beginning index of the prefix text :param endidx: the ending index of the prefix text :param path_filter: optional filter function that determines if a path belongs in the results this function takes a path as its argument and returns True if the path should be kept in the results :return: a list of possible tab completions
[ "Performs", "completion", "of", "local", "file", "system", "paths" ]
python
train
41.307692
edx/edx-enterprise
integrated_channels/degreed/client.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/degreed/client.py#L190-L211
def _create_session(self, scope): """ Instantiate a new session object for use in connecting with Degreed """ now = datetime.datetime.utcnow() if self.session is None or self.expires_at is None or now >= self.expires_at: # Create a new session with a valid token if self.session: self.session.close() oauth_access_token, expires_at = self._get_oauth_access_token( self.enterprise_configuration.key, self.enterprise_configuration.secret, self.enterprise_configuration.degreed_user_id, self.enterprise_configuration.degreed_user_password, scope ) session = requests.Session() session.timeout = self.SESSION_TIMEOUT session.headers['Authorization'] = 'Bearer {}'.format(oauth_access_token) session.headers['content-type'] = 'application/json' self.session = session self.expires_at = expires_at
[ "def", "_create_session", "(", "self", ",", "scope", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "if", "self", ".", "session", "is", "None", "or", "self", ".", "expires_at", "is", "None", "or", "now", ">=", "self", ".", "expires_at", ":", "# Create a new session with a valid token", "if", "self", ".", "session", ":", "self", ".", "session", ".", "close", "(", ")", "oauth_access_token", ",", "expires_at", "=", "self", ".", "_get_oauth_access_token", "(", "self", ".", "enterprise_configuration", ".", "key", ",", "self", ".", "enterprise_configuration", ".", "secret", ",", "self", ".", "enterprise_configuration", ".", "degreed_user_id", ",", "self", ".", "enterprise_configuration", ".", "degreed_user_password", ",", "scope", ")", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "timeout", "=", "self", ".", "SESSION_TIMEOUT", "session", ".", "headers", "[", "'Authorization'", "]", "=", "'Bearer {}'", ".", "format", "(", "oauth_access_token", ")", "session", ".", "headers", "[", "'content-type'", "]", "=", "'application/json'", "self", ".", "session", "=", "session", "self", ".", "expires_at", "=", "expires_at" ]
Instantiate a new session object for use in connecting with Degreed
[ "Instantiate", "a", "new", "session", "object", "for", "use", "in", "connecting", "with", "Degreed" ]
python
valid
46.636364
wq/django-natural-keys
natural_keys/models.py
https://github.com/wq/django-natural-keys/blob/f6bd6baf848e709ae9920b259a3ad1a6be8af615/natural_keys/models.py#L38-L70
def get_by_natural_key(self, *args): """ Return the object corresponding to the provided natural key. (This is a generic implementation of the standard Django function) """ kwargs = self.natural_key_kwargs(*args) # Since kwargs already has __ lookups in it, we could just do this: # return self.get(**kwargs) # But, we should call each related model's get_by_natural_key in case # it's been overridden for name, rel_to in self.model.get_natural_key_info(): if not rel_to: continue # Extract natural key for related object nested_key = extract_nested_key(kwargs, rel_to, name) if nested_key: # Update kwargs with related object try: kwargs[name] = rel_to.objects.get_by_natural_key( *nested_key ) except rel_to.DoesNotExist: # If related object doesn't exist, assume this one doesn't raise self.model.DoesNotExist() else: kwargs[name] = None return self.get(**kwargs)
[ "def", "get_by_natural_key", "(", "self", ",", "*", "args", ")", ":", "kwargs", "=", "self", ".", "natural_key_kwargs", "(", "*", "args", ")", "# Since kwargs already has __ lookups in it, we could just do this:", "# return self.get(**kwargs)", "# But, we should call each related model's get_by_natural_key in case", "# it's been overridden", "for", "name", ",", "rel_to", "in", "self", ".", "model", ".", "get_natural_key_info", "(", ")", ":", "if", "not", "rel_to", ":", "continue", "# Extract natural key for related object", "nested_key", "=", "extract_nested_key", "(", "kwargs", ",", "rel_to", ",", "name", ")", "if", "nested_key", ":", "# Update kwargs with related object", "try", ":", "kwargs", "[", "name", "]", "=", "rel_to", ".", "objects", ".", "get_by_natural_key", "(", "*", "nested_key", ")", "except", "rel_to", ".", "DoesNotExist", ":", "# If related object doesn't exist, assume this one doesn't", "raise", "self", ".", "model", ".", "DoesNotExist", "(", ")", "else", ":", "kwargs", "[", "name", "]", "=", "None", "return", "self", ".", "get", "(", "*", "*", "kwargs", ")" ]
Return the object corresponding to the provided natural key. (This is a generic implementation of the standard Django function)
[ "Return", "the", "object", "corresponding", "to", "the", "provided", "natural", "key", "." ]
python
train
35.515152
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L1808-L1840
def process_apk(self, data, name): """ Processes Android application :param data: :param name: :return: """ try: from apk_parse.apk import APK except Exception as e: logger.warning('Could not import apk_parse, try running: pip install apk_parse_ph4') return [TestResult(fname=name, type='apk-pem-cert', error='cannot-import')] ret = [] try: from cryptography.x509.base import load_der_x509_certificate apkf = APK(data, process_now=False, process_file_types=False, raw=True, temp_dir=self.args.tmp_dir) apkf.process() self.num_apk += 1 pem = apkf.cert_pem aux = {'subtype': 'apk'} x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend()) sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source='apk-pem-cert', aux=aux) ret.append(sub) except Exception as e: logger.debug('Exception in processing APK %s : %s' % (name, e)) self.trace_logger.log(e) return ret
[ "def", "process_apk", "(", "self", ",", "data", ",", "name", ")", ":", "try", ":", "from", "apk_parse", ".", "apk", "import", "APK", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "'Could not import apk_parse, try running: pip install apk_parse_ph4'", ")", "return", "[", "TestResult", "(", "fname", "=", "name", ",", "type", "=", "'apk-pem-cert'", ",", "error", "=", "'cannot-import'", ")", "]", "ret", "=", "[", "]", "try", ":", "from", "cryptography", ".", "x509", ".", "base", "import", "load_der_x509_certificate", "apkf", "=", "APK", "(", "data", ",", "process_now", "=", "False", ",", "process_file_types", "=", "False", ",", "raw", "=", "True", ",", "temp_dir", "=", "self", ".", "args", ".", "tmp_dir", ")", "apkf", ".", "process", "(", ")", "self", ".", "num_apk", "+=", "1", "pem", "=", "apkf", ".", "cert_pem", "aux", "=", "{", "'subtype'", ":", "'apk'", "}", "x509", "=", "load_der_x509_certificate", "(", "pem_to_der", "(", "pem", ")", ",", "self", ".", "get_backend", "(", ")", ")", "sub", "=", "self", ".", "process_x509", "(", "x509", ",", "name", "=", "name", ",", "idx", "=", "0", ",", "data", "=", "data", ",", "pem", "=", "True", ",", "source", "=", "'apk-pem-cert'", ",", "aux", "=", "aux", ")", "ret", ".", "append", "(", "sub", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'Exception in processing APK %s : %s'", "%", "(", "name", ",", "e", ")", ")", "self", ".", "trace_logger", ".", "log", "(", "e", ")", "return", "ret" ]
Processes Android application :param data: :param name: :return:
[ "Processes", "Android", "application", ":", "param", "data", ":", ":", "param", "name", ":", ":", "return", ":" ]
python
train
34.818182
buriburisuri/sugartensor
sugartensor/sg_logging.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L142-L160
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None): r"""Register `tensor` to summary report as audio Args: tensor: A `Tensor` to log as audio sample_rate : An int. Sample rate to report. Default is 16000. prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None """ # defaults prefix = '' if prefix is None else prefix + '/' # summary name name = prefix + _pretty_name(tensor) if name is None else prefix + name # summary statistics if not tf.get_variable_scope().reuse: tf.summary.audio(name + '-au', tensor, sample_rate)
[ "def", "sg_summary_audio", "(", "tensor", ",", "sample_rate", "=", "16000", ",", "prefix", "=", "None", ",", "name", "=", "None", ")", ":", "# defaults", "prefix", "=", "''", "if", "prefix", "is", "None", "else", "prefix", "+", "'/'", "# summary name", "name", "=", "prefix", "+", "_pretty_name", "(", "tensor", ")", "if", "name", "is", "None", "else", "prefix", "+", "name", "# summary statistics", "if", "not", "tf", ".", "get_variable_scope", "(", ")", ".", "reuse", ":", "tf", ".", "summary", ".", "audio", "(", "name", "+", "'-au'", ",", "tensor", ",", "sample_rate", ")" ]
r"""Register `tensor` to summary report as audio Args: tensor: A `Tensor` to log as audio sample_rate : An int. Sample rate to report. Default is 16000. prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
[ "r", "Register", "tensor", "to", "summary", "report", "as", "audio" ]
python
train
36.631579
heuer/segno
segno/encoder.py
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/encoder.py#L1511-L1527
def is_mode_supported(mode, ver): """\ Returns if `mode` is supported by `version`. Note: This function does not check if `version` is actually a valid (Micro) QR Code version. Invalid versions like ``41`` may return an illegal value. :param int mode: Canonicalized mode. :param int or None ver: (Micro) QR Code version constant. :rtype: bool """ ver = None if ver > 0 else ver try: return ver in consts.SUPPORTED_MODES[mode] except KeyError: raise ModeError('Unknown mode "{0}"'.format(mode))
[ "def", "is_mode_supported", "(", "mode", ",", "ver", ")", ":", "ver", "=", "None", "if", "ver", ">", "0", "else", "ver", "try", ":", "return", "ver", "in", "consts", ".", "SUPPORTED_MODES", "[", "mode", "]", "except", "KeyError", ":", "raise", "ModeError", "(", "'Unknown mode \"{0}\"'", ".", "format", "(", "mode", ")", ")" ]
\ Returns if `mode` is supported by `version`. Note: This function does not check if `version` is actually a valid (Micro) QR Code version. Invalid versions like ``41`` may return an illegal value. :param int mode: Canonicalized mode. :param int or None ver: (Micro) QR Code version constant. :rtype: bool
[ "\\", "Returns", "if", "mode", "is", "supported", "by", "version", "." ]
python
train
31.941176
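A sketch of the lookup pattern only; the real table lives in segno's consts module, and the toy entries below (mode indicators as small ints, Micro QR versions as negative ints) are assumptions for illustration:

TOY_SUPPORTED_MODES = {
    0x1: (None, -1, -2, -3, -4),  # 'numeric': regular QR plus all Micro sizes
    0x2: (None, -2, -3, -4),      # 'alphanumeric': regular QR plus M2..M4
}

def toy_is_mode_supported(mode, ver):
    ver = None if ver > 0 else ver  # any regular version collapses to None
    try:
        return ver in TOY_SUPPORTED_MODES[mode]
    except KeyError:
        raise ValueError('Unknown mode "{0}"'.format(mode))

print(toy_is_mode_supported(0x1, 21))  # True
print(toy_is_mode_supported(0x2, -1))  # False: no alphanumeric in M1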
chaoss/grimoirelab-sigils
src/migration/utils.py
https://github.com/chaoss/grimoirelab-sigils/blob/33d395195acb316287143a535a2c6e4009bf0528/src/migration/utils.py#L40-L58
def replace(pretty, old_str, new_str): """ Replace strings giving some info on where the replacement was done """ out_str = '' line_number = 1 changes = 0 for line in pretty.splitlines(keepends=True): new_line = line.replace(old_str, new_str) if line.find(old_str) != -1: logging.debug('%s', line_number) logging.debug('< %s', line) logging.debug('> %s', new_line) changes += 1 out_str += new_line line_number += 1 logging.info('Total changes(%s): %s', old_str, changes) return out_str
[ "def", "replace", "(", "pretty", ",", "old_str", ",", "new_str", ")", ":", "out_str", "=", "''", "line_number", "=", "1", "changes", "=", "0", "for", "line", "in", "pretty", ".", "splitlines", "(", "keepends", "=", "True", ")", ":", "new_line", "=", "line", ".", "replace", "(", "old_str", ",", "new_str", ")", "if", "line", ".", "find", "(", "old_str", ")", "!=", "-", "1", ":", "logging", ".", "debug", "(", "'%s'", ",", "line_number", ")", "logging", ".", "debug", "(", "'< %s'", ",", "line", ")", "logging", ".", "debug", "(", "'> %s'", ",", "new_line", ")", "changes", "+=", "1", "out_str", "+=", "new_line", "line_number", "+=", "1", "logging", ".", "info", "(", "'Total changes(%s): %s'", ",", "old_str", ",", "changes", ")", "return", "out_str" ]
Replace strings giving some info on where the replacement was done
[ "Replace", "strings", "giving", "some", "info", "on", "where", "the", "replacement", "was", "done" ]
python
train
30.736842
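A self-contained run of the helper above (stdlib only); DEBUG logging makes the per-line trace from the docstring visible:

import logging
logging.basicConfig(level=logging.DEBUG)

def replace(pretty, old_str, new_str):
    """ Replace strings giving some info on where the replacement was done """
    out_str = ''
    line_number = 1
    changes = 0
    for line in pretty.splitlines(keepends=True):
        new_line = line.replace(old_str, new_str)
        if line.find(old_str) != -1:
            logging.debug('%s', line_number)
            logging.debug('< %s', line)
            logging.debug('> %s', new_line)
            changes += 1
        out_str += new_line
        line_number += 1
    logging.info('Total changes(%s): %s', old_str, changes)
    return out_str

print(replace('panel_a\npanel_b\npanel_a\n', 'panel_a', 'panel_c'))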
FNNDSC/med2image
med2image/message.py
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/message.py#L142-L157
def socket_parse(self, astr_destination): ''' Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False. ''' t_socketInfo = astr_destination.partition(':') if len(t_socketInfo[1]): self._b_isSocket = True self._socketRemote = t_socketInfo[0] self._socketPort = t_socketInfo[2] else: self._b_isSocket = False return self._b_isSocket
[ "def", "socket_parse", "(", "self", ",", "astr_destination", ")", ":", "t_socketInfo", "=", "astr_destination", ".", "partition", "(", "':'", ")", "if", "len", "(", "t_socketInfo", "[", "1", "]", ")", ":", "self", ".", "_b_isSocket", "=", "True", "self", ".", "_socketRemote", "=", "t_socketInfo", "[", "0", "]", "self", ".", "_socketPort", "=", "t_socketInfo", "[", "2", "]", "else", ":", "self", ".", "_b_isSocket", "=", "False", "return", "self", ".", "_b_isSocket" ]
Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False.
[ "Examines", "<astr_destination", ">", "and", "if", "of", "form", "<str1", ">", ":", "<str2", ">", "assumes", "that", "<str1", ">", "is", "a", "host", "to", "send", "datagram", "comms", "to", "over", "port", "<str2", ">", "." ]
python
train
34.4375
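A hedged sketch of the same parsing rule outside the class: a destination of the form host:port is treated as a socket target, anything else is not (returning a tuple here stands in for the instance attributes set by the original):

def parse_destination(astr_destination):
    host, sep, port = astr_destination.partition(':')
    if sep:
        return True, host, port
    return False, None, None

print(parse_destination('loghost:1701'))    # (True, 'loghost', '1701')
print(parse_destination('/tmp/debug.log'))  # (False, None, None)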
timothydmorton/isochrones
isochrones/starmodel_old.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L1044-L1092
def save_hdf(self, filename, path='', overwrite=False, append=False): """Saves object data to HDF file (only works if MCMC is run) Samples are saved to /samples location under given path, and object properties are also attached, so suitable for re-loading via :func:`StarModel.load_hdf`. :param filename: Name of file to save to. Should be .h5 file. :param path: (optional) Path within HDF file structure to save to. :param overwrite: (optional) If ``True``, delete any existing file by the same name before writing. :param append: (optional) If ``True``, then if a file exists, then just the path within the file will be updated. """ if os.path.exists(filename): store = pd.HDFStore(filename) if path in store: store.close() if overwrite: os.remove(filename) elif not append: raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename)) else: store.close() self.samples.to_hdf(filename, '{}/samples'.format(path)) store = pd.HDFStore(filename) attrs = store.get_storer('{}/samples'.format(path)).attrs attrs.properties = self.properties attrs.ic_type = type(self.ic) attrs.maxAV = self.maxAV attrs.max_distance = self.max_distance attrs.min_logg = self.min_logg attrs.use_emcee = self.use_emcee attrs._mnest_basename = self._mnest_basename attrs.name = self.name store.close()
[ "def", "save_hdf", "(", "self", ",", "filename", ",", "path", "=", "''", ",", "overwrite", "=", "False", ",", "append", "=", "False", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "store", "=", "pd", ".", "HDFStore", "(", "filename", ")", "if", "path", "in", "store", ":", "store", ".", "close", "(", ")", "if", "overwrite", ":", "os", ".", "remove", "(", "filename", ")", "elif", "not", "append", ":", "raise", "IOError", "(", "'{} in {} exists. Set either overwrite or append option.'", ".", "format", "(", "path", ",", "filename", ")", ")", "else", ":", "store", ".", "close", "(", ")", "self", ".", "samples", ".", "to_hdf", "(", "filename", ",", "'{}/samples'", ".", "format", "(", "path", ")", ")", "store", "=", "pd", ".", "HDFStore", "(", "filename", ")", "attrs", "=", "store", ".", "get_storer", "(", "'{}/samples'", ".", "format", "(", "path", ")", ")", ".", "attrs", "attrs", ".", "properties", "=", "self", ".", "properties", "attrs", ".", "ic_type", "=", "type", "(", "self", ".", "ic", ")", "attrs", ".", "maxAV", "=", "self", ".", "maxAV", "attrs", ".", "max_distance", "=", "self", ".", "max_distance", "attrs", ".", "min_logg", "=", "self", ".", "min_logg", "attrs", ".", "use_emcee", "=", "self", ".", "use_emcee", "attrs", ".", "_mnest_basename", "=", "self", ".", "_mnest_basename", "attrs", ".", "name", "=", "self", ".", "name", "store", ".", "close", "(", ")" ]
Saves object data to HDF file (only works if MCMC is run) Samples are saved to /samples location under given path, and object properties are also attached, so suitable for re-loading via :func:`StarModel.load_hdf`. :param filename: Name of file to save to. Should be .h5 file. :param path: (optional) Path within HDF file structure to save to. :param overwrite: (optional) If ``True``, delete any existing file by the same name before writing. :param append: (optional) If ``True``, then if a file exists, then just the path within the file will be updated.
[ "Saves", "object", "data", "to", "HDF", "file", "(", "only", "works", "if", "MCMC", "is", "run", ")" ]
python
train
34.285714
akfullfo/taskforce
taskforce/utils.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/utils.py#L221-L238
def setproctitle(text): """ This is a wrapper for setproctitle.setproctitle(). The call sets 'text' as the new process title and returns the previous value. The module is commonly not installed. If missing, nothing is changed, and the call returns None. The module is described here: https://pypi.python.org/pypi/setproctitle """ try: import setproctitle except Exception as e: return None else: # pragma: no cover prev = setproctitle.getproctitle() setproctitle.setproctitle(text) return prev
[ "def", "setproctitle", "(", "text", ")", ":", "try", ":", "import", "setproctitle", "except", "Exception", "as", "e", ":", "return", "None", "else", ":", "# pragma: no cover", "prev", "=", "setproctitle", ".", "getproctitle", "(", ")", "setproctitle", ".", "setproctitle", "(", "text", ")", "return", "prev" ]
This is a wrapper for setproctitle.setproctitle(). The call sets 'text' as the new process title and returns the previous value. The module is commonly not installed. If missing, nothing is changed, and the call returns None. The module is described here: https://pypi.python.org/pypi/setproctitle
[ "This", "is", "a", "wrapper", "for", "setproctitle", ".", "setproctitle", "()", ".", "The", "call", "sets", "text", "as", "the", "new", "process", "title", "and", "returns", "the", "previous", "value", "." ]
python
train
35.666667
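A self-contained exercise of the same optional-dependency pattern (the wrapper degrades to returning None when the setproctitle package is missing):

def set_title(text):
    try:
        import setproctitle
    except ImportError:
        return None
    prev = setproctitle.getproctitle()
    setproctitle.setproctitle(text)
    return prev

prev = set_title('taskforce: idle')
print('previous title:', prev)  # None when the module is not installed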