Dataset schema (nine fields, repeated in this order for every record below):

  repo              string, 7-54 chars      source repository, as "owner/name"
  path              string, 4-192 chars     file path of the module inside the repository
  url               string, 87-284 chars    GitHub URL pinned to a commit hash and line range
  code              string, 78-104k chars   full source text of the function
  code_tokens       list of strings         tokenized form of the code field
  docstring         string, 1-46.9k chars   the function's docstring
  docstring_tokens  list of strings         tokenized form of the docstring
  language          string, 1 class         always "python" in this dump
  partition         string, 3 classes       one of "train", "valid", "test"
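The records below follow this schema field by field. As a quick orientation, here is a minimal sketch of loading and iterating such a dump with the Hugging Face datasets library; the dataset identifier is a placeholder, not the real published name.

import datasets  # pip install datasets

# Placeholder ID -- substitute the dataset's actual repository name.
ds = datasets.load_dataset("org/code-docstring-corpus", split="train")
for record in ds.select(range(3)):  # peek at the first three records
    print(record["repo"], record["path"], record["partition"])
    print(record["docstring"].splitlines()[0])  # first line of the docstring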
hobson/pug-invest
pug/invest/sandbox/sim.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L263-L273
def symbol_bollinger(symbol='GOOG',
                     start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31),
                     price_type='close', cleaner=clean_dataframe,
                     window=20, sigma=1.):
    """Calculate the Bollinger indicator value

    >>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1]  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    -1.8782...
    """
    symbols = normalize_symbols(symbol)
    prices = price_dataframe(symbols, start=start, end=end, price_type=price_type, cleaner=cleaner)
    return series_bollinger(prices[symbols[0]], window=window, sigma=sigma, plot=False)
[ "def", "symbol_bollinger", "(", "symbol", "=", "'GOOG'", ",", "start", "=", "datetime", ".", "datetime", "(", "2008", ",", "1", ",", "1", ")", ",", "end", "=", "datetime", ".", "datetime", "(", "2009", ",", "12", ",", "31", ")", ",", "price_type", "=", "'close'", ",", "cleaner", "=", "clean_dataframe", ",", "window", "=", "20", ",", "sigma", "=", "1.", ")", ":", "symbols", "=", "normalize_symbols", "(", "symbol", ")", "prices", "=", "price_dataframe", "(", "symbols", ",", "start", "=", "start", ",", "end", "=", "end", ",", "price_type", "=", "price_type", ",", "cleaner", "=", "cleaner", ")", "return", "series_bollinger", "(", "prices", "[", "symbols", "[", "0", "]", "]", ",", "window", "=", "window", ",", "sigma", "=", "sigma", ",", "plot", "=", "False", ")" ]
Calculate the Bollinger indicator value

>>> symbol_bollinger("goog", '2008-1-1', '2008-2-1')[-1]  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-1.8782...
[ "Calculate", "the", "Bolinger", "indicator", "value" ]
python
train
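The helpers normalize_symbols, price_dataframe, and series_bollinger are defined elsewhere in pug-invest and are not shown in this record. As a hedged sketch only (not the repo's actual implementation), the doctest value is consistent with the usual Bollinger calculation: the price's distance from its rolling mean, in units of rolling standard deviation.

import pandas as pd

def bollinger_sketch(prices, window=20, sigma=1.):
    # prices: a pandas Series of closing prices
    mean = prices.rolling(window=window).mean()
    std = prices.rolling(window=window).std()
    # e.g. a value of -1.87 means 1.87 rolling standard deviations below the mean
    return (prices - mean) / (std * sigma)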
bram85/topydo
topydo/lib/printers/Ical.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/printers/Ical.py#L93-L146
def _convert_todo(self, p_todo):
    """ Converts a Todo instance (Topydo) to an icalendar Todo instance. """

    def _get_uid(p_todo):
        """
        Gets a unique ID from a todo item, stored by the ical tag. If the
        tag is not present, a random value is assigned to it and returned.
        """

        def generate_uid(p_length=4):
            """
            Generates a random string of the given length, used as
            identifier.
            """
            return ''.join(
                random.choice(string.ascii_letters + string.digits)
                for i in range(p_length))

        uid = p_todo.tag_value('ical')
        if not uid:
            uid = generate_uid()
            p_todo.set_tag('ical', uid)
            self.todolist.dirty = True

        return uid

    result = self.icalendar.Todo()

    # this should be called first, it may set the ical: tag and therefore
    # change the source() output.
    result['uid'] = _get_uid(p_todo)

    result['summary'] = self.icalendar.vText(p_todo.text())
    result['description'] = self.icalendar.vText(p_todo.source())
    result.add('priority', _convert_priority(p_todo.priority()))

    start = p_todo.start_date()
    if start:
        result.add('dtstart', start)

    due = p_todo.due_date()
    if due:
        result.add('due', due)

    created = p_todo.creation_date()
    if created:
        result.add('created', created)

    completed = p_todo.completion_date()
    if completed:
        completed = datetime.combine(completed, time(0, 0))
        result.add('completed', completed)

    return result
[ "def", "_convert_todo", "(", "self", ",", "p_todo", ")", ":", "def", "_get_uid", "(", "p_todo", ")", ":", "\"\"\"\n Gets a unique ID from a todo item, stored by the ical tag. If the\n tag is not present, a random value is assigned to it and returned.\n \"\"\"", "def", "generate_uid", "(", "p_length", "=", "4", ")", ":", "\"\"\"\n Generates a random string of the given length, used as\n identifier.\n \"\"\"", "return", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "for", "i", "in", "range", "(", "p_length", ")", ")", "uid", "=", "p_todo", ".", "tag_value", "(", "'ical'", ")", "if", "not", "uid", ":", "uid", "=", "generate_uid", "(", ")", "p_todo", ".", "set_tag", "(", "'ical'", ",", "uid", ")", "self", ".", "todolist", ".", "dirty", "=", "True", "return", "uid", "result", "=", "self", ".", "icalendar", ".", "Todo", "(", ")", "# this should be called first, it may set the ical: tag and therefore", "# change the source() output.", "result", "[", "'uid'", "]", "=", "_get_uid", "(", "p_todo", ")", "result", "[", "'summary'", "]", "=", "self", ".", "icalendar", ".", "vText", "(", "p_todo", ".", "text", "(", ")", ")", "result", "[", "'description'", "]", "=", "self", ".", "icalendar", ".", "vText", "(", "p_todo", ".", "source", "(", ")", ")", "result", ".", "add", "(", "'priority'", ",", "_convert_priority", "(", "p_todo", ".", "priority", "(", ")", ")", ")", "start", "=", "p_todo", ".", "start_date", "(", ")", "if", "start", ":", "result", ".", "add", "(", "'dtstart'", ",", "start", ")", "due", "=", "p_todo", ".", "due_date", "(", ")", "if", "due", ":", "result", ".", "add", "(", "'due'", ",", "due", ")", "created", "=", "p_todo", ".", "creation_date", "(", ")", "if", "created", ":", "result", ".", "add", "(", "'created'", ",", "created", ")", "completed", "=", "p_todo", ".", "completion_date", "(", ")", "if", "completed", ":", "completed", "=", "datetime", ".", "combine", "(", "completed", ",", "time", "(", "0", ",", "0", ")", ")", "result", ".", "add", "(", "'completed'", ",", "completed", ")", "return", "result" ]
Converts a Todo instance (Topydo) to an icalendar Todo instance.
[ "Converts", "a", "Todo", "instance", "(", "Topydo", ")", "to", "an", "icalendar", "Todo", "instance", "." ]
python
train
htm-community/menorah
menorah/menorah.py
https://github.com/htm-community/menorah/blob/1991b01eda3f6361b22ed165b4a688ae3fb2deaf/menorah/menorah.py#L225-L237
def stream(self, handler, whenDone=None):
    """
    Fetches data from river streams and feeds them into the given function.

    :param handler: (function) passed headers [list] and row [list] of the
                    data for one time step, for every row of data
    """
    self._createConfluence()
    headers = ["timestamp"] + self.getStreamIds()
    for row in self._confluence:
        handler(headers, row)
    if whenDone is not None:
        return whenDone()
[ "def", "stream", "(", "self", ",", "handler", ",", "whenDone", "=", "None", ")", ":", "self", ".", "_createConfluence", "(", ")", "headers", "=", "[", "\"timestamp\"", "]", "+", "self", ".", "getStreamIds", "(", ")", "for", "row", "in", "self", ".", "_confluence", ":", "handler", "(", "headers", ",", "row", ")", "if", "whenDone", "is", "not", "None", ":", "return", "whenDone", "(", ")" ]
Fetches data from river streams and feeds them into the given function.

:param handler: (function) passed headers [list] and row [list] of the
                data for one time step, for every row of data
[ "Fetches", "data", "from", "river", "streams", "and", "feeds", "them", "into", "the", "given", "function", ".", ":", "param", "handler", ":", "(", "function", ")", "passed", "headers", "[", "list", "]", "and", "row", "[", "list", "]", "of", "the", "data", "for", "one", "time", "step", "for", "every", "row", "of", "data" ]
python
train
saltstack/salt
salt/modules/xapi_virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xapi_virt.py#L114-L121
def _get_xtool():
    '''
    Internal, returns xl or xm command line path
    '''
    for xtool in ['xl', 'xm']:
        path = salt.utils.path.which(xtool)
        if path is not None:
            return path
[ "def", "_get_xtool", "(", ")", ":", "for", "xtool", "in", "[", "'xl'", ",", "'xm'", "]", ":", "path", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "xtool", ")", "if", "path", "is", "not", "None", ":", "return", "path" ]
Internal, returns xl or xm command line path
[ "Internal", "returns", "xl", "or", "xm", "command", "line", "path" ]
python
train
shoeffner/cvloop
tools/sanitize_ipynb.py
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/sanitize_ipynb.py#L8-L30
def main():
    """Sanitizes the loaded *.ipynb."""
    with open(sys.argv[1], 'r') as nbfile:
        notebook = json.load(nbfile)

    # remove kernelspec (venvs)
    try:
        del notebook['metadata']['kernelspec']
    except KeyError:
        pass

    # remove outputs and metadata, set execution counts to None
    for cell in notebook['cells']:
        try:
            if cell['cell_type'] == 'code':
                cell['outputs'] = []
                cell['execution_count'] = None
                cell['metadata'] = {}
        except KeyError:
            pass

    with open(sys.argv[1], 'w') as nbfile:
        json.dump(notebook, nbfile, indent=1)
[ "def", "main", "(", ")", ":", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ",", "'r'", ")", "as", "nbfile", ":", "notebook", "=", "json", ".", "load", "(", "nbfile", ")", "# remove kernelspec (venvs)", "try", ":", "del", "notebook", "[", "'metadata'", "]", "[", "'kernelspec'", "]", "except", "KeyError", ":", "pass", "# remove outputs and metadata, set execution counts to None", "for", "cell", "in", "notebook", "[", "'cells'", "]", ":", "try", ":", "if", "cell", "[", "'cell_type'", "]", "==", "'code'", ":", "cell", "[", "'outputs'", "]", "=", "[", "]", "cell", "[", "'execution_count'", "]", "=", "None", "cell", "[", "'metadata'", "]", "=", "{", "}", "except", "KeyError", ":", "pass", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ",", "'w'", ")", "as", "nbfile", ":", "json", ".", "dump", "(", "notebook", ",", "nbfile", ",", "indent", "=", "1", ")" ]
Sanitizes the loaded *.ipynb.
[ "Sanitizes", "the", "loaded", "*", ".", "ipynb", "." ]
python
train
kmpm/nodemcu-uploader
nodemcu_uploader/uploader.py
https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L244-L260
def read_file(self, filename, destination=''):
    """reading data from device into local file"""
    if not destination:
        destination = filename
    log.info('Transferring %s to %s', filename, destination)
    data = self.download_file(filename)

    # Just in case, the filename may contain folder, so create it if needed.
    log.info(destination)
    if not os.path.exists(os.path.dirname(destination)):
        try:
            os.makedirs(os.path.dirname(destination))
        except OSError as e:  # Guard against race condition
            if e.errno != errno.EEXIST:
                raise
    with open(destination, 'w') as fil:
        fil.write(data)
[ "def", "read_file", "(", "self", ",", "filename", ",", "destination", "=", "''", ")", ":", "if", "not", "destination", ":", "destination", "=", "filename", "log", ".", "info", "(", "'Transferring %s to %s'", ",", "filename", ",", "destination", ")", "data", "=", "self", ".", "download_file", "(", "filename", ")", "# Just in case, the filename may contain folder, so create it if needed.", "log", ".", "info", "(", "destination", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "destination", ")", ")", ":", "try", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "destination", ")", ")", "except", "OSError", "as", "e", ":", "# Guard against race condition", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "with", "open", "(", "destination", ",", "'w'", ")", "as", "fil", ":", "fil", ".", "write", "(", "data", ")" ]
reading data from device into local file
[ "reading", "data", "from", "device", "into", "local", "file" ]
python
valid
apache/airflow
airflow/contrib/hooks/bigquery_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1915-L1927
def fetchall(self):
    """
    Fetch all (remaining) rows of a query result, returning them as a
    sequence of sequences (e.g. a list of tuples).
    """
    result = []
    while True:
        one = self.fetchone()
        if one is None:
            break
        else:
            result.append(one)
    return result
[ "def", "fetchall", "(", "self", ")", ":", "result", "=", "[", "]", "while", "True", ":", "one", "=", "self", ".", "fetchone", "(", ")", "if", "one", "is", "None", ":", "break", "else", ":", "result", ".", "append", "(", "one", ")", "return", "result" ]
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples).
[ "Fetch", "all", "(", "remaining", ")", "rows", "of", "a", "query", "result", "returning", "them", "as", "a", "sequence", "of", "sequences", "(", "e", ".", "g", ".", "a", "list", "of", "tuples", ")", "." ]
python
test
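Because fetchone() returns None once the result set is exhausted, the loop above is the standard PEP 249 drain pattern. A hedged usage sketch follows; the hook/cursor construction is assumed rather than shown in this record.

# Assumed setup: an Airflow BigQueryHook exposing a PEP 249 style cursor.
cursor = hook.get_conn().cursor()  # `hook` construction omitted here
cursor.execute("SELECT name, value FROM `project.dataset.table`")
for name, value in cursor.fetchall():  # drains the result set row by row
    print(name, value)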
buildbot/buildbot
master/docs/bbdocs/ext.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/docs/bbdocs/ext.py#L123-L130
def make_ref_target_directive(ref_type, indextemplates=None, **kwargs):
    """
    Create and return a L{BBRefTargetDirective} subclass.
    """
    class_vars = dict(ref_type=ref_type, indextemplates=indextemplates)
    class_vars.update(kwargs)
    return type("BB%sRefTargetDirective" % (ref_type.capitalize(),),
                (BBRefTargetDirective,), class_vars)
[ "def", "make_ref_target_directive", "(", "ref_type", ",", "indextemplates", "=", "None", ",", "*", "*", "kwargs", ")", ":", "class_vars", "=", "dict", "(", "ref_type", "=", "ref_type", ",", "indextemplates", "=", "indextemplates", ")", "class_vars", ".", "update", "(", "kwargs", ")", "return", "type", "(", "\"BB%sRefTargetDirective\"", "%", "(", "ref_type", ".", "capitalize", "(", ")", ",", ")", ",", "(", "BBRefTargetDirective", ",", ")", ",", "class_vars", ")" ]
Create and return a L{BBRefTargetDirective} subclass.
[ "Create", "and", "return", "a", "L", "{", "BBRefTargetDirective", "}", "subclass", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/api/mlag.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/mlag.py#L151-L163
def _parse_peer_link(self, config):
    """Scans the config block and parses the peer-link value

    Args:
        config (str): The config block to scan

    Returns:
        dict: A dict object that is intended to be merged into the
            resource dict
    """
    match = re.search(r'peer-link (\S+)', config)
    value = match.group(1) if match else None
    return dict(peer_link=value)
[ "def", "_parse_peer_link", "(", "self", ",", "config", ")", ":", "match", "=", "re", ".", "search", "(", "r'peer-link (\\S+)'", ",", "config", ")", "value", "=", "match", ".", "group", "(", "1", ")", "if", "match", "else", "None", "return", "dict", "(", "peer_link", "=", "value", ")" ]
Scans the config block and parses the peer-link value

Args:
    config (str): The config block to scan

Returns:
    dict: A dict object that is intended to be merged into the
        resource dict
[ "Scans", "the", "config", "block", "and", "parses", "the", "peer", "-", "link", "value" ]
python
train
JonathanRaiman/pytreebank
pytreebank/download.py
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/download.py#L9-L25
def delete_paths(paths):
    """
    Delete a list of paths that are files or directories.
    If a file/directory does not exist, skip it.

    Arguments:
    ----------
        paths : list<str>, names of files/directories to remove.
    """
    for path in paths:
        if exists(path):
            if isfile(path):
                remove(path)
            else:
                rmtree(path)
[ "def", "delete_paths", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "exists", "(", "path", ")", ":", "if", "isfile", "(", "path", ")", ":", "remove", "(", "path", ")", "else", ":", "rmtree", "(", "path", ")" ]
Delete a list of paths that are files or directories.
If a file/directory does not exist, skip it.

Arguments:
----------
paths : list<str>, names of files/directories to remove.
[ "Delete", "a", "list", "of", "paths", "that", "are", "files", "or", "directories", ".", "If", "a", "file", "/", "directory", "does", "not", "exist", "skip", "it", "." ]
python
train
twisted/mantissa
xmantissa/websession.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/websession.py#L196-L213
def authenticatedUserForKey(self, key):
    """
    Find a persistent session for a user.

    @type key: L{bytes}
    @param key: The persistent session identifier.

    @rtype: L{bytes} or C{None}
    @return: The avatar ID the session belongs to, or C{None} if no such
        session exists.
    """
    session = self.store.findFirst(
        PersistentSession, PersistentSession.sessionKey == key)
    if session is None:
        return None
    else:
        session.renew()
        return session.authenticatedAs
[ "def", "authenticatedUserForKey", "(", "self", ",", "key", ")", ":", "session", "=", "self", ".", "store", ".", "findFirst", "(", "PersistentSession", ",", "PersistentSession", ".", "sessionKey", "==", "key", ")", "if", "session", "is", "None", ":", "return", "None", "else", ":", "session", ".", "renew", "(", ")", "return", "session", ".", "authenticatedAs" ]
Find a persistent session for a user.

@type key: L{bytes}
@param key: The persistent session identifier.

@rtype: L{bytes} or C{None}
@return: The avatar ID the session belongs to, or C{None} if no such
    session exists.
[ "Find", "a", "persistent", "session", "for", "a", "user", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/versions.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L209-L229
def update_lipd_v1_3(d):
    """
    Update LiPD v1.2 to v1.3
    - Added 'createdBy' key
    - Top-level folder inside LiPD archives are named "bag". (No longer <datasetname>)
    - .jsonld file is now generically named 'metadata.jsonld' (No longer <datasetname>.lpd)
    - All "paleo" and "chron" prefixes are removed from "paleoMeasurementTable", "paleoModel", etc.
    - Merge isotopeInterpretation and climateInterpretation into "interpretation" block
    - ensemble table entry is a list that allows multiple tables
    - summary table entry is a list that allows multiple tables

    :param dict d: Metadata v1.2
    :return dict d: Metadata v1.3
    """
    # sub routine (recursive): changes all the key names and merges interpretation
    d = update_lipd_v1_3_names(d)
    # sub routine: changes ensemble and summary table structure
    d = update_lipd_v1_3_structure(d)
    d["lipdVersion"] = 1.3
    if "LiPDVersion" in d:
        del d["LiPDVersion"]
    return d
[ "def", "update_lipd_v1_3", "(", "d", ")", ":", "# sub routine (recursive): changes all the key names and merges interpretation", "d", "=", "update_lipd_v1_3_names", "(", "d", ")", "# sub routine: changes ensemble and summary table structure", "d", "=", "update_lipd_v1_3_structure", "(", "d", ")", "d", "[", "\"lipdVersion\"", "]", "=", "1.3", "if", "\"LiPDVersion\"", "in", "d", ":", "del", "d", "[", "\"LiPDVersion\"", "]", "return", "d" ]
Update LiPD v1.2 to v1.3
- Added 'createdBy' key
- Top-level folder inside LiPD archives are named "bag". (No longer <datasetname>)
- .jsonld file is now generically named 'metadata.jsonld' (No longer <datasetname>.lpd)
- All "paleo" and "chron" prefixes are removed from "paleoMeasurementTable", "paleoModel", etc.
- Merge isotopeInterpretation and climateInterpretation into "interpretation" block
- ensemble table entry is a list that allows multiple tables
- summary table entry is a list that allows multiple tables

:param dict d: Metadata v1.2
:return dict d: Metadata v1.3
[ "Update", "LiPD", "v1", ".", "2", "to", "v1", ".", "3", "-", "Added", "createdBy", "key", "-", "Top", "-", "level", "folder", "inside", "LiPD", "archives", "are", "named", "bag", ".", "(", "No", "longer", "<datasetname", ">", ")", "-", ".", "jsonld", "file", "is", "now", "generically", "named", "metadata", ".", "jsonld", "(", "No", "longer", "<datasetname", ">", ".", "lpd", ")", "-", "All", "paleo", "and", "chron", "prefixes", "are", "removed", "from", "paleoMeasurementTable", "paleoModel", "etc", ".", "-", "Merge", "isotopeInterpretation", "and", "climateInterpretation", "into", "interpretation", "block", "-", "ensemble", "table", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", "-", "summary", "table", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", ":", "param", "dict", "d", ":", "Metadata", "v1", ".", "2", ":", "return", "dict", "d", ":", "Metadata", "v1", ".", "3" ]
python
train
marcolagi/quantulum
quantulum/classifier.py
https://github.com/marcolagi/quantulum/blob/28b697dfa997116c1aa3ef63a3ceb8725bffd24f/quantulum/classifier.py#L57-L66
def clean_text(text):
    """Clean text for TFIDF."""
    # Note: Python 2 source (ur'' literal); \p{P} is not supported by the
    # stdlib re module, so the project presumably uses the third-party
    # `regex` package imported under the name `re`.
    new_text = re.sub(ur'\p{P}+', ' ', text)
    new_text = [stem(i) for i in new_text.lower().split()
                if not re.findall(r'[0-9]', i)]
    new_text = ' '.join(new_text)
    return new_text
[ "def", "clean_text", "(", "text", ")", ":", "new_text", "=", "re", ".", "sub", "(", "ur'\\p{P}+'", ",", "' '", ",", "text", ")", "new_text", "=", "[", "stem", "(", "i", ")", "for", "i", "in", "new_text", ".", "lower", "(", ")", ".", "split", "(", ")", "if", "not", "re", ".", "findall", "(", "r'[0-9]'", ",", "i", ")", "]", "new_text", "=", "' '", ".", "join", "(", "new_text", ")", "return", "new_text" ]
Clean text for TFIDF.
[ "Clean", "text", "for", "TFIDF", "." ]
python
train
sdispater/cleo
cleo/commands/command.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L174-L185
def create_question(self, question, type=None, **kwargs):
    """
    Returns a Question of specified type.
    """
    if not type:
        return Question(question, **kwargs)

    if type == "choice":
        return ChoiceQuestion(question, **kwargs)

    if type == "confirmation":
        return ConfirmationQuestion(question, **kwargs)
[ "def", "create_question", "(", "self", ",", "question", ",", "type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "type", ":", "return", "Question", "(", "question", ",", "*", "*", "kwargs", ")", "if", "type", "==", "\"choice\"", ":", "return", "ChoiceQuestion", "(", "question", ",", "*", "*", "kwargs", ")", "if", "type", "==", "\"confirmation\"", ":", "return", "ConfirmationQuestion", "(", "question", ",", "*", "*", "kwargs", ")" ]
Returns a Question of specified type.
[ "Returns", "a", "Question", "of", "specified", "type", "." ]
python
train
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/corebluetooth/gatt.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/gatt.py#L84-L92
def read_value(self, timeout_sec=TIMEOUT_SEC):
    """Read the value of this characteristic."""
    # Kick off a query to read the value of the characteristic, then wait
    # for the result to return asynchronously.
    self._value_read.clear()
    self._device._peripheral.readValueForCharacteristic_(self._characteristic)
    if not self._value_read.wait(timeout_sec):
        raise RuntimeError('Exceeded timeout waiting to read characteristic value!')
    return self._characteristic.value()
[ "def", "read_value", "(", "self", ",", "timeout_sec", "=", "TIMEOUT_SEC", ")", ":", "# Kick off a query to read the value of the characteristic, then wait", "# for the result to return asyncronously.", "self", ".", "_value_read", ".", "clear", "(", ")", "self", ".", "_device", ".", "_peripheral", ".", "readValueForCharacteristic_", "(", "self", ".", "_characteristic", ")", "if", "not", "self", ".", "_value_read", ".", "wait", "(", "timeout_sec", ")", ":", "raise", "RuntimeError", "(", "'Exceeded timeout waiting to read characteristic value!'", ")", "return", "self", ".", "_characteristic", ".", "value", "(", ")" ]
Read the value of this characteristic.
[ "Read", "the", "value", "of", "this", "characteristic", "." ]
python
valid
pytroll/satpy
satpy/multiscene.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L175-L188
def scenes(self):
    """Get list of Scene objects contained in this MultiScene.

    .. note::

        If the Scenes contained in this object are stored in a
        generator (not list or tuple) then accessing this property
        will load/iterate through the generator possibly

    """
    if self.is_generator:
        log.debug("Forcing iteration of generator-like object of Scenes")
        self._scenes = list(self._scenes)
    return self._scenes
[ "def", "scenes", "(", "self", ")", ":", "if", "self", ".", "is_generator", ":", "log", ".", "debug", "(", "\"Forcing iteration of generator-like object of Scenes\"", ")", "self", ".", "_scenes", "=", "list", "(", "self", ".", "_scenes", ")", "return", "self", ".", "_scenes" ]
Get list of Scene objects contained in this MultiScene.

.. note::

    If the Scenes contained in this object are stored in a
    generator (not list or tuple) then accessing this property
    will load/iterate through the generator possibly
[ "Get", "list", "of", "Scene", "objects", "contained", "in", "this", "MultiScene", "." ]
python
train
IndicoDataSolutions/IndicoIo-python
indicoio/custom/custom.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L337-L356
def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs):
    """
    This API endpoint allows you to authorize another user to access your model
    in a read or write capacity. Before calling authorize, you must first make
    sure your model has been registered.

    Inputs:
        email - String: The email of the user you would like to share access with.
        permission_type (optional) - String: One of ['read', 'write']. Users with
            read permissions can only call `predict`. Users with `write`
            permissions can add new input examples and train models.
        api_key (optional) - String: Your API key, required only if the key has
            not been declared elsewhere. This allows the API to recognize a
            request as yours and automatically route it to the appropriate
            destination.
        cloud (optional) - String: Your private cloud domain, required only if
            the key has not been declared elsewhere. This allows the API to
            recognize a request as yours and automatically route it to the
            appropriate destination.
    """
    kwargs['permission_type'] = permission_type
    kwargs['email'] = email
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"}
    return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
[ "def", "authorize", "(", "self", ",", "email", ",", "permission_type", "=", "'read'", ",", "cloud", "=", "None", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'permission_type'", "]", "=", "permission_type", "kwargs", "[", "'email'", "]", "=", "email", "url_params", "=", "{", "\"batch\"", ":", "False", ",", "\"api_key\"", ":", "api_key", ",", "\"version\"", ":", "version", ",", "\"method\"", ":", "\"authorize\"", "}", "return", "self", ".", "_api_handler", "(", "None", ",", "cloud", "=", "cloud", ",", "api", "=", "\"custom\"", ",", "url_params", "=", "url_params", ",", "*", "*", "kwargs", ")" ]
This API endpoint allows you to authorize another user to access your model
in a read or write capacity. Before calling authorize, you must first make
sure your model has been registered.

Inputs:
    email - String: The email of the user you would like to share access with.
    permission_type (optional) - String: One of ['read', 'write']. Users with
        read permissions can only call `predict`. Users with `write`
        permissions can add new input examples and train models.
    api_key (optional) - String: Your API key, required only if the key has
        not been declared elsewhere. This allows the API to recognize a
        request as yours and automatically route it to the appropriate
        destination.
    cloud (optional) - String: Your private cloud domain, required only if
        the key has not been declared elsewhere. This allows the API to
        recognize a request as yours and automatically route it to the
        appropriate destination.
[ "This", "API", "endpoint", "allows", "you", "to", "authorize", "another", "user", "to", "access", "your", "model", "in", "a", "read", "or", "write", "capacity", ".", "Before", "calling", "authorize", "you", "must", "first", "make", "sure", "your", "model", "has", "been", "registered", "." ]
python
train
PythonCharmers/python-future
src/future/types/newbytes.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newbytes.py#L293-L304
def splitlines(self, keepends=False):
    """
    B.splitlines([keepends]) -> list of lines

    Return a list of the lines in B, breaking at line boundaries.
    Line breaks are not included in the resulting list unless keepends
    is given and true.
    """
    # Py2 str.splitlines() takes keepends as an optional parameter,
    # not as a keyword argument as in Python 3 bytes.
    parts = super(newbytes, self).splitlines(keepends)
    return [newbytes(part) for part in parts]
[ "def", "splitlines", "(", "self", ",", "keepends", "=", "False", ")", ":", "# Py2 str.splitlines() takes keepends as an optional parameter,", "# not as a keyword argument as in Python 3 bytes.", "parts", "=", "super", "(", "newbytes", ",", "self", ")", ".", "splitlines", "(", "keepends", ")", "return", "[", "newbytes", "(", "part", ")", "for", "part", "in", "parts", "]" ]
B.splitlines([keepends]) -> list of lines

Return a list of the lines in B, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
[ "B", ".", "splitlines", "(", "[", "keepends", "]", ")", "-", ">", "list", "of", "lines" ]
python
train
timothyb0912/pylogit
pylogit/choice_tools.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L585-L715
def create_design_matrix(long_form, specification_dict, alt_id_col, names=None):
    """
    Parameters
    ----------
    long_form : pandas dataframe.
        Contains one row for each available alternative, for each observation.
    specification_dict : OrderedDict.
        Keys are a proper subset of the columns in `long_form_df`. Values are
        either a list or a single string, `"all_diff"` or `"all_same"`. If a
        list, the elements should be:
            - single objects that are within the alternative ID column of
              `long_form_df`
            - lists of objects that are within the alternative ID column of
              `long_form_df`. For each single object in the list, a unique
              column will be created (i.e. there will be a unique coefficient
              for that variable in the corresponding utility equation of the
              corresponding alternative). For lists within the
              `specification_dict` values, a single column will be created
              for all the alternatives within iterable (i.e. there will be
              one common coefficient for the variables in the iterable).
    alt_id_col : str.
        Column name which denotes the column in `long_form` that contains the
        alternative ID for each row in `long_form`.
    names : OrderedDict, optional.
        Should have the same keys as `specification_dict`. For each key:
            - if the corresponding value in `specification_dict` is
              "all_same", then there should be a single string as the value
              in names.
            - if the corresponding value in `specification_dict` is
              "all_diff", then there should be a list of strings as the value
              in names. There should be one string in the value in names for
              each possible alternative.
            - if the corresponding value in `specification_dict` is a list,
              then there should be a list of strings as the value in names.
              There should be one string in the value in names per item in
              the value in `specification_dict`.
        Default == None.

    Returns
    -------
    design_matrix, var_names: tuple with two elements.
        First element is the design matrix, a numpy array with some number of
        columns and as many rows as are in `long_form`. Each column
        corresponds to a coefficient to be estimated. The second element is a
        list of strings denoting the names of each coefficient, with one
        variable name per column in the design matrix.
    """
    ##########
    # Check that the arguments meet this function's assumptions.
    # Fail gracefully if the arguments do not meet the function's requirements.
    #########
    check_argument_type(long_form, specification_dict)
    ensure_alt_id_in_long_form(alt_id_col, long_form)
    ensure_specification_cols_are_in_dataframe(specification_dict, long_form)

    # Find out what and how many possible alternatives there are
    unique_alternatives = np.sort(long_form[alt_id_col].unique())
    num_alternatives = len(unique_alternatives)
    check_type_and_values_of_specification_dict(specification_dict,
                                                unique_alternatives)

    # Check the user passed dictionary of names if the user passed such a list
    if names is not None:
        ensure_object_is_ordered_dict(names, "names")
        check_keys_and_values_of_name_dictionary(names, specification_dict,
                                                 num_alternatives)

    ##########
    # Actually create the design matrix
    ##########
    # Create a list of the columns of independent variables
    independent_vars = []
    # Create a list of variable names
    var_names = []

    # Create the columns of the design matrix based on the specification dict.
    for variable in specification_dict:
        specification = specification_dict[variable]
        if specification == "all_same":
            # Create the variable column
            independent_vars.append(long_form[variable].values)
            # Create the column name
            var_names.append(variable)
        elif specification == "all_diff":
            for alt in unique_alternatives:
                # Create the variable column
                independent_vars.append((long_form[alt_id_col] == alt).values *
                                        long_form[variable].values)
                # create the column name
                var_names.append("{}_{}".format(variable, alt))
        else:
            for group in specification:
                if isinstance(group, list):
                    # Create the variable column
                    independent_vars.append(
                        long_form[alt_id_col].isin(group).values *
                        long_form[variable].values)
                    # Create the column name
                    var_names.append("{}_{}".format(variable, str(group)))
                else:  # the group is an integer
                    # Create the variable column
                    new_col_vals = ((long_form[alt_id_col] == group).values *
                                    long_form[variable].values)
                    independent_vars.append(new_col_vals)
                    # Create the column name
                    var_names.append("{}_{}".format(variable, group))

    # Create the final design matrix
    design_matrix = np.hstack((x[:, None] for x in independent_vars))

    # Use the list of names passed by the user, if the user passed such a list
    if names is not None:
        var_names = []
        for value in names.values():
            if isinstance(value, str):
                var_names.append(value)
            else:
                for inner_name in value:
                    var_names.append(inner_name)

    return design_matrix, var_names
[ "def", "create_design_matrix", "(", "long_form", ",", "specification_dict", ",", "alt_id_col", ",", "names", "=", "None", ")", ":", "##########", "# Check that the arguments meet this functions assumptions.", "# Fail gracefully if the arguments do not meet the function's requirements.", "#########", "check_argument_type", "(", "long_form", ",", "specification_dict", ")", "ensure_alt_id_in_long_form", "(", "alt_id_col", ",", "long_form", ")", "ensure_specification_cols_are_in_dataframe", "(", "specification_dict", ",", "long_form", ")", "# Find out what and how many possible alternatives there are", "unique_alternatives", "=", "np", ".", "sort", "(", "long_form", "[", "alt_id_col", "]", ".", "unique", "(", ")", ")", "num_alternatives", "=", "len", "(", "unique_alternatives", ")", "check_type_and_values_of_specification_dict", "(", "specification_dict", ",", "unique_alternatives", ")", "# Check the user passed dictionary of names if the user passed such a list", "if", "names", "is", "not", "None", ":", "ensure_object_is_ordered_dict", "(", "names", ",", "\"names\"", ")", "check_keys_and_values_of_name_dictionary", "(", "names", ",", "specification_dict", ",", "num_alternatives", ")", "##########", "# Actually create the design matrix", "##########", "# Create a list of the columns of independent variables", "independent_vars", "=", "[", "]", "# Create a list of variable names", "var_names", "=", "[", "]", "# Create the columns of the design matrix based on the specification dict.", "for", "variable", "in", "specification_dict", ":", "specification", "=", "specification_dict", "[", "variable", "]", "if", "specification", "==", "\"all_same\"", ":", "# Create the variable column", "independent_vars", ".", "append", "(", "long_form", "[", "variable", "]", ".", "values", ")", "# Create the column name", "var_names", ".", "append", "(", "variable", ")", "elif", "specification", "==", "\"all_diff\"", ":", "for", "alt", "in", "unique_alternatives", ":", "# Create the variable column", "independent_vars", ".", "append", "(", "(", "long_form", "[", "alt_id_col", "]", "==", "alt", ")", ".", "values", "*", "long_form", "[", "variable", "]", ".", "values", ")", "# create the column name", "var_names", ".", "append", "(", "\"{}_{}\"", ".", "format", "(", "variable", ",", "alt", ")", ")", "else", ":", "for", "group", "in", "specification", ":", "if", "isinstance", "(", "group", ",", "list", ")", ":", "# Create the variable column", "independent_vars", ".", "append", "(", "long_form", "[", "alt_id_col", "]", ".", "isin", "(", "group", ")", ".", "values", "*", "long_form", "[", "variable", "]", ".", "values", ")", "# Create the column name", "var_names", ".", "append", "(", "\"{}_{}\"", ".", "format", "(", "variable", ",", "str", "(", "group", ")", ")", ")", "else", ":", "# the group is an integer", "# Create the variable column", "new_col_vals", "=", "(", "(", "long_form", "[", "alt_id_col", "]", "==", "group", ")", ".", "values", "*", "long_form", "[", "variable", "]", ".", "values", ")", "independent_vars", ".", "append", "(", "new_col_vals", ")", "# Create the column name", "var_names", ".", "append", "(", "\"{}_{}\"", ".", "format", "(", "variable", ",", "group", ")", ")", "# Create the final design matrix", "design_matrix", "=", "np", ".", "hstack", "(", "(", "x", "[", ":", ",", "None", "]", "for", "x", "in", "independent_vars", ")", ")", "# Use the list of names passed by the user, if the user passed such a list", "if", "names", "is", "not", "None", ":", "var_names", "=", "[", "]", "for", "value", 
"in", "names", ".", "values", "(", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "var_names", ".", "append", "(", "value", ")", "else", ":", "for", "inner_name", "in", "value", ":", "var_names", ".", "append", "(", "inner_name", ")", "return", "design_matrix", ",", "var_names" ]
Parameters
----------
long_form : pandas dataframe.
    Contains one row for each available alternative, for each observation.
specification_dict : OrderedDict.
    Keys are a proper subset of the columns in `long_form_df`. Values are
    either a list or a single string, `"all_diff"` or `"all_same"`. If a
    list, the elements should be:
        - single objects that are within the alternative ID column of
          `long_form_df`
        - lists of objects that are within the alternative ID column of
          `long_form_df`. For each single object in the list, a unique column
          will be created (i.e. there will be a unique coefficient for that
          variable in the corresponding utility equation of the corresponding
          alternative). For lists within the `specification_dict` values, a
          single column will be created for all the alternatives within
          iterable (i.e. there will be one common coefficient for the
          variables in the iterable).
alt_id_col : str.
    Column name which denotes the column in `long_form` that contains the
    alternative ID for each row in `long_form`.
names : OrderedDict, optional.
    Should have the same keys as `specification_dict`. For each key:
        - if the corresponding value in `specification_dict` is "all_same",
          then there should be a single string as the value in names.
        - if the corresponding value in `specification_dict` is "all_diff",
          then there should be a list of strings as the value in names. There
          should be one string in the value in names for each possible
          alternative.
        - if the corresponding value in `specification_dict` is a list, then
          there should be a list of strings as the value in names. There
          should be one string in the value in names per item in the value in
          `specification_dict`.
    Default == None.

Returns
-------
design_matrix, var_names: tuple with two elements.
    First element is the design matrix, a numpy array with some number of
    columns and as many rows as are in `long_form`. Each column corresponds
    to a coefficient to be estimated. The second element is a list of strings
    denoting the names of each coefficient, with one variable name per column
    in the design matrix.
[ "Parameters", "----------", "long_form", ":", "pandas", "dataframe", ".", "Contains", "one", "row", "for", "each", "available", "alternative", "for", "each", "observation", ".", "specification_dict", ":", "OrderedDict", ".", "Keys", "are", "a", "proper", "subset", "of", "the", "columns", "in", "long_form_df", ".", "Values", "are", "either", "a", "list", "or", "a", "single", "string", "all_diff", "or", "all_same", ".", "If", "a", "list", "the", "elements", "should", "be", ":" ]
python
train
nerox8664/pytorch2keras
pytorch2keras/reshape_layers.py
https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/reshape_layers.py#L64-L96
def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):
    """
    Convert reshape layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting reshape ...')
    if names == 'short':
        tf_name = 'RESH' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    if len(inputs) > 1:
        if layers[inputs[1]][0] == -1:
            print('Cannot deduct batch size! It will be omitted, but result may be wrong.')
        reshape = keras.layers.Reshape(layers[inputs[1] + '_np'], name=tf_name)
        layers[scope_name] = reshape(layers[inputs[0]])
    else:
        if inputs[0] in layers:
            reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)
            layers[scope_name] = reshape(layers[inputs[0]])
        else:
            print('Skip weight matrix transpose, but result may be wrong.')
[ "def", "convert_reshape", "(", "params", ",", "w_name", ",", "scope_name", ",", "inputs", ",", "layers", ",", "weights", ",", "names", ")", ":", "print", "(", "'Converting reshape ...'", ")", "if", "names", "==", "'short'", ":", "tf_name", "=", "'RESH'", "+", "random_string", "(", "4", ")", "elif", "names", "==", "'keep'", ":", "tf_name", "=", "w_name", "else", ":", "tf_name", "=", "w_name", "+", "str", "(", "random", ".", "random", "(", ")", ")", "if", "len", "(", "inputs", ")", ">", "1", ":", "if", "layers", "[", "inputs", "[", "1", "]", "]", "[", "0", "]", "==", "-", "1", ":", "print", "(", "'Cannot deduct batch size! It will be omitted, but result may be wrong.'", ")", "reshape", "=", "keras", ".", "layers", ".", "Reshape", "(", "layers", "[", "inputs", "[", "1", "]", "+", "'_np'", "]", ",", "name", "=", "tf_name", ")", "layers", "[", "scope_name", "]", "=", "reshape", "(", "layers", "[", "inputs", "[", "0", "]", "]", ")", "else", ":", "if", "inputs", "[", "0", "]", "in", "layers", ":", "reshape", "=", "keras", ".", "layers", ".", "Reshape", "(", "params", "[", "'shape'", "]", "[", "1", ":", "]", ",", "name", "=", "tf_name", ")", "layers", "[", "scope_name", "]", "=", "reshape", "(", "layers", "[", "inputs", "[", "0", "]", "]", ")", "else", ":", "print", "(", "'Skip weight matrix transpose, but result may be wrong.'", ")" ]
Convert reshape layer.

Args:
    params: dictionary with layer parameters
    w_name: name prefix in state_dict
    scope_name: pytorch scope name
    inputs: pytorch node inputs
    layers: dictionary with keras tensors
    weights: pytorch state_dict
    names: use short names for keras layers
[ "Convert", "reshape", "layer", "." ]
python
valid
HDI-Project/ballet
ballet/feature.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/feature.py#L40-L62
def _name_estimators(estimators):
    """Generate names for estimators.

    Adapted from sklearn.pipeline._name_estimators
    """
    def get_name(estimator):
        if isinstance(estimator, DelegatingRobustTransformer):
            return get_name(estimator._transformer)
        return type(estimator).__name__.lower()

    names = list(map(get_name, estimators))
    counter = dict(Counter(names))
    counter = select_values(lambda x: x > 1, counter)

    for i in reversed(range(len(estimators))):
        name = names[i]
        if name in counter:
            names[i] += "-%d" % counter[name]
            counter[name] -= 1

    return list(zip(names, estimators))
[ "def", "_name_estimators", "(", "estimators", ")", ":", "def", "get_name", "(", "estimator", ")", ":", "if", "isinstance", "(", "estimator", ",", "DelegatingRobustTransformer", ")", ":", "return", "get_name", "(", "estimator", ".", "_transformer", ")", "return", "type", "(", "estimator", ")", ".", "__name__", ".", "lower", "(", ")", "names", "=", "list", "(", "map", "(", "get_name", ",", "estimators", ")", ")", "counter", "=", "dict", "(", "Counter", "(", "names", ")", ")", "counter", "=", "select_values", "(", "lambda", "x", ":", "x", ">", "1", ",", "counter", ")", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "estimators", ")", ")", ")", ":", "name", "=", "names", "[", "i", "]", "if", "name", "in", "counter", ":", "names", "[", "i", "]", "+=", "\"-%d\"", "%", "counter", "[", "name", "]", "counter", "[", "name", "]", "-=", "1", "return", "list", "(", "zip", "(", "names", ",", "estimators", ")", ")" ]
Generate names for estimators. Adapted from sklearn.pipeline._name_estimators
[ "Generate", "names", "for", "estimators", "." ]
python
train
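Because the loop walks indices in reverse, duplicate names are suffixed from the back and the numbering comes out ascending. A small illustration with stand-in classes (not real ballet transformers), assuming the function above and its imports are in scope:

class Scaler(object): pass
class Encoder(object): pass

print(_name_estimators([Scaler(), Scaler(), Encoder()]))
# expected: [('scaler-1', <Scaler>), ('scaler-2', <Scaler>), ('encoder', <Encoder>)]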
10gen/mongo-orchestration
mongo_orchestration/replica_sets.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/replica_sets.py#L499-L510
def hidden(self):
    """return list of hidden members"""
    members = [self.member_info(item["_id"]) for item in self.members()]
    result = []
    for member in members:
        if member['rsInfo'].get('hidden'):
            server_id = member['server_id']
            result.append({
                '_id': member['_id'],
                'host': self._servers.hostname(server_id),
                'server_id': server_id})
    return result
[ "def", "hidden", "(", "self", ")", ":", "members", "=", "[", "self", ".", "member_info", "(", "item", "[", "\"_id\"", "]", ")", "for", "item", "in", "self", ".", "members", "(", ")", "]", "result", "=", "[", "]", "for", "member", "in", "members", ":", "if", "member", "[", "'rsInfo'", "]", ".", "get", "(", "'hidden'", ")", ":", "server_id", "=", "member", "[", "'server_id'", "]", "result", ".", "append", "(", "{", "'_id'", ":", "member", "[", "'_id'", "]", ",", "'host'", ":", "self", ".", "_servers", ".", "hostname", "(", "server_id", ")", ",", "'server_id'", ":", "server_id", "}", ")", "return", "result" ]
return list of hidden members
[ "return", "list", "of", "hidden", "members" ]
python
train
stephenmcd/django-forms-builder
forms_builder/forms/utils.py
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/utils.py#L61-L67
def import_attr(path):
    """
    Given a Python dotted path to a variable in a module, imports the
    module and returns the variable in it.
    """
    module_path, attr_name = path.rsplit(".", 1)
    return getattr(import_module(module_path), attr_name)
[ "def", "import_attr", "(", "path", ")", ":", "module_path", ",", "attr_name", "=", "path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "return", "getattr", "(", "import_module", "(", "module_path", ")", ",", "attr_name", ")" ]
Given a Python dotted path to a variable in a module, imports the module and returns the variable in it.
[ "Given", "a", "a", "Python", "dotted", "path", "to", "a", "variable", "in", "a", "module", "imports", "the", "module", "and", "returns", "the", "variable", "in", "it", "." ]
python
train
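For example, resolving a standard-library function by its dotted path (values chosen here for illustration, not taken from the forms_builder source):

join = import_attr("os.path.join")  # imports os.path and returns its `join`
print(join("forms", "builder"))     # -> "forms/builder" on POSIX systems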
manns/pyspread
pyspread/src/actions/_grid_cell_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_cell_actions.py#L308-L337
def change_frozen_attr(self):
    """Changes frozen state of cell if there is no selection"""

    # Selections are not supported
    if self.grid.selection:
        statustext = _("Freezing selections is not supported.")
        post_command_event(self.main_window, self.StatusBarMsg,
                           text=statustext)

    cursor = self.grid.actions.cursor
    frozen = self.grid.code_array.cell_attributes[cursor]["frozen"]

    if frozen:
        # We have a frozen cell that has to be unfrozen

        # Delete frozen cache content
        self.grid.code_array.frozen_cache.pop(repr(cursor))

    else:
        # We have a non-frozen cell that has to be frozen

        # Add frozen cache content
        res_obj = self.grid.code_array[cursor]
        self.grid.code_array.frozen_cache[repr(cursor)] = res_obj

    # Set the new frozen state / code
    selection = Selection([], [], [], [], [cursor[:2]])
    self.set_attr("frozen", not frozen, selection=selection)
[ "def", "change_frozen_attr", "(", "self", ")", ":", "# Selections are not supported", "if", "self", ".", "grid", ".", "selection", ":", "statustext", "=", "_", "(", "\"Freezing selections is not supported.\"", ")", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")", "cursor", "=", "self", ".", "grid", ".", "actions", ".", "cursor", "frozen", "=", "self", ".", "grid", ".", "code_array", ".", "cell_attributes", "[", "cursor", "]", "[", "\"frozen\"", "]", "if", "frozen", ":", "# We have an frozen cell that has to be unfrozen", "# Delete frozen cache content", "self", ".", "grid", ".", "code_array", ".", "frozen_cache", ".", "pop", "(", "repr", "(", "cursor", ")", ")", "else", ":", "# We have an non-frozen cell that has to be frozen", "# Add frozen cache content", "res_obj", "=", "self", ".", "grid", ".", "code_array", "[", "cursor", "]", "self", ".", "grid", ".", "code_array", ".", "frozen_cache", "[", "repr", "(", "cursor", ")", "]", "=", "res_obj", "# Set the new frozen state / code", "selection", "=", "Selection", "(", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "cursor", "[", ":", "2", "]", "]", ")", "self", ".", "set_attr", "(", "\"frozen\"", ",", "not", "frozen", ",", "selection", "=", "selection", ")" ]
Changes frozen state of cell if there is no selection
[ "Changes", "frozen", "state", "of", "cell", "if", "there", "is", "no", "selection" ]
python
train
adamcharnock/swiftwind
swiftwind/costs/models.py
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L261-L269
def _is_billing_complete(self):
    """Has the specified `fixed_amount` been billed?

    If so, we should not be enacting this RecurringCost.
    """
    if self.is_one_off():
        return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
    else:
        return False
[ "def", "_is_billing_complete", "(", "self", ")", ":", "if", "self", ".", "is_one_off", "(", ")", ":", "return", "self", ".", "get_billed_amount", "(", ")", ">=", "Balance", "(", "self", ".", "fixed_amount", ",", "self", ".", "currency", ")", "else", ":", "return", "False" ]
Has the specified `fixed_amount` been billed? If so, we should not be enacting this RecurringCost.
[ "Has", "the", "specified", "fixed_amount", "been", "billed?" ]
python
train
thombashi/allpairspy
examples/example2.2.py
https://github.com/thombashi/allpairspy/blob/9dd256d7ccdc1348c5807d4a814294d9c5192293/examples/example2.2.py#L14-L42
def is_valid_combination(values, names):
    dictionary = dict(zip(names, values))

    """
    Should return True if combination is valid and False otherwise.

    Dictionary that is passed here can be incomplete.
    To prevent search for unnecessary items filtering function
    is executed with found subset of data to validate it.
    """

    rules = [
        # Brand Y does not support Windows 98
        # Brand X does not work with XP
        # Contractors are billed in 30 min increments
        lambda d: "98" == d["os"] and "Brand Y" == d["brand"],
        lambda d: "XP" == d["os"] and "Brand X" == d["brand"],
        lambda d: "Contr." == d["employee"] and d["increment"] < 30,
    ]

    for rule in rules:
        try:
            if rule(dictionary):
                return False
        except KeyError:
            pass

    return True
[ "def", "is_valid_combination", "(", "values", ",", "names", ")", ":", "dictionary", "=", "dict", "(", "zip", "(", "names", ",", "values", ")", ")", "rules", "=", "[", "# Brand Y does not support Windows 98", "# Brand X does not work with XP", "# Contractors are billed in 30 min increments", "lambda", "d", ":", "\"98\"", "==", "d", "[", "\"os\"", "]", "and", "\"Brand Y\"", "==", "d", "[", "\"brand\"", "]", ",", "lambda", "d", ":", "\"XP\"", "==", "d", "[", "\"os\"", "]", "and", "\"Brand X\"", "==", "d", "[", "\"brand\"", "]", ",", "lambda", "d", ":", "\"Contr.\"", "==", "d", "[", "\"employee\"", "]", "and", "d", "[", "\"increment\"", "]", "<", "30", ",", "]", "for", "rule", "in", "rules", ":", "try", ":", "if", "rule", "(", "dictionary", ")", ":", "return", "False", "except", "KeyError", ":", "pass", "return", "True" ]
Should return True if combination is valid and False otherwise. Dictionary that is passed here can be incomplete. To prevent search for unnecessary items filtering function is executed with found subset of data to validate it.
[ "Should", "return", "True", "if", "combination", "is", "valid", "and", "False", "otherwise", "." ]
python
train
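In the example file this predicate is handed to allpairspy as a filter; the wiring below is a hedged reconstruction with illustrative parameter values, not a verbatim copy of the rest of example2.2.py.

from collections import OrderedDict
from allpairspy import AllPairs

parameters = OrderedDict([
    ("brand", ["Brand X", "Brand Y"]),
    ("os", ["98", "NT", "2000", "XP"]),
    ("employee", ["Contr.", "Mr. X"]),
    ("increment", [15, 30, 60]),
])

pairwise = AllPairs(
    list(parameters.values()),
    filter_func=lambda values: is_valid_combination(values, list(parameters.keys())))
for i, combo in enumerate(pairwise):
    print(i, combo)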
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L663-L666
def points(self, include_hidden=False):
    """Return the number of points awarded to this submission."""
    return sum(x.points for x in self.testable_results
               if include_hidden or not x.testable.is_hidden)
[ "def", "points", "(", "self", ",", "include_hidden", "=", "False", ")", ":", "return", "sum", "(", "x", ".", "points", "for", "x", "in", "self", ".", "testable_results", "if", "include_hidden", "or", "not", "x", ".", "testable", ".", "is_hidden", ")" ]
Return the number of points awarded to this submission.
[ "Return", "the", "number", "of", "points", "awarded", "to", "this", "submission", "." ]
python
train
ihucos/plash
opt/plash/lib/py/plash/macros/common.py
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L161-L167
def entrypoint_script(*lines):
    'write lines to /entrypoint and hint it as default command'
    lines = list(lines)
    if lines and not lines[0].startswith('#!'):
        lines.insert(0, '#!/bin/sh')
    return eval([['entrypoint', '/entrypoint'],
                 ['write-script', '/entrypoint'] + lines])
[ "def", "entrypoint_script", "(", "*", "lines", ")", ":", "lines", "=", "list", "(", "lines", ")", "if", "lines", "and", "not", "lines", "[", "0", "]", ".", "startswith", "(", "'#!'", ")", ":", "lines", ".", "insert", "(", "0", ",", "'#!/bin/sh'", ")", "return", "eval", "(", "[", "[", "'entrypoint'", ",", "'/entrypoint'", "]", ",", "[", "'write-script'", ",", "'/entrypoint'", "]", "+", "lines", "]", ")" ]
write lines to /entrypoint and hint it as default command
[ "write", "lines", "to", "/", "entrypoint", "and", "hint", "it", "as", "default", "command" ]
python
train
tanghaibao/jcvi
jcvi/utils/progressbar.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L171-L181
def update(self, pbar):
    'Updates the widget with the current SI prefixed speed.'
    if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:  # =~ 0
        scaled = power = 0
    else:
        speed = pbar.currval / pbar.seconds_elapsed
        power = int(math.log(speed, 1000))
        scaled = speed / 1000.**power

    return self._format % (scaled, self.prefixes[power], self.unit)
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "seconds_elapsed", "<", "2e-6", "or", "pbar", ".", "currval", "<", "2e-6", ":", "# =~ 0", "scaled", "=", "power", "=", "0", "else", ":", "speed", "=", "pbar", ".", "currval", "/", "pbar", ".", "seconds_elapsed", "power", "=", "int", "(", "math", ".", "log", "(", "speed", ",", "1000", ")", ")", "scaled", "=", "speed", "/", "1000.", "**", "power", "return", "self", ".", "_format", "%", "(", "scaled", ",", "self", ".", "prefixes", "[", "power", "]", ",", "self", ".", "unit", ")" ]
Updates the widget with the current SI prefixed speed.
[ "Updates", "the", "widget", "with", "the", "current", "SI", "prefixed", "speed", "." ]
python
train
polysquare/jobstamps
jobstamps/jobstamp.py
https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L23-L29
def _safe_mkdir(directory):
    """Create a directory, ignoring errors if it already exists."""
    try:
        os.makedirs(directory)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise error
[ "def", "_safe_mkdir", "(", "directory", ")", ":", "try", ":", "os", ".", "makedirs", "(", "directory", ")", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "error" ]
Create a directory, ignoring errors if it already exists.
[ "Create", "a", "directory", "ignoring", "errors", "if", "it", "already", "exists", "." ]
python
train
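On Python 3.2 and later the same behavior is available without the explicit errno check; this is a side note, not part of the jobstamps source:

import os

os.makedirs("some/deep/dir", exist_ok=True)  # no error if it already exists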
pysal/esda
esda/smoothing.py
https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L759-L824
def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
    """
    Compute smoothing by columns in a dataframe.

    Parameters
    ----------
    df       : pandas.DataFrame
               a dataframe containing the data to be smoothed
    e        : string or list of strings
               the name or names of columns containing event variables to be
               smoothed
    b        : string or list of strings
               the name or names of columns containing the population
               variables to be smoothed
    w        : pysal.weights.W or list of pysal.weights.W
               the spatial weights object or objects to use with the
               event-population pairs. If not provided and a weights object
               is in the dataframe's metadata, that weights object will be
               used.
    inplace  : bool
               a flag denoting whether to output a copy of `df` with the
               relevant smoothed columns appended, or to append the columns
               directly to `df` itself.
    **kwargs : optional keyword arguments
               optional keyword options that are passed directly to the
               smoother.

    Returns
    -------
    a copy of `df` containing the columns. Or, if `inplace`, this returns
    None, but implicitly adds columns to `df`.
    """
    if not inplace:
        new = df.copy()
        cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
        return new
    if isinstance(e, str):
        e = [e]
    if isinstance(b, str):
        b = [b]
    if w is None:
        found = False
        for k in df._metadata:
            # NOTE: preserved from the original source, which looks up `w`
            # here rather than the loop variable `k`.
            w = df.__dict__.get(w, None)
            if isinstance(w, W):
                found = True
        if not found:
            raise Exception('Weights not provided and no weights attached to frame!'
                            ' Please provide a weight or attach a weight to the'
                            ' dataframe')
    if isinstance(w, W):
        w = [w] * len(e)
    if len(b) == 1 and len(e) > 1:
        b = b * len(e)
    try:
        assert len(e) == len(b)
    except AssertionError:
        raise ValueError('There is no one-to-one mapping between event'
                         ' variable and population at risk variable!')
    for ei, bi, wi in zip(e, b, w):
        ename = ei
        bname = bi
        ei = df[ename]
        bi = df[bname]
        outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
        df[outcol] = cls(ei, bi, w=wi, **kwargs).r
[ "def", "by_col", "(", "cls", ",", "df", ",", "e", ",", "b", ",", "w", "=", "None", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "inplace", ":", "new", "=", "df", ".", "copy", "(", ")", "cls", ".", "by_col", "(", "new", ",", "e", ",", "b", ",", "w", "=", "w", ",", "inplace", "=", "True", ",", "*", "*", "kwargs", ")", "return", "new", "if", "isinstance", "(", "e", ",", "str", ")", ":", "e", "=", "[", "e", "]", "if", "isinstance", "(", "b", ",", "str", ")", ":", "b", "=", "[", "b", "]", "if", "w", "is", "None", ":", "found", "=", "False", "for", "k", "in", "df", ".", "_metadata", ":", "w", "=", "df", ".", "__dict__", ".", "get", "(", "w", ",", "None", ")", "if", "isinstance", "(", "w", ",", "W", ")", ":", "found", "=", "True", "if", "not", "found", ":", "raise", "Exception", "(", "'Weights not provided and no weights attached to frame!'", "' Please provide a weight or attach a weight to the'", "' dataframe'", ")", "if", "isinstance", "(", "w", ",", "W", ")", ":", "w", "=", "[", "w", "]", "*", "len", "(", "e", ")", "if", "len", "(", "b", ")", "==", "1", "and", "len", "(", "e", ")", ">", "1", ":", "b", "=", "b", "*", "len", "(", "e", ")", "try", ":", "assert", "len", "(", "e", ")", "==", "len", "(", "b", ")", "except", "AssertionError", ":", "raise", "ValueError", "(", "'There is no one-to-one mapping between event'", "' variable and population at risk variable!'", ")", "for", "ei", ",", "bi", ",", "wi", "in", "zip", "(", "e", ",", "b", ",", "w", ")", ":", "ename", "=", "ei", "bname", "=", "bi", "ei", "=", "df", "[", "ename", "]", "bi", "=", "df", "[", "bname", "]", "outcol", "=", "'_'", ".", "join", "(", "(", "'-'", ".", "join", "(", "(", "ename", ",", "bname", ")", ")", ",", "cls", ".", "__name__", ".", "lower", "(", ")", ")", ")", "df", "[", "outcol", "]", "=", "cls", "(", "ei", ",", "bi", ",", "w", "=", "wi", ",", "*", "*", "kwargs", ")", ".", "r" ]
Compute smoothing by columns in a dataframe. Parameters ----------- df : pandas.DataFrame a dataframe containing the data to be smoothed e : string or list of strings the name or names of columns containing event variables to be smoothed b : string or list of strings the name or names of columns containing the population variables to be smoothed w : pysal.weights.W or list of pysal.weights.W the spatial weights object or objects to use with the event-population pairs. If not provided and a weights object is in the dataframe's metadata, that weights object will be used. inplace : bool a flag denoting whether to output a copy of `df` with the relevant smoothed columns appended, or to append the columns directly to `df` itself. **kwargs: optional keyword arguments optional keyword options that are passed directly to the smoother. Returns --------- a copy of `df` containing the columns. Or, if `inplace`, this returns None, but implicitly adds columns to `df`.
[ "Compute", "smoothing", "by", "columns", "in", "a", "dataframe", "." ]
python
train
mattbierner/blotre-py
blotre.py
https://github.com/mattbierner/blotre-py/blob/c98228d1159bc651aad546e442b0acbf97b1e043/blotre.py#L358-L365
def _persist(self): """Persist client data.""" with open(self.file, 'w') as f: json.dump({ 'client': self.client, 'creds': self.creds, 'config': self.config }, f)
[ "def", "_persist", "(", "self", ")", ":", "with", "open", "(", "self", ".", "file", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "{", "'client'", ":", "self", ".", "client", ",", "'creds'", ":", "self", ".", "creds", ",", "'config'", ":", "self", ".", "config", "}", ",", "f", ")" ]
Persist client data.
[ "Persist", "client", "data", "." ]
python
train
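A hypothetical mirror-image loader (not part of the class as shown) would simply reverse the dump; a sketch assuming the same three-key layout:

```python
import json

def restore(path):
    """Read back the client/creds/config blob written by _persist (hypothetical helper)."""
    with open(path) as f:
        blob = json.load(f)
    return blob['client'], blob['creds'], blob['config']
```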
striglia/pyramid_swagger
pyramid_swagger/tween.py
https://github.com/striglia/pyramid_swagger/blob/1dbc0b4f23e2e5f4ed575c116f3f7d0e83e30d45/pyramid_swagger/tween.py#L604-L634
def get_op_for_request(request, route_info, spec): """ Find out which operation in the Swagger schema corresponds to the given pyramid request. :type request: :class:`pyramid.request.Request` :type route_info: dict (usually has 'match' and 'route' keys) :type spec: :class:`bravado_core.spec.Spec` :rtype: :class:`bravado_core.operation.Operation` :raises: PathNotMatchedError when a matching Swagger operation is not found. """ # pyramid.urldispath.Route route = route_info['route'] if hasattr(route, 'path'): route_path = route.path if route_path[0] != '/': route_path = '/' + route_path op = spec.get_op_for_request(request.method, route_path) if op is not None: return op else: raise PathNotMatchedError( "Could not find a matching Swagger " "operation for {0} request {1}" .format(request.method, request.url)) else: raise PathNotMatchedError( "Could not find a matching route for {0} request {1}. " "Have you registered this endpoint with Pyramid?" .format(request.method, request.url))
[ "def", "get_op_for_request", "(", "request", ",", "route_info", ",", "spec", ")", ":", "# pyramid.urldispath.Route", "route", "=", "route_info", "[", "'route'", "]", "if", "hasattr", "(", "route", ",", "'path'", ")", ":", "route_path", "=", "route", ".", "path", "if", "route_path", "[", "0", "]", "!=", "'/'", ":", "route_path", "=", "'/'", "+", "route_path", "op", "=", "spec", ".", "get_op_for_request", "(", "request", ".", "method", ",", "route_path", ")", "if", "op", "is", "not", "None", ":", "return", "op", "else", ":", "raise", "PathNotMatchedError", "(", "\"Could not find a matching Swagger \"", "\"operation for {0} request {1}\"", ".", "format", "(", "request", ".", "method", ",", "request", ".", "url", ")", ")", "else", ":", "raise", "PathNotMatchedError", "(", "\"Could not find a matching route for {0} request {1}. \"", "\"Have you registered this endpoint with Pyramid?\"", ".", "format", "(", "request", ".", "method", ",", "request", ".", "url", ")", ")" ]
Find out which operation in the Swagger schema corresponds to the given pyramid request. :type request: :class:`pyramid.request.Request` :type route_info: dict (usually has 'match' and 'route' keys) :type spec: :class:`bravado_core.spec.Spec` :rtype: :class:`bravado_core.operation.Operation` :raises: PathNotMatchedError when a matching Swagger operation is not found.
[ "Find", "out", "which", "operation", "in", "the", "Swagger", "schema", "corresponds", "to", "the", "given", "pyramid", "request", "." ]
python
train
lablup/backend.ai-client-py
src/ai/backend/client/kernel.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/kernel.py#L457-L492
def stream_execute(self, code: str = '', *, mode: str = 'query', opts: dict = None) -> WebSocketResponse: ''' Executes a code snippet in the streaming mode. Since the returned websocket represents a run loop, there is no need to specify *run_id* explicitly. ''' params = {} if self.owner_access_key: params['owner_access_key'] = self.owner_access_key opts = {} if opts is None else opts if mode == 'query': opts = {} elif mode == 'batch': opts = { 'clean': opts.get('clean', None), 'build': opts.get('build', None), 'buildLog': bool(opts.get('buildLog', False)), 'exec': opts.get('exec', None), } else: msg = 'Invalid stream-execution mode: {0}'.format(mode) raise BackendClientError(msg) request = Request(self.session, 'GET', '/stream/kernel/{}/execute'.format(self.kernel_id), params=params) async def send_code(ws): await ws.send_json({ 'code': code, 'mode': mode, 'options': opts, }) return request.connect_websocket(on_enter=send_code)
[ "def", "stream_execute", "(", "self", ",", "code", ":", "str", "=", "''", ",", "*", ",", "mode", ":", "str", "=", "'query'", ",", "opts", ":", "dict", "=", "None", ")", "->", "WebSocketResponse", ":", "params", "=", "{", "}", "if", "self", ".", "owner_access_key", ":", "params", "[", "'owner_access_key'", "]", "=", "self", ".", "owner_access_key", "opts", "=", "{", "}", "if", "opts", "is", "None", "else", "opts", "if", "mode", "==", "'query'", ":", "opts", "=", "{", "}", "elif", "mode", "==", "'batch'", ":", "opts", "=", "{", "'clean'", ":", "opts", ".", "get", "(", "'clean'", ",", "None", ")", ",", "'build'", ":", "opts", ".", "get", "(", "'build'", ",", "None", ")", ",", "'buildLog'", ":", "bool", "(", "opts", ".", "get", "(", "'buildLog'", ",", "False", ")", ")", ",", "'exec'", ":", "opts", ".", "get", "(", "'exec'", ",", "None", ")", ",", "}", "else", ":", "msg", "=", "'Invalid stream-execution mode: {0}'", ".", "format", "(", "mode", ")", "raise", "BackendClientError", "(", "msg", ")", "request", "=", "Request", "(", "self", ".", "session", ",", "'GET'", ",", "'/stream/kernel/{}/execute'", ".", "format", "(", "self", ".", "kernel_id", ")", ",", "params", "=", "params", ")", "async", "def", "send_code", "(", "ws", ")", ":", "await", "ws", ".", "send_json", "(", "{", "'code'", ":", "code", ",", "'mode'", ":", "mode", ",", "'options'", ":", "opts", ",", "}", ")", "return", "request", ".", "connect_websocket", "(", "on_enter", "=", "send_code", ")" ]
Executes a code snippet in the streaming mode. Since the returned websocket represents a run loop, there is no need to specify *run_id* explicitly.
[ "Executes", "a", "code", "snippet", "in", "the", "streaming", "mode", ".", "Since", "the", "returned", "websocket", "represents", "a", "run", "loop", "there", "is", "no", "need", "to", "specify", "*", "run_id", "*", "explicitly", "." ]
python
train
vi3k6i5/flashtext
flashtext/keyword.py
https://github.com/vi3k6i5/flashtext/blob/50c45f1f4a394572381249681046f57e2bf5a591/flashtext/keyword.py#L395-L411
def remove_keywords_from_list(self, keyword_list):
        """To remove keywords present in list

        Args:
            keyword_list (list(str)): List of keywords to remove

        Examples:
            >>> keyword_processor.remove_keywords_from_list(["java", "python"])

        Raises:
            AttributeError: If `keyword_list` is not a list.

        """
        if not isinstance(keyword_list, list):
            raise AttributeError("keyword_list should be a list")

        for keyword in keyword_list:
            self.remove_keyword(keyword)
[ "def", "remove_keywords_from_list", "(", "self", ",", "keyword_list", ")", ":", "if", "not", "isinstance", "(", "keyword_list", ",", "list", ")", ":", "raise", "AttributeError", "(", "\"keyword_list should be a list\"", ")", "for", "keyword", "in", "keyword_list", ":", "self", ".", "remove_keyword", "(", "keyword", ")" ]
To remove keywords present in list

        Args:
            keyword_list (list(str)): List of keywords to remove

        Examples:
            >>> keyword_processor.remove_keywords_from_list(["java", "python"])

        Raises:
            AttributeError: If `keyword_list` is not a list.
[ "To", "remove", "keywords", "present", "in", "list" ]
python
train
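A quick round trip with flashtext's `KeywordProcessor`, the class this method belongs to:

```python
from flashtext import KeywordProcessor

kp = KeywordProcessor()
kp.add_keywords_from_list(['java', 'python', 'go'])
kp.remove_keywords_from_list(['java', 'python'])
print(kp.extract_keywords('I like python, java and go'))  # ['go']
```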
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/tools.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/tools.py#L132-L137
def load_data(filename):
    """
    :rtype: numpy matrix
    """
    data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    return data.as_matrix()
[ "def", "load_data", "(", "filename", ")", ":", "data", "=", "pandas", ".", "read_csv", "(", "filename", ",", "header", "=", "None", ",", "delimiter", "=", "'\\t'", ",", "skiprows", "=", "9", ")", "return", "data", ".", "as_matrix", "(", ")" ]
:rtype: numpy matrix
[ ":", "rtype", ":", "numpy", "matrix" ]
python
train
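`DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in 1.0; on current pandas the same loader reads (sketch):

```python
import pandas

def load_data(filename):
    """:rtype: numpy array"""
    data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    return data.to_numpy()  # modern replacement for the removed as_matrix()
```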
staggerpkg/stagger
stagger/fileutil.py
https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L51-L61
def opened(filename, mode): "Open filename, or do nothing if filename is already an open file object" if isinstance(filename, str): file = open(filename, mode) try: yield file finally: if not file.closed: file.close() else: yield filename
[ "def", "opened", "(", "filename", ",", "mode", ")", ":", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "file", "=", "open", "(", "filename", ",", "mode", ")", "try", ":", "yield", "file", "finally", ":", "if", "not", "file", ".", "closed", ":", "file", ".", "close", "(", ")", "else", ":", "yield", "filename" ]
Open filename, or do nothing if filename is already an open file object
[ "Open", "filename", "or", "do", "nothing", "if", "filename", "is", "already", "an", "open", "file", "object" ]
python
train
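Usage sketch: the helper lets an API accept either a path or an already-open file, closing only what it opened itself. The function is a generator used with `with`, so the `contextlib.contextmanager` decorator, presumably stripped by the extractor, is restored here to make the sketch run standalone:

```python
import io
from contextlib import contextmanager

@contextmanager
def opened(filename, mode):
    "Open filename, or do nothing if filename is already an open file object"
    if isinstance(filename, str):
        file = open(filename, mode)
        try:
            yield file
        finally:
            if not file.closed:
                file.close()
    else:
        yield filename

with opened('demo.txt', 'w') as f:   # a path: opened, then closed for us
    f.write('hello\n')

buf = io.StringIO()
with opened(buf, 'w') as f:          # a file object: passed through untouched
    f.write('hello\n')
assert not buf.closed                # caller keeps responsibility for closing
```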
blockstack/blockstack-core
blockstack/lib/operations/announce.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/announce.py#L39-L75
def process_announcement( sender_namerec, op, working_dir ): """ If the announcement is valid, then immediately record it. """ node_config = get_blockstack_opts() # valid announcement announce_hash = op['message_hash'] announcer_id = op['announcer_id'] # verify that it came from this individual name_history = sender_namerec['history'] allowed_value_hashes = [] for block_height in name_history.keys(): for historic_namerec in name_history[block_height]: if historic_namerec.get('value_hash'): allowed_value_hashes.append(historic_namerec['value_hash']) if announce_hash not in allowed_value_hashes: # this individual did not send this announcement log.warning("Announce hash {} not found in name history for {}".format(announce_hash, announcer_id)) return # go get it from Atlas zonefiles_dir = node_config.get('zonefiles', None) if not zonefiles_dir: log.warning("This node does not store zone files, so no announcement can be found") return announce_text = get_atlas_zonefile_data(announce_hash, zonefiles_dir) if announce_text is None: log.warning("No zone file {} found".format(announce_hash)) return # go append it log.critical("ANNOUNCEMENT (from %s): %s\n------BEGIN MESSAGE------\n%s\n------END MESSAGE------\n" % (announcer_id, announce_hash, announce_text)) store_announcement( working_dir, announce_hash, announce_text )
[ "def", "process_announcement", "(", "sender_namerec", ",", "op", ",", "working_dir", ")", ":", "node_config", "=", "get_blockstack_opts", "(", ")", "# valid announcement", "announce_hash", "=", "op", "[", "'message_hash'", "]", "announcer_id", "=", "op", "[", "'announcer_id'", "]", "# verify that it came from this individual", "name_history", "=", "sender_namerec", "[", "'history'", "]", "allowed_value_hashes", "=", "[", "]", "for", "block_height", "in", "name_history", ".", "keys", "(", ")", ":", "for", "historic_namerec", "in", "name_history", "[", "block_height", "]", ":", "if", "historic_namerec", ".", "get", "(", "'value_hash'", ")", ":", "allowed_value_hashes", ".", "append", "(", "historic_namerec", "[", "'value_hash'", "]", ")", "if", "announce_hash", "not", "in", "allowed_value_hashes", ":", "# this individual did not send this announcement", "log", ".", "warning", "(", "\"Announce hash {} not found in name history for {}\"", ".", "format", "(", "announce_hash", ",", "announcer_id", ")", ")", "return", "# go get it from Atlas", "zonefiles_dir", "=", "node_config", ".", "get", "(", "'zonefiles'", ",", "None", ")", "if", "not", "zonefiles_dir", ":", "log", ".", "warning", "(", "\"This node does not store zone files, so no announcement can be found\"", ")", "return", "announce_text", "=", "get_atlas_zonefile_data", "(", "announce_hash", ",", "zonefiles_dir", ")", "if", "announce_text", "is", "None", ":", "log", ".", "warning", "(", "\"No zone file {} found\"", ".", "format", "(", "announce_hash", ")", ")", "return", "# go append it ", "log", ".", "critical", "(", "\"ANNOUNCEMENT (from %s): %s\\n------BEGIN MESSAGE------\\n%s\\n------END MESSAGE------\\n\"", "%", "(", "announcer_id", ",", "announce_hash", ",", "announce_text", ")", ")", "store_announcement", "(", "working_dir", ",", "announce_hash", ",", "announce_text", ")" ]
If the announcement is valid, then immediately record it.
[ "If", "the", "announcement", "is", "valid", "then", "immediately", "record", "it", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/cluster/evaluator/launch_eval.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/cluster/evaluator/launch_eval.py#L97-L102
def _append_pairs(new_pairs): """ Load the pairlist, add new stuff, save it out """ desired_pairs = restore_pairs() or [] desired_pairs += new_pairs print("Adding {} new pairs, queue has {} pairs".format(len(new_pairs), len(desired_pairs))) save_pairs(desired_pairs)
[ "def", "_append_pairs", "(", "new_pairs", ")", ":", "desired_pairs", "=", "restore_pairs", "(", ")", "or", "[", "]", "desired_pairs", "+=", "new_pairs", "print", "(", "\"Adding {} new pairs, queue has {} pairs\"", ".", "format", "(", "len", "(", "new_pairs", ")", ",", "len", "(", "desired_pairs", ")", ")", ")", "save_pairs", "(", "desired_pairs", ")" ]
Load the pairlist, add new stuff, save it out
[ "Load", "the", "pairlist", "add", "new", "stuff", "save", "it", "out" ]
python
train
acorg/dark-matter
dark/titles.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/titles.py#L181-L200
def coverageInfo(self):
        """
        Return information about the bases found at each location in our
        title sequence.

        @return: A C{dict} whose keys are C{int} subject offsets and whose
            values are unsorted lists of (score, base) 2-tuples, giving all
            the bases from reads that matched the subject at that location,
            along with the bit score of the matching read.
        """
        result = defaultdict(list)

        for titleAlignment in self:
            for hsp in titleAlignment.hsps:
                score = hsp.score.score
                for (subjectOffset, base, _) in titleAlignment.read.walkHSP(
                        hsp, includeWhiskers=False):
                    result[subjectOffset].append((score, base))

        return result
[ "def", "coverageInfo", "(", "self", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "titleAlignment", "in", "self", ":", "for", "hsp", "in", "titleAlignment", ".", "hsps", ":", "score", "=", "hsp", ".", "score", ".", "score", "for", "(", "subjectOffset", ",", "base", ",", "_", ")", "in", "titleAlignment", ".", "read", ".", "walkHSP", "(", "hsp", ",", "includeWhiskers", "=", "False", ")", ":", "result", "[", "subjectOffset", "]", ".", "append", "(", "(", "score", ",", "base", ")", ")", "return", "result" ]
Return information about the bases found at each location in our title sequence.

        @return: A C{dict} whose keys are C{int} subject offsets and whose
            values are unsorted lists of (score, base) 2-tuples, giving all
            the bases from reads that matched the subject at that location,
            along with the bit score of the matching read.
[ "Return", "information", "about", "the", "bases", "found", "at", "each", "location", "in", "our", "title", "sequence", "." ]
python
train
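At its core the method is the standard `defaultdict(list)` accumulation keyed by offset. The pattern in isolation, with made-up values standing in for `walkHSP` output:

```python
from collections import defaultdict

# (offset, base, score) triples as walkHSP might yield them (made-up values)
hits = [(0, 'A', 55.0), (1, 'C', 55.0), (0, 'A', 48.2), (1, 'T', 48.2)]

coverage = defaultdict(list)
for subject_offset, base, score in hits:
    coverage[subject_offset].append((score, base))

print(dict(coverage))
# {0: [(55.0, 'A'), (48.2, 'A')], 1: [(55.0, 'C'), (48.2, 'T')]}
```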
CivicSpleen/ambry
ambry/orm/partition.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L841-L860
def _update_names(self): """Update the derived names""" d = dict( table=self.table_name, time=self.time, space=self.space, grain=self.grain, variant=self.variant, segment=self.segment ) assert self.dataset name = PartialPartitionName(**d).promote(self.dataset.identity.name) self.name = str(name.name) self.vname = str(name.vname) self.cache_key = name.cache_key self.fqname = str(self.identity.fqname)
[ "def", "_update_names", "(", "self", ")", ":", "d", "=", "dict", "(", "table", "=", "self", ".", "table_name", ",", "time", "=", "self", ".", "time", ",", "space", "=", "self", ".", "space", ",", "grain", "=", "self", ".", "grain", ",", "variant", "=", "self", ".", "variant", ",", "segment", "=", "self", ".", "segment", ")", "assert", "self", ".", "dataset", "name", "=", "PartialPartitionName", "(", "*", "*", "d", ")", ".", "promote", "(", "self", ".", "dataset", ".", "identity", ".", "name", ")", "self", ".", "name", "=", "str", "(", "name", ".", "name", ")", "self", ".", "vname", "=", "str", "(", "name", ".", "vname", ")", "self", ".", "cache_key", "=", "name", ".", "cache_key", "self", ".", "fqname", "=", "str", "(", "self", ".", "identity", ".", "fqname", ")" ]
Update the derived names
[ "Update", "the", "derived", "names" ]
python
train
opendatateam/udata
udata/frontend/helpers.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/helpers.py#L350-L379
def i18n_alternate_links():
    """Render the <link rel="alternate" hreflang />
    if page is in an I18nBlueprint
    """
    if (not request.endpoint or
            not current_app.url_map.is_endpoint_expecting(request.endpoint,
                                                          'lang_code')):
        return Markup('')

    try:
        LINK_PATTERN = (
            '<link rel="alternate" href="{url}" hreflang="{lang}" />')
        links = []
        current_lang = get_current_locale().language

        params = {}
        if request.args:
            params.update(request.args)
        if request.view_args:
            params.update(request.view_args)

        for lang in current_app.config['LANGUAGES']:
            if lang != current_lang:
                url = url_for(request.endpoint, lang_code=lang, **params)
                links.append(LINK_PATTERN.format(url=url, lang=lang))
        return Markup(''.join(links))
    except Exception:
        # Never fails
        return Markup('')
[ "def", "i18n_alternate_links", "(", ")", ":", "if", "(", "not", "request", ".", "endpoint", "or", "not", "current_app", ".", "url_map", ".", "is_endpoint_expecting", "(", "request", ".", "endpoint", ",", "'lang_code'", ")", ")", ":", "return", "Markup", "(", "''", ")", "try", ":", "LINK_PATTERN", "=", "(", "'<link rel=\"alternate\" href=\"{url}\" hreflang=\"{lang}\" />'", ")", "links", "=", "[", "]", "current_lang", "=", "get_current_locale", "(", ")", ".", "language", "params", "=", "{", "}", "if", "request", ".", "args", ":", "params", ".", "update", "(", "request", ".", "args", ")", "if", "request", ".", "view_args", ":", "params", ".", "update", "(", "request", ".", "view_args", ")", "for", "lang", "in", "current_app", ".", "config", "[", "'LANGUAGES'", "]", ":", "if", "lang", "!=", "current_lang", ":", "url", "=", "url_for", "(", "request", ".", "endpoint", ",", "lang_code", "=", "lang", ",", "*", "*", "params", ")", "links", ".", "append", "(", "LINK_PATTERN", ".", "format", "(", "url", "=", "url", ",", "lang", "=", "lang", ")", ")", "return", "Markup", "(", "''", ".", "join", "(", "links", ")", ")", "except", "Exception", ":", "# Never fails", "return", "Markup", "(", "''", ")" ]
Render the <link rel="alternate" hreflang /> if page is in a I18nBlueprint
[ "Render", "the", "<link", "rel", "=", "alternate", "hreflang", "/", ">" ]
python
train
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5886-L5930
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): """ Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. """ msg = ("convert_objects is deprecated. To re-infer data dtypes for " "object columns, use {klass}.infer_objects()\nFor all " "other conversions use the data-type specific converters " "pd.to_datetime, pd.to_timedelta and pd.to_numeric." ).format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor( self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
[ "def", "convert_objects", "(", "self", ",", "convert_dates", "=", "True", ",", "convert_numeric", "=", "False", ",", "convert_timedeltas", "=", "True", ",", "copy", "=", "True", ")", ":", "msg", "=", "(", "\"convert_objects is deprecated. To re-infer data dtypes for \"", "\"object columns, use {klass}.infer_objects()\\nFor all \"", "\"other conversions use the data-type specific converters \"", "\"pd.to_datetime, pd.to_timedelta and pd.to_numeric.\"", ")", ".", "format", "(", "klass", "=", "self", ".", "__class__", ".", "__name__", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_constructor", "(", "self", ".", "_data", ".", "convert", "(", "convert_dates", "=", "convert_dates", ",", "convert_numeric", "=", "convert_numeric", ",", "convert_timedeltas", "=", "convert_timedeltas", ",", "copy", "=", "copy", ")", ")", ".", "__finalize__", "(", "self", ")" ]
Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type.
[ "Attempt", "to", "infer", "better", "dtype", "for", "object", "columns", "." ]
python
train
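The deprecation message names the replacements; a sketch of the recommended calls on current pandas:

```python
import pandas as pd

df = pd.DataFrame({'a': ['1', '2', 'x'],
                   'b': ['2019-01-01', '2019-01-02', 'bad']}, dtype=object)

inferred = df.infer_objects()                      # re-infer object columns
nums = pd.to_numeric(df['a'], errors='coerce')     # unconvertible -> NaN
dates = pd.to_datetime(df['b'], errors='coerce')   # unconvertible -> NaT
```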
Qiskit/qiskit-terra
qiskit/tools/monitor/backend_overview.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/monitor/backend_overview.py#L21-L39
def get_unique_backends(): """Gets the unique backends that are available. Returns: list: Unique available backends. Raises: QiskitError: No backends available. """ backends = IBMQ.backends() unique_hardware_backends = [] unique_names = [] for back in backends: if back.name() not in unique_names and not back.configuration().simulator: unique_hardware_backends.append(back) unique_names.append(back.name()) if not unique_hardware_backends: raise QiskitError('No backends available.') return unique_hardware_backends
[ "def", "get_unique_backends", "(", ")", ":", "backends", "=", "IBMQ", ".", "backends", "(", ")", "unique_hardware_backends", "=", "[", "]", "unique_names", "=", "[", "]", "for", "back", "in", "backends", ":", "if", "back", ".", "name", "(", ")", "not", "in", "unique_names", "and", "not", "back", ".", "configuration", "(", ")", ".", "simulator", ":", "unique_hardware_backends", ".", "append", "(", "back", ")", "unique_names", ".", "append", "(", "back", ".", "name", "(", ")", ")", "if", "not", "unique_hardware_backends", ":", "raise", "QiskitError", "(", "'No backends available.'", ")", "return", "unique_hardware_backends" ]
Gets the unique backends that are available. Returns: list: Unique available backends. Raises: QiskitError: No backends available.
[ "Gets", "the", "unique", "backends", "that", "are", "available", "." ]
python
test
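Stripped of the IBMQ objects, the helper is an order-preserving de-duplication on a key (plus a simulator filter); the generic pattern in isolation:

```python
def unique_by(items, key):
    """Keep the first item for each key value, preserving order."""
    seen = set()
    out = []
    for item in items:
        k = key(item)
        if k not in seen:
            seen.add(k)
            out.append(item)
    return out

print(unique_by(['ibmqx4', 'ibmqx4', 'ibmq_16_melbourne'], key=str))
# ['ibmqx4', 'ibmq_16_melbourne']
```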
ninuxorg/nodeshot
nodeshot/core/layers/models/layer.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/layers/models/layer.py#L158-L164
def node_contained_in_layer_area_validation(self): """ if layer defines an area, ensure node coordinates are contained in the area """ # if area is a polygon ensure it contains the node if self.layer and isinstance(self.layer.area, Polygon) and not self.layer.area.contains(self.geometry): raise ValidationError(_('Node must be inside layer area'))
[ "def", "node_contained_in_layer_area_validation", "(", "self", ")", ":", "# if area is a polygon ensure it contains the node", "if", "self", ".", "layer", "and", "isinstance", "(", "self", ".", "layer", ".", "area", ",", "Polygon", ")", "and", "not", "self", ".", "layer", ".", "area", ".", "contains", "(", "self", ".", "geometry", ")", ":", "raise", "ValidationError", "(", "_", "(", "'Node must be inside layer area'", ")", ")" ]
if layer defines an area, ensure node coordinates are contained in the area
[ "if", "layer", "defines", "an", "area", "ensure", "node", "coordinates", "are", "contained", "in", "the", "area" ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/utils.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/utils.py#L33-L84
def merge_date_ranges(dates):
    """Merge date ranges.

    Generator that merges overlapped date ranges.

    Default init and end dates (1900-01-01 and 2100-01-01) are considered
    range limits and will be removed when a set of ranges overlap. For example:

     * [(1900-01-01, 2010-01-01), (2008-01-01, 2100-01-01)]
       --> (2008-01-01, 2010-01-01)
     * [(1900-01-01, 2010-01-01), (2008-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
       --> (2008-01-01, 2010-01-01),(2010-01-02, 2100-01-01)
     * [(1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
       --> (1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)

    The condition MIN_PERIOD_DATE <= dt <= MAX_PERIOD_DATE must be true for
    each date. Otherwise, the generator will raise a ValueError exception.

    This code is based on samplebias' answer to StackOverflow's question
    "Merging a list of time-range tuples that have overlapping time-ranges"
    (http://stackoverflow.com/questions/5679638).

    :param dates: sequence of date ranges where each range is a
        (st_date, en_date) tuple

    :raises ValueError: when a value of the date range is out of bounds
    """
    if not dates:
        return

    sorted_dates = sorted([sorted(t) for t in dates])
    saved = list(sorted_dates[0])

    for st, en in sorted_dates:
        if st < MIN_PERIOD_DATE or st > MAX_PERIOD_DATE:
            raise ValueError("start date %s is out of bounds" % str(st))

        if en < MIN_PERIOD_DATE or en > MAX_PERIOD_DATE:
            raise ValueError("end date %s is out of bounds" % str(en))

        if st <= saved[1]:
            if saved[0] == MIN_PERIOD_DATE:
                saved[0] = st
            if MAX_PERIOD_DATE in (en, saved[1]):
                saved[1] = min(saved[1], en)
            else:
                saved[1] = max(saved[1], en)
        else:
            yield tuple(saved)
            saved[0] = st
            saved[1] = en

    yield tuple(saved)
[ "def", "merge_date_ranges", "(", "dates", ")", ":", "if", "not", "dates", ":", "return", "sorted_dates", "=", "sorted", "(", "[", "sorted", "(", "t", ")", "for", "t", "in", "dates", "]", ")", "saved", "=", "list", "(", "sorted_dates", "[", "0", "]", ")", "for", "st", ",", "en", "in", "sorted_dates", ":", "if", "st", "<", "MIN_PERIOD_DATE", "or", "st", ">", "MAX_PERIOD_DATE", ":", "raise", "ValueError", "(", "\"start date %s is out of bounds\"", "%", "str", "(", "st", ")", ")", "if", "en", "<", "MIN_PERIOD_DATE", "or", "en", ">", "MAX_PERIOD_DATE", ":", "raise", "ValueError", "(", "\"end date %s is out of bounds\"", "%", "str", "(", "en", ")", ")", "if", "st", "<=", "saved", "[", "1", "]", ":", "if", "saved", "[", "0", "]", "==", "MIN_PERIOD_DATE", ":", "saved", "[", "0", "]", "=", "st", "if", "MAX_PERIOD_DATE", "in", "(", "en", ",", "saved", "[", "1", "]", ")", ":", "saved", "[", "1", "]", "=", "min", "(", "saved", "[", "1", "]", ",", "en", ")", "else", ":", "saved", "[", "1", "]", "=", "max", "(", "saved", "[", "1", "]", ",", "en", ")", "else", ":", "yield", "tuple", "(", "saved", ")", "saved", "[", "0", "]", "=", "st", "saved", "[", "1", "]", "=", "en", "yield", "tuple", "(", "saved", ")" ]
Merge date ranges.

    Generator that merges overlapped date ranges.

    Default init and end dates (1900-01-01 and 2100-01-01) are considered
    range limits and will be removed when a set of ranges overlap. For example:

     * [(1900-01-01, 2010-01-01), (2008-01-01, 2100-01-01)]
       --> (2008-01-01, 2010-01-01)
     * [(1900-01-01, 2010-01-01), (2008-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
       --> (2008-01-01, 2010-01-01),(2010-01-02, 2100-01-01)
     * [(1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
       --> (1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)

    The condition MIN_PERIOD_DATE <= dt <= MAX_PERIOD_DATE must be true for
    each date. Otherwise, the generator will raise a ValueError exception.

    This code is based on samplebias' answer to StackOverflow's question
    "Merging a list of time-range tuples that have overlapping time-ranges"
    (http://stackoverflow.com/questions/5679638).

    :param dates: sequence of date ranges where each range is a
        (st_date, en_date) tuple

    :raises ValueError: when a value of the date range is out of bounds
[ "Merge", "date", "ranges", "." ]
python
train
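A usage sketch reproducing the first docstring example; the concrete sentinel values for `MIN_PERIOD_DATE` and `MAX_PERIOD_DATE` are assumptions taken from the docstring (1900-01-01 and 2100-01-01):

```python
from datetime import datetime

from sortinghat.utils import merge_date_ranges

MIN_PERIOD_DATE = datetime(1900, 1, 1)
MAX_PERIOD_DATE = datetime(2100, 1, 1)

ranges = [(MIN_PERIOD_DATE, datetime(2010, 1, 1)),
          (datetime(2008, 1, 1), MAX_PERIOD_DATE)]
print(list(merge_date_ranges(ranges)))
# [(datetime.datetime(2008, 1, 1, 0, 0), datetime.datetime(2010, 1, 1, 0, 0))]
```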
aiortc/aiortc
aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcdtlstransport.py#L378-L459
async def start(self, remoteParameters): """ Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param: remoteParameters: An :class:`RTCDtlsParameters`. """ assert self._state == State.NEW assert len(remoteParameters.fingerprints) if self.transport.role == 'controlling': self._role = 'server' lib.SSL_set_accept_state(self.ssl) else: self._role = 'client' lib.SSL_set_connect_state(self.ssl) self._set_state(State.CONNECTING) try: while not self.encrypted: result = lib.SSL_do_handshake(self.ssl) await self._write_ssl() if result > 0: self.encrypted = True break error = lib.SSL_get_error(self.ssl, result) if error == lib.SSL_ERROR_WANT_READ: await self._recv_next() else: self.__log_debug('x DTLS handshake failed (error %d)', error) for info in get_error_queue(): self.__log_debug('x %s', ':'.join(info)) self._set_state(State.FAILED) return except ConnectionError: self.__log_debug('x DTLS handshake failed (connection error)') self._set_state(State.FAILED) return # check remote fingerprint x509 = lib.SSL_get_peer_certificate(self.ssl) remote_fingerprint = certificate_digest(x509) fingerprint_is_valid = False for f in remoteParameters.fingerprints: if f.algorithm.lower() == 'sha-256' and f.value.lower() == remote_fingerprint.lower(): fingerprint_is_valid = True break if not fingerprint_is_valid: self.__log_debug('x DTLS handshake failed (fingerprint mismatch)') self._set_state(State.FAILED) return # generate keying material buf = ffi.new('unsigned char[]', 2 * (SRTP_KEY_LEN + SRTP_SALT_LEN)) extractor = b'EXTRACTOR-dtls_srtp' _openssl_assert(lib.SSL_export_keying_material( self.ssl, buf, len(buf), extractor, len(extractor), ffi.NULL, 0, 0) == 1) view = ffi.buffer(buf) if self._role == 'server': srtp_tx_key = get_srtp_key_salt(view, 1) srtp_rx_key = get_srtp_key_salt(view, 0) else: srtp_tx_key = get_srtp_key_salt(view, 0) srtp_rx_key = get_srtp_key_salt(view, 1) rx_policy = Policy(key=srtp_rx_key, ssrc_type=Policy.SSRC_ANY_INBOUND) rx_policy.allow_repeat_tx = True rx_policy.window_size = 1024 self._rx_srtp = Session(rx_policy) tx_policy = Policy(key=srtp_tx_key, ssrc_type=Policy.SSRC_ANY_OUTBOUND) tx_policy.allow_repeat_tx = True tx_policy.window_size = 1024 self._tx_srtp = Session(tx_policy) # start data pump self.__log_debug('- DTLS handshake complete') self._set_state(State.CONNECTED) self._task = asyncio.ensure_future(self.__run())
[ "async", "def", "start", "(", "self", ",", "remoteParameters", ")", ":", "assert", "self", ".", "_state", "==", "State", ".", "NEW", "assert", "len", "(", "remoteParameters", ".", "fingerprints", ")", "if", "self", ".", "transport", ".", "role", "==", "'controlling'", ":", "self", ".", "_role", "=", "'server'", "lib", ".", "SSL_set_accept_state", "(", "self", ".", "ssl", ")", "else", ":", "self", ".", "_role", "=", "'client'", "lib", ".", "SSL_set_connect_state", "(", "self", ".", "ssl", ")", "self", ".", "_set_state", "(", "State", ".", "CONNECTING", ")", "try", ":", "while", "not", "self", ".", "encrypted", ":", "result", "=", "lib", ".", "SSL_do_handshake", "(", "self", ".", "ssl", ")", "await", "self", ".", "_write_ssl", "(", ")", "if", "result", ">", "0", ":", "self", ".", "encrypted", "=", "True", "break", "error", "=", "lib", ".", "SSL_get_error", "(", "self", ".", "ssl", ",", "result", ")", "if", "error", "==", "lib", ".", "SSL_ERROR_WANT_READ", ":", "await", "self", ".", "_recv_next", "(", ")", "else", ":", "self", ".", "__log_debug", "(", "'x DTLS handshake failed (error %d)'", ",", "error", ")", "for", "info", "in", "get_error_queue", "(", ")", ":", "self", ".", "__log_debug", "(", "'x %s'", ",", "':'", ".", "join", "(", "info", ")", ")", "self", ".", "_set_state", "(", "State", ".", "FAILED", ")", "return", "except", "ConnectionError", ":", "self", ".", "__log_debug", "(", "'x DTLS handshake failed (connection error)'", ")", "self", ".", "_set_state", "(", "State", ".", "FAILED", ")", "return", "# check remote fingerprint", "x509", "=", "lib", ".", "SSL_get_peer_certificate", "(", "self", ".", "ssl", ")", "remote_fingerprint", "=", "certificate_digest", "(", "x509", ")", "fingerprint_is_valid", "=", "False", "for", "f", "in", "remoteParameters", ".", "fingerprints", ":", "if", "f", ".", "algorithm", ".", "lower", "(", ")", "==", "'sha-256'", "and", "f", ".", "value", ".", "lower", "(", ")", "==", "remote_fingerprint", ".", "lower", "(", ")", ":", "fingerprint_is_valid", "=", "True", "break", "if", "not", "fingerprint_is_valid", ":", "self", ".", "__log_debug", "(", "'x DTLS handshake failed (fingerprint mismatch)'", ")", "self", ".", "_set_state", "(", "State", ".", "FAILED", ")", "return", "# generate keying material", "buf", "=", "ffi", ".", "new", "(", "'unsigned char[]'", ",", "2", "*", "(", "SRTP_KEY_LEN", "+", "SRTP_SALT_LEN", ")", ")", "extractor", "=", "b'EXTRACTOR-dtls_srtp'", "_openssl_assert", "(", "lib", ".", "SSL_export_keying_material", "(", "self", ".", "ssl", ",", "buf", ",", "len", "(", "buf", ")", ",", "extractor", ",", "len", "(", "extractor", ")", ",", "ffi", ".", "NULL", ",", "0", ",", "0", ")", "==", "1", ")", "view", "=", "ffi", ".", "buffer", "(", "buf", ")", "if", "self", ".", "_role", "==", "'server'", ":", "srtp_tx_key", "=", "get_srtp_key_salt", "(", "view", ",", "1", ")", "srtp_rx_key", "=", "get_srtp_key_salt", "(", "view", ",", "0", ")", "else", ":", "srtp_tx_key", "=", "get_srtp_key_salt", "(", "view", ",", "0", ")", "srtp_rx_key", "=", "get_srtp_key_salt", "(", "view", ",", "1", ")", "rx_policy", "=", "Policy", "(", "key", "=", "srtp_rx_key", ",", "ssrc_type", "=", "Policy", ".", "SSRC_ANY_INBOUND", ")", "rx_policy", ".", "allow_repeat_tx", "=", "True", "rx_policy", ".", "window_size", "=", "1024", "self", ".", "_rx_srtp", "=", "Session", "(", "rx_policy", ")", "tx_policy", "=", "Policy", "(", "key", "=", "srtp_tx_key", ",", "ssrc_type", "=", "Policy", ".", "SSRC_ANY_OUTBOUND", ")", "tx_policy", ".", "allow_repeat_tx", "=", "True", 
"tx_policy", ".", "window_size", "=", "1024", "self", ".", "_tx_srtp", "=", "Session", "(", "tx_policy", ")", "# start data pump", "self", ".", "__log_debug", "(", "'- DTLS handshake complete'", ")", "self", ".", "_set_state", "(", "State", ".", "CONNECTED", ")", "self", ".", "_task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "__run", "(", ")", ")" ]
Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param: remoteParameters: An :class:`RTCDtlsParameters`.
[ "Start", "DTLS", "transport", "negotiation", "with", "the", "parameters", "of", "the", "remote", "DTLS", "transport", "." ]
python
train
kislyuk/argcomplete
argcomplete/my_shlex.py
https://github.com/kislyuk/argcomplete/blob/f9eb0a2354d9e6153f687c463df98c16251d97ed/argcomplete/my_shlex.py#L108-L115
def pop_source(self): "Pop the input source stack." self.instream.close() (self.infile, self.instream, self.lineno) = self.filestack.popleft() if self.debug: print('shlex: popping to %s, line %d' \ % (self.instream, self.lineno)) self.state = ' '
[ "def", "pop_source", "(", "self", ")", ":", "self", ".", "instream", ".", "close", "(", ")", "(", "self", ".", "infile", ",", "self", ".", "instream", ",", "self", ".", "lineno", ")", "=", "self", ".", "filestack", ".", "popleft", "(", ")", "if", "self", ".", "debug", ":", "print", "(", "'shlex: popping to %s, line %d'", "%", "(", "self", ".", "instream", ",", "self", ".", "lineno", ")", ")", "self", ".", "state", "=", "' '" ]
Pop the input source stack.
[ "Pop", "the", "input", "source", "stack", "." ]
python
train
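The stdlib `shlex`, which this vendored `my_shlex` mirrors, exposes the same source stack through `push_source`; when the pushed stream runs dry, `pop_source` transparently resumes the outer one:

```python
import io
import shlex

lexer = shlex.shlex('outer1 outer2', posix=True)
lexer.whitespace_split = True
lexer.push_source(io.StringIO('inner1 inner2'), newfile='<inner>')

# The inner source is consumed first, then pop_source restores the outer one.
print([lexer.get_token() for _ in range(4)])
# ['inner1', 'inner2', 'outer1', 'outer2']
```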
avelino/bottle-auth
bottle_auth/core/httputil.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/httputil.py#L88-L91
def get_list(self, name): """Returns all values for the given header as a list.""" norm_name = HTTPHeaders._normalize_name(name) return self._as_list.get(norm_name, [])
[ "def", "get_list", "(", "self", ",", "name", ")", ":", "norm_name", "=", "HTTPHeaders", ".", "_normalize_name", "(", "name", ")", "return", "self", ".", "_as_list", ".", "get", "(", "norm_name", ",", "[", "]", ")" ]
Returns all values for the given header as a list.
[ "Returns", "all", "values", "for", "the", "given", "header", "as", "a", "list", "." ]
python
test
angr/angr
angr/state_plugins/symbolic_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/symbolic_memory.py#L96-L114
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument """ Merge this SimMemory with the other SimMemory """ changed_bytes = self._changes_to_merge(others) l.info("Merging %d bytes", len(changed_bytes)) l.info("... %s has changed bytes %s", self.id, changed_bytes) self.read_strategies = self._merge_strategies(self.read_strategies, *[ o.read_strategies for o in others ]) self.write_strategies = self._merge_strategies(self.write_strategies, *[ o.write_strategies for o in others ]) merged_bytes = self._merge(others, changed_bytes, merge_conditions=merge_conditions) return len(merged_bytes) > 0
[ "def", "merge", "(", "self", ",", "others", ",", "merge_conditions", ",", "common_ancestor", "=", "None", ")", ":", "# pylint: disable=unused-argument", "changed_bytes", "=", "self", ".", "_changes_to_merge", "(", "others", ")", "l", ".", "info", "(", "\"Merging %d bytes\"", ",", "len", "(", "changed_bytes", ")", ")", "l", ".", "info", "(", "\"... %s has changed bytes %s\"", ",", "self", ".", "id", ",", "changed_bytes", ")", "self", ".", "read_strategies", "=", "self", ".", "_merge_strategies", "(", "self", ".", "read_strategies", ",", "*", "[", "o", ".", "read_strategies", "for", "o", "in", "others", "]", ")", "self", ".", "write_strategies", "=", "self", ".", "_merge_strategies", "(", "self", ".", "write_strategies", ",", "*", "[", "o", ".", "write_strategies", "for", "o", "in", "others", "]", ")", "merged_bytes", "=", "self", ".", "_merge", "(", "others", ",", "changed_bytes", ",", "merge_conditions", "=", "merge_conditions", ")", "return", "len", "(", "merged_bytes", ")", ">", "0" ]
Merge this SimMemory with the other SimMemory
[ "Merge", "this", "SimMemory", "with", "the", "other", "SimMemory" ]
python
train
tcalmant/ipopo
pelix/shell/report.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/report.py#L576-L620
def threads_list(): """ Lists the active threads and their current code line """ results = {} # pylint: disable=W0212 try: # Extract frames frames = sys._current_frames() # Get the thread ID -> Thread mapping names = threading._active.copy() except AttributeError: # Extraction not available return results # Sort by thread ID thread_ids = sorted(frames.keys()) for thread_id in thread_ids: # Get the corresponding stack stack = frames[thread_id] # Try to get the thread name try: name = names[thread_id].name except KeyError: name = "<unknown>" trace_lines = [] frame = stack while frame is not None: # Store the line information trace_lines.append(format_frame_info(frame)) # Previous frame... frame = frame.f_back # Construct the thread description results[thread_id] = { "name": name, "stacktrace": "\n".join(reversed(trace_lines)), } return results
[ "def", "threads_list", "(", ")", ":", "results", "=", "{", "}", "# pylint: disable=W0212", "try", ":", "# Extract frames", "frames", "=", "sys", ".", "_current_frames", "(", ")", "# Get the thread ID -> Thread mapping", "names", "=", "threading", ".", "_active", ".", "copy", "(", ")", "except", "AttributeError", ":", "# Extraction not available", "return", "results", "# Sort by thread ID", "thread_ids", "=", "sorted", "(", "frames", ".", "keys", "(", ")", ")", "for", "thread_id", "in", "thread_ids", ":", "# Get the corresponding stack", "stack", "=", "frames", "[", "thread_id", "]", "# Try to get the thread name", "try", ":", "name", "=", "names", "[", "thread_id", "]", ".", "name", "except", "KeyError", ":", "name", "=", "\"<unknown>\"", "trace_lines", "=", "[", "]", "frame", "=", "stack", "while", "frame", "is", "not", "None", ":", "# Store the line information", "trace_lines", ".", "append", "(", "format_frame_info", "(", "frame", ")", ")", "# Previous frame...", "frame", "=", "frame", ".", "f_back", "# Construct the thread description", "results", "[", "thread_id", "]", "=", "{", "\"name\"", ":", "name", ",", "\"stacktrace\"", ":", "\"\\n\"", ".", "join", "(", "reversed", "(", "trace_lines", ")", ")", ",", "}", "return", "results" ]
Lists the active threads and their current code line
[ "Lists", "the", "active", "threads", "and", "their", "current", "code", "line" ]
python
train
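The same frame walk can be written with the `traceback` module doing the formatting, using the identical `sys._current_frames()` hook; a minimal standalone sketch:

```python
import sys
import threading
import traceback

def dump_threads():
    names = {t.ident: t.name for t in threading.enumerate()}
    for thread_id, frame in sys._current_frames().items():
        print('--- %s (%s)' % (names.get(thread_id, '<unknown>'), thread_id))
        print(''.join(traceback.format_stack(frame)), end='')

dump_threads()
```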
horazont/aioxmpp
aioxmpp/xso/query.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xso/query.py#L168-L184
def eval_bool(self, expr):
        """
        Evaluate the expression `expr` and return the truthiness of its
        result. A result of an expression is said to be true if it contains
        at least one value. It has the same semantics as :func:`bool` on
        sequences.
        """
        result = expr.eval(self)
        iterator = iter(result)
        try:
            next(iterator)
        except StopIteration:
            return False
        else:
            return True
        finally:
            if hasattr(iterator, "close"):
                iterator.close()
[ "def", "eval_bool", "(", "self", ",", "expr", ")", ":", "result", "=", "expr", ".", "eval", "(", "self", ")", "iterator", "=", "iter", "(", "result", ")", "try", ":", "next", "(", "iterator", ")", "except", "StopIteration", ":", "return", "False", "else", ":", "return", "True", "finally", ":", "if", "hasattr", "(", "iterator", ",", "\"close\"", ")", ":", "iterator", ".", "close", "(", ")" ]
Evaluate the expression `expr` and return the truthiness of its result. A result of an expression is said to be true if it contains at least one value. It has the same semantics as :func:`bool` on sequences.
[ "Evaluate", "the", "expression", "expr", "and", "return", "the", "truthness", "of", "its", "result", ".", "A", "result", "of", "an", "expression", "is", "said", "to", "be", "true", "if", "it", "contains", "at", "least", "one", "value", ".", "It", "has", "the", "same", "semantics", "as", ":", "func", ":", "bool", "on", "sequences", ".", "s" ]
python
train
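The try/next dance is the classic "does this lazy sequence yield anything?" test, consuming at most one element; the idea in isolation:

```python
def is_nonempty(iterable):
    """True iff the iterable yields at least one value (consumes at most one)."""
    iterator = iter(iterable)
    try:
        next(iterator)
    except StopIteration:
        return False
    return True

print(is_nonempty(x for x in range(10) if x > 8))   # True
print(is_nonempty(x for x in range(10) if x > 9))   # False
```

The original additionally calls `close()` on the iterator in a `finally` block, which matters when the lazy result holds resources that should be released early.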
Miserlou/django-knockout-modeler
knockout_modeler/ko.py
https://github.com/Miserlou/django-knockout-modeler/blob/714d21cc5ed008f132cea01dbae9f214c2bf1b76/knockout_modeler/ko.py#L190-L205
def ko(queryset, field_names=None): """ Converts a Django QuerySet into a complete Knockout implementation. """ try: koDataString = ko_data(queryset, field_names) koModelString = ko_model(queryset[0].__class__.__name__, field_names, data=True) koBindingsString = ko_bindings(queryset[0]) koString = koDataString + '\n' + koModelString + '\n' + koBindingsString return koString except Exception as e: logger.error(e) return ''
[ "def", "ko", "(", "queryset", ",", "field_names", "=", "None", ")", ":", "try", ":", "koDataString", "=", "ko_data", "(", "queryset", ",", "field_names", ")", "koModelString", "=", "ko_model", "(", "queryset", "[", "0", "]", ".", "__class__", ".", "__name__", ",", "field_names", ",", "data", "=", "True", ")", "koBindingsString", "=", "ko_bindings", "(", "queryset", "[", "0", "]", ")", "koString", "=", "koDataString", "+", "'\\n'", "+", "koModelString", "+", "'\\n'", "+", "koBindingsString", "return", "koString", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "e", ")", "return", "''" ]
Converts a Django QuerySet into a complete Knockout implementation.
[ "Converts", "a", "Django", "QuerySet", "into", "a", "complete", "Knockout", "implementation", "." ]
python
train
solocompt/plugs-core
plugs_core/utils.py
https://github.com/solocompt/plugs-core/blob/19fd23101fcfdabe657485f0a22e6b63e2b44f9d/plugs_core/utils.py#L27-L42
def get_db_distinct(queryset, field, func, **params):
    """
    Checks if a field / value pair exists in database
    and continues generating values until it finds
    one that does not exist

    func is the function that generates values
    and params is the parameters that function takes
    """
    while True:
        try:
            value = func(**params)
            queryset.get(**{field: value})
        except ObjectDoesNotExist:
            break
    return value
[ "def", "get_db_distinct", "(", "queryset", ",", "field", ",", "func", ",", "*", "*", "params", ")", ":", "while", "True", ":", "try", ":", "value", "=", "func", "(", "*", "*", "params", ")", "queryset", ".", "get", "(", "*", "*", "{", "field", ":", "value", "}", ")", "except", "ObjectDoesNotExist", ":", "break", "return", "value" ]
Checks if a field / value pair exists in database and continues generating values until it finds one that does not exist

    func is the function that generates values and params is the parameters that function takes
[ "Checks", "if", "a", "field", "/", "value", "pair", "exists", "in", "database", "and", "continues", "generating", "values", "until", "it", "finds", "a", "one", "that", "does", "not", "exist" ]
python
train
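A hedged usage sketch: generating a unique code for a Django model. `Coupon` and `make_code` are hypothetical; only the call shape comes from the function above.

```python
import random
import string

def make_code(length):
    """Generate a random candidate value (hypothetical helper)."""
    return ''.join(random.choices(string.ascii_uppercase, k=length))

# Hypothetical Django usage; Coupon is assumed to have a 'code' field:
# code = get_db_distinct(Coupon.objects.all(), 'code', make_code, length=8)
```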
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/api.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/api.py#L61-L91
def route(self, resource, view, *urls, **kwargs): """Create an api view. :param Resource resource: a resource class inherited from flask_rest_jsonapi.resource.Resource :param str view: the view name :param list urls: the urls of the view :param dict kwargs: additional options of the route """ resource.view = view url_rule_options = kwargs.get('url_rule_options') or dict() view_func = resource.as_view(view) if 'blueprint' in kwargs: resource.view = '.'.join([kwargs['blueprint'].name, resource.view]) for url in urls: kwargs['blueprint'].add_url_rule(url, view_func=view_func, **url_rule_options) elif self.blueprint is not None: resource.view = '.'.join([self.blueprint.name, resource.view]) for url in urls: self.blueprint.add_url_rule(url, view_func=view_func, **url_rule_options) elif self.app is not None: for url in urls: self.app.add_url_rule(url, view_func=view_func, **url_rule_options) else: self.resources.append({'resource': resource, 'view': view, 'urls': urls, 'url_rule_options': url_rule_options}) self.resource_registry.append(resource)
[ "def", "route", "(", "self", ",", "resource", ",", "view", ",", "*", "urls", ",", "*", "*", "kwargs", ")", ":", "resource", ".", "view", "=", "view", "url_rule_options", "=", "kwargs", ".", "get", "(", "'url_rule_options'", ")", "or", "dict", "(", ")", "view_func", "=", "resource", ".", "as_view", "(", "view", ")", "if", "'blueprint'", "in", "kwargs", ":", "resource", ".", "view", "=", "'.'", ".", "join", "(", "[", "kwargs", "[", "'blueprint'", "]", ".", "name", ",", "resource", ".", "view", "]", ")", "for", "url", "in", "urls", ":", "kwargs", "[", "'blueprint'", "]", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "view_func", ",", "*", "*", "url_rule_options", ")", "elif", "self", ".", "blueprint", "is", "not", "None", ":", "resource", ".", "view", "=", "'.'", ".", "join", "(", "[", "self", ".", "blueprint", ".", "name", ",", "resource", ".", "view", "]", ")", "for", "url", "in", "urls", ":", "self", ".", "blueprint", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "view_func", ",", "*", "*", "url_rule_options", ")", "elif", "self", ".", "app", "is", "not", "None", ":", "for", "url", "in", "urls", ":", "self", ".", "app", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "view_func", ",", "*", "*", "url_rule_options", ")", "else", ":", "self", ".", "resources", ".", "append", "(", "{", "'resource'", ":", "resource", ",", "'view'", ":", "view", ",", "'urls'", ":", "urls", ",", "'url_rule_options'", ":", "url_rule_options", "}", ")", "self", ".", "resource_registry", ".", "append", "(", "resource", ")" ]
Create an api view. :param Resource resource: a resource class inherited from flask_rest_jsonapi.resource.Resource :param str view: the view name :param list urls: the urls of the view :param dict kwargs: additional options of the route
[ "Create", "an", "api", "view", "." ]
python
train
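Registration then looks like this (a sketch; a real resource would also define `schema` and `data_layer`, omitted here):

```python
from flask import Flask
from flask_rest_jsonapi import Api, ResourceList

app = Flask(__name__)
api = Api(app)

class PersonList(ResourceList):
    pass  # schema and data_layer omitted in this sketch

# view name 'person_list', served at /persons
api.route(PersonList, 'person_list', '/persons')
```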
janpipek/physt
physt/binnings.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L182-L192
def bins(self): """Bins in the wider format (as edge pairs) Returns ------- bins: np.ndarray shape=(bin_count, 2) """ if self._bins is None: self._bins = make_bin_array(self.numpy_bins) return self._bins
[ "def", "bins", "(", "self", ")", ":", "if", "self", ".", "_bins", "is", "None", ":", "self", ".", "_bins", "=", "make_bin_array", "(", "self", ".", "numpy_bins", ")", "return", "self", ".", "_bins" ]
Bins in the wider format (as edge pairs) Returns ------- bins: np.ndarray shape=(bin_count, 2)
[ "Bins", "in", "the", "wider", "format", "(", "as", "edge", "pairs", ")" ]
python
train
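`bins` follows the compute-once, cache-forever property idiom (presumably a `@property` in the source, since extraction starts at the `def`). The pattern in isolation, with a plain-list stand-in for `make_bin_array`:

```python
class Binning:
    def __init__(self, numpy_bins):
        self.numpy_bins = numpy_bins
        self._bins = None

    @property
    def bins(self):
        if self._bins is None:  # computed lazily on first access, then reused
            self._bins = list(zip(self.numpy_bins, self.numpy_bins[1:]))
        return self._bins

b = Binning([0, 1, 2, 3])
print(b.bins)  # [(0, 1), (1, 2), (2, 3)]
```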
CalebBell/thermo
thermo/phase_change.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/phase_change.py#L1094-L1155
def load_all_methods(self): r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, and :obj:`all_methods` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. ''' methods = [] Tmins, Tmaxs = [], [] if has_CoolProp and self.CASRN in coolprop_dict: methods.append(COOLPROP) self.CP_f = coolprop_fluids[self.CASRN] Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc) if self.CASRN in _VDISaturationDict: methods.append(VDI_TABULAR) Ts, props = VDI_tabular_data(self.CASRN, 'Hvap') self.VDI_Tmin = Ts[0] self.VDI_Tmax = Ts[-1] self.tabular_data[VDI_TABULAR] = (Ts, props) Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax) if self.CASRN in Alibakhshi_Cs.index and self.Tc: methods.append(ALIBAKHSHI) self.Alibakhshi_C = float(Alibakhshi_Cs.at[self.CASRN, 'C']) Tmaxs.append( max(self.Tc-100., 0) ) if self.CASRN in CRCHvap_data.index and not np.isnan(CRCHvap_data.at[self.CASRN, 'HvapTb']): methods.append(CRC_HVAP_TB) self.CRC_HVAP_TB_Tb = float(CRCHvap_data.at[self.CASRN, 'Tb']) self.CRC_HVAP_TB_Hvap = float(CRCHvap_data.at[self.CASRN, 'HvapTb']) if self.CASRN in CRCHvap_data.index and not np.isnan(CRCHvap_data.at[self.CASRN, 'Hvap298']): methods.append(CRC_HVAP_298) self.CRC_HVAP_298 = float(CRCHvap_data.at[self.CASRN, 'Hvap298']) if self.CASRN in GharagheiziHvap_data.index: methods.append(GHARAGHEIZI_HVAP_298) self.GHARAGHEIZI_HVAP_298_Hvap = float(GharagheiziHvap_data.at[self.CASRN, 'Hvap298']) if all((self.Tc, self.omega)): methods.extend(self.CSP_methods) Tmaxs.append(self.Tc); Tmins.append(0) if all((self.Tc, self.Pc)): methods.append(CLAPEYRON) Tmaxs.append(self.Tc); Tmins.append(0) if all((self.Tb, self.Tc, self.Pc)): methods.extend(self.boiling_methods) Tmaxs.append(self.Tc); Tmins.append(0) if self.CASRN in Perrys2_150.index: methods.append(DIPPR_PERRY_8E) _, Tc, C1, C2, C3, C4, self.Perrys2_150_Tmin, self.Perrys2_150_Tmax = _Perrys2_150_values[Perrys2_150.index.get_loc(self.CASRN)].tolist() self.Perrys2_150_coeffs = [Tc, C1, C2, C3, C4] Tmins.append(self.Perrys2_150_Tmin); Tmaxs.append(self.Perrys2_150_Tmax) if self.CASRN in VDI_PPDS_4.index: _, MW, Tc, A, B, C, D, E = _VDI_PPDS_4_values[VDI_PPDS_4.index.get_loc(self.CASRN)].tolist() self.VDI_PPDS_coeffs = [A, B, C, D, E] self.VDI_PPDS_Tc = Tc self.VDI_PPDS_MW = MW methods.append(VDI_PPDS) Tmaxs.append(self.VDI_PPDS_Tc); self.all_methods = set(methods) if Tmins and Tmaxs: self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
[ "def", "load_all_methods", "(", "self", ")", ":", "methods", "=", "[", "]", "Tmins", ",", "Tmaxs", "=", "[", "]", ",", "[", "]", "if", "has_CoolProp", "and", "self", ".", "CASRN", "in", "coolprop_dict", ":", "methods", ".", "append", "(", "COOLPROP", ")", "self", ".", "CP_f", "=", "coolprop_fluids", "[", "self", ".", "CASRN", "]", "Tmins", ".", "append", "(", "self", ".", "CP_f", ".", "Tt", ")", "Tmaxs", ".", "append", "(", "self", ".", "CP_f", ".", "Tc", ")", "if", "self", ".", "CASRN", "in", "_VDISaturationDict", ":", "methods", ".", "append", "(", "VDI_TABULAR", ")", "Ts", ",", "props", "=", "VDI_tabular_data", "(", "self", ".", "CASRN", ",", "'Hvap'", ")", "self", ".", "VDI_Tmin", "=", "Ts", "[", "0", "]", "self", ".", "VDI_Tmax", "=", "Ts", "[", "-", "1", "]", "self", ".", "tabular_data", "[", "VDI_TABULAR", "]", "=", "(", "Ts", ",", "props", ")", "Tmins", ".", "append", "(", "self", ".", "VDI_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_Tmax", ")", "if", "self", ".", "CASRN", "in", "Alibakhshi_Cs", ".", "index", "and", "self", ".", "Tc", ":", "methods", ".", "append", "(", "ALIBAKHSHI", ")", "self", ".", "Alibakhshi_C", "=", "float", "(", "Alibakhshi_Cs", ".", "at", "[", "self", ".", "CASRN", ",", "'C'", "]", ")", "Tmaxs", ".", "append", "(", "max", "(", "self", ".", "Tc", "-", "100.", ",", "0", ")", ")", "if", "self", ".", "CASRN", "in", "CRCHvap_data", ".", "index", "and", "not", "np", ".", "isnan", "(", "CRCHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'HvapTb'", "]", ")", ":", "methods", ".", "append", "(", "CRC_HVAP_TB", ")", "self", ".", "CRC_HVAP_TB_Tb", "=", "float", "(", "CRCHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'Tb'", "]", ")", "self", ".", "CRC_HVAP_TB_Hvap", "=", "float", "(", "CRCHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'HvapTb'", "]", ")", "if", "self", ".", "CASRN", "in", "CRCHvap_data", ".", "index", "and", "not", "np", ".", "isnan", "(", "CRCHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'Hvap298'", "]", ")", ":", "methods", ".", "append", "(", "CRC_HVAP_298", ")", "self", ".", "CRC_HVAP_298", "=", "float", "(", "CRCHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'Hvap298'", "]", ")", "if", "self", ".", "CASRN", "in", "GharagheiziHvap_data", ".", "index", ":", "methods", ".", "append", "(", "GHARAGHEIZI_HVAP_298", ")", "self", ".", "GHARAGHEIZI_HVAP_298_Hvap", "=", "float", "(", "GharagheiziHvap_data", ".", "at", "[", "self", ".", "CASRN", ",", "'Hvap298'", "]", ")", "if", "all", "(", "(", "self", ".", "Tc", ",", "self", ".", "omega", ")", ")", ":", "methods", ".", "extend", "(", "self", ".", "CSP_methods", ")", "Tmaxs", ".", "append", "(", "self", ".", "Tc", ")", "Tmins", ".", "append", "(", "0", ")", "if", "all", "(", "(", "self", ".", "Tc", ",", "self", ".", "Pc", ")", ")", ":", "methods", ".", "append", "(", "CLAPEYRON", ")", "Tmaxs", ".", "append", "(", "self", ".", "Tc", ")", "Tmins", ".", "append", "(", "0", ")", "if", "all", "(", "(", "self", ".", "Tb", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ")", ")", ":", "methods", ".", "extend", "(", "self", ".", "boiling_methods", ")", "Tmaxs", ".", "append", "(", "self", ".", "Tc", ")", "Tmins", ".", "append", "(", "0", ")", "if", "self", ".", "CASRN", "in", "Perrys2_150", ".", "index", ":", "methods", ".", "append", "(", "DIPPR_PERRY_8E", ")", "_", ",", "Tc", ",", "C1", ",", "C2", ",", "C3", ",", "C4", ",", "self", ".", "Perrys2_150_Tmin", ",", "self", ".", "Perrys2_150_Tmax", "=", "_Perrys2_150_values", "[", "Perrys2_150", ".", 
"index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "Perrys2_150_coeffs", "=", "[", "Tc", ",", "C1", ",", "C2", ",", "C3", ",", "C4", "]", "Tmins", ".", "append", "(", "self", ".", "Perrys2_150_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "Perrys2_150_Tmax", ")", "if", "self", ".", "CASRN", "in", "VDI_PPDS_4", ".", "index", ":", "_", ",", "MW", ",", "Tc", ",", "A", ",", "B", ",", "C", ",", "D", ",", "E", "=", "_VDI_PPDS_4_values", "[", "VDI_PPDS_4", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "VDI_PPDS_coeffs", "=", "[", "A", ",", "B", ",", "C", ",", "D", ",", "E", "]", "self", ".", "VDI_PPDS_Tc", "=", "Tc", "self", ".", "VDI_PPDS_MW", "=", "MW", "methods", ".", "append", "(", "VDI_PPDS", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_PPDS_Tc", ")", "self", ".", "all_methods", "=", "set", "(", "methods", ")", "if", "Tmins", "and", "Tmaxs", ":", "self", ".", "Tmin", ",", "self", ".", "Tmax", "=", "min", "(", "Tmins", ")", ",", "max", "(", "Tmaxs", ")" ]
r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, and :obj:`all_methods` as a set of methods for which the data exists. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
[ "r", "Method", "which", "picks", "out", "coefficients", "for", "the", "specified", "chemical", "from", "the", "various", "dictionaries", "and", "DataFrames", "storing", "it", ".", "All", "data", "is", "stored", "as", "attributes", ".", "This", "method", "also", "sets", ":", "obj", ":", "Tmin", ":", "obj", ":", "Tmax", "and", ":", "obj", ":", "all_methods", "as", "a", "set", "of", "methods", "for", "which", "the", "data", "exists", "for", "." ]
python
valid
istresearch/scrapy-cluster
rest/rest_service.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L347-L365
def _setup_redis(self):
        """Sets up the Redis client connection"""
        if not self.closed:
            try:
                self.logger.debug("Creating redis connection to host " +
                                  str(self.settings['REDIS_HOST']))
                self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'],
                                              port=self.settings['REDIS_PORT'],
                                              db=self.settings['REDIS_DB'])
                self.redis_conn.info()
                self.redis_connected = True
                self.logger.info("Successfully connected to redis")
            except KeyError as e:
                self.logger.error('Missing setting named ' + str(e),
                                   {'ex': traceback.format_exc()})
            except:
                self.logger.error("Couldn't initialize redis client.",
                                  {'ex': traceback.format_exc()})
                raise
[ "def", "_setup_redis", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "try", ":", "self", ".", "logger", ".", "debug", "(", "\"Creating redis connection to host \"", "+", "str", "(", "self", ".", "settings", "[", "'REDIS_HOST'", "]", ")", ")", "self", ".", "redis_conn", "=", "redis", ".", "StrictRedis", "(", "host", "=", "self", ".", "settings", "[", "'REDIS_HOST'", "]", ",", "port", "=", "self", ".", "settings", "[", "'REDIS_PORT'", "]", ",", "db", "=", "self", ".", "settings", "[", "'REDIS_DB'", "]", ")", "self", ".", "redis_conn", ".", "info", "(", ")", "self", ".", "redis_connected", "=", "True", "self", ".", "logger", ".", "info", "(", "\"Successfully connected to redis\"", ")", "except", "KeyError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "'Missing setting named '", "+", "str", "(", "e", ")", ",", "{", "'ex'", ":", "traceback", ".", "format_exc", "(", ")", "}", ")", "except", ":", "self", ".", "logger", ".", "error", "(", "\"Couldn't initialize redis client.\"", ",", "{", "'ex'", ":", "traceback", ".", "format_exc", "(", ")", "}", ")", "raise" ]
Sets up the Redis client connection
[ "Returns", "a", "Redis", "Client" ]
python
train
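The entry above wires the connect-and-verify pattern through class state and a logger; the sketch below restates it as a free function, a minimal sketch assuming placeholder host, port, and db values. Only redis.StrictRedis and the .info() round trip are taken from the entry itself.

import logging
import redis

logger = logging.getLogger(__name__)

def setup_redis(host="localhost", port=6379, db=0):
    # Placeholder connection parameters; swap in real settings.
    conn = redis.StrictRedis(host=host, port=port, db=db)
    try:
        conn.info()  # force a round trip so a bad host fails fast
    except redis.exceptions.ConnectionError:
        logger.error("Couldn't initialize redis client.")
        raise
    return conn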
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L850-L885
async def send_poll(self,
                        chat_id: typing.Union[base.Integer, base.String],
                        question: base.String,
                        options: typing.List[base.String],
                        disable_notification: typing.Optional[base.Boolean],
                        reply_to_message_id: typing.Union[base.Integer, None],
                        reply_markup: typing.Union[types.InlineKeyboardMarkup,
                                                   types.ReplyKeyboardMarkup,
                                                   types.ReplyKeyboardRemove,
                                                   types.ForceReply, None] = None) -> types.Message:
        """
        Use this method to send a native poll. A native poll can't be sent to a private chat.
        On success, the sent Message is returned.

        :param chat_id: Unique identifier for the target chat
            or username of the target channel (in the format @channelusername).
            A native poll can't be sent to a private chat.
        :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
        :param question: Poll question, 1-255 characters
        :type question: :obj:`base.String`
        :param options: List of answer options, 2-10 strings 1-100 characters each
        :type options: :obj:`typing.List[base.String]`
        :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
        :type disable_notification: :obj:`typing.Optional[Boolean]`
        :param reply_to_message_id: If the message is a reply, ID of the original message
        :type reply_to_message_id: :obj:`typing.Optional[Integer]`
        :param reply_markup: Additional interface options
        :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
            types.ReplyKeyboardRemove, types.ForceReply, None]`
        :return: On success, the sent Message is returned
        :rtype: :obj:`types.Message`
        """
        options = prepare_arg(options)
        payload = generate_payload(**locals())
        result = await self.request(api.Methods.SEND_POLL, payload)

        return types.Message(**result)
[ "async", "def", "send_poll", "(", "self", ",", "chat_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "base", ".", "String", "]", ",", "question", ":", "base", ".", "String", ",", "options", ":", "typing", ".", "List", "[", "base", ".", "String", "]", ",", "disable_notification", ":", "typing", ".", "Optional", "[", "base", ".", "Boolean", "]", ",", "reply_to_message_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", ",", "reply_markup", ":", "typing", ".", "Union", "[", "types", ".", "InlineKeyboardMarkup", ",", "types", ".", "ReplyKeyboardMarkup", ",", "types", ".", "ReplyKeyboardRemove", ",", "types", ".", "ForceReply", ",", "None", "]", "=", "None", ")", "->", "types", ".", "Message", ":", "options", "=", "prepare_arg", "(", "options", ")", "payload", "=", "generate_payload", "(", "*", "*", "locals", "(", ")", ")", "result", "=", "await", "self", ".", "request", "(", "api", ".", "Methods", ".", "SEND_POLL", ",", "payload", ")", "return", "types", ".", "Message", "(", "*", "*", "result", ")" ]
Use this method to send a native poll. A native poll can't be sent to a private chat.
On success, the sent Message is returned.

:param chat_id: Unique identifier for the target chat
    or username of the target channel (in the format @channelusername).
    A native poll can't be sent to a private chat.
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param question: Poll question, 1-255 characters
:type question: :obj:`base.String`
:param options: List of answer options, 2-10 strings 1-100 characters each
:type options: :obj:`typing.List[base.String]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Optional[Boolean]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Optional[Integer]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
    types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
[ "Use", "this", "method", "to", "send", "a", "native", "poll", ".", "A", "native", "poll", "can", "t", "be", "sent", "to", "a", "private", "chat", ".", "On", "success", "the", "sent", "Message", "is", "returned", "." ]
python
train
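A hedged usage sketch for the send_poll entry above. The token and chat id are placeholder assumptions, as is the asyncio scaffolding; note that this version of the signature gives disable_notification and reply_to_message_id no defaults, so the call passes them explicitly.

import asyncio
from aiogram import Bot

async def main():
    bot = Bot(token="123456:PLACEHOLDER-TOKEN")  # assumption: fake token
    message = await bot.send_poll(
        chat_id="@example_channel",  # polls can't target private chats
        question="Which release should ship first?",
        options=["v1.0", "v1.1", "v2.0-beta"],
        disable_notification=True,
        reply_to_message_id=None,
    )
    print(message.message_id)

asyncio.get_event_loop().run_until_complete(main())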
tensorflow/tensor2tensor
tensor2tensor/models/lstm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L486-L498
def lstm_area_attention_base(): """Hparams for LSTM with area attention.""" hparams = lstm_luong_attention() hparams.batch_size = 16384 hparams.num_hidden_layers = 2 hparams.hidden_size = 1024 hparams.num_heads = 4 hparams.dropout = 0.2 hparams.learning_rate = 0.1 hparams.max_area_width = 2 hparams.area_key_mode = "mean" hparams.area_value_mode = "sum" return hparams
[ "def", "lstm_area_attention_base", "(", ")", ":", "hparams", "=", "lstm_luong_attention", "(", ")", "hparams", ".", "batch_size", "=", "16384", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "num_heads", "=", "4", "hparams", ".", "dropout", "=", "0.2", "hparams", ".", "learning_rate", "=", "0.1", "hparams", ".", "max_area_width", "=", "2", "hparams", ".", "area_key_mode", "=", "\"mean\"", "hparams", ".", "area_value_mode", "=", "\"sum\"", "return", "hparams" ]
Hparams for LSTM with area attention.
[ "Hparams", "for", "LSTM", "with", "area", "attention", "." ]
python
train
wummel/linkchecker
linkcheck/network/iputil.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/network/iputil.py#L104-L111
def is_valid_ipv4 (ip): """ Return True if given ip is a valid IPv4 address. """ if not _ipv4_re.match(ip): return False a, b, c, d = [int(i) for i in ip.split(".")] return a <= 255 and b <= 255 and c <= 255 and d <= 255
[ "def", "is_valid_ipv4", "(", "ip", ")", ":", "if", "not", "_ipv4_re", ".", "match", "(", "ip", ")", ":", "return", "False", "a", ",", "b", ",", "c", ",", "d", "=", "[", "int", "(", "i", ")", "for", "i", "in", "ip", ".", "split", "(", "\".\"", ")", "]", "return", "a", "<=", "255", "and", "b", "<=", "255", "and", "c", "<=", "255", "and", "d", "<=", "255" ]
Return True if given ip is a valid IPv4 address.
[ "Return", "True", "if", "given", "ip", "is", "a", "valid", "IPv4", "address", "." ]
python
train
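The module-private _ipv4_re is not shown in the entry above, so the self-contained sketch below supplies a plausible pattern as an assumption; the two-step check (shape via regex, octet range via integer comparison) is the part taken from the code itself.

import re

# Assumed pattern; the real _ipv4_re in linkcheck may differ.
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")

def is_valid_ipv4(ip):
    if not _ipv4_re.match(ip):
        return False
    a, b, c, d = [int(i) for i in ip.split(".")]
    return a <= 255 and b <= 255 and c <= 255 and d <= 255

print(is_valid_ipv4("192.168.0.1"))  # True
print(is_valid_ipv4("256.1.1.1"))    # False: octet out of range
print(is_valid_ipv4("10.0.0"))       # False: regex rejects the shape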
trailofbits/manticore
manticore/native/cpu/abstractcpu.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/abstractcpu.py#L803-L812
def pop_int(self, force=False): """ Read a value from the stack and increment the stack pointer. :param force: whether to ignore memory permissions :return: Value read """ value = self.read_int(self.STACK, force=force) self.STACK += self.address_bit_size // 8 return value
[ "def", "pop_int", "(", "self", ",", "force", "=", "False", ")", ":", "value", "=", "self", ".", "read_int", "(", "self", ".", "STACK", ",", "force", "=", "force", ")", "self", ".", "STACK", "+=", "self", ".", "address_bit_size", "//", "8", "return", "value" ]
Read a value from the stack and increment the stack pointer. :param force: whether to ignore memory permissions :return: Value read
[ "Read", "a", "value", "from", "the", "stack", "and", "increment", "the", "stack", "pointer", "." ]
python
valid
hyperledger/sawtooth-core
cli/sawtooth_cli/rest_client.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/rest_client.py#L166-L209
def _submit_request(self, url, params=None, data=None, headers=None, method="GET"): """Submits the given request, and handles the errors appropriately. Args: url (str): the request to send. params (dict): params to be passed along to get/post data (bytes): the data to include in the request. headers (dict): the headers to include in the request. method (str): the method to use for the request, "POST" or "GET". Returns: tuple of (int, str): The response status code and the json parsed body, or the error message. Raises: `CliException`: If any issues occur with the URL. """ if headers is None: headers = {} if self._auth_header is not None: headers['Authorization'] = self._auth_header try: if method == 'POST': result = requests.post( url, params=params, data=data, headers=headers) elif method == 'GET': result = requests.get( url, params=params, data=data, headers=headers) result.raise_for_status() return (result.status_code, result.json()) except requests.exceptions.HTTPError as e: return (e.response.status_code, e.response.reason) except RemoteDisconnected as e: raise CliException(e) except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL) as e: raise CliException(e) except requests.exceptions.ConnectionError as e: raise CliException( ('Unable to connect to "{}": ' 'make sure URL is correct').format(self._base_url))
[ "def", "_submit_request", "(", "self", ",", "url", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "method", "=", "\"GET\"", ")", ":", "if", "headers", "is", "None", ":", "headers", "=", "{", "}", "if", "self", ".", "_auth_header", "is", "not", "None", ":", "headers", "[", "'Authorization'", "]", "=", "self", ".", "_auth_header", "try", ":", "if", "method", "==", "'POST'", ":", "result", "=", "requests", ".", "post", "(", "url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "elif", "method", "==", "'GET'", ":", "result", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "result", ".", "raise_for_status", "(", ")", "return", "(", "result", ".", "status_code", ",", "result", ".", "json", "(", ")", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "return", "(", "e", ".", "response", ".", "status_code", ",", "e", ".", "response", ".", "reason", ")", "except", "RemoteDisconnected", "as", "e", ":", "raise", "CliException", "(", "e", ")", "except", "(", "requests", ".", "exceptions", ".", "MissingSchema", ",", "requests", ".", "exceptions", ".", "InvalidURL", ")", "as", "e", ":", "raise", "CliException", "(", "e", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "raise", "CliException", "(", "(", "'Unable to connect to \"{}\": '", "'make sure URL is correct'", ")", ".", "format", "(", "self", ".", "_base_url", ")", ")" ]
Submits the given request, and handles the errors appropriately. Args: url (str): the request to send. params (dict): params to be passed along to get/post data (bytes): the data to include in the request. headers (dict): the headers to include in the request. method (str): the method to use for the request, "POST" or "GET". Returns: tuple of (int, str): The response status code and the json parsed body, or the error message. Raises: `CliException`: If any issues occur with the URL.
[ "Submits", "the", "given", "request", "and", "handles", "the", "errors", "appropriately", "." ]
python
train
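A minimal sketch of the same error-translation idea from the entry above, outside the CLI class: non-2xx responses come back as (status, reason) tuples while connection failures raise. The function name is illustrative; only the requests calls mirror the entry.

import requests

def fetch_json(url, params=None):
    try:
        result = requests.get(url, params=params)
        result.raise_for_status()
        return result.status_code, result.json()
    except requests.exceptions.HTTPError as e:
        # Non-2xx: report instead of raising, as the CLI client does.
        return e.response.status_code, e.response.reason
    except requests.exceptions.ConnectionError:
        raise RuntimeError('Unable to connect to "{}": '
                           'make sure URL is correct'.format(url))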
ciena/afkak
afkak/kafkacodec.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L624-L652
def create_message_set(requests, codec=CODEC_NONE): """ Create a message set from a list of requests. Each request can have a list of messages and its own key. If codec is :data:`CODEC_NONE`, return a list of raw Kafka messages. Otherwise, return a list containing a single codec-encoded message. :param codec: The encoding for the message set, one of the constants: - `afkak.CODEC_NONE` - `afkak.CODEC_GZIP` - `afkak.CODEC_SNAPPY` :raises: :exc:`UnsupportedCodecError` for an unsupported codec """ msglist = [] for req in requests: msglist.extend([create_message(m, key=req.key) for m in req.messages]) if codec == CODEC_NONE: return msglist elif codec == CODEC_GZIP: return [create_gzip_message(msglist)] elif codec == CODEC_SNAPPY: return [create_snappy_message(msglist)] else: raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
[ "def", "create_message_set", "(", "requests", ",", "codec", "=", "CODEC_NONE", ")", ":", "msglist", "=", "[", "]", "for", "req", "in", "requests", ":", "msglist", ".", "extend", "(", "[", "create_message", "(", "m", ",", "key", "=", "req", ".", "key", ")", "for", "m", "in", "req", ".", "messages", "]", ")", "if", "codec", "==", "CODEC_NONE", ":", "return", "msglist", "elif", "codec", "==", "CODEC_GZIP", ":", "return", "[", "create_gzip_message", "(", "msglist", ")", "]", "elif", "codec", "==", "CODEC_SNAPPY", ":", "return", "[", "create_snappy_message", "(", "msglist", ")", "]", "else", ":", "raise", "UnsupportedCodecError", "(", "\"Codec 0x%02x unsupported\"", "%", "codec", ")" ]
Create a message set from a list of requests. Each request can have a list of messages and its own key. If codec is :data:`CODEC_NONE`, return a list of raw Kafka messages. Otherwise, return a list containing a single codec-encoded message. :param codec: The encoding for the message set, one of the constants: - `afkak.CODEC_NONE` - `afkak.CODEC_GZIP` - `afkak.CODEC_SNAPPY` :raises: :exc:`UnsupportedCodecError` for an unsupported codec
[ "Create", "a", "message", "set", "from", "a", "list", "of", "requests", "." ]
python
train
twisted/txaws
txaws/s3/client.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L453-L482
def copy_object(self, source_bucket, source_object_name, dest_bucket=None, dest_object_name=None, metadata={}, amz_headers={}): """ Copy an object stored in S3 from a source bucket to a destination bucket. @param source_bucket: The S3 bucket to copy the object from. @param source_object_name: The name of the object to copy. @param dest_bucket: Optionally, the S3 bucket to copy the object to. Defaults to C{source_bucket}. @param dest_object_name: Optionally, the name of the new object. Defaults to C{source_object_name}. @param metadata: A C{dict} used to build C{x-amz-meta-*} headers. @param amz_headers: A C{dict} used to build C{x-amz-*} headers. @return: A C{Deferred} that will fire with the result of request. """ dest_bucket = dest_bucket or source_bucket dest_object_name = dest_object_name or source_object_name amz_headers["copy-source"] = "/%s/%s" % (source_bucket, source_object_name) details = self._details( method=b"PUT", url_context=self._url_context( bucket=dest_bucket, object_name=dest_object_name, ), metadata=metadata, amz_headers=amz_headers, ) d = self._submit(self._query_factory(details)) return d
[ "def", "copy_object", "(", "self", ",", "source_bucket", ",", "source_object_name", ",", "dest_bucket", "=", "None", ",", "dest_object_name", "=", "None", ",", "metadata", "=", "{", "}", ",", "amz_headers", "=", "{", "}", ")", ":", "dest_bucket", "=", "dest_bucket", "or", "source_bucket", "dest_object_name", "=", "dest_object_name", "or", "source_object_name", "amz_headers", "[", "\"copy-source\"", "]", "=", "\"/%s/%s\"", "%", "(", "source_bucket", ",", "source_object_name", ")", "details", "=", "self", ".", "_details", "(", "method", "=", "b\"PUT\"", ",", "url_context", "=", "self", ".", "_url_context", "(", "bucket", "=", "dest_bucket", ",", "object_name", "=", "dest_object_name", ",", ")", ",", "metadata", "=", "metadata", ",", "amz_headers", "=", "amz_headers", ",", ")", "d", "=", "self", ".", "_submit", "(", "self", ".", "_query_factory", "(", "details", ")", ")", "return", "d" ]
Copy an object stored in S3 from a source bucket to a destination bucket. @param source_bucket: The S3 bucket to copy the object from. @param source_object_name: The name of the object to copy. @param dest_bucket: Optionally, the S3 bucket to copy the object to. Defaults to C{source_bucket}. @param dest_object_name: Optionally, the name of the new object. Defaults to C{source_object_name}. @param metadata: A C{dict} used to build C{x-amz-meta-*} headers. @param amz_headers: A C{dict} used to build C{x-amz-*} headers. @return: A C{Deferred} that will fire with the result of request.
[ "Copy", "an", "object", "stored", "in", "S3", "from", "a", "source", "bucket", "to", "a", "destination", "bucket", "." ]
python
train
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L2144-L2147
def remove_scalar_bar(self): """ Removes scalar bar """ if hasattr(self, 'scalar_bar'): self.remove_actor(self.scalar_bar, reset_camera=False)
[ "def", "remove_scalar_bar", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'scalar_bar'", ")", ":", "self", ".", "remove_actor", "(", "self", ".", "scalar_bar", ",", "reset_camera", "=", "False", ")" ]
Removes scalar bar
[ "Removes", "scalar", "bar" ]
python
train
ThomasChiroux/attowiki
src/attowiki/git_tools.py
https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/git_tools.py#L136-L153
def commit_history(filename): """Retrieve the commit history for a given filename. Keyword Arguments: :filename: (str) -- full name of the file Returns: list of dicts -- list of commit if the file is not found, returns an empty list """ result = [] repo = Repo() for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename): result.append({'date': datetime.fromtimestamp(commit.committed_date + commit.committer_tz_offset), 'hexsha': commit.hexsha}) return result
[ "def", "commit_history", "(", "filename", ")", ":", "result", "=", "[", "]", "repo", "=", "Repo", "(", ")", "for", "commit", "in", "repo", ".", "head", ".", "commit", ".", "iter_parents", "(", "paths", "=", "_delta_dir", "(", ")", "+", "filename", ")", ":", "result", ".", "append", "(", "{", "'date'", ":", "datetime", ".", "fromtimestamp", "(", "commit", ".", "committed_date", "+", "commit", ".", "committer_tz_offset", ")", ",", "'hexsha'", ":", "commit", ".", "hexsha", "}", ")", "return", "result" ]
Retrieve the commit history for a given filename. Keyword Arguments: :filename: (str) -- full name of the file Returns: list of dicts -- list of commit if the file is not found, returns an empty list
[ "Retrieve", "the", "commit", "history", "for", "a", "given", "filename", "." ]
python
train
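The same walk can be done with GitPython directly, without attowiki's _delta_dir() prefix. A hedged sketch: the path is a placeholder and it assumes the script runs inside a checkout; iter_parents(paths=...) is exactly the call the entry above uses.

from datetime import datetime
from git import Repo

repo = Repo(".")  # assumption: executed inside a git working tree
for commit in repo.head.commit.iter_parents(paths="README.rst"):
    when = datetime.fromtimestamp(commit.committed_date
                                  + commit.committer_tz_offset)
    print(commit.hexsha[:8], when)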
jonathf/chaospy
chaospy/quad/generator.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/generator.py#L9-L46
def rule_generator(*funcs):
    """
    Constructor for creating multivariate quadrature generator.

    Args:
        funcs (:py:data:typing.Callable):
            One dimensional integration rule where each rule returns
            ``abscissas`` and ``weights`` as one dimensional arrays. They must
            take one positional argument ``order``.

    Returns:
        (:py:data:typing.Callable):
            Multidimensional integration quadrature function that takes the
            arguments ``order`` and ``sparse``, and an optional ``part``. The
            argument ``sparse`` is used to select whether a Smolyak sparse grid
            is used, and ``part`` defines if a subset of the rule should be
            generated (for parallelization).

    Example:
        >>> clenshaw_curtis = lambda order: chaospy.quad_clenshaw_curtis(
        ...     order, lower=-1, upper=1, growth=True)
        >>> gauss_legendre = lambda order: chaospy.quad_gauss_legendre(
        ...     order, lower=0, upper=1)
        >>> quad_func = chaospy.rule_generator(clenshaw_curtis, gauss_legendre)
        >>> abscissas, weights = quad_func(1)
        >>> print(numpy.around(abscissas, 4))
        [[-1.     -1.      0.      0.      1.      1.    ]
         [ 0.2113  0.7887  0.2113  0.7887  0.2113  0.7887]]
        >>> print(numpy.around(weights, 4))
        [0.1667 0.1667 0.6667 0.6667 0.1667 0.1667]
    """
    dim = len(funcs)
    tensprod_rule = create_tensorprod_function(funcs)
    assert hasattr(tensprod_rule, "__call__")
    mv_rule = create_mv_rule(tensprod_rule, dim)
    assert hasattr(mv_rule, "__call__")
    return mv_rule
[ "def", "rule_generator", "(", "*", "funcs", ")", ":", "dim", "=", "len", "(", "funcs", ")", "tensprod_rule", "=", "create_tensorprod_function", "(", "funcs", ")", "assert", "hasattr", "(", "tensprod_rule", ",", "\"__call__\"", ")", "mv_rule", "=", "create_mv_rule", "(", "tensprod_rule", ",", "dim", ")", "assert", "hasattr", "(", "mv_rule", ",", "\"__call__\"", ")", "return", "mv_rule" ]
Constructor for creating multivariate quadrature generator.

Args:
    funcs (:py:data:typing.Callable):
        One dimensional integration rule where each rule returns
        ``abscissas`` and ``weights`` as one dimensional arrays. They must
        take one positional argument ``order``.

Returns:
    (:py:data:typing.Callable):
        Multidimensional integration quadrature function that takes the
        arguments ``order`` and ``sparse``, and an optional ``part``. The
        argument ``sparse`` is used to select whether a Smolyak sparse grid
        is used, and ``part`` defines if a subset of the rule should be
        generated (for parallelization).

Example:
    >>> clenshaw_curtis = lambda order: chaospy.quad_clenshaw_curtis(
    ...     order, lower=-1, upper=1, growth=True)
    >>> gauss_legendre = lambda order: chaospy.quad_gauss_legendre(
    ...     order, lower=0, upper=1)
    >>> quad_func = chaospy.rule_generator(clenshaw_curtis, gauss_legendre)
    >>> abscissas, weights = quad_func(1)
    >>> print(numpy.around(abscissas, 4))
    [[-1.     -1.      0.      0.      1.      1.    ]
     [ 0.2113  0.7887  0.2113  0.7887  0.2113  0.7887]]
    >>> print(numpy.around(weights, 4))
    [0.1667 0.1667 0.6667 0.6667 0.1667 0.1667]
[ "Constructor", "for", "creating", "multivariate", "quadrature", "generator", "." ]
python
train
msmbuilder/msmbuilder
msmbuilder/featurizer/featurizer.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/featurizer/featurizer.py#L294-L321
def partial_transform(self, traj): """Featurize an MD trajectory into a vector space via distance after superposition Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, shape=(n_frames, n_ref_frames) The RMSD value of each frame of the input trajectory to be featurized versus each frame in the reference trajectory. The number of features is the number of reference frames. See Also -------- transform : simultaneously featurize a collection of MD trajectories """ if self.atom_indices is not None: sliced_traj = traj.atom_slice(self.atom_indices) else: sliced_traj = traj result = libdistance.cdist( sliced_traj, self.sliced_reference_traj, 'rmsd' ) return self._transform(result)
[ "def", "partial_transform", "(", "self", ",", "traj", ")", ":", "if", "self", ".", "atom_indices", "is", "not", "None", ":", "sliced_traj", "=", "traj", ".", "atom_slice", "(", "self", ".", "atom_indices", ")", "else", ":", "sliced_traj", "=", "traj", "result", "=", "libdistance", ".", "cdist", "(", "sliced_traj", ",", "self", ".", "sliced_reference_traj", ",", "'rmsd'", ")", "return", "self", ".", "_transform", "(", "result", ")" ]
Featurize an MD trajectory into a vector space via distance after superposition Parameters ---------- traj : mdtraj.Trajectory A molecular dynamics trajectory to featurize. Returns ------- features : np.ndarray, shape=(n_frames, n_ref_frames) The RMSD value of each frame of the input trajectory to be featurized versus each frame in the reference trajectory. The number of features is the number of reference frames. See Also -------- transform : simultaneously featurize a collection of MD trajectories
[ "Featurize", "an", "MD", "trajectory", "into", "a", "vector", "space", "via", "distance", "after", "superposition" ]
python
train
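A usage sketch for the featurizer entry above, under stated assumptions: the file names are placeholders, and the RMSDFeaturizer constructor arguments follow msmbuilder's documented interface rather than anything shown in the entry.

import mdtraj as md
from msmbuilder.featurizer import RMSDFeaturizer

reference = md.load("reference.pdb")                   # placeholder path
traj = md.load("trajectory.xtc", top="reference.pdb")  # placeholder paths
featurizer = RMSDFeaturizer(reference_traj=reference)
features = featurizer.partial_transform(traj)
print(features.shape)  # (n_frames, n_ref_frames)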
cherrypy/cheroot
cheroot/server.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L1251-L1303
def communicate(self): """Read each request and respond appropriately.""" request_seen = False try: while True: # (re)set req to None so that if something goes wrong in # the RequestHandlerClass constructor, the error doesn't # get written to the previous request. req = None req = self.RequestHandlerClass(self.server, self) # This order of operations should guarantee correct pipelining. req.parse_request() if self.server.stats['Enabled']: self.requests_seen += 1 if not req.ready: # Something went wrong in the parsing (and the server has # probably already made a simple_response). Return and # let the conn close. return request_seen = True req.respond() if req.close_connection: return except socket.error as ex: errnum = ex.args[0] # sadly SSL sockets return a different (longer) time out string timeout_errs = 'timed out', 'The read operation timed out' if errnum in timeout_errs: # Don't error if we're between requests; only error # if 1) no request has been started at all, or 2) we're # in the middle of a request. # See https://github.com/cherrypy/cherrypy/issues/853 if (not request_seen) or (req and req.started_request): self._conditional_error(req, '408 Request Timeout') elif errnum not in errors.socket_errors_to_ignore: self.server.error_log( 'socket.error %s' % repr(errnum), level=logging.WARNING, traceback=True, ) self._conditional_error(req, '500 Internal Server Error') except (KeyboardInterrupt, SystemExit): raise except errors.FatalSSLAlert: pass except errors.NoSSLError: self._handle_no_ssl(req) except Exception as ex: self.server.error_log( repr(ex), level=logging.ERROR, traceback=True, ) self._conditional_error(req, '500 Internal Server Error')
[ "def", "communicate", "(", "self", ")", ":", "request_seen", "=", "False", "try", ":", "while", "True", ":", "# (re)set req to None so that if something goes wrong in", "# the RequestHandlerClass constructor, the error doesn't", "# get written to the previous request.", "req", "=", "None", "req", "=", "self", ".", "RequestHandlerClass", "(", "self", ".", "server", ",", "self", ")", "# This order of operations should guarantee correct pipelining.", "req", ".", "parse_request", "(", ")", "if", "self", ".", "server", ".", "stats", "[", "'Enabled'", "]", ":", "self", ".", "requests_seen", "+=", "1", "if", "not", "req", ".", "ready", ":", "# Something went wrong in the parsing (and the server has", "# probably already made a simple_response). Return and", "# let the conn close.", "return", "request_seen", "=", "True", "req", ".", "respond", "(", ")", "if", "req", ".", "close_connection", ":", "return", "except", "socket", ".", "error", "as", "ex", ":", "errnum", "=", "ex", ".", "args", "[", "0", "]", "# sadly SSL sockets return a different (longer) time out string", "timeout_errs", "=", "'timed out'", ",", "'The read operation timed out'", "if", "errnum", "in", "timeout_errs", ":", "# Don't error if we're between requests; only error", "# if 1) no request has been started at all, or 2) we're", "# in the middle of a request.", "# See https://github.com/cherrypy/cherrypy/issues/853", "if", "(", "not", "request_seen", ")", "or", "(", "req", "and", "req", ".", "started_request", ")", ":", "self", ".", "_conditional_error", "(", "req", ",", "'408 Request Timeout'", ")", "elif", "errnum", "not", "in", "errors", ".", "socket_errors_to_ignore", ":", "self", ".", "server", ".", "error_log", "(", "'socket.error %s'", "%", "repr", "(", "errnum", ")", ",", "level", "=", "logging", ".", "WARNING", ",", "traceback", "=", "True", ",", ")", "self", ".", "_conditional_error", "(", "req", ",", "'500 Internal Server Error'", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", "errors", ".", "FatalSSLAlert", ":", "pass", "except", "errors", ".", "NoSSLError", ":", "self", ".", "_handle_no_ssl", "(", "req", ")", "except", "Exception", "as", "ex", ":", "self", ".", "server", ".", "error_log", "(", "repr", "(", "ex", ")", ",", "level", "=", "logging", ".", "ERROR", ",", "traceback", "=", "True", ",", ")", "self", ".", "_conditional_error", "(", "req", ",", "'500 Internal Server Error'", ")" ]
Read each request and respond appropriately.
[ "Read", "each", "request", "and", "respond", "appropriately", "." ]
python
train
improbable-research/keanu
keanu-python/keanu/plots/traceplot.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/plots/traceplot.py#L12-L37
def traceplot(trace: sample_types, labels: List[Union[str, Tuple[str, str]]] = None, ax: Any = None,
              x0: int = 0) -> Any:
    """
    Plot sample values.

    :param trace: result of MCMC run
    :param labels: labels of vertices to be plotted. if None, all vertices are plotted.
    :param ax: Matplotlib axes
    :param x0: index of first data point, used for sample stream plots
    """
    if labels is None:
        labels = list(trace.keys())

    if ax is None:
        _, ax = plt.subplots(len(labels), 1, squeeze=False)

    for index, label in enumerate(labels):
        data = [sample for sample in trace[label]]
        ax[index][0].set_title(label)
        ax[index][0].plot(__integer_xaxis(ax[index][0], x0, len(data)), data)
        __pause_for_crude_animation()

    return ax
[ "def", "traceplot", "(", "trace", ":", "sample_types", ",", "labels", ":", "List", "[", "Union", "[", "str", ",", "Tuple", "[", "str", ",", "str", "]", "]", "]", "=", "None", ",", "ax", ":", "Any", "=", "None", ",", "x0", ":", "int", "=", "0", ")", "->", "Any", ":", "if", "labels", "is", "None", ":", "labels", "=", "list", "(", "trace", ".", "keys", "(", ")", ")", "if", "ax", "is", "None", ":", "_", ",", "ax", "=", "plt", ".", "subplots", "(", "len", "(", "labels", ")", ",", "1", ",", "squeeze", "=", "False", ")", "for", "index", ",", "label", "in", "enumerate", "(", "labels", ")", ":", "data", "=", "[", "sample", "for", "sample", "in", "trace", "[", "label", "]", "]", "ax", "[", "index", "]", "[", "0", "]", ".", "set_title", "(", "label", ")", "ax", "[", "index", "]", "[", "0", "]", ".", "plot", "(", "__integer_xaxis", "(", "ax", "[", "index", "]", "[", "0", "]", ",", "x0", ",", "len", "(", "data", ")", ")", ",", "data", ")", "__pause_for_crude_animation", "(", ")", "return", "ax" ]
Plot sample values.

:param trace: result of MCMC run
:param labels: labels of vertices to be plotted. if None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
[ "Plot", "samples", "values", "." ]
python
train
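A hedged usage sketch for the entry above: it fabricates a trace dict with the shape the function expects (label mapped to an iterable of samples) and assumes the traceplot shown is importable from keanu.plots, which is a guess at the package layout.

import numpy as np
import matplotlib.pyplot as plt
from keanu.plots import traceplot  # assumption: package layout

rng = np.random.RandomState(0)
trace = {"mu": rng.randn(500).cumsum(),
         "sigma": np.abs(rng.randn(500)).cumsum()}
traceplot(trace, labels=["mu", "sigma"])
plt.show()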
fracpete/python-weka-wrapper3
python/weka/core/dataset.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/dataset.py#L314-L323
def insert_attribute(self, att, index): """ Inserts the attribute at the specified location. :param att: the attribute to insert :type att: Attribute :param index: the index to insert the attribute at :type index: int """ javabridge.call(self.jobject, "insertAttributeAt", "(Lweka/core/Attribute;I)V", att.jobject, index)
[ "def", "insert_attribute", "(", "self", ",", "att", ",", "index", ")", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"insertAttributeAt\"", ",", "\"(Lweka/core/Attribute;I)V\"", ",", "att", ".", "jobject", ",", "index", ")" ]
Inserts the attribute at the specified location. :param att: the attribute to insert :type att: Attribute :param index: the index to insert the attribute at :type index: int
[ "Inserts", "the", "attribute", "at", "the", "specified", "location", "." ]
python
train
saltstack/salt
salt/cloud/clouds/ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L3624-L3649
def _list_nodes_full(location=None):
    '''
    Return a list of the VMs that are in this location
    '''
    provider = __active_provider_name__ or 'ec2'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    params = {'Action': 'DescribeInstances'}
    instances = aws.query(params,
                          location=location,
                          provider=provider,
                          opts=__opts__,
                          sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']
            )
        )

    ret = _extract_instance_info(instances)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
[ "def", "_list_nodes_full", "(", "location", "=", "None", ")", ":", "provider", "=", "__active_provider_name__", "or", "'ec2'", "if", "':'", "in", "provider", ":", "comps", "=", "provider", ".", "split", "(", "':'", ")", "provider", "=", "comps", "[", "0", "]", "params", "=", "{", "'Action'", ":", "'DescribeInstances'", "}", "instances", "=", "aws", ".", "query", "(", "params", ",", "location", "=", "location", ",", "provider", "=", "provider", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "if", "'error'", "in", "instances", ":", "raise", "SaltCloudSystemExit", "(", "'An error occurred while listing nodes: {0}'", ".", "format", "(", "instances", "[", "'error'", "]", "[", "'Errors'", "]", "[", "'Error'", "]", "[", "'Message'", "]", ")", ")", "ret", "=", "_extract_instance_info", "(", "instances", ")", "__utils__", "[", "'cloud.cache_node_list'", "]", "(", "ret", ",", "provider", ",", "__opts__", ")", "return", "ret" ]
Return a list of the VMs that are in this location
[ "Return", "a", "list", "of", "the", "VMs", "that", "in", "this", "location" ]
python
train
tanghaibao/goatools
goatools/go_search.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_search.py#L57-L65
def _search_vals(self, compiled_pattern, fld_val): """Search for user-regex in scalar or iterable data values.""" matches = [] if isinstance(fld_val, set): for val in fld_val: self._search_val(matches, compiled_pattern, val) elif isinstance(fld_val, str): self._search_val(matches, compiled_pattern, fld_val) return matches
[ "def", "_search_vals", "(", "self", ",", "compiled_pattern", ",", "fld_val", ")", ":", "matches", "=", "[", "]", "if", "isinstance", "(", "fld_val", ",", "set", ")", ":", "for", "val", "in", "fld_val", ":", "self", ".", "_search_val", "(", "matches", ",", "compiled_pattern", ",", "val", ")", "elif", "isinstance", "(", "fld_val", ",", "str", ")", ":", "self", ".", "_search_val", "(", "matches", ",", "compiled_pattern", ",", "fld_val", ")", "return", "matches" ]
Search for user-regex in scalar or iterable data values.
[ "Search", "for", "user", "-", "regex", "in", "scalar", "or", "iterable", "data", "values", "." ]
python
train
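A standalone restatement of the dispatch in the entry above: a compiled pattern is searched against either a scalar string or every member of a set. The helper name and the inlined match test stand in for the private _search_val, which the entry does not show.

import re

def search_vals(compiled_pattern, fld_val):
    matches = []
    # Treat a scalar string as a one-element collection.
    values = fld_val if isinstance(fld_val, set) else {fld_val}
    for val in values:
        if isinstance(val, str) and compiled_pattern.search(val):
            matches.append(val)
    return matches

pat = re.compile(r"kinase", re.IGNORECASE)
print(search_vals(pat, {"protein kinase activity", "ion transport"}))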
sorgerlab/indra
indra/literature/deft_tools.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/deft_tools.py#L46-L84
def get_text_content_for_pmids(pmids):
    """Get text content for articles given a list of their pmids

    Parameters
    ----------
    pmids : list of str

    Returns
    -------
    text_content : list of str
    """
    pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))

    pmc_ids = []
    for pmid in pmc_pmids:
        pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
        if pmc_id:
            pmc_ids.append(pmc_id)
        else:
            pmc_pmids.discard(pmid)

    pmc_xmls = []
    failed = set()
    for pmc_id in pmc_ids:
        if pmc_id is not None:
            pmc_xmls.append(pmc_client.get_xml(pmc_id))
        else:
            failed.add(pmid)
        time.sleep(0.5)

    remaining_pmids = set(pmids) - pmc_pmids | failed
    abstracts = []
    for pmid in remaining_pmids:
        abstract = pubmed_client.get_abstract(pmid)
        abstracts.append(abstract)
        time.sleep(0.5)

    return [text_content for source in (pmc_xmls, abstracts)
            for text_content in source if text_content is not None]
[ "def", "get_text_content_for_pmids", "(", "pmids", ")", ":", "pmc_pmids", "=", "set", "(", "pmc_client", ".", "filter_pmids", "(", "pmids", ",", "source_type", "=", "'fulltext'", ")", ")", "pmc_ids", "=", "[", "]", "for", "pmid", "in", "pmc_pmids", ":", "pmc_id", "=", "pmc_client", ".", "id_lookup", "(", "pmid", ",", "idtype", "=", "'pmid'", ")", "[", "'pmcid'", "]", "if", "pmc_id", ":", "pmc_ids", ".", "append", "(", "pmc_id", ")", "else", ":", "pmc_pmids", ".", "discard", "(", "pmid", ")", "pmc_xmls", "=", "[", "]", "failed", "=", "set", "(", ")", "for", "pmc_id", "in", "pmc_ids", ":", "if", "pmc_id", "is", "not", "None", ":", "pmc_xmls", ".", "append", "(", "pmc_client", ".", "get_xml", "(", "pmc_id", ")", ")", "else", ":", "failed", ".", "append", "(", "pmid", ")", "time", ".", "sleep", "(", "0.5", ")", "remaining_pmids", "=", "set", "(", "pmids", ")", "-", "pmc_pmids", "|", "failed", "abstracts", "=", "[", "]", "for", "pmid", "in", "remaining_pmids", ":", "abstract", "=", "pubmed_client", ".", "get_abstract", "(", "pmid", ")", "abstracts", ".", "append", "(", "abstract", ")", "time", ".", "sleep", "(", "0.5", ")", "return", "[", "text_content", "for", "source", "in", "(", "pmc_xmls", ",", "abstracts", ")", "for", "text_content", "in", "source", "if", "text_content", "is", "not", "None", "]" ]
Get text content for articles given a list of their pmids Parameters ---------- pmids : list of str Returns ------- text_content : list of str
[ "Get", "text", "content", "for", "articles", "given", "a", "list", "of", "their", "pmids" ]
python
train
ray-project/ray
examples/cython/cython_main.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/cython/cython_main.py#L73-L85
def example6(): """Cython simple class""" ray.init() cls = ray.remote(cyth.simple_class) a1 = cls.remote() a2 = cls.remote() result1 = ray.get(a1.increment.remote()) result2 = ray.get(a2.increment.remote()) print(result1, result2)
[ "def", "example6", "(", ")", ":", "ray", ".", "init", "(", ")", "cls", "=", "ray", ".", "remote", "(", "cyth", ".", "simple_class", ")", "a1", "=", "cls", ".", "remote", "(", ")", "a2", "=", "cls", ".", "remote", "(", ")", "result1", "=", "ray", ".", "get", "(", "a1", ".", "increment", ".", "remote", "(", ")", ")", "result2", "=", "ray", ".", "get", "(", "a2", ".", "increment", ".", "remote", "(", ")", ")", "print", "(", "result1", ",", "result2", ")" ]
Cython simple class
[ "Cython", "simple", "class" ]
python
train
apache/incubator-mxnet
python/mxnet/profiler.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L33-L67
def set_config(**kwargs): """Set up the configure of profiler (only accepts keyword arguments). Parameters ---------- filename : string, output file for profile data profile_all : boolean, all profile types enabled profile_symbolic : boolean, whether to profile symbolic operators profile_imperative : boolean, whether to profile imperative operators profile_memory : boolean, whether to profile memory usage profile_api : boolean, whether to profile the C API contiguous_dump : boolean, whether to periodically dump profiling data to file dump_period : float, seconds between profile data dumps aggregate_stats : boolean, whether to maintain aggregate stats in memory for console dump. Has some negative performance impact. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ kk = kwargs.keys() vv = kwargs.values() check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs), c_str_array([key for key in kk]), c_str_array([str(val) for val in vv]), profiler_kvstore_handle))
[ "def", "set_config", "(", "*", "*", "kwargs", ")", ":", "kk", "=", "kwargs", ".", "keys", "(", ")", "vv", "=", "kwargs", ".", "values", "(", ")", "check_call", "(", "_LIB", ".", "MXSetProcessProfilerConfig", "(", "len", "(", "kwargs", ")", ",", "c_str_array", "(", "[", "key", "for", "key", "in", "kk", "]", ")", ",", "c_str_array", "(", "[", "str", "(", "val", ")", "for", "val", "in", "vv", "]", ")", ",", "profiler_kvstore_handle", ")", ")" ]
Set up the configure of profiler (only accepts keyword arguments). Parameters ---------- filename : string, output file for profile data profile_all : boolean, all profile types enabled profile_symbolic : boolean, whether to profile symbolic operators profile_imperative : boolean, whether to profile imperative operators profile_memory : boolean, whether to profile memory usage profile_api : boolean, whether to profile the C API contiguous_dump : boolean, whether to periodically dump profiling data to file dump_period : float, seconds between profile data dumps aggregate_stats : boolean, whether to maintain aggregate stats in memory for console dump. Has some negative performance impact. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
[ "Set", "up", "the", "configure", "of", "profiler", "(", "only", "accepts", "keyword", "arguments", ")", "." ]
python
train
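A usage sketch following MXNet's documented profiler workflow; the filename is a placeholder, and set_state is assumed to sit alongside set_config in mxnet.profiler.

import mxnet as mx

mx.profiler.set_config(profile_all=True,
                       aggregate_stats=True,
                       filename="profile_output.json")  # placeholder path
mx.profiler.set_state("run")
a = mx.nd.ones((1000, 1000))
b = mx.nd.dot(a, a)
b.wait_to_read()  # force the async work to finish inside the profiled window
mx.profiler.set_state("stop")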
SHDShim/pytheos
pytheos/eqn_hugoniot.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_hugoniot.py#L25-L60
def _dT_h_delta(T_in_kK, eta, k, threenk, c_v): """ internal function for calculation of temperature along a Hugoniot :param T_in_kK: temperature in kK scale, see Jamieson for detail :param eta: = 1 - rho0/rho :param k: = [rho0, c0, s, gamma0, q, theta0] :param threenk: see the definition in Jamieson 1983, it is a correction term mostly for Jamieson gold scale :param c_v: manual input of Cv value, if 0 calculated through Debye function :return: eta derivative of temperature """ rho0 = k[0] # g/m^3 gamma0 = k[3] # no unit q = k[4] # no unit theta0_in_kK = k[5] # K, see Jamieson 1983 for detail rho = rho0 / (1. - eta) c0 = k[1] # km/s s = k[2] # no unit dPhdelta_H = rho0 * c0 * c0 * (1. + s * eta) / \ np.power((1. - s * eta), 3.) # [g/cm^3][km/s]^2 = 1e9[kg m^2/s^2] = [GPa] Ph = hugoniot_p(rho, rho0, c0, s) # in [GPa] # calculate Cv gamma = gamma0 * np.power((1. - eta), q) theta_in_kK = theta0_in_kK * np.exp((gamma0 - gamma) / q) x = theta_in_kK / T_in_kK debye3 = debye_E(x) if c_v == 0.: c_v = threenk * (4. * debye3 - 3. * x / (np.exp(x) - 1.)) # [J/g/K] # calculate dYdX dYdX = (gamma / (1. - eta) * T_in_kK) + (dPhdelta_H * eta - Ph) / \ (2. * c_v * rho0) # print('dYdX', dYdX) return dYdX
[ "def", "_dT_h_delta", "(", "T_in_kK", ",", "eta", ",", "k", ",", "threenk", ",", "c_v", ")", ":", "rho0", "=", "k", "[", "0", "]", "# g/m^3", "gamma0", "=", "k", "[", "3", "]", "# no unit", "q", "=", "k", "[", "4", "]", "# no unit", "theta0_in_kK", "=", "k", "[", "5", "]", "# K, see Jamieson 1983 for detail", "rho", "=", "rho0", "/", "(", "1.", "-", "eta", ")", "c0", "=", "k", "[", "1", "]", "# km/s", "s", "=", "k", "[", "2", "]", "# no unit", "dPhdelta_H", "=", "rho0", "*", "c0", "*", "c0", "*", "(", "1.", "+", "s", "*", "eta", ")", "/", "np", ".", "power", "(", "(", "1.", "-", "s", "*", "eta", ")", ",", "3.", ")", "# [g/cm^3][km/s]^2 = 1e9[kg m^2/s^2] = [GPa]", "Ph", "=", "hugoniot_p", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ")", "# in [GPa]", "# calculate Cv", "gamma", "=", "gamma0", "*", "np", ".", "power", "(", "(", "1.", "-", "eta", ")", ",", "q", ")", "theta_in_kK", "=", "theta0_in_kK", "*", "np", ".", "exp", "(", "(", "gamma0", "-", "gamma", ")", "/", "q", ")", "x", "=", "theta_in_kK", "/", "T_in_kK", "debye3", "=", "debye_E", "(", "x", ")", "if", "c_v", "==", "0.", ":", "c_v", "=", "threenk", "*", "(", "4.", "*", "debye3", "-", "3.", "*", "x", "/", "(", "np", ".", "exp", "(", "x", ")", "-", "1.", ")", ")", "# [J/g/K]", "# calculate dYdX", "dYdX", "=", "(", "gamma", "/", "(", "1.", "-", "eta", ")", "*", "T_in_kK", ")", "+", "(", "dPhdelta_H", "*", "eta", "-", "Ph", ")", "/", "(", "2.", "*", "c_v", "*", "rho0", ")", "# print('dYdX', dYdX)", "return", "dYdX" ]
internal function for calculation of temperature along a Hugoniot :param T_in_kK: temperature in kK scale, see Jamieson for detail :param eta: = 1 - rho0/rho :param k: = [rho0, c0, s, gamma0, q, theta0] :param threenk: see the definition in Jamieson 1983, it is a correction term mostly for Jamieson gold scale :param c_v: manual input of Cv value, if 0 calculated through Debye function :return: eta derivative of temperature
[ "internal", "function", "for", "calculation", "of", "temperature", "along", "a", "Hugoniot" ]
python
train
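Derivatives shaped like the one above are meant to be integrated along eta to recover temperature on the Hugoniot; the sketch below shows that wiring with scipy's odeint, using a made-up linear derivative in place of the real thermodynamic expression, so the numbers mean nothing physically.

import numpy as np
from scipy.integrate import odeint

def dT_h_delta(T_in_kK, eta):
    # Stand-in derivative; the real one needs k, threenk, and c_v.
    return 0.5 * T_in_kK + 0.1 * eta

etas = np.linspace(0.0, 0.3, 50)
T = odeint(dT_h_delta, 0.298, etas)  # start near room temperature, in kK
print(T[-1])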
sastrarobotics/pyHerkulex
herkulex.py
https://github.com/sastrarobotics/pyHerkulex/blob/3a42046cbfea8c7e343a04f42facba5e7bca570e/herkulex.py#L780-L796
def get_position_d(self): """ Get the D value of the current PID for position """ data = [] data.append(0x09) data.append(self.servoid) data.append(RAM_READ_REQ) data.append(POSITION_KD_RAM) data.append(BYTE2) send_data(data) rxdata = [] try: rxdata = SERPORT.read(13) return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff) except HerkulexError: raise HerkulexError("could not communicate with motors")
[ "def", "get_position_d", "(", "self", ")", ":", "data", "=", "[", "]", "data", ".", "append", "(", "0x09", ")", "data", ".", "append", "(", "self", ".", "servoid", ")", "data", ".", "append", "(", "RAM_READ_REQ", ")", "data", ".", "append", "(", "POSITION_KD_RAM", ")", "data", ".", "append", "(", "BYTE2", ")", "send_data", "(", "data", ")", "rxdata", "=", "[", "]", "try", ":", "rxdata", "=", "SERPORT", ".", "read", "(", "13", ")", "return", "(", "ord", "(", "rxdata", "[", "10", "]", ")", "*", "256", ")", "+", "(", "ord", "(", "rxdata", "[", "9", "]", ")", "&", "0xff", ")", "except", "HerkulexError", ":", "raise", "HerkulexError", "(", "\"could not communicate with motors\"", ")" ]
Get the D value of the current PID for position
[ "Get", "the", "D", "value", "of", "the", "current", "PID", "for", "position" ]
python
train
pvlib/pvlib-python
pvlib/iotools/midc.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/iotools/midc.py#L91-L114
def format_index_raw(data): """Create DatetimeIndex for the Dataframe localized to the timezone provided as the label of the third column. Parameters ---------- data: Dataframe Must contain columns 'Year' and 'DOY'. Timezone must be found as the label of the third (time) column. Returns ------- data: Dataframe The data with a Datetime index localized to the provided timezone. """ tz_raw = data.columns[3] timezone = TZ_MAP.get(tz_raw, tz_raw) year = data.Year.apply(str) jday = data.DOY.apply(lambda x: '{:03d}'.format(x)) time = data[tz_raw].apply(lambda x: '{:04d}'.format(x)) index = pd.to_datetime(year + jday + time, format="%Y%j%H%M") data = data.set_index(index) data = data.tz_localize(timezone) return data
[ "def", "format_index_raw", "(", "data", ")", ":", "tz_raw", "=", "data", ".", "columns", "[", "3", "]", "timezone", "=", "TZ_MAP", ".", "get", "(", "tz_raw", ",", "tz_raw", ")", "year", "=", "data", ".", "Year", ".", "apply", "(", "str", ")", "jday", "=", "data", ".", "DOY", ".", "apply", "(", "lambda", "x", ":", "'{:03d}'", ".", "format", "(", "x", ")", ")", "time", "=", "data", "[", "tz_raw", "]", ".", "apply", "(", "lambda", "x", ":", "'{:04d}'", ".", "format", "(", "x", ")", ")", "index", "=", "pd", ".", "to_datetime", "(", "year", "+", "jday", "+", "time", ",", "format", "=", "\"%Y%j%H%M\"", ")", "data", "=", "data", ".", "set_index", "(", "index", ")", "data", "=", "data", ".", "tz_localize", "(", "timezone", ")", "return", "data" ]
Create DatetimeIndex for the Dataframe localized to the timezone provided as the label of the third column. Parameters ---------- data: Dataframe Must contain columns 'Year' and 'DOY'. Timezone must be found as the label of the third (time) column. Returns ------- data: Dataframe The data with a Datetime index localized to the provided timezone.
[ "Create", "DatetimeIndex", "for", "the", "Dataframe", "localized", "to", "the", "timezone", "provided", "as", "the", "label", "of", "the", "third", "column", "." ]
python
train
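A toy reproduction of the index construction on a two-row frame; the column layout and the Etc/GMT+7 zone for MST are illustrative stand-ins for the real MIDC data and TZ_MAP.

import pandas as pd

df = pd.DataFrame({"Year": [2019, 2019],
                   "DOY": [32, 32],
                   "unused": [0, 0],
                   "MST": [1130, 1135]})
idx = pd.to_datetime(df.Year.astype(str)
                     + df.DOY.apply("{:03d}".format)
                     + df.MST.apply("{:04d}".format),
                     format="%Y%j%H%M")
df = df.set_index(idx).tz_localize("Etc/GMT+7")  # assumed MST mapping
print(df.index[0])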
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L352-L370
def view_packgets_list(self, option: str = '-e', keyword: str = '') -> list: '''Show all packages. Args: option: -f see their associated file -d filter to only show disabled packages -e filter to only show enabled packages -s filter to only show system packages -3 filter to only show third party packages -i see the installer for the packages -u also include uninstalled packages -keyword: optionally only those whose name contains the text in keyword ''' if option not in ['-f', '-d', '-e', '-s', '-3', '-i', '-u']: raise ValueError(f'There is no option called {option!r}.') output, _ = self._execute( '-s', self.device_sn, 'shell', 'pm', 'list', 'packages', option, keyword) return list(map(lambda x: x[8:], output.splitlines()))
[ "def", "view_packgets_list", "(", "self", ",", "option", ":", "str", "=", "'-e'", ",", "keyword", ":", "str", "=", "''", ")", "->", "list", ":", "if", "option", "not", "in", "[", "'-f'", ",", "'-d'", ",", "'-e'", ",", "'-s'", ",", "'-3'", ",", "'-i'", ",", "'-u'", "]", ":", "raise", "ValueError", "(", "f'There is no option called {option!r}.'", ")", "output", ",", "_", "=", "self", ".", "_execute", "(", "'-s'", ",", "self", ".", "device_sn", ",", "'shell'", ",", "'pm'", ",", "'list'", ",", "'packages'", ",", "option", ",", "keyword", ")", "return", "list", "(", "map", "(", "lambda", "x", ":", "x", "[", "8", ":", "]", ",", "output", ".", "splitlines", "(", ")", ")", ")" ]
Show all packages. Args: option: -f see their associated file -d filter to only show disabled packages -e filter to only show enabled packages -s filter to only show system packages -3 filter to only show third party packages -i see the installer for the packages -u also include uninstalled packages -keyword: optionally only those whose name contains the text in keyword
[ "Show", "all", "packages", "." ]
python
train
openspending/babbage
babbage/query/drilldowns.py
https://github.com/openspending/babbage/blob/9e03efe62e0be0cceabafd4de2a09cb8ec794b92/babbage/query/drilldowns.py#L18-L28
def apply(self, q, bindings, drilldowns): """ Apply a set of grouping criteria and project them. """ info = [] for drilldown in self.parse(drilldowns): for attribute in self.cube.model.match(drilldown): info.append(attribute.ref) table, column = attribute.bind(self.cube) bindings.append(Binding(table, attribute.ref)) q = q.column(column) q = q.group_by(column) return info, q, bindings
[ "def", "apply", "(", "self", ",", "q", ",", "bindings", ",", "drilldowns", ")", ":", "info", "=", "[", "]", "for", "drilldown", "in", "self", ".", "parse", "(", "drilldowns", ")", ":", "for", "attribute", "in", "self", ".", "cube", ".", "model", ".", "match", "(", "drilldown", ")", ":", "info", ".", "append", "(", "attribute", ".", "ref", ")", "table", ",", "column", "=", "attribute", ".", "bind", "(", "self", ".", "cube", ")", "bindings", ".", "append", "(", "Binding", "(", "table", ",", "attribute", ".", "ref", ")", ")", "q", "=", "q", ".", "column", "(", "column", ")", "q", "=", "q", ".", "group_by", "(", "column", ")", "return", "info", ",", "q", ",", "bindings" ]
Apply a set of grouping criteria and project them.
[ "Apply", "a", "set", "of", "grouping", "criteria", "and", "project", "them", "." ]
python
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/row_data.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/row_data.py#L200-L235
def find_cells(self, column_family_id, column): """Get a time series of cells stored on this instance. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_find_cells] :end-before: [END bigtable_row_find_cells] Args: column_family_id (str): The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. column (bytes): The column within the column family where the cells are located. Returns: List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the specified column. Raises: KeyError: If ``column_family_id`` is not among the cells stored in this row. KeyError: If ``column`` is not among the cells stored in this row for the given ``column_family_id``. """ try: column_family = self._cells[column_family_id] except KeyError: raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) try: cells = column_family[column] except KeyError: raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) return cells
[ "def", "find_cells", "(", "self", ",", "column_family_id", ",", "column", ")", ":", "try", ":", "column_family", "=", "self", ".", "_cells", "[", "column_family_id", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "_MISSING_COLUMN_FAMILY", ".", "format", "(", "column_family_id", ")", ")", "try", ":", "cells", "=", "column_family", "[", "column", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "_MISSING_COLUMN", ".", "format", "(", "column", ",", "column_family_id", ")", ")", "return", "cells" ]
Get a time series of cells stored on this instance. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_find_cells] :end-before: [END bigtable_row_find_cells] Args: column_family_id (str): The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. column (bytes): The column within the column family where the cells are located. Returns: List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the specified column. Raises: KeyError: If ``column_family_id`` is not among the cells stored in this row. KeyError: If ``column`` is not among the cells stored in this row for the given ``column_family_id``.
[ "Get", "a", "time", "series", "of", "cells", "stored", "on", "this", "instance", "." ]
python
train
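find_cells above is a two-level dictionary lookup that re-raises each KeyError with a more descriptive message. The same pattern, self-contained (the message templates are illustrative stand-ins for the library's _MISSING_* constants):

# Mimic of the two-level lookup with re-raised, clearer KeyErrors.
_MISSING_COLUMN_FAMILY = 'Column family {} is not among the cells stored in this row.'
_MISSING_COLUMN = ('Column {} is not among the cells stored in this row '
                   'for column family {}.')

def find_cells(cells, column_family_id, column):
    try:
        column_family = cells[column_family_id]
    except KeyError:
        raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id))
    try:
        return column_family[column]
    except KeyError:
        raise KeyError(_MISSING_COLUMN.format(column, column_family_id))

cells = {'cf1': {b'greeting': ['hello', 'hi']}}
print(find_cells(cells, 'cf1', b'greeting'))  # ['hello', 'hi']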
Yubico/yubikey-manager
ykman/cli/otp.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/otp.py#L98-L140
def otp(ctx, access_code):
    """
    Manage OTP Application.

    The YubiKey provides two keyboard-based slots which can each be configured
    with a credential. Several credential types are supported.

    A slot configuration may be write-protected with an access code. This
    prevents the configuration from being overwritten unless the access code
    is provided. Mode switching of the YubiKey is not possible when a slot is
    configured with an access code.

    Examples:

    \b
    Swap the configurations between the two slots:
    $ ykman otp swap

    \b
    Program a random challenge-response credential to slot 2:
    $ ykman otp chalresp --generate 2

    \b
    Program a Yubico OTP credential to slot 2, using the serial as public id:
    $ ykman otp yubiotp 1 --serial-public-id

    \b
    Program a random 38 characters long static password to slot 2:
    $ ykman otp static --generate 2 --length 38
    """
    ctx.obj['controller'] = OtpController(ctx.obj['dev'].driver)
    if access_code is not None:
        if access_code == '':
            access_code = click.prompt(
                'Enter access code', show_default=False, err=True)
        try:
            access_code = parse_access_code_hex(access_code)
        except Exception as e:
            ctx.fail('Failed to parse access code: ' + str(e))
    ctx.obj['controller'].access_code = access_code
[ "def", "otp", "(", "ctx", ",", "access_code", ")", ":", "ctx", ".", "obj", "[", "'controller'", "]", "=", "OtpController", "(", "ctx", ".", "obj", "[", "'dev'", "]", ".", "driver", ")", "if", "access_code", "is", "not", "None", ":", "if", "access_code", "==", "''", ":", "access_code", "=", "click", ".", "prompt", "(", "'Enter access code'", ",", "show_default", "=", "False", ",", "err", "=", "True", ")", "try", ":", "access_code", "=", "parse_access_code_hex", "(", "access_code", ")", "except", "Exception", "as", "e", ":", "ctx", ".", "fail", "(", "'Failed to parse access code: '", "+", "str", "(", "e", ")", ")", "ctx", ".", "obj", "[", "'controller'", "]", ".", "access_code", "=", "access_code" ]
Manage OTP Application.

The YubiKey provides two keyboard-based slots which can each be configured
with a credential. Several credential types are supported.

A slot configuration may be write-protected with an access code. This
prevents the configuration from being overwritten unless the access code is
provided. Mode switching of the YubiKey is not possible when a slot is
configured with an access code.

Examples:

\b
Swap the configurations between the two slots:
$ ykman otp swap

\b
Program a random challenge-response credential to slot 2:
$ ykman otp chalresp --generate 2

\b
Program a Yubico OTP credential to slot 2, using the serial as public id:
$ ykman otp yubiotp 1 --serial-public-id

\b
Program a random 38 characters long static password to slot 2:
$ ykman otp static --generate 2 --length 38
[ "Manage", "OTP", "Application", "." ]
python
train
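parse_access_code_hex is referenced but not shown in the record. YubiKey slot access codes are 6 bytes, so a plausible parser unhexlifies a 12-hex-digit string and validates the length; the sketch below is an assumption about that behavior, not ykman's actual implementation:

import binascii

def parse_access_code_hex(value):
    # Hypothetical stand-in: a 6-byte access code written as 12 hex digits.
    code = binascii.unhexlify(value)
    if len(code) != 6:
        raise ValueError('Access code must be exactly 6 bytes.')
    return code

print(parse_access_code_hex('010203040506'))  # b'\x01\x02\x03\x04\x05\x06'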
sphinx-gallery/sphinx-gallery
sphinx_gallery/backreferences.py
https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/backreferences.py#L111-L122
def extract_object_names_from_docs(filename): """Add matches from the text blocks (must be full names!)""" text = split_code_and_text_blocks(filename)[1] text = '\n'.join(t[1] for t in text if t[0] == 'text') regex = re.compile(r':(?:' r'func(?:tion)?|' r'meth(?:od)?|' r'attr(?:ibute)?|' r'obj(?:ect)?|' r'class):`(\S*)`' ) return [(x, x) for x in re.findall(regex, text)]
[ "def", "extract_object_names_from_docs", "(", "filename", ")", ":", "text", "=", "split_code_and_text_blocks", "(", "filename", ")", "[", "1", "]", "text", "=", "'\\n'", ".", "join", "(", "t", "[", "1", "]", "for", "t", "in", "text", "if", "t", "[", "0", "]", "==", "'text'", ")", "regex", "=", "re", ".", "compile", "(", "r':(?:'", "r'func(?:tion)?|'", "r'meth(?:od)?|'", "r'attr(?:ibute)?|'", "r'obj(?:ect)?|'", "r'class):`(\\S*)`'", ")", "return", "[", "(", "x", ",", "x", ")", "for", "x", "in", "re", ".", "findall", "(", "regex", ",", "text", ")", "]" ]
Add matches from the text blocks (must be full names!)
[ "Add", "matches", "from", "the", "text", "blocks", "(", "must", "be", "full", "names!", ")" ]
python
train
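The pattern in the record can be exercised on its own; it matches the short and long forms of the Sphinx roles and captures the full dotted target:

import re

# Exact regex from the record above.
regex = re.compile(r':(?:'
                   r'func(?:tion)?|'
                   r'meth(?:od)?|'
                   r'attr(?:ibute)?|'
                   r'obj(?:ect)?|'
                   r'class):`(\S*)`')

text = 'See :func:`numpy.mean` and :class:`sklearn.linear_model.Ridge`.'
print(re.findall(regex, text))
# ['numpy.mean', 'sklearn.linear_model.Ridge']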
cuihantao/andes
andes/utils/math.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/utils/math.py#L123-L131
def index(m, val): """ Return the indices of all the ``val`` in ``m`` """ mm = np.array(m) idx_tuple = np.where(mm == val) idx = idx_tuple[0].tolist() return idx
[ "def", "index", "(", "m", ",", "val", ")", ":", "mm", "=", "np", ".", "array", "(", "m", ")", "idx_tuple", "=", "np", ".", "where", "(", "mm", "==", "val", ")", "idx", "=", "idx_tuple", "[", "0", "]", ".", "tolist", "(", ")", "return", "idx" ]
Return the indices of all the ``val`` in ``m``
[ "Return", "the", "indices", "of", "all", "the", "val", "in", "m" ]
python
train
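The helper is a thin wrapper over numpy.where; standalone it behaves like this:

import numpy as np

def index(m, val):
    # Same body as the record: positions of every element equal to val.
    mm = np.array(m)
    return np.where(mm == val)[0].tolist()

print(index([3, 1, 3, 2, 3], 3))  # [0, 2, 4]
print(index([3, 1, 3, 2, 3], 9))  # []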
ubernostrum/django-flashpolicies
flashpolicies/policies.py
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L200-L213
def _add_header_domains_xml(self, document): """ Generates the XML elements for allowed header domains. """ for domain, attrs in self.header_domains.items(): header_element = document.createElement( 'allow-http-request-headers-from' ) header_element.setAttribute('domain', domain) header_element.setAttribute('headers', ','.join(attrs['headers'])) if not attrs['secure']: header_element.setAttribute('secure', 'false') document.documentElement.appendChild(header_element)
[ "def", "_add_header_domains_xml", "(", "self", ",", "document", ")", ":", "for", "domain", ",", "attrs", "in", "self", ".", "header_domains", ".", "items", "(", ")", ":", "header_element", "=", "document", ".", "createElement", "(", "'allow-http-request-headers-from'", ")", "header_element", ".", "setAttribute", "(", "'domain'", ",", "domain", ")", "header_element", ".", "setAttribute", "(", "'headers'", ",", "','", ".", "join", "(", "attrs", "[", "'headers'", "]", ")", ")", "if", "not", "attrs", "[", "'secure'", "]", ":", "header_element", ".", "setAttribute", "(", "'secure'", ",", "'false'", ")", "document", ".", "documentElement", ".", "appendChild", "(", "header_element", ")" ]
Generates the XML elements for allowed header domains.
[ "Generates", "the", "XML", "elements", "for", "allowed", "header", "domains", "." ]
python
train
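The element construction above can be reproduced with the stdlib's minidom to see the XML it produces (the domain and header values are invented for the demo):

from xml.dom.minidom import Document

document = Document()
document.appendChild(document.createElement('cross-domain-policy'))

# One allow-http-request-headers-from element, built as in the record.
header_element = document.createElement('allow-http-request-headers-from')
header_element.setAttribute('domain', 'media.example.com')
header_element.setAttribute('headers', ','.join(['SomeHeader', 'Other']))
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element)

print(document.documentElement.toxml())
# <cross-domain-policy><allow-http-request-headers-from
#   domain="media.example.com" headers="SomeHeader,Other" secure="false"/></cross-domain-policy>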
romanz/trezor-agent
libagent/device/trezor.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/trezor.py#L76-L87
def pubkey(self, identity, ecdh=False): """Return public key.""" curve_name = identity.get_curve_name(ecdh=ecdh) log.debug('"%s" getting public key (%s) from %s', identity.to_string(), curve_name, self) addr = identity.get_bip32_address(ecdh=ecdh) result = self._defs.get_public_node( self.conn, n=addr, ecdsa_curve_name=curve_name) log.debug('result: %s', result) return bytes(result.node.public_key)
[ "def", "pubkey", "(", "self", ",", "identity", ",", "ecdh", "=", "False", ")", ":", "curve_name", "=", "identity", ".", "get_curve_name", "(", "ecdh", "=", "ecdh", ")", "log", ".", "debug", "(", "'\"%s\" getting public key (%s) from %s'", ",", "identity", ".", "to_string", "(", ")", ",", "curve_name", ",", "self", ")", "addr", "=", "identity", ".", "get_bip32_address", "(", "ecdh", "=", "ecdh", ")", "result", "=", "self", ".", "_defs", ".", "get_public_node", "(", "self", ".", "conn", ",", "n", "=", "addr", ",", "ecdsa_curve_name", "=", "curve_name", ")", "log", ".", "debug", "(", "'result: %s'", ",", "result", ")", "return", "bytes", "(", "result", ".", "node", ".", "public_key", ")" ]
Return public key.
[ "Return", "public", "key", "." ]
python
train
OSLL/jabba
jabba/dep_extractor.py
https://github.com/OSLL/jabba/blob/71c1d008ab497020fba6ffa12a600721eb3f5ef7/jabba/dep_extractor.py#L132-L138
def get_includes(self, path): """ Get all includes from a config in a given path """ config = self.file_index.unfold_yaml(path) return self.get_includes_from_dict(config, extract=True)
[ "def", "get_includes", "(", "self", ",", "path", ")", ":", "config", "=", "self", ".", "file_index", ".", "unfold_yaml", "(", "path", ")", "return", "self", ".", "get_includes_from_dict", "(", "config", ",", "extract", "=", "True", ")" ]
Get all includes from a config in a given path
[ "Get", "all", "includes", "from", "a", "config", "in", "a", "given", "path" ]
python
train
pyGrowler/Growler
growler/core/router.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/router.py#L281-L298
def routerify(obj): """ Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object. """ router = Router() for info in get_routing_attributes(obj): router.add_route(*info) obj.__growler_router = router return router
[ "def", "routerify", "(", "obj", ")", ":", "router", "=", "Router", "(", ")", "for", "info", "in", "get_routing_attributes", "(", "obj", ")", ":", "router", ".", "add_route", "(", "*", "info", ")", "obj", ".", "__growler_router", "=", "router", "return", "router" ]
Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object.
[ "Scan", "through", "attributes", "of", "object", "parameter", "looking", "for", "any", "which", "match", "a", "route", "signature", ".", "A", "router", "will", "be", "created", "and", "added", "to", "the", "object", "with", "parameter", "." ]
python
train
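get_routing_attributes is not shown in the record, so this toy fakes it with a naming convention (get_*/post_* methods become routes) purely to illustrate routerify's scan-and-register shape; Router here is also a stand-in, not Growler's class:

# Toy router + attribute scan; the hypothetical convention is
# get_foo -> ('GET', '/foo', handler).
class Router:
    def __init__(self):
        self.routes = []

    def add_route(self, method, path, handler):
        self.routes.append((method, path, handler))

def get_routing_attributes(obj):
    for name in dir(obj):
        for verb in ('get', 'post'):
            if name.startswith(verb + '_'):
                yield verb.upper(), '/' + name[len(verb) + 1:], getattr(obj, name)

def routerify(obj):
    router = Router()
    for info in get_routing_attributes(obj):
        router.add_route(*info)
    obj.__growler_router = router
    return router

class Pages:
    def get_index(self, req, res):
        pass

print(routerify(Pages()).routes)  # [('GET', '/index', <bound method ...>)]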
FutunnOpen/futuquant
futuquant/common/sys_config.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/sys_config.py#L23-L45
def set_client_info(cls, client_id, client_ver):
        """
        .. py:function:: set_client_info(cls, client_id, client_ver)

        Set the client information used when calling the API; calling this interface is optional.

        :param client_id: str, client identifier
        :param client_ver: int, client version number
        :return: None
        :example:

        .. code:: python

            from futuquant import *
            SysConfig.set_client_info("MyFutuQuant", 0)
            quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
            quote_ctx.close()

        """
        SysConfig.CLINET_ID = client_id
        SysConfig.CLIENT_VER = client_ver
[ "def", "set_client_info", "(", "cls", ",", "client_id", ",", "client_ver", ")", ":", "SysConfig", ".", "CLINET_ID", "=", "client_id", "SysConfig", ".", "CLIENT_VER", "=", "client_ver" ]
.. py:function:: set_client_info(cls, client_id, client_ver)

Set the client information used when calling the API; calling this interface is optional.

:param client_id: str, client identifier
:param client_ver: int, client version number
:return: None
:example:

.. code:: python

    from futuquant import *
    SysConfig.set_client_info("MyFutuQuant", 0)
    quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    quote_ctx.close()
[ "..", "py", ":", "function", "::", "set_client_info", "(", "cls", "client_id", "client_ver", ")" ]
python
train
bitesofcode/projexui
projexui/widgets/xmultitagedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmultitagedit.py#L542-L573
def mousePressEvent( self, event ): """ Make sure on a mouse release event that we have a current item. If no item is current, then our edit item will become current. :param event | <QMouseReleaseEvent> """ item = self.itemAt(event.pos()) # set the tag creation item as active if item is None: create_item = self.createItem() if create_item: self.setCurrentItem(create_item) self.editItem(create_item) # check to see if we're removing a tag else: rect = self.visualItemRect(item) if ( rect.right() - 14 < event.pos().x() ): # make sure the item is allowed to be removed via the widget if ( self.itemsRemovable() ): self.takeItem(self.row(item)) # emit the removed signal if ( not self.signalsBlocked() ): self.tagRemoved.emit(item.text()) event.ignore() return super(XMultiTagEdit, self).mousePressEvent(event)
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "item", "=", "self", ".", "itemAt", "(", "event", ".", "pos", "(", ")", ")", "# set the tag creation item as active\r", "if", "item", "is", "None", ":", "create_item", "=", "self", ".", "createItem", "(", ")", "if", "create_item", ":", "self", ".", "setCurrentItem", "(", "create_item", ")", "self", ".", "editItem", "(", "create_item", ")", "# check to see if we're removing a tag\r", "else", ":", "rect", "=", "self", ".", "visualItemRect", "(", "item", ")", "if", "(", "rect", ".", "right", "(", ")", "-", "14", "<", "event", ".", "pos", "(", ")", ".", "x", "(", ")", ")", ":", "# make sure the item is allowed to be removed via the widget\r", "if", "(", "self", ".", "itemsRemovable", "(", ")", ")", ":", "self", ".", "takeItem", "(", "self", ".", "row", "(", "item", ")", ")", "# emit the removed signal\r", "if", "(", "not", "self", ".", "signalsBlocked", "(", ")", ")", ":", "self", ".", "tagRemoved", ".", "emit", "(", "item", ".", "text", "(", ")", ")", "event", ".", "ignore", "(", ")", "return", "super", "(", "XMultiTagEdit", ",", "self", ")", ".", "mousePressEvent", "(", "event", ")" ]
Make sure on a mouse release event that we have a current item. If no item is current, then our edit item will become current. :param event | <QMouseReleaseEvent>
[ "Make", "sure", "on", "a", "mouse", "release", "event", "that", "we", "have", "a", "current", "item", ".", "If", "no", "item", "is", "current", "then", "our", "edit", "item", "will", "become", "current", ".", ":", "param", "event", "|", "<QMouseReleaseEvent", ">" ]
python
train
CiscoUcs/UcsPythonSDK
src/UcsSdk/UcsHandle_Edit.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsHandle_Edit.py#L1377-L1562
def GetTechSupport(self, pathPattern, ucsManager=False, ucsMgmt=False, chassisId=None, cimcId=None, adapterId=None, iomId=None, fexId=None, rackServerId=None, rackAdapterId=None, timeoutSec=600, removeFromUcs=False, dumpXml=None): """ Creates and downloads the technical support data for the respective UCSM. - pathPattern specifies the path of the tech support file to be downloaded. File should be a tar file. - ucsManager, if provided as True then technical support data for the entire UCSM instance will be created and downloaded. - ucsMgmt, if provided as True then technical support data for the entire UCSM management services(excluding fabric interconnects) will be created and downloaded. - chassisId specifies the chassis Id. - cimcId for a specific chassis. Can be 'all' also. - adapterId for a specific chassis. Can be 'all' also. - iomId for a specific chassis. Can be 'all' also. - fexId specifies the Id of a fabric extender. - rackServerId specifies the Id of a rack server. - rackAdapterId specifies the adaptor Id for a specific rack server. Can be 'all' also. - timeoutSec specifies the time in seconds after that the operation will terminate. - removeFromUcs, if specified as True then the techincal support data file will be removed from the UCS. """ from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsUtils, UcsValidationException, \ UcsException from Mos import SysdebugTechSupport, SysdebugTechSupportCmdOpt from Ucs import ConfigConfig import os if (self._transactionInProgress): raise UcsValidationException( "UCS transaction in progress. Cannot execute GetTechSupport. Complete or Undo UCS transaction.") # raise Exception("UCS transaction in progress. Cannot execute GetTechSupport. Complete or Undo UCS transaction.") if (pathPattern == None): raise UcsValidationException("pathPattern parameter is not provided.") # raise Exception("Please provide pathPattern parameter.") if not pathPattern.endswith('.tar'): raise UcsValidationException('pathPattern should end with .tar') directory = os.path.dirname(pathPattern) if not (os.path.exists(directory)): os.makedirs(directory) inConfig = ConfigConfig() techSupportObj = None dt1 = datetime.datetime(1970, 1, 1, 12, 0, 0, 0) dt2 = datetime.datetime.utcnow() ds = (dt2 - dt1) creationTS = (ds.microseconds / 1000000) + ( ds.days * 24 * 60 * 60) + ds.seconds # Converting timedelta in to total seconds for Python version compatibility. 
sysdebug = ManagedObject(NamingId.SYSDEBUG_TECH_SUPPORT) sysdebug.CreationTS = str(creationTS) dn = UcsUtils.MakeDn([ManagedObject(NamingId.TOP_SYSTEM).MakeRn(), ManagedObject(NamingId.SYSDEBUG_TECH_SUP_FILE_REPOSITORY).MakeRn(), sysdebug.MakeRn()]) sysdebugTechSupport = ManagedObject(NamingId.SYSDEBUG_TECH_SUPPORT) sysdebugTechSupport.DN = dn sysdebugTechSupport.AdminState = SysdebugTechSupport.CONST_ADMIN_STATE_START sysdebugTechSupport.CreationTS = str(creationTS) sysdebugTechSupport.Status = Status.CREATED sysdebugTechSupportCmdOpt = ManagedObject(NamingId.SYSDEBUG_TECH_SUPPORT_CMD_OPT) # Parameter Set UCSM if (ucsManager): sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_UCSM sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) elif (ucsMgmt): sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_UCSM_MGMT sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) elif (chassisId != None): if (cimcId != None): sysdebugTechSupportCmdOpt.ChassisCimcId = str(cimcId) sysdebugTechSupportCmdOpt.ChassisId = str(chassisId) sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_CHASSIS if (adapterId == None): sysdebugTechSupportCmdOpt.CimcAdapterId = SysdebugTechSupportCmdOpt.CONST_CIMC_ADAPTER_ID_ALL else: sysdebugTechSupportCmdOpt.CimcAdapterId = str(adapterId) sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) elif (iomId != None): sysdebugTechSupportCmdOpt.ChassisIomId = str(iomId) sysdebugTechSupportCmdOpt.ChassisId = str(chassisId) sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_CHASSIS sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) elif (rackServerId != None): sysdebugTechSupportCmdOpt.RACK_SERVER_ID = str(iomId) if (rackAdapterId == None): sysdebugTechSupportCmdOpt.RACK_SERVER_ADAPTER_ID = SysdebugTechSupportCmdOpt.CONST_RACK_SERVER_ADAPTER_ID_ALL else: sysdebugTechSupportCmdOpt.RACK_SERVER_ADAPTER_ID = str(rackAdapterId) sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_SERVER sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) elif (fexId != None): sysdebugTechSupportCmdOpt.FAB_EXT_ID = str(iomId) sysdebugTechSupportCmdOpt.MajorOptType = SysdebugTechSupportCmdOpt.CONST_MAJOR_OPT_TYPE_FEX sysdebugTechSupportCmdOpt.Rn = sysdebugTechSupportCmdOpt.MakeRn() sysdebugTechSupport.AddChild(sysdebugTechSupportCmdOpt) if (sysdebugTechSupport.GetChildCount() == 0): sysdebugTechSupport = None sysdebugTechSupportCmdOpt = None inConfig = None else: inConfig.AddChild(sysdebugTechSupport) ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) duration = timeoutSec poll_interval = 2 crd = None if (ccm.errorCode == 0): # WriteUcsWarning('Waiting for the Tech Support file to become available (this may take several minutes).') status = False while (True): crd = self.ConfigResolveDn(dn, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (crd.errorCode == 0): if (crd.OutConfig.GetChildCount() > 0): for techSupport in crd.OutConfig.GetChild(): if (techSupport.OperState == SysdebugTechSupport.CONST_OPER_STATE_AVAILABLE): status = True else: raise 
UcsValidationException('Failed to create the TechSupport file.') # raise Exception('Failed to create the TechSupport file.') else: raise UcsException(crd.errorCode, crd.errorDescr) # raise Exception('[Error]: GetTechSupport [Code]:' + crd.errorCode + ' [Description]:' + crd.errorDescr) if (status): break time.sleep(min(duration, poll_interval)) duration = max(0, (duration - poll_interval)) if duration == 0: inConfig = ConfigConfig() sysdebugTechSupport = ManagedObject(NamingId.SYSDEBUG_TECH_SUPPORT) sysdebugTechSupport.DN = dn sysdebugTechSupport.Status = Status.DELETED inConfig.AddChild(sysdebugTechSupport) ccmi = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (ccmi.errorCode != 0): WriteUcsWarning( '[Error]: GetTechSupport [Code]:' + ccmi.errorCode + ' [Description]:' + ccmi.errorDescr) raise UcsValidationException('TechSupport file generation timed out') # raise Exception('TechSupport file generation timed out') # WriteObject(crd.OutConfig.GetChild()) for item in crd.OutConfig.GetChild(): fileSource = "techsupport/" + item.Name try: UcsUtils.DownloadFile(self, fileSource, pathPattern) except Exception, err: WriteUcsWarning(str(err)) if (removeFromUcs): inConfig = ConfigConfig() sysdebugTechSupport = ManagedObject(NamingId.SYSDEBUG_TECH_SUPPORT) sysdebugTechSupport.DN = item.Dn sysdebugTechSupport.Status = Status.DELETED inConfig.AddChild(sysdebugTechSupport) ccmi = self.ConfigConfMo(dn=item.Dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml) if (ccmi.errorCode != 0): raise UcsException(ccmi.errorCode, ccmi.errorDescr) # raise Exception('[Error]: GetTechSupport [Code]:' + ccmi.errorCode + ' [Description]:' + ccmi.errorDescr) else: raise UcsException(ccm.errorCode, ccm.errorDescr) # raise Exception('[Error]: GetTechSupport [Code]:' + ccm.errorCode + ' [Description]:' + ccm.errorDescr) return crd.OutConfig.GetChild()
[ "def", "GetTechSupport", "(", "self", ",", "pathPattern", ",", "ucsManager", "=", "False", ",", "ucsMgmt", "=", "False", ",", "chassisId", "=", "None", ",", "cimcId", "=", "None", ",", "adapterId", "=", "None", ",", "iomId", "=", "None", ",", "fexId", "=", "None", ",", "rackServerId", "=", "None", ",", "rackAdapterId", "=", "None", ",", "timeoutSec", "=", "600", ",", "removeFromUcs", "=", "False", ",", "dumpXml", "=", "None", ")", ":", "from", "UcsBase", "import", "WriteUcsWarning", ",", "UcsUtils", ",", "ManagedObject", ",", "WriteObject", ",", "UcsUtils", ",", "UcsValidationException", ",", "UcsException", "from", "Mos", "import", "SysdebugTechSupport", ",", "SysdebugTechSupportCmdOpt", "from", "Ucs", "import", "ConfigConfig", "import", "os", "if", "(", "self", ".", "_transactionInProgress", ")", ":", "raise", "UcsValidationException", "(", "\"UCS transaction in progress. Cannot execute GetTechSupport. Complete or Undo UCS transaction.\"", ")", "# raise Exception(\"UCS transaction in progress. Cannot execute GetTechSupport. Complete or Undo UCS transaction.\")", "if", "(", "pathPattern", "==", "None", ")", ":", "raise", "UcsValidationException", "(", "\"pathPattern parameter is not provided.\"", ")", "# raise Exception(\"Please provide pathPattern parameter.\")", "if", "not", "pathPattern", ".", "endswith", "(", "'.tar'", ")", ":", "raise", "UcsValidationException", "(", "'pathPattern should end with .tar'", ")", "directory", "=", "os", ".", "path", ".", "dirname", "(", "pathPattern", ")", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "directory", ")", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "inConfig", "=", "ConfigConfig", "(", ")", "techSupportObj", "=", "None", "dt1", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ",", "12", ",", "0", ",", "0", ",", "0", ")", "dt2", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "ds", "=", "(", "dt2", "-", "dt1", ")", "creationTS", "=", "(", "ds", ".", "microseconds", "/", "1000000", ")", "+", "(", "ds", ".", "days", "*", "24", "*", "60", "*", "60", ")", "+", "ds", ".", "seconds", "# Converting timedelta in to total seconds for Python version compatibility.", "sysdebug", "=", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUPPORT", ")", "sysdebug", ".", "CreationTS", "=", "str", "(", "creationTS", ")", "dn", "=", "UcsUtils", ".", "MakeDn", "(", "[", "ManagedObject", "(", "NamingId", ".", "TOP_SYSTEM", ")", ".", "MakeRn", "(", ")", ",", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUP_FILE_REPOSITORY", ")", ".", "MakeRn", "(", ")", ",", "sysdebug", ".", "MakeRn", "(", ")", "]", ")", "sysdebugTechSupport", "=", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUPPORT", ")", "sysdebugTechSupport", ".", "DN", "=", "dn", "sysdebugTechSupport", ".", "AdminState", "=", "SysdebugTechSupport", ".", "CONST_ADMIN_STATE_START", "sysdebugTechSupport", ".", "CreationTS", "=", "str", "(", "creationTS", ")", "sysdebugTechSupport", ".", "Status", "=", "Status", ".", "CREATED", "sysdebugTechSupportCmdOpt", "=", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUPPORT_CMD_OPT", ")", "# Parameter Set UCSM", "if", "(", "ucsManager", ")", ":", "sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_UCSM", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "elif", "(", "ucsMgmt", ")", ":", 
"sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_UCSM_MGMT", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "elif", "(", "chassisId", "!=", "None", ")", ":", "if", "(", "cimcId", "!=", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "ChassisCimcId", "=", "str", "(", "cimcId", ")", "sysdebugTechSupportCmdOpt", ".", "ChassisId", "=", "str", "(", "chassisId", ")", "sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_CHASSIS", "if", "(", "adapterId", "==", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "CimcAdapterId", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_CIMC_ADAPTER_ID_ALL", "else", ":", "sysdebugTechSupportCmdOpt", ".", "CimcAdapterId", "=", "str", "(", "adapterId", ")", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "elif", "(", "iomId", "!=", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "ChassisIomId", "=", "str", "(", "iomId", ")", "sysdebugTechSupportCmdOpt", ".", "ChassisId", "=", "str", "(", "chassisId", ")", "sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_CHASSIS", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "elif", "(", "rackServerId", "!=", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "RACK_SERVER_ID", "=", "str", "(", "iomId", ")", "if", "(", "rackAdapterId", "==", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "RACK_SERVER_ADAPTER_ID", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_RACK_SERVER_ADAPTER_ID_ALL", "else", ":", "sysdebugTechSupportCmdOpt", ".", "RACK_SERVER_ADAPTER_ID", "=", "str", "(", "rackAdapterId", ")", "sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_SERVER", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "elif", "(", "fexId", "!=", "None", ")", ":", "sysdebugTechSupportCmdOpt", ".", "FAB_EXT_ID", "=", "str", "(", "iomId", ")", "sysdebugTechSupportCmdOpt", ".", "MajorOptType", "=", "SysdebugTechSupportCmdOpt", ".", "CONST_MAJOR_OPT_TYPE_FEX", "sysdebugTechSupportCmdOpt", ".", "Rn", "=", "sysdebugTechSupportCmdOpt", ".", "MakeRn", "(", ")", "sysdebugTechSupport", ".", "AddChild", "(", "sysdebugTechSupportCmdOpt", ")", "if", "(", "sysdebugTechSupport", ".", "GetChildCount", "(", ")", "==", "0", ")", ":", "sysdebugTechSupport", "=", "None", "sysdebugTechSupportCmdOpt", "=", "None", "inConfig", "=", "None", "else", ":", "inConfig", ".", "AddChild", "(", "sysdebugTechSupport", ")", "ccm", "=", "self", ".", "ConfigConfMo", "(", "dn", "=", "dn", ",", "inConfig", "=", "inConfig", ",", "inHierarchical", "=", "YesOrNo", ".", "FALSE", ",", "dumpXml", "=", "dumpXml", ")", "duration", "=", "timeoutSec", "poll_interval", "=", "2", "crd", "=", "None", "if", "(", "ccm", ".", "errorCode", "==", "0", ")", ":", "# WriteUcsWarning('Waiting for the Tech Support file to become available (this may take several minutes).')", "status", "=", "False", "while", "(", "True", ")", ":", "crd", "=", 
"self", ".", "ConfigResolveDn", "(", "dn", ",", "inHierarchical", "=", "YesOrNo", ".", "FALSE", ",", "dumpXml", "=", "dumpXml", ")", "if", "(", "crd", ".", "errorCode", "==", "0", ")", ":", "if", "(", "crd", ".", "OutConfig", ".", "GetChildCount", "(", ")", ">", "0", ")", ":", "for", "techSupport", "in", "crd", ".", "OutConfig", ".", "GetChild", "(", ")", ":", "if", "(", "techSupport", ".", "OperState", "==", "SysdebugTechSupport", ".", "CONST_OPER_STATE_AVAILABLE", ")", ":", "status", "=", "True", "else", ":", "raise", "UcsValidationException", "(", "'Failed to create the TechSupport file.'", ")", "# raise Exception('Failed to create the TechSupport file.')", "else", ":", "raise", "UcsException", "(", "crd", ".", "errorCode", ",", "crd", ".", "errorDescr", ")", "# raise Exception('[Error]: GetTechSupport [Code]:' + crd.errorCode + ' [Description]:' + crd.errorDescr)", "if", "(", "status", ")", ":", "break", "time", ".", "sleep", "(", "min", "(", "duration", ",", "poll_interval", ")", ")", "duration", "=", "max", "(", "0", ",", "(", "duration", "-", "poll_interval", ")", ")", "if", "duration", "==", "0", ":", "inConfig", "=", "ConfigConfig", "(", ")", "sysdebugTechSupport", "=", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUPPORT", ")", "sysdebugTechSupport", ".", "DN", "=", "dn", "sysdebugTechSupport", ".", "Status", "=", "Status", ".", "DELETED", "inConfig", ".", "AddChild", "(", "sysdebugTechSupport", ")", "ccmi", "=", "self", ".", "ConfigConfMo", "(", "dn", "=", "dn", ",", "inConfig", "=", "inConfig", ",", "inHierarchical", "=", "YesOrNo", ".", "FALSE", ",", "dumpXml", "=", "dumpXml", ")", "if", "(", "ccmi", ".", "errorCode", "!=", "0", ")", ":", "WriteUcsWarning", "(", "'[Error]: GetTechSupport [Code]:'", "+", "ccmi", ".", "errorCode", "+", "' [Description]:'", "+", "ccmi", ".", "errorDescr", ")", "raise", "UcsValidationException", "(", "'TechSupport file generation timed out'", ")", "# raise Exception('TechSupport file generation timed out')", "# WriteObject(crd.OutConfig.GetChild())", "for", "item", "in", "crd", ".", "OutConfig", ".", "GetChild", "(", ")", ":", "fileSource", "=", "\"techsupport/\"", "+", "item", ".", "Name", "try", ":", "UcsUtils", ".", "DownloadFile", "(", "self", ",", "fileSource", ",", "pathPattern", ")", "except", "Exception", ",", "err", ":", "WriteUcsWarning", "(", "str", "(", "err", ")", ")", "if", "(", "removeFromUcs", ")", ":", "inConfig", "=", "ConfigConfig", "(", ")", "sysdebugTechSupport", "=", "ManagedObject", "(", "NamingId", ".", "SYSDEBUG_TECH_SUPPORT", ")", "sysdebugTechSupport", ".", "DN", "=", "item", ".", "Dn", "sysdebugTechSupport", ".", "Status", "=", "Status", ".", "DELETED", "inConfig", ".", "AddChild", "(", "sysdebugTechSupport", ")", "ccmi", "=", "self", ".", "ConfigConfMo", "(", "dn", "=", "item", ".", "Dn", ",", "inConfig", "=", "inConfig", ",", "inHierarchical", "=", "YesOrNo", ".", "FALSE", ",", "dumpXml", "=", "dumpXml", ")", "if", "(", "ccmi", ".", "errorCode", "!=", "0", ")", ":", "raise", "UcsException", "(", "ccmi", ".", "errorCode", ",", "ccmi", ".", "errorDescr", ")", "# raise Exception('[Error]: GetTechSupport [Code]:' + ccmi.errorCode + ' [Description]:' + ccmi.errorDescr)", "else", ":", "raise", "UcsException", "(", "ccm", ".", "errorCode", ",", "ccm", ".", "errorDescr", ")", "# raise Exception('[Error]: GetTechSupport [Code]:' + ccm.errorCode + ' [Description]:' + ccm.errorDescr)", "return", "crd", ".", "OutConfig", ".", "GetChild", "(", ")" ]
Creates and downloads the technical support data for the respective UCSM.

- pathPattern specifies the path of the tech support file to be downloaded. File should be a tar file.
- ucsManager, if provided as True then technical support data for the entire UCSM instance will be created and downloaded.
- ucsMgmt, if provided as True then technical support data for the entire UCSM management services (excluding fabric interconnects) will be created and downloaded.
- chassisId specifies the chassis Id.
- cimcId for a specific chassis. Can be 'all' also.
- adapterId for a specific chassis. Can be 'all' also.
- iomId for a specific chassis. Can be 'all' also.
- fexId specifies the Id of a fabric extender.
- rackServerId specifies the Id of a rack server.
- rackAdapterId specifies the adapter Id for a specific rack server. Can be 'all' also.
- timeoutSec specifies the time in seconds after which the operation will terminate.
- removeFromUcs, if specified as True then the technical support data file will be removed from the UCS.
[ "Creates", "and", "downloads", "the", "technical", "support", "data", "for", "the", "respective", "UCSM", ".", "-", "pathPattern", "specifies", "the", "path", "of", "the", "tech", "support", "file", "to", "be", "downloaded", ".", "File", "should", "be", "a", "tar", "file", ".", "-", "ucsManager", "if", "provided", "as", "True", "then", "technical", "support", "data", "for", "the", "entire", "UCSM", "instance", "will", "be", "created", "and", "downloaded", ".", "-", "ucsMgmt", "if", "provided", "as", "True", "then", "technical", "support", "data", "for", "the", "entire", "UCSM", "management", "services", "(", "excluding", "fabric", "interconnects", ")", "will", "be", "created", "and", "downloaded", ".", "-", "chassisId", "specifies", "the", "chassis", "Id", ".", "-", "cimcId", "for", "a", "specific", "chassis", ".", "Can", "be", "all", "also", ".", "-", "adapterId", "for", "a", "specific", "chassis", ".", "Can", "be", "all", "also", ".", "-", "iomId", "for", "a", "specific", "chassis", ".", "Can", "be", "all", "also", ".", "-", "fexId", "specifies", "the", "Id", "of", "a", "fabric", "extender", ".", "-", "rackServerId", "specifies", "the", "Id", "of", "a", "rack", "server", ".", "-", "rackAdapterId", "specifies", "the", "adaptor", "Id", "for", "a", "specific", "rack", "server", ".", "Can", "be", "all", "also", ".", "-", "timeoutSec", "specifies", "the", "time", "in", "seconds", "after", "that", "the", "operation", "will", "terminate", ".", "-", "removeFromUcs", "if", "specified", "as", "True", "then", "the", "techincal", "support", "data", "file", "will", "be", "removed", "from", "the", "UCS", "." ]
python
train
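One detail worth isolating from the record above: creationTS is computed from the fields of a timedelta instead of total_seconds(), for compatibility with older Pythons, and it is anchored at 1970-01-01 12:00 rather than midnight, so it is not a plain Unix epoch. Standalone (// replaces the record's Python 2 /; the microseconds term truncates to 0 either way):

import datetime

dt1 = datetime.datetime(1970, 1, 1, 12, 0, 0, 0)
dt2 = datetime.datetime.utcnow()
ds = dt2 - dt1

# Hand-rolled total seconds, as in the record.
creation_ts = (ds.microseconds // 1000000) + (ds.days * 24 * 60 * 60) + ds.seconds
print(creation_ts)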
hawkular/hawkular-client-python
hawkular/metrics.py
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L131-L147
def push(self, metric_type, metric_id, value, timestamp=None): """ Pushes a single metric_id, datapoint combination to the server. This method is an assistant method for the put method by removing the need to create data structures first. :param metric_type: MetricType to be matched (required) :param metric_id: Exact string matching metric id :param value: Datapoint value (depending on the MetricType) :param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance """ if type(timestamp) is datetime: timestamp = datetime_to_time_millis(timestamp) item = create_metric(metric_type, metric_id, create_datapoint(value, timestamp)) self.put(item)
[ "def", "push", "(", "self", ",", "metric_type", ",", "metric_id", ",", "value", ",", "timestamp", "=", "None", ")", ":", "if", "type", "(", "timestamp", ")", "is", "datetime", ":", "timestamp", "=", "datetime_to_time_millis", "(", "timestamp", ")", "item", "=", "create_metric", "(", "metric_type", ",", "metric_id", ",", "create_datapoint", "(", "value", ",", "timestamp", ")", ")", "self", ".", "put", "(", "item", ")" ]
Pushes a single metric_id, datapoint combination to the server. This method is an assistant method for the put method by removing the need to create data structures first. :param metric_type: MetricType to be matched (required) :param metric_id: Exact string matching metric id :param value: Datapoint value (depending on the MetricType) :param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance
[ "Pushes", "a", "single", "metric_id", "datapoint", "combination", "to", "the", "server", "." ]
python
train
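datetime_to_time_millis is referenced but not shown in the record; a plausible stand-in (an assumption, not hawkular's actual helper) converts a naive UTC datetime to integer milliseconds since the epoch:

import calendar
from datetime import datetime

def datetime_to_time_millis(dt):
    # Assumed behavior: naive UTC datetime -> epoch milliseconds.
    return calendar.timegm(dt.utctimetuple()) * 1000 + dt.microsecond // 1000

print(datetime_to_time_millis(datetime(2019, 1, 1)))  # 1546300800000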
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L110-L119
def print_file(self, file_format='ctfile', f=sys.stdout): """Print representation of :class:`~ctfile.ctfile.CTfile`. :param str file_format: Format to use: ``ctfile`` or ``json``. :param f: Print to file or stdout. :type f: File-like :return: None. :rtype: :py:obj:`None`. """ print(self.writestr(file_format=file_format), file=f)
[ "def", "print_file", "(", "self", ",", "file_format", "=", "'ctfile'", ",", "f", "=", "sys", ".", "stdout", ")", ":", "print", "(", "self", ".", "writestr", "(", "file_format", "=", "file_format", ")", ",", "file", "=", "f", ")" ]
Print representation of :class:`~ctfile.ctfile.CTfile`. :param str file_format: Format to use: ``ctfile`` or ``json``. :param f: Print to file or stdout. :type f: File-like :return: None. :rtype: :py:obj:`None`.
[ "Print", "representation", "of", ":", "class", ":", "~ctfile", ".", "ctfile", ".", "CTfile", "." ]
python
train
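The method body is just print(self.writestr(...), file=f), so any file-like object works; the redirection pattern in isolation:

import io
import sys

def print_file(text, f=sys.stdout):
    # Same shape as the record: default to stdout, accept any file-like.
    print(text, file=f)

buf = io.StringIO()
print_file('M  END', f=buf)   # captured in the buffer
print_file('M  END')          # written to stdout
print(repr(buf.getvalue()))   # 'M  END\n'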
sanger-pathogens/Fastaq
pyfastaq/tasks.py
https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/tasks.py#L568-L577
def sort_by_name(infile, outfile): '''Sorts input sequence file by sort -d -k1,1, writes sorted output file.''' seqs = {} file_to_dict(infile, seqs) #seqs = list(seqs.values()) #seqs.sort() fout = utils.open_file_write(outfile) for name in sorted(seqs): print(seqs[name], file=fout) utils.close(fout)
[ "def", "sort_by_name", "(", "infile", ",", "outfile", ")", ":", "seqs", "=", "{", "}", "file_to_dict", "(", "infile", ",", "seqs", ")", "#seqs = list(seqs.values())", "#seqs.sort()", "fout", "=", "utils", ".", "open_file_write", "(", "outfile", ")", "for", "name", "in", "sorted", "(", "seqs", ")", ":", "print", "(", "seqs", "[", "name", "]", ",", "file", "=", "fout", ")", "utils", ".", "close", "(", "fout", ")" ]
Sorts input sequence file by sort -d -k1,1, writes sorted output file.
[ "Sorts", "input", "sequence", "file", "by", "sort", "-", "d", "-", "k1", "1", "writes", "sorted", "output", "file", "." ]
python
valid
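A miniature of sort_by_name without pyfastaq: sequences keyed by name in a dict, written back out in sorted(name) order (file_to_dict and the utils file helpers are replaced by plain in-memory strings):

seqs = {
    'contig_2': '>contig_2\nGGTT',
    'contig_1': '>contig_1\nACGT',
}

# Same ordering the record relies on: iterate sorted() over the names.
for name in sorted(seqs):
    print(seqs[name])
# >contig_1
# ACGT
# >contig_2
# GGTT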
BernardFW/bernard
src/bernard/i18n/translator.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/translator.py#L120-L138
def best_for_flags(self, flags: Flags) -> List[TransItem]: """ Given `flags`, find all items of this sentence that have an equal matching score and put them in a list. """ best_score: int = 0 best_list: List[TransItem] = [] for item in self.items: score = item.score(flags) if score == best_score: best_list.append(item) elif score > best_score: best_list = [item] best_score = score return best_list
[ "def", "best_for_flags", "(", "self", ",", "flags", ":", "Flags", ")", "->", "List", "[", "TransItem", "]", ":", "best_score", ":", "int", "=", "0", "best_list", ":", "List", "[", "TransItem", "]", "=", "[", "]", "for", "item", "in", "self", ".", "items", ":", "score", "=", "item", ".", "score", "(", "flags", ")", "if", "score", "==", "best_score", ":", "best_list", ".", "append", "(", "item", ")", "elif", "score", ">", "best_score", ":", "best_list", "=", "[", "item", "]", "best_score", "=", "score", "return", "best_list" ]
Given `flags`, find all items of this sentence that have an equal matching score and put them in a list.
[ "Given", "flags", "find", "all", "items", "of", "this", "sentence", "that", "have", "an", "equal", "matching", "score", "and", "put", "them", "in", "a", "list", "." ]
python
train
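The loop above is a single-pass "keep everything tied for the best score" scan. It generalizes beyond translation items; with plain (name, score) pairs:

# Same tie-keeping max scan as best_for_flags. Note the quirk it shares
# with the original: best_score starts at 0, so zero-scoring items collect
# until a positive score appears.
def best_for_items(items, score):
    best_score = 0
    best_list = []
    for item in items:
        s = score(item)
        if s == best_score:
            best_list.append(item)
        elif s > best_score:
            best_list = [item]
            best_score = s
    return best_list

items = [('a', 2), ('b', 3), ('c', 3), ('d', 1)]
print(best_for_items(items, score=lambda it: it[1]))  # [('b', 3), ('c', 3)]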
MolSSI-BSE/basis_set_exchange
basis_set_exchange/refconverters/bib.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/refconverters/bib.py#L10-L33
def _ref_bib(key, ref): '''Convert a single reference to bibtex format ''' s = '' s += '@{}{{{},\n'.format(ref['type'], key) entry_lines = [] for k, v in ref.items(): if k == 'type': continue # Handle authors/editors if k == 'authors': entry_lines.append(' author = {{{}}}'.format(' and '.join(v))) elif k == 'editors': entry_lines.append(' editor = {{{}}}'.format(' and '.join(v))) else: entry_lines.append(' {} = {{{}}}'.format(k, v)) s += ',\n'.join(entry_lines) s += '\n}' return s
[ "def", "_ref_bib", "(", "key", ",", "ref", ")", ":", "s", "=", "''", "s", "+=", "'@{}{{{},\\n'", ".", "format", "(", "ref", "[", "'type'", "]", ",", "key", ")", "entry_lines", "=", "[", "]", "for", "k", ",", "v", "in", "ref", ".", "items", "(", ")", ":", "if", "k", "==", "'type'", ":", "continue", "# Handle authors/editors", "if", "k", "==", "'authors'", ":", "entry_lines", ".", "append", "(", "' author = {{{}}}'", ".", "format", "(", "' and '", ".", "join", "(", "v", ")", ")", ")", "elif", "k", "==", "'editors'", ":", "entry_lines", ".", "append", "(", "' editor = {{{}}}'", ".", "format", "(", "' and '", ".", "join", "(", "v", ")", ")", ")", "else", ":", "entry_lines", ".", "append", "(", "' {} = {{{}}}'", ".", "format", "(", "k", ",", "v", ")", ")", "s", "+=", "',\\n'", ".", "join", "(", "entry_lines", ")", "s", "+=", "'\\n}'", "return", "s" ]
Convert a single reference to bibtex format
[ "Convert", "a", "single", "reference", "to", "bibtex", "format" ]
python
train
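Applied to a made-up reference dict, the formatter in the record yields a conventional BibTeX entry:

# The record's formatting logic, restated standalone on invented data.
def ref_bib(key, ref):
    s = '@{}{{{},\n'.format(ref['type'], key)
    entry_lines = []
    for k, v in ref.items():
        if k == 'type':
            continue
        if k == 'authors':
            entry_lines.append('    author = {{{}}}'.format(' and '.join(v)))
        elif k == 'editors':
            entry_lines.append('    editor = {{{}}}'.format(' and '.join(v)))
        else:
            entry_lines.append('    {} = {{{}}}'.format(k, v))
    return s + ',\n'.join(entry_lines) + '\n}'

print(ref_bib('dunning1989', {
    'type': 'article',
    'authors': ['Dunning, T. H.'],
    'journal': 'J. Chem. Phys.',
    'year': 1989,
}))
# @article{dunning1989,
#     author = {Dunning, T. H.},
#     journal = {J. Chem. Phys.},
#     year = {1989}
# }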
honzamach/pynspect
pynspect/gparser.py
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/gparser.py#L290-L296
def p_and_expression(tok): """and_expression : or_p_expression OP_AND and_expression | or_p_expression""" if len(tok) == 4: tok[0] = LogicalBinOpRule(tok[2], tok[1], tok[3]) else: tok[0] = tok[1]
[ "def", "p_and_expression", "(", "tok", ")", ":", "if", "len", "(", "tok", ")", "==", "4", ":", "tok", "[", "0", "]", "=", "LogicalBinOpRule", "(", "tok", "[", "2", "]", ",", "tok", "[", "1", "]", ",", "tok", "[", "3", "]", ")", "else", ":", "tok", "[", "0", "]", "=", "tok", "[", "1", "]" ]
and_expression : or_p_expression OP_AND and_expression | or_p_expression
[ "and_expression", ":", "or_p_expression", "OP_AND", "and_expression", "|", "or_p_expression" ]
python
train
google/grr
grr/client/grr_response_client/client_utils_windows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils_windows.py#L134-L193
def FindProxies(): """Tries to find proxies by interrogating all the user's settings. This function is a modified urillib.getproxies_registry() from the standard library. We just store the proxy value in the environment for urllib to find it. TODO(user): Iterate through all the possible values if one proxy fails, in case more than one proxy is specified in different users profiles. Returns: A list of proxies. """ proxies = [] for i in range(0, 100): try: sid = winreg.EnumKey(winreg.HKEY_USERS, i) except OSError: break try: subkey = ( sid + "\\Software\\Microsoft\\Windows" "\\CurrentVersion\\Internet Settings") internet_settings = winreg.OpenKey(winreg.HKEY_USERS, subkey) proxy_enable = winreg.QueryValueEx(internet_settings, "ProxyEnable")[0] if proxy_enable: # Returned as Unicode but problems if not converted to ASCII proxy_server = str( winreg.QueryValueEx(internet_settings, "ProxyServer")[0]) if "=" in proxy_server: # Per-protocol settings for p in proxy_server.split(";"): protocol, address = p.split("=", 1) # See if address has a type:// prefix if not re.match("^([^/:]+)://", address): address = "%s://%s" % (protocol, address) proxies.append(address) else: # Use one setting for all protocols if proxy_server[:5] == "http:": proxies.append(proxy_server) else: proxies.append("http://%s" % proxy_server) internet_settings.Close() except (OSError, ValueError, TypeError): continue logging.debug("Found proxy servers: %s", proxies) return proxies
[ "def", "FindProxies", "(", ")", ":", "proxies", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "100", ")", ":", "try", ":", "sid", "=", "winreg", ".", "EnumKey", "(", "winreg", ".", "HKEY_USERS", ",", "i", ")", "except", "OSError", ":", "break", "try", ":", "subkey", "=", "(", "sid", "+", "\"\\\\Software\\\\Microsoft\\\\Windows\"", "\"\\\\CurrentVersion\\\\Internet Settings\"", ")", "internet_settings", "=", "winreg", ".", "OpenKey", "(", "winreg", ".", "HKEY_USERS", ",", "subkey", ")", "proxy_enable", "=", "winreg", ".", "QueryValueEx", "(", "internet_settings", ",", "\"ProxyEnable\"", ")", "[", "0", "]", "if", "proxy_enable", ":", "# Returned as Unicode but problems if not converted to ASCII", "proxy_server", "=", "str", "(", "winreg", ".", "QueryValueEx", "(", "internet_settings", ",", "\"ProxyServer\"", ")", "[", "0", "]", ")", "if", "\"=\"", "in", "proxy_server", ":", "# Per-protocol settings", "for", "p", "in", "proxy_server", ".", "split", "(", "\";\"", ")", ":", "protocol", ",", "address", "=", "p", ".", "split", "(", "\"=\"", ",", "1", ")", "# See if address has a type:// prefix", "if", "not", "re", ".", "match", "(", "\"^([^/:]+)://\"", ",", "address", ")", ":", "address", "=", "\"%s://%s\"", "%", "(", "protocol", ",", "address", ")", "proxies", ".", "append", "(", "address", ")", "else", ":", "# Use one setting for all protocols", "if", "proxy_server", "[", ":", "5", "]", "==", "\"http:\"", ":", "proxies", ".", "append", "(", "proxy_server", ")", "else", ":", "proxies", ".", "append", "(", "\"http://%s\"", "%", "proxy_server", ")", "internet_settings", ".", "Close", "(", ")", "except", "(", "OSError", ",", "ValueError", ",", "TypeError", ")", ":", "continue", "logging", ".", "debug", "(", "\"Found proxy servers: %s\"", ",", "proxies", ")", "return", "proxies" ]
Tries to find proxies by interrogating all the user's settings.

This function is a modified urllib.getproxies_registry() from the
standard library. We just store the proxy value in the environment
for urllib to find it.

TODO(user): Iterate through all the possible values if one proxy
fails, in case more than one proxy is specified in different users'
profiles.

Returns:
A list of proxies.
[ "Tries", "to", "find", "proxies", "by", "interrogating", "all", "the", "user", "s", "settings", "." ]
python
train
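The registry access aside, the interesting part of FindProxies is how it normalizes a ProxyServer value: per-protocol entries are split on ';' and '=', and a scheme is prefixed when missing. That logic runs anywhere:

import re

def parse_proxy_server(proxy_server):
    # Same normalization as the record, minus the winreg lookups.
    proxies = []
    if '=' in proxy_server:
        for p in proxy_server.split(';'):
            protocol, address = p.split('=', 1)
            if not re.match('^([^/:]+)://', address):
                address = '%s://%s' % (protocol, address)
            proxies.append(address)
    else:
        if proxy_server[:5] == 'http:':
            proxies.append(proxy_server)
        else:
            proxies.append('http://%s' % proxy_server)
    return proxies

print(parse_proxy_server('http=proxy:3128;https=proxy:3129'))
# ['http://proxy:3128', 'https://proxy:3129']
print(parse_proxy_server('proxy.example.com:8080'))
# ['http://proxy.example.com:8080']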