Columns (name, type, observed value range):
  repo              string, 7-54 chars
  path              string, 4-192 chars
  url               string, 87-284 chars
  code              string, 78-104k chars
  code_tokens       sequence
  docstring         string, 1-46.9k chars
  docstring_tokens  sequence
  language          categorical, 1 distinct value
  partition         categorical, 3 distinct values
bolt-project/bolt
bolt/spark/array.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/array.py#L49-L60
def repartition(self, npartitions):
    """
    Repartitions the underlying RDD

    Parameters
    ----------
    npartitions : int
        Number of partitions to repartition the underlying RDD to
    """
    rdd = self._rdd.repartition(npartitions)
    return self._constructor(rdd, ordered=False).__finalize__(self)
Repartitions the underlying RDD Parameters ---------- npartitions : int Number of partitions to repartition the underlying RDD to
python
test
twilio/twilio-python
twilio/rest/api/v2010/account/signing_key.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/signing_key.py#L117-L126
def get(self, sid):
    """
    Constructs a SigningKeyContext

    :param sid: The sid

    :returns: twilio.rest.api.v2010.account.signing_key.SigningKeyContext
    :rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyContext
    """
    return SigningKeyContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
Constructs a SigningKeyContext :param sid: The sid :returns: twilio.rest.api.v2010.account.signing_key.SigningKeyContext :rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyContext
python
train
welbornprod/colr
colr/progress_frames.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/progress_frames.py#L808-L832
def _build_color_variants(cls):
    """ Build colorized variants of all frames and return a list of
        all frame object names.
    """
    # Get the basic frame types first.
    frametypes = cls.sets(registered=False)
    _colornames = [
        # 'black', disabled for now, it won't show on my terminal.
        'red',
        'green',
        'yellow',
        'blue',
        'magenta',
        'cyan',
        'white',
    ]
    _colornames.extend('light{}'.format(s) for s in _colornames[:])
    for colorname in _colornames:
        for framesobj in frametypes:
            framename = '{}_{}'.format(framesobj.name, colorname)
            cls.register(
                framesobj.as_colr(fore=colorname),
                name=framename,
            )
Build colorized variants of all frames and return a list of all frame object names.
python
train
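The extend call in _build_color_variants above iterates over a copy of the list (the [:] slice) while appending to the original. A minimal self-contained sketch of that idiom:

colornames = ['red', 'green']
# Iterate over a snapshot of the list while extending the original;
# extending a list from a generator that reads the same list would
# never terminate, since extend keeps feeding the iterator new items.
colornames.extend('light{}'.format(s) for s in colornames[:])
print(colornames)  # ['red', 'green', 'lightred', 'lightgreen']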
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L978-L1000
def insertReadGroupSet(self, readGroupSet):
    """
    Inserts the specified readGroupSet into this repository.
    """
    programsJson = json.dumps(
        [protocol.toJsonDict(program) for program in
         readGroupSet.getPrograms()])
    statsJson = json.dumps(protocol.toJsonDict(readGroupSet.getStats()))
    try:
        models.Readgroupset.create(
            id=readGroupSet.getId(),
            datasetid=readGroupSet.getParentContainer().getId(),
            referencesetid=readGroupSet.getReferenceSet().getId(),
            name=readGroupSet.getLocalId(),
            programs=programsJson,
            stats=statsJson,
            dataurl=readGroupSet.getDataUrl(),
            indexfile=readGroupSet.getIndexFile(),
            attributes=json.dumps(readGroupSet.getAttributes()))
        for readGroup in readGroupSet.getReadGroups():
            self.insertReadGroup(readGroup)
    except Exception as e:
        raise exceptions.RepoManagerException(e)
Inserts the specified readGroupSet into this repository.
python
train
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4330-L4348
def processor_groups(mesh_shape, group_dims):
    """Groups of processors which differ only in the given dimensions.

    Args:
      mesh_shape: a Shape
      group_dims: a list of integers

    Returns:
      a list of lists of integers (processor numbers)
    """
    group_numbers = [
        pnum_to_group(mesh_shape, group_dims, pnum)
        for pnum in xrange(mesh_shape.size)]
    ret = []
    for pnum, g in enumerate(group_numbers):
        while len(ret) <= g:
            ret.append([])
        ret[g].append(pnum)
    return ret
Groups of processors which differ only in the given dimensions. Args: mesh_shape: a Shape group_dims: a list of integers Returns: a list of lists of integers (processor numbers)
python
train
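A worked sketch of the bucketing step in processor_groups above, with hypothetical group numbers standing in for the pnum_to_group results (the real values depend on mesh_shape and group_dims):

# Suppose pnum_to_group assigned these group numbers to processors 0..3.
group_numbers = [0, 1, 0, 1]
ret = []
for pnum, g in enumerate(group_numbers):
    while len(ret) <= g:  # grow the list of groups on demand
        ret.append([])
    ret[g].append(pnum)
print(ret)  # [[0, 2], [1, 3]] -- processors 0 and 2 end up in the same group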
FutunnOpen/futuquant
futuquant/examples/learn/max_sub.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/max_sub.py#L55-L98
def aStockQoutation(self, code):
    '''
    Subscribe to real-time quote data for a single stock and receive pushes
    :param code: stock code
    :return:
    '''
    # Set up handler --> subscribe --> call the query interface
    # Intraday time-line data
    self.quote_ctx.set_handler(RTDataTest())
    self.quote_ctx.subscribe(code, SubType.RT_DATA)
    ret_code_rt_data, ret_data_rt_data = self.quote_ctx.get_rt_data(code)
    # Tick-by-tick data
    self.quote_ctx.set_handler(TickerTest())
    self.quote_ctx.subscribe(code, SubType.TICKER)
    ret_code_rt_ticker, ret_data_rt_ticker = self.quote_ctx.get_rt_ticker(code)
    # Quotes
    self.quote_ctx.set_handler(StockQuoteTest())
    self.quote_ctx.subscribe(code, SubType.QUOTE)
    ret_code_stock_quote, ret_data_stock_quote = self.quote_ctx.get_stock_quote([code])
    # Real-time K-line (candlestick) data
    self.quote_ctx.set_handler(CurKlineTest())
    kTypes = [SubType.K_1M, SubType.K_5M, SubType.K_15M, SubType.K_30M,
              SubType.K_60M, SubType.K_DAY, SubType.K_WEEK, SubType.K_MON]
    auTypes = [AuType.NONE, AuType.QFQ, AuType.HFQ]
    num = 10
    ret_code_cur_kline = RET_OK
    for kType in kTypes:
        self.quote_ctx.subscribe(code, kType)
        for auType in auTypes:
            ret_code_cur_kline_temp, ret_data_cur_kline = self.quote_ctx.get_cur_kline(code, num, kType, auType)
            if ret_code_cur_kline_temp is RET_ERROR:
                ret_code_cur_kline = RET_ERROR
    # Order book
    self.quote_ctx.set_handler(OrderBookTest())
    self.quote_ctx.subscribe(code, SubType.ORDER_BOOK)
    ret_code_order_book, ret_data_order_book = self.quote_ctx.get_order_book(code)
    # Broker queue
    self.quote_ctx.set_handler(BrokerTest())
    self.quote_ctx.subscribe(code, SubType.BROKER)
    ret_code_broker_queue, bid_frame_table, ask_frame_table = self.quote_ctx.get_broker_queue(code)
    return ret_code_rt_data + ret_code_rt_ticker + ret_code_stock_quote + ret_code_cur_kline + ret_code_order_book + ret_code_broker_queue
Subscribe to real-time quote data for a single stock and receive pushes :param code: stock code :return:
python
train
hollenstein/maspy
maspy/inference.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/inference.py#L850-L864
def _findUniqueMappingValues(mapping):
    """Find mapping entries that are unique for one key (value length of 1).

    .. Note: This function can be used to find unique proteins by providing
        a peptide to protein mapping.

    :param mapping: dict, for each key contains a set of entries
    :returns: a set of unique mapping values
    """
    uniqueMappingValues = set()
    for entries in viewvalues(mapping):
        if len(entries) == 1:
            uniqueMappingValues.update(entries)
    return uniqueMappingValues
Find mapping entries that are unique for one key (value length of 1). .. Note: This function can be used to find unique proteins by providing a peptide to protein mapping. :param mapping: dict, for each key contains a set of entries :returns: a set of unique mapping values
python
train
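A small usage sketch of _findUniqueMappingValues with a hypothetical peptide-to-protein mapping; plain dict.values() stands in for the future-library viewvalues() used above:

mapping = {
    'PEPTIDEA': {'protein1'},              # maps to exactly one protein: unique
    'PEPTIDEB': {'protein1', 'protein2'},  # shared: skipped
    'PEPTIDEC': {'protein2', 'protein3'},  # shared: skipped
}
unique = set()
for entries in mapping.values():
    if len(entries) == 1:
        unique.update(entries)
print(unique)  # {'protein1'}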
Fizzadar/pyinfra
pyinfra/api/state.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L365-L398
def fail_hosts(self, hosts_to_fail, activated_count=None):
    '''
    Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.
    '''
    if not hosts_to_fail:
        return

    activated_count = activated_count or len(self.activated_hosts)

    logger.debug('Failing hosts: {0}'.format(', '.join(
        (host.name for host in hosts_to_fail),
    )))

    # Remove the failed hosts from the inventory
    self.active_hosts -= hosts_to_fail

    # Check we're not above the fail percent
    active_hosts = self.active_hosts

    # No hosts left!
    if not active_hosts:
        raise PyinfraError('No hosts remaining!')

    if self.config.FAIL_PERCENT is not None:
        percent_failed = (
            1 - len(active_hosts) / activated_count
        ) * 100

        if percent_failed > self.config.FAIL_PERCENT:
            raise PyinfraError('Over {0}% of hosts failed ({1}%)'.format(
                self.config.FAIL_PERCENT,
                int(round(percent_failed)),
            ))
Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.
python
train
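A worked example of the fail-percent arithmetic in fail_hosts above, assuming Python 3 true division (hypothetical counts):

activated_count = 10
active_hosts_remaining = 7   # 3 of 10 hosts failed
percent_failed = (1 - active_hosts_remaining / activated_count) * 100
print(int(round(percent_failed)))  # 30 -- compared against config.FAIL_PERCENT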
gholt/swiftly
swiftly/cli/context.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/context.py#L54-L68
def write_headers(self, fp, headers, mute=None):
    """
    Convenience function to output headers in a formatted fashion
    to a file-like fp, optionally muting any headers in the mute
    list.
    """
    if headers:
        if not mute:
            mute = []
        fmt = '%%-%ds %%s\n' % (max(len(k) for k in headers) + 1)
        for key in sorted(headers):
            if key in mute:
                continue
            fp.write(fmt % (key.title() + ':', headers[key]))
        fp.flush()
Convenience function to output headers in a formatted fashion to a file-like fp, optionally muting any headers in the mute list.
python
test
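The '%%-%ds %%s\n' template in write_headers is a two-stage format: the first % substitution bakes in the column width, producing e.g. '%-15s %s\n', which the loop then uses to left-align the header names. A standalone sketch with hypothetical headers:

headers = {'content-length': '12', 'etag': 'abc123'}
fmt = '%%-%ds %%s\n' % (max(len(k) for k in headers) + 1)  # -> '%-15s %s\n'
for key in sorted(headers):
    print(fmt % (key.title() + ':', headers[key]), end='')
# Content-Length: 12
# Etag:           abc123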
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3614-L3628
def performApplicationPrelaunchCheck(self, pchAppKey):
    """
    Returns errors that would prevent the specified application
    from launching immediately. Calling this function will cause the
    current scene application to quit, so only call it when you are actually
    about to launch something else.

    What the caller should do about these failures depends on the failure:
      VRApplicationError_OldApplicationQuitting - An existing application has
        been told to quit. Wait for a VREvent_ProcessQuit and try again.
      VRApplicationError_ApplicationAlreadyStarting - This application is
        already starting. This is a permanent failure.
      VRApplicationError_LaunchInProgress - A different application is
        already starting. This is a permanent failure.
      VRApplicationError_None - Go ahead and launch. Everything is clear.
    """
    fn = self.function_table.performApplicationPrelaunchCheck
    result = fn(pchAppKey)
    return result
Returns errors that would prevent the specified application from launching immediately. Calling this function will cause the current scene application to quit, so only call it when you are actually about to launch something else. What the caller should do about these failures depends on the failure: VRApplicationError_OldApplicationQuitting - An existing application has been told to quit. Wait for a VREvent_ProcessQuit and try again. VRApplicationError_ApplicationAlreadyStarting - This application is already starting. This is a permanent failure. VRApplicationError_LaunchInProgress - A different application is already starting. This is a permanent failure. VRApplicationError_None - Go ahead and launch. Everything is clear.
python
train
drj11/pypng
code/png.py
https://github.com/drj11/pypng/blob/b8220ca9f58e4c5bc1d507e713744fcb8c049225/code/png.py#L997-L1014
def make_palette_chunks(palette):
    """
    Create the byte sequences for a ``PLTE`` and
    if necessary a ``tRNS`` chunk.
    Returned as a pair (*p*, *t*).
    *t* will be ``None`` if no ``tRNS`` chunk is necessary.
    """
    p = bytearray()
    t = bytearray()

    for x in palette:
        p.extend(x[0:3])
        if len(x) > 3:
            t.append(x[3])
    if t:
        return p, t
    return p, None
Create the byte sequences for a ``PLTE`` and if necessary a ``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be ``None`` if no ``tRNS`` chunk is necessary.
python
train
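Assuming make_palette_chunks from the snippet above is in scope, a quick check with one opaque RGB entry and one RGBA entry shows how alpha bytes are routed to the tRNS buffer:

palette = [(255, 0, 0), (0, 255, 0, 128)]  # RGB entry, then RGBA entry
p, t = make_palette_chunks(palette)
print(bytes(p))  # b'\xff\x00\x00\x00\xff\x00'  (PLTE: two RGB triples)
print(bytes(t))  # b'\x80'                      (tRNS: alpha of the 2nd entry)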
casacore/python-casacore
casacore/tables/msutil.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L362-L429
def msregularize(msname, newname):
    """ Regularize an MS

    The output MS will be such that it has the same number of baselines
    for each time stamp. Where needed fully flagged rows are added.

    Possibly missing rows are written into a separate MS <newname>_add.
    It is concatenated with the original MS and sorted in order of TIME,
    DATA_DESC_ID, ANTENNA1, ANTENNA2 to form a new regular MS. Note that
    the new MS references the input MS (it does not copy the data).
    It means that changes made in the new MS are also made in the input MS.

    If no rows were missing, the new MS is still created referencing
    the input MS.
    """
    # Find out all baselines.
    t = table(msname)
    t1 = t.sort('unique ANTENNA1,ANTENNA2')
    nadded = 0
    # Now iterate in time,band over the MS.
    for tsub in t.iter(['TIME', 'DATA_DESC_ID']):
        nmissing = t1.nrows() - tsub.nrows()
        if nmissing < 0:
            raise ValueError("A time/band chunk has too many rows")
        if nmissing > 0:
            # Rows need to be added for the missing baselines.
            ant1 = str(t1.getcol('ANTENNA1')).replace(' ', ',')
            ant2 = str(t1.getcol('ANTENNA2')).replace(' ', ',')
            ant1 = tsub.getcol('ANTENNA1')
            ant2 = tsub.getcol('ANTENNA2')
            t2 = taql('select from $t1 where !any(ANTENNA1 == $ant1 &&' +
                      ' ANTENNA2 == $ant2)')
            six.print_(nmissing, t1.nrows(), tsub.nrows(), t2.nrows())
            if t2.nrows() != nmissing:
                raise ValueError("A time/band chunk behaves strangely")
            # If nothing added yet, create a new table.
            # (which has to be reopened for read/write).
            # Otherwise append to that new table.
            if nadded == 0:
                tnew = t2.copy(newname + "_add", deep=True)
                tnew = table(newname + "_add", readonly=False)
            else:
                t2.copyrows(tnew)
            # Set the correct time and band in the new rows.
            tnew.putcell('TIME', range(nadded, nadded + nmissing),
                         tsub.getcell('TIME', 0))
            tnew.putcell('DATA_DESC_ID', range(nadded, nadded + nmissing),
                         tsub.getcell('DATA_DESC_ID', 0))
            nadded += nmissing
    # Combine the existing table and new table.
    if nadded > 0:
        # First initialize data and flags in the added rows.
        taql('update $tnew set DATA=0+0i')
        taql('update $tnew set FLAG=True')
        tcomb = table([t, tnew])
        tcomb.rename(newname + '_adds')
        tcombs = tcomb.sort('TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2')
    else:
        tcombs = t.query(offset=0)
    tcombs.rename(newname)
    six.print_(newname, 'has been created; it references the original MS')
    if nadded > 0:
        six.print_(' and', newname + '_adds', 'containing', nadded, 'new rows')
    else:
        six.print_(' no rows needed to be added')
Regularize an MS The output MS will be such that it has the same number of baselines for each time stamp. Where needed fully flagged rows are added. Possibly missing rows are written into a separate MS <newname>_add. It is concatenated with the original MS and sorted in order of TIME, DATA_DESC_ID, ANTENNA1, ANTENNA2 to form a new regular MS. Note that the new MS references the input MS (it does not copy the data). It means that changes made in the new MS are also made in the input MS. If no rows were missing, the new MS is still created referencing the input MS.
python
train
maas/python-libmaas
maas/client/viscera/maas.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/maas.py#L167-L176
async def get_upstream_dns(cls) -> list:
    """Upstream DNS server addresses.

    Upstream DNS servers used to resolve domains not managed by this
    MAAS (space-separated IP addresses). Only used when MAAS is running
    its own DNS server. This value is used as the value of 'forwarders'
    in the DNS server config.
    """
    data = await cls.get_config("upstream_dns")
    return [] if data is None else re.split(r'[,\s]+', data)
Upstream DNS server addresses. Upstream DNS servers used to resolve domains not managed by this MAAS (space-separated IP addresses). Only used when MAAS is running its own DNS server. This value is used as the value of 'forwarders' in the DNS server config.
python
train
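The r'[,\s]+' pattern in get_upstream_dns accepts commas, whitespace, or both as separators, so mixed user input still splits cleanly:

import re
data = '8.8.8.8, 8.8.4.4 1.1.1.1'  # hypothetical config value
print(re.split(r'[,\s]+', data))   # ['8.8.8.8', '8.8.4.4', '1.1.1.1']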
pantsbuild/pex
pex/finders.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/finders.py#L228-L234
def get_script_from_egg(name, dist):
  """Returns location, content of script in distribution or (None, None) if not there."""
  if dist.metadata_isdir('scripts') and name in dist.metadata_listdir('scripts'):
    return (
        os.path.join(dist.egg_info, 'scripts', name),
        dist.get_metadata('scripts/%s' % name).replace('\r\n', '\n').replace('\r', '\n'))
  return None, None
Returns location, content of script in distribution or (None, None) if not there.
python
train
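The chained replace calls in get_script_from_egg normalize Windows (\r\n) and old-Mac (\r) line endings to \n. Order matters: replacing \r first would turn each \r\n into two newlines. A standalone sketch:

content = 'a\r\nb\rc\nd'  # mixed line endings
print(content.replace('\r\n', '\n').replace('\r', '\n').splitlines())
# ['a', 'b', 'c', 'd']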
ElevenPaths/AtomShields
atomshields/scanner.py
https://github.com/ElevenPaths/AtomShields/blob/e75f25393b4a7a315ec96bf9b8e654cb2200866a/atomshields/scanner.py#L419-L448
def _getClassInstance(path, args=None):
    """
    Returns a class instance from a .py file.

    Args:
        path (str): Absolute path to .py file
        args (dict): Arguments passed via class constructor

    Returns:
        object: Class instance or None
    """
    if not path.endswith(".py"):
        return None

    if args is None:
        args = {}

    classname = AtomShieldsScanner._getClassName(path)
    basename = os.path.basename(path).replace(".py", "")
    sys.path.append(os.path.dirname(path))
    try:
        mod = __import__(basename, globals(), locals(), [classname], -1)
        class_ = getattr(mod, classname)
        instance = class_(**args)
    except Exception as e:
        AtomShieldsScanner._debug("[!] %s" % e)
        return None
    finally:
        sys.path.remove(os.path.dirname(path))
    return instance
Returns a class instance from a .py file. Args: path (str): Absolute path to .py file args (dict): Arguments passed via class constructor Returns: object: Class instance or None
python
valid
shoeffner/cvloop
tools/create_functions_ipynb.py
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L27-L34
def is_mod_class(mod, cls):
    """Checks if a class in a module was declared in that module.

    Args:
        mod: the module
        cls: the class
    """
    return inspect.isclass(cls) and inspect.getmodule(cls) == mod
Checks if a class in a module was declared in that module. Args: mod: the module cls: the class
python
train
dossier/dossier.label
dossier/label/relation_label.py
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/relation_label.py#L280-L289
def get_related_ids(self, content_id, min_strength=None):
    '''Get identifiers related to a content identifier.
    '''
    related_labels = self.get_related(content_id,
                                      min_strength=min_strength)
    related_idents = set()
    for label in related_labels:
        related_idents.add(label.other(content_id))
    return list(related_idents)
Get identifiers related to a content identifier.
python
train
intake/intake
intake/gui/catalog/select.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/catalog/select.py#L139-L143
def remove_selected(self, *args):
    """Remove the selected catalog - allow the passing of arbitrary
    args so that buttons work. Also remove any nested catalogs."""
    self.collapse_nested(self.selected)
    self.remove(self.selected)
Remove the selected catalog - allow the passing of arbitrary args so that buttons work. Also remove any nested catalogs.
python
train
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2551-L2584
def modelsGetFieldsForCheckpointed(self, jobID, fields):
    """ Gets fields from all models in a job that have been checkpointed. This is
    used to figure out whether or not a new model should be checkpointed.

    Parameters:
    -----------------------------------------------------------------------
    jobID:  The jobID for the models to be searched
    fields: A list of fields to return

    Returns: a (possibly-empty) list of tuples as follows
      [
        (model_id1, [field1, ..., fieldn]),
        (model_id2, [field1, ..., fieldn]),
        (model_id3, [field1, ..., fieldn])
        ...
      ]
    """
    assert len(fields) >= 1, "fields is empty"

    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
        dbFields = [self._models.pubToDBNameDict[f] for f in fields]
        dbFieldStr = ", ".join(dbFields)

        query = 'SELECT model_id, {fields} from {models}' \
                ' WHERE job_id=%s AND model_checkpoint_id IS NOT NULL'.format(
                    fields=dbFieldStr, models=self.modelsTableName)

        conn.cursor.execute(query, [jobID])
        rows = conn.cursor.fetchall()

    return [(r[0], list(r[1:])) for r in rows]
Gets fields from all models in a job that have been checkpointed. This is used to figure out whether or not a new model should be checkpointed. Parameters: ----------------------------------------------------------------------- jobID: The jobID for the models to be searched fields: A list of fields to return Returns: a (possibly-empty) list of tuples as follows [ (model_id1, [field1, ..., fieldn]), (model_id2, [field1, ..., fieldn]), (model_id3, [field1, ..., fieldn]) ... ]
python
valid
mikejarrett/pipcheck
pipcheck/checker.py
https://github.com/mikejarrett/pipcheck/blob/2ff47b9fd8914e1764c6e659ef39b77c1b1a12ad/pipcheck/checker.py#L122-L170
def _get_environment_updates(self, display_all_distributions=False):
    """ Check all packages installed in the environment to see if there
    are any updates available.

    Args:
        display_all_distributions (bool): Return distribution even if it is
            up-to-date. Defaults to ``False``.

    Returns:
        list: A list of Update objects ordered based on ``instance.name``.
    """
    updates = []
    for distribution in self.pip.get_installed_distributions():
        versions = self.get_available_versions(distribution.project_name)
        max_version = max(versions.keys()) if versions else UNKNOW_NUM

        update = None
        distribution_version = self._parse_version(distribution.version)
        if versions and max_version > distribution_version:
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
                prelease=max_version[-1]
            )

        elif (
            display_all_distributions and
            max_version == distribution_version
        ):
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
            )

        elif display_all_distributions:
            update = Update(
                distribution.project_name,
                distribution.version,
                UNKNOWN
            )

        if update:
            updates.append(update)

    return sorted(updates, key=lambda x: x.name)
Check all packages installed in the environment to see if there are any updates available. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. Defaults to ``False``. Returns: list: A list of Update objects ordered based on ``instance.name``.
python
train
PMBio/limix-backup
limix/mtSet/mtset.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/mtSet/mtset.py#L275-L285
def getVariances(self):
    """
    get variances
    """
    var = []
    var.append(self.Cr.K().diagonal())
    if self.bgRE:
        var.append(self.Cg.K().diagonal())
    var.append(self.Cn.K().diagonal())
    var = sp.array(var)
    return var
get variances
python
train
dcos/shakedown
shakedown/dcos/__init__.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/__init__.py#L94-L104
def dcos_version():
    """Return the version of the running cluster.

    :return: DC/OS cluster version as a string
    """
    url = _gen_url('dcos-metadata/dcos-version.json')
    response = dcos.http.request('get', url)

    if response.status_code == 200:
        return response.json()['version']
    else:
        return None
Return the version of the running cluster. :return: DC/OS cluster version as a string
python
train
pandas-dev/pandas
pandas/core/indexing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L270-L295
def _has_valid_positional_setitem_indexer(self, indexer):
    """ validate that a positional indexer cannot enlarge its target
    will raise if needed, does not modify the indexer externally
    """
    if isinstance(indexer, dict):
        raise IndexError("{0} cannot enlarge its target object"
                         .format(self.name))
    else:
        if not isinstance(indexer, tuple):
            indexer = self._tuplify(indexer)
        for ax, i in zip(self.obj.axes, indexer):
            if isinstance(i, slice):
                # should check the stop slice?
                pass
            elif is_list_like_indexer(i):
                # should check the elements?
                pass
            elif is_integer(i):
                if i >= len(ax):
                    raise IndexError("{name} cannot enlarge its target "
                                     "object".format(name=self.name))
            elif isinstance(i, dict):
                raise IndexError("{name} cannot enlarge its target object"
                                 .format(name=self.name))

    return True
validate that a positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally
python
train
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L731-L767
def _resolve_lbs(load_balancers):
    """
    Takes either a single LB reference or a list of references and returns
    the dictionary required for creating a Scaling Group.

    References can be either a dict that matches the structure required by
    the autoscale API, a CloudLoadBalancer instance, or the ID of the load
    balancer.
    """
    lb_args = []
    if not isinstance(load_balancers, list):
        lbs = [load_balancers]
    else:
        lbs = load_balancers
    for lb in lbs:
        if isinstance(lb, dict):
            lb_args.append(lb)
        elif isinstance(lb, CloudLoadBalancer):
            lb_args.append({
                "loadBalancerId": lb.id,
                "port": lb.port,
            })
        elif isinstance(lb, tuple):
            lb_args.append({"loadBalancerId": lb[0], "port": lb[1]})
        else:
            # See if it's an ID for a Load Balancer
            try:
                instance = pyrax.cloud_loadbalancers.get(lb)
            except Exception:
                raise exc.InvalidLoadBalancer("Received an invalid "
                        "specification for a Load Balancer: '%s'" % lb)
            lb_args.append({
                "loadBalancerId": instance.id,
                "port": instance.port,
            })
    return lb_args
Takes either a single LB reference or a list of references and returns the dictionary required for creating a Scaling Group. References can be either a dict that matches the structure required by the autoscale API, a CloudLoadBalancer instance, or the ID of the load balancer.
python
train
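A sketch of the two purely local forms _resolve_lbs normalizes (dict passthrough and an (ID, port) tuple, both hypothetical values); the CloudLoadBalancer and bare-ID branches need the pyrax module context and a live API, so they are omitted here:

lbs = [
    {"loadBalancerId": 1234, "port": 80},  # already in API form: passed through
    (5678, 443),                           # (ID, port) tuple
]
print(_resolve_lbs(lbs))  # assumes the function and imports above are in scope
# [{'loadBalancerId': 1234, 'port': 80}, {'loadBalancerId': 5678, 'port': 443}]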
datacamp/protowhat
protowhat/checks/check_funcs.py
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L247-L319
def has_equal_ast(
    state,
    incorrect_msg="Check the {ast_path}. {extra}",
    sql=None,
    start=["expression", "subquery", "sql_script"][0],
    exact=None,
):
    """Test whether the student and solution code have identical AST representations

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        incorrect_msg: feedback message if student and solution ASTs don't match
        sql : optional code to use instead of the solution ast that is zoomed in on.
        start: if ``sql`` arg is used, the parser rule to parse the sql code.
            One of 'expression' (the default), 'subquery', or 'sql_script'.
        exact: whether to require an exact match (True), or only that the student
            AST contains the solution AST. If not specified, this defaults to
            ``True`` if ``sql`` is not specified, and to ``False`` if ``sql``
            is specified. You can always specify it manually.

    :Example:

        Example 1 - Suppose the solution code is ::

            SELECT * FROM cities

        and you want to verify whether the `FROM` part is correct: ::

            Ex().check_node('SelectStmt').from_clause().has_equal_ast()

        Example 2 - Suppose the solution code is ::

            SELECT * FROM b WHERE id > 1 AND name = 'filip'

        Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::

            Ex().check_node('SelectStmt') \\
                .check_edge('where_clause') \\
                .has_equal_ast(sql = 'id > 1')
    """
    ast = state.ast_dispatcher.ast_mod
    sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)

    # if sql is set, exact defaults to False.
    # if sql not set, exact defaults to True.
    if exact is None:
        exact = sql is None

    stu_rep = repr(state.student_ast)
    sol_rep = repr(sol_ast)

    def get_str(ast, code, sql):
        if sql:
            return sql
        if isinstance(ast, str):
            return ast
        try:
            return ast.get_text(code)
        except:
            return None

    sol_str = get_str(state.solution_ast, state.solution_code, sql)
    _msg = incorrect_msg.format(
        ast_path=state.get_ast_path() or "highlighted code",
        extra="The checker expected to find `{}` in there.".format(sol_str)
        if sol_str
        else "Something is missing.",
    )

    if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
        state.report(Feedback(_msg))

    return state
Test whether the student and solution code have identical AST representations Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). incorrect_msg: feedback message if student and solution ASTs don't match sql : optional code to use instead of the solution ast that is zoomed in on. start: if ``sql`` arg is used, the parser rule to parse the sql code. One of 'expression' (the default), 'subquery', or 'sql_script'. exact: whether to require an exact match (True), or only that the student AST contains the solution AST. If not specified, this defaults to ``True`` if ``sql`` is not specified, and to ``False`` if ``sql`` is specified. You can always specify it manually. :Example: Example 1 - Suppose the solution code is :: SELECT * FROM cities and you want to verify whether the `FROM` part is correct: :: Ex().check_node('SelectStmt').from_clause().has_equal_ast() Example 2 - Suppose the solution code is :: SELECT * FROM b WHERE id > 1 AND name = 'filip' Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.:: Ex().check_node('SelectStmt') \\ .check_edge('where_clause') \\ .has_equal_ast(sql = 'id > 1')
python
train
ValvePython/steam
steam/guard.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/guard.py#L87-L100
def get_time(self):
    """
    :return: Steam aligned timestamp
    :rtype: int
    """
    if (self.steam_time_offset is None
            or (self.align_time_every
                and (time() - self._offset_last_check) > self.align_time_every)):
        self.steam_time_offset = get_time_offset()

        if self.steam_time_offset is not None:
            self._offset_last_check = time()

    return int(time() + (self.steam_time_offset or 0))
:return: Steam aligned timestamp :rtype: int
python
train
openstack/networking-hyperv
networking_hyperv/neutron/agent/hnv_neutron_agent.py
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_neutron_agent.py#L57-L68
def _provision_network(self, port_id, net_uuid, network_type,
                       physical_network, segmentation_id):
    """Provision the network with the received information."""
    LOG.info("Provisioning network %s", net_uuid)

    vswitch_name = self._get_vswitch_name(network_type, physical_network)
    vswitch_map = {
        'network_type': network_type,
        'vswitch_name': vswitch_name,
        'ports': [],
        'vlan_id': segmentation_id}
    self._network_vswitch_map[net_uuid] = vswitch_map
Provision the network with the received information.
python
train
edx/edx-enterprise
integrated_channels/sap_success_factors/migrations/0014_drop_historical_table.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/sap_success_factors/migrations/0014_drop_historical_table.py#L7-L15
def dropHistoricalTable(apps, schema_editor): """ Drops the historical sap_success_factors table named herein. """ table_name = 'sap_success_factors_historicalsapsuccessfactorsenterprisecus80ad' if table_name in connection.introspection.table_names(): migrations.DeleteModel( name=table_name, )
[ "def", "dropHistoricalTable", "(", "apps", ",", "schema_editor", ")", ":", "table_name", "=", "'sap_success_factors_historicalsapsuccessfactorsenterprisecus80ad'", "if", "table_name", "in", "connection", ".", "introspection", ".", "table_names", "(", ")", ":", "migrations", ".", "DeleteModel", "(", "name", "=", "table_name", ",", ")" ]
Drops the historical sap_success_factors table named herein.
[ "Drops", "the", "historical", "sap_success_factors", "table", "named", "herein", "." ]
python
valid
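Note that the callable above builds a migrations.DeleteModel operation without applying it, so nothing is actually dropped. A hedged sketch of how such a RunPython callable could issue the drop directly, assuming raw SQL is acceptable (the table name is taken from the record; the dependencies list is a placeholder):

from django.db import connection, migrations

def drop_historical_table(apps, schema_editor):
    # Execute the DROP through the schema editor instead of constructing
    # a migration operation that is never run.
    table_name = 'sap_success_factors_historicalsapsuccessfactorsenterprisecus80ad'
    if table_name in connection.introspection.table_names():
        schema_editor.execute('DROP TABLE %s' % schema_editor.quote_name(table_name))

class Migration(migrations.Migration):
    dependencies = []  # fill in the app's previous migration here
    operations = [migrations.RunPython(drop_historical_table, migrations.RunPython.noop)]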
sdispater/eloquent
eloquent/query/grammars/mysql_grammar.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/query/grammars/mysql_grammar.py#L23-L38
def compile_select(self, query): """ Compile a select query into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :return: The compiled sql :rtype: str """ sql = super(MySqlQueryGrammar, self).compile_select(query) if query.unions: sql = '(%s) %s' % (sql, self._compile_unions(query)) return sql
[ "def", "compile_select", "(", "self", ",", "query", ")", ":", "sql", "=", "super", "(", "MySqlQueryGrammar", ",", "self", ")", ".", "compile_select", "(", "query", ")", "if", "query", ".", "unions", ":", "sql", "=", "'(%s) %s'", "%", "(", "sql", ",", "self", ".", "_compile_unions", "(", "query", ")", ")", "return", "sql" ]
Compile a select query into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :return: The compiled sql :rtype: str
[ "Compile", "a", "select", "query", "into", "SQL" ]
python
train
mongodb/mongo-python-driver
pymongo/message.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L1469-L1482
def unpack_response(self, cursor_id=None, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, user_fields=None, legacy_response=False): """Unpack an OP_MSG command response. :Parameters: - `cursor_id` (optional): Ignored, for compatibility with _OpReply. - `codec_options` (optional): an instance of :class:`~bson.codec_options.CodecOptions` """ # If _OpMsg is in-use, this cannot be a legacy response. assert not legacy_response return bson._decode_all_selective( self.payload_document, codec_options, user_fields)
[ "def", "unpack_response", "(", "self", ",", "cursor_id", "=", "None", ",", "codec_options", "=", "_UNICODE_REPLACE_CODEC_OPTIONS", ",", "user_fields", "=", "None", ",", "legacy_response", "=", "False", ")", ":", "# If _OpMsg is in-use, this cannot be a legacy response.", "assert", "not", "legacy_response", "return", "bson", ".", "_decode_all_selective", "(", "self", ".", "payload_document", ",", "codec_options", ",", "user_fields", ")" ]
Unpack an OP_MSG command response. :Parameters: - `cursor_id` (optional): Ignored, for compatibility with _OpReply. - `codec_options` (optional): an instance of :class:`~bson.codec_options.CodecOptions`
[ "Unpack", "an", "OP_MSG", "command", "response", "." ]
python
train
COALAIP/pycoalaip
coalaip/model_validators.py
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L45-L56
def is_creation_model(instance, attribute, value): """Must include at least a ``name`` key.""" creation_name = value.get('name') if not isinstance(creation_name, str): instance_name = instance.__class__.__name__ err_str = ("'name' must be given as a string in the '{attr}' " "parameter of a '{cls}'. Given " "'{value}'").format(attr=attribute.name, cls=instance_name, value=creation_name) raise ModelDataError(err_str)
[ "def", "is_creation_model", "(", "instance", ",", "attribute", ",", "value", ")", ":", "creation_name", "=", "value", ".", "get", "(", "'name'", ")", "if", "not", "isinstance", "(", "creation_name", ",", "str", ")", ":", "instance_name", "=", "instance", ".", "__class__", ".", "__name__", "err_str", "=", "(", "\"'name' must be given as a string in the '{attr}' \"", "\"parameter of a '{cls}'. Given \"", "\"'{value}'\"", ")", ".", "format", "(", "attr", "=", "attribute", ".", "name", ",", "cls", "=", "instance_name", ",", "value", "=", "creation_name", ")", "raise", "ModelDataError", "(", "err_str", ")" ]
Must include at least a ``name`` key.
[ "Must", "include", "at", "least", "a", "name", "key", "." ]
python
train
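The (instance, attribute, value) signature matches what the attrs library passes to validators, so the function can be attached directly to an attribute; the Work class below is purely illustrative:

import attr
from coalaip.model_validators import is_creation_model  # module path from the record

@attr.s
class Work(object):
    data = attr.ib(validator=is_creation_model)

Work(data={'name': 'Meditations'})  # passes
# Work(data={}) would raise ModelDataError: 'name' must be given as a string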
LPgenerator/django-db-mailer
dbmail/providers/boxcar/push.py
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/boxcar/push.py#L19-L48
def send(token, title, **kwargs): """ Site: https://boxcar.io/ API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api Desc: Best app for system administrators """ headers = { "Content-type": "application/x-www-form-urlencoded", "User-Agent": "DBMail/%s" % get_version(), } data = { "user_credentials": token, "notification[title]": from_unicode(title), "notification[sound]": "notifier-2" } for k, v in kwargs.items(): data['notification[%s]' % k] = from_unicode(v) http = HTTPSConnection(kwargs.pop("api_url", "new.boxcar.io")) http.request( "POST", "/api/notifications", headers=headers, body=urlencode(data)) response = http.getresponse() if response.status != 201: raise BoxcarError(response.reason) return True
[ "def", "send", "(", "token", ",", "title", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"User-Agent\"", ":", "\"DBMail/%s\"", "%", "get_version", "(", ")", ",", "}", "data", "=", "{", "\"user_credentials\"", ":", "token", ",", "\"notification[title]\"", ":", "from_unicode", "(", "title", ")", ",", "\"notification[sound]\"", ":", "\"notifier-2\"", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "data", "[", "'notification[%s]'", "%", "k", "]", "=", "from_unicode", "(", "v", ")", "http", "=", "HTTPSConnection", "(", "kwargs", ".", "pop", "(", "\"api_url\"", ",", "\"new.boxcar.io\"", ")", ")", "http", ".", "request", "(", "\"POST\"", ",", "\"/api/notifications\"", ",", "headers", "=", "headers", ",", "body", "=", "urlencode", "(", "data", ")", ")", "response", "=", "http", ".", "getresponse", "(", ")", "if", "response", ".", "status", "!=", "201", ":", "raise", "BoxcarError", "(", "response", ".", "reason", ")", "return", "True" ]
Site: https://boxcar.io/ API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api Desc: Best app for system administrators
[ "Site", ":", "https", ":", "//", "boxcar", ".", "io", "/", "API", ":", "http", ":", "//", "help", ".", "boxcar", ".", "io", "/", "knowledgebase", "/", "topics", "/", "48115", "-", "boxcar", "-", "api", "Desc", ":", "Best", "app", "for", "system", "administrators" ]
python
train
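A typical call, assuming a valid Boxcar access token; extra keyword arguments are forwarded as notification[...] fields, and long_message and source_name are used here as illustrative Boxcar field names:

from dbmail.providers.boxcar.push import send  # module path from the record

send('ACCESS_TOKEN',            # placeholder user credentials token
     'Deploy finished',
     long_message='build 1234 is live',
     source_name='ci')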
spyder-ide/spyder
spyder/plugins/help/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L448-L452
def show_plain_text(self, text): """Show text in plain mode""" self.switch_to_plugin() self.switch_to_plain_text() self.set_plain_text(text, is_code=False)
[ "def", "show_plain_text", "(", "self", ",", "text", ")", ":", "self", ".", "switch_to_plugin", "(", ")", "self", ".", "switch_to_plain_text", "(", ")", "self", ".", "set_plain_text", "(", "text", ",", "is_code", "=", "False", ")" ]
Show text in plain mode
[ "Show", "text", "in", "plain", "mode" ]
python
train
gem/oq-engine
openquake/hmtk/faults/fault_models.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/fault_models.py#L327-L363
def _generate_branching_index(self): ''' Generates a branching index (i.e. a list indicating the number of branches in each branching level. Current branching levels are: 1) Slip 2) MSR 3) Shear Modulus 4) DLR 5) MSR_Sigma 6) Config :returns: * branch_index - A 2-D numpy.ndarray where each row is a pointer to a particular combination of values * number_branches - Total number of branches (int) ''' branch_count = np.array([len(self.slip), len(self.msr), len(self.shear_modulus), len(self.disp_length_ratio), len(self.msr_sigma), len(self.config)]) n_levels = len(branch_count) number_branches = np.prod(branch_count) branch_index = np.zeros([number_branches, n_levels], dtype=int) cumval = 1 dstep = 1E-9 for iloc in range(0, n_levels): idx = np.linspace(0., float(branch_count[iloc]) - dstep, number_branches // cumval) branch_index[:, iloc] = np.reshape(np.tile(idx, [cumval, 1]), number_branches) cumval *= branch_count[iloc] return branch_index.tolist(), number_branches
[ "def", "_generate_branching_index", "(", "self", ")", ":", "branch_count", "=", "np", ".", "array", "(", "[", "len", "(", "self", ".", "slip", ")", ",", "len", "(", "self", ".", "msr", ")", ",", "len", "(", "self", ".", "shear_modulus", ")", ",", "len", "(", "self", ".", "disp_length_ratio", ")", ",", "len", "(", "self", ".", "msr_sigma", ")", ",", "len", "(", "self", ".", "config", ")", "]", ")", "n_levels", "=", "len", "(", "branch_count", ")", "number_branches", "=", "np", ".", "prod", "(", "branch_count", ")", "branch_index", "=", "np", ".", "zeros", "(", "[", "number_branches", ",", "n_levels", "]", ",", "dtype", "=", "int", ")", "cumval", "=", "1", "dstep", "=", "1E-9", "for", "iloc", "in", "range", "(", "0", ",", "n_levels", ")", ":", "idx", "=", "np", ".", "linspace", "(", "0.", ",", "float", "(", "branch_count", "[", "iloc", "]", ")", "-", "dstep", ",", "number_branches", "//", "cumval", ")", "branch_index", "[", ":", ",", "iloc", "]", "=", "np", ".", "reshape", "(", "np", ".", "tile", "(", "idx", ",", "[", "cumval", ",", "1", "]", ")", ",", "number_branches", ")", "cumval", "*=", "branch_count", "[", "iloc", "]", "return", "branch_index", ".", "tolist", "(", ")", ",", "number_branches" ]
Generates a branching index (i.e. a list indicating the number of branches in each branching level. Current branching levels are: 1) Slip 2) MSR 3) Shear Modulus 4) DLR 5) MSR_Sigma 6) Config :returns: * branch_index - A 2-D numpy.ndarray where each row is a pointer to a particular combination of values * number_branches - Total number of branches (int)
[ "Generates", "a", "branching", "index", "(", "i", ".", "e", ".", "a", "list", "indicating", "the", "number", "of", "branches", "in", "each", "branching", "level", ".", "Current", "branching", "levels", "are", ":", "1", ")", "Slip", "2", ")", "MSR", "3", ")", "Shear", "Modulus", "4", ")", "DLR", "5", ")", "MSR_Sigma", "6", ")", "Config" ]
python
train
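A worked example of the same index construction with only two branching levels of sizes 2 and 3; each row of the result picks one option per level, enumerating all six combinations:

import numpy as np

branch_count = np.array([2, 3])
n_levels = len(branch_count)
number_branches = np.prod(branch_count)          # 6
branch_index = np.zeros([number_branches, n_levels], dtype=int)
cumval, dstep = 1, 1e-9
for iloc in range(n_levels):
    # dstep keeps the last linspace value just below the branch count,
    # so truncation to int yields indices 0 .. count-1.
    idx = np.linspace(0., float(branch_count[iloc]) - dstep,
                      number_branches // cumval)
    branch_index[:, iloc] = np.reshape(np.tile(idx, [cumval, 1]),
                                       number_branches)
    cumval *= branch_count[iloc]
print(branch_index.tolist())  # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]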
jazzband/django-ddp
dddp/accounts/ddp.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/accounts/ddp.py#L513-L533
def change_password(self, old_password, new_password): """Change password.""" try: user = this.user except self.user_model.DoesNotExist: self.auth_failed() user = auth.authenticate( username=user.get_username(), password=self.get_password(old_password), ) if user is None: self.auth_failed() else: user.set_password(self.get_password(new_password)) user.save() password_changed.send( sender=__name__, request=this.request, user=user, ) return {"passwordChanged": True}
[ "def", "change_password", "(", "self", ",", "old_password", ",", "new_password", ")", ":", "try", ":", "user", "=", "this", ".", "user", "except", "self", ".", "user_model", ".", "DoesNotExist", ":", "self", ".", "auth_failed", "(", ")", "user", "=", "auth", ".", "authenticate", "(", "username", "=", "user", ".", "get_username", "(", ")", ",", "password", "=", "self", ".", "get_password", "(", "old_password", ")", ",", ")", "if", "user", "is", "None", ":", "self", ".", "auth_failed", "(", ")", "else", ":", "user", ".", "set_password", "(", "self", ".", "get_password", "(", "new_password", ")", ")", "user", ".", "save", "(", ")", "password_changed", ".", "send", "(", "sender", "=", "__name__", ",", "request", "=", "this", ".", "request", ",", "user", "=", "user", ",", ")", "return", "{", "\"passwordChanged\"", ":", "True", "}" ]
Change password.
[ "Change", "password", "." ]
python
test
terrycain/aioboto3
aioboto3/s3/inject.py
https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L17-L30
async def download_file(self, Bucket, Key, Filename, ExtraArgs=None, Callback=None, Config=None): """Download an S3 object to a file. Usage:: import boto3 s3 = boto3.resource('s3') s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt') Similar behavior as S3Transfer's download_file() method, except that parameters are capitalized. """ with open(Filename, 'wb') as open_file: await download_fileobj(self, Bucket, Key, open_file, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
[ "async", "def", "download_file", "(", "self", ",", "Bucket", ",", "Key", ",", "Filename", ",", "ExtraArgs", "=", "None", ",", "Callback", "=", "None", ",", "Config", "=", "None", ")", ":", "with", "open", "(", "Filename", ",", "'wb'", ")", "as", "open_file", ":", "await", "download_fileobj", "(", "self", ",", "Bucket", ",", "Key", ",", "open_file", ",", "ExtraArgs", "=", "ExtraArgs", ",", "Callback", "=", "Callback", ",", "Config", "=", "Config", ")" ]
Download an S3 object to a file. Usage:: import boto3 s3 = boto3.resource('s3') s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt') Similar behavior as S3Transfer's download_file() method, except that parameters are capitalized.
[ "Download", "an", "S3", "object", "to", "a", "file", "." ]
python
train
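A usage sketch; the session-based client shown here follows newer aioboto3 releases (older versions exposed aioboto3.client directly), and the bucket and key names are placeholders:

import asyncio
import aioboto3

async def main():
    session = aioboto3.Session()
    async with session.client('s3') as s3:
        await s3.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')

asyncio.run(main())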
mschwager/cohesion
lib/cohesion/filesystem.py
https://github.com/mschwager/cohesion/blob/b242ad59770940f3a0904931f27755ede009f491/lib/cohesion/filesystem.py#L21-L29
def recursively_get_files_from_directory(directory): """ Return all filenames recursively found under a directory """ return [ os.path.join(root, filename) for root, directories, filenames in os.walk(directory) for filename in filenames ]
[ "def", "recursively_get_files_from_directory", "(", "directory", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "for", "root", ",", "directories", ",", "filenames", "in", "os", ".", "walk", "(", "directory", ")", "for", "filename", "in", "filenames", "]" ]
Return all filenames recursively found under a directory
[ "Return", "all", "filenames", "recursively", "found", "under", "a", "directory" ]
python
train
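Usage is a single call; the import path follows the record's module layout:

from cohesion.filesystem import recursively_get_files_from_directory

for path in recursively_get_files_from_directory('.')[:5]:
    print(path)  # output depends on the working directory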
ShawnClake/Apitax
apitax/ah/api/controllers/migrations/developers_controller.py
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/migrations/developers_controller.py#L54-L66
def rename_script(rename=None): # noqa: E501 """Rename a script Rename a script # noqa: E501 :param rename: The data needed to save this script :type rename: dict | bytes :rtype: Response """ if connexion.request.is_json: rename = Rename.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
[ "def", "rename_script", "(", "rename", "=", "None", ")", ":", "# noqa: E501", "if", "connexion", ".", "request", ".", "is_json", ":", "rename", "=", "Rename", ".", "from_dict", "(", "connexion", ".", "request", ".", "get_json", "(", ")", ")", "# noqa: E501", "return", "'do some magic!'" ]
Rename a script Rename a script # noqa: E501 :param rename: The data needed to save this script :type rename: dict | bytes :rtype: Response
[ "Rename", "a", "script" ]
python
train
onicagroup/runway
runway/commands/modules_command.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/modules_command.py#L177-L193
def validate_account_id(sts_client, account_id): """Exit if get_caller_identity doesn't match account_id.""" resp = sts_client.get_caller_identity() if 'Account' in resp: if resp['Account'] == account_id: LOGGER.info('Verified current AWS account matches required ' 'account id %s.', account_id) else: LOGGER.error('Current AWS account %s does not match ' 'required account %s in Runway config.', resp['Account'], account_id) sys.exit(1) else: LOGGER.error('Error checking current account ID') sys.exit(1)
[ "def", "validate_account_id", "(", "sts_client", ",", "account_id", ")", ":", "resp", "=", "sts_client", ".", "get_caller_identity", "(", ")", "if", "'Account'", "in", "resp", ":", "if", "resp", "[", "'Account'", "]", "==", "account_id", ":", "LOGGER", ".", "info", "(", "'Verified current AWS account matches required '", "'account id %s.'", ",", "account_id", ")", "else", ":", "LOGGER", ".", "error", "(", "'Current AWS account %s does not match '", "'required account %s in Runway config.'", ",", "resp", "[", "'Account'", "]", ",", "account_id", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "LOGGER", ".", "error", "(", "'Error checking current account ID'", ")", "sys", ".", "exit", "(", "1", ")" ]
Exit if get_caller_identity doesn't match account_id.
[ "Exit", "if", "get_caller_identity", "doesn", "t", "match", "account_id", "." ]
python
train
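A call sketch with a standard boto3 STS client; the account id is a placeholder, and the process exits unless the active credentials belong to that account:

import boto3
from runway.commands.modules_command import validate_account_id  # path from the record

sts = boto3.client('sts')
validate_account_id(sts, '123456789012')  # placeholder account id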
Dallinger/Dallinger
dallinger/heroku/tools.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/heroku/tools.py#L61-L66
def all_apps(self): """List all Heroku apps, optionally scoped to a team.""" cmd = ["heroku", "apps", "--json"] if self.team: cmd.extend(["--team", self.team]) return json.loads(self._result(cmd))
[ "def", "all_apps", "(", "self", ")", ":", "cmd", "=", "[", "\"heroku\"", ",", "\"apps\"", ",", "\"--json\"", "]", "if", "self", ".", "team", ":", "cmd", ".", "extend", "(", "[", "\"--team\"", ",", "self", ".", "team", "]", ")", "return", "json", ".", "loads", "(", "self", ".", "_result", "(", "cmd", ")", ")" ]
List all Heroku apps, optionally scoped to a team.
[ "List", "all", "Heroku", "apps", ",", "optionally", "scoped", "to", "a", "team", "." ]
python
train
refenv/cijoe
deprecated/modules/cij/liblight.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/deprecated/modules/cij/liblight.py#L75-L80
def get_chunk_meta_item(self, chunk_meta, grp, pug, chk): """Get item of chunk meta table""" num_chk = self.envs["NUM_CHK"] num_pu = self.envs["NUM_PU"] index = grp * num_pu * num_chk + pug * num_chk + chk return chunk_meta[index]
[ "def", "get_chunk_meta_item", "(", "self", ",", "chunk_meta", ",", "grp", ",", "pug", ",", "chk", ")", ":", "num_chk", "=", "self", ".", "envs", "[", "\"NUM_CHK\"", "]", "num_pu", "=", "self", ".", "envs", "[", "\"NUM_PU\"", "]", "index", "=", "grp", "*", "num_pu", "*", "num_chk", "+", "pug", "*", "num_chk", "+", "chk", "return", "chunk_meta", "[", "index", "]" ]
Get item of chunk meta table
[ "Get", "item", "of", "chunk", "meta", "table" ]
python
valid
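The flat index is plain row-major arithmetic over (group, parallel unit, chunk); a worked example assuming a geometry of NUM_PU = 4 and NUM_CHK = 10:

num_pu, num_chk = 4, 10          # assumed geometry
grp, pug, chk = 2, 3, 7
index = grp * num_pu * num_chk + pug * num_chk + chk
print(index)                     # 2*40 + 3*10 + 7 = 117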
acorg/dark-matter
bin/fasta-identity-table.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/bin/fasta-identity-table.py#L111-L179
def explanation(matchAmbiguous, concise, showLengths, showGaps, showNs): """ Make an explanation of the output HTML table. @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are possibly correct as actually being correct. Otherwise, we are strict and insist that only non-ambiguous nucleotides can contribute to the matching nucleotide count. @param concise: If C{True}, do not show match detail abbreviations. @param showLengths: If C{True}, include the lengths of sequences. @param showGaps: If C{True}, include the number of gaps in sequences. @param showNs: If C{True}, include the number of N characters in sequences. @return: A C{str} of HTML. """ result = [""" <h1>Sequence versus sequence identity table</h1> <p> The table cells below show the nucleotide identity fraction for the sequences (<span class="best">like this</span> for the best value in each row). The identity fraction numerator is the sum of the number of identical """] if matchAmbiguous: result.append('nucleotides plus the number of ambiguously matching ' 'nucleotides.') else: result.append('nucleotides.') result.append("""The denominator is the length of the sequence <em>for the row</em>. Sequence gaps are not included when calculating their lengths. </p> """) if showLengths or showGaps or showNs or matchAmbiguous or not concise: result.append(""" <p> Key to abbreviations: <ul> """) if showLengths: result.append('<li>L: sequence Length.</li>') if showGaps: result.append('<li>G: number of Gaps in sequence.</li>') if showNs: result.append('<li>N: number of N characters in sequence.</li>') if not concise: result.append('<li>IM: Identical nucleotide Matches.</li>') if matchAmbiguous: result.append('<li>AM: Ambiguous nucleotide Matches.</li>') result.append(""" <li>GG: Gap/Gap matches (both sequences have gaps).</li> <li>G?: Gap/Non-gap mismatches (one sequence has a gap).</li> <li>NE: Non-equal nucleotide mismatches.</li> </ul> </p> """) return '\n'.join(result)
[ "def", "explanation", "(", "matchAmbiguous", ",", "concise", ",", "showLengths", ",", "showGaps", ",", "showNs", ")", ":", "result", "=", "[", "\"\"\"\n<h1>Sequence versus sequence identity table</h1>\n\n<p>\n\nThe table cells below show the nucleotide identity fraction for the sequences\n(<span class=\"best\">like this</span> for the best value in each row). The\nidentity fraction numerator is the sum of the number of identical\n \"\"\"", "]", "if", "matchAmbiguous", ":", "result", ".", "append", "(", "'nucleotides plus the number of ambiguously matching '", "'nucleotides.'", ")", "else", ":", "result", ".", "append", "(", "'nucleotides.'", ")", "result", ".", "append", "(", "\"\"\"The denominator\nis the length of the sequence <em>for the row</em>. Sequence gaps\nare not included when calculating their lengths.\n\n</p>\n \"\"\"", ")", "if", "showLengths", "or", "showGaps", "or", "showNs", "or", "matchAmbiguous", "or", "not", "concise", ":", "result", ".", "append", "(", "\"\"\"\n<p>\n\nKey to abbreviations:\n <ul>\n \"\"\"", ")", "if", "showLengths", ":", "result", ".", "append", "(", "'<li>L: sequence Length.</li>'", ")", "if", "showGaps", ":", "result", ".", "append", "(", "'<li>G: number of Gaps in sequence.</li>'", ")", "if", "showNs", ":", "result", ".", "append", "(", "'<li>N: number of N characters in sequence.</li>'", ")", "if", "not", "concise", ":", "result", ".", "append", "(", "'<li>IM: Identical nucleotide Matches.</li>'", ")", "if", "matchAmbiguous", ":", "result", ".", "append", "(", "'<li>AM: Ambiguous nucleotide Matches.</li>'", ")", "result", ".", "append", "(", "\"\"\"\n <li>GG: Gap/Gap matches (both sequences have gaps).</li>\n <li>G?: Gap/Non-gap mismatches (one sequence has a gap).</li>\n <li>NE: Non-equal nucleotide mismatches.</li>\n </ul>\n</p>\n\"\"\"", ")", "return", "'\\n'", ".", "join", "(", "result", ")" ]
Make an explanation of the output HTML table. @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are possibly correct as actually being correct. Otherwise, we are strict and insist that only non-ambiguous nucleotides can contribute to the matching nucleotide count. @param concise: If C{True}, do not show match detail abbreviations. @param showLengths: If C{True}, include the lengths of sequences. @param showGaps: If C{True}, include the number of gaps in sequences. @param showNs: If C{True}, include the number of N characters in sequences. @return: A C{str} of HTML.
[ "Make", "an", "explanation", "of", "the", "output", "HTML", "table", "." ]
python
train
bbiskup/purkinje-messages
purkinje_messages/message.py
https://github.com/bbiskup/purkinje-messages/blob/ba4217d993a86fd882bcf73d206d2910e65316dd/purkinje_messages/message.py#L141-L152
def register_eventclass(event_id): """Decorator for registering event classes for parsing """ def register(cls): if not issubclass(cls, Event): raise MessageException(('Cannot register a class that' ' is not a subclass of Event')) EVENT_REGISTRY[event_id] = cls logger.debug('######### Event registry is now: {0}'.format( EVENT_REGISTRY)) return cls return register
[ "def", "register_eventclass", "(", "event_id", ")", ":", "def", "register", "(", "cls", ")", ":", "if", "not", "issubclass", "(", "cls", ",", "Event", ")", ":", "raise", "MessageException", "(", "(", "'Cannot register a class that'", "' is not a subclass of Event'", ")", ")", "EVENT_REGISTRY", "[", "event_id", "]", "=", "cls", "logger", ".", "debug", "(", "'######### Event registry is now: {0}'", ".", "format", "(", "EVENT_REGISTRY", ")", ")", "return", "cls", "return", "register" ]
Decorator for registering event classes for parsing
[ "Decorator", "for", "registering", "event", "classes", "for", "parsing" ]
python
train
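A registration sketch; BuildFinished and its event id are hypothetical, and Event is assumed to be importable from the same module the decorator lives in:

from purkinje_messages.message import Event, register_eventclass

@register_eventclass(42)            # 42 is an arbitrary example id
class BuildFinished(Event):
    pass

# Decorating a class that does not subclass Event raises MessageException.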
rikrd/inspire
inspirespeech/__init__.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L373-L417
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability, warn=True, default=True): """Provide the prediction of the full task. This function is used to predict the probability of a given pronunciation being reported for a given token. :param token_id: The token for which the prediction is provided :param pronunciation: The pronunciation for which the prediction is being made (as a list of strings or space separated string) :param pronunciation_probability: The probability of the pronunciation for the given token :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities """ if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn: logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n ' 'Using probabilities of 0.0 or 1.0 ' 'may lead to likelihoods of -Infinity'.format(pronunciation_probability, pronunciation, token_id)) key = pronunciation if isinstance(key, list): if not all([isinstance(phoneme, basestring) for phoneme in key]): raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) ' 'or of type list (containing phonemes of type strings).' 'User supplied: {}'.format(key)) key = ' '.join(pronunciation) default_preds = self._full_default(presented_pronunciation) if default else {} self['tokens'].setdefault(token_id, {}) \ .setdefault('full', default_preds) if key is not None: if pronunciation_probability is not None: self['tokens'][token_id]['full'][key] = pronunciation_probability else: if key in default_preds: self['tokens'][token_id]['full'][key] = default_preds[key] else: self['tokens'][token_id]['full'].pop(key)
[ "def", "full_task", "(", "self", ",", "token_id", ",", "presented_pronunciation", ",", "pronunciation", ",", "pronunciation_probability", ",", "warn", "=", "True", ",", "default", "=", "True", ")", ":", "if", "pronunciation_probability", "is", "not", "None", "and", "not", "0.", "<", "pronunciation_probability", "<", "1.", "and", "warn", ":", "logging", ".", "warning", "(", "'Setting a probability of [{}] to pronunciation [{}] for token [{}].\\n '", "'Using probabilities of 0.0 or 1.0 '", "'may lead to likelihoods of -Infinity'", ".", "format", "(", "pronunciation_probability", ",", "pronunciation", ",", "token_id", ")", ")", "key", "=", "pronunciation", "if", "isinstance", "(", "key", ",", "list", ")", ":", "if", "not", "all", "(", "[", "isinstance", "(", "phoneme", ",", "basestring", ")", "for", "phoneme", "in", "key", "]", ")", ":", "raise", "ValueError", "(", "'The pronunciation must be of type string (a sequence of space separated phonemes) '", "'or of type list (containing phonemes of type strings).'", "'User supplied: {}'", ".", "format", "(", "key", ")", ")", "key", "=", "' '", ".", "join", "(", "pronunciation", ")", "default_preds", "=", "self", ".", "_full_default", "(", "presented_pronunciation", ")", "if", "default", "else", "{", "}", "self", "[", "'tokens'", "]", ".", "setdefault", "(", "token_id", ",", "{", "}", ")", ".", "setdefault", "(", "'full'", ",", "default_preds", ")", "if", "key", "is", "not", "None", ":", "if", "pronunciation_probability", "is", "not", "None", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'full'", "]", "[", "key", "]", "=", "pronunciation_probability", "else", ":", "if", "key", "in", "default_preds", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'full'", "]", "[", "key", "]", "=", "default_preds", "[", "key", "]", "else", ":", "self", "[", "'tokens'", "]", "[", "token_id", "]", "[", "'full'", "]", ".", "pop", "(", "key", ")" ]
Provide the prediction of the full task. This function is used to predict the probability of a given pronunciation being reported for a given token. :param token_id: The token for which the prediction is provided :param pronunciation: The pronunciation for which the prediction is being made (as a list of strings or space separated string) :param pronunciation_probability: The probability of the pronunciation for the given token :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities :param default: Set to False in order to avoid generating the default probabilities
[ "Provide", "the", "prediction", "of", "the", "full", "task", "." ]
python
train
spatialaudio/python-pa-ringbuffer
src/pa_ringbuffer.py
https://github.com/spatialaudio/python-pa-ringbuffer/blob/b4a5eaa9b53a437c05d196ed59e1791db159e4b0/src/pa_ringbuffer.py#L156-L175
def read(self, size=-1): """Read data from the ring buffer into a new buffer. This advances the read index after reading; calling :meth:`advance_read_index` is *not* necessary. :param size: The number of elements to be read. If not specified, all available elements are read. :type size: int, optional :returns: A new buffer containing the read data. Its size may be less than the requested *size*. :rtype: buffer """ if size < 0: size = self.read_available data = self._ffi.new('unsigned char[]', size * self.elementsize) size = self.readinto(data) return self._ffi.buffer(data, size * self.elementsize)
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "size", "<", "0", ":", "size", "=", "self", ".", "read_available", "data", "=", "self", ".", "_ffi", ".", "new", "(", "'unsigned char[]'", ",", "size", "*", "self", ".", "elementsize", ")", "size", "=", "self", ".", "readinto", "(", "data", ")", "return", "self", ".", "_ffi", ".", "buffer", "(", "data", ",", "size", "*", "self", ".", "elementsize", ")" ]
Read data from the ring buffer into a new buffer. This advances the read index after reading; calling :meth:`advance_read_index` is *not* necessary. :param size: The number of elements to be read. If not specified, all available elements are read. :type size: int, optional :returns: A new buffer containing the read data. Its size may be less than the requested *size*. :rtype: buffer
[ "Read", "data", "from", "the", "ring", "buffer", "into", "a", "new", "buffer", "." ]
python
train
funilrys/PyFunceble
PyFunceble/config.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/config.py#L195-L225
def _set_path_to_configs(cls, path_to_config): """ Set the paths to the configuration files. :param path_to_config: The possible path to the config to load. :type path_to_config: str :return: The path to the config to read (0) and the path to the default configuration to read as fallback (1). :rtype: tuple """ if not path_to_config.endswith(PyFunceble.directory_separator): # The path to the config does not end with the directory separator. # We initiate the default and the parsed variable with the directory separator. default = parsed = path_to_config + PyFunceble.directory_separator else: # The path to the config does end with the directory separator. # We initiate the default and the parsed variable. default = parsed = path_to_config # We append the `CONFIGURATION_FILENAME` to the parsed variable. parsed += PyFunceble.CONFIGURATION_FILENAME # And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable. default += PyFunceble.DEFAULT_CONFIGURATION_FILENAME # We finally return a tuple which contains both pieces of information. return (parsed, default)
[ "def", "_set_path_to_configs", "(", "cls", ",", "path_to_config", ")", ":", "if", "not", "path_to_config", ".", "endswith", "(", "PyFunceble", ".", "directory_separator", ")", ":", "# The path to the config does not end with the directory separator.", "# We initiate the default and the parsed variable with the directory separator.", "default", "=", "parsed", "=", "path_to_config", "+", "PyFunceble", ".", "directory_separator", "else", ":", "# The path to the config does end with the directory separator.", "# We initiate the default and the parsed variable.", "default", "=", "parsed", "=", "path_to_config", "# We append the `CONFIGURATION_FILENAME` to the parsed variable.", "parsed", "+=", "PyFunceble", ".", "CONFIGURATION_FILENAME", "# And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable.", "default", "+=", "PyFunceble", ".", "DEFAULT_CONFIGURATION_FILENAME", "# We finally return a tuple which contains both pieces of information.", "return", "(", "parsed", ",", "default", ")" ]
Set the paths to the configuration files. :param path_to_config: The possible path to the config to load. :type path_to_config: str :return: The path to the config to read (0) and the path to the default configuration to read as fallback (1). :rtype: tuple
[ "Set", "the", "paths", "to", "the", "configuration", "files", "." ]
python
test
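A standalone re-implementation of the branch logic above, with os.sep standing in for PyFunceble.directory_separator and illustrative filenames standing in for the module constants:

import os

def paths_for(path_to_config,
              config_name='.PyFunceble.yaml',               # stand-in constant
              default_name='.PyFunceble_production.yaml'):  # stand-in constant
    if not path_to_config.endswith(os.sep):
        path_to_config += os.sep
    return path_to_config + config_name, path_to_config + default_name

print(paths_for('/etc/pyfunceble'))
# ('/etc/pyfunceble/.PyFunceble.yaml', '/etc/pyfunceble/.PyFunceble_production.yaml')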
myint/language-check
setup.py
https://github.com/myint/language-check/blob/58e419833ef28a9193fcaa21193616a8a14504a9/setup.py#L363-L388
def run_3to2(args=None): """Convert Python files using lib3to2.""" args = BASE_ARGS_3TO2 if args is None else BASE_ARGS_3TO2 + args try: proc = subprocess.Popen(['3to2'] + args, stderr=subprocess.PIPE) except OSError: for path in glob.glob('*.egg'): if os.path.isdir(path) and path not in sys.path: sys.path.append(path) try: from lib3to2.main import main as lib3to2_main except ImportError: raise OSError('3to2 script is unavailable.') else: if lib3to2_main('lib3to2.fixes', args): raise Exception('lib3to2 parsing error') else: # HACK: workaround for 3to2 never returning non-zero # when using the -j option. num_errors = 0 while proc.poll() is None: line = proc.stderr.readline() sys.stderr.write(line) num_errors += line.count(': ParseError: ') if proc.returncode or num_errors: raise Exception('lib3to2 parsing error')
[ "def", "run_3to2", "(", "args", "=", "None", ")", ":", "args", "=", "BASE_ARGS_3TO2", "if", "args", "is", "None", "else", "BASE_ARGS_3TO2", "+", "args", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "'3to2'", "]", "+", "args", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "except", "OSError", ":", "for", "path", "in", "glob", ".", "glob", "(", "'*.egg'", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "path", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "append", "(", "path", ")", "try", ":", "from", "lib3to2", ".", "main", "import", "main", "as", "lib3to2_main", "except", "ImportError", ":", "raise", "OSError", "(", "'3to2 script is unavailable.'", ")", "else", ":", "if", "lib3to2_main", "(", "'lib3to2.fixes'", ",", "args", ")", ":", "raise", "Exception", "(", "'lib3to2 parsing error'", ")", "else", ":", "# HACK: workaround for 3to2 never returning non-zero", "# when using the -j option.", "num_errors", "=", "0", "while", "proc", ".", "poll", "(", ")", "is", "None", ":", "line", "=", "proc", ".", "stderr", ".", "readline", "(", ")", "sys", ".", "stderr", ".", "write", "(", "line", ")", "num_errors", "+=", "line", ".", "count", "(", "': ParseError: '", ")", "if", "proc", ".", "returncode", "or", "num_errors", ":", "raise", "Exception", "(", "'lib3to2 parsing error'", ")" ]
Convert Python files using lib3to2.
[ "Convert", "Python", "files", "using", "lib3to2", "." ]
python
valid
theislab/scvelo
scvelo/tools/velocity_confidence.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity_confidence.py#L10-L56
def velocity_confidence(data, vkey='velocity', copy=False): """Computes confidences of velocities. Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. vkey: `str` (default: `'velocity'`) Name of velocity estimates to be used. copy: `bool` (default: `False`) Return a copy instead of writing to adata. Returns ------- Returns or updates `adata` with the attributes velocity_length: `.obs` Length of the velocity vectors for each individual cell velocity_confidence: `.obs` Confidence for each cell """ adata = data.copy() if copy else data if vkey not in adata.layers.keys(): raise ValueError( 'You need to run `tl.velocity` first.') idx = np.array(adata.var[vkey + '_genes'].values, dtype=bool) X, V = adata.layers['Ms'][:, idx].copy(), adata.layers[vkey][:, idx].copy() indices = get_indices(dist=adata.uns['neighbors']['distances'])[0] V -= V.mean(1)[:, None] V_norm = norm(V) R = np.zeros(adata.n_obs) for i in range(adata.n_obs): Vi_neighs = V[indices[i]] Vi_neighs -= Vi_neighs.mean(1)[:, None] R[i] = np.mean(np.einsum('ij, j', Vi_neighs, V[i]) / (norm(Vi_neighs) * V_norm[i])[None, :]) adata.obs[vkey + '_length'] = V_norm.round(2) adata.obs[vkey + '_confidence'] = R logg.hint('added \'' + vkey + '_confidence\' (adata.obs)') if vkey + '_confidence_transition' not in adata.obs.keys(): velocity_confidence_transition(adata, vkey) return adata if copy else None
[ "def", "velocity_confidence", "(", "data", ",", "vkey", "=", "'velocity'", ",", "copy", "=", "False", ")", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "if", "vkey", "not", "in", "adata", ".", "layers", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'You need to run `tl.velocity` first.'", ")", "idx", "=", "np", ".", "array", "(", "adata", ".", "var", "[", "vkey", "+", "'_genes'", "]", ".", "values", ",", "dtype", "=", "bool", ")", "X", ",", "V", "=", "adata", ".", "layers", "[", "'Ms'", "]", "[", ":", ",", "idx", "]", ".", "copy", "(", ")", ",", "adata", ".", "layers", "[", "vkey", "]", "[", ":", ",", "idx", "]", ".", "copy", "(", ")", "indices", "=", "get_indices", "(", "dist", "=", "adata", ".", "uns", "[", "'neighbors'", "]", "[", "'distances'", "]", ")", "[", "0", "]", "V", "-=", "V", ".", "mean", "(", "1", ")", "[", ":", ",", "None", "]", "V_norm", "=", "norm", "(", "V", ")", "R", "=", "np", ".", "zeros", "(", "adata", ".", "n_obs", ")", "for", "i", "in", "range", "(", "adata", ".", "n_obs", ")", ":", "Vi_neighs", "=", "V", "[", "indices", "[", "i", "]", "]", "Vi_neighs", "-=", "Vi_neighs", ".", "mean", "(", "1", ")", "[", ":", ",", "None", "]", "R", "[", "i", "]", "=", "np", ".", "mean", "(", "np", ".", "einsum", "(", "'ij, j'", ",", "Vi_neighs", ",", "V", "[", "i", "]", ")", "/", "(", "norm", "(", "Vi_neighs", ")", "*", "V_norm", "[", "i", "]", ")", "[", "None", ",", ":", "]", ")", "adata", ".", "obs", "[", "vkey", "+", "'_length'", "]", "=", "V_norm", ".", "round", "(", "2", ")", "adata", ".", "obs", "[", "vkey", "+", "'_confidence'", "]", "=", "R", "logg", ".", "hint", "(", "'added \\''", "+", "vkey", "+", "'_confidence\\' (adata.obs)'", ")", "if", "vkey", "+", "'_confidence_transition'", "not", "in", "adata", ".", "obs", ".", "keys", "(", ")", ":", "velocity_confidence_transition", "(", "adata", ",", "vkey", ")", "return", "adata", "if", "copy", "else", "None" ]
Computes confidences of velocities. Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. vkey: `str` (default: `'velocity'`) Name of velocity estimates to be used. copy: `bool` (default: `False`) Return a copy instead of writing to adata. Returns ------- Returns or updates `adata` with the attributes velocity_length: `.obs` Length of the velocity vectors for each individual cell velocity_confidence: `.obs` Confidence for each cell
[ "Computes", "confidences", "of", "velocities", "." ]
python
train
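A typical scVelo pipeline leading up to this call; the dataset and preprocessing steps are the standard public API, and velocity must be run first, as the function itself enforces:

import scvelo as scv

adata = scv.datasets.pancreas()        # bundled example dataset
scv.pp.filter_and_normalize(adata)
scv.pp.moments(adata)
scv.tl.velocity(adata)
scv.tl.velocity_graph(adata)           # needed for the transition-based confidence
scv.tl.velocity_confidence(adata)
print(adata.obs[['velocity_length', 'velocity_confidence']].head())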
heikomuller/sco-datastore
scodata/image.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/image.py#L806-L828
def create_object(self, name, image_sets): """Create a prediction image set list. Parameters ---------- name : string User-provided name for the image group. image_sets : list(PredictionImageSet) List of prediction image sets Returns ------- PredictionImageSetHandle Object handle for created prediction image set """ # Create a new object identifier identifier = str(uuid.uuid4()).replace('-','') properties = {datastore.PROPERTY_NAME: name} # Create the image group object and store it in the database before # returning it. obj = PredictionImageSetHandle(identifier, properties, image_sets) self.insert_object(obj) return obj
[ "def", "create_object", "(", "self", ",", "name", ",", "image_sets", ")", ":", "# Create a new object identifier", "identifier", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "replace", "(", "'-'", ",", "''", ")", "properties", "=", "{", "datastore", ".", "PROPERTY_NAME", ":", "name", "}", "# Create the image group object and store it in the database before", "# returning it.", "obj", "=", "PredictionImageSetHandle", "(", "identifier", ",", "properties", ",", "image_sets", ")", "self", ".", "insert_object", "(", "obj", ")", "return", "obj" ]
Create a prediction image set list. Parameters ---------- name : string User-provided name for the image group. image_sets : list(PredictionImageSet) List of prediction image sets Returns ------- PredictionImageSetHandle Object handle for created prediction image set
[ "Create", "a", "prediction", "image", "set", "list", "." ]
python
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1699-L1707
def TENSES(self): """ Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple. """ a = set(TENSES[id] for id in self._format) a = a.union(set(TENSES[id] for id in self._default.keys())) a = a.union(set(TENSES[id] for id in self._default.values())) a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation. return a
[ "def", "TENSES", "(", "self", ")", ":", "a", "=", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_format", ")", "a", "=", "a", ".", "union", "(", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_default", ".", "keys", "(", ")", ")", ")", "a", "=", "a", ".", "union", "(", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_default", ".", "values", "(", ")", ")", ")", "a", "=", "sorted", "(", "x", "[", ":", "-", "2", "]", "for", "x", "in", "a", "if", "x", "[", "-", "2", "]", "is", "False", ")", "# Exclude negation.", "return", "a" ]
Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple.
[ "Yields", "a", "list", "of", "tenses", "for", "this", "language", "excluding", "negations", ".", "Each", "tense", "is", "a", "(", "tense", "person", "number", "mood", "aspect", ")", "-", "tuple", "." ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L813-L836
def get_diff_str(self, element, length): '''get_diff_str High-level api: Produce a string that indicates the difference between two models. Parameters ---------- element : `Element` A node in model tree. length : `int` String length that has been consumed. Returns ------- str A string that indicates the difference between two models. ''' spaces = ' '*(self.get_width(element) - length) return spaces + element.get('diff')
[ "def", "get_diff_str", "(", "self", ",", "element", ",", "length", ")", ":", "spaces", "=", "' '", "*", "(", "self", ".", "get_width", "(", "element", ")", "-", "length", ")", "return", "spaces", "+", "element", ".", "get", "(", "'diff'", ")" ]
get_diff_str High-level api: Produce a string that indicates the difference between two models. Parameters ---------- element : `Element` A node in model tree. length : `int` String length that has been consumed. Returns ------- str A string that indicates the difference between two models.
[ "get_diff_str" ]
python
train
mattja/nsim
nsim/analysesN/phase.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/phase.py#L42-L44
def circmean(dts, axis=2): """Circular mean phase""" return np.exp(1.0j * dts).mean(axis=axis).angle()
[ "def", "circmean", "(", "dts", ",", "axis", "=", "2", ")", ":", "return", "np", ".", "exp", "(", "1.0j", "*", "dts", ")", ".", "mean", "(", "axis", "=", "axis", ")", ".", "angle", "(", ")" ]
Circular mean phase
[ "Circular", "mean", "phase" ]
python
train
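Plain NumPy arrays have no .angle() method (the original presumably operates on nsim's own array type, which provides one), so a NumPy-only equivalent is sketched below together with a wrap-around check:

import numpy as np

def circmean_np(dts, axis=2):
    """Circular mean of phases along the given axis, for plain ndarrays."""
    return np.angle(np.exp(1.0j * dts).mean(axis=axis))

# Angles clustered near 0 average to ~0 even when one is shifted by 2*pi.
dts = np.array([[[0.1, -0.1, 2 * np.pi + 0.05]]])
print(circmean_np(dts))  # ~[[0.0167]]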
alex-kostirin/pyatomac
atomac/ldtpd/table.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/table.py#L80-L131
def multiselect(self, window_name, object_name, row_text_list, partial_match=False): """ Select multiple row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param row_text_list: Row list with matching text to select @type row_text: string @return: 1 on success. @rtype: integer """ object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) object_handle.activate() selected = False try: window = self._get_front_most_window() except (IndexError,): window = self._get_any_window() for row_text in row_text_list: selected = False for cell in object_handle.AXRows: parent_cell = cell cell = self._getfirstmatchingchild(cell, "(AXTextField|AXStaticText)") if not cell: continue if re.match(row_text, cell.AXValue): selected = True if not parent_cell.AXSelected: x, y, width, height = self._getobjectsize(parent_cell) window.clickMouseButtonLeftWithMods((x + width / 2, y + height / 2), ['<command_l>']) # Following selection doesn't work # parent_cell.AXSelected=True self.wait(0.5) else: # Selected pass break if not selected: raise LdtpServerException(u"Unable to select row: %s" % row_text) if not selected: raise LdtpServerException(u"Unable to select any row") return 1
[ "def", "multiselect", "(", "self", ",", "window_name", ",", "object_name", ",", "row_text_list", ",", "partial_match", "=", "False", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "object_handle", ".", "AXEnabled", ":", "raise", "LdtpServerException", "(", "u\"Object %s state disabled\"", "%", "object_name", ")", "object_handle", ".", "activate", "(", ")", "selected", "=", "False", "try", ":", "window", "=", "self", ".", "_get_front_most_window", "(", ")", "except", "(", "IndexError", ",", ")", ":", "window", "=", "self", ".", "_get_any_window", "(", ")", "for", "row_text", "in", "row_text_list", ":", "selected", "=", "False", "for", "cell", "in", "object_handle", ".", "AXRows", ":", "parent_cell", "=", "cell", "cell", "=", "self", ".", "_getfirstmatchingchild", "(", "cell", ",", "\"(AXTextField|AXStaticText)\"", ")", "if", "not", "cell", ":", "continue", "if", "re", ".", "match", "(", "row_text", ",", "cell", ".", "AXValue", ")", ":", "selected", "=", "True", "if", "not", "parent_cell", ".", "AXSelected", ":", "x", ",", "y", ",", "width", ",", "height", "=", "self", ".", "_getobjectsize", "(", "parent_cell", ")", "window", ".", "clickMouseButtonLeftWithMods", "(", "(", "x", "+", "width", "/", "2", ",", "y", "+", "height", "/", "2", ")", ",", "[", "'<command_l>'", "]", ")", "# Following selection doesn't work", "# parent_cell.AXSelected=True", "self", ".", "wait", "(", "0.5", ")", "else", ":", "# Selected", "pass", "break", "if", "not", "selected", ":", "raise", "LdtpServerException", "(", "u\"Unable to select row: %s\"", "%", "row_text", ")", "if", "not", "selected", ":", "raise", "LdtpServerException", "(", "u\"Unable to select any row\"", ")", "return", "1" ]
Select multiple rows @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param row_text_list: Row list with matching text to select @type row_text_list: string @return: 1 on success. @rtype: integer
[ "Select", "multiple", "rows" ]
python
valid
Azure/azure-cli-extensions
src/interactive/azext_interactive/azclishell/az_completer.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/az_completer.py#L191-L199
def get_arg_name(self, param): """ gets the argument name used in the command table for a parameter """ if self.current_command in self.cmdtab: for arg in self.cmdtab[self.current_command].arguments: for name in self.cmdtab[self.current_command].arguments[arg].options_list: if name == param: return arg return None
[ "def", "get_arg_name", "(", "self", ",", "param", ")", ":", "if", "self", ".", "current_command", "in", "self", ".", "cmdtab", ":", "for", "arg", "in", "self", ".", "cmdtab", "[", "self", ".", "current_command", "]", ".", "arguments", ":", "for", "name", "in", "self", ".", "cmdtab", "[", "self", ".", "current_command", "]", ".", "arguments", "[", "arg", "]", ".", "options_list", ":", "if", "name", "==", "param", ":", "return", "arg", "return", "None" ]
gets the argument name used in the command table for a parameter
[ "gets", "the", "argument", "name", "used", "in", "the", "command", "table", "for", "a", "parameter" ]
python
train
googledatalab/pydatalab
datalab/utils/commands/_commands.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_commands.py#L49-L64
def create_args(line, namespace): """ Expand any meta-variable references in the argument list. """ args = [] # Using shlex.split handles quotes args and escape characters. for arg in shlex.split(line): if not arg: continue if arg[0] == '$': var_name = arg[1:] if var_name in namespace: args.append((namespace[var_name])) else: raise Exception('Undefined variable referenced in command line: %s' % arg) else: args.append(arg) return args
[ "def", "create_args", "(", "line", ",", "namespace", ")", ":", "args", "=", "[", "]", "# Using shlex.split handles quotes args and escape characters.", "for", "arg", "in", "shlex", ".", "split", "(", "line", ")", ":", "if", "not", "arg", ":", "continue", "if", "arg", "[", "0", "]", "==", "'$'", ":", "var_name", "=", "arg", "[", "1", ":", "]", "if", "var_name", "in", "namespace", ":", "args", ".", "append", "(", "(", "namespace", "[", "var_name", "]", ")", ")", "else", ":", "raise", "Exception", "(", "'Undefined variable referenced in command line: %s'", "%", "arg", ")", "else", ":", "args", ".", "append", "(", "arg", ")", "return", "args" ]
Expand any meta-variable references in the argument list.
[ "Expand", "any", "meta", "-", "variable", "references", "in", "the", "argument", "list", "." ]
python
train
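With the function above in scope, quoting and $-substitution behave like this (note that substituted values keep their original Python type):

namespace = {'project': 'demo', 'limit': 100}
print(create_args('run $project --limit $limit "two words"', namespace))
# ['run', 'demo', '--limit', 100, 'two words']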
inveniosoftware/invenio-formatter
invenio_formatter/views.py
https://github.com/inveniosoftware/invenio-formatter/blob/aa25f36742e809f05e116b52e8255cdb362e5642/invenio_formatter/views.py#L20-L67
def create_badge_blueprint(allowed_types): """Create the badge blueprint. :param allowed_types: A list of allowed types. :returns: A Flask blueprint. """ from invenio_formatter.context_processors.badges import \ generate_badge_png, generate_badge_svg blueprint = Blueprint( 'invenio_formatter_badges', __name__, template_folder='templates', ) @blueprint.route( '/badge/<any({0}):title>/<path:value>.<any(svg, png):ext>'.format( ', '.join(allowed_types))) def badge(title, value, ext='svg'): """Generate a badge response.""" if ext == 'svg': generator = generate_badge_svg mimetype = 'image/svg+xml' elif ext == 'png': generator = generate_badge_png mimetype = 'image/png' badge_title_mapping = \ current_app.config['FORMATTER_BADGES_TITLE_MAPPING'].get( title, title) response = Response(generator(badge_title_mapping, value), mimetype=mimetype) # Generate Etag from badge title and value. hashable_badge = "{0}.{1}".format(badge_title_mapping, value).encode('utf-8') response.set_etag(hashlib.sha1(hashable_badge).hexdigest()) # Add headers to prevent caching. response.headers["Pragma"] = "no-cache" response.cache_control.no_cache = True response.cache_control.max_age = \ current_app.config['FORMATTER_BADGES_MAX_CACHE_AGE'] response.last_modified = dt.utcnow() extra = timedelta( seconds=current_app.config['FORMATTER_BADGES_MAX_CACHE_AGE']) response.expires = response.last_modified + extra return response.make_conditional(request) return blueprint
[ "def", "create_badge_blueprint", "(", "allowed_types", ")", ":", "from", "invenio_formatter", ".", "context_processors", ".", "badges", "import", "generate_badge_png", ",", "generate_badge_svg", "blueprint", "=", "Blueprint", "(", "'invenio_formatter_badges'", ",", "__name__", ",", "template_folder", "=", "'templates'", ",", ")", "@", "blueprint", ".", "route", "(", "'/badge/<any({0}):title>/<path:value>.<any(svg, png):ext>'", ".", "format", "(", "', '", ".", "join", "(", "allowed_types", ")", ")", ")", "def", "badge", "(", "title", ",", "value", ",", "ext", "=", "'svg'", ")", ":", "\"\"\"Generate a badge response.\"\"\"", "if", "ext", "==", "'svg'", ":", "generator", "=", "generate_badge_svg", "mimetype", "=", "'image/svg+xml'", "elif", "ext", "==", "'png'", ":", "generator", "=", "generate_badge_png", "mimetype", "=", "'image/png'", "badge_title_mapping", "=", "current_app", ".", "config", "[", "'FORMATTER_BADGES_TITLE_MAPPING'", "]", ".", "get", "(", "title", ",", "title", ")", "response", "=", "Response", "(", "generator", "(", "badge_title_mapping", ",", "value", ")", ",", "mimetype", "=", "mimetype", ")", "# Generate Etag from badge title and value.", "hashable_badge", "=", "\"{0}.{1}\"", ".", "format", "(", "badge_title_mapping", ",", "value", ")", ".", "encode", "(", "'utf-8'", ")", "response", ".", "set_etag", "(", "hashlib", ".", "sha1", "(", "hashable_badge", ")", ".", "hexdigest", "(", ")", ")", "# Add headers to prevent caching.", "response", ".", "headers", "[", "\"Pragma\"", "]", "=", "\"no-cache\"", "response", ".", "cache_control", ".", "no_cache", "=", "True", "response", ".", "cache_control", ".", "max_age", "=", "current_app", ".", "config", "[", "'FORMATTER_BADGES_MAX_CACHE_AGE'", "]", "response", ".", "last_modified", "=", "dt", ".", "utcnow", "(", ")", "extra", "=", "timedelta", "(", "seconds", "=", "current_app", ".", "config", "[", "'FORMATTER_BADGES_MAX_CACHE_AGE'", "]", ")", "response", ".", "expires", "=", "response", ".", "last_modified", "+", "extra", "return", "response", ".", "make_conditional", "(", "request", ")", "return", "blueprint" ]
Create the badge blueprint. :param allowed_types: A list of allowed types. :returns: A Flask blueprint.
[ "Create", "the", "badge", "blueprint", "." ]
python
train
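A registration sketch on a bare Flask app; the two config keys are the ones read inside the view, and 'doi' is an illustrative allowed type:

from flask import Flask
from invenio_formatter.views import create_badge_blueprint  # path from the record

app = Flask(__name__)
app.config['FORMATTER_BADGES_TITLE_MAPPING'] = {}
app.config['FORMATTER_BADGES_MAX_CACHE_AGE'] = 86400
app.register_blueprint(create_badge_blueprint(['doi']))
# The blueprint now serves URLs like /badge/doi/<value>.svg and .png.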
peshay/tpm
tpm.py
https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L430-L436
def create_mypassword(self, data): """Create my password.""" # http://teampasswordmanager.com/docs/api-my-passwords/#create_password log.info('Create MyPassword with %s' % data) NewID = self.post('my_passwords.json', data).get('id') log.info('MyPassword has been created with %s' % NewID) return NewID
[ "def", "create_mypassword", "(", "self", ",", "data", ")", ":", "# http://teampasswordmanager.com/docs/api-my-passwords/#create_password", "log", ".", "info", "(", "'Create MyPassword with %s'", "%", "data", ")", "NewID", "=", "self", ".", "post", "(", "'my_passwords.json'", ",", "data", ")", ".", "get", "(", "'id'", ")", "log", ".", "info", "(", "'MyPassword has been created with %s'", "%", "NewID", ")", "return", "NewID" ]
Create my password.
[ "Create", "my", "password", "." ]
python
train
UDST/urbansim
urbansim/models/regression.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L460-L469
def columns_used(self): """ Returns all the columns used in this model for filtering and in the model expression. """ return list(tz.unique(tz.concatv( util.columns_in_filters(self.fit_filters), util.columns_in_filters(self.predict_filters), util.columns_in_formula(self.model_expression))))
[ "def", "columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "util", ".", "columns_in_filters", "(", "self", ".", "fit_filters", ")", ",", "util", ".", "columns_in_filters", "(", "self", ".", "predict_filters", ")", ",", "util", ".", "columns_in_formula", "(", "self", ".", "model_expression", ")", ")", ")", ")" ]
Returns all the columns used in this model for filtering and in the model expression.
[ "Returns", "all", "the", "columns", "used", "in", "this", "model", "for", "filtering", "and", "in", "the", "model", "expression", "." ]
python
train
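To illustrate what columns_used gathers, a sketch assuming RegressionModel accepts the three referenced attributes as constructor arguments:

from urbansim.models.regression import RegressionModel

model = RegressionModel(
    fit_filters=['unit_price > 0'],
    predict_filters=['year_built < 1940'],
    model_expression='np.log1p(unit_price) ~ sqft + year_built')

# Unique union of the columns named in both filter lists and in the
# formula, e.g. ['unit_price', 'year_built', 'sqft'].
print(model.columns_used())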
NuGrid/NuGridPy
nugridpy/nugridse.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L1586-L2024
def _kip_cont2(self, sparse, cycle_start=0, cycle_end=0, plot=['dcoeff'], thresholds=[1.0E+12], xax='log_time_left', alphas=[1.0], yllim=0., yulim=0., y_res=2000, xllim=0., xulim=0., age='seconds', sparse_intrinsic=20, engen=False, netnuc_name='eps_nuc', engenalpha=0.6, outfile='plot.pdf', annotation='', KEPLER=False): """ !! EXPERIMENTAL FEATURE (flagged as private) !! This function creates a Kippenhahn diagram as a contour plot of the .se.h5 or .out.h5 files using any continuous variable (columns in the hdf5 cycle data). Multiple columns may be plotted, their names indicated in the list "plot", and their thresholds in the list "thresholds". Currently, this is only designed to take one threshold for each variable, but future versions will hopefully be able to plot with multiple thresholds; however, you may circumvent this issue by repeating the variable in "plots" and entering a second threshold for it in "thresholds". Parameters ---------- sparse : integer x-axis (timestep) sparsity. The true sparsity is sparse*sparse_intrinsic. Try 100 or 500 for .se.h5 and 20 for .out.h5 files and for preliminary plots. cycle_start : integer, optional Cycle from which you wish to plot. The default is 0. cycle_end : integer, optional Maximum cycle that you wish to plot. If cycle_end is 0, then it will plot up to the last cycle available. The default is 0. plot : list, optional 1-D array containing the variables to be plotted (as strings, e.g. plots=['dcoeff','C-13']. I recommend always plotting 'dcoeff' as plots[0]). The default is ['dcoeff']. thresholds : list, optional 1-D array containing the thresholds corresponding to the variables in "plots". The default is [1.0E+12]. xax : string, optional x-axis quantity; either 'log_time_left' or 'cycles'. The default is 'log_time_left'. alphas : list, optional Array containing the opacity (0 to 1) of the contour for each variable. The default is [1.0]. yllim : float, optional Lower plot limit for y-axis (mass co-ordinate). The default is 0.. yulim : float, optional Upper plot limit for y-axis (mass co-ordinate). The default is 0.. y_res : integer, optional y-axis resolution. Defaults to 2000 but increasing to as much as 10000 does not significantly affect the plotting time. The default is 2000. xllim : float, optional Lower plot limit for x-axis. The default is 0.. xulim : float, optional Upper plot limit for x-axis. The default is 0.. age : string, optional Either 'years' or 'seconds', depending on the data. The default is 'seconds'. sparse_intrinsic : integer, optional Sparsity of timesteps in the data provided (usually 20 for .out.h5 files and 1 for .se.h5 files). The default is 20. engen : boolean, optional Whether the user would like to plot a Kippenhahn diagram of convective zones and energy generation. If True, please still include plots=['dcoeff'] and thresholds=[1.0E+12] in your call. This will require the data to have an 'eps_nuc' column, so the plot only works for .se.h5 files from MESA in the current se library. This is the most recent addition, so probably the most buggy. The plot script will automatically calculate and assign multiple thresholds according to the model. The default is False. netnuc_name : string, optional The name of the column containing (eps_nuc-eps_neu). If you do not have available eps_neu, then you can give netnuc_name="eps_nuc" to just plot energy generation. The default is "eps_nuc". engenalpha : float, optional Opacity of the energy generation contours. The default is 0.6. 
outfile : string, optional The name to save the plot as. The default is 'plot.pdf'. annotation : string, optional Some optional annotation to add to the plot. The default is ''. KEPLER : boolean, optional The default is False. """ # Organize cycles and ages: original_cyclelist = self.se.cycles if cycle_end==0.: cycle_end = original_cyclelist[-1] cycle_end = old_div(int(cycle_end),sparse_intrinsic) - 1 if cycle_start==0: pass else: cycle_start = old_div(int(cycle_start),sparse_intrinsic) - 1 cyclelist = original_cyclelist[cycle_start:cycle_end:sparse] # fix for KEPLER restart counting at O burning: original_ages= self.se.ages age_at_restart = 0. age_at_restart_idx = 0 if KEPLER == True: for i in range(1,len(original_ages)): if (original_ages[i]-original_ages[i-1]) < 0.: age_at_restart_idx = i-1 age_at_restart = original_ages[i-1] print('age restart found at cycle = '+str(age_at_restart_idx)+', age = '+str(age_at_restart)) KEPLER = True break for i in range(age_at_restart_idx+1,len(original_ages)): original_ages[i] = original_ages[i] + age_at_restart # Figure: fig = pl.figure() ax = pl.axes() params = {'axes.labelsize': 15, 'text.fontsize': 15, 'legend.fontsize': 15, 'xtick.labelsize': 15, 'ytick.labelsize': 15, 'text.usetex': False} fsize=18 pl.rcParams.update(params) # X-axis: if xax == 'log_time_left': if KEPLER == True: xxtmp = original_ages[cycle_start:cycle_end:sparse] else: xxtmp = self.se.ages[cycle_start:cycle_end:sparse] if age == 'years': if KEPLER == True: pass else: xxtmp = self.se.ages[cycle_start:cycle_end:sparse] elif age == 'seconds': if KEPLER == True: for i in range(len(cyclelist)): xxtmp[i] = old_div(original_ages[cycle_start:cycle_end:sparse][i],31558149.984) else: for i in range(len(cyclelist)): xxtmp[i] = old_div(self.se.ages[cycle_start:cycle_end:sparse][i],31558149.984) if xax == 'cycles': xx = cyclelist xxtmp = cyclelist # Set up x-axis according to whether ages are in years or seconds and # re-write as log(time left). The last entry will always be -inf in this # way so we calculate it by extrapolating the anti-penultimate and # penultimate entries. # Modified from the GENEC gdic (R. Hirschi) if xax == 'log_time_left': xx=np.zeros(len(xxtmp)) agemin = max(old_div(abs(xxtmp[-1]-xxtmp[-2]),5.),1.e-10) for i in np.arange(len(xxtmp)): if xxtmp[-1]-xxtmp[i]>agemin: xx[i]=np.log10(xxtmp[-1]-xxtmp[i]+agemin) else : xx[i]=np.log10(agemin) ax.set_xlabel('$log_\mathrm{10}(t_\mathrm{end}-t)\;[\mathrm{yr}]$',fontsize=fsize-1) if xax == 'cycles': ax.set_xlabel('$\mathrm{CYCLE}$',fontsize=fsize-1) # Y-axis limits and resolution: totalmass = [] try: m_ini = float(self.se.get('mini')) except: mini=m.se.get(0,'total_mass') mini=old_div(mini,constants.mass_sun) print('getting Mini from 1st cycle') if yulim==0.: yulim = m_ini dy = old_div(m_ini,float(y_res)) vlinetol = 1.0E-8 # Set up (y-axis) vector and a 3-D (hist) array to store all of the # contours. y = np.arange(0., m_ini, dy) if engen == True: Z = np.zeros([len(y),len(xxtmp),len(plot)+2],float) else: Z = np.zeros([len(y),len(xxtmp),len(plot)],float) # Define function extracting the contour boundaries which will be # called for every cycle in cyclelist, for every variable to be plotted # along with its corresponding threshold(s). 
def getlims(variable_array,thresh,massco_array): """This function returns the variable boundaries (in mass) for a cycle, given the cycle's variable and mass columns, ensuring that the boundaries are ordered centre to surface (as some .se.h5 files are the opposite).""" plotlims = [] # Just a fix for some get problem I was having: if len(massco_array) == 2: massco_array = massco_array[0] if len(variable_array) == 2: variable_array = variable_array[0] if massco_array[0] > massco_array[-1]: for j in range(-1,-len(variable_array)-1,-1): if j == -1: if variable_array[j] >= thresh: plotlims.append(massco_array[j]) else: pass elif (variable_array[j]-thresh)*(variable_array[j+1]-thresh) < 0: plotlims.append(massco_array[j]) if j == -len(variable_array): if variable_array[j] >= thresh: plotlims.append(massco_array[j]) return plotlims else: for j in range(len(variable_array)): if j == 0: if variable_array[j] >= thresh: plotlims.append(massco_array[j]) else: pass elif (variable_array[j]-thresh)*(variable_array[j-1]-thresh) < 0: plotlims.append(massco_array[j]) if j == len(variable_array)-1: if variable_array[j] >= thresh: plotlims.append(massco_array[j]) return plotlims # Flag preventing plotting any other variables on an energy generation # Kippenhahn plot: if engen == True: plot = ['dcoeff'] # This loop gets the mass co-ordinate array and the variable arrays, # calls to get the boundaries in order, and populates the contour array. ypscoeff = [-1,-1,-1] # this should have same length as plot - quick fix for yps. total_massco = [] for i in range(len(cyclelist)): # print 'CYCLE: ', cyclelist[i] massco = self.se.get(cyclelist[i],'mass') total_massco.append(max(massco)) plotlimits=[] for j in range(len(plot)): if plot[j][1] == '-' or plot[j][2] == '-': # Assume file has yps, not iso_massf ypsthere = True try: variables = self.se.get(cyclelist[i],'yps') # If this is not the case, do the usual se calls for iso_massf except KeyError: variables = self.se.get(cyclelist[i],plot[j]) ypsthere = False # If yps is there, ask which indices correspond to the # elements that are to be plotted, one by one. if ypsthere == True: if ypscoeff[j] == -1: ypscoeff[j] = int(input("What integer is your element "+str(plot[j])+" in the 'yps' array? ")) else: pass variables = self.se.get(cyclelist[i],'yps')[:,ypscoeff[j]] else: variables = self.se.get(cyclelist[i],plot[j]) plotlims = getlims(variables,thresholds[j],massco) plotlimits.append(plotlims) percent = int(i*100/len(cyclelist)) sys.stdout.flush() sys.stdout.write("\rcreating color map " + "...%d%%" % percent) for g in range(len(plot)): for k in range(0,len(plotlimits[g]),2): llimit = plotlimits[g][k] ulimit = plotlimits[g][k+1] #if xx[i] >= 0: for f in range(y_res): if llimit<=y[f] and ulimit>y[f]: Z[f,i,g]=1. #else: # ax.axvline(xx[i],ymin=llimit/m_ini,ymax=ulimit/m_ini,color='#8B8386',alpha=alphas[0],linewidth=0.5) # This function determines the adjacent two mass cells to a point in the # y-vector (which contains mass co-ordinates centre to surface, split into # y_res chunks), returning their index in the mass co-ordinate vector for # that timestep/cycle. def find_nearest(array,value): """ Returns [lower,upper] indexes locating adjacent mass cells (in the massco vector) around y-value (one of y_res points equally spaced between centre and surface). 
""" idx=(np.abs(array-value)).argmin() lims=np.zeros([2],int) if idx == len(array)-1: # SJONES post-mod lims[0] = idx - 1 # SJONES post-mod lims[1] = idx # SJONES post-mod return lims if array[idx] < value: if array[idx]-array[idx+1] < 0.: lims[0] = idx lims[1] = idx-1 return lims else: lims[0] = idx lims[1] = idx+1 return lims elif array[idx] > value: if array[idx]-array[idx+1] < 0.: lims[0] = idx+1 lims[1] = idx return lims else: lims[0] = idx-1 lims[1] = idx return lims # This flag enebles the loop below it to populate the contour array for # energy generation. It does not take threshold arguments, as the array # contains the log of the energy generation rather than "above" or "below". # Because of this, contour boundaries are automatically calculated # according to the max energy generation in the model. dummy_engen=[] engen_signs = [] if engen == True: # Requires eps_nuc array in the data. Produces energy generation contour # by linearly interpolating eps_nuc between mass co-ordinates according # to the y-resolution: for i in range(len(cyclelist)): # print 'CYCLE: ', cyclelist[i] max_energy_gen = 0. min_energy_gen = 0. massco = self.se.get(cyclelist[i],'mass') if len(massco) <= 10: massco=massco[0] dummy_engen = self.se.get(cyclelist[i],netnuc_name) if len(dummy_engen) <= 10: dummy_engen = dummy_engen[0] for f in range(len(dummy_engen)): # make all values absolute, but note in engen_signs which were negative: if dummy_engen[f] == 0.: engen_signs.append(1.) else: engen_signs.append(old_div(dummy_engen[f],abs(dummy_engen[f]))) if abs(engen_signs[f]) != 1.: print('engen sign not +/- 1!!') print('engen_signs['+str(f)+'] = ',engen_signs[f]) print('dummy_engen[f] = ', dummy_engen[f]) sys.exit() dummy_engen[f] = abs(dummy_engen[f]) log_epsnuc = np.log10(dummy_engen) # now insert the correct signs again: for f in range(len(log_epsnuc)): log_epsnuc[f] = log_epsnuc[f]*engen_signs[f] #for f in range(len(log_epsnuc)): #if str(log_epsnuc[f]) == 'nan': log_epsnuc[f] = 0. # print log_epsnuc percent = int(i*100/len(cyclelist)) sys.stdout.flush() sys.stdout.write("\rcreating color map " + "...%d%%" % percent) for j in range(len(y)): if j == len(y)-1: energy_here = 0. elif j == 0: energy_here = log_epsnuc[-1] elif y[j] > max(massco): energy_here = 0. else: lims = find_nearest(massco,y[j]) frac = old_div((y[j]-massco[lims[0]]),(massco[lims[1]]-massco[lims[0]])) energy_here = frac*(log_epsnuc[lims[1]]-log_epsnuc[lims[0]]) + log_epsnuc[lims[0]] if energy_here > max_energy_gen: max_energy_gen = energy_here if energy_here < min_energy_gen: min_energy_gen = energy_here if abs(max_energy_gen) > 100.: print(y[j]) print(engen_signs[f], log_epsnuc[f], frac, lims[0], lims[1], massco[lims[0]], massco[lims[1]]) print((massco[lims[1]]-massco[lims[0]]), (y[j]-massco[lims[0]])) print(max_energy_gen) print('exit due to energy generation > 100') sys.exit() # print energy_here # print max_energy_gen # if energy_here >= 0.: #Z[j,i,1] = 10**energy_here #SJONES comment if energy_here < 0.: energy_here = 0. Z[j,i,1] = energy_here # if energy_here < 0.: # Z[j,i,2] = 10**energy_here # Here we define the colourmap for the energy generation and an array # containing a list of colours in which to plot each variable (in the # order that the variables appear in "plots") iso_colours is obsolete # but was for when we tried plotting isotopes with just their boundary # lines as opposed to shading (for clarity). Colourmaps of these choices # are written to cmap (array). 
engen_cmap=mpl.cm.get_cmap('Blues') engen_cmap.set_under(color='w',alpha=engenalpha) enloss_cmap=mpl.cm.get_cmap('Reds') colours = ['#8B8386','m','g','b'] iso_colours = ['b','r','y'] cmap = [] for i in range(len(plot)): cmap.append(mpl.colors.ListedColormap(['w',colours[i]])) print('plotting contours') if xllim==0. and xulim==0.: ax.axis([float(xx[0]),float(xx[-1]),yllim,yulim]) else: ax.axis([xllim,xulim,yllim,yulim]) # Plot all of the contours. Levels indicates to only plot the shaded # regions and not plot the white regions, so that they are essentially # transparent. If engen=True, then the energy generation levels # (boundary values) are calculated (in dex) from 2 to the maximum in # steps of 2. for i in range(len(plot)): ax.contourf(xx,y,Z[:,:,i],levels=[0.5,1.5],colors=colours[i], alpha=alphas[i]) if engen == True: #ceiling = int(max_energy_gen+1) #floor = int(min_energy_gen+1) #cburn = ax.contourf(xx,y,Z[:,:,1],cmap=engen_cmap,locator=mpl.ticker.LogLocator(),alpha=engenalpha) # SJONES comment cburn = ax.contourf(xx,y,Z[:,:,1],cmap=engen_cmap,alpha=engenalpha,levels=list(range(5,32,5))) cbarburn = pl.colorbar(cburn) # if min_energy_gen != 0: # closs = ax.contourf(xx,y,Z[:,:,2],cmap=enloss_cmap,locator=mpl.ticker.LogLocator(),alpha=engenalpha) # cbarloss = pl.colorbar(closs) # cbarburn.set_label('$\epsilon_\mathrm{nuc}-\epsilon_{\\nu} \; (\mathrm{erg\,g}^{-1}\mathrm{\,s}^{-1})$, > 0',fontsize=fsize) cbarburn.set_label('$log_{10}(\epsilon_\mathrm{nuc}) \; (\mathrm{erg\,g}^{-1}\mathrm{\,s}^{-1})$, > 0',fontsize=fsize) pl.plot(xx,total_massco,color='k') pl.text(0.9,0.9,annotation,horizontalalignment='right',transform = ax.transAxes,fontsize=fsize) pl.ylabel('$\mathrm{Mass}\;[M_\odot]$',fontsize=fsize-1) pl.savefig(outfile) print(outfile+' is done.') pl.show()
[ "def", "_kip_cont2", "(", "self", ",", "sparse", ",", "cycle_start", "=", "0", ",", "cycle_end", "=", "0", ",", "plot", "=", "[", "'dcoeff'", "]", ",", "thresholds", "=", "[", "1.0E+12", "]", ",", "xax", "=", "'log_time_left'", ",", "alphas", "=", "[", "1.0", "]", ",", "yllim", "=", "0.", ",", "yulim", "=", "0.", ",", "y_res", "=", "2000", ",", "xllim", "=", "0.", ",", "xulim", "=", "0.", ",", "age", "=", "'seconds'", ",", "sparse_intrinsic", "=", "20", ",", "engen", "=", "False", ",", "netnuc_name", "=", "'eps_nuc'", ",", "engenalpha", "=", "0.6", ",", "outfile", "=", "'plot.pdf'", ",", "annotation", "=", "''", ",", "KEPLER", "=", "False", ")", ":", "# Organize cycles and ages:", "original_cyclelist", "=", "self", ".", "se", ".", "cycles", "if", "cycle_end", "==", "0.", ":", "cycle_end", "=", "original_cyclelist", "[", "-", "1", "]", "cycle_end", "=", "old_div", "(", "int", "(", "cycle_end", ")", ",", "sparse_intrinsic", ")", "-", "1", "if", "cycle_start", "==", "0", ":", "pass", "else", ":", "cycle_start", "=", "old_div", "(", "int", "(", "cycle_start", ")", ",", "sparse_intrinsic", ")", "-", "1", "cyclelist", "=", "original_cyclelist", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "# fix for KEPLER restart counting at O burning:", "original_ages", "=", "self", ".", "se", ".", "ages", "age_at_restart", "=", "0.", "age_at_restart_idx", "=", "0", "if", "KEPLER", "==", "True", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "original_ages", ")", ")", ":", "if", "(", "original_ages", "[", "i", "]", "-", "original_ages", "[", "i", "-", "1", "]", ")", "<", "0.", ":", "age_at_restart_idx", "=", "i", "-", "1", "age_at_restart", "=", "original_ages", "[", "i", "-", "1", "]", "print", "(", "'age restart found at cycle = '", "+", "str", "(", "age_at_restart_idx", ")", "+", "', age = '", "+", "str", "(", "age_at_restart", ")", ")", "KEPLER", "=", "True", "break", "for", "i", "in", "range", "(", "age_at_restart_idx", "+", "1", ",", "len", "(", "original_ages", ")", ")", ":", "original_ages", "[", "i", "]", "=", "original_ages", "[", "i", "]", "+", "age_at_restart", "# Figure:", "fig", "=", "pl", ".", "figure", "(", ")", "ax", "=", "pl", ".", "axes", "(", ")", "params", "=", "{", "'axes.labelsize'", ":", "15", ",", "'text.fontsize'", ":", "15", ",", "'legend.fontsize'", ":", "15", ",", "'xtick.labelsize'", ":", "15", ",", "'ytick.labelsize'", ":", "15", ",", "'text.usetex'", ":", "False", "}", "fsize", "=", "18", "pl", ".", "rcParams", ".", "update", "(", "params", ")", "# X-axis:", "if", "xax", "==", "'log_time_left'", ":", "if", "KEPLER", "==", "True", ":", "xxtmp", "=", "original_ages", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "else", ":", "xxtmp", "=", "self", ".", "se", ".", "ages", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "if", "age", "==", "'years'", ":", "if", "KEPLER", "==", "True", ":", "pass", "else", ":", "xxtmp", "=", "self", ".", "se", ".", "ages", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "elif", "age", "==", "'seconds'", ":", "if", "KEPLER", "==", "True", ":", "for", "i", "in", "range", "(", "len", "(", "cyclelist", ")", ")", ":", "xxtmp", "[", "i", "]", "=", "old_div", "(", "original_ages", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "[", "i", "]", ",", "31558149.984", ")", "else", ":", "for", "i", "in", "range", "(", "len", "(", "cyclelist", ")", ")", ":", "xxtmp", "[", "i", "]", "=", "old_div", "(", "self", ".", "se", ".", "ages", "[", "cycle_start", ":", "cycle_end", ":", "sparse", "]", "[", 
"i", "]", ",", "31558149.984", ")", "if", "xax", "==", "'cycles'", ":", "xx", "=", "cyclelist", "xxtmp", "=", "cyclelist", "# Set up x-axis according to whether ages are in years or seconds and", "# re-write as log(time left). The last entry will always be -inf in this", "# way so we calculate it by extrapolating the anti-penultimate and", "# penultimate entries.", "# Modified from the GENEC gdic (R. Hirschi)", "if", "xax", "==", "'log_time_left'", ":", "xx", "=", "np", ".", "zeros", "(", "len", "(", "xxtmp", ")", ")", "agemin", "=", "max", "(", "old_div", "(", "abs", "(", "xxtmp", "[", "-", "1", "]", "-", "xxtmp", "[", "-", "2", "]", ")", ",", "5.", ")", ",", "1.e-10", ")", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "xxtmp", ")", ")", ":", "if", "xxtmp", "[", "-", "1", "]", "-", "xxtmp", "[", "i", "]", ">", "agemin", ":", "xx", "[", "i", "]", "=", "np", ".", "log10", "(", "xxtmp", "[", "-", "1", "]", "-", "xxtmp", "[", "i", "]", "+", "agemin", ")", "else", ":", "xx", "[", "i", "]", "=", "np", ".", "log10", "(", "agemin", ")", "ax", ".", "set_xlabel", "(", "'$log_\\mathrm{10}(t_\\mathrm{end}-t)\\;[\\mathrm{yr}]$'", ",", "fontsize", "=", "fsize", "-", "1", ")", "if", "xax", "==", "'cycles'", ":", "ax", ".", "set_xlabel", "(", "'$\\mathrm{CYCLE}$'", ",", "fontsize", "=", "fsize", "-", "1", ")", "# Y-axis limits and resolution:", "totalmass", "=", "[", "]", "try", ":", "m_ini", "=", "float", "(", "self", ".", "se", ".", "get", "(", "'mini'", ")", ")", "except", ":", "mini", "=", "m", ".", "se", ".", "get", "(", "0", ",", "'total_mass'", ")", "mini", "=", "old_div", "(", "mini", ",", "constants", ".", "mass_sun", ")", "print", "(", "'getting Mini from 1st cycle'", ")", "if", "yulim", "==", "0.", ":", "yulim", "=", "m_ini", "dy", "=", "old_div", "(", "m_ini", ",", "float", "(", "y_res", ")", ")", "vlinetol", "=", "1.0E-8", "# Set up (y-axis) vector and a 3-D (hist) array to store all of the", "# contours.", "y", "=", "np", ".", "arange", "(", "0.", ",", "m_ini", ",", "dy", ")", "if", "engen", "==", "True", ":", "Z", "=", "np", ".", "zeros", "(", "[", "len", "(", "y", ")", ",", "len", "(", "xxtmp", ")", ",", "len", "(", "plot", ")", "+", "2", "]", ",", "float", ")", "else", ":", "Z", "=", "np", ".", "zeros", "(", "[", "len", "(", "y", ")", ",", "len", "(", "xxtmp", ")", ",", "len", "(", "plot", ")", "]", ",", "float", ")", "# Define function extracting the contour boundaries which will be", "# called for every cycle in cyclelist, for every variable to be plotted", "# along with its corresponding threshold(s).", "def", "getlims", "(", "variable_array", ",", "thresh", ",", "massco_array", ")", ":", "\"\"\"This function returns the variable boundaries (in mass) for a cycle,\n given the cycle's variable and mass columns, ensuring that the boundaries\n are ordered centre to surface (as some .se.h5 files are the opposite).\"\"\"", "plotlims", "=", "[", "]", "# Just a fix for some get problem I was having:", "if", "len", "(", "massco_array", ")", "==", "2", ":", "massco_array", "=", "massco_array", "[", "0", "]", "if", "len", "(", "variable_array", ")", "==", "2", ":", "variable_array", "=", "variable_array", "[", "0", "]", "if", "massco_array", "[", "0", "]", ">", "massco_array", "[", "-", "1", "]", ":", "for", "j", "in", "range", "(", "-", "1", ",", "-", "len", "(", "variable_array", ")", "-", "1", ",", "-", "1", ")", ":", "if", "j", "==", "-", "1", ":", "if", "variable_array", "[", "j", "]", ">=", "thresh", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "else", 
":", "pass", "elif", "(", "variable_array", "[", "j", "]", "-", "thresh", ")", "*", "(", "variable_array", "[", "j", "+", "1", "]", "-", "thresh", ")", "<", "0", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "if", "j", "==", "-", "len", "(", "variable_array", ")", ":", "if", "variable_array", "[", "j", "]", ">=", "thresh", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "return", "plotlims", "else", ":", "for", "j", "in", "range", "(", "len", "(", "variable_array", ")", ")", ":", "if", "j", "==", "0", ":", "if", "variable_array", "[", "j", "]", ">=", "thresh", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "else", ":", "pass", "elif", "(", "variable_array", "[", "j", "]", "-", "thresh", ")", "*", "(", "variable_array", "[", "j", "-", "1", "]", "-", "thresh", ")", "<", "0", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "if", "j", "==", "len", "(", "variable_array", ")", "-", "1", ":", "if", "variable_array", "[", "j", "]", ">=", "thresh", ":", "plotlims", ".", "append", "(", "massco_array", "[", "j", "]", ")", "return", "plotlims", "# Flag preventing plotting any other variables on an energy generation", "# Kippenhahn plot:", "if", "engen", "==", "True", ":", "plot", "=", "[", "'dcoeff'", "]", "# This loop gets the mass co-ordinate array and the variable arrays,", "# calls to get the boundaries in order, and populates the contour array.", "ypscoeff", "=", "[", "-", "1", ",", "-", "1", ",", "-", "1", "]", "# this should have same length as plot - quick fix for yps.", "total_massco", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "cyclelist", ")", ")", ":", "# print 'CYCLE: ', cyclelist[i]", "massco", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "'mass'", ")", "total_massco", ".", "append", "(", "max", "(", "massco", ")", ")", "plotlimits", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "plot", ")", ")", ":", "if", "plot", "[", "j", "]", "[", "1", "]", "==", "'-'", "or", "plot", "[", "j", "]", "[", "2", "]", "==", "'-'", ":", "# Assume file has yps, not iso_massf", "ypsthere", "=", "True", "try", ":", "variables", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "'yps'", ")", "# If this is not the case, do the usual se calls for iso_massf", "except", "KeyError", ":", "variables", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "plot", "[", "j", "]", ")", "ypsthere", "=", "False", "# If yps is there, ask which indices correspond to the", "# elements that are to be plotted, one by one.", "if", "ypsthere", "==", "True", ":", "if", "ypscoeff", "[", "j", "]", "==", "-", "1", ":", "ypscoeff", "[", "j", "]", "=", "int", "(", "input", "(", "\"What integer is your element \"", "+", "str", "(", "plot", "[", "j", "]", ")", "+", "\" in the 'yps' array? 
\"", ")", ")", "else", ":", "pass", "variables", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "'yps'", ")", "[", ":", ",", "ypscoeff", "[", "j", "]", "]", "else", ":", "variables", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "plot", "[", "j", "]", ")", "plotlims", "=", "getlims", "(", "variables", ",", "thresholds", "[", "j", "]", ",", "massco", ")", "plotlimits", ".", "append", "(", "plotlims", ")", "percent", "=", "int", "(", "i", "*", "100", "/", "len", "(", "cyclelist", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\rcreating color map \"", "+", "\"...%d%%\"", "%", "percent", ")", "for", "g", "in", "range", "(", "len", "(", "plot", ")", ")", ":", "for", "k", "in", "range", "(", "0", ",", "len", "(", "plotlimits", "[", "g", "]", ")", ",", "2", ")", ":", "llimit", "=", "plotlimits", "[", "g", "]", "[", "k", "]", "ulimit", "=", "plotlimits", "[", "g", "]", "[", "k", "+", "1", "]", "#if xx[i] >= 0:", "for", "f", "in", "range", "(", "y_res", ")", ":", "if", "llimit", "<=", "y", "[", "f", "]", "and", "ulimit", ">", "y", "[", "f", "]", ":", "Z", "[", "f", ",", "i", ",", "g", "]", "=", "1.", "#else:", "# ax.axvline(xx[i],ymin=llimit/m_ini,ymax=ulimit/m_ini,color='#8B8386',alpha=alphas[0],linewidth=0.5)", "# This function determines the adjacent two mass cells to a point in the", "# y-vector (which contains mass co-ordinates centre to surface, split into", "# y_res chunks), returning their index in the mass co-ordinate vector for", "# that timestep/cycle.", "def", "find_nearest", "(", "array", ",", "value", ")", ":", "\"\"\"\n Returns [lower,upper] indexes locating adjacent mass cells\n (in the massco vector) around y-value (one of y_res points\n equally spaced between centre and surface).\n\n \"\"\"", "idx", "=", "(", "np", ".", "abs", "(", "array", "-", "value", ")", ")", ".", "argmin", "(", ")", "lims", "=", "np", ".", "zeros", "(", "[", "2", "]", ",", "int", ")", "if", "idx", "==", "len", "(", "array", ")", "-", "1", ":", "# SJONES post-mod", "lims", "[", "0", "]", "=", "idx", "-", "1", "# SJONES post-mod", "lims", "[", "1", "]", "=", "idx", "# SJONES post-mod", "return", "lims", "if", "array", "[", "idx", "]", "<", "value", ":", "if", "array", "[", "idx", "]", "-", "array", "[", "idx", "+", "1", "]", "<", "0.", ":", "lims", "[", "0", "]", "=", "idx", "lims", "[", "1", "]", "=", "idx", "-", "1", "return", "lims", "else", ":", "lims", "[", "0", "]", "=", "idx", "lims", "[", "1", "]", "=", "idx", "+", "1", "return", "lims", "elif", "array", "[", "idx", "]", ">", "value", ":", "if", "array", "[", "idx", "]", "-", "array", "[", "idx", "+", "1", "]", "<", "0.", ":", "lims", "[", "0", "]", "=", "idx", "+", "1", "lims", "[", "1", "]", "=", "idx", "return", "lims", "else", ":", "lims", "[", "0", "]", "=", "idx", "-", "1", "lims", "[", "1", "]", "=", "idx", "return", "lims", "# This flag enebles the loop below it to populate the contour array for", "# energy generation. It does not take threshold arguments, as the array", "# contains the log of the energy generation rather than \"above\" or \"below\".", "# Because of this, contour boundaries are automatically calculated", "# according to the max energy generation in the model.", "dummy_engen", "=", "[", "]", "engen_signs", "=", "[", "]", "if", "engen", "==", "True", ":", "# Requires eps_nuc array in the data. 
Produces energy generation contour", "# by linearly interpolating eps_nuc between mass co-ordinates according", "# to the y-resolution:", "for", "i", "in", "range", "(", "len", "(", "cyclelist", ")", ")", ":", "# print 'CYCLE: ', cyclelist[i]", "max_energy_gen", "=", "0.", "min_energy_gen", "=", "0.", "massco", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "'mass'", ")", "if", "len", "(", "massco", ")", "<=", "10", ":", "massco", "=", "massco", "[", "0", "]", "dummy_engen", "=", "self", ".", "se", ".", "get", "(", "cyclelist", "[", "i", "]", ",", "netnuc_name", ")", "if", "len", "(", "dummy_engen", ")", "<=", "10", ":", "dummy_engen", "=", "dummy_engen", "[", "0", "]", "for", "f", "in", "range", "(", "len", "(", "dummy_engen", ")", ")", ":", "# make all values absolute, but note in engen_signs which were negative:", "if", "dummy_engen", "[", "f", "]", "==", "0.", ":", "engen_signs", ".", "append", "(", "1.", ")", "else", ":", "engen_signs", ".", "append", "(", "old_div", "(", "dummy_engen", "[", "f", "]", ",", "abs", "(", "dummy_engen", "[", "f", "]", ")", ")", ")", "if", "abs", "(", "engen_signs", "[", "f", "]", ")", "!=", "1.", ":", "print", "(", "'engen sign not +/- 1!!'", ")", "print", "(", "'engen_signs['", "+", "str", "(", "f", ")", "+", "'] = '", ",", "engen_signs", "[", "f", "]", ")", "print", "(", "'dummy_engen[f] = '", ",", "dummy_engen", "[", "f", "]", ")", "sys", ".", "exit", "(", ")", "dummy_engen", "[", "f", "]", "=", "abs", "(", "dummy_engen", "[", "f", "]", ")", "log_epsnuc", "=", "np", ".", "log10", "(", "dummy_engen", ")", "# now insert the correct signs again:", "for", "f", "in", "range", "(", "len", "(", "log_epsnuc", ")", ")", ":", "log_epsnuc", "[", "f", "]", "=", "log_epsnuc", "[", "f", "]", "*", "engen_signs", "[", "f", "]", "#for f in range(len(log_epsnuc)):", "#if str(log_epsnuc[f]) == 'nan': log_epsnuc[f] = 0.", "# print log_epsnuc", "percent", "=", "int", "(", "i", "*", "100", "/", "len", "(", "cyclelist", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\rcreating color map \"", "+", "\"...%d%%\"", "%", "percent", ")", "for", "j", "in", "range", "(", "len", "(", "y", ")", ")", ":", "if", "j", "==", "len", "(", "y", ")", "-", "1", ":", "energy_here", "=", "0.", "elif", "j", "==", "0", ":", "energy_here", "=", "log_epsnuc", "[", "-", "1", "]", "elif", "y", "[", "j", "]", ">", "max", "(", "massco", ")", ":", "energy_here", "=", "0.", "else", ":", "lims", "=", "find_nearest", "(", "massco", ",", "y", "[", "j", "]", ")", "frac", "=", "old_div", "(", "(", "y", "[", "j", "]", "-", "massco", "[", "lims", "[", "0", "]", "]", ")", ",", "(", "massco", "[", "lims", "[", "1", "]", "]", "-", "massco", "[", "lims", "[", "0", "]", "]", ")", ")", "energy_here", "=", "frac", "*", "(", "log_epsnuc", "[", "lims", "[", "1", "]", "]", "-", "log_epsnuc", "[", "lims", "[", "0", "]", "]", ")", "+", "log_epsnuc", "[", "lims", "[", "0", "]", "]", "if", "energy_here", ">", "max_energy_gen", ":", "max_energy_gen", "=", "energy_here", "if", "energy_here", "<", "min_energy_gen", ":", "min_energy_gen", "=", "energy_here", "if", "abs", "(", "max_energy_gen", ")", ">", "100.", ":", "print", "(", "y", "[", "j", "]", ")", "print", "(", "engen_signs", "[", "f", "]", ",", "log_epsnuc", "[", "f", "]", ",", "frac", ",", "lims", "[", "0", "]", ",", "lims", "[", "1", "]", ",", "massco", "[", "lims", "[", "0", "]", "]", ",", "massco", "[", "lims", "[", "1", "]", "]", ")", "print", "(", "(", "massco", "[", "lims", "[", "1", 
"]", "]", "-", "massco", "[", "lims", "[", "0", "]", "]", ")", ",", "(", "y", "[", "j", "]", "-", "massco", "[", "lims", "[", "0", "]", "]", ")", ")", "print", "(", "max_energy_gen", ")", "print", "(", "'exit due to energy generation > 100'", ")", "sys", ".", "exit", "(", ")", "# print energy_here", "# print max_energy_gen", "# if energy_here >= 0.:", "#Z[j,i,1] = 10**energy_here #SJONES comment", "if", "energy_here", "<", "0.", ":", "energy_here", "=", "0.", "Z", "[", "j", ",", "i", ",", "1", "]", "=", "energy_here", "# if energy_here < 0.:", "# Z[j,i,2] = 10**energy_here", "# Here we define the colourmap for the energy generation and an array", "# containing a list of colours in which to plot each variable (in the", "# order that the variables appear in \"plots\") iso_colours is obsolete", "# but was for when we tried plotting isotopes with just their boundary", "# lines as opposed to shading (for clarity). Colourmaps of these choices", "# are written to cmap (array).", "engen_cmap", "=", "mpl", ".", "cm", ".", "get_cmap", "(", "'Blues'", ")", "engen_cmap", ".", "set_under", "(", "color", "=", "'w'", ",", "alpha", "=", "engenalpha", ")", "enloss_cmap", "=", "mpl", ".", "cm", ".", "get_cmap", "(", "'Reds'", ")", "colours", "=", "[", "'#8B8386'", ",", "'m'", ",", "'g'", ",", "'b'", "]", "iso_colours", "=", "[", "'b'", ",", "'r'", ",", "'y'", "]", "cmap", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "plot", ")", ")", ":", "cmap", ".", "append", "(", "mpl", ".", "colors", ".", "ListedColormap", "(", "[", "'w'", ",", "colours", "[", "i", "]", "]", ")", ")", "print", "(", "'plotting contours'", ")", "if", "xllim", "==", "0.", "and", "xulim", "==", "0.", ":", "ax", ".", "axis", "(", "[", "float", "(", "xx", "[", "0", "]", ")", ",", "float", "(", "xx", "[", "-", "1", "]", ")", ",", "yllim", ",", "yulim", "]", ")", "else", ":", "ax", ".", "axis", "(", "[", "xllim", ",", "xulim", ",", "yllim", ",", "yulim", "]", ")", "# Plot all of the contours. Levels indicates to only plot the shaded", "# regions and not plot the white regions, so that they are essentially", "# transparent. 
If engen=True, then the energy generation levels", "# (boundary values) are calculated (in dex) from 2 to the maximum in", "# steps of 2.", "for", "i", "in", "range", "(", "len", "(", "plot", ")", ")", ":", "ax", ".", "contourf", "(", "xx", ",", "y", ",", "Z", "[", ":", ",", ":", ",", "i", "]", ",", "levels", "=", "[", "0.5", ",", "1.5", "]", ",", "colors", "=", "colours", "[", "i", "]", ",", "alpha", "=", "alphas", "[", "i", "]", ")", "if", "engen", "==", "True", ":", "#ceiling = int(max_energy_gen+1)", "#floor = int(min_energy_gen+1)", "#cburn = ax.contourf(xx,y,Z[:,:,1],cmap=engen_cmap,locator=mpl.ticker.LogLocator(),alpha=engenalpha) # SJONES comment", "cburn", "=", "ax", ".", "contourf", "(", "xx", ",", "y", ",", "Z", "[", ":", ",", ":", ",", "1", "]", ",", "cmap", "=", "engen_cmap", ",", "alpha", "=", "engenalpha", ",", "levels", "=", "list", "(", "range", "(", "5", ",", "32", ",", "5", ")", ")", ")", "cbarburn", "=", "pl", ".", "colorbar", "(", "cburn", ")", "# if min_energy_gen != 0:", "# closs = ax.contourf(xx,y,Z[:,:,2],cmap=enloss_cmap,locator=mpl.ticker.LogLocator(),alpha=engenalpha)", "# cbarloss = pl.colorbar(closs)", "# cbarburn.set_label('$\\epsilon_\\mathrm{nuc}-\\epsilon_{\\\\nu} \\; (\\mathrm{erg\\,g}^{-1}\\mathrm{\\,s}^{-1})$, > 0',fontsize=fsize)", "cbarburn", ".", "set_label", "(", "'$log_{10}(\\epsilon_\\mathrm{nuc}) \\; (\\mathrm{erg\\,g}^{-1}\\mathrm{\\,s}^{-1})$, > 0'", ",", "fontsize", "=", "fsize", ")", "pl", ".", "plot", "(", "xx", ",", "total_massco", ",", "color", "=", "'k'", ")", "pl", ".", "text", "(", "0.9", ",", "0.9", ",", "annotation", ",", "horizontalalignment", "=", "'right'", ",", "transform", "=", "ax", ".", "transAxes", ",", "fontsize", "=", "fsize", ")", "pl", ".", "ylabel", "(", "'$\\mathrm{Mass}\\;[M_\\odot]$'", ",", "fontsize", "=", "fsize", "-", "1", ")", "pl", ".", "savefig", "(", "outfile", ")", "print", "(", "outfile", "+", "' is done.'", ")", "pl", ".", "show", "(", ")" ]
!! EXPERIMENTAL FEATURE (flagged as private) !! This function creates a Kippenhahn diagram as a contour plot of the .se.h5 or .out.h5 files using any continuous variable (columns in the hdf5 cycle data). Multiple columns may be plotted, their names indicated in the list "plot", and their thresholds in the list "thresholds". Currently, this is only designed to take one threshold for each variable, but future versions will hopefully be able to plot with multiple thresholds; however, you may circumvent this issue by repeating the variable in "plots" and entering a second threshold for it in "thresholds". Parameters ---------- sparse : integer x-axis (timestep) sparsity. The true sparsity is sparse*sparse_intrinsic. Try 100 or 500 for .se.h5 and 20 for .out.h5 files and for preliminary plots. cycle_start : integer, optional Cycle from which you wish to plot. The default is 0. cycle_end : integer, optional Maximum cycle that you wish to plot. If cycle_end is 0, then it will plot up to the last cycle available. The default is 0. plot : list, optional 1-D array containing the variables to be plotted (as strings, e.g. plots=['dcoeff','C-13']. I recommend always plotting 'dcoeff' as plots[0]). The default is ['dcoeff']. thresholds : list, optional 1-D array containing the thresholds corresponding to the variables in "plots". The default is [1.0E+12]. xax : string, optional x-axis quantity; either 'log_time_left' or 'cycles'. The default is 'log_time_left'. alphas : list, optional Array containing the opacity (0 to 1) of the contour for each variable. The default is [1.0]. yllim : float, optional Lower plot limit for y-axis (mass co-ordinate). The default is 0.. yulim : float, optional Upper plot limit for y-axis (mass co-ordinate). The default is 0.. y_res : integer, optional y-axis resolution. Defaults to 2000 but increasing to as much as 10000 does not significantly affect the plotting time. The default is 2000. xllim : float, optional Lower plot limit for x-axis. The default is 0.. xulim : float, optional Upper plot limit for x-axis. The default is 0.. age : string, optional Either 'years' or 'seconds', depending on the data. The default is 'seconds'. sparse_intrinsic : integer, optional Sparsity of timesteps in the data provided (usually 20 for .out.h5 files and 1 for .se.h5 files). The default is 20. engen : boolean, optional Whether the user would like to plot a Kippenhahn diagram of convective zones and energy generation. If True, please still include plots=['dcoeff'] and thresholds=[1.0E+12] in your call. This will require the data to have an 'eps_nuc' column, so the plot only works for .se.h5 files from MESA in the current se library. This is the most recent addition, so probably the most buggy. The plot script will automatically calculate and assign multiple thresholds according to the model. The default is False. netnuc_name : string, optional The name of the column containing (eps_nuc-eps_neu). If you do not have available eps_neu, then you can give netnuc_name="eps_nuc" to just plot energy generation. The default is "eps_nuc". engenalpha : float, optional Opacity of the energy generation contours. The default is 0.6. outfile : string, optional The name to save the plot as. The default is 'plot.pdf'. annotation : string, optional Some optional annotation to add to the plot. The default is ''. KEPLER : boolean, optional The default is False.
[ "!!", "EXPERIMENTAL", "FEATURE", "(", "flagged", "as", "private", ")", "!!" ]
python
train
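A hypothetical call for the experimental routine above; the data path is invented and the keyword values follow the docstring's suggestions for preliminary .se.h5 plots:

import nugridpy.nugridse as mp

pt = mp.se('/path/to/M2.00Z1.0e-02/se/')   # placeholder data directory
pt._kip_cont2(sparse=500, plot=['dcoeff'], thresholds=[1.0e12],
              xax='log_time_left', sparse_intrinsic=1,
              outfile='kippenhahn.pdf')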
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L873-L884
def nas_auto_qos_set_cos(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nas = ET.SubElement(config, "nas", xmlns="urn:brocade.com:mgmt:brocade-qos") auto_qos = ET.SubElement(nas, "auto-qos") set = ET.SubElement(auto_qos, "set") cos = ET.SubElement(set, "cos") cos.text = kwargs.pop('cos') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nas_auto_qos_set_cos", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nas", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nas\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "auto_qos", "=", "ET", ".", "SubElement", "(", "nas", ",", "\"auto-qos\"", ")", "set", "=", "ET", ".", "SubElement", "(", "auto_qos", ",", "\"set\"", ")", "cos", "=", "ET", ".", "SubElement", "(", "set", ",", "\"cos\"", ")", "cos", ".", "text", "=", "kwargs", ".", "pop", "(", "'cos'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
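Because nas_auto_qos_set_cos only builds a <config> document and hands it to a callback, it can be exercised offline; passing ET.tostring as the callback simply serializes the request. The constructor's callback keyword mirrors the pattern of these auto-generated classes and is an assumption:

import xml.etree.ElementTree as ET
from pynos.versions.ver_6.ver_6_0_1.yang.brocade_qos import brocade_qos

qos = brocade_qos(callback=ET.tostring)    # constructor kwarg assumed
print(qos.nas_auto_qos_set_cos(cos='3'))   # serialized NETCONF payload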
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L243-L245
def chat_update(self, room_id, msg_id, text, **kwargs): """Updates the text of the chat message.""" return self.__call_api_post('chat.update', roomId=room_id, msgId=msg_id, text=text, kwargs=kwargs)
[ "def", "chat_update", "(", "self", ",", "room_id", ",", "msg_id", ",", "text", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'chat.update'", ",", "roomId", "=", "room_id", ",", "msgId", "=", "msg_id", ",", "text", "=", "text", ",", "kwargs", "=", "kwargs", ")" ]
Updates the text of the chat message.
[ "Updates", "the", "text", "of", "the", "chat", "message", "." ]
python
train
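Usage sketch for chat_update; the server URL, credentials and both IDs are placeholders:

from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat('user', 'pass', server_url='https://demo.rocket.chat')
rocket.chat_update(room_id='GENERAL', msg_id='7aDSXtjMA3KPLxLjt',
                   text='edited: hello again')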
psss/did
did/utils.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/utils.py#L345-L376
def set(self, mode=None): """ Set the coloring mode If enabled, some objects (like case run Status) are printed in color to easily spot failures, errors and so on. By default the feature is enabled when script is attached to a terminal. Possible values are:: COLOR=0 ... COLOR_OFF .... coloring disabled COLOR=1 ... COLOR_ON ..... coloring enabled COLOR=2 ... COLOR_AUTO ... if terminal attached (default) Environment variable COLOR can be used to set up the coloring to the desired mode without modifying code. """ # Detect from the environment if no mode given (only once) if mode is None: # Nothing to do if already detected if self._mode is not None: return # Detect from the environment variable COLOR try: mode = int(os.environ["COLOR"]) except StandardError: mode = COLOR_AUTO elif mode < 0 or mode > 2: raise RuntimeError("Invalid color mode '{0}'".format(mode)) self._mode = mode log.debug( "Coloring {0} ({1})".format( "enabled" if self.enabled() else "disabled", self.MODES[self._mode]))
[ "def", "set", "(", "self", ",", "mode", "=", "None", ")", ":", "# Detect from the environment if no mode given (only once)", "if", "mode", "is", "None", ":", "# Nothing to do if already detected", "if", "self", ".", "_mode", "is", "not", "None", ":", "return", "# Detect from the environment variable COLOR", "try", ":", "mode", "=", "int", "(", "os", ".", "environ", "[", "\"COLOR\"", "]", ")", "except", "StandardError", ":", "mode", "=", "COLOR_AUTO", "elif", "mode", "<", "0", "or", "mode", ">", "2", ":", "raise", "RuntimeError", "(", "\"Invalid color mode '{0}'\"", ".", "format", "(", "mode", ")", ")", "self", ".", "_mode", "=", "mode", "log", ".", "debug", "(", "\"Coloring {0} ({1})\"", ".", "format", "(", "\"enabled\"", "if", "self", ".", "enabled", "(", ")", "else", "\"disabled\"", ",", "self", ".", "MODES", "[", "self", ".", "_mode", "]", ")", ")" ]
Set the coloring mode If enabled, some objects (like case run Status) are printed in color to easily spot failures, errors and so on. By default the feature is enabled when script is attached to a terminal. Possible values are:: COLOR=0 ... COLOR_OFF .... coloring disabled COLOR=1 ... COLOR_ON ..... coloring enabled COLOR=2 ... COLOR_AUTO ... if terminal attached (default) Environment variable COLOR can be used to set up the coloring to the desired mode without modifying code.
[ "Set", "the", "coloring", "mode" ]
python
train
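Usage sketch for set; Coloring is assumed to be the singleton class this method belongs to, and the COLOR_* constants carry the 0/1/2 values listed in the docstring:

from did.utils import Coloring, COLOR_ON, COLOR_OFF

Coloring().set(COLOR_ON)    # force colors, e.g. when piping output
Coloring().set(COLOR_OFF)   # disable coloring entirely
Coloring().set()            # auto-detect, honoring the COLOR env variable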
draperjames/qtpandas
qtpandas/views/CustomDelegates.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/views/CustomDelegates.py#L183-L193
def setModelData(self, spinBox, model, index): """Gets data from the editor widget and stores it in the specified model at the item index. Args: spinBox (QDoubleSpinBox): editor widget. model (QAbstractItemModel): parent model. index (QModelIndex): model data index. """ spinBox.interpretText() value = spinBox.value() model.setData(index, value, QtCore.Qt.EditRole)
[ "def", "setModelData", "(", "self", ",", "spinBox", ",", "model", ",", "index", ")", ":", "spinBox", ".", "interpretText", "(", ")", "value", "=", "spinBox", ".", "value", "(", ")", "model", ".", "setData", "(", "index", ",", "value", ",", "QtCore", ".", "Qt", ".", "EditRole", ")" ]
Gets data from the editor widget and stores it in the specified model at the item index. Args: spinBox (QDoubleSpinBox): editor widget. model (QAbstractItemModel): parent model. index (QModelIndex): model data index.
[ "Gets", "data", "from", "the", "editor", "widget", "and", "stores", "it", "in", "the", "specified", "model", "at", "the", "item", "index", "." ]
python
train
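The view calls this hook itself when editing finishes; below is a schematic fragment of the equivalent manual call, where the delegate subclass name is hypothetical and spin_box, model and index come from an existing editing session:

delegate = DoubleSpinBoxDelegate(parent=view)   # hypothetical subclass name
delegate.setModelData(spin_box, model, index)
# ...which is equivalent to:
# model.setData(index, spin_box.value(), QtCore.Qt.EditRole)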
okfn/ofs
ofs/local/zipfile.py
https://github.com/okfn/ofs/blob/c110cbecd7d0ae7e877963914a1a5af030cd6d45/ofs/local/zipfile.py#L724-L785
def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" fp = self.fp endrec = _EndRecData(fp) if not endrec: raise BadZipfile("File is not a zip file") if self.debug > 1: print(endrec) size_cd = endrec[_ECD_SIZE] # bytes in central directory offset_cd = endrec[_ECD_OFFSET] # offset of central directory self.comment = endrec[_ECD_COMMENT] # archive comment # "concat" is zero, unless zip was concatenated to another file concat = endrec[_ECD_LOCATION] - size_cd - offset_cd if endrec[_ECD_SIGNATURE] == stringEndArchive64: # If Zip64 extension structures are present, account for them concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) if self.debug > 2: inferred = concat + offset_cd print("given, inferred, offset", offset_cd, inferred, concat) # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat fp.seek(self.start_dir, 0) data = fp.read(size_cd) fp = cStringIO.StringIO(data) total = 0 while total < size_cd: centdir = fp.read(sizeCentralDir) if centdir[0:4] != stringCentralDir: raise BadZipfile("Bad magic number for central directory") centdir = struct.unpack(structCentralDir, centdir) if self.debug > 2: print(centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x._raw_time = t x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) x._decodeExtra() x.header_offset = x.header_offset + concat x.filename = x._decodeFilename() self.filelist.append(x) self.NameToInfo[x.filename] = x # update total bytes read from central directory total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) if self.debug > 2: print("total", total)
[ "def", "_RealGetContents", "(", "self", ")", ":", "fp", "=", "self", ".", "fp", "endrec", "=", "_EndRecData", "(", "fp", ")", "if", "not", "endrec", ":", "raise", "BadZipfile", "(", "\"File is not a zip file\"", ")", "if", "self", ".", "debug", ">", "1", ":", "print", "(", "endrec", ")", "size_cd", "=", "endrec", "[", "_ECD_SIZE", "]", "# bytes in central directory", "offset_cd", "=", "endrec", "[", "_ECD_OFFSET", "]", "# offset of central directory", "self", ".", "comment", "=", "endrec", "[", "_ECD_COMMENT", "]", "# archive comment", "# \"concat\" is zero, unless zip was concatenated to another file", "concat", "=", "endrec", "[", "_ECD_LOCATION", "]", "-", "size_cd", "-", "offset_cd", "if", "endrec", "[", "_ECD_SIGNATURE", "]", "==", "stringEndArchive64", ":", "# If Zip64 extension structures are present, account for them", "concat", "-=", "(", "sizeEndCentDir64", "+", "sizeEndCentDir64Locator", ")", "if", "self", ".", "debug", ">", "2", ":", "inferred", "=", "concat", "+", "offset_cd", "print", "(", "\"given, inferred, offset\"", ",", "offset_cd", ",", "inferred", ",", "concat", ")", "# self.start_dir: Position of start of central directory", "self", ".", "start_dir", "=", "offset_cd", "+", "concat", "fp", ".", "seek", "(", "self", ".", "start_dir", ",", "0", ")", "data", "=", "fp", ".", "read", "(", "size_cd", ")", "fp", "=", "cStringIO", ".", "StringIO", "(", "data", ")", "total", "=", "0", "while", "total", "<", "size_cd", ":", "centdir", "=", "fp", ".", "read", "(", "sizeCentralDir", ")", "if", "centdir", "[", "0", ":", "4", "]", "!=", "stringCentralDir", ":", "raise", "BadZipfile", "(", "\"Bad magic number for central directory\"", ")", "centdir", "=", "struct", ".", "unpack", "(", "structCentralDir", ",", "centdir", ")", "if", "self", ".", "debug", ">", "2", ":", "print", "(", "centdir", ")", "filename", "=", "fp", ".", "read", "(", "centdir", "[", "_CD_FILENAME_LENGTH", "]", ")", "# Create ZipInfo instance to store file information", "x", "=", "ZipInfo", "(", "filename", ")", "x", ".", "extra", "=", "fp", ".", "read", "(", "centdir", "[", "_CD_EXTRA_FIELD_LENGTH", "]", ")", "x", ".", "comment", "=", "fp", ".", "read", "(", "centdir", "[", "_CD_COMMENT_LENGTH", "]", ")", "x", ".", "header_offset", "=", "centdir", "[", "_CD_LOCAL_HEADER_OFFSET", "]", "(", "x", ".", "create_version", ",", "x", ".", "create_system", ",", "x", ".", "extract_version", ",", "x", ".", "reserved", ",", "x", ".", "flag_bits", ",", "x", ".", "compress_type", ",", "t", ",", "d", ",", "x", ".", "CRC", ",", "x", ".", "compress_size", ",", "x", ".", "file_size", ")", "=", "centdir", "[", "1", ":", "12", "]", "x", ".", "volume", ",", "x", ".", "internal_attr", ",", "x", ".", "external_attr", "=", "centdir", "[", "15", ":", "18", "]", "# Convert date/time code to (year, month, day, hour, min, sec)", "x", ".", "_raw_time", "=", "t", "x", ".", "date_time", "=", "(", "(", "d", ">>", "9", ")", "+", "1980", ",", "(", "d", ">>", "5", ")", "&", "0xF", ",", "d", "&", "0x1F", ",", "t", ">>", "11", ",", "(", "t", ">>", "5", ")", "&", "0x3F", ",", "(", "t", "&", "0x1F", ")", "*", "2", ")", "x", ".", "_decodeExtra", "(", ")", "x", ".", "header_offset", "=", "x", ".", "header_offset", "+", "concat", "x", ".", "filename", "=", "x", ".", "_decodeFilename", "(", ")", "self", ".", "filelist", ".", "append", "(", "x", ")", "self", ".", "NameToInfo", "[", "x", ".", "filename", "]", "=", "x", "# update total bytes read from central directory", "total", "=", "(", "total", "+", "sizeCentralDir", "+", "centdir", "[", 
"_CD_FILENAME_LENGTH", "]", "+", "centdir", "[", "_CD_EXTRA_FIELD_LENGTH", "]", "+", "centdir", "[", "_CD_COMMENT_LENGTH", "]", ")", "if", "self", ".", "debug", ">", "2", ":", "print", "(", "\"total\"", ",", "total", ")" ]
Read in the table of contents for the ZIP file.
[ "Read", "in", "the", "table", "of", "contents", "for", "the", "ZIP", "file", "." ]
python
train
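The date/time unpacking above extracts six calendar fields from two 16-bit MS-DOS words; the same bit arithmetic as a standalone helper:

def dos_datetime(d, t):
    """Decode MS-DOS packed date (d) and time (t) words."""
    return ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,   # year, month, day
            t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)    # hour, min, sec (2s resolution)

print(dos_datetime(0x5821, 0x6083))   # -> (2024, 1, 1, 12, 4, 6)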
andrea-cuttone/geoplotlib
geoplotlib/utils.py
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/utils.py#L207-L217
def from_points(lons, lats): """ Compute the BoundingBox from a set of latitudes and longitudes :param lons: longitudes :param lats: latitudes :return: BoundingBox """ north, west = max(lats), min(lons) south, east = min(lats), max(lons) return BoundingBox(north=north, west=west, south=south, east=east)
[ "def", "from_points", "(", "lons", ",", "lats", ")", ":", "north", ",", "west", "=", "max", "(", "lats", ")", ",", "min", "(", "lons", ")", "south", ",", "east", "=", "min", "(", "lats", ")", ",", "max", "(", "lons", ")", "return", "BoundingBox", "(", "north", "=", "north", ",", "west", "=", "west", ",", "south", "=", "south", ",", "east", "=", "east", ")" ]
Compute the BoundingBox from a set of latitudes and longitudes :param lons: longitudes :param lats: latitudes :return: BoundingBox
[ "Compute", "the", "BoundingBox", "from", "a", "set", "of", "latitudes", "and", "longitudes" ]
python
train
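Usage sketch; since from_points takes no self parameter, it is assumed to be exposed as a staticmethod on BoundingBox, as its return value suggests:

from geoplotlib.utils import BoundingBox

bbox = BoundingBox.from_points(lons=[9.10, 9.30, 9.19],
                               lats=[45.40, 45.50, 45.46])
# -> north=45.5, west=9.1, south=45.4, east=9.3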
apache/airflow
airflow/www/api/experimental/endpoints.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L114-L131
def dag_runs(dag_id): """ Returns a list of Dag Runs for a specific DAG ID. :query param state: a query string parameter '?state=queued|running|success...' :param dag_id: String identifier of a DAG :return: List of DAG runs of a DAG with requested state, or all runs if the state is not specified """ try: state = request.args.get('state') dagruns = get_dag_runs(dag_id, state) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = 400 return response return jsonify(dagruns)
[ "def", "dag_runs", "(", "dag_id", ")", ":", "try", ":", "state", "=", "request", ".", "args", ".", "get", "(", "'state'", ")", "dagruns", "=", "get_dag_runs", "(", "dag_id", ",", "state", ")", "except", "AirflowException", "as", "err", ":", "_log", ".", "info", "(", "err", ")", "response", "=", "jsonify", "(", "error", "=", "\"{}\"", ".", "format", "(", "err", ")", ")", "response", ".", "status_code", "=", "400", "return", "response", "return", "jsonify", "(", "dagruns", ")" ]
Returns a list of Dag Runs for a specific DAG ID. :query param state: a query string parameter '?state=queued|running|success...' :param dag_id: String identifier of a DAG :return: List of DAG runs of a DAG with requested state, or all runs if the state is not specified
[ "Returns", "a", "list", "of", "Dag", "Runs", "for", "a", "specific", "DAG", "ID", ".", ":", "query", "param", "state", ":", "a", "query", "string", "parameter", "?state", "=", "queued|running|success", "...", ":", "param", "dag_id", ":", "String", "identifier", "of", "a", "DAG", ":", "return", ":", "List", "of", "DAG", "runs", "of", "a", "DAG", "with", "requested", "state", "or", "all", "runs", "if", "the", "state", "is", "not", "specified" ]
python
test
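An illustrative client call for the handler above; host, port and DAG id are placeholders, and the experimental-API URL prefix is an assumption:

import requests

resp = requests.get(
    'http://localhost:8080/api/experimental/dags/example_dag/dag_runs',
    params={'state': 'running'})
resp.raise_for_status()   # a 400 response carries {"error": "..."} instead
print(resp.json())        # list of runs in the requested state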
apache/incubator-heron
heronpy/api/metrics.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/metrics.py#L140-L143
def add_key(self, key): """Adds a new key to this metric""" if key not in self.value: self.value[key] = ReducedMetric(self.reducer)
[ "def", "add_key", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "value", ":", "self", ".", "value", "[", "key", "]", "=", "ReducedMetric", "(", "self", ".", "reducer", ")" ]
Adds a new key to this metric
[ "Adds", "a", "new", "key", "to", "this", "metric" ]
python
valid
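add_key pre-registers a key on a multi-key reduced metric so it is reported before any update; a sketch using MultiCountMetric from the same module, whose incr signature is an assumption:

from heronpy.api.metrics import MultiCountMetric

metric = MultiCountMetric()
metric.add_key('acks')    # reported (as 0) even before any increment
metric.incr('fails')      # unknown keys are also created lazily on update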
vcs-python/vcspull
vcspull/config.py
https://github.com/vcs-python/vcspull/blob/c1827bf78d2cdebc61d82111c9aa35afd6ea6a25/vcspull/config.py#L182-L212
def load_configs(files, cwd=os.getcwd()): """Return repos from a list of files. :todo: Validate scheme, check for duplicate destinations, VCS urls :param files: paths to config file :type files: list :param cwd: current path (pass down for :func:`extract_repos`) :type cwd: str :returns: expanded config dict item :rtype: list of dict """ repos = [] for f in files: _, ext = os.path.splitext(f) conf = kaptan.Kaptan(handler=ext.lstrip('.')).import_config(f) newrepos = extract_repos(conf.export('dict'), cwd) if not repos: repos.extend(newrepos) continue dupes = detect_duplicate_repos(repos, newrepos) if dupes: msg = ('repos with same path + different VCS detected!', dupes) raise exc.VCSPullException(msg) repos.extend(newrepos) return repos
[ "def", "load_configs", "(", "files", ",", "cwd", "=", "os", ".", "getcwd", "(", ")", ")", ":", "repos", "=", "[", "]", "for", "f", "in", "files", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "f", ")", "conf", "=", "kaptan", ".", "Kaptan", "(", "handler", "=", "ext", ".", "lstrip", "(", "'.'", ")", ")", ".", "import_config", "(", "f", ")", "newrepos", "=", "extract_repos", "(", "conf", ".", "export", "(", "'dict'", ")", ",", "cwd", ")", "if", "not", "repos", ":", "repos", ".", "extend", "(", "newrepos", ")", "continue", "dupes", "=", "detect_duplicate_repos", "(", "repos", ",", "newrepos", ")", "if", "dupes", ":", "msg", "=", "(", "'repos with same path + different VCS detected!'", ",", "dupes", ")", "raise", "exc", ".", "VCSPullException", "(", "msg", ")", "repos", ".", "extend", "(", "newrepos", ")", "return", "repos" ]
Return repos from a list of files.

:todo: Validate scheme, check for duplicate destinations, VCS urls

:param files: paths to config files
:type files: list
:param cwd: current path (passed down to :func:`extract_repos`)
:type cwd: str
:returns: expanded config dict item
:rtype: list of dict
[ "Return", "repos", "from", "a", "list", "of", "files", "." ]
python
train
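A minimal sketch of calling `load_configs` from the record above, assuming two hypothetical YAML config paths; when the same checkout path appears with different VCS urls, the duplicate check raises `exc.VCSPullException`.

import os
from vcspull.config import load_configs

# Hypothetical config files; the file extension selects the kaptan handler.
paths = [os.path.expanduser("~/.vcspull.yaml"), os.path.expanduser("~/more.yaml")]
repos = load_configs(paths)
for repo in repos:
    print(repo)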
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L183-L207
def check_type_of_nest_spec_keys_and_values(nest_spec):
    """
    Ensures that the keys and values of `nest_spec` are strings and lists.
    Raises a helpful TypeError if they are not.

    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are lists
        of alternative ids, denoting which alternatives belong to which
        nests. Each alternative id must only be associated with a single
        nest! Default == None.

    Returns
    -------
    None.
    """
    try:
        assert all([isinstance(k, str) for k in nest_spec])
        assert all([isinstance(nest_spec[k], list) for k in nest_spec])
    except AssertionError:
        msg = "All nest_spec keys/values must be strings/lists."
        raise TypeError(msg)

    return None
[ "def", "check_type_of_nest_spec_keys_and_values", "(", "nest_spec", ")", ":", "try", ":", "assert", "all", "(", "[", "isinstance", "(", "k", ",", "str", ")", "for", "k", "in", "nest_spec", "]", ")", "assert", "all", "(", "[", "isinstance", "(", "nest_spec", "[", "k", "]", ",", "list", ")", "for", "k", "in", "nest_spec", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"All nest_spec keys/values must be strings/lists.\"", "raise", "TypeError", "(", "msg", ")", "return", "None" ]
Ensures that the keys and values of `nest_spec` are strings and lists. Raises a helpful TypeError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None.
[ "Ensures", "that", "the", "keys", "and", "values", "of", "nest_spec", "are", "strings", "and", "lists", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "." ]
python
train
exxeleron/qPython
qpython/qreader.py
https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qreader.py#L119-L141
def read(self, source = None, **options):
    '''
    Reads and optionally parses a single message.

    :Parameters:
     - `source` - optional data buffer to be read, if not specified data is
       read from the wrapped stream

    :Options:
     - `raw` (`boolean`) - indicates whether read data should be parsed or
       returned in raw byte form
     - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
       backed by raw q representation (:class:`.QTemporalList`,
       :class:`.QTemporal`) instances, otherwise are represented as
       `numpy datetime64`/`timedelta64` arrays and atoms,
       **Default**: ``False``

    :returns: :class:`.QMessage` - read data (parsed or raw byte form) along
              with meta information
    '''
    message = self.read_header(source)
    message.data = self.read_data(message.size, message.is_compressed, **options)

    return message
[ "def", "read", "(", "self", ",", "source", "=", "None", ",", "*", "*", "options", ")", ":", "message", "=", "self", ".", "read_header", "(", "source", ")", "message", ".", "data", "=", "self", ".", "read_data", "(", "message", ".", "size", ",", "message", ".", "is_compressed", ",", "*", "*", "options", ")", "return", "message" ]
Reads and optionally parses a single message.

:Parameters:
 - `source` - optional data buffer to be read, if not specified data is
   read from the wrapped stream

:Options:
 - `raw` (`boolean`) - indicates whether read data should be parsed or
   returned in raw byte form
 - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
   backed by raw q representation (:class:`.QTemporalList`,
   :class:`.QTemporal`) instances, otherwise are represented as
   `numpy datetime64`/`timedelta64` arrays and atoms,
   **Default**: ``False``

:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
          with meta information
[ "Reads", "and", "optionally", "parses", "a", "single", "message", ".", ":", "Parameters", ":", "-", "source", "-", "optional", "data", "buffer", "to", "be", "read", "if", "not", "specified", "data", "is", "read", "from", "the", "wrapped", "stream", ":", "Options", ":", "-", "raw", "(", "boolean", ")", "-", "indicates", "whether", "read", "data", "should", "parsed", "or", "returned", "in", "raw", "byte", "form", "-", "numpy_temporals", "(", "boolean", ")", "-", "if", "False", "temporal", "vectors", "are", "backed", "by", "raw", "q", "representation", "(", ":", "class", ":", ".", "QTemporalList", ":", "class", ":", ".", "QTemporal", ")", "instances", "otherwise", "are", "represented", "as", "numpy", "datetime64", "/", "timedelta64", "arrays", "and", "atoms", "**", "Default", "**", ":", "False", ":", "returns", ":", ":", "class", ":", ".", "QMessage", "-", "read", "data", "(", "parsed", "or", "raw", "byte", "form", ")", "along", "with", "meta", "information" ]
python
train
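The `read` method above is normally driven through a connection object; a hedged sketch of that flow, assuming a q process listening on localhost:5000.

from qpython import qconnection

q = qconnection.QConnection(host="localhost", port=5000)
q.open()
try:
    q.query(qconnection.MessageType.SYNC, "1+1")
    # receive() wraps QReader.read(); raw=False asks for parsed data.
    message = q.receive(data_only=False, raw=False)
    print(message.type, message.size, message.data)
finally:
    q.close()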
yougov/elastic-doc-manager
mongo_connector/doc_managers/elastic_doc_manager.py
https://github.com/yougov/elastic-doc-manager/blob/17c35f4dd3d081b171c45ba0eb9616da9dc89e82/mongo_connector/doc_managers/elastic_doc_manager.py#L454-L470
def send_buffered_operations(self): """Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread. """ with self.lock: try: action_buffer = self.BulkBuffer.get_buffer() if action_buffer: successes, errors = bulk(self.elastic, action_buffer) LOG.debug("Bulk request finished, successfully sent %d " "operations", successes) if errors: LOG.error( "Bulk request finished with errors: %r", errors) except es_exceptions.ElasticsearchException: LOG.exception("Bulk request failed with exception")
[ "def", "send_buffered_operations", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "try", ":", "action_buffer", "=", "self", ".", "BulkBuffer", ".", "get_buffer", "(", ")", "if", "action_buffer", ":", "successes", ",", "errors", "=", "bulk", "(", "self", ".", "elastic", ",", "action_buffer", ")", "LOG", ".", "debug", "(", "\"Bulk request finished, successfully sent %d \"", "\"operations\"", ",", "successes", ")", "if", "errors", ":", "LOG", ".", "error", "(", "\"Bulk request finished with errors: %r\"", ",", "errors", ")", "except", "es_exceptions", ".", "ElasticsearchException", ":", "LOG", ".", "exception", "(", "\"Bulk request failed with exception\"", ")" ]
Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread.
[ "Send", "buffered", "operations", "to", "Elasticsearch", "." ]
python
train
foremast/foremast
src/foremast/utils/tasks.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/utils/tasks.py#L97-L122
def check_task(taskid, timeout=DEFAULT_TASK_TIMEOUT, wait=2): """Wrap check_task. Args: taskid (str): Existing Spinnaker Task ID. timeout (int, optional): Consider Task failed after given seconds. wait (int, optional): Seconds to pause between polling attempts. Returns: str: Task status. Raises: AssertionError: API did not respond with a 200 status code. :obj:`foremast.exceptions.SpinnakerTaskInconclusiveError`: Task did not reach a terminal state before the given time out. """ max_attempts = int(timeout / wait) try: return retry_call( partial(_check_task, taskid), max_attempts=max_attempts, wait=wait, exceptions=(AssertionError, ValueError), ) except ValueError: raise SpinnakerTaskInconclusiveError('Task failed to complete in {0} seconds: {1}'.format(timeout, taskid))
[ "def", "check_task", "(", "taskid", ",", "timeout", "=", "DEFAULT_TASK_TIMEOUT", ",", "wait", "=", "2", ")", ":", "max_attempts", "=", "int", "(", "timeout", "/", "wait", ")", "try", ":", "return", "retry_call", "(", "partial", "(", "_check_task", ",", "taskid", ")", ",", "max_attempts", "=", "max_attempts", ",", "wait", "=", "wait", ",", "exceptions", "=", "(", "AssertionError", ",", "ValueError", ")", ",", ")", "except", "ValueError", ":", "raise", "SpinnakerTaskInconclusiveError", "(", "'Task failed to complete in {0} seconds: {1}'", ".", "format", "(", "timeout", ",", "taskid", ")", ")" ]
Wrap check_task. Args: taskid (str): Existing Spinnaker Task ID. timeout (int, optional): Consider Task failed after given seconds. wait (int, optional): Seconds to pause between polling attempts. Returns: str: Task status. Raises: AssertionError: API did not respond with a 200 status code. :obj:`foremast.exceptions.SpinnakerTaskInconclusiveError`: Task did not reach a terminal state before the given time out.
[ "Wrap", "check_task", "." ]
python
train
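`check_task` above leans on `retry_call` to poll until the task reaches a terminal state; a standalone sketch of that retry pattern (illustrative names, not Foremast's implementation).

import time

def retry_call(func, max_attempts, wait, exceptions):
    # Re-invoke func until it stops raising one of `exceptions`,
    # sleeping `wait` seconds between attempts; re-raise on exhaustion.
    for attempt in range(max_attempts):
        try:
            return func()
        except exceptions:
            if attempt == max_attempts - 1:
                raise
            time.sleep(wait)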
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L178-L187
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None, size=None): """Plot comparison between BAM preparation methods. """ samples = df[(df["bamprep"] == prep)]["sample"].unique() assert len(samples) >= 1, samples out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype) df = df[df["category"].isin(cat_labels)] _seaborn(df, prep, prepi, out_file, title, size) return out_file
[ "def", "plot_prep_methods", "(", "df", ",", "prep", ",", "prepi", ",", "out_file_base", ",", "outtype", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "samples", "=", "df", "[", "(", "df", "[", "\"bamprep\"", "]", "==", "prep", ")", "]", "[", "\"sample\"", "]", ".", "unique", "(", ")", "assert", "len", "(", "samples", ")", ">=", "1", ",", "samples", "out_file", "=", "\"%s-%s.%s\"", "%", "(", "out_file_base", ",", "samples", "[", "0", "]", ",", "outtype", ")", "df", "=", "df", "[", "df", "[", "\"category\"", "]", ".", "isin", "(", "cat_labels", ")", "]", "_seaborn", "(", "df", ",", "prep", ",", "prepi", ",", "out_file", ",", "title", ",", "size", ")", "return", "out_file" ]
Plot comparison between BAM preparation methods.
[ "Plot", "comparison", "between", "BAM", "preparation", "methods", "." ]
python
train
loli/medpy
medpy/core/logger.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/core/logger.py#L121-L143
def setLevel(self, level): r"""Overrides the parent method to adapt the formatting string to the level. Parameters ---------- level : int The new log level to set. See the logging levels in the logging module for details. Examples -------- >>> import logging >>> Logger.setLevel(logging.DEBUG) """ if logging.DEBUG >= level: formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)", "%d.%m.%Y %H:%M:%S") self._handler.setFormatter(formatter) else: formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s", "%d.%m.%Y %H:%M:%S") self._handler.setFormatter(formatter) NativeLogger.setLevel(self, level)
[ "def", "setLevel", "(", "self", ",", "level", ")", ":", "if", "logging", ".", "DEBUG", ">=", "level", ":", "formatter", "=", "logging", ".", "Formatter", "(", "\"%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)\"", ",", "\"%d.%m.%Y %H:%M:%S\"", ")", "self", ".", "_handler", ".", "setFormatter", "(", "formatter", ")", "else", ":", "formatter", "=", "logging", ".", "Formatter", "(", "\"%(asctime)s [%(levelname)-8s] %(message)s\"", ",", "\"%d.%m.%Y %H:%M:%S\"", ")", "self", ".", "_handler", ".", "setFormatter", "(", "formatter", ")", "NativeLogger", ".", "setLevel", "(", "self", ",", "level", ")" ]
r"""Overrides the parent method to adapt the formatting string to the level. Parameters ---------- level : int The new log level to set. See the logging levels in the logging module for details. Examples -------- >>> import logging >>> Logger.setLevel(logging.DEBUG)
[ "r", "Overrides", "the", "parent", "method", "to", "adapt", "the", "formatting", "string", "to", "the", "level", ".", "Parameters", "----------", "level", ":", "int", "The", "new", "log", "level", "to", "set", ".", "See", "the", "logging", "levels", "in", "the", "logging", "module", "for", "details", ".", "Examples", "--------", ">>>", "import", "logging", ">>>", "Logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")" ]
python
train
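A short usage sketch of the level-dependent formatting above, assuming medpy's singleton accessor `Logger.getInstance()`.

import logging
from medpy.core import Logger

logger = Logger.getInstance()
logger.setLevel(logging.DEBUG)   # verbose format with module/function/line
logger.debug("verbose message")
logger.setLevel(logging.INFO)    # plain format: timestamp, level, message
logger.info("terse message")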
readbeyond/aeneas
aeneas/container.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/container.py#L186-L200
def is_safe(self): """ Return ``True`` if the container can be safely extracted, that is, if all its entries are safe, ``False`` otherwise. :rtype: bool :raises: same as :func:`~aeneas.container.Container.entries` """ self.log(u"Checking if this container is safe") for entry in self.entries: if not self.is_entry_safe(entry): self.log([u"This container is not safe: found unsafe entry '%s'", entry]) return False self.log(u"This container is safe") return True
[ "def", "is_safe", "(", "self", ")", ":", "self", ".", "log", "(", "u\"Checking if this container is safe\"", ")", "for", "entry", "in", "self", ".", "entries", ":", "if", "not", "self", ".", "is_entry_safe", "(", "entry", ")", ":", "self", ".", "log", "(", "[", "u\"This container is not safe: found unsafe entry '%s'\"", ",", "entry", "]", ")", "return", "False", "self", ".", "log", "(", "u\"This container is safe\"", ")", "return", "True" ]
Return ``True`` if the container can be safely extracted, that is, if all its entries are safe, ``False`` otherwise. :rtype: bool :raises: same as :func:`~aeneas.container.Container.entries`
[ "Return", "True", "if", "the", "container", "can", "be", "safely", "extracted", "that", "is", "if", "all", "its", "entries", "are", "safe", "False", "otherwise", "." ]
python
train
StanfordVL/robosuite
robosuite/environments/baxter.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter.py#L107-L179
def _get_reference(self): """Sets up references for robots, grippers, and objects.""" super()._get_reference() # indices for joints in qpos, qvel self.robot_joints = list(self.mujoco_robot.joints) self._ref_joint_pos_indexes = [ self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints ] self._ref_joint_vel_indexes = [ self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints ] if self.use_indicator_object: ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator") self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator") self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel self.indicator_id = self.sim.model.body_name2id("pos_indicator") # indices for grippers in qpos, qvel if self.has_gripper_left: self.gripper_left_joints = list(self.gripper_left.joints) self._ref_gripper_left_joint_pos_indexes = [ self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_left_joints ] self._ref_gripper_left_joint_vel_indexes = [ self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_left_joints ] self.left_eef_site_id = self.sim.model.site_name2id("l_g_grip_site") if self.has_gripper_right: self.gripper_right_joints = list(self.gripper_right.joints) self._ref_gripper_right_joint_pos_indexes = [ self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_right_joints ] self._ref_gripper_right_joint_vel_indexes = [ self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_right_joints ] self.right_eef_site_id = self.sim.model.site_name2id("grip_site") # indices for joint pos actuation, joint vel actuation, gripper actuation self._ref_joint_pos_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("pos") ] self._ref_joint_vel_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("vel") ] if self.has_gripper_left: self._ref_joint_gripper_left_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("gripper_l") ] if self.has_gripper_right: self._ref_joint_gripper_right_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("gripper_r") ] if self.has_gripper_right: # IDs of sites for gripper visualization self.eef_site_id = self.sim.model.site_name2id("grip_site") self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder")
[ "def", "_get_reference", "(", "self", ")", ":", "super", "(", ")", ".", "_get_reference", "(", ")", "# indices for joints in qpos, qvel", "self", ".", "robot_joints", "=", "list", "(", "self", ".", "mujoco_robot", ".", "joints", ")", "self", ".", "_ref_joint_pos_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qpos_addr", "(", "x", ")", "for", "x", "in", "self", ".", "robot_joints", "]", "self", ".", "_ref_joint_vel_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qvel_addr", "(", "x", ")", "for", "x", "in", "self", ".", "robot_joints", "]", "if", "self", ".", "use_indicator_object", ":", "ind_qpos", "=", "self", ".", "sim", ".", "model", ".", "get_joint_qpos_addr", "(", "\"pos_indicator\"", ")", "self", ".", "_ref_indicator_pos_low", ",", "self", ".", "_ref_indicator_pos_high", "=", "ind_qpos", "ind_qvel", "=", "self", ".", "sim", ".", "model", ".", "get_joint_qvel_addr", "(", "\"pos_indicator\"", ")", "self", ".", "_ref_indicator_vel_low", ",", "self", ".", "_ref_indicator_vel_high", "=", "ind_qvel", "self", ".", "indicator_id", "=", "self", ".", "sim", ".", "model", ".", "body_name2id", "(", "\"pos_indicator\"", ")", "# indices for grippers in qpos, qvel", "if", "self", ".", "has_gripper_left", ":", "self", ".", "gripper_left_joints", "=", "list", "(", "self", ".", "gripper_left", ".", "joints", ")", "self", ".", "_ref_gripper_left_joint_pos_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qpos_addr", "(", "x", ")", "for", "x", "in", "self", ".", "gripper_left_joints", "]", "self", ".", "_ref_gripper_left_joint_vel_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qvel_addr", "(", "x", ")", "for", "x", "in", "self", ".", "gripper_left_joints", "]", "self", ".", "left_eef_site_id", "=", "self", ".", "sim", ".", "model", ".", "site_name2id", "(", "\"l_g_grip_site\"", ")", "if", "self", ".", "has_gripper_right", ":", "self", ".", "gripper_right_joints", "=", "list", "(", "self", ".", "gripper_right", ".", "joints", ")", "self", ".", "_ref_gripper_right_joint_pos_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qpos_addr", "(", "x", ")", "for", "x", "in", "self", ".", "gripper_right_joints", "]", "self", ".", "_ref_gripper_right_joint_vel_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "get_joint_qvel_addr", "(", "x", ")", "for", "x", "in", "self", ".", "gripper_right_joints", "]", "self", ".", "right_eef_site_id", "=", "self", ".", "sim", ".", "model", ".", "site_name2id", "(", "\"grip_site\"", ")", "# indices for joint pos actuation, joint vel actuation, gripper actuation", "self", ".", "_ref_joint_pos_actuator_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "actuator_name2id", "(", "actuator", ")", "for", "actuator", "in", "self", ".", "sim", ".", "model", ".", "actuator_names", "if", "actuator", ".", "startswith", "(", "\"pos\"", ")", "]", "self", ".", "_ref_joint_vel_actuator_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "actuator_name2id", "(", "actuator", ")", "for", "actuator", "in", "self", ".", "sim", ".", "model", ".", "actuator_names", "if", "actuator", ".", "startswith", "(", "\"vel\"", ")", "]", "if", "self", ".", "has_gripper_left", ":", "self", ".", "_ref_joint_gripper_left_actuator_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "actuator_name2id", "(", "actuator", ")", "for", "actuator", "in", "self", ".", "sim", ".", "model", ".", "actuator_names", "if", "actuator", ".", "startswith", "(", "\"gripper_l\"", ")", "]", "if", 
"self", ".", "has_gripper_right", ":", "self", ".", "_ref_joint_gripper_right_actuator_indexes", "=", "[", "self", ".", "sim", ".", "model", ".", "actuator_name2id", "(", "actuator", ")", "for", "actuator", "in", "self", ".", "sim", ".", "model", ".", "actuator_names", "if", "actuator", ".", "startswith", "(", "\"gripper_r\"", ")", "]", "if", "self", ".", "has_gripper_right", ":", "# IDs of sites for gripper visualization", "self", ".", "eef_site_id", "=", "self", ".", "sim", ".", "model", ".", "site_name2id", "(", "\"grip_site\"", ")", "self", ".", "eef_cylinder_id", "=", "self", ".", "sim", ".", "model", ".", "site_name2id", "(", "\"grip_site_cylinder\"", ")" ]
Sets up references for robots, grippers, and objects.
[ "Sets", "up", "references", "for", "robots", "grippers", "and", "objects", "." ]
python
train
MrYsLab/PyMata
PyMata/pymata.py
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L594-L616
def i2c_read(self, address, register, number_of_bytes, read_type, cb=None): """ This method requests the read of an i2c device. Results are retrieved by a call to i2c_get_read_data(). If a callback method is provided, when data is received from the device it will be sent to the callback method :param address: i2c device address :param register: register number (can be set to zero) :param number_of_bytes: number of bytes expected to be returned :param read_type: I2C_READ or I2C_READ_CONTINUOUSLY :param cb: Optional callback function to report i2c data as result of read command """ data = [address, read_type, register & 0x7f, (register >> 7) & 0x7f, number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f] # add or update entry in i2c_map for reply self._command_handler.i2c_map[address] = [cb, None] self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)
[ "def", "i2c_read", "(", "self", ",", "address", ",", "register", ",", "number_of_bytes", ",", "read_type", ",", "cb", "=", "None", ")", ":", "data", "=", "[", "address", ",", "read_type", ",", "register", "&", "0x7f", ",", "(", "register", ">>", "7", ")", "&", "0x7f", ",", "number_of_bytes", "&", "0x7f", ",", "(", "number_of_bytes", ">>", "7", ")", "&", "0x7f", "]", "# add or update entry in i2c_map for reply", "self", ".", "_command_handler", ".", "i2c_map", "[", "address", "]", "=", "[", "cb", ",", "None", "]", "self", ".", "_command_handler", ".", "send_sysex", "(", "self", ".", "_command_handler", ".", "I2C_REQUEST", ",", "data", ")" ]
This method requests the read of an i2c device. Results are retrieved by a call to i2c_get_read_data(). If a callback method is provided, when data is received from the device it will be sent to the callback method :param address: i2c device address :param register: register number (can be set to zero) :param number_of_bytes: number of bytes expected to be returned :param read_type: I2C_READ or I2C_READ_CONTINUOUSLY :param cb: Optional callback function to report i2c data as result of read command
[ "This", "method", "requests", "the", "read", "of", "an", "i2c", "device", ".", "Results", "are", "retrieved", "by", "a", "call", "to", "i2c_get_read_data", "()", ".", "If", "a", "callback", "method", "is", "provided", "when", "data", "is", "received", "from", "the", "device", "it", "will", "be", "sent", "to", "the", "callback", "method" ]
python
valid
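The interesting detail in `i2c_read` above is the Firmata-style 7-bit packing of the register and byte count; a standalone check of that encoding with hypothetical values.

register = 0x2D          # hypothetical device register
number_of_bytes = 300    # wider than 7 bits, to exercise the split

lsb, msb = register & 0x7f, (register >> 7) & 0x7f
n_lsb, n_msb = number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f

# The receiving side reassembles each value from its two 7-bit halves.
assert (msb << 7) | lsb == register
assert (n_msb << 7) | n_lsb == number_of_bytes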
thunder-project/thunder
thunder/blocks/local.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/local.py#L54-L75
def unchunk(self): """ Reconstitute the chunked array back into a full ndarray. Returns ------- ndarray """ if self.padding != len(self.shape)*(0,): shape = self.values.shape arr = empty(shape, dtype=object) for inds in product(*[arange(s) for s in shape]): slices = [] for i, p, n in zip(inds, self.padding, shape): start = None if (i == 0 or p == 0) else p stop = None if (i == n-1 or p == 0) else -p slices.append(slice(start, stop, None)) arr[inds] = self.values[inds][tuple(slices)] else: arr = self.values return allstack(arr.tolist())
[ "def", "unchunk", "(", "self", ")", ":", "if", "self", ".", "padding", "!=", "len", "(", "self", ".", "shape", ")", "*", "(", "0", ",", ")", ":", "shape", "=", "self", ".", "values", ".", "shape", "arr", "=", "empty", "(", "shape", ",", "dtype", "=", "object", ")", "for", "inds", "in", "product", "(", "*", "[", "arange", "(", "s", ")", "for", "s", "in", "shape", "]", ")", ":", "slices", "=", "[", "]", "for", "i", ",", "p", ",", "n", "in", "zip", "(", "inds", ",", "self", ".", "padding", ",", "shape", ")", ":", "start", "=", "None", "if", "(", "i", "==", "0", "or", "p", "==", "0", ")", "else", "p", "stop", "=", "None", "if", "(", "i", "==", "n", "-", "1", "or", "p", "==", "0", ")", "else", "-", "p", "slices", ".", "append", "(", "slice", "(", "start", ",", "stop", ",", "None", ")", ")", "arr", "[", "inds", "]", "=", "self", ".", "values", "[", "inds", "]", "[", "tuple", "(", "slices", ")", "]", "else", ":", "arr", "=", "self", ".", "values", "return", "allstack", "(", "arr", ".", "tolist", "(", ")", ")" ]
Reconstitute the chunked array back into a full ndarray. Returns ------- ndarray
[ "Reconstitute", "the", "chunked", "array", "back", "into", "a", "full", "ndarray", "." ]
python
train
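The padded branch of `unchunk` trims `p` elements from interior chunk edges while keeping outer edges; a small illustration of the slice construction it builds.

def trim_slice(i, n, p):
    # Chunk at grid index i of n along one axis, with padding p.
    start = None if (i == 0 or p == 0) else p
    stop = None if (i == n - 1 or p == 0) else -p
    return slice(start, stop)

print(trim_slice(0, 3, 2))   # slice(None, -2, None): keep the left edge
print(trim_slice(1, 3, 2))   # slice(2, -2, None): trim both sides
print(trim_slice(2, 3, 2))   # slice(2, None, None): keep the right edge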
inspirehep/isbnid
isbn/isbn.py
https://github.com/inspirehep/isbnid/blob/e8136a8bd2212b8ebd77c081fa30bb4dad0e4a75/isbn/isbn.py#L108-L121
def isbn10(self):
    '''
    Encode ISBN number in ISBN10 format
    Raises exception if Bookland number different from 978
    @rtype:  string
    @return: ISBN formatted as ISBN10
    '''
    if self._id[0:3] != '978':
        raise ISBNError("Invalid Bookland code: {}".format(self._id[0:3]))
    digit10 = _digit10(self._id[3:12])
    if digit10 == 10:
        return self._id[3:12] + 'X'
    else:
        return self._id[3:12] + str(digit10)
[ "def", "isbn10", "(", "self", ")", ":", "if", "self", ".", "_id", "[", "0", ":", "3", "]", "!=", "'978'", ":", "raise", "ISBNError", "(", "\"Invalid Bookland code: {}\"", ".", "format", "(", "self", ".", "_id", "[", "0", ":", "3", "]", ")", ")", "digit10", "=", "_digit10", "(", "self", ".", "_id", "[", "3", ":", "12", "]", ")", "if", "digit10", "==", "10", ":", "return", "self", ".", "_id", "[", "3", ":", "12", "]", "+", "'X'", "else", ":", "return", "self", ".", "_id", "[", "3", ":", "12", "]", "+", "str", "(", "digit10", ")" ]
Encode ISBN number in ISBN10 format
Raises exception if Bookland number different from 978
@rtype:  string
@return: ISBN formatted as ISBN10
[ "Encode", "ISBN", "number", "in", "ISBN10", "format", "Raises", "exception", "if", "Bookland", "number", "different", "from", "978" ]
python
train
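The `_digit10` helper is not included in the record; the standard ISBN-10 check-digit computation below matches how it is used above (a sketch, not necessarily the library's own code).

def digit10(isbn9):
    # Weighted sum of the nine digits with weights 10..2, modulo 11;
    # a result of 10 is rendered as 'X' by the caller.
    total = sum(w * int(d) for w, d in zip(range(10, 1, -1), isbn9))
    return (11 - total % 11) % 11

assert digit10("030640615") == 2   # ISBN 0-306-40615-2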
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py#L28-L49
def get_consumer_action(self, consumer_id, consumer_action_id, publisher_id=None): """GetConsumerAction. Get details about a specific consumer action. :param str consumer_id: ID for a consumer. :param str consumer_action_id: ID for a consumerActionId. :param str publisher_id: :rtype: :class:`<ConsumerAction> <azure.devops.v5_0.service_hooks.models.ConsumerAction>` """ route_values = {} if consumer_id is not None: route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str') if consumer_action_id is not None: route_values['consumerActionId'] = self._serialize.url('consumer_action_id', consumer_action_id, 'str') query_parameters = {} if publisher_id is not None: query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str') response = self._send(http_method='GET', location_id='c3428e90-7a69-4194-8ed8-0f153185ee0d', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ConsumerAction', response)
[ "def", "get_consumer_action", "(", "self", ",", "consumer_id", ",", "consumer_action_id", ",", "publisher_id", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "consumer_id", "is", "not", "None", ":", "route_values", "[", "'consumerId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'consumer_id'", ",", "consumer_id", ",", "'str'", ")", "if", "consumer_action_id", "is", "not", "None", ":", "route_values", "[", "'consumerActionId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'consumer_action_id'", ",", "consumer_action_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "publisher_id", "is", "not", "None", ":", "query_parameters", "[", "'publisherId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'publisher_id'", ",", "publisher_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'c3428e90-7a69-4194-8ed8-0f153185ee0d'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'ConsumerAction'", ",", "response", ")" ]
GetConsumerAction. Get details about a specific consumer action. :param str consumer_id: ID for a consumer. :param str consumer_action_id: ID for a consumerActionId. :param str publisher_id: :rtype: :class:`<ConsumerAction> <azure.devops.v5_0.service_hooks.models.ConsumerAction>`
[ "GetConsumerAction", ".", "Get", "details", "about", "a", "specific", "consumer", "action", ".", ":", "param", "str", "consumer_id", ":", "ID", "for", "a", "consumer", ".", ":", "param", "str", "consumer_action_id", ":", "ID", "for", "a", "consumerActionId", ".", ":", "param", "str", "publisher_id", ":", ":", "rtype", ":", ":", "class", ":", "<ConsumerAction", ">", "<azure", ".", "devops", ".", "v5_0", ".", "service_hooks", ".", "models", ".", "ConsumerAction", ">" ]
python
train
senaite/senaite.core
bika/lims/browser/worksheet/views/add_analyses.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/add_analyses.py#L153-L168
def handle_submit(self): """Handle form submission """ wst_uid = self.request.form.get("getWorksheetTemplate") if not wst_uid: return False layout = self.context.getLayout() wst = api.get_object_by_uid(wst_uid) self.request["context_uid"] = api.get_uid(self.context) self.context.applyWorksheetTemplate(wst) if len(self.context.getLayout()) == len(layout): return False return True
[ "def", "handle_submit", "(", "self", ")", ":", "wst_uid", "=", "self", ".", "request", ".", "form", ".", "get", "(", "\"getWorksheetTemplate\"", ")", "if", "not", "wst_uid", ":", "return", "False", "layout", "=", "self", ".", "context", ".", "getLayout", "(", ")", "wst", "=", "api", ".", "get_object_by_uid", "(", "wst_uid", ")", "self", ".", "request", "[", "\"context_uid\"", "]", "=", "api", ".", "get_uid", "(", "self", ".", "context", ")", "self", ".", "context", ".", "applyWorksheetTemplate", "(", "wst", ")", "if", "len", "(", "self", ".", "context", ".", "getLayout", "(", ")", ")", "==", "len", "(", "layout", ")", ":", "return", "False", "return", "True" ]
Handle form submission
[ "Handle", "form", "submission" ]
python
train
dls-controls/pymalcolm
malcolm/modules/pmac/infos.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/pmac/infos.py#L83-L101
def _make_padded_ramp(self, v1, v2, pad_velocity, total_time): """Makes a ramp that looks like this: v1 \______ pad_velocity | |\ | | \v2 t1 tp t2 Such that whole section takes total_time """ # The time taken to ramp from v1 to pad_velocity t1 = self.acceleration_time(v1, pad_velocity) # Then on to v2 t2 = self.acceleration_time(pad_velocity, v2) # The distance during the pad tp = total_time - t1 - t2 # Yield the points yield t1, pad_velocity yield tp, pad_velocity yield t2, v2
[ "def", "_make_padded_ramp", "(", "self", ",", "v1", ",", "v2", ",", "pad_velocity", ",", "total_time", ")", ":", "# The time taken to ramp from v1 to pad_velocity", "t1", "=", "self", ".", "acceleration_time", "(", "v1", ",", "pad_velocity", ")", "# Then on to v2", "t2", "=", "self", ".", "acceleration_time", "(", "pad_velocity", ",", "v2", ")", "# The distance during the pad", "tp", "=", "total_time", "-", "t1", "-", "t2", "# Yield the points", "yield", "t1", ",", "pad_velocity", "yield", "tp", ",", "pad_velocity", "yield", "t2", ",", "v2" ]
Makes a ramp that looks like this: v1 \______ pad_velocity | |\ | | \v2 t1 tp t2 Such that whole section takes total_time
[ "Makes", "a", "ramp", "that", "looks", "like", "this", ":" ]
python
train
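A standalone driver for the padded-ramp idea above, with an assumed constant acceleration standing in for `self.acceleration_time`; the three yielded (duration, velocity) segments sum to `total_time`.

def acceleration_time(v1, v2, a=10.0):
    return abs(v2 - v1) / a          # assumed constant-acceleration model

def make_padded_ramp(v1, v2, pad_velocity, total_time):
    t1 = acceleration_time(v1, pad_velocity)
    t2 = acceleration_time(pad_velocity, v2)
    tp = total_time - t1 - t2
    yield t1, pad_velocity
    yield tp, pad_velocity
    yield t2, v2

segments = list(make_padded_ramp(5.0, 2.0, 0.0, 2.0))
assert abs(sum(t for t, _ in segments) - 2.0) < 1e-9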
Julian/jsonschema
docs/jsonschema_role.py
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/docs/jsonschema_role.py#L43-L75
def fetch_or_load(spec_path):
    """
    Fetch a new specification or use the cache if it's current.

    Arguments:

        spec_path: the path to a cached specification

    """
    headers = {}

    try:
        modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))
        date = modified.strftime("%a, %d %b %Y %I:%M:%S UTC")
        headers["If-Modified-Since"] = date
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise

    request = urllib.Request(VALIDATION_SPEC, headers=headers)
    response = urllib.urlopen(request, cafile=certifi.where())

    if response.code == 200:
        with open(spec_path, "w+b") as spec:
            spec.writelines(response)
            spec.seek(0)
            return html.parse(spec)

    with open(spec_path) as spec:
        return html.parse(spec)
[ "def", "fetch_or_load", "(", "spec_path", ")", ":", "headers", "=", "{", "}", "try", ":", "modified", "=", "datetime", ".", "utcfromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "spec_path", ")", ")", "date", "=", "modified", ".", "strftime", "(", "\"%a, %d %b %Y %I:%M:%S UTC\"", ")", "headers", "[", "\"If-Modified-Since\"", "]", "=", "date", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise", "request", "=", "urllib", ".", "Request", "(", "VALIDATION_SPEC", ",", "headers", "=", "headers", ")", "response", "=", "urllib", ".", "urlopen", "(", "request", ",", "cafile", "=", "certifi", ".", "where", "(", ")", ")", "if", "response", ".", "code", "==", "200", ":", "with", "open", "(", "spec_path", ",", "\"w+b\"", ")", "as", "spec", ":", "spec", ".", "writelines", "(", "response", ")", "spec", ".", "seek", "(", "0", ")", "return", "html", ".", "parse", "(", "spec", ")", "with", "open", "(", "spec_path", ")", "as", "spec", ":", "return", "html", ".", "parse", "(", "spec", ")" ]
Fetch a new specification or use the cache if it's current.

Arguments:

    spec_path: the path to a cached specification
[ "Fetch", "a", "new", "specification", "or", "use", "the", "cache", "if", "it", "s", "current", "." ]
python
train
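`fetch_or_load` above is a conditional-GET cache; a generic sketch of the same pattern with the standard library (URL and cache path are placeholders, and unlike the record it handles the 304 response explicitly).

import os
from datetime import datetime, timezone
from urllib import error, request

def cached_fetch(url, cache_path):
    headers = {}
    if os.path.exists(cache_path):
        mtime = datetime.fromtimestamp(os.path.getmtime(cache_path), timezone.utc)
        headers["If-Modified-Since"] = mtime.strftime("%a, %d %b %Y %H:%M:%S GMT")
    try:
        with request.urlopen(request.Request(url, headers=headers)) as resp:
            with open(cache_path, "wb") as f:
                f.write(resp.read())
    except error.HTTPError as err:
        if err.code != 304:          # 304 Not Modified: cache is current
            raise
    with open(cache_path, "rb") as f:
        return f.read()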
MisterWil/abodepy
abodepy/devices/switch.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/switch.py#L10-L17
def switch_on(self): """Turn the switch on.""" success = self.set_status(CONST.STATUS_ON_INT) if success: self._json_state['status'] = CONST.STATUS_ON return success
[ "def", "switch_on", "(", "self", ")", ":", "success", "=", "self", ".", "set_status", "(", "CONST", ".", "STATUS_ON_INT", ")", "if", "success", ":", "self", ".", "_json_state", "[", "'status'", "]", "=", "CONST", ".", "STATUS_ON", "return", "success" ]
Turn the switch on.
[ "Turn", "the", "switch", "on", "." ]
python
train
quantopian/zipline
zipline/pipeline/expression.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L213-L236
def _validate(self): """ Ensure that our expression string has variables of the form x_0, x_1, ... x_(N - 1), where N is the length of our inputs. """ variable_names, _unused = getExprNames(self._expr, {}) expr_indices = [] for name in variable_names: if name == 'inf': continue match = _VARIABLE_NAME_RE.match(name) if not match: raise ValueError("%r is not a valid variable name" % name) expr_indices.append(int(match.group(2))) expr_indices.sort() expected_indices = list(range(len(self.inputs))) if expr_indices != expected_indices: raise ValueError( "Expected %s for variable indices, but got %s" % ( expected_indices, expr_indices, ) ) super(NumericalExpression, self)._validate()
[ "def", "_validate", "(", "self", ")", ":", "variable_names", ",", "_unused", "=", "getExprNames", "(", "self", ".", "_expr", ",", "{", "}", ")", "expr_indices", "=", "[", "]", "for", "name", "in", "variable_names", ":", "if", "name", "==", "'inf'", ":", "continue", "match", "=", "_VARIABLE_NAME_RE", ".", "match", "(", "name", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"%r is not a valid variable name\"", "%", "name", ")", "expr_indices", ".", "append", "(", "int", "(", "match", ".", "group", "(", "2", ")", ")", ")", "expr_indices", ".", "sort", "(", ")", "expected_indices", "=", "list", "(", "range", "(", "len", "(", "self", ".", "inputs", ")", ")", ")", "if", "expr_indices", "!=", "expected_indices", ":", "raise", "ValueError", "(", "\"Expected %s for variable indices, but got %s\"", "%", "(", "expected_indices", ",", "expr_indices", ",", ")", ")", "super", "(", "NumericalExpression", ",", "self", ")", ".", "_validate", "(", ")" ]
Ensure that our expression string has variables of the form x_0, x_1, ... x_(N - 1), where N is the length of our inputs.
[ "Ensure", "that", "our", "expression", "string", "has", "variables", "of", "the", "form", "x_0", "x_1", "...", "x_", "(", "N", "-", "1", ")", "where", "N", "is", "the", "length", "of", "our", "inputs", "." ]
python
train
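A sketch of the naming rule `_validate` enforces, with an assumed two-group pattern equivalent to `_VARIABLE_NAME_RE` (the real regex lives in the zipline module).

import re

VARIABLE_NAME_RE = re.compile(r"^(x_)([0-9]+)$")   # assumed shape: x_<index>

names = ["x_0", "x_2", "x_1"]
indices = sorted(int(VARIABLE_NAME_RE.match(n).group(2)) for n in names)
assert indices == list(range(len(names)))          # must be exactly 0..N-1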
danilobellini/audiolazy
audiolazy/lazy_stream.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_stream.py#L290-L297
def copy(self): """ Returns a "T" (tee) copy of the given stream, allowing the calling stream to continue being used. """ a, b = it.tee(self._data) # 2 generators, not thread-safe self._data = a return Stream(b)
[ "def", "copy", "(", "self", ")", ":", "a", ",", "b", "=", "it", ".", "tee", "(", "self", ".", "_data", ")", "# 2 generators, not thread-safe", "self", ".", "_data", "=", "a", "return", "Stream", "(", "b", ")" ]
Returns a "T" (tee) copy of the given stream, allowing the calling stream to continue being used.
[ "Returns", "a", "T", "(", "tee", ")", "copy", "of", "the", "given", "stream", "allowing", "the", "calling", "stream", "to", "continue", "being", "used", "." ]
python
train
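`itertools.tee` does the work in `copy` above; a self-contained illustration of getting two independent iterators from one generator.

import itertools as it

gen = (x * x for x in range(5))
a, b = it.tee(gen)     # two independent iterators over the same generator
print(list(a))         # [0, 1, 4, 9, 16]
print(list(b))         # [0, 1, 4, 9, 16] - b is unaffected by draining a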
DomainTools/python_api
domaintools/api.py
https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/api.py#L10-L12
def delimited(items, character='|'):
    """Returns a character delimited version of the provided list as a Python string"""
    return character.join(items) if type(items) in (list, tuple, set) else items
[ "def", "delimited", "(", "items", ",", "character", "=", "'|'", ")", ":", "return", "'|'", ".", "join", "(", "items", ")", "if", "type", "(", "items", ")", "in", "(", "list", ",", "tuple", ",", "set", ")", "else", "items" ]
Returns a character delimited version of the provided list as a Python string
[ "Returns", "a", "character", "delimited", "version", "of", "the", "provided", "list", "as", "a", "Python", "string" ]
python
train
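A quick check of both branches of the helper above (with the `character` parameter actually honored, as in the fixed code).

assert delimited(["a", "b", "c"]) == "a|b|c"
assert delimited(["a", "b"], character=",") == "a,b"
assert delimited("already-a-string") == "already-a-string"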
yougov/openpack
openpack/basepack.py
https://github.com/yougov/openpack/blob/1412ec34c1bab6ba6c8ae5490c2205d696f13717/openpack/basepack.py#L46-L52
def related(self, reltype): """Return a list of parts related to this one via reltype.""" parts = [] package = getattr(self, 'package', None) or self for rel in self.relationships.types.get(reltype, []): parts.append(package[posixpath.join(self.base, rel.target)]) return parts
[ "def", "related", "(", "self", ",", "reltype", ")", ":", "parts", "=", "[", "]", "package", "=", "getattr", "(", "self", ",", "'package'", ",", "None", ")", "or", "self", "for", "rel", "in", "self", ".", "relationships", ".", "types", ".", "get", "(", "reltype", ",", "[", "]", ")", ":", "parts", ".", "append", "(", "package", "[", "posixpath", ".", "join", "(", "self", ".", "base", ",", "rel", ".", "target", ")", "]", ")", "return", "parts" ]
Return a list of parts related to this one via reltype.
[ "Return", "a", "list", "of", "parts", "related", "to", "this", "one", "via", "reltype", "." ]
python
test
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py#L243-L266
def edit_distance_matrix(train_x, train_y=None): """Calculate the edit distance. Args: train_x: A list of neural architectures. train_y: A list of neural architectures. Returns: An edit-distance matrix. """ if train_y is None: ret = np.zeros((train_x.shape[0], train_x.shape[0])) for x_index, x in enumerate(train_x): for y_index, y in enumerate(train_x): if x_index == y_index: ret[x_index][y_index] = 0 elif x_index < y_index: ret[x_index][y_index] = edit_distance(x, y) else: ret[x_index][y_index] = ret[y_index][x_index] return ret ret = np.zeros((train_x.shape[0], train_y.shape[0])) for x_index, x in enumerate(train_x): for y_index, y in enumerate(train_y): ret[x_index][y_index] = edit_distance(x, y) return ret
[ "def", "edit_distance_matrix", "(", "train_x", ",", "train_y", "=", "None", ")", ":", "if", "train_y", "is", "None", ":", "ret", "=", "np", ".", "zeros", "(", "(", "train_x", ".", "shape", "[", "0", "]", ",", "train_x", ".", "shape", "[", "0", "]", ")", ")", "for", "x_index", ",", "x", "in", "enumerate", "(", "train_x", ")", ":", "for", "y_index", ",", "y", "in", "enumerate", "(", "train_x", ")", ":", "if", "x_index", "==", "y_index", ":", "ret", "[", "x_index", "]", "[", "y_index", "]", "=", "0", "elif", "x_index", "<", "y_index", ":", "ret", "[", "x_index", "]", "[", "y_index", "]", "=", "edit_distance", "(", "x", ",", "y", ")", "else", ":", "ret", "[", "x_index", "]", "[", "y_index", "]", "=", "ret", "[", "y_index", "]", "[", "x_index", "]", "return", "ret", "ret", "=", "np", ".", "zeros", "(", "(", "train_x", ".", "shape", "[", "0", "]", ",", "train_y", ".", "shape", "[", "0", "]", ")", ")", "for", "x_index", ",", "x", "in", "enumerate", "(", "train_x", ")", ":", "for", "y_index", ",", "y", "in", "enumerate", "(", "train_y", ")", ":", "ret", "[", "x_index", "]", "[", "y_index", "]", "=", "edit_distance", "(", "x", ",", "y", ")", "return", "ret" ]
Calculate the edit distance. Args: train_x: A list of neural architectures. train_y: A list of neural architectures. Returns: An edit-distance matrix.
[ "Calculate", "the", "edit", "distance", ".", "Args", ":", "train_x", ":", "A", "list", "of", "neural", "architectures", ".", "train_y", ":", "A", "list", "of", "neural", "architectures", ".", "Returns", ":", "An", "edit", "-", "distance", "matrix", "." ]
python
train
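The self-distance branch above fills the upper triangle and mirrors it; a toy check of that symmetry, with plain `abs` standing in for the architecture edit distance.

import numpy as np

def distance_matrix(xs, dist):
    n = len(xs)
    ret = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            ret[i][j] = ret[j][i] = dist(xs[i], xs[j])
    return ret

m = distance_matrix([1.0, 4.0, 6.0], lambda a, b: abs(a - b))
assert (m == m.T).all() and m[0][1] == 3.0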
mitsei/dlkit
dlkit/json_/osid/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/searches.py#L65-L91
def limit_result_set(self, start, end): """By default, searches return all matching results. This method restricts the number of results by setting the start and end of the result set, starting from 1. The starting and ending results can be used for paging results when a certain ordering is requested. The ending position must be greater than the starting position. arg: start (cardinal): the start of the result set arg: end (cardinal): the end of the result set raise: InvalidArgument - ``end`` is less than or equal to ``start`` *compliance: mandatory -- This method must be implemented.* """ if not isinstance(start, int) or not isinstance(end, int): raise errors.InvalidArgument('start and end arguments must be integers.') if end <= start: raise errors.InvalidArgument('End must be greater than start.') # because Python is 0 indexed # Spec says that passing in (1, 25) should include 25 entries (1 - 25) # Python indices 0 - 24 # Python [#:##] stops before the last index, but does not include it self._limit_result_set_start = start - 1 self._limit_result_set_end = end
[ "def", "limit_result_set", "(", "self", ",", "start", ",", "end", ")", ":", "if", "not", "isinstance", "(", "start", ",", "int", ")", "or", "not", "isinstance", "(", "end", ",", "int", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'start and end arguments must be integers.'", ")", "if", "end", "<=", "start", ":", "raise", "errors", ".", "InvalidArgument", "(", "'End must be greater than start.'", ")", "# because Python is 0 indexed", "# Spec says that passing in (1, 25) should include 25 entries (1 - 25)", "# Python indices 0 - 24", "# Python [#:##] stops before the last index, but does not include it", "self", ".", "_limit_result_set_start", "=", "start", "-", "1", "self", ".", "_limit_result_set_end", "=", "end" ]
By default, searches return all matching results. This method restricts the number of results by setting the start and end of the result set, starting from 1. The starting and ending results can be used for paging results when a certain ordering is requested. The ending position must be greater than the starting position. arg: start (cardinal): the start of the result set arg: end (cardinal): the end of the result set raise: InvalidArgument - ``end`` is less than or equal to ``start`` *compliance: mandatory -- This method must be implemented.*
[ "By", "default", "searches", "return", "all", "matching", "results", "." ]
python
train
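The 1-based, inclusive spec maps onto Python's 0-based half-open slicing; a tiny illustration of the conversion performed above.

results = list(range(1, 101))     # pretend result ids 1..100
start, end = 1, 25                # spec: include results 1 through 25
page = results[start - 1:end]     # becomes the Python slice [0:25]
assert len(page) == 25 and page[0] == 1 and page[-1] == 25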
watson-developer-cloud/python-sdk
ibm_watson/visual_recognition_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/visual_recognition_v3.py#L1596-L1607
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'width') and self.width is not None: _dict['width'] = self.width if hasattr(self, 'height') and self.height is not None: _dict['height'] = self.height if hasattr(self, 'left') and self.left is not None: _dict['left'] = self.left if hasattr(self, 'top') and self.top is not None: _dict['top'] = self.top return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'width'", ")", "and", "self", ".", "width", "is", "not", "None", ":", "_dict", "[", "'width'", "]", "=", "self", ".", "width", "if", "hasattr", "(", "self", ",", "'height'", ")", "and", "self", ".", "height", "is", "not", "None", ":", "_dict", "[", "'height'", "]", "=", "self", ".", "height", "if", "hasattr", "(", "self", ",", "'left'", ")", "and", "self", ".", "left", "is", "not", "None", ":", "_dict", "[", "'left'", "]", "=", "self", ".", "left", "if", "hasattr", "(", "self", ",", "'top'", ")", "and", "self", ".", "top", "is", "not", "None", ":", "_dict", "[", "'top'", "]", "=", "self", ".", "top", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
pbacterio/dj-paas-env
dj_paas_env/database.py
https://github.com/pbacterio/dj-paas-env/blob/1f86bf0a0253cac9eae2442b1e3a8643426077b4/dj_paas_env/database.py#L70-L81
def data_dir(default='data'):
    """
    Return persistent data directory or ``default`` if not found
    Warning: Do not use this directory to store sqlite databases in production
    """
    if 'OPENSHIFT_DATA_DIR' in os.environ:
        return os.environ['OPENSHIFT_DATA_DIR']
    if 'GONDOR_DATA_DIR' in os.environ:
        return os.environ['GONDOR_DATA_DIR']
    if provider.detect() == provider.DOTCLOUD:
        return os.path.expanduser('~/data')
    return default
[ "def", "data_dir", "(", "default", "=", "'data'", ")", ":", "if", "'OPENSHIFT_DATA_DIR'", "in", "os", ".", "environ", ":", "return", "os", ".", "environ", "[", "'OPENSHIFT_DATA_DIR'", "]", "if", "'GONDOR_DATA_DIR'", "in", "os", ".", "environ", ":", "return", "os", ".", "environ", "[", "'GONDOR_DATA_DIR'", "]", "if", "provider", ".", "detect", "(", ")", "==", "provider", ".", "DOTCLOUD", ":", "return", "os", ".", "path", ".", "expanduser", "(", "'~/data'", ")", "return", "default" ]
Return persistent data directory or ``default`` if not found
Warning: Do not use this directory to store sqlite databases in production
[ "Return", "persistent", "data", "directory", "or", "default", "if", "not", "found", "Warning", ":", "Do", "not", "use", "this", "directory", "to", "store", "sqlite", "databases", "in", "producction" ]
python
train
awslabs/serverless-application-model
samtranslator/intrinsics/actions.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/actions.py#L41-L52
def can_handle(self, input_dict): """ Validates that the input dictionary contains only one key and is of the given intrinsic_name :param input_dict: Input dictionary representing the intrinsic function :return: True if it matches expected structure, False otherwise """ return input_dict is not None \ and isinstance(input_dict, dict) \ and len(input_dict) == 1 \ and self.intrinsic_name in input_dict
[ "def", "can_handle", "(", "self", ",", "input_dict", ")", ":", "return", "input_dict", "is", "not", "None", "and", "isinstance", "(", "input_dict", ",", "dict", ")", "and", "len", "(", "input_dict", ")", "==", "1", "and", "self", ".", "intrinsic_name", "in", "input_dict" ]
Validates that the input dictionary contains only one key and is of the given intrinsic_name :param input_dict: Input dictionary representing the intrinsic function :return: True if it matches expected structure, False otherwise
[ "Validates", "that", "the", "input", "dictionary", "contains", "only", "one", "key", "and", "is", "of", "the", "given", "intrinsic_name" ]
python
train
bxlab/bx-python
lib/bx/align/epo.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/epo.py#L249-L278
def intervals(self, reverse, thr=0): """return a list of (0-based half-open) intervals representing the match regions of the cigar for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)] 4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval) :param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter) :type reverse: boolean :param thr: shift all intervals by this much :type thr: integer :return: list of pairs""" d = [(thr,thr)] dl = 0 for tup in self.cigar_iter(reverse): if tup[1] == "D": dl = tup[0] else: s = d[-1][1] + dl d.append( (s, s+tup[0]) ) assert d[0] == (thr, thr) # assert that nr. of Ms in the interval == sum of produced intervals assert sum( t[0] for t in self.cigar_iter(False) if t[1] == "M" ) == sum( t[1]-t[0] for t in d ) d_sum = sum( t[1]-t[0] for t in d ) assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % (self.start, self.end, self.end-self.start+1, d_sum) return d[1:]
[ "def", "intervals", "(", "self", ",", "reverse", ",", "thr", "=", "0", ")", ":", "d", "=", "[", "(", "thr", ",", "thr", ")", "]", "dl", "=", "0", "for", "tup", "in", "self", ".", "cigar_iter", "(", "reverse", ")", ":", "if", "tup", "[", "1", "]", "==", "\"D\"", ":", "dl", "=", "tup", "[", "0", "]", "else", ":", "s", "=", "d", "[", "-", "1", "]", "[", "1", "]", "+", "dl", "d", ".", "append", "(", "(", "s", ",", "s", "+", "tup", "[", "0", "]", ")", ")", "assert", "d", "[", "0", "]", "==", "(", "thr", ",", "thr", ")", "# assert that nr. of Ms in the interval == sum of produced intervals", "assert", "sum", "(", "t", "[", "0", "]", "for", "t", "in", "self", ".", "cigar_iter", "(", "False", ")", "if", "t", "[", "1", "]", "==", "\"M\"", ")", "==", "sum", "(", "t", "[", "1", "]", "-", "t", "[", "0", "]", "for", "t", "in", "d", ")", "d_sum", "=", "sum", "(", "t", "[", "1", "]", "-", "t", "[", "0", "]", "for", "t", "in", "d", ")", "assert", "self", ".", "end", "-", "self", ".", "start", "+", "1", "==", "d_sum", ",", "\"[ (%d, %d) = %d ] != %d\"", "%", "(", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "end", "-", "self", ".", "start", "+", "1", ",", "d_sum", ")", "return", "d", "[", "1", ":", "]" ]
return a list of (0-based half-open) intervals representing the match regions of the cigar for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)] 4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval) :param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter) :type reverse: boolean :param thr: shift all intervals by this much :type thr: integer :return: list of pairs
[ "return", "a", "list", "of", "(", "0", "-", "based", "half", "-", "open", ")", "intervals", "representing", "the", "match", "regions", "of", "the", "cigar" ]
python
train
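Working the docstring's example: for 4MD4M2DM read left-to-right, each deletion shifts the next match's start, giving [(0,4), (5,9), (11,12)]. A standalone sketch of the same accumulation over (length, op) pairs, assuming M and D ops alternate as `cigar_iter` yields them.

def match_intervals(ops, thr=0):
    d = [(thr, thr)]
    dl = 0
    for length, op in ops:
        if op == "D":
            dl = length              # shift the next match by the gap
        else:
            s = d[-1][1] + dl
            d.append((s, s + length))
    return d[1:]

assert match_intervals(
    [(4, "M"), (1, "D"), (4, "M"), (2, "D"), (1, "M")]
) == [(0, 4), (5, 9), (11, 12)]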
django-treebeard/django-treebeard
treebeard/mp_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L965-L973
def get_prev_sibling(self): """ :returns: The previous node's sibling, or None if it was the leftmost sibling. """ try: return self.get_siblings().filter(path__lt=self.path).reverse()[0] except IndexError: return None
[ "def", "get_prev_sibling", "(", "self", ")", ":", "try", ":", "return", "self", ".", "get_siblings", "(", ")", ".", "filter", "(", "path__lt", "=", "self", ".", "path", ")", ".", "reverse", "(", ")", "[", "0", "]", "except", "IndexError", ":", "return", "None" ]
:returns: The previous node's sibling, or None if it was the leftmost sibling.
[ ":", "returns", ":", "The", "previous", "node", "s", "sibling", "or", "None", "if", "it", "was", "the", "leftmost", "sibling", "." ]
python
train
yfpeng/bioc
bioc/biocjson/decoder.py
https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocjson/decoder.py#L90-L103
def load(fp, **kwargs) -> BioCCollection: """ Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to a BioCCollection object Args: fp: a file containing a JSON document **kwargs: Returns: BioCCollection: a collection """ obj = json.load(fp, **kwargs) return parse_collection(obj)
[ "def", "load", "(", "fp", ",", "*", "*", "kwargs", ")", "->", "BioCCollection", ":", "obj", "=", "json", ".", "load", "(", "fp", ",", "*", "*", "kwargs", ")", "return", "parse_collection", "(", "obj", ")" ]
Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to a BioCCollection object Args: fp: a file containing a JSON document **kwargs: Returns: BioCCollection: a collection
[ "Deserialize", "fp", "(", "a", ".", "read", "()", "-", "supporting", "text", "file", "or", "binary", "file", "containing", "a", "JSON", "document", ")", "to", "a", "BioCCollection", "object", "Args", ":", "fp", ":", "a", "file", "containing", "a", "JSON", "document", "**", "kwargs", ":", "Returns", ":", "BioCCollection", ":", "a", "collection" ]
python
train
yunpian/yunpian-python-sdk
yunpian_python_sdk/ypclient.py
https://github.com/yunpian/yunpian-python-sdk/blob/405a1196ec83fdf29ff454f74ef036974be11970/yunpian_python_sdk/ypclient.py#L85-L105
def api(self, name):
    '''return special API by package's name'''
    assert name, 'name is none'

    api = None
    if flow.__name__ == name:
        api = flow.FlowApi()
    elif sign.__name__ == name:
        api = sign.SignApi()
    elif sms.__name__ == name:
        api = sms.SmsApi()
    elif tpl.__name__ == name:
        api = tpl.TplApi()
    elif user.__name__ == name:
        api = user.UserApi()
    elif voice.__name__ == name:
        api = voice.VoiceApi()

    assert api, "not found api-" + name

    api._init(self._clnt)
    return api
[ "def", "api", "(", "self", ",", "name", ")", ":", "assert", "name", ",", "'name is none'", "if", "flow", ".", "__name__", "==", "name", ":", "api", "=", "flow", ".", "FlowApi", "(", ")", "elif", "sign", ".", "__name__", "==", "name", ":", "api", "=", "sign", ".", "SignApi", "(", ")", "elif", "sms", ".", "__name__", "==", "name", ":", "api", "=", "sms", ".", "SmsApi", "(", ")", "elif", "tpl", ".", "__name__", "==", "name", ":", "api", "=", "tpl", ".", "TplApi", "(", ")", "elif", "user", ".", "__name__", "==", "name", ":", "api", "=", "user", ".", "UserApi", "(", ")", "elif", "voice", ".", "__name__", "==", "name", ":", "api", "=", "voice", ".", "VoiceApi", "(", ")", "assert", "api", ",", "\"not found api-\"", "+", "name", "api", ".", "_init", "(", "self", ".", "_clnt", ")", "return", "api" ]
return special API by package's name
[ "return", "special", "API", "by", "package", "s", "name" ]
python
train
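A hedged sketch of the name-based dispatch above; the YunpianClient constructor and the api-module import path are assumptions drawn from the file path, and the apikey is a placeholder.

from yunpian_python_sdk.ypclient import YunpianClient
from yunpian_python_sdk.api import sms  # assumed module location

client = YunpianClient('apikey-placeholder')
sms_api = client.api(sms.__name__)  # dispatch compares against the module's __name__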
ggaughan/pipe2py
pipe2py/modules/pipeitembuilder.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipeitembuilder.py#L23-L49
def asyncPipeItembuilder(context=None, _INPUT=None, conf=None, **kwargs):
    """A source that asynchronously builds an item. Loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe like object (twisted Deferred iterable of items)
    conf : {
        'attrs': [
            {'key': {'value': 'title'}, 'value': {'value': 'new title'}},
            {'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
        ]
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
    """
    pkwargs = cdicts(opts, kwargs)
    asyncFuncs = yield asyncGetSplits(None, conf['attrs'], **pkwargs)
    _input = yield _INPUT
    finite = utils.finitize(_input)
    inputs = imap(DotDict, finite)
    pieces = yield asyncImap(asyncFuncs[0], inputs)
    results = imap(utils.parse_params, pieces)
    _OUTPUT = imap(DotDict, results)
    returnValue(_OUTPUT)
[ "def", "asyncPipeItembuilder", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "pkwargs", "=", "cdicts", "(", "opts", ",", "kwargs", ")", "asyncFuncs", "=", "yield", "asyncGetSplits", "(", "None", ",", "conf", "[", "'attrs'", "]", ",", "*", "*", "pkwargs", ")", "_input", "=", "yield", "_INPUT", "finite", "=", "utils", ".", "finitize", "(", "_input", ")", "inputs", "=", "imap", "(", "DotDict", ",", "finite", ")", "pieces", "=", "yield", "asyncImap", "(", "asyncFuncs", "[", "0", "]", ",", "inputs", ")", "results", "=", "imap", "(", "utils", ".", "parse_params", ",", "pieces", ")", "_OUTPUT", "=", "imap", "(", "DotDict", ",", "results", ")", "returnValue", "(", "_OUTPUT", ")" ]
A source that asynchronously builds an item. Loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe like object (twisted Deferred iterable of items)
    conf : {
        'attrs': [
            {'key': {'value': 'title'}, 'value': {'value': 'new title'}},
            {'key': {'value': 'desc.content'}, 'value': {'value': 'new desc'}}
        ]
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
[ "A", "source", "that", "asynchronously", "builds", "an", "item", ".", "Loopable", "." ]
python
train
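A sketch of driving the builder from Twisted's inlineCallbacks; the import path mirrors the file above, while the seed item and conf values are illustrative only.

from twisted.internet import defer, reactor
from pipe2py.modules.pipeitembuilder import asyncPipeItembuilder

conf = {'attrs': [{'key': {'value': 'title'}, 'value': {'value': 'new title'}}]}

@defer.inlineCallbacks
def main():
    # _INPUT must be a Deferred iterable of items; a single empty seed item here.
    output = yield asyncPipeItembuilder(_INPUT=defer.succeed([{}]), conf=conf)
    for item in output:
        print(item)
    reactor.stop()

reactor.callWhenRunning(main)
reactor.run()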
MacHu-GWU/angora-project
angora/bot/macro.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/macro.py#L187-L191
def Left(self, n=1, dl=0):
        """Press the left arrow key n times.
        """
        self.Delay(dl)
        self.keyboard.tap_key(self.keyboard.left_key, n)
[ "def", "Left", "(", "self", ",", "n", "=", "1", ",", "dl", "=", "0", ")", ":", "self", ".", "Delay", "(", "dl", ")", "self", ".", "keyboard", ".", "tap_key", "(", "self", ".", "keyboard", ".", "left_key", ",", "n", ")" ]
Press the left arrow key n times.
[ "Press", "the", "left", "arrow", "key", "n", "times", "." ]
python
train
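A sketch of the key-press helper; the Bot class name is an assumption (the entry only shows the method), and the units of the dl delay follow whatever the library's Delay implementation uses.

from angora.bot.macro import Bot  # hypothetical class name, for illustration only

bot = Bot()
bot.Left(n=3, dl=1)  # wait `dl` (per Delay), then tap the left arrow key 3 times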
pybel/pybel-artifactory
src/pybel_artifactory/deploy.py
https://github.com/pybel/pybel-artifactory/blob/720107780a59be2ef08885290dfa519b1da62871/src/pybel_artifactory/deploy.py#L93-L111
def deploy_annotation(filename, module_name, hash_check=True, auth=None):
    """Deploy a file to the Artifactory BEL annotation cache.

    :param str filename: The physical file path
    :param str module_name: The name of the module to deploy to
    :param bool hash_check: Ensure the hash is unique before deploying
    :param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the
     constructor of :class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
    :return: The resource path, if it was deployed successfully, else None.
    :rtype: Optional[str]
    """
    return _deploy_helper(
        filename,
        module_name,
        get_annotation_module_url,
        get_annotation_today,
        hash_check=hash_check,
        auth=auth
    )
[ "def", "deploy_annotation", "(", "filename", ",", "module_name", ",", "hash_check", "=", "True", ",", "auth", "=", "None", ")", ":", "return", "_deploy_helper", "(", "filename", ",", "module_name", ",", "get_annotation_module_url", ",", "get_annotation_today", ",", "hash_check", "=", "hash_check", ",", "auth", "=", "auth", ")" ]
Deploy a file to the Artifactory BEL annotation cache.

    :param str filename: The physical file path
    :param str module_name: The name of the module to deploy to
    :param bool hash_check: Ensure the hash is unique before deploying
    :param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the
     constructor of :class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
    :return: The resource path, if it was deployed successfully, else None.
    :rtype: Optional[str]
[ "Deploy", "a", "file", "to", "the", "Artifactory", "BEL", "annotation", "cache", "." ]
python
train
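A hedged sketch of deploying an annotation file; the filename, module name, and credentials are placeholders, and auth falls back to get_arty_auth() when omitted.

from pybel_artifactory.deploy import deploy_annotation

resource_path = deploy_annotation(
    'cell-line.belanno',        # placeholder local file
    'cell-line',                # placeholder module name
    auth=('user', 'password'),  # placeholder credentials
)

# None means nothing was deployed (e.g. hash_check found the content already present).
print(resource_path)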