Column schema for this dataset preview (each row below lists one field per line, in this order):

  repo              string, length 7 to 54
  path              string, length 4 to 192
  url               string, length 87 to 284
  code              string, length 78 to 104k
  code_tokens       list
  docstring         string, length 1 to 46.9k
  docstring_tokens  list
  language          string, 1 distinct value (python)
  partition         string, 3 distinct values (train / valid / test)
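Since every record pairs a code string with its docstring and pre-tokenized forms, a small consumer script makes the schema concrete. The sketch below is illustrative only: it assumes the records have been exported as JSON Lines with exactly the field names listed above, and the file name records.jsonl plus the helper names iter_records and summarize are placeholders, not anything defined by this dataset.

import json
from collections import Counter


def iter_records(path):
    """Yield one dict per line from a JSON Lines export whose keys match the
    schema above: repo, path, url, code, code_tokens, docstring,
    docstring_tokens, language, partition."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


def summarize(path):
    """Count records per partition and compute the mean code_tokens length."""
    partitions = Counter()
    total_tokens = 0
    n = 0
    for rec in iter_records(path):
        partitions[rec["partition"]] += 1
        total_tokens += len(rec["code_tokens"])
        n += 1
    return partitions, (total_tokens / n if n else 0.0)


if __name__ == "__main__":
    # "records.jsonl" is a placeholder path, not part of the source preview.
    counts, avg_tokens = summarize("records.jsonl")
    print(dict(counts), "avg code_tokens per record: %.1f" % avg_tokens)

Run against such an export, this prints the per-partition row counts (train/valid/test in this preview) and the average number of code tokens per record. The sample rows follow.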
junzis/pyModeS
pyModeS/decoder/bds/bds60.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds60.py#L104-L119
def ias60(msg):
    """Indicated airspeed

    Args:
        msg (String): 28 bytes hexadecimal message (BDS60) string

    Returns:
        int: indicated airspeed in knots
    """
    d = hex2bin(data(msg))

    if d[12] == '0':
        return None

    ias = bin2int(d[13:23])    # kts
    return ias
[ "def", "ias60", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "12", "]", "==", "'0'", ":", "return", "None", "ias", "=", "bin2int", "(", "d", "[", "13", ":", "23", "]", ")", "# kts", "return", "ias" ]
Indicated airspeed Args: msg (String): 28 bytes hexadecimal message (BDS60) string Returns: int: indicated airspeed in knots
[ "Indicated", "airspeed" ]
python
train
h2oai/h2o-3
scripts/extractGLRMRuntimeJavaLog.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/extractGLRMRuntimeJavaLog.py#L115-L132
def main(argv):
    """
    Main program.  Take user input, parse it and call other functions to
    execute the commands and extract run summary and store run result in
    json file

    @return: none
    """
    global g_test_root_dir
    global g_temp_filename

    if len(argv) < 2:
        print("invoke this script as python extractGLRMRuntimeJavaLog.py javatextlog.\n")
        sys.exit(1)
    else:   # we may be in business
        javaLogText = argv[1]  # filename while java log is stored

        print("your java text is {0}".format(javaLogText))
        extractRunInto(javaLogText)
[ "def", "main", "(", "argv", ")", ":", "global", "g_test_root_dir", "global", "g_temp_filename", "if", "len", "(", "argv", ")", "<", "2", ":", "print", "(", "\"invoke this script as python extractGLRMRuntimeJavaLog.py javatextlog.\\n\"", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "# we may be in business", "javaLogText", "=", "argv", "[", "1", "]", "# filename while java log is stored", "print", "(", "\"your java text is {0}\"", ".", "format", "(", "javaLogText", ")", ")", "extractRunInto", "(", "javaLogText", ")" ]
Main program. Take user input, parse it and call other functions to execute the commands and extract run summary and store run result in json file @return: none
[ "Main", "program", ".", "Take", "user", "input", "parse", "it", "and", "call", "other", "functions", "to", "execute", "the", "commands", "and", "extract", "run", "summary", "and", "store", "run", "result", "in", "json", "file" ]
python
test
nats-io/python-nats
nats/io/client.py
https://github.com/nats-io/python-nats/blob/4a409319c409e7e55ce8377b64b406375c5f455b/nats/io/client.py#L983-L1009
def _process_info(self, info_line):
    """
    Process INFO lines sent by the server to reconfigure client
    with latest updates from cluster to enable server discovery.
    """
    info = tornado.escape.json_decode(info_line.decode())
    if 'connect_urls' in info:
        if info['connect_urls']:
            connect_urls = []
            for connect_url in info['connect_urls']:
                uri = urlparse("nats://%s" % connect_url)
                srv = Srv(uri)
                srv.discovered = True

                # Filter for any similar server in the server pool already.
                should_add = True
                for s in self._server_pool:
                    if uri.netloc == s.uri.netloc:
                        should_add = False

                if should_add:
                    connect_urls.append(srv)

            if self.options["dont_randomize"] is not True:
                shuffle(connect_urls)
            for srv in connect_urls:
                self._server_pool.append(srv)
[ "def", "_process_info", "(", "self", ",", "info_line", ")", ":", "info", "=", "tornado", ".", "escape", ".", "json_decode", "(", "info_line", ".", "decode", "(", ")", ")", "if", "'connect_urls'", "in", "info", ":", "if", "info", "[", "'connect_urls'", "]", ":", "connect_urls", "=", "[", "]", "for", "connect_url", "in", "info", "[", "'connect_urls'", "]", ":", "uri", "=", "urlparse", "(", "\"nats://%s\"", "%", "connect_url", ")", "srv", "=", "Srv", "(", "uri", ")", "srv", ".", "discovered", "=", "True", "# Filter for any similar server in the server pool already.", "should_add", "=", "True", "for", "s", "in", "self", ".", "_server_pool", ":", "if", "uri", ".", "netloc", "==", "s", ".", "uri", ".", "netloc", ":", "should_add", "=", "False", "if", "should_add", ":", "connect_urls", ".", "append", "(", "srv", ")", "if", "self", ".", "options", "[", "\"dont_randomize\"", "]", "is", "not", "True", ":", "shuffle", "(", "connect_urls", ")", "for", "srv", "in", "connect_urls", ":", "self", ".", "_server_pool", ".", "append", "(", "srv", ")" ]
Process INFO lines sent by the server to reconfigure client with latest updates from cluster to enable server discovery.
[ "Process", "INFO", "lines", "sent", "by", "the", "server", "to", "reconfigure", "client", "with", "latest", "updates", "from", "cluster", "to", "enable", "server", "discovery", "." ]
python
train
stevearc/dql
dql/expressions/selection.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/selection.py#L52-L61
def div(a, b):
    """ Divide two values, ignoring None """
    if a is None:
        if b is None:
            return None
        else:
            return 1 / b
    elif b is None:
        return a
    return a / b
[ "def", "div", "(", "a", ",", "b", ")", ":", "if", "a", "is", "None", ":", "if", "b", "is", "None", ":", "return", "None", "else", ":", "return", "1", "/", "b", "elif", "b", "is", "None", ":", "return", "a", "return", "a", "/", "b" ]
Divide two values, ignoring None
[ "Divide", "two", "values", "ignoring", "None" ]
python
train
intelsdi-x/snap-plugin-lib-py
snap_plugin/v1/plugin.py
https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/snap_plugin/v1/plugin.py#L63-L79
def _make_standalone_handler(preamble):
    """Class factory used so that preamble can be passed to
    :py:class:`_StandaloneHandler` without use of static members"""

    class _StandaloneHandler(BaseHTTPRequestHandler, object):
        """HTTP Handler for standalone mode"""

        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-type', 'application/json; charset=utf-8')
            self.send_header('Content-length', len(preamble))
            self.end_headers()
            self.wfile.write(preamble.encode('utf-8'))

        def log_message(self, format, *args):
            # suppress logging on requests
            return

    return _StandaloneHandler
[ "def", "_make_standalone_handler", "(", "preamble", ")", ":", "class", "_StandaloneHandler", "(", "BaseHTTPRequestHandler", ",", "object", ")", ":", "\"\"\"HTTP Handler for standalone mode\"\"\"", "def", "do_GET", "(", "self", ")", ":", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-type'", ",", "'application/json; charset=utf-8'", ")", "self", ".", "send_header", "(", "'Content-length'", ",", "len", "(", "preamble", ")", ")", "self", ".", "end_headers", "(", ")", "self", ".", "wfile", ".", "write", "(", "preamble", ".", "encode", "(", "'utf-8'", ")", ")", "def", "log_message", "(", "self", ",", "format", ",", "*", "args", ")", ":", "# suppress logging on requests", "return", "return", "_StandaloneHandler" ]
Class factory used so that preamble can be passed to :py:class:`_StandaloneHandler` without use of static members
[ "Class", "factory", "used", "so", "that", "preamble", "can", "be", "passed", "to", ":", "py", ":", "class", ":", "_StandaloneHandler", "without", "use", "of", "static", "members" ]
python
train
mlperf/training
rnn_translator/pytorch/seq2seq/train/fp_optimizers.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/train/fp_optimizers.py#L63-L76
def initialize_model(self, model):
    """
    Initializes internal state and build fp32 master copy of weights.

    :param model: fp16 model
    """
    logging.info('Initializing fp32 clone weights')
    self.fp16_model = model
    self.fp16_model.zero_grad()
    self.fp32_params = [param.to(torch.float32).detach()
                        for param in model.parameters()]

    for param in self.fp32_params:
        param.requires_grad = True
[ "def", "initialize_model", "(", "self", ",", "model", ")", ":", "logging", ".", "info", "(", "'Initializing fp32 clone weights'", ")", "self", ".", "fp16_model", "=", "model", "self", ".", "fp16_model", ".", "zero_grad", "(", ")", "self", ".", "fp32_params", "=", "[", "param", ".", "to", "(", "torch", ".", "float32", ")", ".", "detach", "(", ")", "for", "param", "in", "model", ".", "parameters", "(", ")", "]", "for", "param", "in", "self", ".", "fp32_params", ":", "param", ".", "requires_grad", "=", "True" ]
Initializes internal state and build fp32 master copy of weights. :param model: fp16 model
[ "Initializes", "internal", "state", "and", "build", "fp32", "master", "copy", "of", "weights", "." ]
python
train
pantsbuild/pex
pex/link.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/link.py#L57-L63
def from_filename(cls, filename):
    """Return a :class:`Link` wrapping the local filename."""
    result = cls._FROM_FILENAME_CACHE.get(filename)
    if result is None:
        result = cls(cls._normalize(filename))
        cls._FROM_FILENAME_CACHE.store(filename, result)
    return result
[ "def", "from_filename", "(", "cls", ",", "filename", ")", ":", "result", "=", "cls", ".", "_FROM_FILENAME_CACHE", ".", "get", "(", "filename", ")", "if", "result", "is", "None", ":", "result", "=", "cls", "(", "cls", ".", "_normalize", "(", "filename", ")", ")", "cls", ".", "_FROM_FILENAME_CACHE", ".", "store", "(", "filename", ",", "result", ")", "return", "result" ]
Return a :class:`Link` wrapping the local filename.
[ "Return", "a", ":", "class", ":", "Link", "wrapping", "the", "local", "filename", "." ]
python
train
sorgerlab/indra
indra/util/__init__.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L216-L219
def flatten(l):
    """Flatten a nested list."""
    return sum(map(flatten, l), []) \
        if isinstance(l, list) or isinstance(l, tuple) else [l]
[ "def", "flatten", "(", "l", ")", ":", "return", "sum", "(", "map", "(", "flatten", ",", "l", ")", ",", "[", "]", ")", "if", "isinstance", "(", "l", ",", "list", ")", "or", "isinstance", "(", "l", ",", "tuple", ")", "else", "[", "l", "]" ]
Flatten a nested list.
[ "Flatten", "a", "nested", "list", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/utils.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/utils.py#L41-L47
def parse_game_result(result):
    "Parse an SGF result string into value target."
    if re.match(r'[bB]\+', result):
        return 1
    if re.match(r'[wW]\+', result):
        return -1
    return 0
[ "def", "parse_game_result", "(", "result", ")", ":", "if", "re", ".", "match", "(", "r'[bB]\\+'", ",", "result", ")", ":", "return", "1", "if", "re", ".", "match", "(", "r'[wW]\\+'", ",", "result", ")", ":", "return", "-", "1", "return", "0" ]
Parse an SGF result string into value target.
[ "Parse", "an", "SGF", "result", "string", "into", "value", "target", "." ]
python
train
edibledinos/pwnypack
pwnypack/flow.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/flow.py#L487-L502
def write(self, data, echo=None):
    """
    Write data to channel.

    Args:
        data(bytes): The data to write to the channel.
        echo(bool): Whether to echo the written data to stdout.

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    if echo or (echo is None and self.echo):
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    self.channel.write(data)
[ "def", "write", "(", "self", ",", "data", ",", "echo", "=", "None", ")", ":", "if", "echo", "or", "(", "echo", "is", "None", "and", "self", ".", "echo", ")", ":", "sys", ".", "stdout", ".", "write", "(", "data", ".", "decode", "(", "'latin1'", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "self", ".", "channel", ".", "write", "(", "data", ")" ]
Write data to channel. Args: data(bytes): The data to write to the channel. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
[ "Write", "data", "to", "channel", "." ]
python
train
visualfabriq/bquery
bquery/benchmarks/bench_pos.py
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/benchmarks/bench_pos.py#L14-L20
def ctime(message=None):
    "Counts the time spent in some context"
    t = time.time()
    yield
    if message:
        print message + ":\t",
    print round(time.time() - t, 4), "sec"
[ "def", "ctime", "(", "message", "=", "None", ")", ":", "t", "=", "time", ".", "time", "(", ")", "yield", "if", "message", ":", "print", "message", "+", "\":\\t\"", ",", "print", "round", "(", "time", ".", "time", "(", ")", "-", "t", ",", "4", ")", ",", "\"sec\"" ]
Counts the time spent in some context
[ "Counts", "the", "time", "spent", "in", "some", "context" ]
python
train
dmlc/gluon-nlp
scripts/parsing/common/k_means.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/parsing/common/k_means.py#L108-L145
def _recenter(self):
    """
    one iteration of k-means
    """
    for split_idx in range(len(self._splits)):
        split = self._splits[split_idx]
        len_idx = self._split2len_idx[split]
        if split == self._splits[-1]:
            continue
        right_split = self._splits[split_idx + 1]

        # Try shifting the centroid to the left
        if len_idx > 0 and self._lengths[len_idx - 1] not in self._split_cntr:
            new_split = self._lengths[len_idx - 1]
            left_delta = self._len_cntr[split] * (right_split - new_split) - self._split_cntr[split] * (
                split - new_split)
            if left_delta < 0:
                self._splits[split_idx] = new_split
                self._split2len_idx[new_split] = len_idx - 1
                del self._split2len_idx[split]
                self._split_cntr[split] -= self._len_cntr[split]
                self._split_cntr[right_split] += self._len_cntr[split]
                self._split_cntr[new_split] = self._split_cntr[split]
                del self._split_cntr[split]
        # Try shifting the centroid to the right
        elif len_idx < len(self._lengths) - 2 and self._lengths[len_idx + 1] not in self._split_cntr:
            new_split = self._lengths[len_idx + 1]
            right_delta = self._split_cntr[split] * (new_split - split) - self._len_cntr[split] * (
                new_split - split)
            if right_delta <= 0:
                self._splits[split_idx] = new_split
                self._split2len_idx[new_split] = len_idx + 1
                del self._split2len_idx[split]
                self._split_cntr[split] += self._len_cntr[split]
                self._split_cntr[right_split] -= self._len_cntr[split]
                self._split_cntr[new_split] = self._split_cntr[split]
                del self._split_cntr[split]
[ "def", "_recenter", "(", "self", ")", ":", "for", "split_idx", "in", "range", "(", "len", "(", "self", ".", "_splits", ")", ")", ":", "split", "=", "self", ".", "_splits", "[", "split_idx", "]", "len_idx", "=", "self", ".", "_split2len_idx", "[", "split", "]", "if", "split", "==", "self", ".", "_splits", "[", "-", "1", "]", ":", "continue", "right_split", "=", "self", ".", "_splits", "[", "split_idx", "+", "1", "]", "# Try shifting the centroid to the left", "if", "len_idx", ">", "0", "and", "self", ".", "_lengths", "[", "len_idx", "-", "1", "]", "not", "in", "self", ".", "_split_cntr", ":", "new_split", "=", "self", ".", "_lengths", "[", "len_idx", "-", "1", "]", "left_delta", "=", "self", ".", "_len_cntr", "[", "split", "]", "*", "(", "right_split", "-", "new_split", ")", "-", "self", ".", "_split_cntr", "[", "split", "]", "*", "(", "split", "-", "new_split", ")", "if", "left_delta", "<", "0", ":", "self", ".", "_splits", "[", "split_idx", "]", "=", "new_split", "self", ".", "_split2len_idx", "[", "new_split", "]", "=", "len_idx", "-", "1", "del", "self", ".", "_split2len_idx", "[", "split", "]", "self", ".", "_split_cntr", "[", "split", "]", "-=", "self", ".", "_len_cntr", "[", "split", "]", "self", ".", "_split_cntr", "[", "right_split", "]", "+=", "self", ".", "_len_cntr", "[", "split", "]", "self", ".", "_split_cntr", "[", "new_split", "]", "=", "self", ".", "_split_cntr", "[", "split", "]", "del", "self", ".", "_split_cntr", "[", "split", "]", "# Try shifting the centroid to the right", "elif", "len_idx", "<", "len", "(", "self", ".", "_lengths", ")", "-", "2", "and", "self", ".", "_lengths", "[", "len_idx", "+", "1", "]", "not", "in", "self", ".", "_split_cntr", ":", "new_split", "=", "self", ".", "_lengths", "[", "len_idx", "+", "1", "]", "right_delta", "=", "self", ".", "_split_cntr", "[", "split", "]", "*", "(", "new_split", "-", "split", ")", "-", "self", ".", "_len_cntr", "[", "split", "]", "*", "(", "new_split", "-", "split", ")", "if", "right_delta", "<=", "0", ":", "self", ".", "_splits", "[", "split_idx", "]", "=", "new_split", "self", ".", "_split2len_idx", "[", "new_split", "]", "=", "len_idx", "+", "1", "del", "self", ".", "_split2len_idx", "[", "split", "]", "self", ".", "_split_cntr", "[", "split", "]", "+=", "self", ".", "_len_cntr", "[", "split", "]", "self", ".", "_split_cntr", "[", "right_split", "]", "-=", "self", ".", "_len_cntr", "[", "split", "]", "self", ".", "_split_cntr", "[", "new_split", "]", "=", "self", ".", "_split_cntr", "[", "split", "]", "del", "self", ".", "_split_cntr", "[", "split", "]" ]
one iteration of k-means
[ "one", "iteration", "of", "k", "-", "means" ]
python
train
google/tangent
tangent/comments.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/comments.py#L49-L71
def remove_repeated_comments(node):
    """Remove comments that repeat themselves.

    Multiple statements might be annotated with the same comment. This way if
    one of the statements is deleted during optimization passes, the comment
    won't be lost. This pass removes sequences of identical comments, leaving
    only the first one.

    Args:
        node: An AST

    Returns:
        An AST where comments are not repeated in sequence.
    """
    last_comment = {'text': None}
    for _node in gast.walk(node):
        if anno.hasanno(_node, 'comment'):
            comment = anno.getanno(_node, 'comment')
            if comment['text'] == last_comment['text']:
                anno.delanno(_node, 'comment')
            last_comment = comment
    return node
[ "def", "remove_repeated_comments", "(", "node", ")", ":", "last_comment", "=", "{", "'text'", ":", "None", "}", "for", "_node", "in", "gast", ".", "walk", "(", "node", ")", ":", "if", "anno", ".", "hasanno", "(", "_node", ",", "'comment'", ")", ":", "comment", "=", "anno", ".", "getanno", "(", "_node", ",", "'comment'", ")", "if", "comment", "[", "'text'", "]", "==", "last_comment", "[", "'text'", "]", ":", "anno", ".", "delanno", "(", "_node", ",", "'comment'", ")", "last_comment", "=", "comment", "return", "node" ]
Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence.
[ "Remove", "comments", "that", "repeat", "themselves", "." ]
python
train
waqasbhatti/astrobase
astrobase/hatsurveys/hplc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hplc.py#L405-L502
def concatenate_textlcs_for_objectid(lcbasedir,
                                     objectid,
                                     aperture='TF1',
                                     postfix='.gz',
                                     sortby='rjd',
                                     normalize=True,
                                     recursive=True):
    '''This concatenates all text LCs for an objectid with the given aperture.

    Does not care about overlaps or duplicates. The light curves must all be
    from the same aperture.

    The intended use is to concatenate light curves across CCDs or instrument
    changes for a single object. These can then be normalized later using
    standard astrobase tools to search for variablity and/or periodicity.

    lcbasedir is the directory to start searching in.

    objectid is the object to search for.

    aperture is the aperture postfix to use: (TF1 = aperture 1,
                                              TF2 = aperture 2,
                                              TF3 = aperture 3)

    sortby is a column to sort the final concatenated light curve by in
    ascending order.

    If normalize is True, then each light curve's magnitude columns are
    normalized to zero, and the whole light curve is then normalized to the
    global median magnitude for each magnitude column.

    If recursive is True, then the function will search recursively in
    lcbasedir for any light curves matching the specified criteria. This may
    take a while, especially on network filesystems.

    The returned lcdict has an extra column: 'lcn' that tracks which
    measurement belongs to which input light curve. This can be used with
    lcdict['concatenated'] which relates input light curve index to input
    light curve filepath. Finally, there is an 'nconcatenated' key in the
    lcdict that contains the total number of concatenated light curves.
    '''

    LOGINFO('looking for light curves for %s, aperture %s in directory: %s' %
            (objectid, aperture, lcbasedir))

    if recursive is False:
        matching = glob.glob(os.path.join(lcbasedir,
                                          '*%s*%s*%s' % (objectid, aperture, postfix)))
    else:
        # use recursive glob for Python 3.5+
        if sys.version_info[:2] > (3,4):
            matching = glob.glob(os.path.join(lcbasedir,
                                              '**',
                                              '*%s*%s*%s' % (objectid, aperture, postfix)),
                                 recursive=True)
            LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
        # otherwise, use os.walk and glob
        else:
            # use os.walk to go through the directories
            walker = os.walk(lcbasedir)
            matching = []

            for root, dirs, _files in walker:
                for sdir in dirs:
                    searchpath = os.path.join(root,
                                              sdir,
                                              '*%s*%s*%s' % (objectid, aperture, postfix))
                    foundfiles = glob.glob(searchpath)

                    if foundfiles:
                        matching.extend(foundfiles)
                        LOGINFO(
                            'found %s in dir: %s' % (repr(foundfiles),
                                                     os.path.join(root,sdir))
                        )

    # now that we have all the files, concatenate them
    # a single file will be returned as normalized
    if matching and len(matching) > 0:
        clcdict = concatenate_textlcs(matching,
                                      sortby=sortby,
                                      normalize=normalize)
        return clcdict
    else:
        LOGERROR('did not find any light curves for %s and aperture %s' %
                 (objectid, aperture))
        return None
[ "def", "concatenate_textlcs_for_objectid", "(", "lcbasedir", ",", "objectid", ",", "aperture", "=", "'TF1'", ",", "postfix", "=", "'.gz'", ",", "sortby", "=", "'rjd'", ",", "normalize", "=", "True", ",", "recursive", "=", "True", ")", ":", "LOGINFO", "(", "'looking for light curves for %s, aperture %s in directory: %s'", "%", "(", "objectid", ",", "aperture", ",", "lcbasedir", ")", ")", "if", "recursive", "is", "False", ":", "matching", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "lcbasedir", ",", "'*%s*%s*%s'", "%", "(", "objectid", ",", "aperture", ",", "postfix", ")", ")", ")", "else", ":", "# use recursive glob for Python 3.5+", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">", "(", "3", ",", "4", ")", ":", "matching", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "lcbasedir", ",", "'**'", ",", "'*%s*%s*%s'", "%", "(", "objectid", ",", "aperture", ",", "postfix", ")", ")", ",", "recursive", "=", "True", ")", "LOGINFO", "(", "'found %s files: %s'", "%", "(", "len", "(", "matching", ")", ",", "repr", "(", "matching", ")", ")", ")", "# otherwise, use os.walk and glob", "else", ":", "# use os.walk to go through the directories", "walker", "=", "os", ".", "walk", "(", "lcbasedir", ")", "matching", "=", "[", "]", "for", "root", ",", "dirs", ",", "_files", "in", "walker", ":", "for", "sdir", "in", "dirs", ":", "searchpath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "sdir", ",", "'*%s*%s*%s'", "%", "(", "objectid", ",", "aperture", ",", "postfix", ")", ")", "foundfiles", "=", "glob", ".", "glob", "(", "searchpath", ")", "if", "foundfiles", ":", "matching", ".", "extend", "(", "foundfiles", ")", "LOGINFO", "(", "'found %s in dir: %s'", "%", "(", "repr", "(", "foundfiles", ")", ",", "os", ".", "path", ".", "join", "(", "root", ",", "sdir", ")", ")", ")", "# now that we have all the files, concatenate them", "# a single file will be returned as normalized", "if", "matching", "and", "len", "(", "matching", ")", ">", "0", ":", "clcdict", "=", "concatenate_textlcs", "(", "matching", ",", "sortby", "=", "sortby", ",", "normalize", "=", "normalize", ")", "return", "clcdict", "else", ":", "LOGERROR", "(", "'did not find any light curves for %s and aperture %s'", "%", "(", "objectid", ",", "aperture", ")", ")", "return", "None" ]
This concatenates all text LCs for an objectid with the given aperture. Does not care about overlaps or duplicates. The light curves must all be from the same aperture. The intended use is to concatenate light curves across CCDs or instrument changes for a single object. These can then be normalized later using standard astrobase tools to search for variablity and/or periodicity. lcbasedir is the directory to start searching in. objectid is the object to search for. aperture is the aperture postfix to use: (TF1 = aperture 1, TF2 = aperture 2, TF3 = aperture 3) sortby is a column to sort the final concatenated light curve by in ascending order. If normalize is True, then each light curve's magnitude columns are normalized to zero, and the whole light curve is then normalized to the global median magnitude for each magnitude column. If recursive is True, then the function will search recursively in lcbasedir for any light curves matching the specified criteria. This may take a while, especially on network filesystems. The returned lcdict has an extra column: 'lcn' that tracks which measurement belongs to which input light curve. This can be used with lcdict['concatenated'] which relates input light curve index to input light curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that contains the total number of concatenated light curves.
[ "This", "concatenates", "all", "text", "LCs", "for", "an", "objectid", "with", "the", "given", "aperture", "." ]
python
valid
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L441-L472
def get_or_create(full_filename, headers_types=None, default_entry=''):
    """Load a .csv file into a CSVModel if the file exists, or create a new
    CSVModel with the given filename if the file does not exist.

    Parameters
    ----------
    full_filename : :obj:`str`
        The file path to a .csv file.

    headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str`
        A list of tuples, where the first element in each tuple is the string
        header for a column and the second element is that column's data type
        as a string.

    default_entry : :obj:`str`
        The default entry for cells in the CSV.

    Returns
    -------
    :obj:`CSVModel`
        The CSVModel initialized with the data in the given file, or a new
        CSVModel tied to the filename if the file doesn't currently exist.
    """
    # convert dictionaries to list
    if isinstance(headers_types, dict):
        headers_types_list = [(k, v) for k, v in headers_types.items()]
        headers_types = headers_types_list

    if os.path.isfile(full_filename):
        return CSVModel.load(full_filename)
    else:
        return CSVModel(full_filename, headers_types, default_entry=default_entry)
[ "def", "get_or_create", "(", "full_filename", ",", "headers_types", "=", "None", ",", "default_entry", "=", "''", ")", ":", "# convert dictionaries to list", "if", "isinstance", "(", "headers_types", ",", "dict", ")", ":", "headers_types_list", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "headers_types", ".", "items", "(", ")", "]", "headers_types", "=", "headers_types_list", "if", "os", ".", "path", ".", "isfile", "(", "full_filename", ")", ":", "return", "CSVModel", ".", "load", "(", "full_filename", ")", "else", ":", "return", "CSVModel", "(", "full_filename", ",", "headers_types", ",", "default_entry", "=", "default_entry", ")" ]
Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "if", "the", "file", "exists", "or", "create", "a", "new", "CSVModel", "with", "the", "given", "filename", "if", "the", "file", "does", "not", "exist", "." ]
python
train
ArangoDB-Community/pyArango
pyArango/collection.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L442-L451
def hasField(cls, fieldName) :
    """returns True/False wether the collection has field K in it's schema.
    Use the dot notation for the nested fields: address.street"""
    path = fieldName.split(".")
    v = cls._fields
    for k in path :
        try :
            v = v[k]
        except KeyError :
            return False
    return True
[ "def", "hasField", "(", "cls", ",", "fieldName", ")", ":", "path", "=", "fieldName", ".", "split", "(", "\".\"", ")", "v", "=", "cls", ".", "_fields", "for", "k", "in", "path", ":", "try", ":", "v", "=", "v", "[", "k", "]", "except", "KeyError", ":", "return", "False", "return", "True" ]
returns True/False wether the collection has field K in it's schema. Use the dot notation for the nested fields: address.street
[ "returns", "True", "/", "False", "wether", "the", "collection", "has", "field", "K", "in", "it", "s", "schema", ".", "Use", "the", "dot", "notation", "for", "the", "nested", "fields", ":", "address", ".", "street" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/__init__.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/__init__.py#L204-L218
def compile_bytecode(
    code: List[types.CodeType],
    gctx: GeneratorContext,
    optimizer: PythonASTOptimizer,
    module: types.ModuleType,
) -> None:
    """Compile cached bytecode into the given module.

    The Basilisp import hook attempts to cache bytecode while compiling
    Basilisp namespaces. When the cached bytecode is reloaded from disk, it
    needs to be compiled within a bootstrapped module. This function
    bootstraps the module and then proceeds to compile a collection of
    bytecodes into the module."""
    _bootstrap_module(gctx, optimizer, module)
    for bytecode in code:
        exec(bytecode, module.__dict__)
[ "def", "compile_bytecode", "(", "code", ":", "List", "[", "types", ".", "CodeType", "]", ",", "gctx", ":", "GeneratorContext", ",", "optimizer", ":", "PythonASTOptimizer", ",", "module", ":", "types", ".", "ModuleType", ",", ")", "->", "None", ":", "_bootstrap_module", "(", "gctx", ",", "optimizer", ",", "module", ")", "for", "bytecode", "in", "code", ":", "exec", "(", "bytecode", ",", "module", ".", "__dict__", ")" ]
Compile cached bytecode into the given module. The Basilisp import hook attempts to cache bytecode while compiling Basilisp namespaces. When the cached bytecode is reloaded from disk, it needs to be compiled within a bootstrapped module. This function bootstraps the module and then proceeds to compile a collection of bytecodes into the module.
[ "Compile", "cached", "bytecode", "into", "the", "given", "module", "." ]
python
test
numberoverzero/bloop
bloop/session.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L82-L104
def load_items(self, items):
    """Loads any number of items in chunks, handling continuation tokens.

    :param items: Unpacked in chunks into "RequestItems" for
        :func:`boto3.DynamoDB.Client.batch_get_item`.
    """
    loaded_items = {}
    requests = collections.deque(create_batch_get_chunks(items))
    while requests:
        request = requests.pop()
        try:
            response = self.dynamodb_client.batch_get_item(RequestItems=request)
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while loading items.") from error

        # Accumulate results
        for table_name, table_items in response.get("Responses", {}).items():
            loaded_items.setdefault(table_name, []).extend(table_items)

        # Push additional request onto the deque.
        # "UnprocessedKeys" is {} if this request is done
        if response["UnprocessedKeys"]:
            requests.append(response["UnprocessedKeys"])
    return loaded_items
[ "def", "load_items", "(", "self", ",", "items", ")", ":", "loaded_items", "=", "{", "}", "requests", "=", "collections", ".", "deque", "(", "create_batch_get_chunks", "(", "items", ")", ")", "while", "requests", ":", "request", "=", "requests", ".", "pop", "(", ")", "try", ":", "response", "=", "self", ".", "dynamodb_client", ".", "batch_get_item", "(", "RequestItems", "=", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while loading items.\"", ")", "from", "error", "# Accumulate results", "for", "table_name", ",", "table_items", "in", "response", ".", "get", "(", "\"Responses\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "loaded_items", ".", "setdefault", "(", "table_name", ",", "[", "]", ")", ".", "extend", "(", "table_items", ")", "# Push additional request onto the deque.", "# \"UnprocessedKeys\" is {} if this request is done", "if", "response", "[", "\"UnprocessedKeys\"", "]", ":", "requests", ".", "append", "(", "response", "[", "\"UnprocessedKeys\"", "]", ")", "return", "loaded_items" ]
Loads any number of items in chunks, handling continuation tokens. :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
[ "Loads", "any", "number", "of", "items", "in", "chunks", "handling", "continuation", "tokens", "." ]
python
train
jeffknupp/sandman2
sandman2/app.py
https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/app.py#L95-L123
def register_service(cls, primary_key_type):
    """Register an API service endpoint.

    :param cls: The class to register
    :param str primary_key_type: The type (as a string) of the primary_key field
    """
    view_func = cls.as_view(cls.__name__.lower())  # pylint: disable=no-member
    methods = set(cls.__model__.__methods__)  # pylint: disable=no-member

    if 'GET' in methods:  # pylint: disable=no-member
        current_app.add_url_rule(
            cls.__model__.__url__ + '/', defaults={'resource_id': None},
            view_func=view_func,
            methods=['GET'])
        current_app.add_url_rule(
            '{resource}/meta'.format(resource=cls.__model__.__url__),
            view_func=view_func,
            methods=['GET'])
    if 'POST' in methods:  # pylint: disable=no-member
        current_app.add_url_rule(
            cls.__model__.__url__ + '/', view_func=view_func, methods=['POST', ])
    current_app.add_url_rule(
        '{resource}/<{pk_type}:{pk}>'.format(
            resource=cls.__model__.__url__,
            pk='resource_id', pk_type=primary_key_type),
        view_func=view_func,
        methods=methods - {'POST'})
    current_app.classes.append(cls)
[ "def", "register_service", "(", "cls", ",", "primary_key_type", ")", ":", "view_func", "=", "cls", ".", "as_view", "(", "cls", ".", "__name__", ".", "lower", "(", ")", ")", "# pylint: disable=no-member", "methods", "=", "set", "(", "cls", ".", "__model__", ".", "__methods__", ")", "# pylint: disable=no-member", "if", "'GET'", "in", "methods", ":", "# pylint: disable=no-member", "current_app", ".", "add_url_rule", "(", "cls", ".", "__model__", ".", "__url__", "+", "'/'", ",", "defaults", "=", "{", "'resource_id'", ":", "None", "}", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'GET'", "]", ")", "current_app", ".", "add_url_rule", "(", "'{resource}/meta'", ".", "format", "(", "resource", "=", "cls", ".", "__model__", ".", "__url__", ")", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'GET'", "]", ")", "if", "'POST'", "in", "methods", ":", "# pylint: disable=no-member", "current_app", ".", "add_url_rule", "(", "cls", ".", "__model__", ".", "__url__", "+", "'/'", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'POST'", ",", "]", ")", "current_app", ".", "add_url_rule", "(", "'{resource}/<{pk_type}:{pk}>'", ".", "format", "(", "resource", "=", "cls", ".", "__model__", ".", "__url__", ",", "pk", "=", "'resource_id'", ",", "pk_type", "=", "primary_key_type", ")", ",", "view_func", "=", "view_func", ",", "methods", "=", "methods", "-", "{", "'POST'", "}", ")", "current_app", ".", "classes", ".", "append", "(", "cls", ")" ]
Register an API service endpoint. :param cls: The class to register :param str primary_key_type: The type (as a string) of the primary_key field
[ "Register", "an", "API", "service", "endpoint", "." ]
python
train
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L562-L600
def search_tasks(self, *queries):
    """Return a list of tasks that match some search criteria.

    .. note:: Example queries can be found
        `here <https://todoist.com/Help/timeQuery>`_.

    .. note:: A standard set of queries are available in the
        :class:`pytodoist.todoist.Query` class.

    :param queries: Return tasks that match at least one of these queries.
    :type queries: list str
    :return: A list tasks that match at least one query.
    :rtype: list of :class:`pytodoist.todoist.Task`

    >>> from pytodoist import todoist
    >>> user = todoist.login('[email protected]', 'password')
    >>> tasks = user.search_tasks(todoist.Query.TOMORROW, '18 Sep')
    """
    queries = json.dumps(queries)
    response = API.query(self.api_token, queries)
    _fail_if_contains_errors(response)
    query_results = response.json()
    tasks = []
    for result in query_results:
        if 'data' not in result:
            continue
        all_tasks = result['data']
        if result['type'] == Query.ALL:
            all_projects = all_tasks
            for project_json in all_projects:
                uncompleted_tasks = project_json.get('uncompleted', [])
                completed_tasks = project_json.get('completed', [])
                all_tasks = uncompleted_tasks + completed_tasks
        for task_json in all_tasks:
            project_id = task_json['project_id']
            project = self.projects[project_id]
            task = Task(task_json, project)
            tasks.append(task)
    return tasks
[ "def", "search_tasks", "(", "self", ",", "*", "queries", ")", ":", "queries", "=", "json", ".", "dumps", "(", "queries", ")", "response", "=", "API", ".", "query", "(", "self", ".", "api_token", ",", "queries", ")", "_fail_if_contains_errors", "(", "response", ")", "query_results", "=", "response", ".", "json", "(", ")", "tasks", "=", "[", "]", "for", "result", "in", "query_results", ":", "if", "'data'", "not", "in", "result", ":", "continue", "all_tasks", "=", "result", "[", "'data'", "]", "if", "result", "[", "'type'", "]", "==", "Query", ".", "ALL", ":", "all_projects", "=", "all_tasks", "for", "project_json", "in", "all_projects", ":", "uncompleted_tasks", "=", "project_json", ".", "get", "(", "'uncompleted'", ",", "[", "]", ")", "completed_tasks", "=", "project_json", ".", "get", "(", "'completed'", ",", "[", "]", ")", "all_tasks", "=", "uncompleted_tasks", "+", "completed_tasks", "for", "task_json", "in", "all_tasks", ":", "project_id", "=", "task_json", "[", "'project_id'", "]", "project", "=", "self", ".", "projects", "[", "project_id", "]", "task", "=", "Task", "(", "task_json", ",", "project", ")", "tasks", ".", "append", "(", "task", ")", "return", "tasks" ]
Return a list of tasks that match some search criteria. .. note:: Example queries can be found `here <https://todoist.com/Help/timeQuery>`_. .. note:: A standard set of queries are available in the :class:`pytodoist.todoist.Query` class. :param queries: Return tasks that match at least one of these queries. :type queries: list str :return: A list tasks that match at least one query. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> tasks = user.search_tasks(todoist.Query.TOMORROW, '18 Sep')
[ "Return", "a", "list", "of", "tasks", "that", "match", "some", "search", "criteria", "." ]
python
train
tensorflow/lucid
lucid/misc/gl/meshutil.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L87-L96
def _unify_rows(a):
    """Unify lengths of each row of a."""
    lens = np.fromiter(map(len, a), np.int32)
    if not (lens[0] == lens).all():
        out = np.zeros((len(a), lens.max()), np.float32)
        for i, row in enumerate(a):
            out[i, :lens[i]] = row
    else:
        out = np.float32(a)
    return out
[ "def", "_unify_rows", "(", "a", ")", ":", "lens", "=", "np", ".", "fromiter", "(", "map", "(", "len", ",", "a", ")", ",", "np", ".", "int32", ")", "if", "not", "(", "lens", "[", "0", "]", "==", "lens", ")", ".", "all", "(", ")", ":", "out", "=", "np", ".", "zeros", "(", "(", "len", "(", "a", ")", ",", "lens", ".", "max", "(", ")", ")", ",", "np", ".", "float32", ")", "for", "i", ",", "row", "in", "enumerate", "(", "a", ")", ":", "out", "[", "i", ",", ":", "lens", "[", "i", "]", "]", "=", "row", "else", ":", "out", "=", "np", ".", "float32", "(", "a", ")", "return", "out" ]
Unify lengths of each row of a.
[ "Unify", "lengths", "of", "each", "row", "of", "a", "." ]
python
train
kmedian/ctmc
ctmc/ctmc_class.py
https://github.com/kmedian/ctmc/blob/e30747f797ce777fd2aaa1b7ee5a77e91d7db5e4/ctmc/ctmc_class.py#L17-L30
def fit(self, X, y=None):
    """Calls the ctmc.ctmc function

    Parameters
    ----------
    X : list of lists
        (see ctmc function 'data')

    y
        not used, present for API consistence purpose.
    """
    self.transmat, self.genmat, self.transcount, self.statetime = ctmc(
        X, self.numstates, self.transintv, self.toltime, self.debug)
    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "self", ".", "transmat", ",", "self", ".", "genmat", ",", "self", ".", "transcount", ",", "self", ".", "statetime", "=", "ctmc", "(", "X", ",", "self", ".", "numstates", ",", "self", ".", "transintv", ",", "self", ".", "toltime", ",", "self", ".", "debug", ")", "return", "self" ]
Calls the ctmc.ctmc function Parameters ---------- X : list of lists (see ctmc function 'data') y not used, present for API consistence purpose.
[ "Calls", "the", "ctmc", ".", "ctmc", "function" ]
python
train
caseyjlaw/rtpipe
rtpipe/interactive.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/interactive.py#L692-L704
def colorsat(l,m):
    """ Returns color for given l,m
    Designed to look like a color wheel that is more saturated in middle.
    """

    lm = np.zeros(len(l), dtype='complex')
    lm.real = l
    lm.imag = m
    red = 0.5*(1+np.cos(np.angle(lm)))
    green = 0.5*(1+np.cos(np.angle(lm) + 2*3.14/3))
    blue = 0.5*(1+np.cos(np.angle(lm) - 2*3.14/3))
    amp = 256*np.abs(lm)/np.abs(lm).max()
    return ["#%02x%02x%02x" % (np.floor(amp[i]*red[i]),
                               np.floor(amp[i]*green[i]),
                               np.floor(amp[i]*blue[i]))
            for i in range(len(l))]
[ "def", "colorsat", "(", "l", ",", "m", ")", ":", "lm", "=", "np", ".", "zeros", "(", "len", "(", "l", ")", ",", "dtype", "=", "'complex'", ")", "lm", ".", "real", "=", "l", "lm", ".", "imag", "=", "m", "red", "=", "0.5", "*", "(", "1", "+", "np", ".", "cos", "(", "np", ".", "angle", "(", "lm", ")", ")", ")", "green", "=", "0.5", "*", "(", "1", "+", "np", ".", "cos", "(", "np", ".", "angle", "(", "lm", ")", "+", "2", "*", "3.14", "/", "3", ")", ")", "blue", "=", "0.5", "*", "(", "1", "+", "np", ".", "cos", "(", "np", ".", "angle", "(", "lm", ")", "-", "2", "*", "3.14", "/", "3", ")", ")", "amp", "=", "256", "*", "np", ".", "abs", "(", "lm", ")", "/", "np", ".", "abs", "(", "lm", ")", ".", "max", "(", ")", "return", "[", "\"#%02x%02x%02x\"", "%", "(", "np", ".", "floor", "(", "amp", "[", "i", "]", "*", "red", "[", "i", "]", ")", ",", "np", ".", "floor", "(", "amp", "[", "i", "]", "*", "green", "[", "i", "]", ")", ",", "np", ".", "floor", "(", "amp", "[", "i", "]", "*", "blue", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "l", ")", ")", "]" ]
Returns color for given l,m Designed to look like a color wheel that is more saturated in middle.
[ "Returns", "color", "for", "given", "l", "m", "Designed", "to", "look", "like", "a", "color", "wheel", "that", "is", "more", "saturated", "in", "middle", "." ]
python
train
buildbot/buildbot
master/buildbot/process/users/manual.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/process/users/manual.py#L37-L83
def formatResults(self, op, results):
    """
    This formats the results of the database operations for printing
    back to the caller

    @param op: operation to perform (add, remove, update, get)
    @type op: string

    @param results: results from db queries in perspective_commandline
    @type results: list

    @returns: string containing formatted results
    """
    formatted_results = ""

    if op == 'add':
        # list, alternating ident, uid
        formatted_results += "user(s) added:\n"
        for user in results:
            if isinstance(user, str):
                formatted_results += "identifier: %s\n" % user
            else:
                formatted_results += "uid: %d\n\n" % user
    elif op == 'remove':
        # list of dictionaries
        formatted_results += "user(s) removed:\n"
        for user in results:
            if user:
                formatted_results += "identifier: %s\n" % (user)
    elif op == 'update':
        # list, alternating ident, None
        formatted_results += "user(s) updated:\n"
        for user in results:
            if user:
                formatted_results += "identifier: %s\n" % (user)
    elif op == 'get':
        # list of dictionaries
        formatted_results += "user(s) found:\n"
        for user in results:
            if user:
                for key in sorted(user.keys()):
                    if key != 'bb_password':
                        formatted_results += "%s: %s\n" % (key, user[key])
                formatted_results += "\n"
            else:
                formatted_results += "no match found\n"
    return formatted_results
[ "def", "formatResults", "(", "self", ",", "op", ",", "results", ")", ":", "formatted_results", "=", "\"\"", "if", "op", "==", "'add'", ":", "# list, alternating ident, uid", "formatted_results", "+=", "\"user(s) added:\\n\"", "for", "user", "in", "results", ":", "if", "isinstance", "(", "user", ",", "str", ")", ":", "formatted_results", "+=", "\"identifier: %s\\n\"", "%", "user", "else", ":", "formatted_results", "+=", "\"uid: %d\\n\\n\"", "%", "user", "elif", "op", "==", "'remove'", ":", "# list of dictionaries", "formatted_results", "+=", "\"user(s) removed:\\n\"", "for", "user", "in", "results", ":", "if", "user", ":", "formatted_results", "+=", "\"identifier: %s\\n\"", "%", "(", "user", ")", "elif", "op", "==", "'update'", ":", "# list, alternating ident, None", "formatted_results", "+=", "\"user(s) updated:\\n\"", "for", "user", "in", "results", ":", "if", "user", ":", "formatted_results", "+=", "\"identifier: %s\\n\"", "%", "(", "user", ")", "elif", "op", "==", "'get'", ":", "# list of dictionaries", "formatted_results", "+=", "\"user(s) found:\\n\"", "for", "user", "in", "results", ":", "if", "user", ":", "for", "key", "in", "sorted", "(", "user", ".", "keys", "(", ")", ")", ":", "if", "key", "!=", "'bb_password'", ":", "formatted_results", "+=", "\"%s: %s\\n\"", "%", "(", "key", ",", "user", "[", "key", "]", ")", "formatted_results", "+=", "\"\\n\"", "else", ":", "formatted_results", "+=", "\"no match found\\n\"", "return", "formatted_results" ]
This formats the results of the database operations for printing back to the caller @param op: operation to perform (add, remove, update, get) @type op: string @param results: results from db queries in perspective_commandline @type results: list @returns: string containing formatted results
[ "This", "formats", "the", "results", "of", "the", "database", "operations", "for", "printing", "back", "to", "the", "caller" ]
python
train
juju/theblues
theblues/identity_manager.py
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/identity_manager.py#L64-L92
def discharge(self, username, macaroon):
    """Discharge the macarooon for the identity.

    Raise a ServerError if an error occurs in the request process.

    @param username The logged in user.
    @param macaroon The macaroon returned from the charm store.
    @return The resulting base64 encoded macaroon.
    @raises ServerError when making request to the discharge endpoint
        InvalidMacaroon when the macaroon passedin or discharged is invalid
    """
    caveats = macaroon.third_party_caveats()
    if len(caveats) != 1:
        raise InvalidMacaroon(
            'Invalid number of third party caveats (1 != {})'
            ''.format(len(caveats)))
    url = '{}discharger/discharge?discharge-for-user={}&id={}'.format(
        self.url, quote(username), caveats[0][1])
    logging.debug('Sending identity info to {}'.format(url))
    logging.debug('data is {}'.format(caveats[0][1]))
    response = make_request(url, method='POST', timeout=self.timeout)
    try:
        macaroon = response['Macaroon']
        json_macaroon = json.dumps(macaroon)
    except (KeyError, UnicodeDecodeError) as err:
        raise InvalidMacaroon(
            'Invalid macaroon from discharger: {}'.format(err.message))
    return base64.urlsafe_b64encode(json_macaroon.encode('utf-8'))
[ "def", "discharge", "(", "self", ",", "username", ",", "macaroon", ")", ":", "caveats", "=", "macaroon", ".", "third_party_caveats", "(", ")", "if", "len", "(", "caveats", ")", "!=", "1", ":", "raise", "InvalidMacaroon", "(", "'Invalid number of third party caveats (1 != {})'", "''", ".", "format", "(", "len", "(", "caveats", ")", ")", ")", "url", "=", "'{}discharger/discharge?discharge-for-user={}&id={}'", ".", "format", "(", "self", ".", "url", ",", "quote", "(", "username", ")", ",", "caveats", "[", "0", "]", "[", "1", "]", ")", "logging", ".", "debug", "(", "'Sending identity info to {}'", ".", "format", "(", "url", ")", ")", "logging", ".", "debug", "(", "'data is {}'", ".", "format", "(", "caveats", "[", "0", "]", "[", "1", "]", ")", ")", "response", "=", "make_request", "(", "url", ",", "method", "=", "'POST'", ",", "timeout", "=", "self", ".", "timeout", ")", "try", ":", "macaroon", "=", "response", "[", "'Macaroon'", "]", "json_macaroon", "=", "json", ".", "dumps", "(", "macaroon", ")", "except", "(", "KeyError", ",", "UnicodeDecodeError", ")", "as", "err", ":", "raise", "InvalidMacaroon", "(", "'Invalid macaroon from discharger: {}'", ".", "format", "(", "err", ".", "message", ")", ")", "return", "base64", ".", "urlsafe_b64encode", "(", "json_macaroon", ".", "encode", "(", "'utf-8'", ")", ")" ]
Discharge the macarooon for the identity. Raise a ServerError if an error occurs in the request process. @param username The logged in user. @param macaroon The macaroon returned from the charm store. @return The resulting base64 encoded macaroon. @raises ServerError when making request to the discharge endpoint InvalidMacaroon when the macaroon passedin or discharged is invalid
[ "Discharge", "the", "macarooon", "for", "the", "identity", "." ]
python
train
sbaechler/django-multilingual-search
multilingual/elasticsearch_backend.py
https://github.com/sbaechler/django-multilingual-search/blob/485c690d865da3267b19e073e28d3e2290f36611/multilingual/elasticsearch_backend.py#L96-L106
def clear(self, models=None, commit=True):
    """
    Clears all indexes for the current project.
    :param models: if specified, only deletes the entries for the given models.
    :param commit: This is ignored by Haystack (maybe a bug?)
    """
    for language in self.languages:
        self.log.debug('clearing index for {0}'.format(language))
        self.index_name = self._index_name_for_language(language)
        super(ElasticsearchMultilingualSearchBackend, self).clear(models, commit)
        self._reset_existing_mapping()
[ "def", "clear", "(", "self", ",", "models", "=", "None", ",", "commit", "=", "True", ")", ":", "for", "language", "in", "self", ".", "languages", ":", "self", ".", "log", ".", "debug", "(", "'clearing index for {0}'", ".", "format", "(", "language", ")", ")", "self", ".", "index_name", "=", "self", ".", "_index_name_for_language", "(", "language", ")", "super", "(", "ElasticsearchMultilingualSearchBackend", ",", "self", ")", ".", "clear", "(", "models", ",", "commit", ")", "self", ".", "_reset_existing_mapping", "(", ")" ]
Clears all indexes for the current project. :param models: if specified, only deletes the entries for the given models. :param commit: This is ignored by Haystack (maybe a bug?)
[ "Clears", "all", "indexes", "for", "the", "current", "project", ".", ":", "param", "models", ":", "if", "specified", "only", "deletes", "the", "entries", "for", "the", "given", "models", ".", ":", "param", "commit", ":", "This", "is", "ignored", "by", "Haystack", "(", "maybe", "a", "bug?", ")" ]
python
train
DallasMorningNews/django-datafreezer
datafreezer/views.py
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L264-L299
def grab_names_from_emails(email_list):
    """Return a dictionary mapping names to email addresses.

    Only gives a response if the email is found in the staff API/JSON.

    Expects an API of the format = [
        {
            'email': '[email protected]',
            ...
            'fullName': 'Frank Oo'
        },
        ...
    ]
    """
    all_staff = STAFF_LIST
    emails_names = {}
    for email in email_list:
        for person in all_staff:
            if email == person['email'] and email not in emails_names:
                emails_names[email] = person['fullName']
                # print emails_names[email]
    for email in email_list:
        matched = False
        for assignment in emails_names:
            if email == assignment:
                matched = True
        if not matched:
            emails_names[email] = email
    return emails_names
[ "def", "grab_names_from_emails", "(", "email_list", ")", ":", "all_staff", "=", "STAFF_LIST", "emails_names", "=", "{", "}", "for", "email", "in", "email_list", ":", "for", "person", "in", "all_staff", ":", "if", "email", "==", "person", "[", "'email'", "]", "and", "email", "not", "in", "emails_names", ":", "emails_names", "[", "email", "]", "=", "person", "[", "'fullName'", "]", "# print emails_names[email]", "for", "email", "in", "email_list", ":", "matched", "=", "False", "for", "assignment", "in", "emails_names", ":", "if", "email", "==", "assignment", ":", "matched", "=", "True", "if", "not", "matched", ":", "emails_names", "[", "email", "]", "=", "email", "return", "emails_names" ]
Return a dictionary mapping names to email addresses. Only gives a response if the email is found in the staff API/JSON. Expects an API of the format = [ { 'email': '[email protected]', ... 'fullName': 'Frank Oo' }, ... ]
[ "Return", "a", "dictionary", "mapping", "names", "to", "email", "addresses", "." ]
python
train
awacha/credolib
credolib/io.py
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/io.py#L29-L55
def load_headers(fsns:List[int]):
    """Load header files
    """
    ip = get_ipython()
    ip.user_ns['_headers'] = {}
    for type_ in ['raw', 'processed']:
        print("Loading %d headers (%s)" % (len(fsns), type_), flush=True)
        processed = type_ == 'processed'
        headers = []
        for f in fsns:
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed == processed]:
                try:
                    headers.append(l.loadheader(f))
                    break
                except FileNotFoundError:
                    continue
        allsamplenames = {h.title for h in headers}
        if not headers:
            print('NO HEADERS READ FOR TYPE "%s"' % type_)
        else:
            print("%d headers (%s) out of %d have been loaded successfully." %
                  (len(headers), type_, len(fsns)))
            print('Read FSN range:', min([h.fsn for h in headers]), 'to',
                  max([h.fsn for h in headers]))
            print("Samples covered by these headers:")
            print(" " + "\n ".join(sorted(allsamplenames)), flush=True)
            if processed:
                ip.user_ns['allsamplenames'] = allsamplenames
        ip.user_ns['_headers'][type_] = headers
[ "def", "load_headers", "(", "fsns", ":", "List", "[", "int", "]", ")", ":", "ip", "=", "get_ipython", "(", ")", "ip", ".", "user_ns", "[", "'_headers'", "]", "=", "{", "}", "for", "type_", "in", "[", "'raw'", ",", "'processed'", "]", ":", "print", "(", "\"Loading %d headers (%s)\"", "%", "(", "len", "(", "fsns", ")", ",", "type_", ")", ",", "flush", "=", "True", ")", "processed", "=", "type_", "==", "'processed'", "headers", "=", "[", "]", "for", "f", "in", "fsns", ":", "for", "l", "in", "[", "l_", "for", "l_", "in", "ip", ".", "user_ns", "[", "'_loaders'", "]", "if", "l_", ".", "processed", "==", "processed", "]", ":", "try", ":", "headers", ".", "append", "(", "l", ".", "loadheader", "(", "f", ")", ")", "break", "except", "FileNotFoundError", ":", "continue", "allsamplenames", "=", "{", "h", ".", "title", "for", "h", "in", "headers", "}", "if", "not", "headers", ":", "print", "(", "'NO HEADERS READ FOR TYPE \"%s\"'", "%", "type_", ")", "else", ":", "print", "(", "\"%d headers (%s) out of %d have been loaded successfully.\"", "%", "(", "len", "(", "headers", ")", ",", "type_", ",", "len", "(", "fsns", ")", ")", ")", "print", "(", "'Read FSN range:'", ",", "min", "(", "[", "h", ".", "fsn", "for", "h", "in", "headers", "]", ")", ",", "'to'", ",", "max", "(", "[", "h", ".", "fsn", "for", "h", "in", "headers", "]", ")", ")", "print", "(", "\"Samples covered by these headers:\"", ")", "print", "(", "\" \"", "+", "\"\\n \"", ".", "join", "(", "sorted", "(", "allsamplenames", ")", ")", ",", "flush", "=", "True", ")", "if", "processed", ":", "ip", ".", "user_ns", "[", "'allsamplenames'", "]", "=", "allsamplenames", "ip", ".", "user_ns", "[", "'_headers'", "]", "[", "type_", "]", "=", "headers" ]
Load header files
[ "Load", "header", "files" ]
python
train
flyte/upnpclient
upnpclient/upnp.py
https://github.com/flyte/upnpclient/blob/5529b950df33c0eaf0c24a9a307cf00fe627d0ad/upnpclient/upnp.py#L456-L543
def validate_arg(arg, argdef): """ Validate an incoming (unicode) string argument according the UPnP spec. Raises UPNPError. """ datatype = argdef['datatype'] reasons = set() ranges = { 'ui1': (int, 0, 255), 'ui2': (int, 0, 65535), 'ui4': (int, 0, 4294967295), 'i1': (int, -128, 127), 'i2': (int, -32768, 32767), 'i4': (int, -2147483648, 2147483647), 'r4': (Decimal, Decimal('3.40282347E+38'), Decimal('1.17549435E-38')) } try: if datatype in set(ranges.keys()): v_type, v_min, v_max = ranges[datatype] if not v_min <= v_type(arg) <= v_max: reasons.add('%r datatype must be a number in the range %s to %s' % ( datatype, v_min, v_max)) elif datatype in {'r8', 'number', 'float', 'fixed.14.4'}: v = Decimal(arg) if v < 0: assert Decimal('-1.79769313486232E308') <= v <= Decimal('4.94065645841247E-324') else: assert Decimal('4.94065645841247E-324') <= v <= Decimal('1.79769313486232E308') elif datatype == 'char': v = arg.decode('utf8') if six.PY2 or isinstance(arg, bytes) else arg assert len(v) == 1 elif datatype == 'string': v = arg.decode("utf8") if six.PY2 or isinstance(arg, bytes) else arg if argdef['allowed_values'] and v not in argdef['allowed_values']: reasons.add('Value %r not in allowed values list' % arg) elif datatype == 'date': v = parse_date(arg) if any((v.hour, v.minute, v.second)): reasons.add("'date' datatype must not contain a time") elif datatype in ('dateTime', 'dateTime.tz'): v = parse_date(arg) if datatype == 'dateTime' and v.tzinfo is not None: reasons.add("'dateTime' datatype must not contain a timezone") elif datatype in ('time', 'time.tz'): now = datetime.datetime.utcnow() v = parse_date(arg, default=now) if v.tzinfo is not None: now += v.utcoffset() if not all(( v.day == now.day, v.month == now.month, v.year == now.year)): reasons.add('%r datatype must not contain a date' % datatype) if datatype == 'time' and v.tzinfo is not None: reasons.add('%r datatype must not have timezone information' % datatype) elif datatype == 'boolean': valid = {'true', 'yes', '1', 'false', 'no', '0'} if arg.lower() not in valid: reasons.add('%r datatype must be one of %s' % (datatype, ','.join(valid))) elif datatype == 'bin.base64': b64decode(arg) elif datatype == 'bin.hex': unhexlify(arg) elif datatype == 'uri': urlparse(arg) elif datatype == 'uuid': if not re.match( r'^[0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12}$', arg, re.I): reasons.add('%r datatype must contain a valid UUID') else: reasons.add("%r datatype is unrecognised." % datatype) except ValueError as exc: reasons.add(str(exc)) return not bool(len(reasons)), reasons
[ "def", "validate_arg", "(", "arg", ",", "argdef", ")", ":", "datatype", "=", "argdef", "[", "'datatype'", "]", "reasons", "=", "set", "(", ")", "ranges", "=", "{", "'ui1'", ":", "(", "int", ",", "0", ",", "255", ")", ",", "'ui2'", ":", "(", "int", ",", "0", ",", "65535", ")", ",", "'ui4'", ":", "(", "int", ",", "0", ",", "4294967295", ")", ",", "'i1'", ":", "(", "int", ",", "-", "128", ",", "127", ")", ",", "'i2'", ":", "(", "int", ",", "-", "32768", ",", "32767", ")", ",", "'i4'", ":", "(", "int", ",", "-", "2147483648", ",", "2147483647", ")", ",", "'r4'", ":", "(", "Decimal", ",", "Decimal", "(", "'3.40282347E+38'", ")", ",", "Decimal", "(", "'1.17549435E-38'", ")", ")", "}", "try", ":", "if", "datatype", "in", "set", "(", "ranges", ".", "keys", "(", ")", ")", ":", "v_type", ",", "v_min", ",", "v_max", "=", "ranges", "[", "datatype", "]", "if", "not", "v_min", "<=", "v_type", "(", "arg", ")", "<=", "v_max", ":", "reasons", ".", "add", "(", "'%r datatype must be a number in the range %s to %s'", "%", "(", "datatype", ",", "v_min", ",", "v_max", ")", ")", "elif", "datatype", "in", "{", "'r8'", ",", "'number'", ",", "'float'", ",", "'fixed.14.4'", "}", ":", "v", "=", "Decimal", "(", "arg", ")", "if", "v", "<", "0", ":", "assert", "Decimal", "(", "'-1.79769313486232E308'", ")", "<=", "v", "<=", "Decimal", "(", "'4.94065645841247E-324'", ")", "else", ":", "assert", "Decimal", "(", "'4.94065645841247E-324'", ")", "<=", "v", "<=", "Decimal", "(", "'1.79769313486232E308'", ")", "elif", "datatype", "==", "'char'", ":", "v", "=", "arg", ".", "decode", "(", "'utf8'", ")", "if", "six", ".", "PY2", "or", "isinstance", "(", "arg", ",", "bytes", ")", "else", "arg", "assert", "len", "(", "v", ")", "==", "1", "elif", "datatype", "==", "'string'", ":", "v", "=", "arg", ".", "decode", "(", "\"utf8\"", ")", "if", "six", ".", "PY2", "or", "isinstance", "(", "arg", ",", "bytes", ")", "else", "arg", "if", "argdef", "[", "'allowed_values'", "]", "and", "v", "not", "in", "argdef", "[", "'allowed_values'", "]", ":", "reasons", ".", "add", "(", "'Value %r not in allowed values list'", "%", "arg", ")", "elif", "datatype", "==", "'date'", ":", "v", "=", "parse_date", "(", "arg", ")", "if", "any", "(", "(", "v", ".", "hour", ",", "v", ".", "minute", ",", "v", ".", "second", ")", ")", ":", "reasons", ".", "add", "(", "\"'date' datatype must not contain a time\"", ")", "elif", "datatype", "in", "(", "'dateTime'", ",", "'dateTime.tz'", ")", ":", "v", "=", "parse_date", "(", "arg", ")", "if", "datatype", "==", "'dateTime'", "and", "v", ".", "tzinfo", "is", "not", "None", ":", "reasons", ".", "add", "(", "\"'dateTime' datatype must not contain a timezone\"", ")", "elif", "datatype", "in", "(", "'time'", ",", "'time.tz'", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "v", "=", "parse_date", "(", "arg", ",", "default", "=", "now", ")", "if", "v", ".", "tzinfo", "is", "not", "None", ":", "now", "+=", "v", ".", "utcoffset", "(", ")", "if", "not", "all", "(", "(", "v", ".", "day", "==", "now", ".", "day", ",", "v", ".", "month", "==", "now", ".", "month", ",", "v", ".", "year", "==", "now", ".", "year", ")", ")", ":", "reasons", ".", "add", "(", "'%r datatype must not contain a date'", "%", "datatype", ")", "if", "datatype", "==", "'time'", "and", "v", ".", "tzinfo", "is", "not", "None", ":", "reasons", ".", "add", "(", "'%r datatype must not have timezone information'", "%", "datatype", ")", "elif", "datatype", "==", "'boolean'", ":", "valid", "=", "{", "'true'", ",", "'yes'", ",", "'1'", ",", 
"'false'", ",", "'no'", ",", "'0'", "}", "if", "arg", ".", "lower", "(", ")", "not", "in", "valid", ":", "reasons", ".", "add", "(", "'%r datatype must be one of %s'", "%", "(", "datatype", ",", "','", ".", "join", "(", "valid", ")", ")", ")", "elif", "datatype", "==", "'bin.base64'", ":", "b64decode", "(", "arg", ")", "elif", "datatype", "==", "'bin.hex'", ":", "unhexlify", "(", "arg", ")", "elif", "datatype", "==", "'uri'", ":", "urlparse", "(", "arg", ")", "elif", "datatype", "==", "'uuid'", ":", "if", "not", "re", ".", "match", "(", "r'^[0-9a-f]{8}\\-[0-9a-f]{4}\\-[0-9a-f]{4}\\-[0-9a-f]{4}\\-[0-9a-f]{12}$'", ",", "arg", ",", "re", ".", "I", ")", ":", "reasons", ".", "add", "(", "'%r datatype must contain a valid UUID'", ")", "else", ":", "reasons", ".", "add", "(", "\"%r datatype is unrecognised.\"", "%", "datatype", ")", "except", "ValueError", "as", "exc", ":", "reasons", ".", "add", "(", "str", "(", "exc", ")", ")", "return", "not", "bool", "(", "len", "(", "reasons", ")", ")", ",", "reasons" ]
Validate an incoming (unicode) string argument according the UPnP spec. Raises UPNPError.
[ "Validate", "an", "incoming", "(", "unicode", ")", "string", "argument", "according", "the", "UPnP", "spec", ".", "Raises", "UPNPError", "." ]
python
train
koordinates/python-client
koordinates/layers.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L271-L284
def publish(self, version_id=None):
    """
    Creates a publish task just for this version, which publishes as soon as any import is complete.

    :return: the publish task
    :rtype: Publish
    :raises Conflict: If the version is already published, or already has a publish job.
    """
    if not version_id:
        version_id = self.version.id
    target_url = self._client.get_url('VERSION', 'POST', 'publish',
                                      {'layer_id': self.id, 'version_id': version_id})
    r = self._client.request('POST', target_url, json={})
    return self._client.get_manager(Publish).create_from_result(r.json())
[ "def", "publish", "(", "self", ",", "version_id", "=", "None", ")", ":", "if", "not", "version_id", ":", "version_id", "=", "self", ".", "version", ".", "id", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'VERSION'", ",", "'POST'", ",", "'publish'", ",", "{", "'layer_id'", ":", "self", ".", "id", ",", "'version_id'", ":", "version_id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'POST'", ",", "target_url", ",", "json", "=", "{", "}", ")", "return", "self", ".", "_client", ".", "get_manager", "(", "Publish", ")", ".", "create_from_result", "(", "r", ".", "json", "(", ")", ")" ]
Creates a publish task just for this version, which publishes as soon as any import is complete. :return: the publish task :rtype: Publish :raises Conflict: If the version is already published, or already has a publish job.
[ "Creates", "a", "publish", "task", "just", "for", "this", "version", "which", "publishes", "as", "soon", "as", "any", "import", "is", "complete", "." ]
python
train
Enteee/pdml2flow
pdml2flow/autovivification.py
https://github.com/Enteee/pdml2flow/blob/bc9efe379b0b2406bfbbbd8e0f678b1f63805c66/pdml2flow/autovivification.py#L57-L67
def cast_dicts(self, to=DEFAULT, d=DEFAULT):
    """Returns a copy of d with all dicts casted to the type 'to'."""
    if to is DEFAULT:
        to = type(self)
    if d is DEFAULT:
        d = self
    if isinstance(d, list):
        return [v for v in (self.cast_dicts(to, v) for v in d)]
    elif isinstance(d, dict):
        return to({k: v for k, v in ((k, self.cast_dicts(to, v)) for k, v in d.items())})
    return d
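A standalone sketch of the same recursion, stripped of the class's DEFAULT sentinel, to show the effect on a nested structure:

from collections import OrderedDict

def cast_nested_dicts(d, to=dict):
    # Recursively rebuild lists and cast every dict to the target type.
    if isinstance(d, list):
        return [cast_nested_dicts(v, to) for v in d]
    if isinstance(d, dict):
        return to({k: cast_nested_dicts(v, to) for k, v in d.items()})
    return d

data = {'a': {'b': [1, {'c': 2}]}}
result = cast_nested_dicts(data, OrderedDict)
print(type(result), type(result['a']), type(result['a']['b'][1]))
# every nested mapping is now an OrderedDict; lists and scalars are untouched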
[ "def", "cast_dicts", "(", "self", ",", "to", "=", "DEFAULT", ",", "d", "=", "DEFAULT", ")", ":", "if", "to", "is", "DEFAULT", ":", "to", "=", "type", "(", "self", ")", "if", "d", "is", "DEFAULT", ":", "d", "=", "self", "if", "isinstance", "(", "d", ",", "list", ")", ":", "return", "[", "v", "for", "v", "in", "(", "self", ".", "cast_dicts", "(", "to", ",", "v", ")", "for", "v", "in", "d", ")", "]", "elif", "isinstance", "(", "d", ",", "dict", ")", ":", "return", "to", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "self", ".", "cast_dicts", "(", "to", ",", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "}", ")", "return", "d" ]
Returns a copy of d with all dicts casted to the type 'to'.
[ "Returns", "a", "copy", "of", "d", "with", "all", "dicts", "casted", "to", "the", "type", "to", "." ]
python
train
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_preprocessing.py
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_preprocessing.py#L7-L23
def rolling_window_sequences(X, index, window_size, target_size, target_column):
    """Create rolling window sequences out of timeseries data."""
    out_X = list()
    out_y = list()
    X_index = list()
    y_index = list()

    target = X[:, target_column]

    for start in range(len(X) - window_size - target_size + 1):
        end = start + window_size
        out_X.append(X[start:end])
        out_y.append(target[end:end + target_size])
        X_index.append(index[start])
        y_index.append(index[end])

    return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)
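A small usage sketch, assuming the function above is in scope; the numbers are chosen so the windows are easy to check by eye:

import numpy as np

# 6 time steps, 1 feature, window_size=3, target_size=1, target_column=0.
X = np.arange(6, dtype=float).reshape(-1, 1)
index = np.arange(6)

out_X, out_y, X_index, y_index = rolling_window_sequences(
    X, index, window_size=3, target_size=1, target_column=0)

print(out_X.shape)        # (3, 3, 1): windows [0,1,2], [1,2,3], [2,3,4]
print(out_y.ravel())      # [3. 4. 5.]: the value right after each window
print(X_index, y_index)   # [0 1 2] [3 4 5]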
[ "def", "rolling_window_sequences", "(", "X", ",", "index", ",", "window_size", ",", "target_size", ",", "target_column", ")", ":", "out_X", "=", "list", "(", ")", "out_y", "=", "list", "(", ")", "X_index", "=", "list", "(", ")", "y_index", "=", "list", "(", ")", "target", "=", "X", "[", ":", ",", "target_column", "]", "for", "start", "in", "range", "(", "len", "(", "X", ")", "-", "window_size", "-", "target_size", "+", "1", ")", ":", "end", "=", "start", "+", "window_size", "out_X", ".", "append", "(", "X", "[", "start", ":", "end", "]", ")", "out_y", ".", "append", "(", "target", "[", "end", ":", "end", "+", "target_size", "]", ")", "X_index", ".", "append", "(", "index", "[", "start", "]", ")", "y_index", ".", "append", "(", "index", "[", "end", "]", ")", "return", "np", ".", "asarray", "(", "out_X", ")", ",", "np", ".", "asarray", "(", "out_y", ")", ",", "np", ".", "asarray", "(", "X_index", ")", ",", "np", ".", "asarray", "(", "y_index", ")" ]
Create rolling window sequences out of timeseries data.
[ "Create", "rolling", "window", "sequences", "out", "of", "timeseries", "data", "." ]
python
train
pgmpy/pgmpy
pgmpy/base/DAG.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/base/DAG.py#L127-L170
def add_nodes_from(self, nodes, weights=None):
    """
    Add multiple nodes to the Graph.

    **The behaviour of adding weights is different than in networkx.

    Parameters
    ----------
    nodes: iterable container
        A container of nodes (list, dict, set, or any hashable python
        object).

    weights: list, tuple (default=None)
        A container of weights (int, float). The weight value at index i
        is associated with the variable at index i.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> G = DAG()
    >>> G.add_nodes_from(nodes=['A', 'B', 'C'])
    >>> sorted(G.nodes())
    ['A', 'B', 'C']

    Adding nodes with weights:
    >>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
    >>> G.node['D']
    {'weight': 0.3}
    >>> G.node['E']
    {'weight': 0.6}
    >>> G.node['A']
    {'weight': None}
    """
    nodes = list(nodes)

    if weights:
        if len(nodes) != len(weights):
            raise ValueError("The number of elements in nodes and weights "
                             "should be equal.")
        for index in range(len(nodes)):
            self.add_node(node=nodes[index], weight=weights[index])
    else:
        for node in nodes:
            self.add_node(node=node)
[ "def", "add_nodes_from", "(", "self", ",", "nodes", ",", "weights", "=", "None", ")", ":", "nodes", "=", "list", "(", "nodes", ")", "if", "weights", ":", "if", "len", "(", "nodes", ")", "!=", "len", "(", "weights", ")", ":", "raise", "ValueError", "(", "\"The number of elements in nodes and weights\"", "\"should be equal.\"", ")", "for", "index", "in", "range", "(", "len", "(", "nodes", ")", ")", ":", "self", ".", "add_node", "(", "node", "=", "nodes", "[", "index", "]", ",", "weight", "=", "weights", "[", "index", "]", ")", "else", ":", "for", "node", "in", "nodes", ":", "self", ".", "add_node", "(", "node", "=", "node", ")" ]
Add multiple nodes to the Graph.

**The behaviour of adding weights is different than in networkx.

Parameters
----------
nodes: iterable container
    A container of nodes (list, dict, set, or any hashable python
    object).

weights: list, tuple (default=None)
    A container of weights (int, float). The weight value at index i
    is associated with the variable at index i.

Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']

Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
[ "Add", "multiple", "nodes", "to", "the", "Graph", "." ]
python
train
tommyod/streprogen
streprogen/program.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/program.py#L415-L439
def _initialize_render_dictionary(self):
    """Initialize a dictionary for rendered values.

    Examples
    -------
    >>> program = Program('My training program')
    >>> program._initialize_render_dictionary()
    >>> program._rendered is False
    False
    """

    self._rendered = dict()

    # Iterate over all weeks
    for week in range(1, self.duration + 1):
        self._rendered[week] = dict()

        # Iterate over all days
        for day in self.days:
            self._rendered[week][day] = dict()

            # Iterate over all main exercises
            for dynamic_ex in day.dynamic_exercises:
                self._rendered[week][day][dynamic_ex] = dict()
[ "def", "_initialize_render_dictionary", "(", "self", ")", ":", "self", ".", "_rendered", "=", "dict", "(", ")", "# Iterate over all weeks", "for", "week", "in", "range", "(", "1", ",", "self", ".", "duration", "+", "1", ")", ":", "self", ".", "_rendered", "[", "week", "]", "=", "dict", "(", ")", "# Iterate over all days", "for", "day", "in", "self", ".", "days", ":", "self", ".", "_rendered", "[", "week", "]", "[", "day", "]", "=", "dict", "(", ")", "# Iterate over all main exercises", "for", "dynamic_ex", "in", "day", ".", "dynamic_exercises", ":", "self", ".", "_rendered", "[", "week", "]", "[", "day", "]", "[", "dynamic_ex", "]", "=", "dict", "(", ")" ]
Initialize a dictionary for rendered values. Examples ------- >>> program = Program('My training program') >>> program._initialize_render_dictionary() >>> program._rendered is False False
[ "Initialize", "a", "dictionary", "for", "rendered", "values", ".", "Examples", "-------", ">>>", "program", "=", "Program", "(", "My", "training", "program", ")", ">>>", "program", ".", "_initialize_render_dictionary", "()", ">>>", "program", ".", "_rendered", "is", "False", "False" ]
python
train
materialsproject/pymatgen
pymatgen/electronic_structure/dos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/dos.py#L635-L658
def get_site_t2g_eg_resolved_dos(self, site):
    """
    Get the t2g, eg projected DOS for a particular site.

    Args:
        site: Site in Structure associated with CompleteDos.

    Returns:
        A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
        for the site.
    """
    t2g_dos = []
    eg_dos = []
    for s, atom_dos in self.pdos.items():
        if s == site:
            for orb, pdos in atom_dos.items():
                if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
                    t2g_dos.append(pdos)
                elif orb in (Orbital.dx2, Orbital.dz2):
                    eg_dos.append(pdos)
    return {"t2g": Dos(self.efermi, self.energies,
                       functools.reduce(add_densities, t2g_dos)),
            "e_g": Dos(self.efermi, self.energies,
                       functools.reduce(add_densities, eg_dos))}
[ "def", "get_site_t2g_eg_resolved_dos", "(", "self", ",", "site", ")", ":", "t2g_dos", "=", "[", "]", "eg_dos", "=", "[", "]", "for", "s", ",", "atom_dos", "in", "self", ".", "pdos", ".", "items", "(", ")", ":", "if", "s", "==", "site", ":", "for", "orb", ",", "pdos", "in", "atom_dos", ".", "items", "(", ")", ":", "if", "orb", "in", "(", "Orbital", ".", "dxy", ",", "Orbital", ".", "dxz", ",", "Orbital", ".", "dyz", ")", ":", "t2g_dos", ".", "append", "(", "pdos", ")", "elif", "orb", "in", "(", "Orbital", ".", "dx2", ",", "Orbital", ".", "dz2", ")", ":", "eg_dos", ".", "append", "(", "pdos", ")", "return", "{", "\"t2g\"", ":", "Dos", "(", "self", ".", "efermi", ",", "self", ".", "energies", ",", "functools", ".", "reduce", "(", "add_densities", ",", "t2g_dos", ")", ")", ",", "\"e_g\"", ":", "Dos", "(", "self", ".", "efermi", ",", "self", ".", "energies", ",", "functools", ".", "reduce", "(", "add_densities", ",", "eg_dos", ")", ")", "}" ]
Get the t2g, eg projected DOS for a particular site. Args: site: Site in Structure associated with CompleteDos. Returns: A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS for the site.
[ "Get", "the", "t2g", "eg", "projected", "DOS", "for", "a", "particular", "site", "." ]
python
train
google/grumpy
third_party/stdlib/difflib.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/difflib.py#L764-L810
def get_close_matches(word, possibilities, n=3, cutoff=0.6): """Use SequenceMatcher to return list of the best "good enough" matches. word is a sequence for which close matches are desired (typically a string). possibilities is a list of sequences against which to match word (typically a list of strings). Optional arg n (default 3) is the maximum number of close matches to return. n must be > 0. Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities that don't score at least that similar to word are ignored. The best (no more than n) matches among the possibilities are returned in a list, sorted by similarity score, most similar first. >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) ['apple', 'ape'] >>> import keyword as _keyword >>> get_close_matches("wheel", _keyword.kwlist) ['while'] >>> get_close_matches("apple", _keyword.kwlist) [] >>> get_close_matches("accept", _keyword.kwlist) ['except'] """ if not n > 0: raise ValueError("n must be > 0: %r" % (n,)) if not 0.0 <= cutoff <= 1.0: raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,)) result = [] s = SequenceMatcher() s.set_seq2(word) for x in possibilities: s.set_seq1(x) if s.real_quick_ratio() >= cutoff and \ s.quick_ratio() >= cutoff and \ s.ratio() >= cutoff: result.append((s.ratio(), x)) # Move the best scorers to head of list result = heapq.nlargest(n, result) # Strip scores for the best n matches return [x for score, x in result]
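This is a vendored copy of the standard library's difflib.get_close_matches, so the behaviour can be exercised directly against stdlib difflib:

import difflib

print(difflib.get_close_matches("appel", ["ape", "apple", "peach", "puppy"]))
# ['apple', 'ape']

# Raising the cutoff filters out weaker candidates entirely:
print(difflib.get_close_matches("appel", ["ape", "apple", "peach", "puppy"], cutoff=0.8))
# ['apple']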
[ "def", "get_close_matches", "(", "word", ",", "possibilities", ",", "n", "=", "3", ",", "cutoff", "=", "0.6", ")", ":", "if", "not", "n", ">", "0", ":", "raise", "ValueError", "(", "\"n must be > 0: %r\"", "%", "(", "n", ",", ")", ")", "if", "not", "0.0", "<=", "cutoff", "<=", "1.0", ":", "raise", "ValueError", "(", "\"cutoff must be in [0.0, 1.0]: %r\"", "%", "(", "cutoff", ",", ")", ")", "result", "=", "[", "]", "s", "=", "SequenceMatcher", "(", ")", "s", ".", "set_seq2", "(", "word", ")", "for", "x", "in", "possibilities", ":", "s", ".", "set_seq1", "(", "x", ")", "if", "s", ".", "real_quick_ratio", "(", ")", ">=", "cutoff", "and", "s", ".", "quick_ratio", "(", ")", ">=", "cutoff", "and", "s", ".", "ratio", "(", ")", ">=", "cutoff", ":", "result", ".", "append", "(", "(", "s", ".", "ratio", "(", ")", ",", "x", ")", ")", "# Move the best scorers to head of list", "result", "=", "heapq", ".", "nlargest", "(", "n", ",", "result", ")", "# Strip scores for the best n matches", "return", "[", "x", "for", "score", ",", "x", "in", "result", "]" ]
Use SequenceMatcher to return list of the best "good enough" matches. word is a sequence for which close matches are desired (typically a string). possibilities is a list of sequences against which to match word (typically a list of strings). Optional arg n (default 3) is the maximum number of close matches to return. n must be > 0. Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities that don't score at least that similar to word are ignored. The best (no more than n) matches among the possibilities are returned in a list, sorted by similarity score, most similar first. >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) ['apple', 'ape'] >>> import keyword as _keyword >>> get_close_matches("wheel", _keyword.kwlist) ['while'] >>> get_close_matches("apple", _keyword.kwlist) [] >>> get_close_matches("accept", _keyword.kwlist) ['except']
[ "Use", "SequenceMatcher", "to", "return", "list", "of", "the", "best", "good", "enough", "matches", "." ]
python
valid
jpmml/sklearn2pmml
sklearn2pmml/__init__.py
https://github.com/jpmml/sklearn2pmml/blob/0a455a54323989473c16f9efc9a77cef3ff64c1e/sklearn2pmml/__init__.py#L181-L258
def sklearn2pmml(pipeline, pmml, user_classpath = [], with_repr = False, debug = False, java_encoding = "UTF-8"): """Converts a fitted Scikit-Learn pipeline to PMML. Parameters: ---------- pipeline: PMMLPipeline The pipeline. pmml: string The path to where the PMML document should be stored. user_classpath: list of strings, optional The paths to JAR files that provide custom Transformer, Selector and/or Estimator converter classes. The JPMML-SkLearn classpath is constructed by appending user JAR files to package JAR files. with_repr: boolean, optional If true, insert the string representation of pipeline into the PMML document. debug: boolean, optional If true, print information about the conversion process. java_encoding: string, optional The character encoding to use for decoding Java output and error byte streams. """ if debug: java_version = _java_version(java_encoding) if java_version is None: java_version = ("java", "N/A") print("python: {0}".format(platform.python_version())) print("sklearn: {0}".format(sklearn.__version__)) print("sklearn.externals.joblib: {0}".format(joblib.__version__)) print("pandas: {0}".format(pandas.__version__)) print("sklearn_pandas: {0}".format(sklearn_pandas.__version__)) print("sklearn2pmml: {0}".format(__version__)) print("{0}: {1}".format(java_version[0], java_version[1])) if not isinstance(pipeline, PMMLPipeline): raise TypeError("The pipeline object is not an instance of " + PMMLPipeline.__name__ + ". Use the 'sklearn2pmml.make_pmml_pipeline(obj)' utility function to translate a regular Scikit-Learn estimator or pipeline to a PMML pipeline") estimator = pipeline._final_estimator cmd = ["java", "-cp", os.pathsep.join(_classpath(user_classpath)), "org.jpmml.sklearn.Main"] dumps = [] try: if with_repr: pipeline.repr_ = repr(pipeline) # if isinstance(estimator, H2OEstimator): if hasattr(estimator, "download_mojo"): estimator_mojo = estimator.download_mojo() dumps.append(estimator_mojo) estimator._mojo_path = estimator_mojo pipeline_pkl = _dump(pipeline, "pipeline") cmd.extend(["--pkl-pipeline-input", pipeline_pkl]) dumps.append(pipeline_pkl) cmd.extend(["--pmml-output", pmml]) if debug: print("Executing command:\n{0}".format(" ".join(cmd))) try: process = Popen(cmd, stdout = PIPE, stderr = PIPE, bufsize = 1) except OSError: raise RuntimeError("Java is not installed, or the Java executable is not on system path") output, error = process.communicate() retcode = process.poll() if debug or retcode: if(len(output) > 0): print("Standard output:\n{0}".format(_decode(output, java_encoding))) else: print("Standard output is empty") if(len(error) > 0): print("Standard error:\n{0}".format(_decode(error, java_encoding))) else: print("Standard error is empty") if retcode: raise RuntimeError("The JPMML-SkLearn conversion application has failed. The Java executable should have printed more information about the failure into its standard output and/or standard error streams") finally: if debug: print("Preserved joblib dump file(s): {0}".format(" ".join(dumps))) else: for dump in dumps: os.remove(dump)
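A hedged end-to-end sketch of how the converter is typically invoked. It assumes sklearn2pmml plus a Java runtime are installed; the PMMLPipeline import path can differ between versions, and the column names and output path are invented:

import pandas
from sklearn.tree import DecisionTreeClassifier
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.pipeline import PMMLPipeline

df = pandas.DataFrame({"x1": [1, 2, 3, 4],
                       "x2": [0, 1, 0, 1],
                       "y":  [0, 0, 1, 1]})

pipeline = PMMLPipeline([
    ("classifier", DecisionTreeClassifier()),
])
pipeline.fit(df[["x1", "x2"]], df["y"])

# Writes DecisionTree.pmml next to the script; with_repr embeds repr(pipeline).
sklearn2pmml(pipeline, "DecisionTree.pmml", with_repr=True)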
[ "def", "sklearn2pmml", "(", "pipeline", ",", "pmml", ",", "user_classpath", "=", "[", "]", ",", "with_repr", "=", "False", ",", "debug", "=", "False", ",", "java_encoding", "=", "\"UTF-8\"", ")", ":", "if", "debug", ":", "java_version", "=", "_java_version", "(", "java_encoding", ")", "if", "java_version", "is", "None", ":", "java_version", "=", "(", "\"java\"", ",", "\"N/A\"", ")", "print", "(", "\"python: {0}\"", ".", "format", "(", "platform", ".", "python_version", "(", ")", ")", ")", "print", "(", "\"sklearn: {0}\"", ".", "format", "(", "sklearn", ".", "__version__", ")", ")", "print", "(", "\"sklearn.externals.joblib: {0}\"", ".", "format", "(", "joblib", ".", "__version__", ")", ")", "print", "(", "\"pandas: {0}\"", ".", "format", "(", "pandas", ".", "__version__", ")", ")", "print", "(", "\"sklearn_pandas: {0}\"", ".", "format", "(", "sklearn_pandas", ".", "__version__", ")", ")", "print", "(", "\"sklearn2pmml: {0}\"", ".", "format", "(", "__version__", ")", ")", "print", "(", "\"{0}: {1}\"", ".", "format", "(", "java_version", "[", "0", "]", ",", "java_version", "[", "1", "]", ")", ")", "if", "not", "isinstance", "(", "pipeline", ",", "PMMLPipeline", ")", ":", "raise", "TypeError", "(", "\"The pipeline object is not an instance of \"", "+", "PMMLPipeline", ".", "__name__", "+", "\". Use the 'sklearn2pmml.make_pmml_pipeline(obj)' utility function to translate a regular Scikit-Learn estimator or pipeline to a PMML pipeline\"", ")", "estimator", "=", "pipeline", ".", "_final_estimator", "cmd", "=", "[", "\"java\"", ",", "\"-cp\"", ",", "os", ".", "pathsep", ".", "join", "(", "_classpath", "(", "user_classpath", ")", ")", ",", "\"org.jpmml.sklearn.Main\"", "]", "dumps", "=", "[", "]", "try", ":", "if", "with_repr", ":", "pipeline", ".", "repr_", "=", "repr", "(", "pipeline", ")", "# if isinstance(estimator, H2OEstimator):", "if", "hasattr", "(", "estimator", ",", "\"download_mojo\"", ")", ":", "estimator_mojo", "=", "estimator", ".", "download_mojo", "(", ")", "dumps", ".", "append", "(", "estimator_mojo", ")", "estimator", ".", "_mojo_path", "=", "estimator_mojo", "pipeline_pkl", "=", "_dump", "(", "pipeline", ",", "\"pipeline\"", ")", "cmd", ".", "extend", "(", "[", "\"--pkl-pipeline-input\"", ",", "pipeline_pkl", "]", ")", "dumps", ".", "append", "(", "pipeline_pkl", ")", "cmd", ".", "extend", "(", "[", "\"--pmml-output\"", ",", "pmml", "]", ")", "if", "debug", ":", "print", "(", "\"Executing command:\\n{0}\"", ".", "format", "(", "\" \"", ".", "join", "(", "cmd", ")", ")", ")", "try", ":", "process", "=", "Popen", "(", "cmd", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "bufsize", "=", "1", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "\"Java is not installed, or the Java executable is not on system path\"", ")", "output", ",", "error", "=", "process", ".", "communicate", "(", ")", "retcode", "=", "process", ".", "poll", "(", ")", "if", "debug", "or", "retcode", ":", "if", "(", "len", "(", "output", ")", ">", "0", ")", ":", "print", "(", "\"Standard output:\\n{0}\"", ".", "format", "(", "_decode", "(", "output", ",", "java_encoding", ")", ")", ")", "else", ":", "print", "(", "\"Standard output is empty\"", ")", "if", "(", "len", "(", "error", ")", ">", "0", ")", ":", "print", "(", "\"Standard error:\\n{0}\"", ".", "format", "(", "_decode", "(", "error", ",", "java_encoding", ")", ")", ")", "else", ":", "print", "(", "\"Standard error is empty\"", ")", "if", "retcode", ":", "raise", "RuntimeError", "(", "\"The JPMML-SkLearn conversion application 
has failed. The Java executable should have printed more information about the failure into its standard output and/or standard error streams\"", ")", "finally", ":", "if", "debug", ":", "print", "(", "\"Preserved joblib dump file(s): {0}\"", ".", "format", "(", "\" \"", ".", "join", "(", "dumps", ")", ")", ")", "else", ":", "for", "dump", "in", "dumps", ":", "os", ".", "remove", "(", "dump", ")" ]
Converts a fitted Scikit-Learn pipeline to PMML. Parameters: ---------- pipeline: PMMLPipeline The pipeline. pmml: string The path to where the PMML document should be stored. user_classpath: list of strings, optional The paths to JAR files that provide custom Transformer, Selector and/or Estimator converter classes. The JPMML-SkLearn classpath is constructed by appending user JAR files to package JAR files. with_repr: boolean, optional If true, insert the string representation of pipeline into the PMML document. debug: boolean, optional If true, print information about the conversion process. java_encoding: string, optional The character encoding to use for decoding Java output and error byte streams.
[ "Converts", "a", "fitted", "Scikit", "-", "Learn", "pipeline", "to", "PMML", "." ]
python
valid
Robpol86/libnl
libnl/msg.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/msg.py#L86-L99
def nlmsg_attrdata(nlh, hdrlen):
    """Head of attributes data.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L143

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).

    Returns:
    First attribute (nlattr class instance with others in its payload).
    """
    data = nlmsg_data(nlh)
    return libnl.linux_private.netlink.nlattr(bytearray_ptr(data, libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen)))
[ "def", "nlmsg_attrdata", "(", "nlh", ",", "hdrlen", ")", ":", "data", "=", "nlmsg_data", "(", "nlh", ")", "return", "libnl", ".", "linux_private", ".", "netlink", ".", "nlattr", "(", "bytearray_ptr", "(", "data", ",", "libnl", ".", "linux_private", ".", "netlink", ".", "NLMSG_ALIGN", "(", "hdrlen", ")", ")", ")" ]
Head of attributes data. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L143 Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). Returns: First attribute (nlattr class instance with others in its payload).
[ "Head", "of", "attributes", "data", "." ]
python
train
zsimic/runez
src/runez/logsetup.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L513-L522
def _get_formatter(fmt):
    """
    Args:
        fmt (str | unicode): Format specification

    Returns:
        (logging.Formatter): Associated logging formatter
    """
    fmt = _replace_and_pad(fmt, "%(timezone)s", LogManager.spec.timezone)
    return logging.Formatter(fmt)
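A standalone sketch of the idea, with a plain str.replace standing in for runez's _replace_and_pad helper (an assumption for illustration, not the real implementation):

import logging

def make_formatter(fmt, timezone="UTC"):
    # Substitute the custom %(timezone)s marker before logging sees it,
    # since logging.Formatter only knows the standard LogRecord fields.
    return logging.Formatter(fmt.replace("%(timezone)s", timezone))

handler = logging.StreamHandler()
handler.setFormatter(make_formatter("%(asctime)s %(timezone)s %(levelname)s %(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("hello")   # e.g. "2024-01-01 12:00:00,000 UTC WARNING hello"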
[ "def", "_get_formatter", "(", "fmt", ")", ":", "fmt", "=", "_replace_and_pad", "(", "fmt", ",", "\"%(timezone)s\"", ",", "LogManager", ".", "spec", ".", "timezone", ")", "return", "logging", ".", "Formatter", "(", "fmt", ")" ]
Args: fmt (str | unicode): Format specification Returns: (logging.Formatter): Associated logging formatter
[ "Args", ":", "fmt", "(", "str", "|", "unicode", ")", ":", "Format", "specification" ]
python
train
ultrabug/py3status
py3status/formatter.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/formatter.py#L483-L497
def update_commands(self, commands_str):
    """
    update with commands from the block
    """
    commands = dict(parse_qsl(commands_str, keep_blank_values=True))
    _if = commands.get("if", self._if)
    if _if:
        self._if = Condition(_if)
    self._set_int(commands, "max_length")
    self._set_int(commands, "min_length")
    self.color = self._check_color(commands.get("color"))
    self.not_zero = "not_zero" in commands or self.not_zero
    self.show = "show" in commands or self.show
    self.soft = "soft" in commands or self.soft
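The commands arrive as a URL-style query string; a quick standard-library illustration of why keep_blank_values matters for flag-style commands such as show:

from urllib.parse import parse_qsl

commands = dict(parse_qsl("max_length=10&color=bad&show", keep_blank_values=True))
print(commands)             # {'max_length': '10', 'color': 'bad', 'show': ''}
print("show" in commands)   # True, so the flag is detected even with no value

# Without keep_blank_values the bare "show" flag would be dropped:
print(dict(parse_qsl("max_length=10&color=bad&show")))
# {'max_length': '10', 'color': 'bad'}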
[ "def", "update_commands", "(", "self", ",", "commands_str", ")", ":", "commands", "=", "dict", "(", "parse_qsl", "(", "commands_str", ",", "keep_blank_values", "=", "True", ")", ")", "_if", "=", "commands", ".", "get", "(", "\"if\"", ",", "self", ".", "_if", ")", "if", "_if", ":", "self", ".", "_if", "=", "Condition", "(", "_if", ")", "self", ".", "_set_int", "(", "commands", ",", "\"max_length\"", ")", "self", ".", "_set_int", "(", "commands", ",", "\"min_length\"", ")", "self", ".", "color", "=", "self", ".", "_check_color", "(", "commands", ".", "get", "(", "\"color\"", ")", ")", "self", ".", "not_zero", "=", "\"not_zero\"", "in", "commands", "or", "self", ".", "not_zero", "self", ".", "show", "=", "\"show\"", "in", "commands", "or", "self", ".", "show", "self", ".", "soft", "=", "\"soft\"", "in", "commands", "or", "self", ".", "soft" ]
update with commands from the block
[ "update", "with", "commands", "from", "the", "block" ]
python
train
bitesofcode/projexui
projexui/completers/xquerycompleter.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/completers/xquerycompleter.py#L49-L65
def pathFromIndex( self, index ):
    """
    Returns the joined path from the given model index.  This will
    join together the full path with periods.

    :param      index | <QModelIndex>

    :return     <str>
    """
    item = self._model.itemFromIndex(index)
    out = []
    while ( item ):
        out.append(nativestring(item.text()))
        item = item.parent()

    return '.'.join(reversed(out))
[ "def", "pathFromIndex", "(", "self", ",", "index", ")", ":", "item", "=", "self", ".", "_model", ".", "itemFromIndex", "(", "index", ")", "out", "=", "[", "]", "while", "(", "item", ")", ":", "out", ".", "append", "(", "nativestring", "(", "item", ".", "text", "(", ")", ")", ")", "item", "=", "item", ".", "parent", "(", ")", "return", "'.'", ".", "join", "(", "reversed", "(", "out", ")", ")" ]
Returns the joined path from the given model index. This will join together the full path with periods. :param index | <QModelIndex> :return <str>
[ "Returns", "the", "joined", "path", "from", "the", "given", "model", "index", ".", "This", "will", "join", "together", "the", "full", "path", "with", "periods", ".", ":", "param", "index", "|", "<QModelIndex", ">", ":", "return", "<str", ">" ]
python
train
aiidateam/aiida-codtools
aiida_codtools/workflows/cif_clean.py
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/workflows/cif_clean.py#L103-L110
def inspect_filter_calculation(self):
    """Inspect the result of the CifFilterCalculation, verifying that it produced a CifData output node."""
    try:
        node = self.ctx.cif_filter
        self.ctx.cif = node.outputs.cif
    except exceptions.NotExistent:
        self.report('aborting: CifFilterCalculation<{}> did not return the required cif output'.format(node.uuid))
        return self.exit_codes.ERROR_CIF_FILTER_FAILED
[ "def", "inspect_filter_calculation", "(", "self", ")", ":", "try", ":", "node", "=", "self", ".", "ctx", ".", "cif_filter", "self", ".", "ctx", ".", "cif", "=", "node", ".", "outputs", ".", "cif", "except", "exceptions", ".", "NotExistent", ":", "self", ".", "report", "(", "'aborting: CifFilterCalculation<{}> did not return the required cif output'", ".", "format", "(", "node", ".", "uuid", ")", ")", "return", "self", ".", "exit_codes", ".", "ERROR_CIF_FILTER_FAILED" ]
Inspect the result of the CifFilterCalculation, verifying that it produced a CifData output node.
[ "Inspect", "the", "result", "of", "the", "CifFilterCalculation", "verifying", "that", "it", "produced", "a", "CifData", "output", "node", "." ]
python
train
jobovy/galpy
galpy/orbit/Orbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L2453-L2489
def pmra(self,*args,**kwargs): """ NAME: pmra PURPOSE: return proper motion in right ascension (in mas/yr) INPUT: t - (optional) time at which to get pmra (can be Quantity) obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_ra(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU) """ out= self._orb.pmra(*args,**kwargs) if len(out) == 1: return out[0] else: return out
[ "def", "pmra", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "out", "=", "self", ".", "_orb", ".", "pmra", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "out", ")", "==", "1", ":", "return", "out", "[", "0", "]", "else", ":", "return", "out" ]
NAME: pmra PURPOSE: return proper motion in right ascension (in mas/yr) INPUT: t - (optional) time at which to get pmra (can be Quantity) obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_ra(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L589-L596
def send_keys(self, selector, new_value, by=By.CSS_SELECTOR,
              timeout=settings.LARGE_TIMEOUT):
    """ Same as add_text() -> more reliable, but less name confusion. """
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    self.add_text(selector, new_value, by=by, timeout=timeout)
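A hypothetical SeleniumBase test using this wrapper; the URL and selectors are invented for illustration:

from seleniumbase import BaseCase

class SearchTest(BaseCase):
    def test_search_box(self):
        self.open("https://example.com/")
        # CSS selector by default; an XPath selector such as
        # "//input[@name='q']" would be detected and switched to By.XPATH.
        self.send_keys("input#search", "seleniumbase\n")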
[ "def", "send_keys", "(", "self", ",", "selector", ",", "new_value", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "LARGE_TIMEOUT", ")", ":", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "LARGE_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ")", ":", "by", "=", "By", ".", "XPATH", "self", ".", "add_text", "(", "selector", ",", "new_value", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")" ]
Same as add_text() -> more reliable, but less name confusion.
[ "Same", "as", "add_text", "()", "-", ">", "more", "reliable", "but", "less", "name", "confusion", "." ]
python
train
seleniumbase/SeleniumBase
seleniumbase/core/s3_manager.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/s3_manager.py#L36-L55
def upload_file(self, file_name, file_path):
    """ Upload a given file from the file_path to the bucket
        with the new name/path file_name. """
    upload_key = Key(bucket=self.bucket, name=file_name)
    content_type = "text/plain"
    if file_name.endswith(".html"):
        content_type = "text/html"
    elif file_name.endswith(".jpg"):
        content_type = "image/jpeg"
    elif file_name.endswith(".png"):
        content_type = "image/png"
    upload_key.set_contents_from_filename(
        file_path,
        headers={"Content-Type": content_type})
    upload_key.url = \
        upload_key.generate_url(expires_in=3600).split("?")[0]
    try:
        upload_key.make_public()
    except Exception:
        pass
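The extension-to-Content-Type mapping could also lean on the standard library; a sketch that falls back to text/plain like the method above:

import mimetypes

def guess_content_type(file_name):
    # mimetypes covers far more extensions than the hand-rolled chain above.
    content_type, _ = mimetypes.guess_type(file_name)
    return content_type or "text/plain"

print(guess_content_type("latest_report.html"))  # text/html
print(guess_content_type("screenshot.png"))      # image/png
print(guess_content_type("console_output"))      # text/plain (fallback)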
[ "def", "upload_file", "(", "self", ",", "file_name", ",", "file_path", ")", ":", "upload_key", "=", "Key", "(", "bucket", "=", "self", ".", "bucket", ",", "name", "=", "file_name", ")", "content_type", "=", "\"text/plain\"", "if", "file_name", ".", "endswith", "(", "\".html\"", ")", ":", "content_type", "=", "\"text/html\"", "elif", "file_name", ".", "endswith", "(", "\".jpg\"", ")", ":", "content_type", "=", "\"image/jpeg\"", "elif", "file_name", ".", "endswith", "(", "\".png\"", ")", ":", "content_type", "=", "\"image/png\"", "upload_key", ".", "set_contents_from_filename", "(", "file_path", ",", "headers", "=", "{", "\"Content-Type\"", ":", "content_type", "}", ")", "upload_key", ".", "url", "=", "upload_key", ".", "generate_url", "(", "expires_in", "=", "3600", ")", ".", "split", "(", "\"?\"", ")", "[", "0", "]", "try", ":", "upload_key", ".", "make_public", "(", ")", "except", "Exception", ":", "pass" ]
Upload a given file from the file_path to the bucket with the new name/path file_name.
[ "Upload", "a", "given", "file", "from", "the", "file_path", "to", "the", "bucket", "with", "the", "new", "name", "/", "path", "file_name", "." ]
python
train
ff0000/scarlet
scarlet/cms/widgets.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/widgets.py#L640-L660
def update_links(self, request, admin_site=None):
    """
    Called to update the widget's urls. Tries to find
    the bundle for the model that this foreign key points
    to and then asks it for the urls for adding and listing
    and sets them on this widget instance.

    The urls are only set if request.user has permissions
    on that url.

    :param request: The request for which this widget is being rendered.
    :param admin_site: If provided, the `admin_site` is used to lookup \
    the bundle that is registered as the primary url for the model \
    that this foreign key points to.
    """
    if admin_site:
        bundle = admin_site.get_bundle_for_model(self.model.to)
        if bundle:
            self._api_link = self._get_bundle_link(bundle, self.view,
                                                   request.user)
            self._add_link = self._get_bundle_link(bundle, self.add_view,
                                                   request.user)
[ "def", "update_links", "(", "self", ",", "request", ",", "admin_site", "=", "None", ")", ":", "if", "admin_site", ":", "bundle", "=", "admin_site", ".", "get_bundle_for_model", "(", "self", ".", "model", ".", "to", ")", "if", "bundle", ":", "self", ".", "_api_link", "=", "self", ".", "_get_bundle_link", "(", "bundle", ",", "self", ".", "view", ",", "request", ".", "user", ")", "self", ".", "_add_link", "=", "self", ".", "_get_bundle_link", "(", "bundle", ",", "self", ".", "add_view", ",", "request", ".", "user", ")" ]
Called to update the widget's urls. Tries to find the bundle for the model that this foreign key points to and then asks it for the urls for adding and listing and sets them on this widget instance. The urls are only set if request.user has permissions on that url. :param request: The request for which this widget is being rendered. :param admin_site: If provided, the `admin_site` is used to lookup \ the bundle that is registered as the primary url for the model \ that this foreign key points to.
[ "Called", "to", "update", "the", "widget", "s", "urls", ".", "Tries", "to", "find", "the", "bundle", "for", "the", "model", "that", "this", "foreign", "key", "points", "to", "and", "then", "asks", "it", "for", "the", "urls", "for", "adding", "and", "listing", "and", "sets", "them", "on", "this", "widget", "instance", ".", "The", "urls", "are", "only", "set", "if", "request", ".", "user", "has", "permissions", "on", "that", "url", "." ]
python
train
fedora-infra/fedmsg
fedmsg/crypto/__init__.py
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/crypto/__init__.py#L166-L192
def init(**config):
    """ Initialize the crypto backend.

    The backend can be one of two plugins:

        - 'x509' - Uses x509 certificates.
        - 'gpg' - Uses GnuPG keys.
    """
    global _implementation
    global _validate_implementations

    if config.get('crypto_backend') == 'gpg':
        _implementation = gpg
    else:
        _implementation = x509

    _validate_implementations = []
    for mod in config.get('crypto_validate_backends', []):
        if mod == 'gpg':
            _validate_implementations.append(gpg)
        elif mod == 'x509':
            _validate_implementations.append(x509)
        else:
            raise ValueError("%r is not a valid crypto backend" % mod)

    if not _validate_implementations:
        _validate_implementations.append(_implementation)
[ "def", "init", "(", "*", "*", "config", ")", ":", "global", "_implementation", "global", "_validate_implementations", "if", "config", ".", "get", "(", "'crypto_backend'", ")", "==", "'gpg'", ":", "_implementation", "=", "gpg", "else", ":", "_implementation", "=", "x509", "_validate_implementations", "=", "[", "]", "for", "mod", "in", "config", ".", "get", "(", "'crypto_validate_backends'", ",", "[", "]", ")", ":", "if", "mod", "==", "'gpg'", ":", "_validate_implementations", ".", "append", "(", "gpg", ")", "elif", "mod", "==", "'x509'", ":", "_validate_implementations", ".", "append", "(", "x509", ")", "else", ":", "raise", "ValueError", "(", "\"%r is not a valid crypto backend\"", "%", "mod", ")", "if", "not", "_validate_implementations", ":", "_validate_implementations", ".", "append", "(", "_implementation", ")" ]
Initialize the crypto backend. The backend can be one of two plugins: - 'x509' - Uses x509 certificates. - 'gpg' - Uses GnuPG keys.
[ "Initialize", "the", "crypto", "backend", "." ]
python
train
dagster-io/dagster
python_modules/dagster/dagster/core/execution.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L222-L239
def transformed_values(self):
    '''Return dictionary of transformed results, with keys being output names.
    Returns None if execution result isn't a success.

    Reconstructs the pipeline context to materialize values.
    '''
    if self.success and self.transforms:
        with self.reconstruct_context() as context:
            values = {
                result.step_output_data.output_name: self._get_value(
                    context, result.step_output_data
                )
                for result in self.transforms
                if result.is_successful_output
            }
        return values
    else:
        return None
[ "def", "transformed_values", "(", "self", ")", ":", "if", "self", ".", "success", "and", "self", ".", "transforms", ":", "with", "self", ".", "reconstruct_context", "(", ")", "as", "context", ":", "values", "=", "{", "result", ".", "step_output_data", ".", "output_name", ":", "self", ".", "_get_value", "(", "context", ",", "result", ".", "step_output_data", ")", "for", "result", "in", "self", ".", "transforms", "if", "result", ".", "is_successful_output", "}", "return", "values", "else", ":", "return", "None" ]
Return dictionary of transformed results, with keys being output names. Returns None if execution result isn't a success. Reconstructs the pipeline context to materialize values.
[ "Return", "dictionary", "of", "transformed", "results", "with", "keys", "being", "output", "names", ".", "Returns", "None", "if", "execution", "result", "isn", "t", "a", "success", "." ]
python
test
ARMmbed/icetea
icetea_lib/tools/tools.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/tools.py#L350-L367
def recursive_dictionary_get(keys, dictionary):
    """
    Gets contents of requirement key recursively so users can search
    for specific keys inside nested requirement dicts.

    :param keys: key or dot separated string of keys to look for.
    :param dictionary: Dictionary to search from
    :return: results of search or None
    """
    if "." in keys and len(keys) > 1:
        key = keys.split(".", 1)
        new_dict = dictionary.get(key[0])
        # Make sure that the next level actually has a dict we can continue the search from.
        if not new_dict or not hasattr(new_dict, "get"):
            return None
        return recursive_dictionary_get(key[1], new_dict)
    else:
        return dictionary.get(keys) if (dictionary and hasattr(dictionary, "get")) else None
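Assuming the helper above is in scope, dotted keys walk nested requirement dicts:

requirements = {"duts": {"count": 2, "application": {"bin": "test.bin"}}}

print(recursive_dictionary_get("duts.application.bin", requirements))  # test.bin
print(recursive_dictionary_get("duts.count", requirements))            # 2
print(recursive_dictionary_get("duts.missing.key", requirements))      # None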
[ "def", "recursive_dictionary_get", "(", "keys", ",", "dictionary", ")", ":", "if", "\".\"", "in", "keys", "and", "len", "(", "keys", ")", ">", "1", ":", "key", "=", "keys", ".", "split", "(", "\".\"", ",", "1", ")", "new_dict", "=", "dictionary", ".", "get", "(", "key", "[", "0", "]", ")", "# Make sure that the next level actually has a dict we can continue the search from.", "if", "not", "new_dict", "or", "not", "hasattr", "(", "new_dict", ",", "\"get\"", ")", ":", "return", "None", "return", "recursive_dictionary_get", "(", "key", "[", "1", "]", ",", "new_dict", ")", "else", ":", "return", "dictionary", ".", "get", "(", "keys", ")", "if", "(", "dictionary", "and", "hasattr", "(", "dictionary", ",", "\"get\"", ")", ")", "else", "None" ]
Gets contents of requirement key recursively so users can search for specific keys inside nested requirement dicts. :param keys: key or dot separated string of keys to look for. :param dictionary: Dictionary to search from :return: results of search or None
[ "Gets", "contents", "of", "requirement", "key", "recursively", "so", "users", "can", "search", "for", "specific", "keys", "inside", "nested", "requirement", "dicts", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/configuration.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/configuration.py#L82-L106
def load(self, updates):
    """Load configuration data"""
    # Go through in order and override the config (`.mbed_cloud_config.json` loader)
    for path in self.paths():
        if not path:
            continue
        abs_path = os.path.abspath(os.path.expanduser(path))
        if not os.path.isfile(abs_path):
            self._using_paths.append('missing: %s' % abs_path)
            continue
        self._using_paths.append(' exists: %s' % abs_path)
        with open(abs_path) as fh:
            self.update(json.load(fh))

    # New dotenv loader - requires explicit instructions to use current working directory
    load_dotenv(find_dotenv(usecwd=True))

    # Pluck config values out of the environment
    for env_var, key in {ENVVAR_API_HOST: 'host', ENVVAR_API_KEY: 'api_key'}.items():
        env_value = os.getenv(env_var)
        if env_value is not None:
            self[key] = env_value

    if updates:
        self.update(updates)

    self.validate()
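A standalone sketch of the environment-override step with invented variable names (the real SDK defines its own ENVVAR_* constants):

import os

config = {"host": "https://api.example.com", "api_key": None}

os.environ["EXAMPLE_API_KEY"] = "ak_123"   # e.g. exported by a .env file
for env_var, key in {"EXAMPLE_API_HOST": "host",
                     "EXAMPLE_API_KEY": "api_key"}.items():
    value = os.getenv(env_var)
    if value is not None:       # only override keys that are actually set
        config[key] = value

print(config["api_key"])   # ak_123
print(config["host"])      # unchanged: https://api.example.com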
[ "def", "load", "(", "self", ",", "updates", ")", ":", "# Go through in order and override the config (`.mbed_cloud_config.json` loader)", "for", "path", "in", "self", ".", "paths", "(", ")", ":", "if", "not", "path", ":", "continue", "abs_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "abs_path", ")", ":", "self", ".", "_using_paths", ".", "append", "(", "'missing: %s'", "%", "abs_path", ")", "continue", "self", ".", "_using_paths", ".", "append", "(", "' exists: %s'", "%", "abs_path", ")", "with", "open", "(", "abs_path", ")", "as", "fh", ":", "self", ".", "update", "(", "json", ".", "load", "(", "fh", ")", ")", "# New dotenv loader - requires explicit instructions to use current working directory", "load_dotenv", "(", "find_dotenv", "(", "usecwd", "=", "True", ")", ")", "# Pluck config values out of the environment", "for", "env_var", ",", "key", "in", "{", "ENVVAR_API_HOST", ":", "'host'", ",", "ENVVAR_API_KEY", ":", "'api_key'", "}", ".", "items", "(", ")", ":", "env_value", "=", "os", ".", "getenv", "(", "env_var", ")", "if", "env_value", "is", "not", "None", ":", "self", "[", "key", "]", "=", "env_value", "if", "updates", ":", "self", ".", "update", "(", "updates", ")", "self", ".", "validate", "(", ")" ]
Load configuration data
[ "Load", "configuration", "data" ]
python
train
bspaans/python-mingus
mingus/extra/lilypond.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/lilypond.py#L186-L195
def from_Composition(composition):
    """Return the LilyPond equivalent of a Composition in a string."""
    # warning Throw exception
    if not hasattr(composition, 'tracks'):
        return False
    result = '\\header { title = "%s" composer = "%s" opus = "%s" } '\
        % (composition.title, composition.author, composition.subtitle)
    for track in composition.tracks:
        result += from_Track(track) + ' '
    return result[:-1]
[ "def", "from_Composition", "(", "composition", ")", ":", "# warning Throw exception", "if", "not", "hasattr", "(", "composition", ",", "'tracks'", ")", ":", "return", "False", "result", "=", "'\\\\header { title = \"%s\" composer = \"%s\" opus = \"%s\" } '", "%", "(", "composition", ".", "title", ",", "composition", ".", "author", ",", "composition", ".", "subtitle", ")", "for", "track", "in", "composition", ".", "tracks", ":", "result", "+=", "from_Track", "(", "track", ")", "+", "' '", "return", "result", "[", ":", "-", "1", "]" ]
Return the LilyPond equivalent of a Composition in a string.
[ "Return", "the", "LilyPond", "equivalent", "of", "a", "Composition", "in", "a", "string", "." ]
python
train
saltstack/salt
salt/states/boto_datapipeline.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_datapipeline.py#L395-L406
def _diff(old_pipeline_definition, new_pipeline_definition):
    '''
    Return string diff of pipeline definitions.
    '''
    old_pipeline_definition.pop('ResponseMetadata', None)
    new_pipeline_definition.pop('ResponseMetadata', None)

    diff = salt.utils.data.decode(difflib.unified_diff(
        salt.utils.json.dumps(old_pipeline_definition, indent=4).splitlines(True),
        salt.utils.json.dumps(new_pipeline_definition, indent=4).splitlines(True),
    ))
    return ''.join(diff)
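The same diffing idea using only the standard library, outside Salt's wrappers:

import difflib
import json

old = {"objects": [{"id": "Default", "scheduleType": "cron"}]}
new = {"objects": [{"id": "Default", "scheduleType": "ondemand"}]}

diff = difflib.unified_diff(
    json.dumps(old, indent=4).splitlines(True),
    json.dumps(new, indent=4).splitlines(True),
)
print(''.join(diff))   # ---/+++ header plus the changed "scheduleType" lines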
[ "def", "_diff", "(", "old_pipeline_definition", ",", "new_pipeline_definition", ")", ":", "old_pipeline_definition", ".", "pop", "(", "'ResponseMetadata'", ",", "None", ")", "new_pipeline_definition", ".", "pop", "(", "'ResponseMetadata'", ",", "None", ")", "diff", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "difflib", ".", "unified_diff", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "old_pipeline_definition", ",", "indent", "=", "4", ")", ".", "splitlines", "(", "True", ")", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "new_pipeline_definition", ",", "indent", "=", "4", ")", ".", "splitlines", "(", "True", ")", ",", ")", ")", "return", "''", ".", "join", "(", "diff", ")" ]
Return string diff of pipeline definitions.
[ "Return", "string", "diff", "of", "pipeline", "definitions", "." ]
python
train
binux/pyspider
pyspider/libs/url.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/url.py#L29-L59
def _build_url(url, _params):
    """Build the actual URL to use."""

    # Support for unicode domain names and paths.
    scheme, netloc, path, params, query, fragment = urlparse(url)
    netloc = netloc.encode('idna').decode('utf-8')
    if not path:
        path = '/'

    if six.PY2:
        if isinstance(scheme, six.text_type):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, six.text_type):
            netloc = netloc.encode('utf-8')
        if isinstance(path, six.text_type):
            path = path.encode('utf-8')
        if isinstance(params, six.text_type):
            params = params.encode('utf-8')
        if isinstance(query, six.text_type):
            query = query.encode('utf-8')
        if isinstance(fragment, six.text_type):
            fragment = fragment.encode('utf-8')

    enc_params = _encode_params(_params)
    if enc_params:
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params

    url = (urlunparse([scheme, netloc, path, params, query, fragment]))
    return url
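A Python 3-only sketch of the same idea using urllib.parse directly, with urlencode standing in for the module's _encode_params helper:

from urllib.parse import urlparse, urlunparse, urlencode

def build_url(url, params):
    scheme, netloc, path, url_params, query, fragment = urlparse(url)
    netloc = netloc.encode('idna').decode('utf-8')   # unicode domain support
    path = path or '/'
    enc_params = urlencode(params)
    if enc_params:
        query = '%s&%s' % (query, enc_params) if query else enc_params
    return urlunparse([scheme, netloc, path, url_params, query, fragment])

print(build_url('http://example.com/search?q=test', {'page': 2, 'lang': 'en'}))
# http://example.com/search?q=test&page=2&lang=en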
[ "def", "_build_url", "(", "url", ",", "_params", ")", ":", "# Support for unicode domain names and paths.", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "=", "urlparse", "(", "url", ")", "netloc", "=", "netloc", ".", "encode", "(", "'idna'", ")", ".", "decode", "(", "'utf-8'", ")", "if", "not", "path", ":", "path", "=", "'/'", "if", "six", ".", "PY2", ":", "if", "isinstance", "(", "scheme", ",", "six", ".", "text_type", ")", ":", "scheme", "=", "scheme", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "netloc", ",", "six", ".", "text_type", ")", ":", "netloc", "=", "netloc", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "path", ",", "six", ".", "text_type", ")", ":", "path", "=", "path", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "params", ",", "six", ".", "text_type", ")", ":", "params", "=", "params", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "query", ",", "six", ".", "text_type", ")", ":", "query", "=", "query", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "fragment", ",", "six", ".", "text_type", ")", ":", "fragment", "=", "fragment", ".", "encode", "(", "'utf-8'", ")", "enc_params", "=", "_encode_params", "(", "_params", ")", "if", "enc_params", ":", "if", "query", ":", "query", "=", "'%s&%s'", "%", "(", "query", ",", "enc_params", ")", "else", ":", "query", "=", "enc_params", "url", "=", "(", "urlunparse", "(", "[", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "]", ")", ")", "return", "url" ]
Build the actual URL to use.
[ "Build", "the", "actual", "URL", "to", "use", "." ]
python
train
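A minimal Python 3 sketch of the same idea, leaving out the Python 2 branch: IDNA-encode the host, default an empty path to '/', and merge extra query parameters. The URL and parameters are invented for illustration.

from urllib.parse import urlparse, urlunparse, urlencode

url = "http://bücher.example?q=1"
scheme, netloc, path, params, query, fragment = urlparse(url)
netloc = netloc.encode("idna").decode("utf-8")   # -> 'xn--bcher-kva.example'
if not path:
    path = "/"
extra = urlencode({"page": 2})
query = "%s&%s" % (query, extra) if query else extra
print(urlunparse([scheme, netloc, path, params, query, fragment]))
# http://xn--bcher-kva.example/?q=1&page=2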
mikedh/trimesh
trimesh/base.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L467-L483
def bounds(self): """ The axis aligned bounds of the faces of the mesh. Returns ----------- bounds : (2, 3) float Bounding box with [min, max] coordinates """ # return bounds including ONLY referenced vertices in_mesh = self.vertices[self.referenced_vertices] # get mesh bounds with min and max mesh_bounds = np.array([in_mesh.min(axis=0), in_mesh.max(axis=0)]) # should not be mutable mesh_bounds.flags.writeable = False return mesh_bounds
[ "def", "bounds", "(", "self", ")", ":", "# return bounds including ONLY referenced vertices", "in_mesh", "=", "self", ".", "vertices", "[", "self", ".", "referenced_vertices", "]", "# get mesh bounds with min and max", "mesh_bounds", "=", "np", ".", "array", "(", "[", "in_mesh", ".", "min", "(", "axis", "=", "0", ")", ",", "in_mesh", ".", "max", "(", "axis", "=", "0", ")", "]", ")", "# should not be mutable", "mesh_bounds", ".", "flags", ".", "writeable", "=", "False", "return", "mesh_bounds" ]
The axis aligned bounds of the faces of the mesh. Returns ----------- bounds : (2, 3) float Bounding box with [min, max] coordinates
[ "The", "axis", "aligned", "bounds", "of", "the", "faces", "of", "the", "mesh", "." ]
python
train
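The bounding-box computation itself is plain NumPy; a small self-contained sketch with made-up vertices:

import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 2.0, -1.0],
                     [0.5, -3.0, 4.0]])
# Axis-aligned bounds: row 0 holds the per-axis minimum, row 1 the maximum.
bounds = np.array([vertices.min(axis=0), vertices.max(axis=0)])
bounds.flags.writeable = False   # freeze the result, as the cached property does
print(bounds)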
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/text.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/text.py#L97-L168
def get_matching_then_nonmatching_text(string_list, separator='', match_min_size=30, ignore='', end_characters='.!\r\n'): # type: (List[str], str, int, str, str) -> str """Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching """ def add_separator_if_needed(text_list): if separator and len(text_list) > 0 and text_list[-1][-len(separator):] != separator: text_list.append(separator) a = string_list[0] for i in range(1, len(string_list)): b = string_list[i] combined_len = len(a) + len(b) result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore, end_characters=end_characters) new_a = a new_b = b for text in result: new_a = new_a.replace(text, '') new_b = new_b.replace(text, '') if new_a and new_a in a: pos_a = a.index(new_a) else: pos_a = combined_len if new_b and new_b in b: pos_b = b.index(new_b) else: pos_b = combined_len if pos_b > pos_a: text_1 = new_b pos_1 = pos_b text_2 = new_a pos_2 = pos_a else: text_1 = new_a pos_1 = pos_a text_2 = new_b pos_2 = pos_b output = list() pos = 0 for text in result: output.append(text) pos += len(text) if text_1 and pos >= pos_1: add_separator_if_needed(output) output.append(text_1) pos += len(text_1) text_1 = None if text_2 and pos >= pos_2: add_separator_if_needed(output) output.append(text_2) pos += len(text_2) text_2 = None if text_1 and pos_1 == combined_len: add_separator_if_needed(output) output.append(text_1) if text_2 and pos_2 == combined_len: add_separator_if_needed(output) output.append(text_2) a = ''.join(output) return a
[ "def", "get_matching_then_nonmatching_text", "(", "string_list", ",", "separator", "=", "''", ",", "match_min_size", "=", "30", ",", "ignore", "=", "''", ",", "end_characters", "=", "'.!\\r\\n'", ")", ":", "# type: (List[str], str, int, str, str) -> str", "def", "add_separator_if_needed", "(", "text_list", ")", ":", "if", "separator", "and", "len", "(", "text_list", ")", ">", "0", "and", "text_list", "[", "-", "1", "]", "[", "-", "len", "(", "separator", ")", ":", "]", "!=", "separator", ":", "text_list", ".", "append", "(", "separator", ")", "a", "=", "string_list", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "string_list", ")", ")", ":", "b", "=", "string_list", "[", "i", "]", "combined_len", "=", "len", "(", "a", ")", "+", "len", "(", "b", ")", "result", "=", "get_matching_text_in_strs", "(", "a", ",", "b", ",", "match_min_size", "=", "match_min_size", ",", "ignore", "=", "ignore", ",", "end_characters", "=", "end_characters", ")", "new_a", "=", "a", "new_b", "=", "b", "for", "text", "in", "result", ":", "new_a", "=", "new_a", ".", "replace", "(", "text", ",", "''", ")", "new_b", "=", "new_b", ".", "replace", "(", "text", ",", "''", ")", "if", "new_a", "and", "new_a", "in", "a", ":", "pos_a", "=", "a", ".", "index", "(", "new_a", ")", "else", ":", "pos_a", "=", "combined_len", "if", "new_b", "and", "new_b", "in", "b", ":", "pos_b", "=", "b", ".", "index", "(", "new_b", ")", "else", ":", "pos_b", "=", "combined_len", "if", "pos_b", ">", "pos_a", ":", "text_1", "=", "new_b", "pos_1", "=", "pos_b", "text_2", "=", "new_a", "pos_2", "=", "pos_a", "else", ":", "text_1", "=", "new_a", "pos_1", "=", "pos_a", "text_2", "=", "new_b", "pos_2", "=", "pos_b", "output", "=", "list", "(", ")", "pos", "=", "0", "for", "text", "in", "result", ":", "output", ".", "append", "(", "text", ")", "pos", "+=", "len", "(", "text", ")", "if", "text_1", "and", "pos", ">=", "pos_1", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_1", ")", "pos", "+=", "len", "(", "text_1", ")", "text_1", "=", "None", "if", "text_2", "and", "pos", ">=", "pos_2", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_2", ")", "pos", "+=", "len", "(", "text_2", ")", "text_2", "=", "None", "if", "text_1", "and", "pos_1", "==", "combined_len", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_1", ")", "if", "text_2", "and", "pos_2", "==", "combined_len", ":", "add_separator_if_needed", "(", "output", ")", "output", ".", "append", "(", "text_2", ")", "a", "=", "''", ".", "join", "(", "output", ")", "return", "a" ]
Returns a string containing matching blocks of text in a list of strings followed by non-matching. Args: string_list (List[str]): List of strings to match separator (str): Separator to add between blocks of text. Defaults to ''. match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to '.\r\n'. Returns: str: String containing matching blocks of text followed by non-matching
[ "Returns", "a", "string", "containing", "matching", "blocks", "of", "text", "in", "a", "list", "of", "strings", "followed", "by", "non", "-", "matching", "." ]
python
train
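The shared-block detection that get_matching_text_in_strs builds on is available in the standard library as difflib.SequenceMatcher; a sketch of pulling out long matching runs (the separator and end-character handling of the helper above is omitted), with invented input strings:

from difflib import SequenceMatcher

a = "Conflict and drought have displaced thousands of people in the region."
b = "Conflict and flooding have displaced thousands of people in the region."
matcher = SequenceMatcher(None, a, b)
for block in matcher.get_matching_blocks():
    if block.size >= 30:   # keep only reasonably long shared runs
        print(repr(a[block.a:block.a + block.size]))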
gem/oq-engine
openquake/hazardlib/calc/hazard_curve.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/calc/hazard_curve.py#L71-L91
def _cluster(param, tom, imtls, gsims, grp_ids, pmap): """ Computes the probability map in case of a cluster group """ pmapclu = AccumDict({grp_id: ProbabilityMap(len(imtls.array), len(gsims)) for grp_id in grp_ids}) # Get temporal occurrence model # Number of occurrences for the cluster first = True for nocc in range(0, 50): # TODO fix this once the occurrence rate will be used just as # an object attribute ocr = tom.occurrence_rate prob_n_occ = tom.get_probability_n_occurrences(ocr, nocc) if first: pmapclu = prob_n_occ * (~pmap)**nocc first = False else: pmapclu += prob_n_occ * (~pmap)**nocc pmap = ~pmapclu return pmap
[ "def", "_cluster", "(", "param", ",", "tom", ",", "imtls", ",", "gsims", ",", "grp_ids", ",", "pmap", ")", ":", "pmapclu", "=", "AccumDict", "(", "{", "grp_id", ":", "ProbabilityMap", "(", "len", "(", "imtls", ".", "array", ")", ",", "len", "(", "gsims", ")", ")", "for", "grp_id", "in", "grp_ids", "}", ")", "# Get temporal occurrence model", "# Number of occurrences for the cluster", "first", "=", "True", "for", "nocc", "in", "range", "(", "0", ",", "50", ")", ":", "# TODO fix this once the occurrence rate will be used just as", "# an object attribute", "ocr", "=", "tom", ".", "occurrence_rate", "prob_n_occ", "=", "tom", ".", "get_probability_n_occurrences", "(", "ocr", ",", "nocc", ")", "if", "first", ":", "pmapclu", "=", "prob_n_occ", "*", "(", "~", "pmap", ")", "**", "nocc", "first", "=", "False", "else", ":", "pmapclu", "+=", "prob_n_occ", "*", "(", "~", "pmap", ")", "**", "nocc", "pmap", "=", "~", "pmapclu", "return", "pmap" ]
Computes the probability map in case of a cluster group
[ "Computes", "the", "probability", "map", "in", "case", "of", "a", "cluster", "group" ]
python
train
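Reading the loop as a Poisson temporal occurrence model (an assumption here, not stated by the record), it sums P(N = n) times the n-th power of the per-occurrence non-exceedance probability; a scalar sketch with invented numbers:

import math

rate = 0.05        # assumed cluster occurrence rate (Poisson mean)
p_exceed = 0.3     # assumed exceedance probability given a single occurrence

prob_no_exceed = sum(
    math.exp(-rate) * rate**n / math.factorial(n) * (1.0 - p_exceed)**n
    for n in range(50)
)
print(1.0 - prob_no_exceed)   # ~0.0149, i.e. 1 - exp(-rate * p_exceed)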
tgalal/yowsup
yowsup/layers/noise/layer.py
https://github.com/tgalal/yowsup/blob/b0739461ba962bf221fc76047d9d60d8ce61bc3e/yowsup/layers/noise/layer.py#L131-L139
def send(self, data): """ :param data: :type data: bytearray | bytes :return: :rtype: """ data = bytes(data) if type(data) is not bytes else data self._wa_noiseprotocol.send(data)
[ "def", "send", "(", "self", ",", "data", ")", ":", "data", "=", "bytes", "(", "data", ")", "if", "type", "(", "data", ")", "is", "not", "bytes", "else", "data", "self", ".", "_wa_noiseprotocol", ".", "send", "(", "data", ")" ]
:param data: :type data: bytearray | bytes :return: :rtype:
[ ":", "param", "data", ":", ":", "type", "data", ":", "bytearray", "|", "bytes", ":", "return", ":", ":", "rtype", ":" ]
python
train
ASKIDA/Selenium2LibraryExtension
src/Selenium2LibraryExtension/keywords/__init__.py
https://github.com/ASKIDA/Selenium2LibraryExtension/blob/5ca3fa776063c6046dff317cb2575e4772d7541f/src/Selenium2LibraryExtension/keywords/__init__.py#L35-L44
def wait_until_element_does_not_have_focus(self, locator, timeout=None): """Waits until the element identified by `locator` doesn't have focus. You might rather want to use `Element Focus Should Not Be Set` | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |""" self._info("Waiting until '%s' does not have focus" % (locator)) self._wait_until_no_error(timeout, self._check_element_focus_exp, False, locator, timeout)
[ "def", "wait_until_element_does_not_have_focus", "(", "self", ",", "locator", ",", "timeout", "=", "None", ")", ":", "self", ".", "_info", "(", "\"Waiting until '%s' does not have focus\"", "%", "(", "locator", ")", ")", "self", ".", "_wait_until_no_error", "(", "timeout", ",", "self", ".", "_check_element_focus_exp", ",", "False", ",", "locator", ",", "timeout", ")" ]
Waits until the element identified by `locator` doesn't have focus. You might rather want to use `Element Focus Should Not Be Set` | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
[ "Waits", "until", "the", "element", "identified", "by", "locator", "doesn", "t", "have", "focus", ".", "You", "might", "rather", "want", "to", "use", "Element", "Focus", "Should", "Not", "Be", "Set" ]
python
train
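The keyword rests on a poll-until-success pattern; a generic, self-contained sketch of that pattern (wait_until is a hypothetical helper, not part of the library):

import time

def wait_until(condition, timeout=5.0, interval=0.2):
    # Poll `condition` until it returns a truthy value or the timeout elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(interval)
    raise TimeoutError("condition not met within %.1f s" % timeout)

wait_until(lambda: True)   # returns immediately in this toy call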
enkore/i3pystatus
i3pystatus/core/io.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/io.py#L158-L176
def suspend_signal_handler(self, signo, frame): """ By default, i3bar sends SIGSTOP to all children when it is not visible (for example, the screen sleeps or you enter full screen mode). This stops the i3pystatus process and all threads within it. For some modules, this is not desirable. Thankfully, the i3bar protocol supports setting the "stop_signal" and "cont_signal" key/value pairs in the header to allow sending a custom signal when these events occur. Here we use SIGUSR2 for both "stop_signal" and "cont_signal" and maintain a toggle to determine whether we have just been stopped or continued. When we have been stopped, notify the IntervalModule managers that they should suspend any module that does not set the keep_alive flag to a truthy value, and when we have been continued, notify the IntervalModule managers that they can resume execution of all modules. """ if signo != signal.SIGUSR2: return self.stopped = not self.stopped if self.stopped: [m.suspend() for m in IntervalModule.managers.values()] else: [m.resume() for m in IntervalModule.managers.values()]
[ "def", "suspend_signal_handler", "(", "self", ",", "signo", ",", "frame", ")", ":", "if", "signo", "!=", "signal", ".", "SIGUSR2", ":", "return", "self", ".", "stopped", "=", "not", "self", ".", "stopped", "if", "self", ".", "stopped", ":", "[", "m", ".", "suspend", "(", ")", "for", "m", "in", "IntervalModule", ".", "managers", ".", "values", "(", ")", "]", "else", ":", "[", "m", ".", "resume", "(", ")", "for", "m", "in", "IntervalModule", ".", "managers", ".", "values", "(", ")", "]" ]
By default, i3bar sends SIGSTOP to all children when it is not visible (for example, the screen sleeps or you enter full screen mode). This stops the i3pystatus process and all threads within it. For some modules, this is not desirable. Thankfully, the i3bar protocol supports setting the "stop_signal" and "cont_signal" key/value pairs in the header to allow sending a custom signal when these events occur. Here we use SIGUSR2 for both "stop_signal" and "cont_signal" and maintain a toggle to determine whether we have just been stopped or continued. When we have been stopped, notify the IntervalModule managers that they should suspend any module that does not set the keep_alive flag to a truthy value, and when we have been continued, notify the IntervalModule managers that they can resume execution of all modules.
[ "By", "default", "i3bar", "sends", "SIGSTOP", "to", "all", "children", "when", "it", "is", "not", "visible", "(", "for", "example", "the", "screen", "sleeps", "or", "you", "enter", "full", "screen", "mode", ")", ".", "This", "stops", "the", "i3pystatus", "process", "and", "all", "threads", "within", "it", ".", "For", "some", "modules", "this", "is", "not", "desirable", ".", "Thankfully", "the", "i3bar", "protocol", "supports", "setting", "the", "stop_signal", "and", "cont_signal", "key", "/", "value", "pairs", "in", "the", "header", "to", "allow", "sending", "a", "custom", "signal", "when", "these", "events", "occur", "." ]
python
train
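A stripped-down sketch of the stop/continue toggle on SIGUSR2 (Unix-only; the suspend/resume calls of the real module are replaced by a print):

import signal

stopped = False

def toggle_suspend(signo, frame):
    # i3bar sends the configured stop/cont signal; flip a flag instead of SIGSTOP.
    global stopped
    stopped = not stopped
    print("suspended" if stopped else "resumed")

signal.signal(signal.SIGUSR2, toggle_suspend)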
kstateome/django-cas
cas/backends.py
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L75-L138
def _internal_verify_cas(ticket, service, suffix): """Verifies CAS 2.0 and 3.0 XML-based authentication ticket. Returns username on success and None on failure. """ params = {'ticket': ticket, 'service': service} if settings.CAS_PROXY_CALLBACK: params['pgtUrl'] = settings.CAS_PROXY_CALLBACK url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' + urlencode(params)) page = urlopen(url) username = None try: response = page.read() tree = ElementTree.fromstring(response) document = minidom.parseString(response) if tree[0].tag.endswith('authenticationSuccess'): if settings.CAS_RESPONSE_CALLBACKS: cas_response_callbacks(tree) username = tree[0][0].text pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket') if pgt_el: pgt = pgt_el[0].firstChild.nodeValue try: pgtIou = _get_pgtiou(pgt) tgt = Tgt.objects.get(username=username) tgt.tgt = pgtIou.tgt tgt.save() pgtIou.delete() except Tgt.DoesNotExist: Tgt.objects.create(username=username, tgt=pgtIou.tgt) logger.info('Creating TGT ticket for {user}'.format( user=username )) pgtIou.delete() except Exception as e: logger.warning('Failed to do proxy authentication. {message}'.format( message=e )) else: failure = document.getElementsByTagName('cas:authenticationFailure') if failure: logger.warn('Authentication failed from CAS server: %s', failure[0].firstChild.nodeValue) except Exception as e: logger.error('Failed to verify CAS authentication: {message}'.format( message=e )) finally: page.close() return username
[ "def", "_internal_verify_cas", "(", "ticket", ",", "service", ",", "suffix", ")", ":", "params", "=", "{", "'ticket'", ":", "ticket", ",", "'service'", ":", "service", "}", "if", "settings", ".", "CAS_PROXY_CALLBACK", ":", "params", "[", "'pgtUrl'", "]", "=", "settings", ".", "CAS_PROXY_CALLBACK", "url", "=", "(", "urljoin", "(", "settings", ".", "CAS_SERVER_URL", ",", "suffix", ")", "+", "'?'", "+", "urlencode", "(", "params", ")", ")", "page", "=", "urlopen", "(", "url", ")", "username", "=", "None", "try", ":", "response", "=", "page", ".", "read", "(", ")", "tree", "=", "ElementTree", ".", "fromstring", "(", "response", ")", "document", "=", "minidom", ".", "parseString", "(", "response", ")", "if", "tree", "[", "0", "]", ".", "tag", ".", "endswith", "(", "'authenticationSuccess'", ")", ":", "if", "settings", ".", "CAS_RESPONSE_CALLBACKS", ":", "cas_response_callbacks", "(", "tree", ")", "username", "=", "tree", "[", "0", "]", "[", "0", "]", ".", "text", "pgt_el", "=", "document", ".", "getElementsByTagName", "(", "'cas:proxyGrantingTicket'", ")", "if", "pgt_el", ":", "pgt", "=", "pgt_el", "[", "0", "]", ".", "firstChild", ".", "nodeValue", "try", ":", "pgtIou", "=", "_get_pgtiou", "(", "pgt", ")", "tgt", "=", "Tgt", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "tgt", ".", "tgt", "=", "pgtIou", ".", "tgt", "tgt", ".", "save", "(", ")", "pgtIou", ".", "delete", "(", ")", "except", "Tgt", ".", "DoesNotExist", ":", "Tgt", ".", "objects", ".", "create", "(", "username", "=", "username", ",", "tgt", "=", "pgtIou", ".", "tgt", ")", "logger", ".", "info", "(", "'Creating TGT ticket for {user}'", ".", "format", "(", "user", "=", "username", ")", ")", "pgtIou", ".", "delete", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "'Failed to do proxy authentication. {message}'", ".", "format", "(", "message", "=", "e", ")", ")", "else", ":", "failure", "=", "document", ".", "getElementsByTagName", "(", "'cas:authenticationFailure'", ")", "if", "failure", ":", "logger", ".", "warn", "(", "'Authentication failed from CAS server: %s'", ",", "failure", "[", "0", "]", ".", "firstChild", ".", "nodeValue", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'Failed to verify CAS authentication: {message}'", ".", "format", "(", "message", "=", "e", ")", ")", "finally", ":", "page", ".", "close", "(", ")", "return", "username" ]
Verifies CAS 2.0 and 3.0 XML-based authentication ticket. Returns username on success and None on failure.
[ "Verifies", "CAS", "2", ".", "0", "and", "3", ".", "0", "XML", "-", "based", "authentication", "ticket", "." ]
python
train
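A self-contained sketch of the XML handling for a CAS 2.0 success response; the payload below is fabricated for illustration:

import xml.etree.ElementTree as ElementTree

response = b"""<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
  <cas:authenticationSuccess>
    <cas:user>jdoe</cas:user>
  </cas:authenticationSuccess>
</cas:serviceResponse>"""

tree = ElementTree.fromstring(response)
if tree[0].tag.endswith("authenticationSuccess"):
    print(tree[0][0].text)   # -> jdoe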
mkoura/dump2polarion
dump2polarion/csv2sqlite_cli.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/csv2sqlite_cli.py#L80-L113
def main(args=None): """Main function for cli.""" args = get_args(args) utils.init_log(args.log_level) if ".csv" not in args.input_file.lower(): logger.warning("Make sure the input file '%s' is in CSV format", args.input_file) try: records = csvtools.get_imported_data(args.input_file) except (EnvironmentError, Dump2PolarionException) as err: logger.fatal(err) return 1 # check if all columns required by `pytest_polarion_cfme` are there required_columns = {"id": "ID", "title": "Title"} missing_columns = [required_columns[k] for k in required_columns if k not in records.results[0]] if missing_columns: logger.fatal( "The input file '%s' is missing following columns: %s", args.input_file, ", ".join(missing_columns), ) return 1 try: dump2sqlite(records, args.output_file) # pylint: disable=broad-except except Exception as err: logger.exception(err) return 1 return 0
[ "def", "main", "(", "args", "=", "None", ")", ":", "args", "=", "get_args", "(", "args", ")", "utils", ".", "init_log", "(", "args", ".", "log_level", ")", "if", "\".csv\"", "not", "in", "args", ".", "input_file", ".", "lower", "(", ")", ":", "logger", ".", "warning", "(", "\"Make sure the input file '%s' is in CSV format\"", ",", "args", ".", "input_file", ")", "try", ":", "records", "=", "csvtools", ".", "get_imported_data", "(", "args", ".", "input_file", ")", "except", "(", "EnvironmentError", ",", "Dump2PolarionException", ")", "as", "err", ":", "logger", ".", "fatal", "(", "err", ")", "return", "1", "# check if all columns required by `pytest_polarion_cfme` are there", "required_columns", "=", "{", "\"id\"", ":", "\"ID\"", ",", "\"title\"", ":", "\"Title\"", "}", "missing_columns", "=", "[", "required_columns", "[", "k", "]", "for", "k", "in", "required_columns", "if", "k", "not", "in", "records", ".", "results", "[", "0", "]", "]", "if", "missing_columns", ":", "logger", ".", "fatal", "(", "\"The input file '%s' is missing following columns: %s\"", ",", "args", ".", "input_file", ",", "\", \"", ".", "join", "(", "missing_columns", ")", ",", ")", "return", "1", "try", ":", "dump2sqlite", "(", "records", ",", "args", ".", "output_file", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "logger", ".", "exception", "(", "err", ")", "return", "1", "return", "0" ]
Main function for cli.
[ "Main", "function", "for", "cli", "." ]
python
train
danilobellini/dose
dose/watcher.py
https://github.com/danilobellini/dose/blob/141f48322f7812b7d32e3d5f065d4473a11102a4/dose/watcher.py#L8-L12
def to_unicode(path, errors="replace"): """Given a bytestring/unicode path, return it as unicode.""" if isinstance(path, UNICODE): return path return path.decode(sys.getfilesystemencoding(), errors)
[ "def", "to_unicode", "(", "path", ",", "errors", "=", "\"replace\"", ")", ":", "if", "isinstance", "(", "path", ",", "UNICODE", ")", ":", "return", "path", "return", "path", ".", "decode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ",", "errors", ")" ]
Given a bytestring/unicode path, return it as unicode.
[ "Given", "a", "bytestring", "/", "unicode", "path", "return", "it", "as", "unicode", "." ]
python
train
gboeing/osmnx
osmnx/utils.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/utils.py#L426-L498
def get_nearest_node(G, point, method='haversine', return_dist=False): """ Return the graph node nearest to some specified (lat, lng) or (y, x) point, and optionally the distance between the node and the point. This function can use either a haversine or euclidean distance calculator. Parameters ---------- G : networkx multidigraph point : tuple The (lat, lng) or (y, x) point for which we will find the nearest node in the graph method : str {'haversine', 'euclidean'} Which method to use for calculating distances to find nearest node. If 'haversine', graph nodes' coordinates must be in units of decimal degrees. If 'euclidean', graph nodes' coordinates must be projected. return_dist : bool Optionally also return the distance (in meters if haversine, or graph node coordinate units if euclidean) between the point and the nearest node. Returns ------- int or tuple of (int, float) Nearest node ID or optionally a tuple of (node ID, dist), where dist is the distance (in meters if haversine, or graph node coordinate units if euclidean) between the point and nearest node """ start_time = time.time() if not G or (G.number_of_nodes() == 0): raise ValueError('G argument must be not be empty or should contain at least one node') # dump graph node coordinates into a pandas dataframe indexed by node id # with x and y columns coords = [[node, data['x'], data['y']] for node, data in G.nodes(data=True)] df = pd.DataFrame(coords, columns=['node', 'x', 'y']).set_index('node') # add columns to the dataframe representing the (constant) coordinates of # the reference point df['reference_y'] = point[0] df['reference_x'] = point[1] # calculate the distance between each node and the reference point if method == 'haversine': # calculate distance vector using haversine (ie, for # spherical lat-long geometries) distances = great_circle_vec(lat1=df['reference_y'], lng1=df['reference_x'], lat2=df['y'], lng2=df['x']) elif method == 'euclidean': # calculate distance vector using euclidean distances (ie, for projected # planar geometries) distances = euclidean_dist_vec(y1=df['reference_y'], x1=df['reference_x'], y2=df['y'], x2=df['x']) else: raise ValueError('method argument must be either "haversine" or "euclidean"') # nearest node's ID is the index label of the minimum distance nearest_node = distances.idxmin() log('Found nearest node ({}) to point {} in {:,.2f} seconds'.format(nearest_node, point, time.time()-start_time)) # if caller requested return_dist, return distance between the point and the # nearest node as well if return_dist: return nearest_node, distances.loc[nearest_node] else: return nearest_node
[ "def", "get_nearest_node", "(", "G", ",", "point", ",", "method", "=", "'haversine'", ",", "return_dist", "=", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "if", "not", "G", "or", "(", "G", ".", "number_of_nodes", "(", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'G argument must be not be empty or should contain at least one node'", ")", "# dump graph node coordinates into a pandas dataframe indexed by node id", "# with x and y columns", "coords", "=", "[", "[", "node", ",", "data", "[", "'x'", "]", ",", "data", "[", "'y'", "]", "]", "for", "node", ",", "data", "in", "G", ".", "nodes", "(", "data", "=", "True", ")", "]", "df", "=", "pd", ".", "DataFrame", "(", "coords", ",", "columns", "=", "[", "'node'", ",", "'x'", ",", "'y'", "]", ")", ".", "set_index", "(", "'node'", ")", "# add columns to the dataframe representing the (constant) coordinates of", "# the reference point", "df", "[", "'reference_y'", "]", "=", "point", "[", "0", "]", "df", "[", "'reference_x'", "]", "=", "point", "[", "1", "]", "# calculate the distance between each node and the reference point", "if", "method", "==", "'haversine'", ":", "# calculate distance vector using haversine (ie, for", "# spherical lat-long geometries)", "distances", "=", "great_circle_vec", "(", "lat1", "=", "df", "[", "'reference_y'", "]", ",", "lng1", "=", "df", "[", "'reference_x'", "]", ",", "lat2", "=", "df", "[", "'y'", "]", ",", "lng2", "=", "df", "[", "'x'", "]", ")", "elif", "method", "==", "'euclidean'", ":", "# calculate distance vector using euclidean distances (ie, for projected", "# planar geometries)", "distances", "=", "euclidean_dist_vec", "(", "y1", "=", "df", "[", "'reference_y'", "]", ",", "x1", "=", "df", "[", "'reference_x'", "]", ",", "y2", "=", "df", "[", "'y'", "]", ",", "x2", "=", "df", "[", "'x'", "]", ")", "else", ":", "raise", "ValueError", "(", "'method argument must be either \"haversine\" or \"euclidean\"'", ")", "# nearest node's ID is the index label of the minimum distance", "nearest_node", "=", "distances", ".", "idxmin", "(", ")", "log", "(", "'Found nearest node ({}) to point {} in {:,.2f} seconds'", ".", "format", "(", "nearest_node", ",", "point", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "# if caller requested return_dist, return distance between the point and the", "# nearest node as well", "if", "return_dist", ":", "return", "nearest_node", ",", "distances", ".", "loc", "[", "nearest_node", "]", "else", ":", "return", "nearest_node" ]
Return the graph node nearest to some specified (lat, lng) or (y, x) point, and optionally the distance between the node and the point. This function can use either a haversine or euclidean distance calculator. Parameters ---------- G : networkx multidigraph point : tuple The (lat, lng) or (y, x) point for which we will find the nearest node in the graph method : str {'haversine', 'euclidean'} Which method to use for calculating distances to find nearest node. If 'haversine', graph nodes' coordinates must be in units of decimal degrees. If 'euclidean', graph nodes' coordinates must be projected. return_dist : bool Optionally also return the distance (in meters if haversine, or graph node coordinate units if euclidean) between the point and the nearest node. Returns ------- int or tuple of (int, float) Nearest node ID or optionally a tuple of (node ID, dist), where dist is the distance (in meters if haversine, or graph node coordinate units if euclidean) between the point and nearest node
[ "Return", "the", "graph", "node", "nearest", "to", "some", "specified", "(", "lat", "lng", ")", "or", "(", "y", "x", ")", "point", "and", "optionally", "the", "distance", "between", "the", "node", "and", "the", "point", ".", "This", "function", "can", "use", "either", "a", "haversine", "or", "euclidean", "distance", "calculator", "." ]
python
train
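A self-contained NumPy sketch of the haversine branch: vectorised great-circle distances from the reference point to every candidate node, then argmin. Node ids and coordinates are made up.

import numpy as np

def great_circle_m(lat1, lng1, lat2, lng2, radius=6371009):
    # Vectorised haversine distance in metres.
    lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
    h = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lng2 - lng1) / 2) ** 2)
    return 2 * radius * np.arcsin(np.sqrt(h))

node_ids = np.array([10, 20, 30])
lats = np.array([37.77, 37.80, 37.70])
lngs = np.array([-122.42, -122.27, -122.45])
point = (37.78, -122.41)   # (lat, lng)

dists = great_circle_m(point[0], point[1], lats, lngs)
print(node_ids[dists.argmin()], round(float(dists.min()), 1))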
O365/python-o365
O365/utils/attachment.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/attachment.py#L298-L305
def clear(self): """ Clear the attachments """ for attachment in self.__attachments: if attachment.on_cloud: self.__removed_attachments.append(attachment) self.__attachments = [] self._update_parent_attachments() self._track_changes()
[ "def", "clear", "(", "self", ")", ":", "for", "attachment", "in", "self", ".", "__attachments", ":", "if", "attachment", ".", "on_cloud", ":", "self", ".", "__removed_attachments", ".", "append", "(", "attachment", ")", "self", ".", "__attachments", "=", "[", "]", "self", ".", "_update_parent_attachments", "(", ")", "self", ".", "_track_changes", "(", ")" ]
Clear the attachments
[ "Clear", "the", "attachments" ]
python
train
corydodt/Crosscap
crosscap/openapi.py
https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L168-L184
def representCleanOpenAPIParameter(dumper, data): """ Rename python reserved keyword fields before representing an OpenAPIParameter """ dct = _orderedCleanDict(data) # We are using "in_" as a key for the "in" parameter, since in is a Python keyword. # To represent it correctly, we then have to swap "in_" for "in". # So we do an item-by-item copy of the dct so we don't change the order when # making this swap. d2 = OrderedDict() for k, v in dct.copy().items(): if k == 'in_': d2['in'] = v else: d2[k] = v return dumper.yaml_representers[type(d2)](dumper, d2)
[ "def", "representCleanOpenAPIParameter", "(", "dumper", ",", "data", ")", ":", "dct", "=", "_orderedCleanDict", "(", "data", ")", "# We are using \"in_\" as a key for the \"in\" parameter, since in is a Python keyword.", "# To represent it correctly, we then have to swap \"in_\" for \"in\".", "# So we do an item-by-item copy of the dct so we don't change the order when", "# making this swap.", "d2", "=", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "dct", ".", "copy", "(", ")", ".", "items", "(", ")", ":", "if", "k", "==", "'in_'", ":", "d2", "[", "'in'", "]", "=", "v", "else", ":", "d2", "[", "k", "]", "=", "v", "return", "dumper", ".", "yaml_representers", "[", "type", "(", "d2", ")", "]", "(", "dumper", ",", "d2", ")" ]
Rename python reserved keyword fields before representing an OpenAPIParameter
[ "Rename", "python", "reserved", "keyword", "fields", "before", "representing", "an", "OpenAPIParameter" ]
python
train
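The key swap in isolation, kept order-preserving; the parameter dict is an invented example:

from collections import OrderedDict

param = OrderedDict([("name", "limit"), ("in_", "query"), ("required", False)])
cleaned = OrderedDict(("in" if k == "in_" else k, v) for k, v in param.items())
print(list(cleaned.items()))
# [('name', 'limit'), ('in', 'query'), ('required', False)]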
fjwCode/cerium
cerium/androiddriver.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L470-L480
def app_trim_memory(self, pid: int or str, level: str = 'RUNNING_LOW') -> None: '''Trim memory. Args: level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | \ MODERATE | RUNNING_CRITICAL | COMPLETE ''' _, error = self._execute('-s', self.device_sn, 'shell', 'am', 'send-trim-memory', str(pid), level) if error and error.startswith('Error'): raise ApplicationsException(error.split(':', 1)[-1].strip())
[ "def", "app_trim_memory", "(", "self", ",", "pid", ":", "int", "or", "str", ",", "level", ":", "str", "=", "'RUNNING_LOW'", ")", "->", "None", ":", "_", ",", "error", "=", "self", ".", "_execute", "(", "'-s'", ",", "self", ".", "device_sn", ",", "'shell'", ",", "'am'", ",", "'send-trim-memory'", ",", "str", "(", "pid", ")", ",", "level", ")", "if", "error", "and", "error", ".", "startswith", "(", "'Error'", ")", ":", "raise", "ApplicationsException", "(", "error", ".", "split", "(", "':'", ",", "1", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ")" ]
Trim memory. Args: level: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | \ MODERATE | RUNNING_CRITICAL | COMPLETE
[ "Trim", "memory", "." ]
python
train
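A hedged sketch of issuing the same adb command directly with subprocess; it assumes adb is on PATH, a device with the given serial is attached, and it mirrors the wrapper's convention of treating stderr lines starting with 'Error' as failures:

import subprocess

def send_trim_memory(serial, pid, level="RUNNING_LOW"):
    cmd = ["adb", "-s", serial, "shell", "am", "send-trim-memory", str(pid), level]
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.stderr.startswith("Error"):
        raise RuntimeError(result.stderr.split(":", 1)[-1].strip())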
Erotemic/ubelt
ubelt/util_list.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_list.py#L253-L294
def unique(items, key=None): """ Generates unique items in the order they appear. Args: items (Iterable): list of items key (Callable, optional): custom normalization function. If specified returns items where `key(item)` is unique. Yields: object: a unique item from the input sequence CommandLine: python -m utool.util_list --exec-unique_ordered Example: >>> import ubelt as ub >>> items = [4, 6, 6, 0, 6, 1, 0, 2, 2, 1] >>> unique_items = list(ub.unique(items)) >>> assert unique_items == [4, 6, 0, 1, 2] Example: >>> import ubelt as ub >>> items = ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'D', 'E'] >>> unique_items = list(ub.unique(items, key=six.text_type.lower)) >>> assert unique_items == ['A', 'b', 'C', 'D', 'e'] >>> unique_items = list(ub.unique(items)) >>> assert unique_items == ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'E'] """ seen = set() if key is None: for item in items: if item not in seen: seen.add(item) yield item else: for item in items: norm = key(item) if norm not in seen: seen.add(norm) yield item
[ "def", "unique", "(", "items", ",", "key", "=", "None", ")", ":", "seen", "=", "set", "(", ")", "if", "key", "is", "None", ":", "for", "item", "in", "items", ":", "if", "item", "not", "in", "seen", ":", "seen", ".", "add", "(", "item", ")", "yield", "item", "else", ":", "for", "item", "in", "items", ":", "norm", "=", "key", "(", "item", ")", "if", "norm", "not", "in", "seen", ":", "seen", ".", "add", "(", "norm", ")", "yield", "item" ]
Generates unique items in the order they appear. Args: items (Iterable): list of items key (Callable, optional): custom normalization function. If specified returns items where `key(item)` is unique. Yields: object: a unique item from the input sequence CommandLine: python -m utool.util_list --exec-unique_ordered Example: >>> import ubelt as ub >>> items = [4, 6, 6, 0, 6, 1, 0, 2, 2, 1] >>> unique_items = list(ub.unique(items)) >>> assert unique_items == [4, 6, 0, 1, 2] Example: >>> import ubelt as ub >>> items = ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'D', 'E'] >>> unique_items = list(ub.unique(items, key=six.text_type.lower)) >>> assert unique_items == ['A', 'b', 'C', 'D', 'e'] >>> unique_items = list(ub.unique(items)) >>> assert unique_items == ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'E']
[ "Generates", "unique", "items", "in", "the", "order", "they", "appear", "." ]
python
valid
log2timeline/plaso
plaso/analysis/browser_search.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/browser_search.py#L203-L222
def _ExtractYandexSearchQuery(self, url): """Extracts a search query from a Yandex search URL. Yandex: https://www.yandex.com/search/?text=query Args: url (str): URL. Returns: str: search query or None if no query was found. """ if 'text=' not in url: return None _, _, line = url.partition('text=') before_and, _, _ = line.partition('&') if not before_and: return None yandex_search_url = before_and.split()[0] return yandex_search_url.replace('+', ' ')
[ "def", "_ExtractYandexSearchQuery", "(", "self", ",", "url", ")", ":", "if", "'text='", "not", "in", "url", ":", "return", "None", "_", ",", "_", ",", "line", "=", "url", ".", "partition", "(", "'text='", ")", "before_and", ",", "_", ",", "_", "=", "line", ".", "partition", "(", "'&'", ")", "if", "not", "before_and", ":", "return", "None", "yandex_search_url", "=", "before_and", ".", "split", "(", ")", "[", "0", "]", "return", "yandex_search_url", ".", "replace", "(", "'+'", ",", "' '", ")" ]
Extracts a search query from a Yandex search URL. Yandex: https://www.yandex.com/search/?text=query Args: url (str): URL. Returns: str: search query or None if no query was found.
[ "Extracts", "a", "search", "query", "from", "a", "Yandex", "search", "URL", "." ]
python
train
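The same extraction expressed with urllib.parse, which also takes care of percent-encoding and '+' decoding; the URL is an invented example:

from urllib.parse import urlparse, parse_qs

url = "https://www.yandex.com/search/?text=plaso+super+timeline&lr=213"
query = parse_qs(urlparse(url).query).get("text", [None])[0]
print(query)   # 'plaso super timeline'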
googleads/googleads-python-lib
googleads/adwords.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/adwords.py#L1916-L1926
def LessThanOrEqualTo(self, value): """Sets the type of the WHERE clause as "less than or equal to. Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to. """ self._awql = self._CreateSingleValueCondition(value, '<=') return self._query_builder
[ "def", "LessThanOrEqualTo", "(", "self", ",", "value", ")", ":", "self", ".", "_awql", "=", "self", ".", "_CreateSingleValueCondition", "(", "value", ",", "'<='", ")", "return", "self", ".", "_query_builder" ]
Sets the type of the WHERE clause as "less than or equal to. Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
[ "Sets", "the", "type", "of", "the", "WHERE", "clause", "as", "less", "than", "or", "equal", "to", "." ]
python
train
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L410-L425
def set_antialias(self, antialias): """Set the :ref:`ANTIALIAS` of the rasterizer used for drawing shapes. This value is a hint, and a particular backend may or may not support a particular value. At the current time, no backend supports :obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>` when drawing shapes. Note that this option does not affect text rendering, instead see :meth:`FontOptions.set_antialias`. :param antialias: An :ref:`ANTIALIAS` string. """ cairo.cairo_set_antialias(self._pointer, antialias) self._check_status()
[ "def", "set_antialias", "(", "self", ",", "antialias", ")", ":", "cairo", ".", "cairo_set_antialias", "(", "self", ".", "_pointer", ",", "antialias", ")", "self", ".", "_check_status", "(", ")" ]
Set the :ref:`ANTIALIAS` of the rasterizer used for drawing shapes. This value is a hint, and a particular backend may or may not support a particular value. At the current time, no backend supports :obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>` when drawing shapes. Note that this option does not affect text rendering, instead see :meth:`FontOptions.set_antialias`. :param antialias: An :ref:`ANTIALIAS` string.
[ "Set", "the", ":", "ref", ":", "ANTIALIAS", "of", "the", "rasterizer", "used", "for", "drawing", "shapes", ".", "This", "value", "is", "a", "hint", "and", "a", "particular", "backend", "may", "or", "may", "not", "support", "a", "particular", "value", ".", "At", "the", "current", "time", "no", "backend", "supports", ":", "obj", ":", "SUBPIXEL", "<ANTIALIAS_SUBPIXEL", ">", "when", "drawing", "shapes", "." ]
python
train
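A hedged usage sketch, assuming the cairocffi package is installed and using its module-level constants; the file name and sizes are arbitrary:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
context = cairo.Context(surface)
context.set_antialias(cairo.ANTIALIAS_NONE)   # crisp, unsmoothed shape edges
context.rectangle(10, 10, 80, 80)
context.fill()
surface.write_to_png("box.png")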
annoviko/pyclustering
pyclustering/cluster/kmeans.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmeans.py#L544-L557
def __calculate_dataset_difference(self, amount_clusters): """! @brief Calculate distance from each point to each cluster center. """ dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data))) for index_center in range(amount_clusters): if self.__metric.get_type() != type_metric.USER_DEFINED: dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center]) else: dataset_differences[index_center] = [ self.__metric(point, self.__centers[index_center]) for point in self.__pointer_data ] return dataset_differences
[ "def", "__calculate_dataset_difference", "(", "self", ",", "amount_clusters", ")", ":", "dataset_differences", "=", "numpy", ".", "zeros", "(", "(", "amount_clusters", ",", "len", "(", "self", ".", "__pointer_data", ")", ")", ")", "for", "index_center", "in", "range", "(", "amount_clusters", ")", ":", "if", "self", ".", "__metric", ".", "get_type", "(", ")", "!=", "type_metric", ".", "USER_DEFINED", ":", "dataset_differences", "[", "index_center", "]", "=", "self", ".", "__metric", "(", "self", ".", "__pointer_data", ",", "self", ".", "__centers", "[", "index_center", "]", ")", "else", ":", "dataset_differences", "[", "index_center", "]", "=", "[", "self", ".", "__metric", "(", "point", ",", "self", ".", "__centers", "[", "index_center", "]", ")", "for", "point", "in", "self", ".", "__pointer_data", "]", "return", "dataset_differences" ]
! @brief Calculate distance from each point to each cluster center.
[ "!" ]
python
valid
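The Euclidean branch boils down to one row of distances per centre; a NumPy sketch with made-up points and centres:

import numpy as np

points = np.array([[0.0, 0.0], [1.0, 1.0], [4.0, 4.0]])
centers = np.array([[0.0, 0.0], [5.0, 5.0]])

# Shape (n_centers, n_points): distance from each centre to every point.
differences = np.array([np.linalg.norm(points - c, axis=1) for c in centers])
print(differences)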
saltstack/salt
salt/states/iptables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/iptables.py#L803-L845
def flush(name, table='filter', family='ipv4', **kwargs): ''' .. versionadded:: 2014.1.0 Flush current iptables state table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] if 'chain' not in kwargs: kwargs['chain'] = '' if __opts__['test']: ret['comment'] = 'iptables rules in {0} table {1} chain {2} family needs to be flushed'.format( name, table, family) return ret if not __salt__['iptables.flush'](table, kwargs['chain'], family): ret['changes'] = {'locale': name} ret['result'] = True ret['comment'] = 'Flush iptables rules in {0} table {1} chain {2} family'.format( table, kwargs['chain'], family ) return ret else: ret['result'] = False ret['comment'] = 'Failed to flush iptables rules' return ret
[ "def", "flush", "(", "name", ",", "table", "=", "'filter'", ",", "family", "=", "'ipv4'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "for", "ignore", "in", "_STATE_INTERNAL_KEYWORDS", ":", "if", "ignore", "in", "kwargs", ":", "del", "kwargs", "[", "ignore", "]", "if", "'chain'", "not", "in", "kwargs", ":", "kwargs", "[", "'chain'", "]", "=", "''", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'iptables rules in {0} table {1} chain {2} family needs to be flushed'", ".", "format", "(", "name", ",", "table", ",", "family", ")", "return", "ret", "if", "not", "__salt__", "[", "'iptables.flush'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", ")", ":", "ret", "[", "'changes'", "]", "=", "{", "'locale'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Flush iptables rules in {0} table {1} chain {2} family'", ".", "format", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", ")", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to flush iptables rules'", "return", "ret" ]
.. versionadded:: 2014.1.0 Flush current iptables state table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6
[ "..", "versionadded", "::", "2014", ".", "1", ".", "0" ]
python
train
tanghaibao/goatools
goatools/wr_tbl.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl.py#L86-L117
def wr_xlsx_sections(fout_xlsx, xlsx_data, **kws): """Write xlsx file containing section names followed by lines of namedtuple data.""" from goatools.wr_tbl_class import WrXlsx items_str = "items" if "items" not in kws else kws["items"] prt_hdr_min = 10 num_items = 0 if xlsx_data: # Basic data checks assert len(xlsx_data[0]) == 2, "wr_xlsx_sections EXPECTED: [(section, nts), ..." assert xlsx_data[0][1], \ "wr_xlsx_sections EXPECTED SECTION({S}) LIST TO HAVE DATA".format(S=xlsx_data[0][0]) # Open xlsx file and write title (optional) and headers. xlsxobj = WrXlsx(fout_xlsx, xlsx_data[0][1][0]._fields, **kws) worksheet = xlsxobj.add_worksheet() row_idx = xlsxobj.wr_title(worksheet) hdrs_wrote = False # Write data for section_text, data_nts in xlsx_data: num_items += len(data_nts) fmt = xlsxobj.wbfmtobj.get_fmt_section() row_idx = xlsxobj.wr_row_mergeall(worksheet, section_text, fmt, row_idx) if hdrs_wrote is False or len(data_nts) > prt_hdr_min: row_idx = xlsxobj.wr_hdrs(worksheet, row_idx) hdrs_wrote = True row_idx = xlsxobj.wr_data(data_nts, row_idx, worksheet) # Close xlsx file xlsxobj.workbook.close() sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT} ({S} sections)\n".format( N=num_items, ITEMS=items_str, FOUT=fout_xlsx, S=len(xlsx_data))) else: sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format( ITEMS=items_str, FOUT=fout_xlsx))
[ "def", "wr_xlsx_sections", "(", "fout_xlsx", ",", "xlsx_data", ",", "*", "*", "kws", ")", ":", "from", "goatools", ".", "wr_tbl_class", "import", "WrXlsx", "items_str", "=", "\"items\"", "if", "\"items\"", "not", "in", "kws", "else", "kws", "[", "\"items\"", "]", "prt_hdr_min", "=", "10", "num_items", "=", "0", "if", "xlsx_data", ":", "# Basic data checks", "assert", "len", "(", "xlsx_data", "[", "0", "]", ")", "==", "2", ",", "\"wr_xlsx_sections EXPECTED: [(section, nts), ...\"", "assert", "xlsx_data", "[", "0", "]", "[", "1", "]", ",", "\"wr_xlsx_sections EXPECTED SECTION({S}) LIST TO HAVE DATA\"", ".", "format", "(", "S", "=", "xlsx_data", "[", "0", "]", "[", "0", "]", ")", "# Open xlsx file and write title (optional) and headers.", "xlsxobj", "=", "WrXlsx", "(", "fout_xlsx", ",", "xlsx_data", "[", "0", "]", "[", "1", "]", "[", "0", "]", ".", "_fields", ",", "*", "*", "kws", ")", "worksheet", "=", "xlsxobj", ".", "add_worksheet", "(", ")", "row_idx", "=", "xlsxobj", ".", "wr_title", "(", "worksheet", ")", "hdrs_wrote", "=", "False", "# Write data", "for", "section_text", ",", "data_nts", "in", "xlsx_data", ":", "num_items", "+=", "len", "(", "data_nts", ")", "fmt", "=", "xlsxobj", ".", "wbfmtobj", ".", "get_fmt_section", "(", ")", "row_idx", "=", "xlsxobj", ".", "wr_row_mergeall", "(", "worksheet", ",", "section_text", ",", "fmt", ",", "row_idx", ")", "if", "hdrs_wrote", "is", "False", "or", "len", "(", "data_nts", ")", ">", "prt_hdr_min", ":", "row_idx", "=", "xlsxobj", ".", "wr_hdrs", "(", "worksheet", ",", "row_idx", ")", "hdrs_wrote", "=", "True", "row_idx", "=", "xlsxobj", ".", "wr_data", "(", "data_nts", ",", "row_idx", ",", "worksheet", ")", "# Close xlsx file", "xlsxobj", ".", "workbook", ".", "close", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\" {N:>5} {ITEMS} WROTE: {FOUT} ({S} sections)\\n\"", ".", "format", "(", "N", "=", "num_items", ",", "ITEMS", "=", "items_str", ",", "FOUT", "=", "fout_xlsx", ",", "S", "=", "len", "(", "xlsx_data", ")", ")", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "\" 0 {ITEMS}. NOT WRITING {FOUT}\\n\"", ".", "format", "(", "ITEMS", "=", "items_str", ",", "FOUT", "=", "fout_xlsx", ")", ")" ]
Write xlsx file containing section names followed by lines of namedtuple data.
[ "Write", "xlsx", "file", "containing", "section", "names", "followed", "by", "lines", "of", "namedtuple", "data", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/sari_hook.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/sari_hook.py#L224-L252
def sari_score(predictions, labels, features, **unused_kwargs): """Computes the SARI scores from the given source, prediction and targets. An approximate SARI scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4. Also, this does not have beam search. Args: predictions: tensor, model predictions. labels: tensor, gold output. features: dict, containing inputs. Returns: sari: int, approx sari score """ if "inputs" not in features: raise ValueError("sari_score requires inputs feature") # Convert the inputs and outputs to a [batch_size, sequence_length] tensor. inputs = tf.squeeze(features["inputs"], axis=[-1, -2]) outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) outputs = tf.squeeze(outputs, axis=[-1, -2]) # Convert the labels to a [batch_size, 1, sequence_length] tensor. labels = tf.squeeze(labels, axis=[-1, -2]) labels = tf.expand_dims(labels, axis=1) score, _, _, _ = get_sari(inputs, outputs, labels) return score, tf.constant(1.0)
[ "def", "sari_score", "(", "predictions", ",", "labels", ",", "features", ",", "*", "*", "unused_kwargs", ")", ":", "if", "\"inputs\"", "not", "in", "features", ":", "raise", "ValueError", "(", "\"sari_score requires inputs feature\"", ")", "# Convert the inputs and outputs to a [batch_size, sequence_length] tensor.", "inputs", "=", "tf", ".", "squeeze", "(", "features", "[", "\"inputs\"", "]", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "outputs", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "outputs", "=", "tf", ".", "squeeze", "(", "outputs", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "# Convert the labels to a [batch_size, 1, sequence_length] tensor.", "labels", "=", "tf", ".", "squeeze", "(", "labels", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", "labels", "=", "tf", ".", "expand_dims", "(", "labels", ",", "axis", "=", "1", ")", "score", ",", "_", ",", "_", ",", "_", "=", "get_sari", "(", "inputs", ",", "outputs", ",", "labels", ")", "return", "score", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Computes the SARI scores from the given source, prediction and targets. An approximate SARI scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4. Also, this does not have beam search. Args: predictions: tensor, model predictions. labels: tensor, gold output. features: dict, containing inputs. Returns: sari: int, approx sari score
[ "Computes", "the", "SARI", "scores", "from", "the", "given", "source", "prediction", "and", "targets", "." ]
python
train
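A NumPy stand-in for the tensor plumbing only (logits to token ids, squeezing the singleton axes); the shapes are invented and this says nothing about the SARI computation itself:

import numpy as np

# [batch, length, 1, 1, vocab] logits, as shaped before the squeeze/argmax above.
predictions = np.random.rand(2, 5, 1, 1, 7)
outputs = predictions.argmax(axis=-1).squeeze(axis=(-1, -2))
print(outputs.shape)   # (2, 5) token ids per example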
ArangoDB-Community/pyArango
pyArango/collection.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L333-L344
def ensureHashIndex(self, fields, unique = False, sparse = True, deduplicate = False) : """Creates a hash index if it does not already exist, and returns it""" data = { "type" : "hash", "fields" : fields, "unique" : unique, "sparse" : sparse, "deduplicate": deduplicate } ind = Index(self, creationData = data) self.indexes["hash"][ind.infos["id"]] = ind return ind
[ "def", "ensureHashIndex", "(", "self", ",", "fields", ",", "unique", "=", "False", ",", "sparse", "=", "True", ",", "deduplicate", "=", "False", ")", ":", "data", "=", "{", "\"type\"", ":", "\"hash\"", ",", "\"fields\"", ":", "fields", ",", "\"unique\"", ":", "unique", ",", "\"sparse\"", ":", "sparse", ",", "\"deduplicate\"", ":", "deduplicate", "}", "ind", "=", "Index", "(", "self", ",", "creationData", "=", "data", ")", "self", ".", "indexes", "[", "\"hash\"", "]", "[", "ind", ".", "infos", "[", "\"id\"", "]", "]", "=", "ind", "return", "ind" ]
Creates a hash index if it does not already exist, and returns it
[ "Creates", "a", "hash", "index", "if", "it", "does", "not", "already", "exist", "and", "returns", "it" ]
python
train
nickw444/nsw-fuel-api-client
nsw_fuel/client.py
https://github.com/nickw444/nsw-fuel-api-client/blob/06bd9ae7ad094d5965fce3a9468785247e1b5a39/nsw_fuel/client.py#L34-L45
def get_fuel_prices(self) -> GetFuelPricesResponse: """Fetches fuel prices for all stations.""" response = requests.get( '{}/prices'.format(API_URL_BASE), headers=self._get_headers(), timeout=self._timeout, ) if not response.ok: raise FuelCheckError.create(response) return GetFuelPricesResponse.deserialize(response.json())
[ "def", "get_fuel_prices", "(", "self", ")", "->", "GetFuelPricesResponse", ":", "response", "=", "requests", ".", "get", "(", "'{}/prices'", ".", "format", "(", "API_URL_BASE", ")", ",", "headers", "=", "self", ".", "_get_headers", "(", ")", ",", "timeout", "=", "self", ".", "_timeout", ",", ")", "if", "not", "response", ".", "ok", ":", "raise", "FuelCheckError", ".", "create", "(", "response", ")", "return", "GetFuelPricesResponse", ".", "deserialize", "(", "response", ".", "json", "(", ")", ")" ]
Fetches fuel prices for all stations.
[ "Fetches", "fuel", "prices", "for", "all", "stations", "." ]
python
valid
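The general shape of the call, with a hypothetical URL and helper name: a bounded timeout, an explicit error on non-2xx responses, then JSON decoding:

import requests

def fetch_json(url, headers=None, timeout=10):
    response = requests.get(url, headers=headers, timeout=timeout)
    if not response.ok:
        raise RuntimeError("request failed with HTTP %s" % response.status_code)
    return response.json()

# fetch_json("https://api.example.com/prices", headers={"apikey": "..."})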
spencerahill/aospy
aospy/automate.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/automate.py#L355-L508
def submit_mult_calcs(calc_suite_specs, exec_options=None): """Generate and execute all specified computations. Once the calculations are prepped and submitted for execution, any calculation that triggers any exception or error is skipped, and the rest of the calculations proceed unaffected. This prevents an error in a single calculation from crashing a large suite of calculations. Parameters ---------- calc_suite_specs : dict The specifications describing the full set of calculations to be generated and potentially executed. Accepted keys and their values: library : module or package comprising an aospy object library The aospy object library for these calculations. projects : list of aospy.Proj objects The projects to permute over. models : 'all', 'default', or list of aospy.Model objects The models to permute over. If 'all', use all models in the ``models`` attribute of each ``Proj``. If 'default', use all models in the ``default_models`` attribute of each ``Proj``. runs : 'all', 'default', or list of aospy.Run objects The runs to permute over. If 'all', use all runs in the ``runs`` attribute of each ``Model``. If 'default', use all runs in the ``default_runs`` attribute of each ``Model``. variables : list of aospy.Var objects The variables to be calculated. regions : 'all' or list of aospy.Region objects The region(s) over which any regional reductions will be performed. If 'all', use all regions in the ``regions`` attribute of each ``Proj``. date_ranges : 'default' or a list of tuples The range of dates (inclusive) over which to perform calculations. If 'default', use the ``default_start_date`` and ``default_end_date`` attribute of each ``Run``. Else provide a list of tuples, each containing a pair of start and end dates, such as ``date_ranges=[(start, end)]`` where ``start`` and ``end`` are each ``datetime.datetime`` objects, partial datetime strings (e.g. '0001'), ``np.datetime64`` objects, or ``cftime.datetime`` objects. output_time_intervals : {'ann', season-string, month-integer} The sub-annual time interval over which to aggregate. - 'ann' : Annual mean - season-string : E.g. 'JJA' for June-July-August - month-integer : 1 for January, 2 for February, etc. Each one is a separate reduction, e.g. [1, 2] would produce averages (or other specified time reduction) over all Januaries, and separately over all Februaries. output_time_regional_reductions : list of reduction string identifiers Unlike most other keys, these are not permuted over when creating the :py:class:`aospy.Calc` objects that execute the calculations; each :py:class:`aospy.Calc` performs all of the specified reductions. Accepted string identifiers are: - Gridpoint-by-gridpoint output: - 'av' : Gridpoint-by-gridpoint time-average - 'std' : Gridpoint-by-gridpoint temporal standard deviation - 'ts' : Gridpoint-by-gridpoint time-series - Averages over each region specified via `region`: - 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts' output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional How to reduce the data vertically: - None : no vertical reduction - 'vert_av' : mass-weighted vertical average - 'vert_int' : mass-weighted vertical integral input_time_intervals : {'annual', 'monthly', 'daily', '#hr'} A string specifying the time resolution of the input data. In '#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for sub-daily output. These are the suggested specifiers, but others may be used if they are also used by the DataLoaders for the given Runs. 
input_time_datatypes : {'inst', 'ts', 'av'} What the time axis of the input data represents: - 'inst' : Timeseries of instantaneous values - 'ts' : Timeseries of averages over the period of each time-index - 'av' : A single value averaged over a date range input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional The vertical coordinate system used by the input data: - False : not defined vertically - 'pressure' : pressure coordinates - 'sigma' : hybrid sigma-pressure coordinates input_time_offsets : {None, dict}, optional How to offset input data in time to correct for metadata errors - None : no time offset applied - dict : e.g. ``{'hours': -3}`` to offset times by -3 hours See :py:meth:`aospy.utils.times.apply_time_offset`. exec_options : dict or None (default None) Options regarding how the calculations are reported, submitted, and saved. If None, default settings are used for all options. Currently supported options (each should be either `True` or `False`): - prompt_verify : (default False) If True, print summary of calculations to be performed and prompt user to confirm before submitting for execution. - parallelize : (default False) If True, submit calculations in parallel. - client : distributed.Client or None (default None) The dask.distributed Client used to schedule computations. If None and parallelize is True, a LocalCluster will be started. - write_to_tar : (default True) If True, write results of calculations to .tar files, one for each :py:class:`aospy.Run` object. These tar files have an identical directory structures the standard output relative to their root directory, which is specified via the `tar_direc_out` argument of each Proj object's instantiation. Returns ------- A list of the return values from each :py:meth:`aospy.Calc.compute` call If a calculation ran without error, this value is the :py:class:`aospy.Calc` object itself, with the results of its calculations saved in its ``data_out`` attribute. ``data_out`` is a dictionary, with the keys being the temporal-regional reduction identifiers (e.g. 'reg.av'), and the values being the corresponding result. If any error occurred during a calculation, the return value is None. Raises ------ AospyException If the ``prompt_verify`` option is set to True and the user does not respond affirmatively to the prompt. """ if exec_options is None: exec_options = dict() if exec_options.pop('prompt_verify', False): print(_print_suite_summary(calc_suite_specs)) _user_verify() calc_suite = CalcSuite(calc_suite_specs) calcs = calc_suite.create_calcs() if not calcs: raise AospyException( "The specified combination of parameters yielded zero " "calculations. Most likely, one of the parameters is " "inadvertently empty." ) return _exec_calcs(calcs, **exec_options)
[ "def", "submit_mult_calcs", "(", "calc_suite_specs", ",", "exec_options", "=", "None", ")", ":", "if", "exec_options", "is", "None", ":", "exec_options", "=", "dict", "(", ")", "if", "exec_options", ".", "pop", "(", "'prompt_verify'", ",", "False", ")", ":", "print", "(", "_print_suite_summary", "(", "calc_suite_specs", ")", ")", "_user_verify", "(", ")", "calc_suite", "=", "CalcSuite", "(", "calc_suite_specs", ")", "calcs", "=", "calc_suite", ".", "create_calcs", "(", ")", "if", "not", "calcs", ":", "raise", "AospyException", "(", "\"The specified combination of parameters yielded zero \"", "\"calculations. Most likely, one of the parameters is \"", "\"inadvertently empty.\"", ")", "return", "_exec_calcs", "(", "calcs", ",", "*", "*", "exec_options", ")" ]
Generate and execute all specified computations. Once the calculations are prepped and submitted for execution, any calculation that triggers any exception or error is skipped, and the rest of the calculations proceed unaffected. This prevents an error in a single calculation from crashing a large suite of calculations. Parameters ---------- calc_suite_specs : dict The specifications describing the full set of calculations to be generated and potentially executed. Accepted keys and their values: library : module or package comprising an aospy object library The aospy object library for these calculations. projects : list of aospy.Proj objects The projects to permute over. models : 'all', 'default', or list of aospy.Model objects The models to permute over. If 'all', use all models in the ``models`` attribute of each ``Proj``. If 'default', use all models in the ``default_models`` attribute of each ``Proj``. runs : 'all', 'default', or list of aospy.Run objects The runs to permute over. If 'all', use all runs in the ``runs`` attribute of each ``Model``. If 'default', use all runs in the ``default_runs`` attribute of each ``Model``. variables : list of aospy.Var objects The variables to be calculated. regions : 'all' or list of aospy.Region objects The region(s) over which any regional reductions will be performed. If 'all', use all regions in the ``regions`` attribute of each ``Proj``. date_ranges : 'default' or a list of tuples The range of dates (inclusive) over which to perform calculations. If 'default', use the ``default_start_date`` and ``default_end_date`` attribute of each ``Run``. Else provide a list of tuples, each containing a pair of start and end dates, such as ``date_ranges=[(start, end)]`` where ``start`` and ``end`` are each ``datetime.datetime`` objects, partial datetime strings (e.g. '0001'), ``np.datetime64`` objects, or ``cftime.datetime`` objects. output_time_intervals : {'ann', season-string, month-integer} The sub-annual time interval over which to aggregate. - 'ann' : Annual mean - season-string : E.g. 'JJA' for June-July-August - month-integer : 1 for January, 2 for February, etc. Each one is a separate reduction, e.g. [1, 2] would produce averages (or other specified time reduction) over all Januaries, and separately over all Februaries. output_time_regional_reductions : list of reduction string identifiers Unlike most other keys, these are not permuted over when creating the :py:class:`aospy.Calc` objects that execute the calculations; each :py:class:`aospy.Calc` performs all of the specified reductions. Accepted string identifiers are: - Gridpoint-by-gridpoint output: - 'av' : Gridpoint-by-gridpoint time-average - 'std' : Gridpoint-by-gridpoint temporal standard deviation - 'ts' : Gridpoint-by-gridpoint time-series - Averages over each region specified via `region`: - 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts' output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional How to reduce the data vertically: - None : no vertical reduction - 'vert_av' : mass-weighted vertical average - 'vert_int' : mass-weighted vertical integral input_time_intervals : {'annual', 'monthly', 'daily', '#hr'} A string specifying the time resolution of the input data. In '#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for sub-daily output. These are the suggested specifiers, but others may be used if they are also used by the DataLoaders for the given Runs. 
input_time_datatypes : {'inst', 'ts', 'av'} What the time axis of the input data represents: - 'inst' : Timeseries of instantaneous values - 'ts' : Timeseries of averages over the period of each time-index - 'av' : A single value averaged over a date range input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional The vertical coordinate system used by the input data: - False : not defined vertically - 'pressure' : pressure coordinates - 'sigma' : hybrid sigma-pressure coordinates input_time_offsets : {None, dict}, optional How to offset input data in time to correct for metadata errors - None : no time offset applied - dict : e.g. ``{'hours': -3}`` to offset times by -3 hours See :py:meth:`aospy.utils.times.apply_time_offset`. exec_options : dict or None (default None) Options regarding how the calculations are reported, submitted, and saved. If None, default settings are used for all options. Currently supported options (each should be either `True` or `False`): - prompt_verify : (default False) If True, print summary of calculations to be performed and prompt user to confirm before submitting for execution. - parallelize : (default False) If True, submit calculations in parallel. - client : distributed.Client or None (default None) The dask.distributed Client used to schedule computations. If None and parallelize is True, a LocalCluster will be started. - write_to_tar : (default True) If True, write results of calculations to .tar files, one for each :py:class:`aospy.Run` object. These tar files have an identical directory structures the standard output relative to their root directory, which is specified via the `tar_direc_out` argument of each Proj object's instantiation. Returns ------- A list of the return values from each :py:meth:`aospy.Calc.compute` call If a calculation ran without error, this value is the :py:class:`aospy.Calc` object itself, with the results of its calculations saved in its ``data_out`` attribute. ``data_out`` is a dictionary, with the keys being the temporal-regional reduction identifiers (e.g. 'reg.av'), and the values being the corresponding result. If any error occurred during a calculation, the return value is None. Raises ------ AospyException If the ``prompt_verify`` option is set to True and the user does not respond affirmatively to the prompt.
[ "Generate", "and", "execute", "all", "specified", "computations", "." ]
python
train
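A minimal usage sketch for submit_mult_calcs, built only from the keys documented in the docstring above. The import path, the object library name (example_obj_lib) and the attribute names on it (my_proj, my_var) are assumptions for illustration, not taken from the record.

# Sketch only: `example_obj_lib`, `my_proj` and `my_var` are hypothetical placeholders
# for a user-defined aospy object library; the import path is assumed.
from aospy.automate import submit_mult_calcs

import example_obj_lib  # assumed user-defined library of Proj/Model/Run/Var objects

calc_suite_specs = dict(
    library=example_obj_lib,
    projects=[example_obj_lib.my_proj],
    models='default',
    runs='default',
    variables=[example_obj_lib.my_var],
    regions='all',
    date_ranges='default',
    output_time_intervals=['ann'],
    output_time_regional_reductions=['av', 'reg.av'],
    output_vertical_reductions=[None],
    input_time_intervals=['monthly'],
    input_time_datatypes=['ts'],
    input_vertical_datatypes=[False],
    input_time_offsets=[None],
)

calcs = submit_mult_calcs(calc_suite_specs,
                          exec_options=dict(prompt_verify=False, parallelize=False))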
mcrute/pydora
pydora/audio_backend.py
https://github.com/mcrute/pydora/blob/d9e353e7f19da741dcf372246b4d5640cb788488/pydora/audio_backend.py#L137-L148
def _ensure_started(self): """Ensure player backing process is started """ if self._process and self._process.poll() is None: return if not getattr(self, "_cmd"): raise RuntimeError("Player command is not configured") log.debug("Starting playback command: %r", self._cmd) self._process = SilentPopen(self._cmd) self._post_start()
[ "def", "_ensure_started", "(", "self", ")", ":", "if", "self", ".", "_process", "and", "self", ".", "_process", ".", "poll", "(", ")", "is", "None", ":", "return", "if", "not", "getattr", "(", "self", ",", "\"_cmd\"", ")", ":", "raise", "RuntimeError", "(", "\"Player command is not configured\"", ")", "log", ".", "debug", "(", "\"Starting playback command: %r\"", ",", "self", ".", "_cmd", ")", "self", ".", "_process", "=", "SilentPopen", "(", "self", ".", "_cmd", ")", "self", ".", "_post_start", "(", ")" ]
Ensure player backing process is started
[ "Ensure", "player", "backing", "process", "is", "started" ]
python
valid
05bit/peewee-async
peewee_async.py
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L269-L276
async def prefetch(self, query, *subqueries): """Asynchronous version of the `prefetch()` from peewee. :return: Query that has already cached data for subqueries """ query = self._swap_database(query) subqueries = map(self._swap_database, subqueries) return (await prefetch(query, *subqueries))
[ "async", "def", "prefetch", "(", "self", ",", "query", ",", "*", "subqueries", ")", ":", "query", "=", "self", ".", "_swap_database", "(", "query", ")", "subqueries", "=", "map", "(", "self", ".", "_swap_database", ",", "subqueries", ")", "return", "(", "await", "prefetch", "(", "query", ",", "*", "subqueries", ")", ")" ]
Asynchronous version of the `prefetch()` from peewee. :return: Query that has already cached data for subqueries
[ "Asynchronous", "version", "of", "the", "prefetch", "()", "from", "peewee", "." ]
python
train
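A hedged usage sketch for the asynchronous prefetch above. The database name, the credentials and the Post/Comment models are placeholders; only Manager.prefetch itself comes from the record.

# Placeholder models and database; peewee_async.Manager wraps the async connection.
import peewee
import peewee_async

database = peewee_async.PostgresqlDatabase('example_db')  # assumed DB name/credentials

class Post(peewee.Model):
    title = peewee.CharField()
    class Meta:
        database = database

class Comment(peewee.Model):
    post = peewee.ForeignKeyField(Post, backref='comments')
    body = peewee.TextField()
    class Meta:
        database = database

objects = peewee_async.Manager(database)

async def load_posts_with_comments():
    # Comment rows are fetched once and cached onto their parent Post objects.
    return await objects.prefetch(Post.select(), Comment.select())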
cgoldberg/sauceclient
sauceclient.py
https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L76-L87
def request(self, method, url, body=None, content_type='application/json'): """Send http request.""" headers = self.make_auth_headers(content_type) connection = http_client.HTTPSConnection(self.apibase) connection.request(method, url, body, headers=headers) response = connection.getresponse() data = response.read() connection.close() if response.status not in [200, 201]: raise SauceException('{}: {}.\nSauce Status NOT OK'.format( response.status, response.reason), response=response) return json.loads(data.decode('utf-8'))
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "body", "=", "None", ",", "content_type", "=", "'application/json'", ")", ":", "headers", "=", "self", ".", "make_auth_headers", "(", "content_type", ")", "connection", "=", "http_client", ".", "HTTPSConnection", "(", "self", ".", "apibase", ")", "connection", ".", "request", "(", "method", ",", "url", ",", "body", ",", "headers", "=", "headers", ")", "response", "=", "connection", ".", "getresponse", "(", ")", "data", "=", "response", ".", "read", "(", ")", "connection", ".", "close", "(", ")", "if", "response", ".", "status", "not", "in", "[", "200", ",", "201", "]", ":", "raise", "SauceException", "(", "'{}: {}.\\nSauce Status NOT OK'", ".", "format", "(", "response", ".", "status", ",", "response", ".", "reason", ")", ",", "response", "=", "response", ")", "return", "json", ".", "loads", "(", "data", ".", "decode", "(", "'utf-8'", ")", ")" ]
Send http request.
[ "Send", "http", "request", "." ]
python
train
ic-labs/django-icekit
icekit/publishing/managers.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/managers.py#L342-L367
def published(self, for_user=UNSET, force_exchange=False): """ Apply additional filtering of published items over that done in `PublishingQuerySet.published` to filter based on additional publising date fields used by Fluent. """ if for_user is not UNSET: return self.visible() queryset = super(PublishingUrlNodeQuerySet, self).published( for_user=for_user, force_exchange=force_exchange) # Exclude by publication date on the published version of items, *not* # the draft vesion, or we could get the wrong result. # Exclude fields of published copy of draft items, not draft itself... queryset = queryset.exclude( Q(publishing_is_draft=True) & Q( Q(publishing_linked__publication_date__gt=now()) | Q(publishing_linked__publication_end_date__lte=now()))) # ...and exclude fields directly on published items queryset = queryset.exclude( Q(publishing_is_draft=False) & Q( Q(publication_date__gt=now()) | Q(publication_end_date__lte=now()))) return queryset
[ "def", "published", "(", "self", ",", "for_user", "=", "UNSET", ",", "force_exchange", "=", "False", ")", ":", "if", "for_user", "is", "not", "UNSET", ":", "return", "self", ".", "visible", "(", ")", "queryset", "=", "super", "(", "PublishingUrlNodeQuerySet", ",", "self", ")", ".", "published", "(", "for_user", "=", "for_user", ",", "force_exchange", "=", "force_exchange", ")", "# Exclude by publication date on the published version of items, *not*", "# the draft vesion, or we could get the wrong result.", "# Exclude fields of published copy of draft items, not draft itself...", "queryset", "=", "queryset", ".", "exclude", "(", "Q", "(", "publishing_is_draft", "=", "True", ")", "&", "Q", "(", "Q", "(", "publishing_linked__publication_date__gt", "=", "now", "(", ")", ")", "|", "Q", "(", "publishing_linked__publication_end_date__lte", "=", "now", "(", ")", ")", ")", ")", "# ...and exclude fields directly on published items", "queryset", "=", "queryset", ".", "exclude", "(", "Q", "(", "publishing_is_draft", "=", "False", ")", "&", "Q", "(", "Q", "(", "publication_date__gt", "=", "now", "(", ")", ")", "|", "Q", "(", "publication_end_date__lte", "=", "now", "(", ")", ")", ")", ")", "return", "queryset" ]
Apply additional filtering of published items over that done in `PublishingQuerySet.published` to filter based on additional publising date fields used by Fluent.
[ "Apply", "additional", "filtering", "of", "published", "items", "over", "that", "done", "in", "PublishingQuerySet", ".", "published", "to", "filter", "based", "on", "additional", "publising", "date", "fields", "used", "by", "Fluent", "." ]
python
train
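A standalone illustration of the date-window exclusion pattern used above, written against a hypothetical queryset rather than icekit's publishing models.

# Generic Django sketch of the same Q-object pattern; `queryset` is any queryset whose
# model has publication_date / publication_end_date fields (hypothetical here).
from django.db.models import Q
from django.utils.timezone import now

def currently_published(queryset):
    # Exclude items whose window has not opened yet or has already closed.
    return queryset.exclude(
        Q(publication_date__gt=now()) | Q(publication_end_date__lte=now())
    )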
edx/edx-enterprise
consent/api/v1/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/consent/api/v1/views.py#L95-L115
def get_required_query_params(self, request): """ Gets ``username``, ``course_id``, and ``enterprise_customer_uuid``, which are the relevant query parameters for this API endpoint. :param request: The request to this endpoint. :return: The ``username``, ``course_id``, and ``enterprise_customer_uuid`` from the request. """ username = get_request_value(request, self.REQUIRED_PARAM_USERNAME, '') course_id = get_request_value(request, self.REQUIRED_PARAM_COURSE_ID, '') program_uuid = get_request_value(request, self.REQUIRED_PARAM_PROGRAM_UUID, '') enterprise_customer_uuid = get_request_value(request, self.REQUIRED_PARAM_ENTERPRISE_CUSTOMER) if not (username and (course_id or program_uuid) and enterprise_customer_uuid): raise ConsentAPIRequestError( self.get_missing_params_message([ ("'username'", bool(username)), ("'enterprise_customer_uuid'", bool(enterprise_customer_uuid)), ("one of 'course_id' or 'program_uuid'", bool(course_id or program_uuid)), ]) ) return username, course_id, program_uuid, enterprise_customer_uuid
[ "def", "get_required_query_params", "(", "self", ",", "request", ")", ":", "username", "=", "get_request_value", "(", "request", ",", "self", ".", "REQUIRED_PARAM_USERNAME", ",", "''", ")", "course_id", "=", "get_request_value", "(", "request", ",", "self", ".", "REQUIRED_PARAM_COURSE_ID", ",", "''", ")", "program_uuid", "=", "get_request_value", "(", "request", ",", "self", ".", "REQUIRED_PARAM_PROGRAM_UUID", ",", "''", ")", "enterprise_customer_uuid", "=", "get_request_value", "(", "request", ",", "self", ".", "REQUIRED_PARAM_ENTERPRISE_CUSTOMER", ")", "if", "not", "(", "username", "and", "(", "course_id", "or", "program_uuid", ")", "and", "enterprise_customer_uuid", ")", ":", "raise", "ConsentAPIRequestError", "(", "self", ".", "get_missing_params_message", "(", "[", "(", "\"'username'\"", ",", "bool", "(", "username", ")", ")", ",", "(", "\"'enterprise_customer_uuid'\"", ",", "bool", "(", "enterprise_customer_uuid", ")", ")", ",", "(", "\"one of 'course_id' or 'program_uuid'\"", ",", "bool", "(", "course_id", "or", "program_uuid", ")", ")", ",", "]", ")", ")", "return", "username", ",", "course_id", ",", "program_uuid", ",", "enterprise_customer_uuid" ]
Gets ``username``, ``course_id``, and ``enterprise_customer_uuid``, which are the relevant query parameters for this API endpoint. :param request: The request to this endpoint. :return: The ``username``, ``course_id``, and ``enterprise_customer_uuid`` from the request.
[ "Gets", "username", "course_id", "and", "enterprise_customer_uuid", "which", "are", "the", "relevant", "query", "parameters", "for", "this", "API", "endpoint", "." ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/rl/rl_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L100-L117
def evaluate_all_configs( hparams, agent_model_dir, eval_fn=_eval_fn_with_learner ): """Evaluate the agent with multiple eval configurations.""" metrics = {} # Iterate over all combinations of sampling temperatures and whether to do # initial no-ops. for sampling_temp in hparams.eval_sampling_temps: # Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration. for max_num_noops in set([hparams.eval_max_num_noops, 0]): scores = evaluate_single_config( hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn ) for (score, clipped) in zip(scores, (True, False)): metric_name = get_metric_name(sampling_temp, max_num_noops, clipped) metrics[metric_name] = score return metrics
[ "def", "evaluate_all_configs", "(", "hparams", ",", "agent_model_dir", ",", "eval_fn", "=", "_eval_fn_with_learner", ")", ":", "metrics", "=", "{", "}", "# Iterate over all combinations of sampling temperatures and whether to do", "# initial no-ops.", "for", "sampling_temp", "in", "hparams", ".", "eval_sampling_temps", ":", "# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.", "for", "max_num_noops", "in", "set", "(", "[", "hparams", ".", "eval_max_num_noops", ",", "0", "]", ")", ":", "scores", "=", "evaluate_single_config", "(", "hparams", ",", "sampling_temp", ",", "max_num_noops", ",", "agent_model_dir", ",", "eval_fn", ")", "for", "(", "score", ",", "clipped", ")", "in", "zip", "(", "scores", ",", "(", "True", ",", "False", ")", ")", ":", "metric_name", "=", "get_metric_name", "(", "sampling_temp", ",", "max_num_noops", ",", "clipped", ")", "metrics", "[", "metric_name", "]", "=", "score", "return", "metrics" ]
Evaluate the agent with multiple eval configurations.
[ "Evaluate", "the", "agent", "with", "multiple", "eval", "configurations", "." ]
python
train
andreikop/qutepart
qutepart/rectangularselection.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/rectangularselection.py#L90-L98
def _realToVisibleColumn(self, text, realColumn): """If \t is used, real position of symbol in block and visible position differs This function converts real to visible """ generator = self._visibleCharPositionGenerator(text) for i in range(realColumn): val = next(generator) val = next(generator) return val
[ "def", "_realToVisibleColumn", "(", "self", ",", "text", ",", "realColumn", ")", ":", "generator", "=", "self", ".", "_visibleCharPositionGenerator", "(", "text", ")", "for", "i", "in", "range", "(", "realColumn", ")", ":", "val", "=", "next", "(", "generator", ")", "val", "=", "next", "(", "generator", ")", "return", "val" ]
If \t is used, real position of symbol in block and visible position differs This function converts real to visible
[ "If", "\\", "t", "is", "used", "real", "position", "of", "symbol", "in", "block", "and", "visible", "position", "differs", "This", "function", "converts", "real", "to", "visible" ]
python
train
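A standalone sketch of the real-to-visible column conversion described above, with an explicit tab width; qutepart's own generator is internal, so the tab-stop width of 4 is an assumption.

def real_to_visible_column(text, real_column, tab_width=4):
    # Walk the first `real_column` characters, expanding '\t' to the next tab stop.
    visible = 0
    for ch in text[:real_column]:
        if ch == '\t':
            visible = (visible // tab_width + 1) * tab_width
        else:
            visible += 1
    return visible

assert real_to_visible_column("\tab", 2, tab_width=4) == 5  # '\t' -> col 4, 'a' -> col 5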
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L1329-L1342
def setName( self, name ): """ Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) """ self.name = name self.errmsg = "Expected " + self.name if hasattr(self,"exception"): self.exception.msg = self.errmsg return self
[ "def", "setName", "(", "self", ",", "name", ")", ":", "self", ".", "name", "=", "name", "self", ".", "errmsg", "=", "\"Expected \"", "+", "self", ".", "name", "if", "hasattr", "(", "self", ",", "\"exception\"", ")", ":", "self", ".", "exception", ".", "msg", "=", "self", ".", "errmsg", "return", "self" ]
Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
[ "Define", "name", "for", "this", "expression", "makes", "debugging", "and", "exception", "messages", "clearer", "." ]
python
train
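The docstring example above, written out as a runnable snippet (requires pyparsing to be installed).

from pyparsing import Word, nums, ParseException

integer = Word(nums).setName("integer")
try:
    integer.parseString("ABC")
except ParseException as err:
    # The message now reads "Expected integer ..." rather than "Expected W:(0123...) ..."
    print(err)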
JnyJny/Geometry
Geometry/point.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/point.py#L1353-L1387
def ccw(self, b, c, axis='z'): ''' :b: Point or point equivalent :c: Point or point equivalent :axis: optional string or integer in set('x',0,'y',1,'z',2) :return: float CCW - Counter Clockwise Returns an integer signifying the direction of rotation around 'axis' described by the angle [b, self, c]. > 0 : counter-clockwise 0 : points are collinear < 0 : clockwise Returns an integer. Raises ValueError if axis is not in 'xyz'. ''' bsuba = b - self csuba = c - self if axis in ['z', 2]: return (bsuba.x * csuba.y) - (bsuba.y * csuba.x) if axis in ['y', 1]: return (bsuba.x * csuba.z) - (bsuba.z * csuba.x) if axis in ['x', 0]: return (bsuba.y * csuba.z) - (bsuba.z * csuba.y) msg = "invalid axis '{!r}', must be one of {}".format(axis, self._keys) raise ValueError(msg)
[ "def", "ccw", "(", "self", ",", "b", ",", "c", ",", "axis", "=", "'z'", ")", ":", "bsuba", "=", "b", "-", "self", "csuba", "=", "c", "-", "self", "if", "axis", "in", "[", "'z'", ",", "2", "]", ":", "return", "(", "bsuba", ".", "x", "*", "csuba", ".", "y", ")", "-", "(", "bsuba", ".", "y", "*", "csuba", ".", "x", ")", "if", "axis", "in", "[", "'y'", ",", "1", "]", ":", "return", "(", "bsuba", ".", "x", "*", "csuba", ".", "z", ")", "-", "(", "bsuba", ".", "z", "*", "csuba", ".", "x", ")", "if", "axis", "in", "[", "'x'", ",", "0", "]", ":", "return", "(", "bsuba", ".", "y", "*", "csuba", ".", "z", ")", "-", "(", "bsuba", ".", "z", "*", "csuba", ".", "y", ")", "msg", "=", "\"invalid axis '{!r}', must be one of {}\"", ".", "format", "(", "axis", ",", "self", ".", "_keys", ")", "raise", "ValueError", "(", "msg", ")" ]
:b: Point or point equivalent :c: Point or point equivalent :axis: optional string or integer in set('x',0,'y',1,'z',2) :return: float CCW - Counter Clockwise Returns an integer signifying the direction of rotation around 'axis' described by the angle [b, self, c]. > 0 : counter-clockwise 0 : points are collinear < 0 : clockwise Returns an integer. Raises ValueError if axis is not in 'xyz'.
[ ":", "b", ":", "Point", "or", "point", "equivalent", ":", "c", ":", "Point", "or", "point", "equivalent", ":", "axis", ":", "optional", "string", "or", "integer", "in", "set", "(", "x", "0", "y", "1", "z", "2", ")", ":", "return", ":", "float" ]
python
train
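A standalone version of the default z-axis case above, using plain (x, y) tuples instead of Geometry Point objects.

def ccw_z(a, b, c):
    # > 0: counter-clockwise, 0: collinear, < 0: clockwise (same convention as above)
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

print(ccw_z((0, 0), (1, 0), (0, 1)))  # 1  -> counter-clockwise
print(ccw_z((0, 0), (1, 1), (2, 2)))  # 0  -> collinear
print(ccw_z((0, 0), (0, 1), (1, 0)))  # -1 -> clockwise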
Terrance/SkPy
skpy/util.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/util.py#L70-L83
def chatToId(url): """ Extract the conversation ID from a conversation URL. Matches addresses containing ``conversations/<chat>``. Args: url (str): Skype API URL Returns: str: extracted identifier """ match = re.search(r"conversations/([0-9]+:[^/]+)", url) return match.group(1) if match else None
[ "def", "chatToId", "(", "url", ")", ":", "match", "=", "re", ".", "search", "(", "r\"conversations/([0-9]+:[^/]+)\"", ",", "url", ")", "return", "match", ".", "group", "(", "1", ")", "if", "match", "else", "None" ]
Extract the conversation ID from a conversation URL. Matches addresses containing ``conversations/<chat>``. Args: url (str): Skype API URL Returns: str: extracted identifier
[ "Extract", "the", "conversation", "ID", "from", "a", "conversation", "URL", "." ]
python
test
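The same extraction run directly with the stdlib re module; the sample URL is invented for illustration and is not a real Skype endpoint.

import re

url = ("https://client-s.gateway.messenger.live.com/v1/users/ME/"
       "conversations/8:example.user/messages")  # made-up sample URL
match = re.search(r"conversations/([0-9]+:[^/]+)", url)
print(match.group(1) if match else None)  # 8:example.user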
AtteqCom/zsl
src/zsl/resource/json_server_resource.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/json_server_resource.py#L233-L247
def update(self, *args, **kwargs): """Modifies the parameters and adds metadata for update results. Currently it does not support `PUT` method, which works as replacing the resource. This is somehow questionable in relation DB. """ if request.method == 'PUT': logging.warning("Called not implemented resource method PUT") resource = super(JsonServerResource, self).update(*args, **kwargs) if resource: return resource else: return NOT_FOUND
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "method", "==", "'PUT'", ":", "logging", ".", "warning", "(", "\"Called not implemented resource method PUT\"", ")", "resource", "=", "super", "(", "JsonServerResource", ",", "self", ")", ".", "update", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resource", ":", "return", "resource", "else", ":", "return", "NOT_FOUND" ]
Modifies the parameters and adds metadata for update results. Currently it does not support `PUT` method, which works as replacing the resource. This is somehow questionable in relation DB.
[ "Modifies", "the", "parameters", "and", "adds", "metadata", "for", "update", "results", "." ]
python
train
atarashansky/self-assembling-manifold
utilities.py
https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/utilities.py#L62-L107
def save_figures(filename, fig_IDs=None, **kwargs): """ Save figures. Parameters ---------- filename - str Name of output file fig_IDs - int, numpy.array, list, optional, default None A list of open figure IDs or a figure ID that will be saved to a pdf/png file respectively. **kwargs - Extra keyword arguments passed into 'matplotlib.pyplot.savefig'. """ import matplotlib.pyplot as plt if(fig_IDs is not None): if(type(fig_IDs) is list): savetype = 'pdf' else: savetype = 'png' else: savetype = 'pdf' if(savetype == 'pdf'): from matplotlib.backends.backend_pdf import PdfPages if(len(filename.split('.')) == 1): filename = filename + '.pdf' else: filename = '.'.join(filename.split('.')[:-1])+'.pdf' pdf = PdfPages(filename) if fig_IDs is None: figs = [plt.figure(n) for n in plt.get_fignums()] else: figs = [plt.figure(n) for n in fig_IDs] for fig in figs: fig.savefig(pdf, format='pdf', **kwargs) pdf.close() elif(savetype == 'png'): plt.figure(fig_IDs).savefig(filename, **kwargs)
[ "def", "save_figures", "(", "filename", ",", "fig_IDs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "(", "fig_IDs", "is", "not", "None", ")", ":", "if", "(", "type", "(", "fig_IDs", ")", "is", "list", ")", ":", "savetype", "=", "'pdf'", "else", ":", "savetype", "=", "'png'", "else", ":", "savetype", "=", "'pdf'", "if", "(", "savetype", "==", "'pdf'", ")", ":", "from", "matplotlib", ".", "backends", ".", "backend_pdf", "import", "PdfPages", "if", "(", "len", "(", "filename", ".", "split", "(", "'.'", ")", ")", "==", "1", ")", ":", "filename", "=", "filename", "+", "'.pdf'", "else", ":", "filename", "=", "'.'", ".", "join", "(", "filename", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "+", "'.pdf'", "pdf", "=", "PdfPages", "(", "filename", ")", "if", "fig_IDs", "is", "None", ":", "figs", "=", "[", "plt", ".", "figure", "(", "n", ")", "for", "n", "in", "plt", ".", "get_fignums", "(", ")", "]", "else", ":", "figs", "=", "[", "plt", ".", "figure", "(", "n", ")", "for", "n", "in", "fig_IDs", "]", "for", "fig", "in", "figs", ":", "fig", ".", "savefig", "(", "pdf", ",", "format", "=", "'pdf'", ",", "*", "*", "kwargs", ")", "pdf", ".", "close", "(", ")", "elif", "(", "savetype", "==", "'png'", ")", ":", "plt", ".", "figure", "(", "fig_IDs", ")", ".", "savefig", "(", "filename", ",", "*", "*", "kwargs", ")" ]
Save figures. Parameters ---------- filename - str Name of output file fig_IDs - int, numpy.array, list, optional, default None A list of open figure IDs or a figure ID that will be saved to a pdf/png file respectively. **kwargs - Extra keyword arguments passed into 'matplotlib.pyplot.savefig'.
[ "Save", "figures", ".", "Parameters", "----------", "filename", "-", "str", "Name", "of", "output", "file", "fig_IDs", "-", "int", "numpy", ".", "array", "list", "optional", "default", "None", "A", "list", "of", "open", "figure", "IDs", "or", "a", "figure", "ID", "that", "will", "be", "saved", "to", "a", "pdf", "/", "png", "file", "respectively", ".", "**", "kwargs", "-", "Extra", "keyword", "arguments", "passed", "into", "matplotlib", ".", "pyplot", ".", "savefig", "." ]
python
train
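A minimal standalone demonstration of the PdfPages pattern that save_figures relies on; the figures themselves are arbitrary placeholders.

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs without a display
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

for n in range(2):                      # two throwaway figures
    plt.figure()
    plt.plot([0, 1], [0, n + 1])

with PdfPages("all_figures.pdf") as pdf:
    for num in plt.get_fignums():       # every currently open figure
        pdf.savefig(plt.figure(num))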
phac-nml/sistr_cmd
sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L55-L69
def allele_clusters(dists, t=0.025): """Flat clusters from distance matrix Args: dists (numpy.array): pdist distance matrix t (float): fcluster (tree cutting) distance threshold Returns: dict of lists: cluster number to list of indices of distances in cluster """ clusters = fcluster(linkage(dists), 0.025, criterion='distance') cluster_idx = defaultdict(list) for idx, cl in enumerate(clusters): cluster_idx[cl].append(idx) return cluster_idx
[ "def", "allele_clusters", "(", "dists", ",", "t", "=", "0.025", ")", ":", "clusters", "=", "fcluster", "(", "linkage", "(", "dists", ")", ",", "0.025", ",", "criterion", "=", "'distance'", ")", "cluster_idx", "=", "defaultdict", "(", "list", ")", "for", "idx", ",", "cl", "in", "enumerate", "(", "clusters", ")", ":", "cluster_idx", "[", "cl", "]", ".", "append", "(", "idx", ")", "return", "cluster_idx" ]
Flat clusters from distance matrix Args: dists (numpy.array): pdist distance matrix t (float): fcluster (tree cutting) distance threshold Returns: dict of lists: cluster number to list of indices of distances in cluster
[ "Flat", "clusters", "from", "distance", "matrix" ]
python
train
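A standalone illustration of the linkage/fcluster pattern above on a toy distance matrix; the random points and the 0.5 cut threshold are arbitrary choices for the example.

import numpy as np
from collections import defaultdict
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

points = np.random.rand(6, 4)
dists = pdist(points)                                    # condensed distance matrix
clusters = fcluster(linkage(dists), 0.5, criterion='distance')

cluster_idx = defaultdict(list)                          # cluster number -> member indices
for idx, cl in enumerate(clusters):
    cluster_idx[cl].append(idx)
print(dict(cluster_idx))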
CalebBell/thermo
thermo/chemical.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L2603-L2621
def isobaric_expansion(self): r'''Isobaric (constant-pressure) expansion of the chemical at its current phase and temperature, in units of [1/K]. .. math:: \beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P Examples -------- Radical change in value just above and below the critical temperature of water: >>> Chemical('water', T=647.1, P=22048320.0).isobaric_expansion 0.34074205839222449 >>> Chemical('water', T=647.2, P=22048320.0).isobaric_expansion 0.18143324022215077 ''' return phase_select_property(phase=self.phase, l=self.isobaric_expansion_l, g=self.isobaric_expansion_g)
[ "def", "isobaric_expansion", "(", "self", ")", ":", "return", "phase_select_property", "(", "phase", "=", "self", ".", "phase", ",", "l", "=", "self", ".", "isobaric_expansion_l", ",", "g", "=", "self", ".", "isobaric_expansion_g", ")" ]
r'''Isobaric (constant-pressure) expansion of the chemical at its current phase and temperature, in units of [1/K]. .. math:: \beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P Examples -------- Radical change in value just above and below the critical temperature of water: >>> Chemical('water', T=647.1, P=22048320.0).isobaric_expansion 0.34074205839222449 >>> Chemical('water', T=647.2, P=22048320.0).isobaric_expansion 0.18143324022215077
[ "r", "Isobaric", "(", "constant", "-", "pressure", ")", "expansion", "of", "the", "chemical", "at", "its", "current", "phase", "and", "temperature", "in", "units", "of", "[", "1", "/", "K", "]", "." ]
python
valid
MitalAshok/objecttools
objecttools/serializable.py
https://github.com/MitalAshok/objecttools/blob/bddd14d1f702c8b559d3fcc2099bc22370e16de7/objecttools/serializable.py#L161-L170
def globals(self): """Find the globals of `self` by importing `self.module`""" try: return vars(__import__(self.module, fromlist=self.module.split('.'))) except ImportError: if self.warn_import: warnings.warn(ImportWarning( 'Cannot import module {} for SerializableFunction. Restricting to builtins.'.format(self.module) )) return {'__builtins__': __builtins__}
[ "def", "globals", "(", "self", ")", ":", "try", ":", "return", "vars", "(", "__import__", "(", "self", ".", "module", ",", "fromlist", "=", "self", ".", "module", ".", "split", "(", "'.'", ")", ")", ")", "except", "ImportError", ":", "if", "self", ".", "warn_import", ":", "warnings", ".", "warn", "(", "ImportWarning", "(", "'Cannot import module {} for SerializableFunction. Restricting to builtins.'", ".", "format", "(", "self", ".", "module", ")", ")", ")", "return", "{", "'__builtins__'", ":", "__builtins__", "}" ]
Find the globals of `self` by importing `self.module`
[ "Find", "the", "globals", "of", "self", "by", "importing", "self", ".", "module" ]
python
train
helixyte/everest
everest/resources/storing.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/resources/storing.py#L61-L69
def load_into_collection_from_stream(collection, stream, content_type): """ Loads resources from the given resource data stream (of the specified MIME content type) into the given collection resource. """ rpr = as_representer(collection, content_type) with stream: data_el = rpr.data_from_stream(stream) rpr.resource_from_data(data_el, resource=collection)
[ "def", "load_into_collection_from_stream", "(", "collection", ",", "stream", ",", "content_type", ")", ":", "rpr", "=", "as_representer", "(", "collection", ",", "content_type", ")", "with", "stream", ":", "data_el", "=", "rpr", ".", "data_from_stream", "(", "stream", ")", "rpr", ".", "resource_from_data", "(", "data_el", ",", "resource", "=", "collection", ")" ]
Loads resources from the given resource data stream (of the specified MIME content type) into the given collection resource.
[ "Loads", "resources", "from", "the", "given", "resource", "data", "stream", "(", "of", "the", "specified", "MIME", "content", "type", ")", "into", "the", "given", "collection", "resource", "." ]
python
train
ewels/MultiQC
multiqc/modules/bowtie1/bowtie1.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/bowtie1/bowtie1.py#L93-L114
def bowtie_general_stats_table(self): """ Take the parsed stats from the Bowtie report and add it to the basic stats table at the top of the report """ headers = OrderedDict() headers['reads_aligned_percentage'] = { 'title': '% Aligned', 'description': '% reads with at least one reported alignment', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['reads_aligned'] = { 'title': '{} Aligned'.format(config.read_count_prefix), 'description': 'reads with at least one reported alignment ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } self.general_stats_addcols(self.bowtie_data, headers)
[ "def", "bowtie_general_stats_table", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'reads_aligned_percentage'", "]", "=", "{", "'title'", ":", "'% Aligned'", ",", "'description'", ":", "'% reads with at least one reported alignment'", ",", "'max'", ":", "100", ",", "'min'", ":", "0", ",", "'suffix'", ":", "'%'", ",", "'scale'", ":", "'YlGn'", "}", "headers", "[", "'reads_aligned'", "]", "=", "{", "'title'", ":", "'{} Aligned'", ".", "format", "(", "config", ".", "read_count_prefix", ")", ",", "'description'", ":", "'reads with at least one reported alignment ({})'", ".", "format", "(", "config", ".", "read_count_desc", ")", ",", "'min'", ":", "0", ",", "'scale'", ":", "'PuRd'", ",", "'modify'", ":", "lambda", "x", ":", "x", "*", "config", ".", "read_count_multiplier", ",", "'shared_key'", ":", "'read_count'", "}", "self", ".", "general_stats_addcols", "(", "self", ".", "bowtie_data", ",", "headers", ")" ]
Take the parsed stats from the Bowtie report and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "Bowtie", "report", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
python
train
tetframework/Tonnikala
tonnikala/languages/python/generator.py
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/python/generator.py#L72-L97
def adjust_locations(ast_node, first_lineno, first_offset): """ Adjust the locations of the ast nodes, offsetting them to the new lineno and column offset """ line_delta = first_lineno - 1 def _fix(node): if 'lineno' in node._attributes: lineno = node.lineno col = node.col_offset # adjust the offset on the first line if lineno == 1: col += first_offset lineno += line_delta node.lineno = lineno node.col_offset = col for child in iter_child_nodes(node): _fix(child) _fix(ast_node)
[ "def", "adjust_locations", "(", "ast_node", ",", "first_lineno", ",", "first_offset", ")", ":", "line_delta", "=", "first_lineno", "-", "1", "def", "_fix", "(", "node", ")", ":", "if", "'lineno'", "in", "node", ".", "_attributes", ":", "lineno", "=", "node", ".", "lineno", "col", "=", "node", ".", "col_offset", "# adjust the offset on the first line", "if", "lineno", "==", "1", ":", "col", "+=", "first_offset", "lineno", "+=", "line_delta", "node", ".", "lineno", "=", "lineno", "node", ".", "col_offset", "=", "col", "for", "child", "in", "iter_child_nodes", "(", "node", ")", ":", "_fix", "(", "child", ")", "_fix", "(", "ast_node", ")" ]
Adjust the locations of the ast nodes, offsetting them to the new lineno and column offset
[ "Adjust", "the", "locations", "of", "the", "ast", "nodes", "offsetting", "them", "to", "the", "new", "lineno", "and", "column", "offset" ]
python
train
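A quick standalone check of adjust_locations using the stdlib ast module; it assumes the function above has been pasted into the same script together with iter_child_nodes imported from ast, as in the original module.

import ast
from ast import iter_child_nodes  # name used inside adjust_locations

tree = ast.parse("x = 1\ny = x + 2\n")
adjust_locations(tree, first_lineno=10, first_offset=4)
print(tree.body[0].lineno, tree.body[0].col_offset)  # 10 4  (first line also gets the column offset)
print(tree.body[1].lineno, tree.body[1].col_offset)  # 11 0  (later lines only shift linenos)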
facelessuser/pyspelling
pyspelling/filters/cpp.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L392-L405
def extend_src_text(self, content, context, text_list, category): """Extend the source text list with the gathered text data.""" prefix = self.prefix + '-' if self.prefix else '' for comment, line, encoding in text_list: content.append( filters.SourceText( textwrap.dedent(comment), "%s (%d)" % (context, line), encoding, prefix + category ) )
[ "def", "extend_src_text", "(", "self", ",", "content", ",", "context", ",", "text_list", ",", "category", ")", ":", "prefix", "=", "self", ".", "prefix", "+", "'-'", "if", "self", ".", "prefix", "else", "''", "for", "comment", ",", "line", ",", "encoding", "in", "text_list", ":", "content", ".", "append", "(", "filters", ".", "SourceText", "(", "textwrap", ".", "dedent", "(", "comment", ")", ",", "\"%s (%d)\"", "%", "(", "context", ",", "line", ")", ",", "encoding", ",", "prefix", "+", "category", ")", ")" ]
Extend the source text list with the gathered text data.
[ "Extend", "the", "source", "text", "list", "with", "the", "gathered", "text", "data", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/core/subspace.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L660-L787
def _subspace_process(streams, lowcut, highcut, filt_order, sampling_rate, multiplex, align, shift_len, reject, no_missed=True, stachans=None, parallel=False, plot=False, cores=1): """ Process stream data, internal function. :type streams: list :param streams: List of obspy.core.stream.Stream to be used to \ generate the subspace detector. These should be pre-clustered \ and aligned. :type lowcut: float :param lowcut: Lowcut in Hz, can be None to not apply filter :type highcut: float :param highcut: Highcut in Hz, can be None to not apply filter :type filt_order: int :param filt_order: Number of corners for filter. :type sampling_rate: float :param sampling_rate: Desired sampling rate in Hz :type multiplex: bool :param multiplex: Whether to multiplex the data or not. Data are \ multiplexed according to the method of Harris, see the multi \ function for details. :type stachans: list of tuple :param stachans: list of tuples of (station, channel) to use. :type align: bool :param align: Whether to align the data or not - needs to be done \ at some point :type shift_len: float :param shift_len: Maximum shift allowed for alignment in seconds. :type reject: float :param reject: Minimum correlation for traces, only used if align=True. :type no_missed: bool :param: no_missed: Reject streams with missed traces, defaults to True. \ A missing trace from lots of events will reduce the quality of the \ subspace detector if multiplexed. Only used when multi is set to True. :type plot: bool :param plot: Passed down to align traces - used to check alignment process. :return: Processed streams :rtype: list :return: Station, channel pairs in order :rtype: list of tuple :return: List of delays :rtype: list """ from multiprocessing import Pool, cpu_count processed_streams = [] if not stachans: input_stachans = list(set([(tr.stats.station, tr.stats.channel) for st in streams for tr in st.sort()])) else: input_stachans = stachans input_stachans.sort() # Make sure stations and channels are in order # Check that all channels are the same length in seconds first_length = len(streams[0][0].data) /\ streams[0][0].stats.sampling_rate for st in streams: for tr in st: if not len(tr) / tr.stats.sampling_rate == first_length: msg = 'All channels of all streams must be the same length' raise IOError(msg) for st in streams: if not parallel: processed_stream = Stream() for stachan in input_stachans: dummy, tr = _internal_process( st=st, lowcut=lowcut, highcut=highcut, filt_order=filt_order, sampling_rate=sampling_rate, first_length=first_length, stachan=stachan, debug=0) processed_stream += tr processed_streams.append(processed_stream) else: pool = Pool(processes=min(cores, cpu_count())) results = [pool.apply_async( _internal_process, (st,), {'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order, 'sampling_rate': sampling_rate, 'first_length': first_length, 'stachan': stachan, 'debug': 0, 'i': i}) for i, stachan in enumerate(input_stachans)] pool.close() try: processed_stream = [p.get() for p in results] except KeyboardInterrupt as e: # pragma: no cover pool.terminate() raise e pool.join() processed_stream.sort(key=lambda tup: tup[0]) processed_stream = Stream([p[1] for p in processed_stream]) processed_streams.append(processed_stream) if no_missed and multiplex: for tr in processed_stream: if np.count_nonzero(tr.data) == 0: processed_streams.remove(processed_stream) print('Removed stream with empty trace') break if align: processed_streams = align_design( design_set=processed_streams, shift_len=shift_len, 
reject=reject, multiplex=multiplex, plot=plot, no_missed=no_missed) output_streams = [] for processed_stream in processed_streams: if len(processed_stream) == 0: # If we have removed all of the traces then onwards! continue # Need to order the stream according to input_stachans _st = Stream() for stachan in input_stachans: tr = processed_stream.select( station=stachan[0], channel=stachan[1]) if len(tr) >= 1: _st += tr[0] elif multiplex and len(tr) == 0: raise IndexError( 'Missing data for %s.%s' % (stachan[0], stachan[1])) if multiplex: st = multi(stream=_st) st = Stream(Trace(st)) st[0].stats.station = 'Multi' st[0].stats.sampling_rate = sampling_rate else: st = _st for tr in st: # Normalize the data norm = np.linalg.norm(tr.data) if not norm == 0: tr.data /= norm output_streams.append(st) return output_streams, input_stachans
[ "def", "_subspace_process", "(", "streams", ",", "lowcut", ",", "highcut", ",", "filt_order", ",", "sampling_rate", ",", "multiplex", ",", "align", ",", "shift_len", ",", "reject", ",", "no_missed", "=", "True", ",", "stachans", "=", "None", ",", "parallel", "=", "False", ",", "plot", "=", "False", ",", "cores", "=", "1", ")", ":", "from", "multiprocessing", "import", "Pool", ",", "cpu_count", "processed_streams", "=", "[", "]", "if", "not", "stachans", ":", "input_stachans", "=", "list", "(", "set", "(", "[", "(", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "channel", ")", "for", "st", "in", "streams", "for", "tr", "in", "st", ".", "sort", "(", ")", "]", ")", ")", "else", ":", "input_stachans", "=", "stachans", "input_stachans", ".", "sort", "(", ")", "# Make sure stations and channels are in order", "# Check that all channels are the same length in seconds", "first_length", "=", "len", "(", "streams", "[", "0", "]", "[", "0", "]", ".", "data", ")", "/", "streams", "[", "0", "]", "[", "0", "]", ".", "stats", ".", "sampling_rate", "for", "st", "in", "streams", ":", "for", "tr", "in", "st", ":", "if", "not", "len", "(", "tr", ")", "/", "tr", ".", "stats", ".", "sampling_rate", "==", "first_length", ":", "msg", "=", "'All channels of all streams must be the same length'", "raise", "IOError", "(", "msg", ")", "for", "st", "in", "streams", ":", "if", "not", "parallel", ":", "processed_stream", "=", "Stream", "(", ")", "for", "stachan", "in", "input_stachans", ":", "dummy", ",", "tr", "=", "_internal_process", "(", "st", "=", "st", ",", "lowcut", "=", "lowcut", ",", "highcut", "=", "highcut", ",", "filt_order", "=", "filt_order", ",", "sampling_rate", "=", "sampling_rate", ",", "first_length", "=", "first_length", ",", "stachan", "=", "stachan", ",", "debug", "=", "0", ")", "processed_stream", "+=", "tr", "processed_streams", ".", "append", "(", "processed_stream", ")", "else", ":", "pool", "=", "Pool", "(", "processes", "=", "min", "(", "cores", ",", "cpu_count", "(", ")", ")", ")", "results", "=", "[", "pool", ".", "apply_async", "(", "_internal_process", ",", "(", "st", ",", ")", ",", "{", "'lowcut'", ":", "lowcut", ",", "'highcut'", ":", "highcut", ",", "'filt_order'", ":", "filt_order", ",", "'sampling_rate'", ":", "sampling_rate", ",", "'first_length'", ":", "first_length", ",", "'stachan'", ":", "stachan", ",", "'debug'", ":", "0", ",", "'i'", ":", "i", "}", ")", "for", "i", ",", "stachan", "in", "enumerate", "(", "input_stachans", ")", "]", "pool", ".", "close", "(", ")", "try", ":", "processed_stream", "=", "[", "p", ".", "get", "(", ")", "for", "p", "in", "results", "]", "except", "KeyboardInterrupt", "as", "e", ":", "# pragma: no cover", "pool", ".", "terminate", "(", ")", "raise", "e", "pool", ".", "join", "(", ")", "processed_stream", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "0", "]", ")", "processed_stream", "=", "Stream", "(", "[", "p", "[", "1", "]", "for", "p", "in", "processed_stream", "]", ")", "processed_streams", ".", "append", "(", "processed_stream", ")", "if", "no_missed", "and", "multiplex", ":", "for", "tr", "in", "processed_stream", ":", "if", "np", ".", "count_nonzero", "(", "tr", ".", "data", ")", "==", "0", ":", "processed_streams", ".", "remove", "(", "processed_stream", ")", "print", "(", "'Removed stream with empty trace'", ")", "break", "if", "align", ":", "processed_streams", "=", "align_design", "(", "design_set", "=", "processed_streams", ",", "shift_len", "=", "shift_len", ",", "reject", "=", "reject", ",", 
"multiplex", "=", "multiplex", ",", "plot", "=", "plot", ",", "no_missed", "=", "no_missed", ")", "output_streams", "=", "[", "]", "for", "processed_stream", "in", "processed_streams", ":", "if", "len", "(", "processed_stream", ")", "==", "0", ":", "# If we have removed all of the traces then onwards!", "continue", "# Need to order the stream according to input_stachans", "_st", "=", "Stream", "(", ")", "for", "stachan", "in", "input_stachans", ":", "tr", "=", "processed_stream", ".", "select", "(", "station", "=", "stachan", "[", "0", "]", ",", "channel", "=", "stachan", "[", "1", "]", ")", "if", "len", "(", "tr", ")", ">=", "1", ":", "_st", "+=", "tr", "[", "0", "]", "elif", "multiplex", "and", "len", "(", "tr", ")", "==", "0", ":", "raise", "IndexError", "(", "'Missing data for %s.%s'", "%", "(", "stachan", "[", "0", "]", ",", "stachan", "[", "1", "]", ")", ")", "if", "multiplex", ":", "st", "=", "multi", "(", "stream", "=", "_st", ")", "st", "=", "Stream", "(", "Trace", "(", "st", ")", ")", "st", "[", "0", "]", ".", "stats", ".", "station", "=", "'Multi'", "st", "[", "0", "]", ".", "stats", ".", "sampling_rate", "=", "sampling_rate", "else", ":", "st", "=", "_st", "for", "tr", "in", "st", ":", "# Normalize the data", "norm", "=", "np", ".", "linalg", ".", "norm", "(", "tr", ".", "data", ")", "if", "not", "norm", "==", "0", ":", "tr", ".", "data", "/=", "norm", "output_streams", ".", "append", "(", "st", ")", "return", "output_streams", ",", "input_stachans" ]
Process stream data, internal function. :type streams: list :param streams: List of obspy.core.stream.Stream to be used to \ generate the subspace detector. These should be pre-clustered \ and aligned. :type lowcut: float :param lowcut: Lowcut in Hz, can be None to not apply filter :type highcut: float :param highcut: Highcut in Hz, can be None to not apply filter :type filt_order: int :param filt_order: Number of corners for filter. :type sampling_rate: float :param sampling_rate: Desired sampling rate in Hz :type multiplex: bool :param multiplex: Whether to multiplex the data or not. Data are \ multiplexed according to the method of Harris, see the multi \ function for details. :type stachans: list of tuple :param stachans: list of tuples of (station, channel) to use. :type align: bool :param align: Whether to align the data or not - needs to be done \ at some point :type shift_len: float :param shift_len: Maximum shift allowed for alignment in seconds. :type reject: float :param reject: Minimum correlation for traces, only used if align=True. :type no_missed: bool :param: no_missed: Reject streams with missed traces, defaults to True. \ A missing trace from lots of events will reduce the quality of the \ subspace detector if multiplexed. Only used when multi is set to True. :type plot: bool :param plot: Passed down to align traces - used to check alignment process. :return: Processed streams :rtype: list :return: Station, channel pairs in order :rtype: list of tuple :return: List of delays :rtype: list
[ "Process", "stream", "data", "internal", "function", "." ]
python
train
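A minimal standalone sketch of the indexed apply_async pattern used above: each task returns (index, result) so the gathered results can be re-sorted into submission order.

from multiprocessing import Pool

def work(x, i=0):
    return i, x * x          # carry the submission index alongside the result

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        results = [pool.apply_async(work, (x,), {"i": i})
                   for i, x in enumerate([3, 1, 2])]
        pairs = [r.get() for r in results]
    pairs.sort(key=lambda tup: tup[0])   # restore submission order
    print([p[1] for p in pairs])         # [9, 1, 4]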
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/utils/io.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/utils/io.py#L34-L81
def read_causal_pairs(filename, scale=True, **kwargs): """Convert a ChaLearn Cause effect pairs challenge format into numpy.ndarray. :param filename: path of the file to read or DataFrame containing the data :type filename: str or pandas.DataFrame :param scale: Scale the data :type scale: bool :param kwargs: parameters to be passed to pandas.read_csv :return: Dataframe composed of (SampleID, a (numpy.ndarray) , b (numpy.ndarray)) :rtype: pandas.DataFrame """ def convert_row(row, scale): """Convert a CCEPC row into numpy.ndarrays. :param row: :type row: pandas.Series :return: tuple of sample ID and the converted data into numpy.ndarrays :rtype: tuple """ a = row["A"].split(" ") b = row["B"].split(" ") if a[0] == "": a.pop(0) b.pop(0) if a[-1] == "": a.pop(-1) b.pop(-1) a = array([float(i) for i in a]) b = array([float(i) for i in b]) if scale: a = scaler(a) b = scaler(b) return row['SampleID'], a, b if isinstance(filename, str): data = read_csv(filename, **kwargs) elif isinstance(filename, DataFrame): data = filename else: raise TypeError("Type not supported.") conv_data = [] for idx, row in data.iterrows(): conv_data.append(convert_row(row, scale)) df = DataFrame(conv_data, columns=['SampleID', 'A', 'B']) df = df.set_index("SampleID") return df
[ "def", "read_causal_pairs", "(", "filename", ",", "scale", "=", "True", ",", "*", "*", "kwargs", ")", ":", "def", "convert_row", "(", "row", ",", "scale", ")", ":", "\"\"\"Convert a CCEPC row into numpy.ndarrays.\n\n :param row:\n :type row: pandas.Series\n :return: tuple of sample ID and the converted data into numpy.ndarrays\n :rtype: tuple\n \"\"\"", "a", "=", "row", "[", "\"A\"", "]", ".", "split", "(", "\" \"", ")", "b", "=", "row", "[", "\"B\"", "]", ".", "split", "(", "\" \"", ")", "if", "a", "[", "0", "]", "==", "\"\"", ":", "a", ".", "pop", "(", "0", ")", "b", ".", "pop", "(", "0", ")", "if", "a", "[", "-", "1", "]", "==", "\"\"", ":", "a", ".", "pop", "(", "-", "1", ")", "b", ".", "pop", "(", "-", "1", ")", "a", "=", "array", "(", "[", "float", "(", "i", ")", "for", "i", "in", "a", "]", ")", "b", "=", "array", "(", "[", "float", "(", "i", ")", "for", "i", "in", "b", "]", ")", "if", "scale", ":", "a", "=", "scaler", "(", "a", ")", "b", "=", "scaler", "(", "b", ")", "return", "row", "[", "'SampleID'", "]", ",", "a", ",", "b", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "data", "=", "read_csv", "(", "filename", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "filename", ",", "DataFrame", ")", ":", "data", "=", "filename", "else", ":", "raise", "TypeError", "(", "\"Type not supported.\"", ")", "conv_data", "=", "[", "]", "for", "idx", ",", "row", "in", "data", ".", "iterrows", "(", ")", ":", "conv_data", ".", "append", "(", "convert_row", "(", "row", ",", "scale", ")", ")", "df", "=", "DataFrame", "(", "conv_data", ",", "columns", "=", "[", "'SampleID'", ",", "'A'", ",", "'B'", "]", ")", "df", "=", "df", ".", "set_index", "(", "\"SampleID\"", ")", "return", "df" ]
Convert a ChaLearn Cause effect pairs challenge format into numpy.ndarray. :param filename: path of the file to read or DataFrame containing the data :type filename: str or pandas.DataFrame :param scale: Scale the data :type scale: bool :param kwargs: parameters to be passed to pandas.read_csv :return: Dataframe composed of (SampleID, a (numpy.ndarray) , b (numpy.ndarray)) :rtype: pandas.DataFrame
[ "Convert", "a", "ChaLearn", "Cause", "effect", "pairs", "challenge", "format", "into", "numpy", ".", "ndarray", "." ]
python
valid
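A tiny illustration of read_causal_pairs on an in-memory DataFrame in the ChaLearn pair format; the sample values are invented, and the call assumes the function above (with its module-level imports) is available in scope.

import pandas as pd

pairs = pd.DataFrame({
    "SampleID": ["pair1"],
    "A": ["0.1 0.2 0.3"],      # space-separated samples of variable A
    "B": ["1.0 2.0 3.0"],      # space-separated samples of variable B
})
df = read_causal_pairs(pairs, scale=False)
print(df.loc["pair1", "A"])    # array([0.1, 0.2, 0.3])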
proteanhq/protean
src/protean/core/repository/base.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/repository/base.py#L29-L34
def filter(self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()) -> ResultSet: """ Filter objects from the repository. Method must return a `ResultSet` object """
[ "def", "filter", "(", "self", ",", "criteria", ":", "Q", ",", "offset", ":", "int", "=", "0", ",", "limit", ":", "int", "=", "10", ",", "order_by", ":", "list", "=", "(", ")", ")", "->", "ResultSet", ":" ]
Filter objects from the repository. Method must return a `ResultSet` object
[ "Filter", "objects", "from", "the", "repository", ".", "Method", "must", "return", "a", "ResultSet", "object" ]
python
train
awslabs/sockeye
sockeye_contrib/autopilot/autopilot.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye_contrib/autopilot/autopilot.py#L234-L264
def copy_parallel_text(file_list: List[str], dest_prefix: str): """ Copy pre-compiled raw parallel files with a given prefix. Perform whitespace character normalization to ensure that only ASCII newlines are considered line breaks. :param file_list: List of file pairs to use. :param dest_prefix: Prefix for output files. """ # Group files into source-target pairs file_sets = [] for i in range(0, len(file_list), 2): file_sets.append((file_list[i], file_list[i + 1])) multiple_sets = len(file_sets) > 1 for i, (source_fname, target_fname) in enumerate(file_sets): if multiple_sets: source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ else: source_dest = dest_prefix + SUFFIX_SRC_GZ target_dest = dest_prefix + SUFFIX_TRG_GZ logging.info("Populate: %s %s", source_dest, target_dest) with gzip.open(source_dest, "wb") as source_out, gzip.open(target_dest, "wb") as target_out: with third_party.bin_open(source_fname) as inp: for line in inp: line = (re.sub(r"\s", " ", line.decode("utf-8"))).encode("utf-8") + b"\n" source_out.write(line) with third_party.bin_open(target_fname) as inp: for line in inp: line = (re.sub(r"\s", " ", line.decode("utf-8"))).encode("utf-8") + b"\n" target_out.write(line)
[ "def", "copy_parallel_text", "(", "file_list", ":", "List", "[", "str", "]", ",", "dest_prefix", ":", "str", ")", ":", "# Group files into source-target pairs", "file_sets", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "file_list", ")", ",", "2", ")", ":", "file_sets", ".", "append", "(", "(", "file_list", "[", "i", "]", ",", "file_list", "[", "i", "+", "1", "]", ")", ")", "multiple_sets", "=", "len", "(", "file_sets", ")", ">", "1", "for", "i", ",", "(", "source_fname", ",", "target_fname", ")", "in", "enumerate", "(", "file_sets", ")", ":", "if", "multiple_sets", ":", "source_dest", "=", "dest_prefix", "+", "str", "(", "i", ")", "+", "\".\"", "+", "SUFFIX_SRC_GZ", "target_dest", "=", "dest_prefix", "+", "str", "(", "i", ")", "+", "\".\"", "+", "SUFFIX_TRG_GZ", "else", ":", "source_dest", "=", "dest_prefix", "+", "SUFFIX_SRC_GZ", "target_dest", "=", "dest_prefix", "+", "SUFFIX_TRG_GZ", "logging", ".", "info", "(", "\"Populate: %s %s\"", ",", "source_dest", ",", "target_dest", ")", "with", "gzip", ".", "open", "(", "source_dest", ",", "\"wb\"", ")", "as", "source_out", ",", "gzip", ".", "open", "(", "target_dest", ",", "\"wb\"", ")", "as", "target_out", ":", "with", "third_party", ".", "bin_open", "(", "source_fname", ")", "as", "inp", ":", "for", "line", "in", "inp", ":", "line", "=", "(", "re", ".", "sub", "(", "r\"\\s\"", ",", "\" \"", ",", "line", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", ".", "encode", "(", "\"utf-8\"", ")", "+", "b\"\\n\"", "source_out", ".", "write", "(", "line", ")", "with", "third_party", ".", "bin_open", "(", "target_fname", ")", "as", "inp", ":", "for", "line", "in", "inp", ":", "line", "=", "(", "re", ".", "sub", "(", "r\"\\s\"", ",", "\" \"", ",", "line", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", ".", "encode", "(", "\"utf-8\"", ")", "+", "b\"\\n\"", "target_out", ".", "write", "(", "line", ")" ]
Copy pre-compiled raw parallel files with a given prefix. Perform whitespace character normalization to ensure that only ASCII newlines are considered line breaks. :param file_list: List of file pairs to use. :param dest_prefix: Prefix for output files.
[ "Copy", "pre", "-", "compiled", "raw", "parallel", "files", "with", "a", "given", "prefix", ".", "Perform", "whitespace", "character", "normalization", "to", "ensure", "that", "only", "ASCII", "newlines", "are", "considered", "line", "breaks", "." ]
python
train
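A standalone sketch of the normalization-and-write step above: any Unicode whitespace (carriage returns, non-breaking spaces, tabs) is collapsed to a plain space before each gzip'd line is written; the sample lines and output filename are made up.

import gzip
import re

lines = ["one\texample\r\n", "with\u00a0nbsp\n"]  # made-up sample lines
with gzip.open("corpus.src.gz", "wb") as out:
    for line in lines:
        normalized = re.sub(r"\s", " ", line.rstrip("\n")).encode("utf-8") + b"\n"
        out.write(normalized)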