Dataset columns:

  repo              string, 7 to 54 characters
  path              string, 4 to 192 characters
  url               string, 87 to 284 characters
  code              string, 78 to 104k characters
  code_tokens       list
  docstring         string, 1 to 46.9k characters
  docstring_tokens  list
  language          string class, 1 value
  partition         string class, 3 values
soasme/rio
rio/blueprints/dashboard.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/blueprints/dashboard.py#L62-L79
def new_project():
    """New Project."""
    form = NewProjectForm()
    if not form.validate_on_submit():
        return jsonify(errors=form.errors), 400
    data = form.data
    data['slug'] = slugify(data['name'])
    data['owner_id'] = get_current_user_id()
    id = add_instance('project', **data)
    if not id:
        return jsonify(errors={'name': ['duplicated slug.']}), 400
    project = get_data_or_404('project', id)
    return jsonify(**project)
[ "def", "new_project", "(", ")", ":", "form", "=", "NewProjectForm", "(", ")", "if", "not", "form", ".", "validate_on_submit", "(", ")", ":", "return", "jsonify", "(", "errors", "=", "form", ".", "errors", ")", ",", "400", "data", "=", "form", ".", "data", "data", "[", "'slug'", "]", "=", "slugify", "(", "data", "[", "'name'", "]", ")", "data", "[", "'owner_id'", "]", "=", "get_current_user_id", "(", ")", "id", "=", "add_instance", "(", "'project'", ",", "*", "*", "data", ")", "if", "not", "id", ":", "return", "jsonify", "(", "errors", "=", "{", "'name'", ":", "[", "'duplicated slug.'", "]", "}", ")", ",", "400", "project", "=", "get_data_or_404", "(", "'project'", ",", "id", ")", "return", "jsonify", "(", "*", "*", "project", ")" ]
New Project.
[ "New", "Project", "." ]
python
train
yyuu/botornado
boto/ecs/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ecs/__init__.py#L52-L75
def get_response(self, action, params, page=0, itemSet=None):
    """
    Utility method to handle calls to ECS
    and parsing of responses.
    """
    params['Service'] = "AWSECommerceService"
    params['Operation'] = action
    if page:
        params['ItemPage'] = page
    response = self.make_request(None, params, "/onca/xml")
    body = response.read()
    boto.log.debug(body)
    if response.status != 200:
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    if itemSet == None:
        rs = ItemSet(self, action, params, page)
    else:
        rs = itemSet
    h = handler.XmlHandler(rs, self)
    xml.sax.parseString(body, h)
    return rs
[ "def", "get_response", "(", "self", ",", "action", ",", "params", ",", "page", "=", "0", ",", "itemSet", "=", "None", ")", ":", "params", "[", "'Service'", "]", "=", "\"AWSECommerceService\"", "params", "[", "'Operation'", "]", "=", "action", "if", "page", ":", "params", "[", "'ItemPage'", "]", "=", "page", "response", "=", "self", ".", "make_request", "(", "None", ",", "params", ",", "\"/onca/xml\"", ")", "body", "=", "response", ".", "read", "(", ")", "boto", ".", "log", ".", "debug", "(", "body", ")", "if", "response", ".", "status", "!=", "200", ":", "boto", ".", "log", ".", "error", "(", "'%s %s'", "%", "(", "response", ".", "status", ",", "response", ".", "reason", ")", ")", "boto", ".", "log", ".", "error", "(", "'%s'", "%", "body", ")", "raise", "self", ".", "ResponseError", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "body", ")", "if", "itemSet", "==", "None", ":", "rs", "=", "ItemSet", "(", "self", ",", "action", ",", "params", ",", "page", ")", "else", ":", "rs", "=", "itemSet", "h", "=", "handler", ".", "XmlHandler", "(", "rs", ",", "self", ")", "xml", ".", "sax", ".", "parseString", "(", "body", ",", "h", ")", "return", "rs" ]
Utility method to handle calls to ECS and parsing of responses.
[ "Utility", "method", "to", "handle", "calls", "to", "ECS", "and", "parsing", "of", "responses", "." ]
python
train
denisenkom/pytds
src/pytds/__init__.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L1049-L1059
def callproc(self, procname, parameters=()):
    """
    Call a stored procedure with the given name.

    :param procname: The name of the procedure to call
    :type procname: str
    :keyword parameters: The optional parameters for the procedure
    :type parameters: sequence
    """
    self._assert_open()
    return self._callproc(procname, parameters)
[ "def", "callproc", "(", "self", ",", "procname", ",", "parameters", "=", "(", ")", ")", ":", "self", ".", "_assert_open", "(", ")", "return", "self", ".", "_callproc", "(", "procname", ",", "parameters", ")" ]
Call a stored procedure with the given name. :param procname: The name of the procedure to call :type procname: str :keyword parameters: The optional parameters for the procedure :type parameters: sequence
[ "Call", "a", "stored", "procedure", "with", "the", "given", "name", "." ]
python
train
swharden/SWHLab
swhlab/indexing/style.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/indexing/style.py#L68-L78
def frames(fname=None,menuWidth=200,launch=False):
    """create and save a two column frames HTML file."""
    html="""
    <frameset cols="%dpx,*%%">
        <frame name="menu" src="index_menu.html">
        <frame name="content" src="index_splash.html">
    </frameset>"""%(menuWidth)
    with open(fname,'w') as f:
        f.write(html)
    if launch:
        webbrowser.open(fname)
[ "def", "frames", "(", "fname", "=", "None", ",", "menuWidth", "=", "200", ",", "launch", "=", "False", ")", ":", "html", "=", "\"\"\"\n <frameset cols=\"%dpx,*%%\">\n <frame name=\"menu\" src=\"index_menu.html\">\n <frame name=\"content\" src=\"index_splash.html\">\n </frameset>\"\"\"", "%", "(", "menuWidth", ")", "with", "open", "(", "fname", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "html", ")", "if", "launch", ":", "webbrowser", ".", "open", "(", "fname", ")" ]
create and save a two column frames HTML file.
[ "create", "and", "save", "a", "two", "column", "frames", "HTML", "file", "." ]
python
valid
pilosus/ForgeryPy3
forgery_py/forgery/russian_tax.py
https://github.com/pilosus/ForgeryPy3/blob/e15f2e59538deb4cbfceaac314f5ea897f2d5450/forgery_py/forgery/russian_tax.py#L81-L85
def legal_ogrn():
    """Return a random government registration ID for a company."""
    ogrn = "".join(map(str, [random.randint(1, 9) for _ in range(12)]))
    ogrn += str((int(ogrn) % 11 % 10))
    return ogrn
[ "def", "legal_ogrn", "(", ")", ":", "ogrn", "=", "\"\"", ".", "join", "(", "map", "(", "str", ",", "[", "random", ".", "randint", "(", "1", ",", "9", ")", "for", "_", "in", "range", "(", "12", ")", "]", ")", ")", "ogrn", "+=", "str", "(", "(", "int", "(", "ogrn", ")", "%", "11", "%", "10", ")", ")", "return", "ogrn" ]
Return a random government registration ID for a company.
[ "Return", "a", "random", "government", "registration", "ID", "for", "a", "company", "." ]
python
valid
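For illustration only (not part of the dataset): the sample above derives the OGRN check digit as the 12-digit body modulo 11, then modulo 10. A minimal standalone sketch of that arithmetic, using a made-up fixed body instead of random digits:

body = "123456789012"          # made-up 12-digit body (the sampled function draws random digits 1-9)
check = int(body) % 11 % 10    # same check-digit arithmetic as legal_ogrn above
print(body + str(check))       # -> 1234567890127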
DBuildService/dockerfile-parse
dockerfile_parse/util.py
https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/util.py#L107-L211
def split(self, maxsplit=None, dequote=True): """ Generator for the words of the string :param maxsplit: perform at most maxsplit splits; if None, do not limit the number of splits :param dequote: remove quotes and escape characters once consumed """ class Word(object): """ A None-or-str object which can always be appended to. Similar to a defaultdict but with only a single value. """ def __init__(self): self.value = None @property def valid(self): return self.value is not None def append(self, s): if self.value is None: self.value = s else: self.value += s num_splits = 0 word = Word() while True: ch = self.stream.read(1) if not ch: # EOF if word.valid: yield word.value return if (not self.escaped and self.envs is not None and ch == '$' and self.quotes != self.SQUOTE): while True: # Substitute environment variable braced = False varname = '' while True: ch = self.stream.read(1) if varname == '' and ch == '{': braced = True continue if not ch: # EOF break if braced and ch == '}': break if not ch.isalnum() and ch != '_': break varname += ch try: word.append(self.envs[varname]) except KeyError: pass # Check whether there is another envvar if ch != '$': break if braced and ch == '}': continue # ch now holds the next character # Figure out what our quoting/escaping state will be # after this character is_escaped = self.escaped ch_unless_consumed = self._update_quoting_state(ch) if dequote: # If we just processed a quote or escape character, # and were asked to dequote the string, consume it now ch = ch_unless_consumed # If word-splitting has been requested, check whether we are # at a whitespace character may_split = maxsplit != 0 and (maxsplit is None or num_splits < maxsplit) at_split = may_split and (self.quotes is None and not is_escaped and ch.isspace()) if at_split: # It is time to yield a word if word.valid: num_splits += 1 yield word.value word = Word() else: word.append(ch)
[ "def", "split", "(", "self", ",", "maxsplit", "=", "None", ",", "dequote", "=", "True", ")", ":", "class", "Word", "(", "object", ")", ":", "\"\"\"\n A None-or-str object which can always be appended to.\n Similar to a defaultdict but with only a single value.\n \"\"\"", "def", "__init__", "(", "self", ")", ":", "self", ".", "value", "=", "None", "@", "property", "def", "valid", "(", "self", ")", ":", "return", "self", ".", "value", "is", "not", "None", "def", "append", "(", "self", ",", "s", ")", ":", "if", "self", ".", "value", "is", "None", ":", "self", ".", "value", "=", "s", "else", ":", "self", ".", "value", "+=", "s", "num_splits", "=", "0", "word", "=", "Word", "(", ")", "while", "True", ":", "ch", "=", "self", ".", "stream", ".", "read", "(", "1", ")", "if", "not", "ch", ":", "# EOF", "if", "word", ".", "valid", ":", "yield", "word", ".", "value", "return", "if", "(", "not", "self", ".", "escaped", "and", "self", ".", "envs", "is", "not", "None", "and", "ch", "==", "'$'", "and", "self", ".", "quotes", "!=", "self", ".", "SQUOTE", ")", ":", "while", "True", ":", "# Substitute environment variable", "braced", "=", "False", "varname", "=", "''", "while", "True", ":", "ch", "=", "self", ".", "stream", ".", "read", "(", "1", ")", "if", "varname", "==", "''", "and", "ch", "==", "'{'", ":", "braced", "=", "True", "continue", "if", "not", "ch", ":", "# EOF", "break", "if", "braced", "and", "ch", "==", "'}'", ":", "break", "if", "not", "ch", ".", "isalnum", "(", ")", "and", "ch", "!=", "'_'", ":", "break", "varname", "+=", "ch", "try", ":", "word", ".", "append", "(", "self", ".", "envs", "[", "varname", "]", ")", "except", "KeyError", ":", "pass", "# Check whether there is another envvar", "if", "ch", "!=", "'$'", ":", "break", "if", "braced", "and", "ch", "==", "'}'", ":", "continue", "# ch now holds the next character", "# Figure out what our quoting/escaping state will be", "# after this character", "is_escaped", "=", "self", ".", "escaped", "ch_unless_consumed", "=", "self", ".", "_update_quoting_state", "(", "ch", ")", "if", "dequote", ":", "# If we just processed a quote or escape character,", "# and were asked to dequote the string, consume it now", "ch", "=", "ch_unless_consumed", "# If word-splitting has been requested, check whether we are", "# at a whitespace character", "may_split", "=", "maxsplit", "!=", "0", "and", "(", "maxsplit", "is", "None", "or", "num_splits", "<", "maxsplit", ")", "at_split", "=", "may_split", "and", "(", "self", ".", "quotes", "is", "None", "and", "not", "is_escaped", "and", "ch", ".", "isspace", "(", ")", ")", "if", "at_split", ":", "# It is time to yield a word", "if", "word", ".", "valid", ":", "num_splits", "+=", "1", "yield", "word", ".", "value", "word", "=", "Word", "(", ")", "else", ":", "word", ".", "append", "(", "ch", ")" ]
Generator for the words of the string :param maxsplit: perform at most maxsplit splits; if None, do not limit the number of splits :param dequote: remove quotes and escape characters once consumed
[ "Generator", "for", "the", "words", "of", "the", "string" ]
python
train
hkff/FodtlMon
fodtlmon/tools/color.py
https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L237-L267
def _parse_input(incoming): """Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. """ codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming) color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items()) incoming_padded = _pad_input(incoming) output_colors = incoming_padded.format(**color_codes) # Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m' groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent. groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups] groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes. assert len(groups_compiled) == len(groups) # For testing. output_colors_simplified = output_colors for i in range(len(groups)): output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i]) output_no_colors = _RE_SPLIT.sub('', output_colors_simplified) # Strip any remaining color codes. if _AutoCodes.DISABLE_COLORS: output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified) return output_colors_simplified, output_no_colors
[ "def", "_parse_input", "(", "incoming", ")", ":", "codes", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "_AutoCodes", "(", ")", ".", "items", "(", ")", "if", "'{%s}'", "%", "k", "in", "incoming", ")", "color_codes", "=", "dict", "(", "(", "k", ",", "''", "if", "_AutoCodes", ".", "DISABLE_COLORS", "else", "'\\033[{0}m'", ".", "format", "(", "v", ")", ")", "for", "k", ",", "v", "in", "codes", ".", "items", "(", ")", ")", "incoming_padded", "=", "_pad_input", "(", "incoming", ")", "output_colors", "=", "incoming_padded", ".", "format", "(", "*", "*", "color_codes", ")", "# Simplify: '{b}{red}' -> '\\033[1m\\033[31m' -> '\\033[1;31m'", "groups", "=", "sorted", "(", "set", "(", "_RE_GROUP_SEARCH", ".", "findall", "(", "output_colors", ")", ")", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", "# Get codes, grouped adjacent.", "groups_simplified", "=", "[", "[", "x", "for", "n", "in", "_RE_NUMBER_SEARCH", ".", "findall", "(", "i", ")", "for", "x", "in", "n", ".", "split", "(", "';'", ")", "]", "for", "i", "in", "groups", "]", "groups_compiled", "=", "[", "'\\033[{0}m'", ".", "format", "(", "';'", ".", "join", "(", "g", ")", ")", "for", "g", "in", "groups_simplified", "]", "# Final codes.", "assert", "len", "(", "groups_compiled", ")", "==", "len", "(", "groups", ")", "# For testing.", "output_colors_simplified", "=", "output_colors", "for", "i", "in", "range", "(", "len", "(", "groups", ")", ")", ":", "output_colors_simplified", "=", "output_colors_simplified", ".", "replace", "(", "groups", "[", "i", "]", ",", "groups_compiled", "[", "i", "]", ")", "output_no_colors", "=", "_RE_SPLIT", ".", "sub", "(", "''", ",", "output_colors_simplified", ")", "# Strip any remaining color codes.", "if", "_AutoCodes", ".", "DISABLE_COLORS", ":", "output_colors_simplified", "=", "_RE_NUMBER_SEARCH", ".", "sub", "(", "''", ",", "output_colors_simplified", ")", "return", "output_colors_simplified", ",", "output_no_colors" ]
Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
[ "Performs", "the", "actual", "conversion", "of", "tags", "to", "ANSI", "escaped", "codes", "." ]
python
train
davidcarboni/Flask-Sleuth
sleuth/__init__.py
https://github.com/davidcarboni/Flask-Sleuth/blob/2191aa2a929ec43c0176ec51c7abef924b12d015/sleuth/__init__.py#L71-L88
def _tracing_information():
    """Gets B3 distributed tracing information, if available.
    This is returned as a list, ready to be formatted into
    Spring Cloud Sleuth compatible format.
    """
    # We'll collate trace information if the B3 headers have been collected:
    values = b3.values()
    if values[b3.b3_trace_id]:
        # Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1
        # However we're not currently using Zipkin, so it's always false
        # exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false"
        return [
            current_app.name if current_app.name else " - ",
            values[b3.b3_trace_id],
            values[b3.b3_span_id],
            "false",
        ]
[ "def", "_tracing_information", "(", ")", ":", "# We'll collate trace information if the B3 headers have been collected:", "values", "=", "b3", ".", "values", "(", ")", "if", "values", "[", "b3", ".", "b3_trace_id", "]", ":", "# Trace information would normally be sent to Zipkin if either of sampled or debug (\"flags\") is set to 1", "# However we're not currently using Zipkin, so it's always false", "# exported = \"true\" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else \"false\"", "return", "[", "current_app", ".", "name", "if", "current_app", ".", "name", "else", "\" - \"", ",", "values", "[", "b3", ".", "b3_trace_id", "]", ",", "values", "[", "b3", ".", "b3_span_id", "]", ",", "\"false\"", ",", "]" ]
Gets B3 distributed tracing information, if available. This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
[ "Gets", "B3", "distributed", "tracing", "information", "if", "available", ".", "This", "is", "returned", "as", "a", "list", "ready", "to", "be", "formatted", "into", "Spring", "Cloud", "Sleuth", "compatible", "format", "." ]
python
train
bxlab/bx-python
lib/bx_extras/stats.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1762-L1816
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob): """ Prints or write to a file stats for two groups, using the name, n, mean, sterr, min and max for each group, as well as the statistic name, its value, and the associated p-value. Usage: outputpairedstats(fname,writemode, name1,n1,mean1,stderr1,min1,max1, name2,n2,mean2,stderr2,min2,max2, statname,stat,prob) Returns: None """ suffix = '' # for *s after the p-value try: x = prob.shape prob = prob[0] except: pass if prob < 0.001: suffix = ' ***' elif prob < 0.01: suffix = ' **' elif prob < 0.05: suffix = ' *' title = [['Name','N','Mean','SD','Min','Max']] lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1], [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]] if type(fname)!=StringType or len(fname)==0: print() print(statname) print() pstat.printcc(lofl) print() try: if stat.shape == (): stat = stat[0] if prob.shape == (): prob = prob[0] except: pass print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix) print() else: file = open(fname,writemode) file.write('\n'+statname+'\n\n') file.close() writecc(lofl,fname,'a') file = open(fname,'a') try: if stat.shape == (): stat = stat[0] if prob.shape == (): prob = prob[0] except: pass file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n'])) file.close() return None
[ "def", "outputpairedstats", "(", "fname", ",", "writemode", ",", "name1", ",", "n1", ",", "m1", ",", "se1", ",", "min1", ",", "max1", ",", "name2", ",", "n2", ",", "m2", ",", "se2", ",", "min2", ",", "max2", ",", "statname", ",", "stat", ",", "prob", ")", ":", "suffix", "=", "''", "# for *s after the p-value", "try", ":", "x", "=", "prob", ".", "shape", "prob", "=", "prob", "[", "0", "]", "except", ":", "pass", "if", "prob", "<", "0.001", ":", "suffix", "=", "' ***'", "elif", "prob", "<", "0.01", ":", "suffix", "=", "' **'", "elif", "prob", "<", "0.05", ":", "suffix", "=", "' *'", "title", "=", "[", "[", "'Name'", ",", "'N'", ",", "'Mean'", ",", "'SD'", ",", "'Min'", ",", "'Max'", "]", "]", "lofl", "=", "title", "+", "[", "[", "name1", ",", "n1", ",", "round", "(", "m1", ",", "3", ")", ",", "round", "(", "math", ".", "sqrt", "(", "se1", ")", ",", "3", ")", ",", "min1", ",", "max1", "]", ",", "[", "name2", ",", "n2", ",", "round", "(", "m2", ",", "3", ")", ",", "round", "(", "math", ".", "sqrt", "(", "se2", ")", ",", "3", ")", ",", "min2", ",", "max2", "]", "]", "if", "type", "(", "fname", ")", "!=", "StringType", "or", "len", "(", "fname", ")", "==", "0", ":", "print", "(", ")", "print", "(", "statname", ")", "print", "(", ")", "pstat", ".", "printcc", "(", "lofl", ")", "print", "(", ")", "try", ":", "if", "stat", ".", "shape", "==", "(", ")", ":", "stat", "=", "stat", "[", "0", "]", "if", "prob", ".", "shape", "==", "(", ")", ":", "prob", "=", "prob", "[", "0", "]", "except", ":", "pass", "print", "(", "'Test statistic = '", ",", "round", "(", "stat", ",", "3", ")", ",", "' p = '", ",", "round", "(", "prob", ",", "3", ")", ",", "suffix", ")", "print", "(", ")", "else", ":", "file", "=", "open", "(", "fname", ",", "writemode", ")", "file", ".", "write", "(", "'\\n'", "+", "statname", "+", "'\\n\\n'", ")", "file", ".", "close", "(", ")", "writecc", "(", "lofl", ",", "fname", ",", "'a'", ")", "file", "=", "open", "(", "fname", ",", "'a'", ")", "try", ":", "if", "stat", ".", "shape", "==", "(", ")", ":", "stat", "=", "stat", "[", "0", "]", "if", "prob", ".", "shape", "==", "(", ")", ":", "prob", "=", "prob", "[", "0", "]", "except", ":", "pass", "file", ".", "write", "(", "pstat", ".", "list2string", "(", "[", "'\\nTest statistic = '", ",", "round", "(", "stat", ",", "4", ")", ",", "' p = '", ",", "round", "(", "prob", ",", "4", ")", ",", "suffix", ",", "'\\n\\n'", "]", ")", ")", "file", ".", "close", "(", ")", "return", "None" ]
Prints or write to a file stats for two groups, using the name, n, mean, sterr, min and max for each group, as well as the statistic name, its value, and the associated p-value. Usage: outputpairedstats(fname,writemode, name1,n1,mean1,stderr1,min1,max1, name2,n2,mean2,stderr2,min2,max2, statname,stat,prob) Returns: None
[ "Prints", "or", "write", "to", "a", "file", "stats", "for", "two", "groups", "using", "the", "name", "n", "mean", "sterr", "min", "and", "max", "for", "each", "group", "as", "well", "as", "the", "statistic", "name", "its", "value", "and", "the", "associated", "p", "-", "value", "." ]
python
train
jplusplus/statscraper
statscraper/BaseScraperList.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/BaseScraperList.py#L17-L21
def get_by_label(self, label):
    """ Return the first item with a specific label, or None. """
    return next((x for x in self if x.label == label), None)
[ "def", "get_by_label", "(", "self", ",", "label", ")", ":", "return", "next", "(", "(", "x", "for", "x", "in", "self", "if", "x", ".", "label", "==", "label", ")", ",", "None", ")" ]
Return the first item with a specific label, or None.
[ "Return", "the", "first", "item", "with", "a", "specific", "label", "or", "None", "." ]
python
train
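A quick standalone illustration (assumptions: a plain list of objects with a label attribute, not the actual BaseScraperList class) of the next(..., None) first-match idiom used in the sample above:

from collections import namedtuple

Item = namedtuple('Item', 'label value')
items = [Item('a', 1), Item('b', 2), Item('b', 3)]

first_b = next((x for x in items if x.label == 'b'), None)
missing = next((x for x in items if x.label == 'z'), None)
print(first_b)   # Item(label='b', value=2) - the first match wins
print(missing)   # None - the default is returned instead of raising StopIteration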
MartinHjelmare/leicacam
leicacam/async_cam.py
https://github.com/MartinHjelmare/leicacam/blob/1df37bccd34884737d3b5e169fae71dd2f21f1e2/leicacam/async_cam.py#L21-L25
async def connect(self):
    """Connect to LASAF through a CAM-socket."""
    self.reader, self.writer = await asyncio.open_connection(
        self.host, self.port, loop=self.loop)
    self.welcome_msg = await self.reader.read(self.buffer_size)
[ "async", "def", "connect", "(", "self", ")", ":", "self", ".", "reader", ",", "self", ".", "writer", "=", "await", "asyncio", ".", "open_connection", "(", "self", ".", "host", ",", "self", ".", "port", ",", "loop", "=", "self", ".", "loop", ")", "self", ".", "welcome_msg", "=", "await", "self", ".", "reader", ".", "read", "(", "self", ".", "buffer_size", ")" ]
Connect to LASAF through a CAM-socket.
[ "Connect", "to", "LASAF", "through", "a", "CAM", "-", "socket", "." ]
python
test
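asyncio.open_connection returns a (StreamReader, StreamWriter) pair and reads are awaited, as in the sample above. A hedged sketch of the same pattern (host, port, and byte count are placeholders; note that the loop= argument used in the sample was removed in Python 3.10):

import asyncio

async def fetch_banner(host='127.0.0.1', port=8020, nbytes=1024):
    # Open a TCP stream and read the server's welcome message.
    reader, writer = await asyncio.open_connection(host, port)
    banner = await reader.read(nbytes)
    writer.close()
    await writer.wait_closed()
    return banner

# asyncio.run(fetch_banner())  # requires a server listening on the placeholder port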
DeepHorizons/iarm
iarm/arm_instructions/memory.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/memory.py#L281-L305
def LDRSH(self, params): """ LDRSH Ra, [Rb, Rc] Load a half word from memory, sign extend, and put into Ra Ra, Rb, and Rc must be low registers """ # TODO LDRSH cant use immediates Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params) self.check_arguments(low_registers=(Ra, Rb, Rc)) def LDRSH_func(): # TODO does memory read up? if (self.register[Rb] + self.register[Rc]) % 2 != 0: raise iarm.exceptions.HardFault( "Memory access not half word aligned\nR{}: {}\nR{}: {}".format(Rb, self.register[Rb], Rc, self.register[Rc])) self.register[Ra] = 0 for i in range(2): self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i)) if self.register[Ra] & (1 << 15): self.register[Ra] |= (0xFFFF << 16) return LDRSH_func
[ "def", "LDRSH", "(", "self", ",", "params", ")", ":", "# TODO LDRSH cant use immediates", "Ra", ",", "Rb", ",", "Rc", "=", "self", ".", "get_three_parameters", "(", "self", ".", "THREE_PARAMETER_WITH_BRACKETS", ",", "params", ")", "self", ".", "check_arguments", "(", "low_registers", "=", "(", "Ra", ",", "Rb", ",", "Rc", ")", ")", "def", "LDRSH_func", "(", ")", ":", "# TODO does memory read up?", "if", "(", "self", ".", "register", "[", "Rb", "]", "+", "self", ".", "register", "[", "Rc", "]", ")", "%", "2", "!=", "0", ":", "raise", "iarm", ".", "exceptions", ".", "HardFault", "(", "\"Memory access not half word aligned\\nR{}: {}\\nR{}: {}\"", ".", "format", "(", "Rb", ",", "self", ".", "register", "[", "Rb", "]", ",", "Rc", ",", "self", ".", "register", "[", "Rc", "]", ")", ")", "self", ".", "register", "[", "Ra", "]", "=", "0", "for", "i", "in", "range", "(", "2", ")", ":", "self", ".", "register", "[", "Ra", "]", "|=", "(", "self", ".", "memory", "[", "self", ".", "register", "[", "Rb", "]", "+", "self", ".", "register", "[", "Rc", "]", "+", "i", "]", "<<", "(", "8", "*", "i", ")", ")", "if", "self", ".", "register", "[", "Ra", "]", "&", "(", "1", "<<", "15", ")", ":", "self", ".", "register", "[", "Ra", "]", "|=", "(", "0xFFFF", "<<", "16", ")", "return", "LDRSH_func" ]
LDRSH Ra, [Rb, Rc] Load a half word from memory, sign extend, and put into Ra Ra, Rb, and Rc must be low registers
[ "LDRSH", "Ra", "[", "Rb", "Rc", "]" ]
python
train
riverrun/drat
drat/analysis.py
https://github.com/riverrun/drat/blob/50cbbf69c022b6ca6641cd55386813b0695c21f5/drat/analysis.py#L81-L88
def dale_chall(self, diff_count, words, sentences):
    """Calculate Dale-Chall readability score."""
    pdw = diff_count / words * 100
    asl = words / sentences
    raw = 0.1579 * (pdw) + 0.0496 * asl
    if pdw > 5:
        return raw + 3.6365
    return raw
[ "def", "dale_chall", "(", "self", ",", "diff_count", ",", "words", ",", "sentences", ")", ":", "pdw", "=", "diff_count", "/", "words", "*", "100", "asl", "=", "words", "/", "sentences", "raw", "=", "0.1579", "*", "(", "pdw", ")", "+", "0.0496", "*", "asl", "if", "pdw", ">", "5", ":", "return", "raw", "+", "3.6365", "return", "raw" ]
Calculate Dale-Chall readability score.
[ "Calculate", "Dale", "-", "Chall", "readability", "score", "." ]
python
train
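As a worked example with made-up numbers (not from the dataset): 12 difficult words out of 120 words over 6 sentences gives pdw = 10 and asl = 20, so raw = 0.1579*10 + 0.0496*20 = 2.571, and since pdw > 5 the adjusted score is 2.571 + 3.6365 = 6.2075. A standalone sketch of the same formula:

def dale_chall_score(diff_count, words, sentences):
    # Same formula as the sampled method, written as a free function.
    pdw = diff_count / words * 100
    asl = words / sentences
    raw = 0.1579 * pdw + 0.0496 * asl
    return raw + 3.6365 if pdw > 5 else raw

print(dale_chall_score(12, 120, 6))  # ~6.2075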
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L563-L567
def set_led_brightness(self, brightness):
    """Set the LED brightness for the current group/button."""
    set_cmd = self._create_set_property_msg("_led_brightness", 0x07, brightness)
    self._send_method(set_cmd, self._property_set)
[ "def", "set_led_brightness", "(", "self", ",", "brightness", ")", ":", "set_cmd", "=", "self", ".", "_create_set_property_msg", "(", "\"_led_brightness\"", ",", "0x07", ",", "brightness", ")", "self", ".", "_send_method", "(", "set_cmd", ",", "self", ".", "_property_set", ")" ]
Set the LED brightness for the current group/button.
[ "Set", "the", "LED", "brightness", "for", "the", "current", "group", "/", "button", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L863-L893
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8 """ if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 end = len(data) - 1 while position < end: obj_size = _UNPACK_INT(data[position:position + 4])[0] elements = data[position:position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options)
[ "def", "decode_iter", "(", "data", ",", "codec_options", "=", "DEFAULT_CODEC_OPTIONS", ")", ":", "if", "not", "isinstance", "(", "codec_options", ",", "CodecOptions", ")", ":", "raise", "_CODEC_OPTIONS_TYPE_ERROR", "position", "=", "0", "end", "=", "len", "(", "data", ")", "-", "1", "while", "position", "<", "end", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", "position", ":", "position", "+", "4", "]", ")", "[", "0", "]", "elements", "=", "data", "[", "position", ":", "position", "+", "obj_size", "]", "position", "+=", "obj_size", "yield", "_bson_to_dict", "(", "elements", ",", "codec_options", ")" ]
Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a time. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. .. versionadded:: 2.8
[ "Decode", "BSON", "data", "to", "multiple", "documents", "as", "a", "generator", "." ]
python
train
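The generator above walks a byte string of concatenated BSON documents by reading each document's little-endian int32 size prefix and slicing out that many bytes. A rough standalone sketch of that length-prefixed iteration pattern (toy records, not the real bson module):

import struct

def iter_length_prefixed(data):
    # Each record starts with a little-endian int32 giving its total size,
    # size prefix included - the same framing BSON uses.
    position, end = 0, len(data)
    while position < end:
        (size,) = struct.unpack('<i', data[position:position + 4])
        yield data[position:position + size]
        position += size

# Two toy records: 4-byte size prefix + payload.
rec1 = struct.pack('<i', 4 + 3) + b'foo'
rec2 = struct.pack('<i', 4 + 5) + b'hello'
print(list(iter_length_prefixed(rec1 + rec2)))  # [b'\x07\x00\x00\x00foo', b'\t\x00\x00\x00hello']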
etingof/pyasn1
pyasn1/type/univ.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L968-L987
def fromHexString(value):
    """Create a |ASN.1| object initialized from the hex string.

    Parameters
    ----------
    value: :class:`str`
        Text string like 'DEADBEEF'
    """
    r = []
    p = []
    for v in value:
        if p:
            r.append(int(p + v, 16))
            p = None
        else:
            p = v
    if p:
        r.append(int(p + '0', 16))
    return octets.ints2octs(r)
[ "def", "fromHexString", "(", "value", ")", ":", "r", "=", "[", "]", "p", "=", "[", "]", "for", "v", "in", "value", ":", "if", "p", ":", "r", ".", "append", "(", "int", "(", "p", "+", "v", ",", "16", ")", ")", "p", "=", "None", "else", ":", "p", "=", "v", "if", "p", ":", "r", ".", "append", "(", "int", "(", "p", "+", "'0'", ",", "16", ")", ")", "return", "octets", ".", "ints2octs", "(", "r", ")" ]
Create a |ASN.1| object initialized from the hex string. Parameters ---------- value: :class:`str` Text string like 'DEADBEEF'
[ "Create", "a", "|ASN", ".", "1|", "object", "initialized", "from", "the", "hex", "string", "." ]
python
train
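The pyasn1 helper above pairs hex digits into byte values and pads a trailing odd digit with '0'. A small hedged illustration of that pairing logic, returning plain ints rather than going through the octets helper:

def hex_pairs_to_ints(value):
    # Pair up hex digits; a dangling final digit is padded with '0',
    # mirroring the sampled fromHexString logic.
    out, pending = [], None
    for ch in value:
        if pending:
            out.append(int(pending + ch, 16))
            pending = None
        else:
            pending = ch
    if pending:
        out.append(int(pending + '0', 16))
    return out

print(hex_pairs_to_ints('DEADBEEF'))  # [222, 173, 190, 239]
print(hex_pairs_to_ints('ABC'))       # [171, 192] - trailing 'C' becomes 0xC0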
etcher-be/emiz
emiz/avwx/core.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/core.py#L61-L70
def unpack_fraction(num: str) -> str:
    """
    Returns unpacked fraction string 5/2 -> 2 1/2
    """
    nums = [int(n) for n in num.split('/') if n]
    if len(nums) == 2 and nums[0] > nums[1]:
        over = nums[0] // nums[1]
        rem = nums[0] % nums[1]
        return f'{over} {rem}/{nums[1]}'
    return num
[ "def", "unpack_fraction", "(", "num", ":", "str", ")", "->", "str", ":", "nums", "=", "[", "int", "(", "n", ")", "for", "n", "in", "num", ".", "split", "(", "'/'", ")", "if", "n", "]", "if", "len", "(", "nums", ")", "==", "2", "and", "nums", "[", "0", "]", ">", "nums", "[", "1", "]", ":", "over", "=", "nums", "[", "0", "]", "//", "nums", "[", "1", "]", "rem", "=", "nums", "[", "0", "]", "%", "nums", "[", "1", "]", "return", "f'{over} {rem}/{nums[1]}'", "return", "num" ]
Returns unpacked fraction string 5/2 -> 2 1/2
[ "Returns", "unpacked", "fraction", "string", "5", "/", "2", "-", ">", "2", "1", "/", "2" ]
python
train
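For instance, '5/2' splits into 5 and 2, and 5 // 2 = 2 with remainder 1 gives '2 1/2', while a proper fraction such as '1/2' is returned unchanged. A self-contained copy of that logic for experimentation (trivially adapted from the sample, not the emiz package itself):

def unpack_fraction(num: str) -> str:
    nums = [int(n) for n in num.split('/') if n]
    if len(nums) == 2 and nums[0] > nums[1]:
        over, rem = divmod(nums[0], nums[1])  # whole part and remainder
        return f'{over} {rem}/{nums[1]}'
    return num

print(unpack_fraction('5/2'))  # 2 1/2
print(unpack_fraction('1/2'))  # 1/2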
faucamp/python-gsmmodem
gsmmodem/serial_comms.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/serial_comms.py#L83-L118
def _readLoop(self): """ Read thread main loop Reads lines from the connected device """ try: readTermSeq = list(self.RX_EOL_SEQ) readTermLen = len(readTermSeq) rxBuffer = [] while self.alive: data = self.serial.read(1) if data != '': # check for timeout #print >> sys.stderr, ' RX:', data,'({0})'.format(ord(data)) rxBuffer.append(data) if rxBuffer[-readTermLen:] == readTermSeq: # A line (or other logical segment) has been read line = ''.join(rxBuffer[:-readTermLen]) rxBuffer = [] if len(line) > 0: #print 'calling handler' self._handleLineRead(line) elif self._expectResponseTermSeq: if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq: line = ''.join(rxBuffer) rxBuffer = [] self._handleLineRead(line, checkForResponseTerm=False) #else: #' <RX timeout>' except serial.SerialException as e: self.alive = False try: self.serial.close() except Exception: #pragma: no cover pass # Notify the fatal error handler self.fatalErrorCallback(e)
[ "def", "_readLoop", "(", "self", ")", ":", "try", ":", "readTermSeq", "=", "list", "(", "self", ".", "RX_EOL_SEQ", ")", "readTermLen", "=", "len", "(", "readTermSeq", ")", "rxBuffer", "=", "[", "]", "while", "self", ".", "alive", ":", "data", "=", "self", ".", "serial", ".", "read", "(", "1", ")", "if", "data", "!=", "''", ":", "# check for timeout", "#print >> sys.stderr, ' RX:', data,'({0})'.format(ord(data))", "rxBuffer", ".", "append", "(", "data", ")", "if", "rxBuffer", "[", "-", "readTermLen", ":", "]", "==", "readTermSeq", ":", "# A line (or other logical segment) has been read", "line", "=", "''", ".", "join", "(", "rxBuffer", "[", ":", "-", "readTermLen", "]", ")", "rxBuffer", "=", "[", "]", "if", "len", "(", "line", ")", ">", "0", ":", "#print 'calling handler' ", "self", ".", "_handleLineRead", "(", "line", ")", "elif", "self", ".", "_expectResponseTermSeq", ":", "if", "rxBuffer", "[", "-", "len", "(", "self", ".", "_expectResponseTermSeq", ")", ":", "]", "==", "self", ".", "_expectResponseTermSeq", ":", "line", "=", "''", ".", "join", "(", "rxBuffer", ")", "rxBuffer", "=", "[", "]", "self", ".", "_handleLineRead", "(", "line", ",", "checkForResponseTerm", "=", "False", ")", "#else:", "#' <RX timeout>'", "except", "serial", ".", "SerialException", "as", "e", ":", "self", ".", "alive", "=", "False", "try", ":", "self", ".", "serial", ".", "close", "(", ")", "except", "Exception", ":", "#pragma: no cover", "pass", "# Notify the fatal error handler", "self", ".", "fatalErrorCallback", "(", "e", ")" ]
Read thread main loop Reads lines from the connected device
[ "Read", "thread", "main", "loop", "Reads", "lines", "from", "the", "connected", "device" ]
python
train
cackharot/suds-py3
suds/properties.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/properties.py#L322-L335
def unlink(self, *others):
    """
    Unlink (disassociate) the specified properties object.
    @param others: The list object to unlink. Unspecified means unlink all.
    @type others: [L{Properties},..]
    @return: self
    @rtype: L{Properties}
    """
    if not len(others):
        others = self.links[:]
    for p in self.links[:]:
        if p in others:
            p.teardown()
    return self
[ "def", "unlink", "(", "self", ",", "*", "others", ")", ":", "if", "not", "len", "(", "others", ")", ":", "others", "=", "self", ".", "links", "[", ":", "]", "for", "p", "in", "self", ".", "links", "[", ":", "]", ":", "if", "p", "in", "others", ":", "p", ".", "teardown", "(", ")", "return", "self" ]
Unlink (disassociate) the specified properties object. @param others: The list object to unlink. Unspecified means unlink all. @type others: [L{Properties},..] @return: self @rtype: L{Properties}
[ "Unlink", "(", "disassociate", ")", "the", "specified", "properties", "object", "." ]
python
train
OCA/openupgradelib
openupgradelib/openupgrade.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade.py#L180-L220
def allow_pgcodes(cr, *codes): """Context manager that will omit specified error codes. E.g., suppose you expect a migration to produce unique constraint violations and you want to ignore them. Then you could just do:: with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) SELECT name FROM you") .. warning:: **All** sentences inside this context will be rolled back if **a single error** is raised, so the above example would insert **nothing** if a single row violates a unique constraint. This would ignore duplicate files but insert the others:: cr.execute("SELECT name FROM you") for row in cr.fetchall(): with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) VALUES (%s)", row[0]) :param *str codes: Undefined amount of error codes found in :mod:`psycopg2.errorcodes` that are allowed. Codes can have either 2 characters (indicating an error class) or 5 (indicating a concrete error). Any other errors will be raised. """ try: with cr.savepoint(): with core.tools.mute_logger('odoo.sql_db'): yield except (ProgrammingError, IntegrityError) as error: msg = "Code: {code}. Class: {class_}. Error: {error}.".format( code=error.pgcode, class_=errorcodes.lookup(error.pgcode[:2]), error=errorcodes.lookup(error.pgcode)) if error.pgcode in codes or error.pgcode[:2] in codes: logger.info(msg) else: logger.exception(msg) raise
[ "def", "allow_pgcodes", "(", "cr", ",", "*", "codes", ")", ":", "try", ":", "with", "cr", ".", "savepoint", "(", ")", ":", "with", "core", ".", "tools", ".", "mute_logger", "(", "'odoo.sql_db'", ")", ":", "yield", "except", "(", "ProgrammingError", ",", "IntegrityError", ")", "as", "error", ":", "msg", "=", "\"Code: {code}. Class: {class_}. Error: {error}.\"", ".", "format", "(", "code", "=", "error", ".", "pgcode", ",", "class_", "=", "errorcodes", ".", "lookup", "(", "error", ".", "pgcode", "[", ":", "2", "]", ")", ",", "error", "=", "errorcodes", ".", "lookup", "(", "error", ".", "pgcode", ")", ")", "if", "error", ".", "pgcode", "in", "codes", "or", "error", ".", "pgcode", "[", ":", "2", "]", "in", "codes", ":", "logger", ".", "info", "(", "msg", ")", "else", ":", "logger", ".", "exception", "(", "msg", ")", "raise" ]
Context manager that will omit specified error codes. E.g., suppose you expect a migration to produce unique constraint violations and you want to ignore them. Then you could just do:: with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) SELECT name FROM you") .. warning:: **All** sentences inside this context will be rolled back if **a single error** is raised, so the above example would insert **nothing** if a single row violates a unique constraint. This would ignore duplicate files but insert the others:: cr.execute("SELECT name FROM you") for row in cr.fetchall(): with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION): cr.execute("INSERT INTO me (name) VALUES (%s)", row[0]) :param *str codes: Undefined amount of error codes found in :mod:`psycopg2.errorcodes` that are allowed. Codes can have either 2 characters (indicating an error class) or 5 (indicating a concrete error). Any other errors will be raised.
[ "Context", "manager", "that", "will", "omit", "specified", "error", "codes", "." ]
python
train
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/PyDbLite.py
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/PyDbLite.py#L240-L247
def commit(self):
    """Write the database to a file"""
    out = open(self.name,'wb')
    cPickle.dump(self.fields,out,self.protocol)
    cPickle.dump(self.next_id,out,self.protocol)
    cPickle.dump(self.records,out,self.protocol)
    cPickle.dump(self.indices,out,self.protocol)
    out.close()
[ "def", "commit", "(", "self", ")", ":", "out", "=", "open", "(", "self", ".", "name", ",", "'wb'", ")", "cPickle", ".", "dump", "(", "self", ".", "fields", ",", "out", ",", "self", ".", "protocol", ")", "cPickle", ".", "dump", "(", "self", ".", "next_id", ",", "out", ",", "self", ".", "protocol", ")", "cPickle", ".", "dump", "(", "self", ".", "records", ",", "out", ",", "self", ".", "protocol", ")", "cPickle", ".", "dump", "(", "self", ".", "indices", ",", "out", ",", "self", ".", "protocol", ")", "out", ".", "close", "(", ")" ]
Write the database to a file
[ "Write", "the", "database", "to", "a", "file" ]
python
train
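The commit method above writes several pickled objects back-to-back into one file; reading them later just means calling load the same number of times in the same order. A minimal sketch with the Python 3 pickle module (the file name and sample data are made up):

import pickle

fields, next_id = ['name', 'age'], 3
records = {1: {'name': 'a'}, 2: {'name': 'b'}}

with open('example.pdl', 'wb') as out:
    # Consecutive dumps to the same file handle, as in the sample.
    pickle.dump(fields, out)
    pickle.dump(next_id, out)
    pickle.dump(records, out)

with open('example.pdl', 'rb') as src:
    # Loads come back in the same order they were written.
    print(pickle.load(src), pickle.load(src), pickle.load(src))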
O365/python-o365
O365/excel.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L1364-L1395
def update(self, *, name=None, show_headers=None, show_totals=None, style=None): """ Updates this table :param str name: the name of the table :param bool show_headers: whether or not to show the headers :param bool show_totals: whether or not to show the totals :param str style: the style of the table :return: Success or Failure """ if name is None and show_headers is None and show_totals is None and style is None: raise ValueError('Provide at least one parameter to update') data = {} if name: data['name'] = name if show_headers: data['showHeaders'] = show_headers if show_totals: data['showTotals'] = show_totals if style: data['style'] = style response = self.session.patch(self.build_url(''), data=data) if not response: return False data = response.json() self.name = data.get('name', self.name) self.show_headers = data.get('showHeaders', self.show_headers) self.show_totals = data.get('showTotals', self.show_totals) self.style = data.get('style', self.style) return True
[ "def", "update", "(", "self", ",", "*", ",", "name", "=", "None", ",", "show_headers", "=", "None", ",", "show_totals", "=", "None", ",", "style", "=", "None", ")", ":", "if", "name", "is", "None", "and", "show_headers", "is", "None", "and", "show_totals", "is", "None", "and", "style", "is", "None", ":", "raise", "ValueError", "(", "'Provide at least one parameter to update'", ")", "data", "=", "{", "}", "if", "name", ":", "data", "[", "'name'", "]", "=", "name", "if", "show_headers", ":", "data", "[", "'showHeaders'", "]", "=", "show_headers", "if", "show_totals", ":", "data", "[", "'showTotals'", "]", "=", "show_totals", "if", "style", ":", "data", "[", "'style'", "]", "=", "style", "response", "=", "self", ".", "session", ".", "patch", "(", "self", ".", "build_url", "(", "''", ")", ",", "data", "=", "data", ")", "if", "not", "response", ":", "return", "False", "data", "=", "response", ".", "json", "(", ")", "self", ".", "name", "=", "data", ".", "get", "(", "'name'", ",", "self", ".", "name", ")", "self", ".", "show_headers", "=", "data", ".", "get", "(", "'showHeaders'", ",", "self", ".", "show_headers", ")", "self", ".", "show_totals", "=", "data", ".", "get", "(", "'showTotals'", ",", "self", ".", "show_totals", ")", "self", ".", "style", "=", "data", ".", "get", "(", "'style'", ",", "self", ".", "style", ")", "return", "True" ]
Updates this table :param str name: the name of the table :param bool show_headers: whether or not to show the headers :param bool show_totals: whether or not to show the totals :param str style: the style of the table :return: Success or Failure
[ "Updates", "this", "table", ":", "param", "str", "name", ":", "the", "name", "of", "the", "table", ":", "param", "bool", "show_headers", ":", "whether", "or", "not", "to", "show", "the", "headers", ":", "param", "bool", "show_totals", ":", "whether", "or", "not", "to", "show", "the", "totals", ":", "param", "str", "style", ":", "the", "style", "of", "the", "table", ":", "return", ":", "Success", "or", "Failure" ]
python
train
rq/rq-scheduler
rq_scheduler/scheduler.py
https://github.com/rq/rq-scheduler/blob/ee60c19e42a46ba787f762733a0036aa0cf2f7b7/rq_scheduler/scheduler.py#L68-L80
def acquire_lock(self):
    """
    Acquire lock before scheduling jobs to prevent another scheduler
    from scheduling jobs at the same time.

    This function returns True if a lock is acquired. False otherwise.
    """
    key = '%s_lock' % self.scheduler_key
    now = time.time()
    expires = int(self._interval) + 10
    self._lock_acquired = self.connection.set(
        key, now, ex=expires, nx=True)
    return self._lock_acquired
[ "def", "acquire_lock", "(", "self", ")", ":", "key", "=", "'%s_lock'", "%", "self", ".", "scheduler_key", "now", "=", "time", ".", "time", "(", ")", "expires", "=", "int", "(", "self", ".", "_interval", ")", "+", "10", "self", ".", "_lock_acquired", "=", "self", ".", "connection", ".", "set", "(", "key", ",", "now", ",", "ex", "=", "expires", ",", "nx", "=", "True", ")", "return", "self", ".", "_lock_acquired" ]
Acquire lock before scheduling jobs to prevent another scheduler from scheduling jobs at the same time. This function returns True if a lock is acquired. False otherwise.
[ "Acquire", "lock", "before", "scheduling", "jobs", "to", "prevent", "another", "scheduler", "from", "scheduling", "jobs", "at", "the", "same", "time", "." ]
python
train
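The lock above relies on Redis SET with nx=True (set only if the key does not already exist) and ex=... (expiry in seconds), so a crashed scheduler cannot hold the lock forever. A hedged sketch of the same pattern with redis-py, assuming a local Redis and a made-up key name:

import time
import redis

conn = redis.Redis()  # assumes Redis on localhost:6379

def try_acquire(key='example_scheduler_lock', interval=60):
    # SET key value EX <ttl> NX: succeeds only when the key is absent,
    # and expires automatically so a dead holder eventually releases it.
    return bool(conn.set(key, time.time(), ex=interval + 10, nx=True))

if try_acquire():
    print('lock acquired; safe to schedule jobs')
else:
    print('another scheduler holds the lock')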
ecordell/pymacaroons
pymacaroons/serializers/json_serializer.py
https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L172-L181
def _read_json_binary_field(deserialized, field):
    ''' Read the value of a JSON field that may be string or base64-encoded. '''
    val = deserialized.get(field)
    if val is not None:
        return utils.convert_to_bytes(val)
    val = deserialized.get(field + '64')
    if val is None:
        return None
    return utils.raw_urlsafe_b64decode(val)
[ "def", "_read_json_binary_field", "(", "deserialized", ",", "field", ")", ":", "val", "=", "deserialized", ".", "get", "(", "field", ")", "if", "val", "is", "not", "None", ":", "return", "utils", ".", "convert_to_bytes", "(", "val", ")", "val", "=", "deserialized", ".", "get", "(", "field", "+", "'64'", ")", "if", "val", "is", "None", ":", "return", "None", "return", "utils", ".", "raw_urlsafe_b64decode", "(", "val", ")" ]
Read the value of a JSON field that may be string or base64-encoded.
[ "Read", "the", "value", "of", "a", "JSON", "field", "that", "may", "be", "string", "or", "base64", "-", "encoded", "." ]
python
train
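The macaroon helper above looks for a plain field first and falls back to the same name with a '64' suffix holding raw URL-safe base64. A small illustration of that fallback using only the standard library (the padding step stands in for pymacaroons' raw_urlsafe_b64decode):

import base64

def read_binary_field(obj, field):
    # Prefer the plain-text field; otherwise decode '<field>64' as
    # unpadded URL-safe base64 (padding restored before decoding).
    val = obj.get(field)
    if val is not None:
        return val.encode('utf-8')
    val = obj.get(field + '64')
    if val is None:
        return None
    return base64.urlsafe_b64decode(val + '=' * (-len(val) % 4))

print(read_binary_field({'identifier': 'abc'}, 'identifier'))        # b'abc'
print(read_binary_field({'identifier64': 'aGVsbG8'}, 'identifier'))  # b'hello'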
goshuirc/irc
girc/features.py
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/features.py#L48-L86
def _simplify_feature_value(self, name, value): """Return simplified and more pythonic feature values.""" if name == 'prefix': channel_modes, channel_chars = value.split(')') channel_modes = channel_modes[1:] # [::-1] to reverse order and go from lowest to highest privs value = OrderedDict(list(zip(channel_modes, channel_chars))[::-1]) return value elif name == 'chanmodes': value = value.split(',') return value elif name == 'targmax': max_available = {} for sort in value.split(','): command, limit = sort.split(':') command = command.casefold() max_available[command] = limit_to_number(limit) return max_available elif name == 'chanlimit': limit_available = {} for sort in value.split(','): chan_types, limit = sort.split(':') for prefix in chan_types: limit_available[prefix] = limit_to_number(limit) return limit_available elif name in _limits: value = limit_to_number(value) return value else: return value
[ "def", "_simplify_feature_value", "(", "self", ",", "name", ",", "value", ")", ":", "if", "name", "==", "'prefix'", ":", "channel_modes", ",", "channel_chars", "=", "value", ".", "split", "(", "')'", ")", "channel_modes", "=", "channel_modes", "[", "1", ":", "]", "# [::-1] to reverse order and go from lowest to highest privs", "value", "=", "OrderedDict", "(", "list", "(", "zip", "(", "channel_modes", ",", "channel_chars", ")", ")", "[", ":", ":", "-", "1", "]", ")", "return", "value", "elif", "name", "==", "'chanmodes'", ":", "value", "=", "value", ".", "split", "(", "','", ")", "return", "value", "elif", "name", "==", "'targmax'", ":", "max_available", "=", "{", "}", "for", "sort", "in", "value", ".", "split", "(", "','", ")", ":", "command", ",", "limit", "=", "sort", ".", "split", "(", "':'", ")", "command", "=", "command", ".", "casefold", "(", ")", "max_available", "[", "command", "]", "=", "limit_to_number", "(", "limit", ")", "return", "max_available", "elif", "name", "==", "'chanlimit'", ":", "limit_available", "=", "{", "}", "for", "sort", "in", "value", ".", "split", "(", "','", ")", ":", "chan_types", ",", "limit", "=", "sort", ".", "split", "(", "':'", ")", "for", "prefix", "in", "chan_types", ":", "limit_available", "[", "prefix", "]", "=", "limit_to_number", "(", "limit", ")", "return", "limit_available", "elif", "name", "in", "_limits", ":", "value", "=", "limit_to_number", "(", "value", ")", "return", "value", "else", ":", "return", "value" ]
Return simplified and more pythonic feature values.
[ "Return", "simplified", "and", "more", "pythonic", "feature", "values", "." ]
python
train
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7915-L8212
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. 
>>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 """ from pandas.core.resample import (resample, _maybe_process_deprecations) axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
[ "def", "resample", "(", "self", ",", "rule", ",", "how", "=", "None", ",", "axis", "=", "0", ",", "fill_method", "=", "None", ",", "closed", "=", "None", ",", "label", "=", "None", ",", "convention", "=", "'start'", ",", "kind", "=", "None", ",", "loffset", "=", "None", ",", "limit", "=", "None", ",", "base", "=", "0", ",", "on", "=", "None", ",", "level", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "resample", "import", "(", "resample", ",", "_maybe_process_deprecations", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "r", "=", "resample", "(", "self", ",", "freq", "=", "rule", ",", "label", "=", "label", ",", "closed", "=", "closed", ",", "axis", "=", "axis", ",", "kind", "=", "kind", ",", "loffset", "=", "loffset", ",", "convention", "=", "convention", ",", "base", "=", "base", ",", "key", "=", "on", ",", "level", "=", "level", ")", "return", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "how", ",", "fill_method", "=", "fill_method", ",", "limit", "=", "limit", ")" ]
Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. 
Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90
[ "Resample", "time", "-", "series", "data", "." ]
python
train
horazont/aioxmpp
aioxmpp/pubsub/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pubsub/service.py#L357-L390
def unsubscribe(self, jid, node=None, *, subscription_jid=None, subid=None): """ Unsubscribe from a node. :param jid: Address of the PubSub service. :type jid: :class:`aioxmpp.JID` :param node: Name of the PubSub node to unsubscribe from. :type node: :class:`str` :param subscription_jid: The address to subscribe from the service. :type subscription_jid: :class:`aioxmpp.JID` :param subid: Unique ID of the subscription to remove. :type subid: :class:`str` :raises aioxmpp.errors.XMPPError: as returned by the service By default, the unsubscribe request will be for the bare JID of the client. It can be specified explicitly using the `subscription_jid` argument. If available, the `subid` should also be specified. If an error occurs, the corresponding :class:`~.errors.XMPPError` is raised. """ subscription_jid = subscription_jid or self.client.local_jid.bare() iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.SET) iq.payload = pubsub_xso.Request( pubsub_xso.Unsubscribe(subscription_jid, node=node, subid=subid) ) yield from self.client.send(iq)
[ "def", "unsubscribe", "(", "self", ",", "jid", ",", "node", "=", "None", ",", "*", ",", "subscription_jid", "=", "None", ",", "subid", "=", "None", ")", ":", "subscription_jid", "=", "subscription_jid", "or", "self", ".", "client", ".", "local_jid", ".", "bare", "(", ")", "iq", "=", "aioxmpp", ".", "stanza", ".", "IQ", "(", "to", "=", "jid", ",", "type_", "=", "aioxmpp", ".", "structs", ".", "IQType", ".", "SET", ")", "iq", ".", "payload", "=", "pubsub_xso", ".", "Request", "(", "pubsub_xso", ".", "Unsubscribe", "(", "subscription_jid", ",", "node", "=", "node", ",", "subid", "=", "subid", ")", ")", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")" ]
Unsubscribe from a node. :param jid: Address of the PubSub service. :type jid: :class:`aioxmpp.JID` :param node: Name of the PubSub node to unsubscribe from. :type node: :class:`str` :param subscription_jid: The address to subscribe from the service. :type subscription_jid: :class:`aioxmpp.JID` :param subid: Unique ID of the subscription to remove. :type subid: :class:`str` :raises aioxmpp.errors.XMPPError: as returned by the service By default, the unsubscribe request will be for the bare JID of the client. It can be specified explicitly using the `subscription_jid` argument. If available, the `subid` should also be specified. If an error occurs, the corresponding :class:`~.errors.XMPPError` is raised.
[ "Unsubscribe", "from", "a", "node", "." ]
python
train
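A minimal usage sketch for the unsubscribe call above. Here `pubsub` is assumed to be an already-summoned instance of the service class that defines this coroutine, and the service JID and node name are placeholders; error handling for aioxmpp.errors.XMPPError is omitted.

    import aioxmpp

    async def leave_node(pubsub):
        # Unsubscribe the client's bare JID (the default) from an assumed node.
        await pubsub.unsubscribe(
            aioxmpp.JID.fromstr("pubsub.example.com"),
            node="example-node",
        )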
google/transitfeed
merge.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L193-L216
def _GenerateStatsTable(self, feed_merger): """Generate an HTML table of merge statistics. Args: feed_merger: The FeedMerger instance. Returns: The generated HTML as a string. """ rows = [] rows.append('<tr><th class="header"/><th class="header">Merged</th>' '<th class="header">Copied from old feed</th>' '<th class="header">Copied from new feed</th></tr>') for merger in feed_merger.GetMergerList(): stats = merger.GetMergeStats() if stats is None: continue merged, not_merged_a, not_merged_b = stats rows.append('<tr><th class="header">%s</th>' '<td class="header">%d</td>' '<td class="header">%d</td>' '<td class="header">%d</td></tr>' % (merger.DATASET_NAME, merged, not_merged_a, not_merged_b)) return '<table>%s</table>' % '\n'.join(rows)
[ "def", "_GenerateStatsTable", "(", "self", ",", "feed_merger", ")", ":", "rows", "=", "[", "]", "rows", ".", "append", "(", "'<tr><th class=\"header\"/><th class=\"header\">Merged</th>'", "'<th class=\"header\">Copied from old feed</th>'", "'<th class=\"header\">Copied from new feed</th></tr>'", ")", "for", "merger", "in", "feed_merger", ".", "GetMergerList", "(", ")", ":", "stats", "=", "merger", ".", "GetMergeStats", "(", ")", "if", "stats", "is", "None", ":", "continue", "merged", ",", "not_merged_a", ",", "not_merged_b", "=", "stats", "rows", ".", "append", "(", "'<tr><th class=\"header\">%s</th>'", "'<td class=\"header\">%d</td>'", "'<td class=\"header\">%d</td>'", "'<td class=\"header\">%d</td></tr>'", "%", "(", "merger", ".", "DATASET_NAME", ",", "merged", ",", "not_merged_a", ",", "not_merged_b", ")", ")", "return", "'<table>%s</table>'", "%", "'\\n'", ".", "join", "(", "rows", ")" ]
Generate an HTML table of merge statistics. Args: feed_merger: The FeedMerger instance. Returns: The generated HTML as a string.
[ "Generate", "an", "HTML", "table", "of", "merge", "statistics", "." ]
python
train
ska-sa/purr
Purr/LogEntry.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/LogEntry.py#L372-L384
def setPrevUpNextLinks(self, prev=None, up=None, next=None): """Sets Prev link to point to the LogEntry object "prev". Set that object's Next link to point to us. Sets the "up" link to the URL 'up' (if up != None.) Sets the Next link to the entry 'next' (if next != None), or to nothing if next == ''.""" if prev is not None: if prev: self._prev_link = quote_url(prev._relIndexLink()) prev._next_link = quote_url(self._relIndexLink()) else: self._prev_link = None if up is not None: self._up_link = up and quote_url(up) if next is not None: self._next_link = next and quote_url(next._relIndexLink())
[ "def", "setPrevUpNextLinks", "(", "self", ",", "prev", "=", "None", ",", "up", "=", "None", ",", "next", "=", "None", ")", ":", "if", "prev", "is", "not", "None", ":", "if", "prev", ":", "self", ".", "_prev_link", "=", "quote_url", "(", "prev", ".", "_relIndexLink", "(", ")", ")", "prev", ".", "_next_link", "=", "quote_url", "(", "self", ".", "_relIndexLink", "(", ")", ")", "else", ":", "self", ".", "_prev_link", "=", "None", "if", "up", "is", "not", "None", ":", "self", ".", "_up_link", "=", "up", "and", "quote_url", "(", "up", ")", "if", "next", "is", "not", "None", ":", "self", ".", "_next_link", "=", "next", "and", "quote_url", "(", "next", ".", "_relIndexLink", "(", ")", ")" ]
Sets Prev link to point to the LogEntry object "prev". Set that object's Next link to point to us. Sets the "up" link to the URL 'up' (if up != None.) Sets the Next link to the entry 'next' (if next != None), or to nothing if next == ''.
[ "Sets", "Prev", "link", "to", "point", "to", "the", "LogEntry", "object", "prev", ".", "Set", "that", "object", "s", "Next", "link", "to", "point", "to", "us", ".", "Sets", "the", "up", "link", "to", "the", "URL", "up", "(", "if", "up", "!", "=", "None", ".", ")", "Sets", "the", "Next", "link", "to", "the", "entry", "next", "(", "if", "next", "!", "=", "None", ")", "or", "to", "nothing", "if", "next", "==", "." ]
python
train
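A sketch of how the navigation links might be wired across an ordered list of entries, based only on the signature and docstring above; `entries` and `index_url` are assumed to exist elsewhere.

    def wire_navigation(entries, index_url):
        # Passing prev also updates that entry's Next link, per the method body;
        # '' for prev/next means "no link", matching the docstring.
        for i, entry in enumerate(entries):
            prev_entry = entries[i - 1] if i > 0 else ''
            next_entry = entries[i + 1] if i + 1 < len(entries) else ''
            entry.setPrevUpNextLinks(prev=prev_entry, up=index_url, next=next_entry)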
bububa/pyTOP
pyTOP/simba.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L367-L378
def update(self, campaign_id, title, is_smooth, online_status, nick=None): '''xxxxx.xxxxx.campaign.update =================================== 更新一个推广计划,可以设置推广计划名字、是否平滑消耗,只有在设置了日限额后平滑消耗才会产生作用。''' request = TOPRequest('xxxxx.xxxxx.campaign.update') request['campaign_id'] = campaign_id request['title'] = title request['is_smooth'] = is_smooth request['online_status'] = online_status if nick!=None: request['nick'] = nick self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':Campaign}) return self.result
[ "def", "update", "(", "self", ",", "campaign_id", ",", "title", ",", "is_smooth", ",", "online_status", ",", "nick", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.campaign.update'", ")", "request", "[", "'campaign_id'", "]", "=", "campaign_id", "request", "[", "'title'", "]", "=", "title", "request", "[", "'is_smooth'", "]", "=", "is_smooth", "request", "[", "'online_status'", "]", "=", "online_status", "if", "nick", "!=", "None", ":", "request", "[", "'nick'", "]", "=", "nick", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ",", "fields", "=", "[", "'success'", ",", "'result'", ",", "'success'", ",", "'result_code'", ",", "'result_message'", "]", ",", "models", "=", "{", "'result'", ":", "Campaign", "}", ")", "return", "self", ".", "result" ]
xxxxx.xxxxx.campaign.update =================================== Update a promotion campaign: you can set the campaign title and whether spending is smoothed; smoothing only takes effect once a daily budget limit has been set.
[ "xxxxx", ".", "xxxxx", ".", "campaign", ".", "update", "===================================", "更新一个推广计划,可以设置推广计划名字、是否平滑消耗,只有在设置了日限额后平滑消耗才会产生作用。" ]
python
train
ib-lundgren/flask-oauthprovider
examples/mongo_demoprovider/login.py
https://github.com/ib-lundgren/flask-oauthprovider/blob/6c91e8c11fc3cee410cb755d52d9d2c5331ee324/examples/mongo_demoprovider/login.py#L65-L82
def create_profile(): """If this is the user's first login, the create_or_login function will redirect here so that the user can set up his profile. """ if g.user is not None or 'openid' not in session: return redirect(url_for('index')) if request.method == 'POST': name = request.form['name'] email = request.form['email'] if not name: flash(u'Error: you have to provide a name') elif '@' not in email: flash(u'Error: you have to enter a valid email address') else: flash(u'Profile successfully created') User.get_collection().insert(User(name, email, session['openid'])) return redirect(oid.get_next_url()) return render_template('create_profile.html', next_url=oid.get_next_url())
[ "def", "create_profile", "(", ")", ":", "if", "g", ".", "user", "is", "not", "None", "or", "'openid'", "not", "in", "session", ":", "return", "redirect", "(", "url_for", "(", "'index'", ")", ")", "if", "request", ".", "method", "==", "'POST'", ":", "name", "=", "request", ".", "form", "[", "'name'", "]", "email", "=", "request", ".", "form", "[", "'email'", "]", "if", "not", "name", ":", "flash", "(", "u'Error: you have to provide a name'", ")", "elif", "'@'", "not", "in", "email", ":", "flash", "(", "u'Error: you have to enter a valid email address'", ")", "else", ":", "flash", "(", "u'Profile successfully created'", ")", "User", ".", "get_collection", "(", ")", ".", "insert", "(", "User", "(", "name", ",", "email", ",", "session", "[", "'openid'", "]", ")", ")", "return", "redirect", "(", "oid", ".", "get_next_url", "(", ")", ")", "return", "render_template", "(", "'create_profile.html'", ",", "next_url", "=", "oid", ".", "get_next_url", "(", ")", ")" ]
If this is the user's first login, the create_or_login function will redirect here so that the user can set up his profile.
[ "If", "this", "is", "the", "user", "s", "first", "login", "the", "create_or_login", "function", "will", "redirect", "here", "so", "that", "the", "user", "can", "set", "up", "his", "profile", "." ]
python
train
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L1097-L1104
def pending_assignment(self): """Return the pending partition assignment that this state represents.""" return { self.partitions[pid].name: [ self.brokers[bid].id for bid in self.replicas[pid] ] for pid in set(self.pending_partitions) }
[ "def", "pending_assignment", "(", "self", ")", ":", "return", "{", "self", ".", "partitions", "[", "pid", "]", ".", "name", ":", "[", "self", ".", "brokers", "[", "bid", "]", ".", "id", "for", "bid", "in", "self", ".", "replicas", "[", "pid", "]", "]", "for", "pid", "in", "set", "(", "self", ".", "pending_partitions", ")", "}" ]
Return the pending partition assignment that this state represents.
[ "Return", "the", "pending", "partition", "assignment", "that", "this", "state", "represents", "." ]
python
train
madedotcom/photon-pump
photonpump/messages.py
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/messages.py#L155-L170
def from_bytes(cls, data): """ I am so sorry. """ len_username = int.from_bytes(data[0:2], byteorder="big") offset_username = 2 + len_username username = data[2:offset_username].decode("UTF-8") offset_password = 2 + offset_username len_password = int.from_bytes( data[offset_username:offset_password], byteorder="big" ) pass_begin = offset_password pass_end = offset_password + len_password password = data[pass_begin:pass_end].decode("UTF-8") return cls(username, password)
[ "def", "from_bytes", "(", "cls", ",", "data", ")", ":", "len_username", "=", "int", ".", "from_bytes", "(", "data", "[", "0", ":", "2", "]", ",", "byteorder", "=", "\"big\"", ")", "offset_username", "=", "2", "+", "len_username", "username", "=", "data", "[", "2", ":", "offset_username", "]", ".", "decode", "(", "\"UTF-8\"", ")", "offset_password", "=", "2", "+", "offset_username", "len_password", "=", "int", ".", "from_bytes", "(", "data", "[", "offset_username", ":", "offset_password", "]", ",", "byteorder", "=", "\"big\"", ")", "pass_begin", "=", "offset_password", "pass_end", "=", "offset_password", "+", "len_password", "password", "=", "data", "[", "pass_begin", ":", "pass_end", "]", ".", "decode", "(", "\"UTF-8\"", ")", "return", "cls", "(", "username", ",", "password", ")" ]
I am so sorry.
[ "I", "am", "so", "sorry", "." ]
python
train
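A round-trip sketch of the byte layout from_bytes decodes: a 2-byte big-endian username length, the UTF-8 username, a 2-byte password length, then the password. The encoding helper and the `Credentials` class name are assumptions made for illustration.

    def encode_credentials(username, password):
        # Hypothetical inverse of from_bytes; not part of photon-pump itself.
        user = username.encode("UTF-8")
        pwd = password.encode("UTF-8")
        return (len(user).to_bytes(2, "big") + user
                + len(pwd).to_bytes(2, "big") + pwd)

    data = encode_credentials("admin", "s3cret")
    # Credentials.from_bytes(data) would then reconstruct ("admin", "s3cret").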
orbingol/NURBS-Python
geomdl/_tessellate.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L217-L246
def surface_tessellate(v1, v2, v3, v4, vidx, tidx, trim_curves, tessellate_args): """ Triangular tessellation algorithm for surfaces with no trims. This function can be directly used as an input to :func:`.make_triangle_mesh` using ``tessellate_func`` keyword argument. :param v1: vertex 1 :type v1: Vertex :param v2: vertex 2 :type v2: Vertex :param v3: vertex 3 :type v3: Vertex :param v4: vertex 4 :type v4: Vertex :param vidx: vertex numbering start value :type vidx: int :param tidx: triangle numbering start value :type tidx: int :param trim_curves: trim curves :type: list, tuple :param tessellate_args: tessellation arguments :type tessellate_args: dict :return: lists of vertex and triangle objects in (vertex_list, triangle_list) format :type: tuple """ # Triangulate vertices tris = polygon_triangulate(tidx, v1, v2, v3, v4) # Return vertex and triangle lists return [], tris
[ "def", "surface_tessellate", "(", "v1", ",", "v2", ",", "v3", ",", "v4", ",", "vidx", ",", "tidx", ",", "trim_curves", ",", "tessellate_args", ")", ":", "# Triangulate vertices", "tris", "=", "polygon_triangulate", "(", "tidx", ",", "v1", ",", "v2", ",", "v3", ",", "v4", ")", "# Return vertex and triangle lists", "return", "[", "]", ",", "tris" ]
Triangular tessellation algorithm for surfaces with no trims. This function can be directly used as an input to :func:`.make_triangle_mesh` using ``tessellate_func`` keyword argument. :param v1: vertex 1 :type v1: Vertex :param v2: vertex 2 :type v2: Vertex :param v3: vertex 3 :type v3: Vertex :param v4: vertex 4 :type v4: Vertex :param vidx: vertex numbering start value :type vidx: int :param tidx: triangle numbering start value :type tidx: int :param trim_curves: trim curves :type: list, tuple :param tessellate_args: tessellation arguments :type tessellate_args: dict :return: lists of vertex and triangle objects in (vertex_list, triangle_list) format :type: tuple
[ "Triangular", "tessellation", "algorithm", "for", "surfaces", "with", "no", "trims", "." ]
python
train
p3trus/slave
slave/protocol.py
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/protocol.py#L117-L147
def parse_response(self, response, header=None): """Parses the response message. The following graph shows the structure of response messages. :: +----------+ +--+ data sep +<-+ | +----------+ | | | +--------+ +------------+ | +------+ | +-->| header +------->+ header sep +---+--->+ data +----+----+ | +--------+ +------------+ +------+ | | | --+ +----------+ +--> | +--+ data sep +<-+ | | | +----------+ | | | | | | | | +------+ | | +--------------------------------------+--->+ data +----+----+ +------+ """ response = response.decode(self.encoding) if header: header = "".join((self.resp_prefix, header, self.resp_header_sep)) if not response.startswith(header): raise IEC60488.ParsingError('Response header mismatch') response = response[len(header):] return response.split(self.resp_data_sep)
[ "def", "parse_response", "(", "self", ",", "response", ",", "header", "=", "None", ")", ":", "response", "=", "response", ".", "decode", "(", "self", ".", "encoding", ")", "if", "header", ":", "header", "=", "\"\"", ".", "join", "(", "(", "self", ".", "resp_prefix", ",", "header", ",", "self", ".", "resp_header_sep", ")", ")", "if", "not", "response", ".", "startswith", "(", "header", ")", ":", "raise", "IEC60488", ".", "ParsingError", "(", "'Response header mismatch'", ")", "response", "=", "response", "[", "len", "(", "header", ")", ":", "]", "return", "response", ".", "split", "(", "self", ".", "resp_data_sep", ")" ]
Parses the response message. The following graph shows the structure of response messages. :: +----------+ +--+ data sep +<-+ | +----------+ | | | +--------+ +------------+ | +------+ | +-->| header +------->+ header sep +---+--->+ data +----+----+ | +--------+ +------------+ +------+ | | | --+ +----------+ +--> | +--+ data sep +<-+ | | | +----------+ | | | | | | | | +------+ | | +--------------------------------------+--->+ data +----+----+ +------+
[ "Parses", "the", "response", "message", "." ]
python
train
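A stand-alone walk-through of the same parsing steps, which may help when reading the diagram above. The separator values used here (prefix ':', header separator ' ', data separator ',' and ASCII encoding) are assumptions, not necessarily the protocol's real defaults.

    response = b":FREQ 1.0,2.0,3.0".decode("ascii")
    header = "".join((":", "FREQ", " "))      # resp_prefix + header + resp_header_sep
    assert response.startswith(header)        # a mismatch would raise ParsingError
    data = response[len(header):].split(",")  # split the remainder on resp_data_sep
    # data == ['1.0', '2.0', '3.0']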
coinkite/connectrum
connectrum/client.py
https://github.com/coinkite/connectrum/blob/99948f92cc5c3ecb1a8a70146294014e608e50fc/connectrum/client.py#L279-L294
def subscribe(self, method, *params): ''' Perform a remote command which will stream events/data to us. Expects a method name, which look like: server.peers.subscribe .. and sometimes take arguments, all of which are positional. Returns a tuple: (Future, asyncio.Queue). The future will have the result of the initial call, and the queue will receive additional responses as they happen. ''' assert '.' in method assert method.endswith('subscribe') return self._send_request(method, params, is_subscribe=True)
[ "def", "subscribe", "(", "self", ",", "method", ",", "*", "params", ")", ":", "assert", "'.'", "in", "method", "assert", "method", ".", "endswith", "(", "'subscribe'", ")", "return", "self", ".", "_send_request", "(", "method", ",", "params", ",", "is_subscribe", "=", "True", ")" ]
Perform a remote command which will stream events/data to us. Expects a method name, which looks like: server.peers.subscribe .. and sometimes takes arguments, all of which are positional. Returns a tuple: (Future, asyncio.Queue). The future will have the result of the initial call, and the queue will receive additional responses as they happen.
[ "Perform", "a", "remote", "command", "which", "will", "stream", "events", "/", "data", "to", "us", "." ]
python
train
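A hedged usage sketch: `client` is assumed to be a connected client object exposing this subscribe() method, and the Electrum method name is only an example that satisfies the asserts. Per the docstring, the call returns a future for the initial result plus a queue of later notifications.

    async def watch_headers(client):
        fut, queue = client.subscribe("blockchain.headers.subscribe")
        first = await fut                # result of the initial request
        while True:
            update = await queue.get()   # streamed follow-up notifications
            print(update)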
hydpy-dev/hydpy
hydpy/auxs/xmltools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L1396-L1516
def item(self): """ ToDo >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d' >>> with TestIO(): ... hp.prepare_everything() ... interface = XMLInterface('multiple_runs.xml') >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0] >>> item = var.item >>> item.value array(2.0) >>> hp.elements.land_dill.model.parameters.control.alpha alpha(1.0) >>> item.update_variables() >>> hp.elements.land_dill.model.parameters.control.alpha alpha(2.0) >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0] >>> item = var.item >>> item.value array(5.0) >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(0.0) >>> item.update_variables() >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(5.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0] >>> item = var.item >>> item.name 'sm_lahn_2' >>> item.value array(123.0) >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805, 160.91917, 159.62434, 165.65575, 164.63255) >>> item.update_variables() >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1] >>> item = var.item >>> item.name 'sm_lahn_1' >>> item.value array([ 110., 120., 130., 140., 150., 160., 170., 180., 190., 200., 210., 220., 230.]) >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252, 125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983, 142.84148) >>> from hydpy import pub >>> with pub.options.warntrim(False): ... item.update_variables() >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0) >>> for element in pub.selections.headwaters.elements: ... element.model.parameters.control.rfcf(1.1) >>> for element in pub.selections.nonheadwaters.elements: ... element.model.parameters.control.rfcf(1.0) >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars: ... for var in subvars.vars: ... var.item.update_variables() >>> for element in hp.elements.catchment: ... print(element, repr(element.model.parameters.control.sfcf)) land_dill sfcf(1.4) land_lahn_1 sfcf(1.4) land_lahn_2 sfcf(1.2) land_lahn_3 sfcf(field=1.1, forest=1.2) >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0] >>> hp.elements.land_dill.model.sequences.states.sm = 1.0 >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \ 1.0, 1.0, 1.0] land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, \ 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0] land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, \ 123.0, 123.0, 123.0, 123.0] land_lahn_3_states_sm [101.3124...] >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt >>> qt(1.0) >>> qt.series = 2.0 >>> for var in vars_: ... for name, target in var.item.yield_name2value(): ... 
print(name, target) # doctest: +ELLIPSIS land_dill_fluxes_qt 1.0 land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0] >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0] >>> hp.nodes.dill.sequences.sim.series = range(5) >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0] >>> for name, target in var.item.yield_name2value(2, 4): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [2.0, 3.0] """ target = f'{self.master.name}.{self.name}' if self.master.name == 'nodes': master = self.master.name itemgroup = self.master.master.name else: master = self.master.master.name itemgroup = self.master.master.master.name itemclass = _ITEMGROUP2ITEMCLASS[itemgroup] if itemgroup == 'getitems': return self._get_getitem(target, master, itemclass) return self._get_changeitem(target, master, itemclass, itemgroup)
[ "def", "item", "(", "self", ")", ":", "target", "=", "f'{self.master.name}.{self.name}'", "if", "self", ".", "master", ".", "name", "==", "'nodes'", ":", "master", "=", "self", ".", "master", ".", "name", "itemgroup", "=", "self", ".", "master", ".", "master", ".", "name", "else", ":", "master", "=", "self", ".", "master", ".", "master", ".", "name", "itemgroup", "=", "self", ".", "master", ".", "master", ".", "master", ".", "name", "itemclass", "=", "_ITEMGROUP2ITEMCLASS", "[", "itemgroup", "]", "if", "itemgroup", "==", "'getitems'", ":", "return", "self", ".", "_get_getitem", "(", "target", ",", "master", ",", "itemclass", ")", "return", "self", ".", "_get_changeitem", "(", "target", ",", "master", ",", "itemclass", ",", "itemgroup", ")" ]
ToDo >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d' >>> with TestIO(): ... hp.prepare_everything() ... interface = XMLInterface('multiple_runs.xml') >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0] >>> item = var.item >>> item.value array(2.0) >>> hp.elements.land_dill.model.parameters.control.alpha alpha(1.0) >>> item.update_variables() >>> hp.elements.land_dill.model.parameters.control.alpha alpha(2.0) >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0] >>> item = var.item >>> item.value array(5.0) >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(0.0) >>> item.update_variables() >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(5.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0] >>> item = var.item >>> item.name 'sm_lahn_2' >>> item.value array(123.0) >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805, 160.91917, 159.62434, 165.65575, 164.63255) >>> item.update_variables() >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1] >>> item = var.item >>> item.name 'sm_lahn_1' >>> item.value array([ 110., 120., 130., 140., 150., 160., 170., 180., 190., 200., 210., 220., 230.]) >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252, 125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983, 142.84148) >>> from hydpy import pub >>> with pub.options.warntrim(False): ... item.update_variables() >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0) >>> for element in pub.selections.headwaters.elements: ... element.model.parameters.control.rfcf(1.1) >>> for element in pub.selections.nonheadwaters.elements: ... element.model.parameters.control.rfcf(1.0) >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars: ... for var in subvars.vars: ... var.item.update_variables() >>> for element in hp.elements.catchment: ... print(element, repr(element.model.parameters.control.sfcf)) land_dill sfcf(1.4) land_lahn_1 sfcf(1.4) land_lahn_2 sfcf(1.2) land_lahn_3 sfcf(field=1.1, forest=1.2) >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0] >>> hp.elements.land_dill.model.sequences.states.sm = 1.0 >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \ 1.0, 1.0, 1.0] land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, \ 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0] land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, \ 123.0, 123.0, 123.0, 123.0] land_lahn_3_states_sm [101.3124...] >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt >>> qt(1.0) >>> qt.series = 2.0 >>> for var in vars_: ... for name, target in var.item.yield_name2value(): ... 
print(name, target) # doctest: +ELLIPSIS land_dill_fluxes_qt 1.0 land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0] >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0] >>> hp.nodes.dill.sequences.sim.series = range(5) >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0] >>> for name, target in var.item.yield_name2value(2, 4): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [2.0, 3.0]
[ "ToDo" ]
python
train
dhermes/bezier
docs/make_images.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L1130-L1138
def classify_intersection8(s, curve1, surface1, curve2, surface2): """Image for :func:`._surface_helpers.classify_intersection` docstring.""" if NO_IMAGES: return ax = classify_help(s, curve1, surface1, curve2, surface2, None) ax.set_xlim(-1.125, 1.125) ax.set_ylim(-0.125, 1.125) save_image(ax.figure, "classify_intersection8.png")
[ "def", "classify_intersection8", "(", "s", ",", "curve1", ",", "surface1", ",", "curve2", ",", "surface2", ")", ":", "if", "NO_IMAGES", ":", "return", "ax", "=", "classify_help", "(", "s", ",", "curve1", ",", "surface1", ",", "curve2", ",", "surface2", ",", "None", ")", "ax", ".", "set_xlim", "(", "-", "1.125", ",", "1.125", ")", "ax", ".", "set_ylim", "(", "-", "0.125", ",", "1.125", ")", "save_image", "(", "ax", ".", "figure", ",", "\"classify_intersection8.png\"", ")" ]
Image for :func:`._surface_helpers.classify_intersection` docstring.
[ "Image", "for", ":", "func", ":", ".", "_surface_helpers", ".", "classify_intersection", "docstring", "." ]
python
train
saltstack/salt
salt/modules/kubernetesmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1563-L1583
def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj
[ "def", "__dict_to_service_spec", "(", "spec", ")", ":", "spec_obj", "=", "kubernetes", ".", "client", ".", "V1ServiceSpec", "(", ")", "for", "key", ",", "value", "in", "iteritems", "(", "spec", ")", ":", "# pylint: disable=too-many-nested-blocks", "if", "key", "==", "'ports'", ":", "spec_obj", ".", "ports", "=", "[", "]", "for", "port", "in", "value", ":", "kube_port", "=", "kubernetes", ".", "client", ".", "V1ServicePort", "(", ")", "if", "isinstance", "(", "port", ",", "dict", ")", ":", "for", "port_key", ",", "port_value", "in", "iteritems", "(", "port", ")", ":", "if", "hasattr", "(", "kube_port", ",", "port_key", ")", ":", "setattr", "(", "kube_port", ",", "port_key", ",", "port_value", ")", "else", ":", "kube_port", ".", "port", "=", "port", "spec_obj", ".", "ports", ".", "append", "(", "kube_port", ")", "elif", "hasattr", "(", "spec_obj", ",", "key", ")", ":", "setattr", "(", "spec_obj", ",", "key", ",", "value", ")", "return", "spec_obj" ]
Converts a dictionary into kubernetes V1ServiceSpec instance.
[ "Converts", "a", "dictionary", "into", "kubernetes", "V1ServiceSpec", "instance", "." ]
python
train
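A sketch of the kind of dictionary this helper converts; the keys are assumed to match attribute names of kubernetes.client.V1ServiceSpec and V1ServicePort.

    spec = {
        "type": "NodePort",
        "selector": {"app": "nginx"},
        "ports": [
            {"port": 80, "target_port": 8080, "protocol": "TCP"},
            443,  # a bare value only sets the port number
        ],
    }
    # __dict_to_service_spec(spec) returns a V1ServiceSpec with two V1ServicePort
    # entries; 'type' and 'selector' are copied over via setattr because
    # V1ServiceSpec exposes attributes of those names.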
google/tangent
tangent/naming.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/naming.py#L171-L184
def unique(self, name): """Make a variable name unique by appending a number if needed.""" # Make sure the name is valid name = self.valid(name) # Make sure it's not too long name = self.trim(name) # Now make sure it's unique unique_name = name i = 2 while unique_name in self.names: unique_name = name + str(i) i += 1 self.names.add(unique_name) return unique_name
[ "def", "unique", "(", "self", ",", "name", ")", ":", "# Make sure the name is valid", "name", "=", "self", ".", "valid", "(", "name", ")", "# Make sure it's not too long", "name", "=", "self", ".", "trim", "(", "name", ")", "# Now make sure it's unique", "unique_name", "=", "name", "i", "=", "2", "while", "unique_name", "in", "self", ".", "names", ":", "unique_name", "=", "name", "+", "str", "(", "i", ")", "i", "+=", "1", "self", ".", "names", ".", "add", "(", "unique_name", ")", "return", "unique_name" ]
Make a variable name unique by appending a number if needed.
[ "Make", "a", "variable", "name", "unique", "by", "appending", "a", "number", "if", "needed", "." ]
python
train
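A quick illustration of the numbering behaviour, assuming `namer` is an instance of the (unshown) class that owns the `names` set and that 'x' passes valid() and trim() unchanged.

    namer.unique("x")   # -> 'x'   (first use is kept as-is)
    namer.unique("x")   # -> 'x2'  (the suffix starts at 2 on the first clash)
    namer.unique("x")   # -> 'x3'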
HPAC/matchpy
matchpy/matching/many_to_one.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/many_to_one.py#L369-L392
def _internal_add(self, pattern: Pattern, label, renaming) -> int: """Add a new pattern to the matcher. Equivalent patterns are not added again. However, patterns that are structurally equivalent, but have different constraints or different variable names are distinguished by the matcher. Args: pattern: The pattern to add. Returns: The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`. """ pattern_index = len(self.patterns) renamed_constraints = [c.with_renamed_vars(renaming) for c in pattern.local_constraints] constraint_indices = [self._add_constraint(c, pattern_index) for c in renamed_constraints] self.patterns.append((pattern, label, constraint_indices)) self.pattern_vars.append(renaming) pattern = rename_variables(pattern.expression, renaming) state = self.root patterns_stack = [deque([pattern])] self._process_pattern_stack(state, patterns_stack, renamed_constraints, pattern_index) return pattern_index
[ "def", "_internal_add", "(", "self", ",", "pattern", ":", "Pattern", ",", "label", ",", "renaming", ")", "->", "int", ":", "pattern_index", "=", "len", "(", "self", ".", "patterns", ")", "renamed_constraints", "=", "[", "c", ".", "with_renamed_vars", "(", "renaming", ")", "for", "c", "in", "pattern", ".", "local_constraints", "]", "constraint_indices", "=", "[", "self", ".", "_add_constraint", "(", "c", ",", "pattern_index", ")", "for", "c", "in", "renamed_constraints", "]", "self", ".", "patterns", ".", "append", "(", "(", "pattern", ",", "label", ",", "constraint_indices", ")", ")", "self", ".", "pattern_vars", ".", "append", "(", "renaming", ")", "pattern", "=", "rename_variables", "(", "pattern", ".", "expression", ",", "renaming", ")", "state", "=", "self", ".", "root", "patterns_stack", "=", "[", "deque", "(", "[", "pattern", "]", ")", "]", "self", ".", "_process_pattern_stack", "(", "state", ",", "patterns_stack", ",", "renamed_constraints", ",", "pattern_index", ")", "return", "pattern_index" ]
Add a new pattern to the matcher. Equivalent patterns are not added again. However, patterns that are structurally equivalent, but have different constraints or different variable names are distinguished by the matcher. Args: pattern: The pattern to add. Returns: The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.
[ "Add", "a", "new", "pattern", "to", "the", "matcher", "." ]
python
train
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L1034-L1067
def relocated_grid_from_grid_jit(grid, border_grid): """ Relocate the coordinates of a grid to its border if they are outside the border. This is performed as \ follows: 1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2) Compute the radial distance of every grid coordinate from the origin. 3) For every coordinate, find its nearest pixel in the border. 4) Determine if it is outside the border, by comparing its radial distance from the origin to its paid \ border pixel's radial distance. 5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border \ (if its inside the border, do nothing). """ border_origin = np.zeros(2) border_origin[0] = np.mean(border_grid[:, 0]) border_origin[1] = np.mean(border_grid[:, 1]) border_grid_radii = np.sqrt(np.add(np.square(np.subtract(border_grid[:, 0], border_origin[0])), np.square(np.subtract(border_grid[:, 1], border_origin[1])))) border_min_radii = np.min(border_grid_radii) grid_radii = np.sqrt(np.add(np.square(np.subtract(grid[:, 0], border_origin[0])), np.square(np.subtract(grid[:, 1], border_origin[1])))) for pixel_index in range(grid.shape[0]): if grid_radii[pixel_index] > border_min_radii: closest_pixel_index = np.argmin(np.square(grid[pixel_index, 0] - border_grid[:, 0]) + np.square(grid[pixel_index, 1] - border_grid[:, 1])) move_factor = border_grid_radii[closest_pixel_index] / grid_radii[pixel_index] if move_factor < 1.0: grid[pixel_index, :] = move_factor * (grid[pixel_index, :] - border_origin[:]) + border_origin[:] return grid
[ "def", "relocated_grid_from_grid_jit", "(", "grid", ",", "border_grid", ")", ":", "border_origin", "=", "np", ".", "zeros", "(", "2", ")", "border_origin", "[", "0", "]", "=", "np", ".", "mean", "(", "border_grid", "[", ":", ",", "0", "]", ")", "border_origin", "[", "1", "]", "=", "np", ".", "mean", "(", "border_grid", "[", ":", ",", "1", "]", ")", "border_grid_radii", "=", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "np", ".", "subtract", "(", "border_grid", "[", ":", ",", "0", "]", ",", "border_origin", "[", "0", "]", ")", ")", ",", "np", ".", "square", "(", "np", ".", "subtract", "(", "border_grid", "[", ":", ",", "1", "]", ",", "border_origin", "[", "1", "]", ")", ")", ")", ")", "border_min_radii", "=", "np", ".", "min", "(", "border_grid_radii", ")", "grid_radii", "=", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "np", ".", "subtract", "(", "grid", "[", ":", ",", "0", "]", ",", "border_origin", "[", "0", "]", ")", ")", ",", "np", ".", "square", "(", "np", ".", "subtract", "(", "grid", "[", ":", ",", "1", "]", ",", "border_origin", "[", "1", "]", ")", ")", ")", ")", "for", "pixel_index", "in", "range", "(", "grid", ".", "shape", "[", "0", "]", ")", ":", "if", "grid_radii", "[", "pixel_index", "]", ">", "border_min_radii", ":", "closest_pixel_index", "=", "np", ".", "argmin", "(", "np", ".", "square", "(", "grid", "[", "pixel_index", ",", "0", "]", "-", "border_grid", "[", ":", ",", "0", "]", ")", "+", "np", ".", "square", "(", "grid", "[", "pixel_index", ",", "1", "]", "-", "border_grid", "[", ":", ",", "1", "]", ")", ")", "move_factor", "=", "border_grid_radii", "[", "closest_pixel_index", "]", "/", "grid_radii", "[", "pixel_index", "]", "if", "move_factor", "<", "1.0", ":", "grid", "[", "pixel_index", ",", ":", "]", "=", "move_factor", "*", "(", "grid", "[", "pixel_index", ",", ":", "]", "-", "border_origin", "[", ":", "]", ")", "+", "border_origin", "[", ":", "]", "return", "grid" ]
Relocate the coordinates of a grid to its border if they are outside the border. This is performed as follows: 1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2) Compute the radial distance of every grid coordinate from the origin. 3) For every coordinate, find its nearest pixel in the border. 4) Determine if it is outside the border, by comparing its radial distance from the origin to its paired border pixel's radial distance. 5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border (if it is inside the border, do nothing).
[ "Relocate", "the", "coordinates", "of", "a", "grid", "to", "its", "border", "if", "they", "are", "outside", "the", "border", ".", "This", "is", "performed", "as", "\\", "follows", ":" ]
python
valid
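A small numeric check of the relocation rule, assuming the function can be called directly on plain NumPy arrays: with a border of four points on the unit circle, a grid point at (2, 0) is pulled back onto the border while an interior point is left untouched.

    import numpy as np

    border = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
    grid = np.array([[2.0, 0.0], [0.1, 0.1]])
    relocated = relocated_grid_from_grid_jit(grid, border)
    # relocated[0] is approximately [1.0, 0.0] (move_factor = 1 / 2);
    # relocated[1] lies inside the border and is returned unchanged.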
coops/r53
src/r53/r53.py
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L39-L71
def fetch_config(zone, conn): """Fetch all pieces of a Route 53 config from Amazon. Args: zone: string, hosted zone id. conn: boto.route53.Route53Connection Returns: list of ElementTrees, one for each piece of config.""" more_to_fetch = True cfg_chunks = [] next_name = None next_type = None next_identifier = None while more_to_fetch == True: more_to_fetch = False getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone) if next_name is not None: getstr += '?name=%s&type=%s' % (next_name, next_type) if next_identifier is not None: getstr += '&identifier=%s' % next_identifier log.debug('requesting %s' % getstr) resp = conn.make_request('GET', getstr) etree = lxml.etree.parse(resp) cfg_chunks.append(etree) root = etree.getroot() truncated = root.find('{%s}IsTruncated' % R53_XMLNS) if truncated is not None and truncated.text == 'true': more_to_fetch = True next_name = root.find('{%s}NextRecordName' % R53_XMLNS).text next_type = root.find('{%s}NextRecordType' % R53_XMLNS).text try: next_identifier = root.find('{%s}NextRecordIdentifier' % R53_XMLNS).text except AttributeError: # may not have next_identifier next_identifier = None return cfg_chunks
[ "def", "fetch_config", "(", "zone", ",", "conn", ")", ":", "more_to_fetch", "=", "True", "cfg_chunks", "=", "[", "]", "next_name", "=", "None", "next_type", "=", "None", "next_identifier", "=", "None", "while", "more_to_fetch", "==", "True", ":", "more_to_fetch", "=", "False", "getstr", "=", "'/%s/hostedzone/%s/rrset'", "%", "(", "R53_API_VERSION", ",", "zone", ")", "if", "next_name", "is", "not", "None", ":", "getstr", "+=", "'?name=%s&type=%s'", "%", "(", "next_name", ",", "next_type", ")", "if", "next_identifier", "is", "not", "None", ":", "getstr", "+=", "'&identifier=%s'", "%", "next_identifier", "log", ".", "debug", "(", "'requesting %s'", "%", "getstr", ")", "resp", "=", "conn", ".", "make_request", "(", "'GET'", ",", "getstr", ")", "etree", "=", "lxml", ".", "etree", ".", "parse", "(", "resp", ")", "cfg_chunks", ".", "append", "(", "etree", ")", "root", "=", "etree", ".", "getroot", "(", ")", "truncated", "=", "root", ".", "find", "(", "'{%s}IsTruncated'", "%", "R53_XMLNS", ")", "if", "truncated", "is", "not", "None", "and", "truncated", ".", "text", "==", "'true'", ":", "more_to_fetch", "=", "True", "next_name", "=", "root", ".", "find", "(", "'{%s}NextRecordName'", "%", "R53_XMLNS", ")", ".", "text", "next_type", "=", "root", ".", "find", "(", "'{%s}NextRecordType'", "%", "R53_XMLNS", ")", ".", "text", "try", ":", "next_identifier", "=", "root", ".", "find", "(", "'{%s}NextRecordIdentifier'", "%", "R53_XMLNS", ")", ".", "text", "except", "AttributeError", ":", "# may not have next_identifier", "next_identifier", "=", "None", "return", "cfg_chunks" ]
Fetch all pieces of a Route 53 config from Amazon. Args: zone: string, hosted zone id. conn: boto.route53.Route53Connection Returns: list of ElementTrees, one for each piece of config.
[ "Fetch", "all", "pieces", "of", "a", "Route", "53", "config", "from", "Amazon", "." ]
python
test
barrust/mediawiki
setup.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/setup.py#L14-L18
def read_file(filepath): """ read the file """ with io.open(filepath, "r") as filepointer: res = filepointer.read() return res
[ "def", "read_file", "(", "filepath", ")", ":", "with", "io", ".", "open", "(", "filepath", ",", "\"r\"", ")", "as", "filepointer", ":", "res", "=", "filepointer", ".", "read", "(", ")", "return", "res" ]
read the file
[ "read", "the", "file" ]
python
train
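Typical setup-script usage of a helper like this; the file name is an assumption.

    long_description = read_file("README.rst")  # read the long description for setup()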
saltstack/salt
salt/cloud/clouds/digitalocean.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/digitalocean.py#L124-L152
def avail_images(call=None): ''' Return a list of the images that are on the provider ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) fetch = True page = 1 ret = {} while fetch: items = query(method='images', command='?page=' + six.text_type(page) + '&per_page=200') for image in items['images']: ret[image['name']] = {} for item in six.iterkeys(image): ret[image['name']][item] = image[item] page += 1 try: fetch = 'next' in items['links']['pages'] except KeyError: fetch = False return ret
[ "def", "avail_images", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_images function must be called with '", "'-f or --function, or with the --list-images option'", ")", "fetch", "=", "True", "page", "=", "1", "ret", "=", "{", "}", "while", "fetch", ":", "items", "=", "query", "(", "method", "=", "'images'", ",", "command", "=", "'?page='", "+", "six", ".", "text_type", "(", "page", ")", "+", "'&per_page=200'", ")", "for", "image", "in", "items", "[", "'images'", "]", ":", "ret", "[", "image", "[", "'name'", "]", "]", "=", "{", "}", "for", "item", "in", "six", ".", "iterkeys", "(", "image", ")", ":", "ret", "[", "image", "[", "'name'", "]", "]", "[", "item", "]", "=", "image", "[", "item", "]", "page", "+=", "1", "try", ":", "fetch", "=", "'next'", "in", "items", "[", "'links'", "]", "[", "'pages'", "]", "except", "KeyError", ":", "fetch", "=", "False", "return", "ret" ]
Return a list of the images that are on the provider
[ "Return", "a", "list", "of", "the", "images", "that", "are", "on", "the", "provider" ]
python
train
fridex/json2sql
json2sql/json2sql.py
https://github.com/fridex/json2sql/blob/a0851dd79827a684319b03fb899e129f81ff2d3a/json2sql/json2sql.py#L67-L110
def json2sql(raw_json=None, **definition): # pylint: disable=too-many-branches """Convert raw dictionary, JSON/YAML to SQL statement. :param raw_json: raw JSON/YAML or file to convert to SQL statement :type raw_json: str or file :return: raw SQL statement :rtype: str """ if raw_json and definition: raise InputError("Cannot process dict and kwargs input at the same time") definition = load_input(raw_json or definition) if not isinstance(definition, dict): raise UnknownStatementError("Unknown statement parsed: %s (type: %s)" % (definition, type(definition))) try: statement = definition.pop('statement', None) if statement is None: raise NoStatementError("No statement provided") statement = statement.lower() if statement == 'delete': return delete2sql(definition) elif statement == 'insert': return insert2sql(definition) elif statement == 'select': return select2sql(definition) elif statement == 'update': return update2sql(definition) elif statement == 'replace': return replace2sql(definition) else: raise UnknownStatementError("Unknown statement provided '%s' in definition %s" % (statement, definition)) except Exception as exc: # pylint: disable=broad-except if isinstance(exc, Json2SqlError): raise raise Json2SqlInternalError("Internal json2sql error: %s" % str(exc)) from exc # Just to make Pylint happy :) raise Json2SqlInternalError("Unreachable state reached")
[ "def", "json2sql", "(", "raw_json", "=", "None", ",", "*", "*", "definition", ")", ":", "# pylint: disable=too-many-branches", "if", "raw_json", "and", "definition", ":", "raise", "InputError", "(", "\"Cannot process dict and kwargs input at the same time\"", ")", "definition", "=", "load_input", "(", "raw_json", "or", "definition", ")", "if", "not", "isinstance", "(", "definition", ",", "dict", ")", ":", "raise", "UnknownStatementError", "(", "\"Unknown statement parsed: %s (type: %s)\"", "%", "(", "definition", ",", "type", "(", "definition", ")", ")", ")", "try", ":", "statement", "=", "definition", ".", "pop", "(", "'statement'", ",", "None", ")", "if", "statement", "is", "None", ":", "raise", "NoStatementError", "(", "\"No statement provided\"", ")", "statement", "=", "statement", ".", "lower", "(", ")", "if", "statement", "==", "'delete'", ":", "return", "delete2sql", "(", "definition", ")", "elif", "statement", "==", "'insert'", ":", "return", "insert2sql", "(", "definition", ")", "elif", "statement", "==", "'select'", ":", "return", "select2sql", "(", "definition", ")", "elif", "statement", "==", "'update'", ":", "return", "update2sql", "(", "definition", ")", "elif", "statement", "==", "'replace'", ":", "return", "replace2sql", "(", "definition", ")", "else", ":", "raise", "UnknownStatementError", "(", "\"Unknown statement provided '%s' in definition %s\"", "%", "(", "statement", ",", "definition", ")", ")", "except", "Exception", "as", "exc", ":", "# pylint: disable=broad-except", "if", "isinstance", "(", "exc", ",", "Json2SqlError", ")", ":", "raise", "raise", "Json2SqlInternalError", "(", "\"Internal json2sql error: %s\"", "%", "str", "(", "exc", ")", ")", "from", "exc", "# Just to make Pylint happy :)", "raise", "Json2SqlInternalError", "(", "\"Unreachable state reached\"", ")" ]
Convert raw dictionary, JSON/YAML to SQL statement. :param raw_json: raw JSON/YAML or file to convert to SQL statement :type raw_json: str or file :return: raw SQL statement :rtype: str
[ "Convert", "raw", "dictionary", "JSON", "/", "YAML", "to", "SQL", "statement", "." ]
python
train
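For reference, a minimal usage sketch of the json2sql() entry point catalogued above is given below. It assumes the package re-exports json2sql at its root (otherwise import it from json2sql.json2sql), and the definition keys 'table' and 'select' are only illustrative — they have to match whatever the underlying select2sql builder expects.

from json2sql import json2sql   # import path assumed; see note above

# The 'statement' key picks the builder (select/insert/update/delete/replace);
# all remaining keys describe the statement itself.
definition = {
    "statement": "select",
    "table": "users",          # hypothetical key, must match select2sql's schema
    "select": ["id", "name"],  # hypothetical key, must match select2sql's schema
}
print(json2sql(**definition))  # should emit a raw SELECT statement as a string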
davidhuser/dhis2.py
dhis2/utils.py
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L102-L116
def version_to_int(value): """ Convert version info to integer :param value: the version received from system/info, e.g. "2.28" :return: integer from version, e.g. 28, None if it couldn't be parsed """ # remove '-SNAPSHOT' value = value.replace('-SNAPSHOT', '') # remove '-RCx' if '-RC' in value: value = value.split('-RC', 1)[0] try: return int(value.split('.')[1]) except (ValueError, IndexError): return
[ "def", "version_to_int", "(", "value", ")", ":", "# remove '-SNAPSHOT'", "value", "=", "value", ".", "replace", "(", "'-SNAPSHOT'", ",", "''", ")", "# remove '-RCx'", "if", "'-RC'", "in", "value", ":", "value", "=", "value", ".", "split", "(", "'-RC'", ",", "1", ")", "[", "0", "]", "try", ":", "return", "int", "(", "value", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "return" ]
Convert version info to integer :param value: the version received from system/info, e.g. "2.28" :return: integer from version, e.g. 28, None if it couldn't be parsed
[ "Convert", "version", "info", "to", "integer", ":", "param", "value", ":", "the", "version", "received", "from", "system", "/", "info", "e", ".", "g", ".", "2", ".", "28", ":", "return", ":", "integer", "from", "version", "e", ".", "g", ".", "28", "None", "if", "it", "couldn", "t", "be", "parsed" ]
python
train
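Since the parsing rules in version_to_int() are short, a quick illustrative check makes the behaviour concrete; the import path below simply follows the record's module layout.

from dhis2.utils import version_to_int

assert version_to_int("2.28") == 28            # plain version string
assert version_to_int("2.30-SNAPSHOT") == 30   # '-SNAPSHOT' suffix is stripped first
assert version_to_int("2.31-RC1") == 31        # '-RCx' suffix is stripped as well
assert version_to_int("nonsense") is None      # unparsable input falls through to None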
saltstack/salt
salt/cloud/clouds/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/nova.py#L1357-L1362
def virtual_interface_create(name, net_name, **kwargs): ''' Create private networks ''' conn = get_conn() return conn.virtual_interface_create(name, net_name)
[ "def", "virtual_interface_create", "(", "name", ",", "net_name", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "get_conn", "(", ")", "return", "conn", ".", "virtual_interface_create", "(", "name", ",", "net_name", ")" ]
Create private networks
[ "Create", "private", "networks" ]
python
train
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L3128-L3173
def write(path, *args, **kwargs): ''' .. versionadded:: 2014.7.0 Write text to a file, overwriting any existing contents. path path to file `*args` strings to write to the file CLI Example: .. code-block:: bash salt '*' file.write /etc/motd \\ "With all thine offerings thou shalt offer salt." .. admonition:: Attention If you need to pass a string to append and that string contains an equal sign, you **must** include the argument name, args. For example: .. code-block:: bash salt '*' file.write /etc/motd args='cheese=spam' salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']" ''' path = os.path.expanduser(path) if 'args' in kwargs: if isinstance(kwargs['args'], list): args = kwargs['args'] else: args = [kwargs['args']] contents = [] for line in args: contents.append('{0}\n'.format(line)) with salt.utils.files.fopen(path, "w") as ofile: ofile.write(salt.utils.stringutils.to_str(''.join(contents))) return 'Wrote {0} lines to "{1}"'.format(len(contents), path)
[ "def", "write", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "'args'", "in", "kwargs", ":", "if", "isinstance", "(", "kwargs", "[", "'args'", "]", ",", "list", ")", ":", "args", "=", "kwargs", "[", "'args'", "]", "else", ":", "args", "=", "[", "kwargs", "[", "'args'", "]", "]", "contents", "=", "[", "]", "for", "line", "in", "args", ":", "contents", ".", "append", "(", "'{0}\\n'", ".", "format", "(", "line", ")", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "\"w\"", ")", "as", "ofile", ":", "ofile", ".", "write", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "''", ".", "join", "(", "contents", ")", ")", ")", "return", "'Wrote {0} lines to \"{1}\"'", ".", "format", "(", "len", "(", "contents", ")", ",", "path", ")" ]
.. versionadded:: 2014.7.0 Write text to a file, overwriting any existing contents. path path to file `*args` strings to write to the file CLI Example: .. code-block:: bash salt '*' file.write /etc/motd \\ "With all thine offerings thou shalt offer salt." .. admonition:: Attention If you need to pass a string to append and that string contains an equal sign, you **must** include the argument name, args. For example: .. code-block:: bash salt '*' file.write /etc/motd args='cheese=spam' salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"
[ "..", "versionadded", "::", "2014", ".", "7", ".", "0" ]
python
train
xhtml2pdf/xhtml2pdf
xhtml2pdf/context.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/context.py#L812-L818
def getFile(self, name, relative=None): """ Returns a file name or None """ if self.pathCallback is not None: return getFile(self._getFileDeprecated(name, relative)) return getFile(name, relative or self.pathDirectory)
[ "def", "getFile", "(", "self", ",", "name", ",", "relative", "=", "None", ")", ":", "if", "self", ".", "pathCallback", "is", "not", "None", ":", "return", "getFile", "(", "self", ".", "_getFileDeprecated", "(", "name", ",", "relative", ")", ")", "return", "getFile", "(", "name", ",", "relative", "or", "self", ".", "pathDirectory", ")" ]
Returns a file name or None
[ "Returns", "a", "file", "name", "or", "None" ]
python
train
vladimarius/pyap
pyap/parser.py
https://github.com/vladimarius/pyap/blob/7896b5293982a30c1443e0c81c1ca32eeb8db15c/pyap/parser.py#L68-L79
def _parse_address(self, address_string): '''Parses address into parts''' match = utils.match(self.rules, address_string, flags=re.VERBOSE | re.U) if match: match_as_dict = match.groupdict() match_as_dict.update({'country_id': self.country}) # combine results cleaned_dict = self._combine_results(match_as_dict) # create object containing results return address.Address(**cleaned_dict) return False
[ "def", "_parse_address", "(", "self", ",", "address_string", ")", ":", "match", "=", "utils", ".", "match", "(", "self", ".", "rules", ",", "address_string", ",", "flags", "=", "re", ".", "VERBOSE", "|", "re", ".", "U", ")", "if", "match", ":", "match_as_dict", "=", "match", ".", "groupdict", "(", ")", "match_as_dict", ".", "update", "(", "{", "'country_id'", ":", "self", ".", "country", "}", ")", "# combine results", "cleaned_dict", "=", "self", ".", "_combine_results", "(", "match_as_dict", ")", "# create object containing results", "return", "address", ".", "Address", "(", "*", "*", "cleaned_dict", ")", "return", "False" ]
Parses address into parts
[ "Parses", "address", "into", "parts" ]
python
train
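The private _parse_address() helper above is normally reached through the library's public parse() function; a small sketch (assuming pyap is installed and exposes parse() at the package root, as its documentation suggests) looks like this, with a made-up sample address:

import pyap

text = "Meet me at 225 E. John Carpenter Freeway, Suite 1500, Irving, Texas 75062."
addresses = pyap.parse(text, country='US')
for address in addresses:
    print(address)            # the full matched address
    print(address.as_dict())  # the captured parts (street, city, region, ...)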
nickmckay/LiPD-utilities
Matlab/bagit.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Matlab/bagit.py#L263-L319
def save(self, processes=1, manifests=False): """ save will persist any changes that have been made to the bag metadata (self.info). If you have modified the payload of the bag (added, modified, removed files in the data directory) and want to regenerate manifests set the manifests parameter to True. The default is False since you wouldn't want a save to accidentally create a new manifest for a corrupted bag. If you want to control the number of processes that are used when recalculating checksums use the processes parameter. """ # Error checking if not self.path: raise BagError("Bag does not have a path.") # Change working directory to bag directory so helper functions work old_dir = os.path.abspath(os.path.curdir) os.chdir(self.path) # Generate new manifest files if manifests: unbaggable = _can_bag(self.path) if unbaggable: logger.error("no write permissions for the following directories and files: \n%s", unbaggable) raise BagError("Not all files/folders can be moved.") unreadable_dirs, unreadable_files = _can_read(self.path) if unreadable_dirs or unreadable_files: if unreadable_dirs: logger.error("The following directories do not have read permissions: \n%s", unreadable_dirs) if unreadable_files: logger.error("The following files do not have read permissions: \n%s", unreadable_files) raise BagError("Read permissions are required to calculate file fixities.") oxum = None self.algs = list(set(self.algs)) # Dedupe for alg in self.algs: logger.info('updating manifest-%s.txt', alg) oxum = _make_manifest('manifest-%s.txt' % alg, 'data', processes, alg) # Update Payload-Oxum logger.info('updating %s', self.tag_file_name) if oxum: self.info['Payload-Oxum'] = oxum _make_tag_file(self.tag_file_name, self.info) # Update tag-manifest for changes to manifest & bag-info files for alg in self.algs: _make_tagmanifest_file(alg, self.path) # Reload the manifests self._load_manifests() os.chdir(old_dir)
[ "def", "save", "(", "self", ",", "processes", "=", "1", ",", "manifests", "=", "False", ")", ":", "# Error checking", "if", "not", "self", ".", "path", ":", "raise", "BagError", "(", "\"Bag does not have a path.\"", ")", "# Change working directory to bag directory so helper functions work", "old_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", "os", ".", "chdir", "(", "self", ".", "path", ")", "# Generate new manifest files", "if", "manifests", ":", "unbaggable", "=", "_can_bag", "(", "self", ".", "path", ")", "if", "unbaggable", ":", "logger", ".", "error", "(", "\"no write permissions for the following directories and files: \\n%s\"", ",", "unbaggable", ")", "raise", "BagError", "(", "\"Not all files/folders can be moved.\"", ")", "unreadable_dirs", ",", "unreadable_files", "=", "_can_read", "(", "self", ".", "path", ")", "if", "unreadable_dirs", "or", "unreadable_files", ":", "if", "unreadable_dirs", ":", "logger", ".", "error", "(", "\"The following directories do not have read permissions: \\n%s\"", ",", "unreadable_dirs", ")", "if", "unreadable_files", ":", "logger", ".", "error", "(", "\"The following files do not have read permissions: \\n%s\"", ",", "unreadable_files", ")", "raise", "BagError", "(", "\"Read permissions are required to calculate file fixities.\"", ")", "oxum", "=", "None", "self", ".", "algs", "=", "list", "(", "set", "(", "self", ".", "algs", ")", ")", "# Dedupe", "for", "alg", "in", "self", ".", "algs", ":", "logger", ".", "info", "(", "'updating manifest-%s.txt'", ",", "alg", ")", "oxum", "=", "_make_manifest", "(", "'manifest-%s.txt'", "%", "alg", ",", "'data'", ",", "processes", ",", "alg", ")", "# Update Payload-Oxum", "logger", ".", "info", "(", "'updating %s'", ",", "self", ".", "tag_file_name", ")", "if", "oxum", ":", "self", ".", "info", "[", "'Payload-Oxum'", "]", "=", "oxum", "_make_tag_file", "(", "self", ".", "tag_file_name", ",", "self", ".", "info", ")", "# Update tag-manifest for changes to manifest & bag-info files", "for", "alg", "in", "self", ".", "algs", ":", "_make_tagmanifest_file", "(", "alg", ",", "self", ".", "path", ")", "# Reload the manifests", "self", ".", "_load_manifests", "(", ")", "os", ".", "chdir", "(", "old_dir", ")" ]
save will persist any changes that have been made to the bag metadata (self.info). If you have modified the payload of the bag (added, modified, removed files in the data directory) and want to regenerate manifests set the manifests parameter to True. The default is False since you wouldn't want a save to accidentally create a new manifest for a corrupted bag. If you want to control the number of processes that are used when recalculating checksums use the processes parameter.
[ "save", "will", "persist", "any", "changes", "that", "have", "been", "made", "to", "the", "bag", "metadata", "(", "self", ".", "info", ")", "." ]
python
train
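A hedged usage sketch for the save() method above: the vendored Matlab/bagit.py mirrors the standard bagit-python API, so the example assumes it (or the upstream bagit package) is importable as bagit, and the directory path is a placeholder.

import bagit

bag = bagit.make_bag("/path/to/my_bag")   # builds the data/ payload plus manifests

# ... add, change, or remove files under /path/to/my_bag/data/ ...

bag.info["Contact-Name"] = "Jane Doe"     # update bag-info metadata
bag.save(processes=4, manifests=True)     # recompute checksums and rewrite manifests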
saltstack/salt
salt/states/iptables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/iptables.py#L617-L732
def delete(name, table='filter', family='ipv4', **kwargs): ''' .. versionadded:: 2014.1.0 Delete a rule to a chain name A user-defined name to call this rule by in another part of a state or formula. This should not be an actual rule. table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 All other arguments are passed in with the same name as the long option that would normally be used for iptables, with one exception: ``--state`` is specified as `connstate` instead of `state` (not to be confused with `ctstate`). Jump options that doesn't take arguments should be passed in with an empty string. ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'rules' in kwargs: ret['changes']['locale'] = [] comments = [] save = False for rule in kwargs['rules']: if 'rules' in rule: del rule['rules'] if '__agg__' in rule: del rule['__agg__'] if 'save' in rule and rule['save']: if rule['save'] is not True: save_file = rule['save'] else: save_file = True rule['save'] = False _ret = delete(**rule) if 'locale' in _ret['changes']: ret['changes']['locale'].append(_ret['changes']['locale']) comments.append(_ret['comment']) ret['result'] = _ret['result'] if save: if save_file is True: save_file = None __salt__['iptables.save'](save_file, family=family) if not ret['changes']['locale']: del ret['changes']['locale'] ret['comment'] = '\n'.join(comments) return ret for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] kwargs['name'] = name kwargs['table'] = table rule = __salt__['iptables.build_rule'](family=family, **kwargs) command = __salt__['iptables.build_rule'](full=True, family=family, command='D', **kwargs) if not __salt__['iptables.check'](table, kwargs['chain'], rule, family) is True: if 'position' not in kwargs: ret['result'] = True ret['comment'] = 'iptables rule for {0} already absent for {1} ({2})'.format( name, family, command.strip()) return ret if __opts__['test']: ret['comment'] = 'iptables rule for {0} needs to be deleted for {1} ({2})'.format( name, family, command.strip()) return ret if 'position' in kwargs: result = __salt__['iptables.delete']( table, kwargs['chain'], family=family, position=kwargs['position']) else: result = __salt__['iptables.delete']( table, kwargs['chain'], family=family, rule=rule) if not result: ret['changes'] = {'locale': name} ret['result'] = True ret['comment'] = 'Delete iptables rule for {0} {1}'.format( name, command.strip()) if 'save' in kwargs: if kwargs['save']: out = __salt__['iptables.save'](filename=None, family=family) ret['comment'] = ('Deleted and saved iptables rule {0} for {1}\n' '{2}\n{3}').format(name, family, command.strip(), out) return ret else: ret['result'] = False ret['comment'] = ('Failed to delete iptables rule for {0}.\n' 'Attempted rule was {1}').format( name, command.strip()) return ret
[ "def", "delete", "(", "name", ",", "table", "=", "'filter'", ",", "family", "=", "'ipv4'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "'rules'", "in", "kwargs", ":", "ret", "[", "'changes'", "]", "[", "'locale'", "]", "=", "[", "]", "comments", "=", "[", "]", "save", "=", "False", "for", "rule", "in", "kwargs", "[", "'rules'", "]", ":", "if", "'rules'", "in", "rule", ":", "del", "rule", "[", "'rules'", "]", "if", "'__agg__'", "in", "rule", ":", "del", "rule", "[", "'__agg__'", "]", "if", "'save'", "in", "rule", "and", "rule", "[", "'save'", "]", ":", "if", "rule", "[", "'save'", "]", "is", "not", "True", ":", "save_file", "=", "rule", "[", "'save'", "]", "else", ":", "save_file", "=", "True", "rule", "[", "'save'", "]", "=", "False", "_ret", "=", "delete", "(", "*", "*", "rule", ")", "if", "'locale'", "in", "_ret", "[", "'changes'", "]", ":", "ret", "[", "'changes'", "]", "[", "'locale'", "]", ".", "append", "(", "_ret", "[", "'changes'", "]", "[", "'locale'", "]", ")", "comments", ".", "append", "(", "_ret", "[", "'comment'", "]", ")", "ret", "[", "'result'", "]", "=", "_ret", "[", "'result'", "]", "if", "save", ":", "if", "save_file", "is", "True", ":", "save_file", "=", "None", "__salt__", "[", "'iptables.save'", "]", "(", "save_file", ",", "family", "=", "family", ")", "if", "not", "ret", "[", "'changes'", "]", "[", "'locale'", "]", ":", "del", "ret", "[", "'changes'", "]", "[", "'locale'", "]", "ret", "[", "'comment'", "]", "=", "'\\n'", ".", "join", "(", "comments", ")", "return", "ret", "for", "ignore", "in", "_STATE_INTERNAL_KEYWORDS", ":", "if", "ignore", "in", "kwargs", ":", "del", "kwargs", "[", "ignore", "]", "kwargs", "[", "'name'", "]", "=", "name", "kwargs", "[", "'table'", "]", "=", "table", "rule", "=", "__salt__", "[", "'iptables.build_rule'", "]", "(", "family", "=", "family", ",", "*", "*", "kwargs", ")", "command", "=", "__salt__", "[", "'iptables.build_rule'", "]", "(", "full", "=", "True", ",", "family", "=", "family", ",", "command", "=", "'D'", ",", "*", "*", "kwargs", ")", "if", "not", "__salt__", "[", "'iptables.check'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "rule", ",", "family", ")", "is", "True", ":", "if", "'position'", "not", "in", "kwargs", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'iptables rule for {0} already absent for {1} ({2})'", ".", "format", "(", "name", ",", "family", ",", "command", ".", "strip", "(", ")", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'iptables rule for {0} needs to be deleted for {1} ({2})'", ".", "format", "(", "name", ",", "family", ",", "command", ".", "strip", "(", ")", ")", "return", "ret", "if", "'position'", "in", "kwargs", ":", "result", "=", "__salt__", "[", "'iptables.delete'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", "=", "family", ",", "position", "=", "kwargs", "[", "'position'", "]", ")", "else", ":", "result", "=", "__salt__", "[", "'iptables.delete'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", "=", "family", ",", "rule", "=", "rule", ")", "if", "not", "result", ":", "ret", "[", "'changes'", "]", "=", "{", "'locale'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Delete iptables rule for {0} {1}'", ".", "format", "(", "name", ",", "command", ".", "strip", 
"(", ")", ")", "if", "'save'", "in", "kwargs", ":", "if", "kwargs", "[", "'save'", "]", ":", "out", "=", "__salt__", "[", "'iptables.save'", "]", "(", "filename", "=", "None", ",", "family", "=", "family", ")", "ret", "[", "'comment'", "]", "=", "(", "'Deleted and saved iptables rule {0} for {1}\\n'", "'{2}\\n{3}'", ")", ".", "format", "(", "name", ",", "family", ",", "command", ".", "strip", "(", ")", ",", "out", ")", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "(", "'Failed to delete iptables rule for {0}.\\n'", "'Attempted rule was {1}'", ")", ".", "format", "(", "name", ",", "command", ".", "strip", "(", ")", ")", "return", "ret" ]
.. versionadded:: 2014.1.0 Delete a rule to a chain name A user-defined name to call this rule by in another part of a state or formula. This should not be an actual rule. table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 All other arguments are passed in with the same name as the long option that would normally be used for iptables, with one exception: ``--state`` is specified as `connstate` instead of `state` (not to be confused with `ctstate`). Jump options that doesn't take arguments should be passed in with an empty string.
[ "..", "versionadded", "::", "2014", ".", "1", ".", "0" ]
python
train
scopus-api/scopus
scopus/deprecated_/scopus_author.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/deprecated_/scopus_author.py#L256-L260
def get_document_eids(self, *args, **kwds): """Return list of EIDs for the author using ScopusSearch.""" search = ScopusSearch('au-id({})'.format(self.author_id), *args, **kwds) return search.get_eids()
[ "def", "get_document_eids", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "search", "=", "ScopusSearch", "(", "'au-id({})'", ".", "format", "(", "self", ".", "author_id", ")", ",", "*", "args", ",", "*", "*", "kwds", ")", "return", "search", ".", "get_eids", "(", ")" ]
Return list of EIDs for the author using ScopusSearch.
[ "Return", "list", "of", "EIDs", "for", "the", "author", "using", "ScopusSearch", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xview.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xview.py#L984-L1001
def registerView(viewType, location='Central'): """ Registers the inputed view type to the given location. The location \ is just a way to group and organize potential view plugins for a \ particular widget, and is determined per application. This eases \ use when building a plugin based system. It has no relevance to the \ XView class itself where you register a view. :param viewType | <subclass of XView> """ # update the dispatch signals sigs = getattr(viewType, '__xview_signals__', []) XView.dispatch(location).registerSignals(sigs) location = nativestring(location) XView._registry.setdefault(location, {}) XView._registry[location][viewType.viewName()] = viewType XView.dispatch(location).emit('registeredView(QVariant)', viewType)
[ "def", "registerView", "(", "viewType", ",", "location", "=", "'Central'", ")", ":", "# update the dispatch signals", "sigs", "=", "getattr", "(", "viewType", ",", "'__xview_signals__'", ",", "[", "]", ")", "XView", ".", "dispatch", "(", "location", ")", ".", "registerSignals", "(", "sigs", ")", "location", "=", "nativestring", "(", "location", ")", "XView", ".", "_registry", ".", "setdefault", "(", "location", ",", "{", "}", ")", "XView", ".", "_registry", "[", "location", "]", "[", "viewType", ".", "viewName", "(", ")", "]", "=", "viewType", "XView", ".", "dispatch", "(", "location", ")", ".", "emit", "(", "'registeredView(QVariant)'", ",", "viewType", ")" ]
Registers the inputed view type to the given location. The location \ is just a way to group and organize potential view plugins for a \ particular widget, and is determined per application. This eases \ use when building a plugin based system. It has no relevance to the \ XView class itself where you register a view. :param viewType | <subclass of XView>
[ "Registers", "the", "inputed", "view", "type", "to", "the", "given", "location", ".", "The", "location", "\\", "is", "just", "a", "way", "to", "group", "and", "organize", "potential", "view", "plugins", "for", "a", "\\", "particular", "widget", "and", "is", "determined", "per", "application", ".", "This", "eases", "\\", "use", "when", "building", "a", "plugin", "based", "system", ".", "It", "has", "no", "relevance", "to", "the", "\\", "XView", "class", "itself", "where", "you", "register", "a", "view", ".", ":", "param", "viewType", "|", "<subclass", "of", "XView", ">" ]
python
train
PGower/PyCanvas
pycanvas/apis/grade_change_log.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grade_change_log.py#L73-L98
def query_by_student(self, student_id, end_time=None, start_time=None): """ Query by student. List grade change events for a given student. """ path = {} data = {} params = {} # REQUIRED - PATH - student_id """ID""" path["student_id"] = student_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/grade_change/students/{student_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/grade_change/students/{student_id}".format(**path), data=data, params=params, all_pages=True)
[ "def", "query_by_student", "(", "self", ",", "student_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - student_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"student_id\"", "]", "=", "student_id", "# OPTIONAL - start_time\r", "\"\"\"The beginning of the time range from which you want events.\"\"\"", "if", "start_time", "is", "not", "None", ":", "params", "[", "\"start_time\"", "]", "=", "start_time", "# OPTIONAL - end_time\r", "\"\"\"The end of the time range from which you want events.\"\"\"", "if", "end_time", "is", "not", "None", ":", "params", "[", "\"end_time\"", "]", "=", "end_time", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/audit/grade_change/students/{student_id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/audit/grade_change/students/{student_id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
Query by student. List grade change events for a given student.
[ "Query", "by", "student", ".", "List", "grade", "change", "events", "for", "a", "given", "student", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L14938-L14982
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._counter_alias is not None: return False if self._user_alias_cancelled is not None: return False if self._monetary_account_id is not None: return False if self._draft_share_invite_bank_id is not None: return False if self._share_detail is not None: return False if self._status is not None: return False if self._share_type is not None: return False if self._start_date is not None: return False if self._end_date is not None: return False if self._description is not None: return False return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_id_", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_created", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_updated", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_counter_alias", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_user_alias_cancelled", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_monetary_account_id", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_draft_share_invite_bank_id", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_share_detail", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_status", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_share_type", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_start_date", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_end_date", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_description", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L275-L281
def applet_list_projects(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /applet-xxxx/listProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects """ return DXHTTPRequest('/%s/listProjects' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "applet_list_projects", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/listProjects'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /applet-xxxx/listProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects
[ "Invokes", "the", "/", "applet", "-", "xxxx", "/", "listProjects", "API", "method", "." ]
python
train
ValvePython/steam
steam/core/crypto.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/core/crypto.py#L34-L45
def generate_session_key(hmac_secret=b''): """ :param hmac_secret: optional HMAC :type hmac_secret: :class:`bytes` :return: (session_key, encrypted_session_key) tuple :rtype: :class:`tuple` """ session_key = random_bytes(32) encrypted_session_key = PKCS1_OAEP.new(UniverseKey.Public, SHA1)\ .encrypt(session_key + hmac_secret) return (session_key, encrypted_session_key)
[ "def", "generate_session_key", "(", "hmac_secret", "=", "b''", ")", ":", "session_key", "=", "random_bytes", "(", "32", ")", "encrypted_session_key", "=", "PKCS1_OAEP", ".", "new", "(", "UniverseKey", ".", "Public", ",", "SHA1", ")", ".", "encrypt", "(", "session_key", "+", "hmac_secret", ")", "return", "(", "session_key", ",", "encrypted_session_key", ")" ]
:param hmac_secret: optional HMAC :type hmac_secret: :class:`bytes` :return: (session_key, encrypted_session_key) tuple :rtype: :class:`tuple`
[ ":", "param", "hmac_secret", ":", "optional", "HMAC", ":", "type", "hmac_secret", ":", ":", "class", ":", "bytes", ":", "return", ":", "(", "session_key", "encrypted_session_key", ")", "tuple", ":", "rtype", ":", ":", "class", ":", "tuple" ]
python
train
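A short sketch of how generate_session_key() above is typically called; the import path comes from the record, and the note about the ciphertext length is an assumption tied to the size of the Steam universe RSA key.

from steam.core.crypto import generate_session_key

session_key, encrypted_session_key = generate_session_key()

# session_key: 32 random bytes used for symmetric encryption of the connection.
# encrypted_session_key: the same key RSA-OAEP-encrypted with the universe public
# key (its length matches that key's modulus size); this is what gets sent to the
# server during channel-encryption setup.
print(len(session_key))             # 32
print(len(encrypted_session_key))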
econ-ark/HARK
HARK/interpolation.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/interpolation.py#L3371-L3410
def calcLogSumChoiceProbs(Vals, sigma): ''' Returns the final optimal value and choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. P : [numpy.array] A numpy.array that holds the discrete choice probabilities ''' # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. Pflat = np.argmax(Vals, axis=0) V = np.zeros(Vals[0].shape) Probs = np.zeros(Vals.shape) for i in range(Vals.shape[0]): optimalIndices = Pflat == i V[optimalIndices] = Vals[i][optimalIndices] Probs[i][optimalIndices] = 1 return V, Probs # else we have a taste shock maxV = np.max(Vals, axis=0) # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma) sumexp = np.sum(np.exp((Vals-maxV)/sigma), axis=0) LogSumV = np.log(sumexp) LogSumV = maxV + sigma*LogSumV Probs = np.exp((Vals-LogSumV)/sigma) return LogSumV, Probs
[ "def", "calcLogSumChoiceProbs", "(", "Vals", ",", "sigma", ")", ":", "# Assumes that NaNs have been replaced by -numpy.inf or similar", "if", "sigma", "==", "0.0", ":", "# We could construct a linear index here and use unravel_index.", "Pflat", "=", "np", ".", "argmax", "(", "Vals", ",", "axis", "=", "0", ")", "V", "=", "np", ".", "zeros", "(", "Vals", "[", "0", "]", ".", "shape", ")", "Probs", "=", "np", ".", "zeros", "(", "Vals", ".", "shape", ")", "for", "i", "in", "range", "(", "Vals", ".", "shape", "[", "0", "]", ")", ":", "optimalIndices", "=", "Pflat", "==", "i", "V", "[", "optimalIndices", "]", "=", "Vals", "[", "i", "]", "[", "optimalIndices", "]", "Probs", "[", "i", "]", "[", "optimalIndices", "]", "=", "1", "return", "V", ",", "Probs", "# else we have a taste shock", "maxV", "=", "np", ".", "max", "(", "Vals", ",", "axis", "=", "0", ")", "# calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma)", "sumexp", "=", "np", ".", "sum", "(", "np", ".", "exp", "(", "(", "Vals", "-", "maxV", ")", "/", "sigma", ")", ",", "axis", "=", "0", ")", "LogSumV", "=", "np", ".", "log", "(", "sumexp", ")", "LogSumV", "=", "maxV", "+", "sigma", "*", "LogSumV", "Probs", "=", "np", ".", "exp", "(", "(", "Vals", "-", "LogSumV", ")", "/", "sigma", ")", "return", "LogSumV", ",", "Probs" ]
Returns the final optimal value and choice probabilities given the choice specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. P : [numpy.array] A numpy.array that holds the discrete choice probabilities
[ "Returns", "the", "final", "optimal", "value", "and", "choice", "probabilities", "given", "the", "choice", "specific", "value", "functions", "Vals", ".", "Probabilities", "are", "degenerate", "if", "sigma", "==", "0", ".", "0", ".", "Parameters", "----------", "Vals", ":", "[", "numpy", ".", "array", "]", "A", "numpy", ".", "array", "that", "holds", "choice", "specific", "values", "at", "common", "grid", "points", ".", "sigma", ":", "float", "A", "number", "that", "controls", "the", "variance", "of", "the", "taste", "shocks", "Returns", "-------", "V", ":", "[", "numpy", ".", "array", "]", "A", "numpy", ".", "array", "that", "holds", "the", "integrated", "value", "function", ".", "P", ":", "[", "numpy", ".", "array", "]", "A", "numpy", ".", "array", "that", "holds", "the", "discrete", "choice", "probabilities" ]
python
train
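A tiny worked example for calcLogSumChoiceProbs() above (a sketch, assuming HARK is installed so the interpolation module is importable) shows both the degenerate sigma == 0.0 branch and the smoothed branch.

import numpy as np
from HARK.interpolation import calcLogSumChoiceProbs

# Two choice-specific value functions evaluated at two grid points.
Vals = np.array([[1.0, 3.0],
                 [2.0, 1.0]])

V, P = calcLogSumChoiceProbs(Vals, sigma=0.0)
# V -> [2.0, 3.0]          (upper envelope of the two value functions)
# P -> [[0., 1.],
#       [1., 0.]]          (degenerate choice probabilities)

V_s, P_s = calcLogSumChoiceProbs(Vals, sigma=0.5)
# With a taste shock, each column of P_s is a softmax over Vals / sigma,
# so probabilities sum to one per grid point but are no longer 0/1.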
sgaynetdinov/py-vkontakte
vk/auth.py
https://github.com/sgaynetdinov/py-vkontakte/blob/c09654f89008b5847418bb66f1f9c408cd7aa128/vk/auth.py#L35-L76
def get_url_authcode_flow_user(client_id, redirect_uri, display="page", scope=None, state=None): """Authorization Code Flow for User Access Token Use Authorization Code Flow to run VK API methods from the server side of an application. Access token received this way is not bound to an ip address but set of permissions that can be granted is limited for security reasons. Args: client_id (int): Application id. redirect_uri (str): Address to redirect user after authorization. display (str): Sets authorization page appearance. Sets: {`page`, `popup`, `mobile`} Defaults to `page` scope (:obj:`str`, optional): Permissions bit mask, to check on authorization and request if necessary. More scope: https://vk.com/dev/permissions state (:obj:`str`, optional): An arbitrary string that will be returned together with authorization result. Returns: str: Url Examples: >>> vk.get_url_authcode_flow_user(1, 'http://example.com/', scope="wall,email") 'https://oauth.vk.com/authorize?client_id=1&display=page&redirect_uri=http://example.com/&scope=wall,email&response_type=code .. _Docs: https://vk.com/dev/authcode_flow_user """ url = "https://oauth.vk.com/authorize" params = { "client_id": client_id, "redirect_uri": redirect_uri, "display": display, "response_type": "code" } if scope: params['scope'] = scope if state: params['state'] = state return u"{url}?{params}".format(url=url, params=urlencode(params))
[ "def", "get_url_authcode_flow_user", "(", "client_id", ",", "redirect_uri", ",", "display", "=", "\"page\"", ",", "scope", "=", "None", ",", "state", "=", "None", ")", ":", "url", "=", "\"https://oauth.vk.com/authorize\"", "params", "=", "{", "\"client_id\"", ":", "client_id", ",", "\"redirect_uri\"", ":", "redirect_uri", ",", "\"display\"", ":", "display", ",", "\"response_type\"", ":", "\"code\"", "}", "if", "scope", ":", "params", "[", "'scope'", "]", "=", "scope", "if", "state", ":", "params", "[", "'state'", "]", "=", "state", "return", "u\"{url}?{params}\"", ".", "format", "(", "url", "=", "url", ",", "params", "=", "urlencode", "(", "params", ")", ")" ]
Authorization Code Flow for User Access Token Use Authorization Code Flow to run VK API methods from the server side of an application. Access token received this way is not bound to an ip address but set of permissions that can be granted is limited for security reasons. Args: client_id (int): Application id. redirect_uri (str): Address to redirect user after authorization. display (str): Sets authorization page appearance. Sets: {`page`, `popup`, `mobile`} Defaults to `page` scope (:obj:`str`, optional): Permissions bit mask, to check on authorization and request if necessary. More scope: https://vk.com/dev/permissions state (:obj:`str`, optional): An arbitrary string that will be returned together with authorization result. Returns: str: Url Examples: >>> vk.get_url_authcode_flow_user(1, 'http://example.com/', scope="wall,email") 'https://oauth.vk.com/authorize?client_id=1&display=page&redirect_uri=http://example.com/&scope=wall,email&response_type=code .. _Docs: https://vk.com/dev/authcode_flow_user
[ "Authorization", "Code", "Flow", "for", "User", "Access", "Token" ]
python
train
cyface/django-termsandconditions
termsandconditions/pipeline.py
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/pipeline.py#L28-L36
def redirect_to_terms_accept(current_path='/', slug='default'): """Redirect the user to the terms and conditions accept page.""" redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH)) if slug != 'default': redirect_url_parts[2] += slug querystring = QueryDict(redirect_url_parts[4], mutable=True) querystring[TERMS_RETURNTO_PARAM] = current_path redirect_url_parts[4] = querystring.urlencode(safe='/') return HttpResponseRedirect(urlunparse(redirect_url_parts))
[ "def", "redirect_to_terms_accept", "(", "current_path", "=", "'/'", ",", "slug", "=", "'default'", ")", ":", "redirect_url_parts", "=", "list", "(", "urlparse", "(", "ACCEPT_TERMS_PATH", ")", ")", "if", "slug", "!=", "'default'", ":", "redirect_url_parts", "[", "2", "]", "+=", "slug", "querystring", "=", "QueryDict", "(", "redirect_url_parts", "[", "4", "]", ",", "mutable", "=", "True", ")", "querystring", "[", "TERMS_RETURNTO_PARAM", "]", "=", "current_path", "redirect_url_parts", "[", "4", "]", "=", "querystring", ".", "urlencode", "(", "safe", "=", "'/'", ")", "return", "HttpResponseRedirect", "(", "urlunparse", "(", "redirect_url_parts", ")", ")" ]
Redirect the user to the terms and conditions accept page.
[ "Redirect", "the", "user", "to", "the", "terms", "and", "conditions", "accept", "page", "." ]
python
train
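For redirect_to_terms_accept() above, a brief sketch of the redirect it builds; this assumes a configured Django project with the package's default settings, i.e. ACCEPT_TERMS_PATH of '/terms/accept/' and TERMS_RETURNTO_PARAM of 'returnTo' (both assumptions, since they are configurable).

from termsandconditions.pipeline import redirect_to_terms_accept

response = redirect_to_terms_accept(current_path="/dashboard/", slug="site-terms")
print(response["Location"])
# expected: /terms/accept/site-terms?returnTo=/dashboard/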
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/tools.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/external_packages/novainstrumentation/tools.py#L95-L129
def load_with_cache(file_, recache=False, sampling=1, columns=None, temp_dir='.', data_type='int16'): """@brief This function loads a file from the current directory and saves the cached file to later executions. It's also possible to make a recache or a subsampling of the signal and choose only a few columns of the signal, to accelerate the opening process. @param file String: the name of the file to open. @param recache Boolean: indication whether it's done recache or not (default = false). @param sampling Integer: the sampling step. if 1, the signal isn't sampled (default = 1). @param columns Array-Like: the columns to read from the file. if None, all columns are considered (default = None). @return data Array-Like: the data from the file. TODO: Should save cache in a different directory TODO: Create test function and check size of generated files TODO: receive a file handle """ cfile = '%s.npy' % file_ if (not path.exists(cfile)) or recache: if columns == None: data = np.loadtxt(file_)[::sampling, :] else: data = np.loadtxt(file_)[::sampling, columns] np.save(cfile, data.astype(data_type)) else: data = np.load(cfile) return data
[ "def", "load_with_cache", "(", "file_", ",", "recache", "=", "False", ",", "sampling", "=", "1", ",", "columns", "=", "None", ",", "temp_dir", "=", "'.'", ",", "data_type", "=", "'int16'", ")", ":", "cfile", "=", "'%s.npy'", "%", "file_", "if", "(", "not", "path", ".", "exists", "(", "cfile", ")", ")", "or", "recache", ":", "if", "columns", "==", "None", ":", "data", "=", "np", ".", "loadtxt", "(", "file_", ")", "[", ":", ":", "sampling", ",", ":", "]", "else", ":", "data", "=", "np", ".", "loadtxt", "(", "file_", ")", "[", ":", ":", "sampling", ",", "columns", "]", "np", ".", "save", "(", "cfile", ",", "data", ".", "astype", "(", "data_type", ")", ")", "else", ":", "data", "=", "np", ".", "load", "(", "cfile", ")", "return", "data" ]
@brief This function loads a file from the current directory and saves the cached file to later executions. It's also possible to make a recache or a subsampling of the signal and choose only a few columns of the signal, to accelerate the opening process. @param file String: the name of the file to open. @param recache Boolean: indication whether it's done recache or not (default = false). @param sampling Integer: the sampling step. if 1, the signal isn't sampled (default = 1). @param columns Array-Like: the columns to read from the file. if None, all columns are considered (default = None). @return data Array-Like: the data from the file. TODO: Should save cache in a different directory TODO: Create test function and check size of generated files TODO: receive a file handle
[ "@brief", "This", "function", "loads", "a", "file", "from", "the", "current", "directory", "and", "saves", "the", "cached", "file", "to", "later", "executions", ".", "It", "s", "also", "possible", "to", "make", "a", "recache", "or", "a", "subsampling", "of", "the", "signal", "and", "choose", "only", "a", "few", "columns", "of", "the", "signal", "to", "accelerate", "the", "opening", "process", "." ]
python
train
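The caching loader above can be exercised with a sketch like the following; the import path assumes the upstream novainstrumentation package (this record is a vendored copy), and signal.txt is a hypothetical whitespace-delimited file.

from novainstrumentation.tools import load_with_cache

# First call parses signal.txt with np.loadtxt, keeps every 4th row of columns
# 0 and 2, and writes an int16 cache next to it as signal.txt.npy.
data = load_with_cache("signal.txt", sampling=4, columns=(0, 2))

# Later calls load the .npy cache directly (note: the cached, already-sampled
# data); pass recache=True after the source file changes to rebuild it.
data_again = load_with_cache("signal.txt")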
lsbardel/python-stdnet
stdnet/utils/py2py3.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/py2py3.py#L94-L101
def to_string(s, encoding=None, errors='strict'): """Inverse of to_bytes""" encoding = encoding or 'utf-8' if isinstance(s, bytes): return s.decode(encoding, errors) if not is_string(s): s = string_type(s) return s
[ "def", "to_string", "(", "s", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "encoding", "=", "encoding", "or", "'utf-8'", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", ".", "decode", "(", "encoding", ",", "errors", ")", "if", "not", "is_string", "(", "s", ")", ":", "s", "=", "string_type", "(", "s", ")", "return", "s" ]
Inverse of to_bytes
[ "Inverse", "of", "to_bytes" ]
python
train
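The behaviour of to_string() above, the inverse of to_bytes, is easiest to see with a couple of small cases; the import path follows the record's module layout.

from stdnet.utils.py2py3 import to_string

print(to_string(b"hello"))        # bytes are decoded with utf-8 by default -> 'hello'
print(to_string("already text"))  # text passes through unchanged
print(to_string(42))              # non-strings are coerced via string_type -> '42'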
waqasbhatti/astrobase
astrobase/fakelcs/recovery.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/fakelcs/recovery.py#L683-L831
def variable_index_gridsearch_magbin(simbasedir, stetson_stdev_range=(1.0,20.0), inveta_stdev_range=(1.0,20.0), iqr_stdev_range=(1.0,20.0), ngridpoints=32, ngridworkers=None): '''This runs a variable index grid search per magbin. For each magbin, this does a grid search using the stetson and inveta ranges provided and tries to optimize the Matthews Correlation Coefficient (best value is +1.0), indicating the best possible separation of variables vs. nonvariables. The thresholds on these two variable indexes that produce the largest coeff for the collection of fake LCs will probably be the ones that work best for actual variable classification on the real LCs. https://en.wikipedia.org/wiki/Matthews_correlation_coefficient For each grid-point, calculates the true positives, false positives, true negatives, false negatives. Then gets the precision and recall, confusion matrix, and the ROC curve for variable vs. nonvariable. Once we've identified the best thresholds to use, we can then calculate variable object numbers: - as a function of magnitude - as a function of period - as a function of number of detections - as a function of amplitude of variability Writes everything back to `simbasedir/fakevar-recovery.pkl`. Use the plotting function below to make plots for the results. Parameters ---------- simbasedir : str The directory where the fake LCs are located. stetson_stdev_range : sequence of 2 floats The min and max values of the Stetson J variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. inveta_stdev_range : sequence of 2 floats The min and max values of the 1/eta variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. iqr_stdev_range : sequence of 2 floats The min and max values of the IQR variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. ngridpoints : int The number of grid points for each variability index grid. Remember that this function will be searching in 3D and will require lots of time to run if ngridpoints is too large. For the default number of grid points and 25000 simulated light curves, this takes about 3 days to run on a 40 (effective) core machine with 2 x Xeon E5-2650v3 CPUs. ngridworkers : int or None The number of parallel grid search workers that will be launched. Returns ------- dict The returned dict contains a list of recovery stats for each magbin and each grid point in the variability index grids that were used. This dict can be passed to the plotting function below to plot the results. 
''' # make the output directory where all the pkls from the variability # threshold runs will go outdir = os.path.join(simbasedir,'recvar-threshold-pkls') if not os.path.exists(outdir): os.mkdir(outdir) # get the info from the simbasedir with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd: siminfo = pickle.load(infd) # get the column defs for the fakelcs timecols = siminfo['timecols'] magcols = siminfo['magcols'] errcols = siminfo['errcols'] # get the magbinmedians to use for the recovery processing magbinmedians = siminfo['magrms'][magcols[0]]['binned_sdssr_median'] # generate the grids for stetson and inveta stetson_grid = np.linspace(stetson_stdev_range[0], stetson_stdev_range[1], num=ngridpoints) inveta_grid = np.linspace(inveta_stdev_range[0], inveta_stdev_range[1], num=ngridpoints) iqr_grid = np.linspace(iqr_stdev_range[0], iqr_stdev_range[1], num=ngridpoints) # generate the grid stet_inveta_iqr_grid = [] for stet in stetson_grid: for inveta in inveta_grid: for iqr in iqr_grid: grid_point = [stet, inveta, iqr] stet_inveta_iqr_grid.append(grid_point) # the output dict grid_results = {'stetson_grid':stetson_grid, 'inveta_grid':inveta_grid, 'iqr_grid':iqr_grid, 'stet_inveta_iqr_grid':stet_inveta_iqr_grid, 'magbinmedians':magbinmedians, 'timecols':timecols, 'magcols':magcols, 'errcols':errcols, 'simbasedir':os.path.abspath(simbasedir), 'recovery':[]} # set up the pool pool = mp.Pool(ngridworkers) # run the grid search per magbinmedian for magbinmedian in magbinmedians: LOGINFO('running stetson J-inveta grid-search ' 'for magbinmedian = %.3f...' % magbinmedian) tasks = [(simbasedir, gp, magbinmedian) for gp in stet_inveta_iqr_grid] thisbin_results = pool.map(magbin_varind_gridsearch_worker, tasks) grid_results['recovery'].append(thisbin_results) pool.close() pool.join() LOGINFO('done.') with open(os.path.join(simbasedir, 'fakevar-recovery-per-magbin.pkl'),'wb') as outfd: pickle.dump(grid_results,outfd,pickle.HIGHEST_PROTOCOL) return grid_results
[ "def", "variable_index_gridsearch_magbin", "(", "simbasedir", ",", "stetson_stdev_range", "=", "(", "1.0", ",", "20.0", ")", ",", "inveta_stdev_range", "=", "(", "1.0", ",", "20.0", ")", ",", "iqr_stdev_range", "=", "(", "1.0", ",", "20.0", ")", ",", "ngridpoints", "=", "32", ",", "ngridworkers", "=", "None", ")", ":", "# make the output directory where all the pkls from the variability", "# threshold runs will go", "outdir", "=", "os", ".", "path", ".", "join", "(", "simbasedir", ",", "'recvar-threshold-pkls'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "# get the info from the simbasedir", "with", "open", "(", "os", ".", "path", ".", "join", "(", "simbasedir", ",", "'fakelcs-info.pkl'", ")", ",", "'rb'", ")", "as", "infd", ":", "siminfo", "=", "pickle", ".", "load", "(", "infd", ")", "# get the column defs for the fakelcs", "timecols", "=", "siminfo", "[", "'timecols'", "]", "magcols", "=", "siminfo", "[", "'magcols'", "]", "errcols", "=", "siminfo", "[", "'errcols'", "]", "# get the magbinmedians to use for the recovery processing", "magbinmedians", "=", "siminfo", "[", "'magrms'", "]", "[", "magcols", "[", "0", "]", "]", "[", "'binned_sdssr_median'", "]", "# generate the grids for stetson and inveta", "stetson_grid", "=", "np", ".", "linspace", "(", "stetson_stdev_range", "[", "0", "]", ",", "stetson_stdev_range", "[", "1", "]", ",", "num", "=", "ngridpoints", ")", "inveta_grid", "=", "np", ".", "linspace", "(", "inveta_stdev_range", "[", "0", "]", ",", "inveta_stdev_range", "[", "1", "]", ",", "num", "=", "ngridpoints", ")", "iqr_grid", "=", "np", ".", "linspace", "(", "iqr_stdev_range", "[", "0", "]", ",", "iqr_stdev_range", "[", "1", "]", ",", "num", "=", "ngridpoints", ")", "# generate the grid", "stet_inveta_iqr_grid", "=", "[", "]", "for", "stet", "in", "stetson_grid", ":", "for", "inveta", "in", "inveta_grid", ":", "for", "iqr", "in", "iqr_grid", ":", "grid_point", "=", "[", "stet", ",", "inveta", ",", "iqr", "]", "stet_inveta_iqr_grid", ".", "append", "(", "grid_point", ")", "# the output dict", "grid_results", "=", "{", "'stetson_grid'", ":", "stetson_grid", ",", "'inveta_grid'", ":", "inveta_grid", ",", "'iqr_grid'", ":", "iqr_grid", ",", "'stet_inveta_iqr_grid'", ":", "stet_inveta_iqr_grid", ",", "'magbinmedians'", ":", "magbinmedians", ",", "'timecols'", ":", "timecols", ",", "'magcols'", ":", "magcols", ",", "'errcols'", ":", "errcols", ",", "'simbasedir'", ":", "os", ".", "path", ".", "abspath", "(", "simbasedir", ")", ",", "'recovery'", ":", "[", "]", "}", "# set up the pool", "pool", "=", "mp", ".", "Pool", "(", "ngridworkers", ")", "# run the grid search per magbinmedian", "for", "magbinmedian", "in", "magbinmedians", ":", "LOGINFO", "(", "'running stetson J-inveta grid-search '", "'for magbinmedian = %.3f...'", "%", "magbinmedian", ")", "tasks", "=", "[", "(", "simbasedir", ",", "gp", ",", "magbinmedian", ")", "for", "gp", "in", "stet_inveta_iqr_grid", "]", "thisbin_results", "=", "pool", ".", "map", "(", "magbin_varind_gridsearch_worker", ",", "tasks", ")", "grid_results", "[", "'recovery'", "]", ".", "append", "(", "thisbin_results", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "LOGINFO", "(", "'done.'", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "simbasedir", ",", "'fakevar-recovery-per-magbin.pkl'", ")", ",", "'wb'", ")", "as", "outfd", ":", "pickle", ".", "dump", "(", "grid_results", ",", "outfd", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", 
"return", "grid_results" ]
This runs a variable index grid search per magbin. For each magbin, this does a grid search using the stetson and inveta ranges provided and tries to optimize the Matthews Correlation Coefficient (best value is +1.0), indicating the best possible separation of variables vs. nonvariables. The thresholds on these two variable indexes that produce the largest coeff for the collection of fake LCs will probably be the ones that work best for actual variable classification on the real LCs. https://en.wikipedia.org/wiki/Matthews_correlation_coefficient For each grid-point, calculates the true positives, false positives, true negatives, false negatives. Then gets the precision and recall, confusion matrix, and the ROC curve for variable vs. nonvariable. Once we've identified the best thresholds to use, we can then calculate variable object numbers: - as a function of magnitude - as a function of period - as a function of number of detections - as a function of amplitude of variability Writes everything back to `simbasedir/fakevar-recovery.pkl`. Use the plotting function below to make plots for the results. Parameters ---------- simbasedir : str The directory where the fake LCs are located. stetson_stdev_range : sequence of 2 floats The min and max values of the Stetson J variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. inveta_stdev_range : sequence of 2 floats The min and max values of the 1/eta variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. iqr_stdev_range : sequence of 2 floats The min and max values of the IQR variability index to generate a grid over these to test for the values of this index that produce the 'best' recovery rate for the injected variable stars. ngridpoints : int The number of grid points for each variability index grid. Remember that this function will be searching in 3D and will require lots of time to run if ngridpoints is too large. For the default number of grid points and 25000 simulated light curves, this takes about 3 days to run on a 40 (effective) core machine with 2 x Xeon E5-2650v3 CPUs. ngridworkers : int or None The number of parallel grid search workers that will be launched. Returns ------- dict The returned dict contains a list of recovery stats for each magbin and each grid point in the variability index grids that were used. This dict can be passed to the plotting function below to plot the results.
[ "This", "runs", "a", "variable", "index", "grid", "search", "per", "magbin", "." ]
python
valid
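Given the long parameter list documented above, a minimal call sketch may help; the simbasedir path and grid sizes are placeholders, and the function is assumed importable from astrobase.fakelcs.recovery as the record's path indicates.

from astrobase.fakelcs.recovery import variable_index_gridsearch_magbin

results = variable_index_gridsearch_magbin(
    "/path/to/fakelcs-simbasedir",    # directory created by the fake-LC generator
    stetson_stdev_range=(1.0, 10.0),
    inveta_stdev_range=(1.0, 10.0),
    iqr_stdev_range=(1.0, 10.0),
    ngridpoints=8,                    # keep the 3-D grid small for a quick run
    ngridworkers=4,
)
# results['recovery'][i] holds the per-grid-point stats for magnitude bin i,
# and the same dict is pickled to fakevar-recovery-per-magbin.pkl in simbasedir.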
toomore/goristock
grs/goristock.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/goristock.py#L455-L459
def MAVOL_serial(self,days,rev=0): """ see make_serial() 成較量移動平均 list 化,資料格式請見 def make_serial() """ return self.make_serial(self.stock_vol,days,rev=0)
[ "def", "MAVOL_serial", "(", "self", ",", "days", ",", "rev", "=", "0", ")", ":", "return", "self", ".", "make_serial", "(", "self", ".", "stock_vol", ",", "days", ",", "rev", "=", "0", ")" ]
see make_serial(). Converts the volume moving average (MAVOL) into a list; for the data format, see def make_serial().
[ "see", "make_serial", "()", ".", "Converts", "the", "volume", "moving", "average", "(", "MAVOL", ")", "into", "a", "list", ";", "for", "the", "data", "format", "see", "def", "make_serial", "()", "." ]
python
train
honsiorovskyi/codeharvester
src/codeharvester/harvester.py
https://github.com/honsiorovskyi/codeharvester/blob/301b907b32ef9bbdb7099657100fbd3829c3ecc8/src/codeharvester/harvester.py#L107-L150
def replace_requirements(self, infilename, outfile_initial=None): """ Recursively replaces the requirements in the files with the content of the requirements. Returns final temporary file opened for reading. """ infile = open(infilename, 'r') # extract the requirements for this file that were not skipped from the global database _indexes = tuple(z[0] for z in filter(lambda x: x[1] == infilename, enumerate(self.req_parents))) req_paths = tuple(z[1] for z in filter(lambda x: x[0] in _indexes, enumerate(self.req_paths))) req_linenos = tuple(z[1] for z in filter(lambda x: x[0] in _indexes, enumerate(self.req_linenos))) if outfile_initial: outfile = outfile_initial else: outfile = tempfile.TemporaryFile('w+') # write the input file to the output, replacing # the requirement statements with the requirements themselves for i, line in enumerate(infile.readlines()): if i in req_linenos: req_path = req_paths[req_linenos.index(i)] # skip unresolved requirement if not req_path: continue # recursion req_file = self.replace_requirements(req_path) # insert something at cursor position self.insert_requirement(outfile, req_file, req_path) req_file.close() else: outfile.write(line) infile.close() if not outfile_initial: outfile.seek(0) return outfile
[ "def", "replace_requirements", "(", "self", ",", "infilename", ",", "outfile_initial", "=", "None", ")", ":", "infile", "=", "open", "(", "infilename", ",", "'r'", ")", "# extract the requirements for this file that were not skipped from the global database", "_indexes", "=", "tuple", "(", "z", "[", "0", "]", "for", "z", "in", "filter", "(", "lambda", "x", ":", "x", "[", "1", "]", "==", "infilename", ",", "enumerate", "(", "self", ".", "req_parents", ")", ")", ")", "req_paths", "=", "tuple", "(", "z", "[", "1", "]", "for", "z", "in", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "in", "_indexes", ",", "enumerate", "(", "self", ".", "req_paths", ")", ")", ")", "req_linenos", "=", "tuple", "(", "z", "[", "1", "]", "for", "z", "in", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "in", "_indexes", ",", "enumerate", "(", "self", ".", "req_linenos", ")", ")", ")", "if", "outfile_initial", ":", "outfile", "=", "outfile_initial", "else", ":", "outfile", "=", "tempfile", ".", "TemporaryFile", "(", "'w+'", ")", "# write the input file to the output, replacing", "# the requirement statements with the requirements themselves", "for", "i", ",", "line", "in", "enumerate", "(", "infile", ".", "readlines", "(", ")", ")", ":", "if", "i", "in", "req_linenos", ":", "req_path", "=", "req_paths", "[", "req_linenos", ".", "index", "(", "i", ")", "]", "# skip unresolved requirement", "if", "not", "req_path", ":", "continue", "# recursion", "req_file", "=", "self", ".", "replace_requirements", "(", "req_path", ")", "# insert something at cursor position", "self", ".", "insert_requirement", "(", "outfile", ",", "req_file", ",", "req_path", ")", "req_file", ".", "close", "(", ")", "else", ":", "outfile", ".", "write", "(", "line", ")", "infile", ".", "close", "(", ")", "if", "not", "outfile_initial", ":", "outfile", ".", "seek", "(", "0", ")", "return", "outfile" ]
Recursively replaces the requirements in the files with the content of the requirements. Returns final temporary file opened for reading.
[ "Recursively", "replaces", "the", "requirements", "in", "the", "files", "with", "the", "content", "of", "the", "requirements", ".", "Returns", "final", "temporary", "file", "opened", "for", "reading", "." ]
python
train
jayvdb/flake8-putty
flake8_putty/extension.py
https://github.com/jayvdb/flake8-putty/blob/854b2c6daef409974c2f5e9c5acaf0a069b0ff23/flake8_putty/extension.py#L44-L69
def putty_ignore_code(options, code): """Implement pep8 'ignore_code' hook.""" reporter, line_number, offset, text, check = get_reporter_state() try: line = reporter.lines[line_number - 1] except IndexError: line = '' options.ignore = options._orig_ignore options.select = options._orig_select for rule in options.putty_ignore: if rule.match(reporter.filename, line, list(reporter.counters) + [code]): if rule._append_codes: options.ignore = options.ignore + rule.codes else: options.ignore = rule.codes for rule in options.putty_select: if rule.match(reporter.filename, line, list(reporter.counters) + [code]): if rule._append_codes: options.select = options.select + rule.codes else: options.select = rule.codes return ignore_code(options, code)
[ "def", "putty_ignore_code", "(", "options", ",", "code", ")", ":", "reporter", ",", "line_number", ",", "offset", ",", "text", ",", "check", "=", "get_reporter_state", "(", ")", "try", ":", "line", "=", "reporter", ".", "lines", "[", "line_number", "-", "1", "]", "except", "IndexError", ":", "line", "=", "''", "options", ".", "ignore", "=", "options", ".", "_orig_ignore", "options", ".", "select", "=", "options", ".", "_orig_select", "for", "rule", "in", "options", ".", "putty_ignore", ":", "if", "rule", ".", "match", "(", "reporter", ".", "filename", ",", "line", ",", "list", "(", "reporter", ".", "counters", ")", "+", "[", "code", "]", ")", ":", "if", "rule", ".", "_append_codes", ":", "options", ".", "ignore", "=", "options", ".", "ignore", "+", "rule", ".", "codes", "else", ":", "options", ".", "ignore", "=", "rule", ".", "codes", "for", "rule", "in", "options", ".", "putty_select", ":", "if", "rule", ".", "match", "(", "reporter", ".", "filename", ",", "line", ",", "list", "(", "reporter", ".", "counters", ")", "+", "[", "code", "]", ")", ":", "if", "rule", ".", "_append_codes", ":", "options", ".", "select", "=", "options", ".", "select", "+", "rule", ".", "codes", "else", ":", "options", ".", "select", "=", "rule", ".", "codes", "return", "ignore_code", "(", "options", ",", "code", ")" ]
Implement pep8 'ignore_code' hook.
[ "Implement", "pep8", "ignore_code", "hook", "." ]
python
train
slarse/clanimtk
clanimtk/core.py
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/core.py#L252-L270
def _get_back_up_generator(frame_function, *args, **kwargs): """Create a generator for the provided animation function that backs up the cursor after a frame. Assumes that the animation function provides a generator that yields strings of constant width and height. Args: frame_function: A function that returns a FrameGenerator. args: Arguments for frame_function. kwargs: Keyword arguments for frame_function. Returns: a generator that generates backspace/backline characters for the animation func generator. """ lines = next(frame_function(*args, **kwargs)).split('\n') width = len(lines[0]) height = len(lines) if height == 1: return util.BACKSPACE_GEN(width) return util.BACKLINE_GEN(height)
[ "def", "_get_back_up_generator", "(", "frame_function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lines", "=", "next", "(", "frame_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", ".", "split", "(", "'\\n'", ")", "width", "=", "len", "(", "lines", "[", "0", "]", ")", "height", "=", "len", "(", "lines", ")", "if", "height", "==", "1", ":", "return", "util", ".", "BACKSPACE_GEN", "(", "width", ")", "return", "util", ".", "BACKLINE_GEN", "(", "height", ")" ]
Create a generator for the provided animation function that backs up the cursor after a frame. Assumes that the animation function provides a generator that yields strings of constant width and height. Args: frame_function: A function that returns a FrameGenerator. args: Arguments for frame_function. kwargs: Keyword arguments for frame_function. Returns: a generator that generates backspace/backline characters for the animation func generator.
[ "Create", "a", "generator", "for", "the", "provided", "animation", "function", "that", "backs", "up", "the", "cursor", "after", "a", "frame", ".", "Assumes", "that", "the", "animation", "function", "provides", "a", "generator", "that", "yields", "strings", "of", "constant", "width", "and", "height", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/terminal/embed.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/terminal/embed.py#L254-L283
def embed(**kwargs): """Call this to embed IPython at the current point in your program. The first invocation of this will create an :class:`InteractiveShellEmbed` instance and then call it. Consecutive calls just call the already created instance. Here is a simple example:: from IPython import embed a = 10 b = 20 embed('First time') c = 30 d = 40 embed Full customization can be done by passing a :class:`Struct` in as the config argument. """ config = kwargs.get('config') header = kwargs.pop('header', u'') if config is None: config = load_default_config() config.InteractiveShellEmbed = config.TerminalInteractiveShell kwargs['config'] = config global _embedded_shell if _embedded_shell is None: _embedded_shell = InteractiveShellEmbed(**kwargs) _embedded_shell(header=header, stack_depth=2)
[ "def", "embed", "(", "*", "*", "kwargs", ")", ":", "config", "=", "kwargs", ".", "get", "(", "'config'", ")", "header", "=", "kwargs", ".", "pop", "(", "'header'", ",", "u''", ")", "if", "config", "is", "None", ":", "config", "=", "load_default_config", "(", ")", "config", ".", "InteractiveShellEmbed", "=", "config", ".", "TerminalInteractiveShell", "kwargs", "[", "'config'", "]", "=", "config", "global", "_embedded_shell", "if", "_embedded_shell", "is", "None", ":", "_embedded_shell", "=", "InteractiveShellEmbed", "(", "*", "*", "kwargs", ")", "_embedded_shell", "(", "header", "=", "header", ",", "stack_depth", "=", "2", ")" ]
Call this to embed IPython at the current point in your program. The first invocation of this will create an :class:`InteractiveShellEmbed` instance and then call it. Consecutive calls just call the already created instance. Here is a simple example:: from IPython import embed a = 10 b = 20 embed('First time') c = 30 d = 40 embed Full customization can be done by passing a :class:`Struct` in as the config argument.
[ "Call", "this", "to", "embed", "IPython", "at", "the", "current", "point", "in", "your", "program", "." ]
python
test
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L173-L183
def generate_minion_id(): ''' Return only the first element of the hostname from the list of all possible hostnames. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost'
[ "def", "generate_minion_id", "(", ")", ":", "try", ":", "ret", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "_generate_minion_id", "(", ")", ".", "first", "(", ")", ")", "except", "TypeError", ":", "ret", "=", "None", "return", "ret", "or", "'localhost'" ]
Return only the first element of the hostname from the list of all possible hostnames. :return:
[ "Return", "only", "the", "first", "element", "of", "the", "hostname", "from", "the", "list", "of", "all", "possible", "hostnames", "." ]
python
train
PyCQA/astroid
astroid/protocols.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/protocols.py#L402-L439
def _resolve_assignment_parts(parts, assign_path, context): """recursive function to resolve multiple assignments""" assign_path = assign_path[:] index = assign_path.pop(0) for part in parts: assigned = None if isinstance(part, nodes.Dict): # A dictionary in an iterating context try: assigned, _ = part.items[index] except IndexError: return elif hasattr(part, "getitem"): index_node = nodes.Const(index) try: assigned = part.getitem(index_node, context) except (exceptions.AstroidTypeError, exceptions.AstroidIndexError): return if not assigned: return if not assign_path: # we achieved to resolved the assignment path, don't infer the # last part yield assigned elif assigned is util.Uninferable: return else: # we are not yet on the last part of the path search on each # possibly inferred value try: yield from _resolve_assignment_parts( assigned.infer(context), assign_path, context ) except exceptions.InferenceError: return
[ "def", "_resolve_assignment_parts", "(", "parts", ",", "assign_path", ",", "context", ")", ":", "assign_path", "=", "assign_path", "[", ":", "]", "index", "=", "assign_path", ".", "pop", "(", "0", ")", "for", "part", "in", "parts", ":", "assigned", "=", "None", "if", "isinstance", "(", "part", ",", "nodes", ".", "Dict", ")", ":", "# A dictionary in an iterating context", "try", ":", "assigned", ",", "_", "=", "part", ".", "items", "[", "index", "]", "except", "IndexError", ":", "return", "elif", "hasattr", "(", "part", ",", "\"getitem\"", ")", ":", "index_node", "=", "nodes", ".", "Const", "(", "index", ")", "try", ":", "assigned", "=", "part", ".", "getitem", "(", "index_node", ",", "context", ")", "except", "(", "exceptions", ".", "AstroidTypeError", ",", "exceptions", ".", "AstroidIndexError", ")", ":", "return", "if", "not", "assigned", ":", "return", "if", "not", "assign_path", ":", "# we achieved to resolved the assignment path, don't infer the", "# last part", "yield", "assigned", "elif", "assigned", "is", "util", ".", "Uninferable", ":", "return", "else", ":", "# we are not yet on the last part of the path search on each", "# possibly inferred value", "try", ":", "yield", "from", "_resolve_assignment_parts", "(", "assigned", ".", "infer", "(", "context", ")", ",", "assign_path", ",", "context", ")", "except", "exceptions", ".", "InferenceError", ":", "return" ]
recursive function to resolve multiple assignments
[ "recursive", "function", "to", "resolve", "multiple", "assignments" ]
python
train
Cue/scales
src/greplin/scales/flaskhandler.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/flaskhandler.py#L64-L74
def serveInBackground(port, serverName, prefix='/status/'): """Convenience function: spawn a background server thread that will serve HTTP requests to get the status. Returns the thread.""" import flask, threading from wsgiref.simple_server import make_server app = flask.Flask(__name__) registerStatsHandler(app, serverName, prefix) server = threading.Thread(target=make_server('', port, app).serve_forever) server.daemon = True server.start() return server
[ "def", "serveInBackground", "(", "port", ",", "serverName", ",", "prefix", "=", "'/status/'", ")", ":", "import", "flask", ",", "threading", "from", "wsgiref", ".", "simple_server", "import", "make_server", "app", "=", "flask", ".", "Flask", "(", "__name__", ")", "registerStatsHandler", "(", "app", ",", "serverName", ",", "prefix", ")", "server", "=", "threading", ".", "Thread", "(", "target", "=", "make_server", "(", "''", ",", "port", ",", "app", ")", ".", "serve_forever", ")", "server", ".", "daemon", "=", "True", "server", ".", "start", "(", ")", "return", "server" ]
Convenience function: spawn a background server thread that will serve HTTP requests to get the status. Returns the thread.
[ "Convenience", "function", ":", "spawn", "a", "background", "server", "thread", "that", "will", "serve", "HTTP", "requests", "to", "get", "the", "status", ".", "Returns", "the", "thread", "." ]
python
train
abe-winter/pg13-py
pg13/pgmock_dbapi2.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pgmock_dbapi2.py#L195-L201
def call_cur(f): "decorator for opening a connection and passing a cursor to the function" @functools.wraps(f) def f2(self, *args, **kwargs): with self.withcur() as cur: return f(self, cur, *args, **kwargs) return f2
[ "def", "call_cur", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f2", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "withcur", "(", ")", "as", "cur", ":", "return", "f", "(", "self", ",", "cur", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f2" ]
decorator for opening a connection and passing a cursor to the function
[ "decorator", "for", "opening", "a", "connection", "and", "passing", "a", "cursor", "to", "the", "function" ]
python
train
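As a hedged illustration of the call_cur pattern above, the sketch below wires the decorator to a small class whose withcur() yields a cursor; ExampleDB, its sqlite3 backing store, and fetch_one are hypothetical stand-ins, not part of pg13.

import functools
import sqlite3
from contextlib import contextmanager

def call_cur(f):
    "decorator for opening a connection and passing a cursor to the function"
    @functools.wraps(f)
    def f2(self, *args, **kwargs):
        with self.withcur() as cur:
            return f(self, cur, *args, **kwargs)
    return f2

class ExampleDB:
    """Hypothetical wrapper exposing the withcur() context manager the decorator expects."""
    def __init__(self, path=":memory:"):
        self.conn = sqlite3.connect(path)

    @contextmanager
    def withcur(self):
        cur = self.conn.cursor()
        try:
            yield cur
        finally:
            cur.close()

    @call_cur
    def fetch_one(self, cur, query):
        # `cur` is injected by the decorator; callers only pass the SQL string
        cur.execute(query)
        return cur.fetchone()

db = ExampleDB()
print(db.fetch_one("SELECT 1"))  # -> (1,)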
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L3247-L3266
def is_descendant_of_vault(self, id_, vault_id): """Tests if an ``Id`` is a descendant of a vault. arg: id (osid.id.Id): an ``Id`` arg: vault_id (osid.id.Id): the ``Id`` of a vault return: (boolean) - ``true`` if the ``id`` is a descendant of the ``vault_id,`` ``false`` otherwise raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_descendant_of_bin if self._catalog_session is not None: return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id) return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id)
[ "def", "is_descendant_of_vault", "(", "self", ",", "id_", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_descendant_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "is_descendant_of_catalog", "(", "id_", "=", "id_", ",", "catalog_id", "=", "vault_id", ")", "return", "self", ".", "_hierarchy_session", ".", "is_descendant", "(", "id_", "=", "id_", ",", "descendant_id", "=", "vault_id", ")" ]
Tests if an ``Id`` is a descendant of a vault. arg: id (osid.id.Id): an ``Id`` arg: vault_id (osid.id.Id): the ``Id`` of a vault return: (boolean) - ``true`` if the ``id`` is a descendant of the ``vault_id,`` ``false`` otherwise raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` or ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``.
[ "Tests", "if", "an", "Id", "is", "a", "descendant", "of", "a", "vault", "." ]
python
train
hobson/aima
aima/text.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/text.py#L153-L155
def present_results(self, query_text, n=10): "Get results for the query and present them." self.present(self.query(query_text, n))
[ "def", "present_results", "(", "self", ",", "query_text", ",", "n", "=", "10", ")", ":", "self", ".", "present", "(", "self", ".", "query", "(", "query_text", ",", "n", ")", ")" ]
Get results for the query and present them.
[ "Get", "results", "for", "the", "query", "and", "present", "them", "." ]
python
valid
hazelcast/hazelcast-remote-controller
python-controller/hzrc/RemoteController.py
https://github.com/hazelcast/hazelcast-remote-controller/blob/41b9e7d2d722b69ff79642eb34b702c9a6087635/python-controller/hzrc/RemoteController.py#L366-L373
def resumeMember(self, clusterId, memberId): """ Parameters: - clusterId - memberId """ self.send_resumeMember(clusterId, memberId) return self.recv_resumeMember()
[ "def", "resumeMember", "(", "self", ",", "clusterId", ",", "memberId", ")", ":", "self", ".", "send_resumeMember", "(", "clusterId", ",", "memberId", ")", "return", "self", ".", "recv_resumeMember", "(", ")" ]
Parameters: - clusterId - memberId
[ "Parameters", ":", "-", "clusterId", "-", "memberId" ]
python
train
saltstack/salt
salt/scripts.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/scripts.py#L552-L563
def salt_support(): ''' Run Salt Support that collects system data, logs etc for debug and support purposes. :return: ''' import salt.cli.support.collector if '' in sys.path: sys.path.remove('') client = salt.cli.support.collector.SaltSupport() _install_signal_handlers(client) client.run()
[ "def", "salt_support", "(", ")", ":", "import", "salt", ".", "cli", ".", "support", ".", "collector", "if", "''", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "remove", "(", "''", ")", "client", "=", "salt", ".", "cli", ".", "support", ".", "collector", ".", "SaltSupport", "(", ")", "_install_signal_handlers", "(", "client", ")", "client", ".", "run", "(", ")" ]
Run Salt Support that collects system data, logs etc for debug and support purposes. :return:
[ "Run", "Salt", "Support", "that", "collects", "system", "data", "logs", "etc", "for", "debug", "and", "support", "purposes", ".", ":", "return", ":" ]
python
train
skorch-dev/skorch
examples/nuclei_image_segmentation/utils.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/examples/nuclei_image_segmentation/utils.py#L42-L62
def plot_mask_cell(true_mask, predicted_mask, cell, suffix, ax1, ax2, ax3, padding=16): """Plots a single cell with its true mask and predicted mask""" for ax in [ax1, ax2, ax3]: ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax1.imshow(true_mask[padding:-padding, padding:-padding], cmap='viridis') ax1.set_title('True Mask - {}'.format(suffix)) ax2.imshow( predicted_mask[padding:-padding, padding:-padding], cmap='viridis') ax2.set_title('Predicted Mask - {}'.format(suffix)) ax3.imshow(convert_cell_to_img(cell, padding=padding)) ax3.set_title('Image - {}'.format(suffix)) return ax1, ax2, ax3
[ "def", "plot_mask_cell", "(", "true_mask", ",", "predicted_mask", ",", "cell", ",", "suffix", ",", "ax1", ",", "ax2", ",", "ax3", ",", "padding", "=", "16", ")", ":", "for", "ax", "in", "[", "ax1", ",", "ax2", ",", "ax3", "]", ":", "ax", ".", "grid", "(", "False", ")", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "ax1", ".", "imshow", "(", "true_mask", "[", "padding", ":", "-", "padding", ",", "padding", ":", "-", "padding", "]", ",", "cmap", "=", "'viridis'", ")", "ax1", ".", "set_title", "(", "'True Mask - {}'", ".", "format", "(", "suffix", ")", ")", "ax2", ".", "imshow", "(", "predicted_mask", "[", "padding", ":", "-", "padding", ",", "padding", ":", "-", "padding", "]", ",", "cmap", "=", "'viridis'", ")", "ax2", ".", "set_title", "(", "'Predicted Mask - {}'", ".", "format", "(", "suffix", ")", ")", "ax3", ".", "imshow", "(", "convert_cell_to_img", "(", "cell", ",", "padding", "=", "padding", ")", ")", "ax3", ".", "set_title", "(", "'Image - {}'", ".", "format", "(", "suffix", ")", ")", "return", "ax1", ",", "ax2", ",", "ax3" ]
Plots a single cell with its true mask and predicted mask
[ "Plots", "a", "single", "cell", "with", "its", "true", "mask", "and", "predicted", "mask" ]
python
train
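A hedged sketch of how the plot_mask_cell helper above might be called; the import path, the array shapes, and the matplotlib layout are assumptions for illustration only, since the record does not show what convert_cell_to_img expects.

import numpy as np
import matplotlib.pyplot as plt
from utils import plot_mask_cell  # assumed module path for the helper above

true_mask = np.zeros((128, 128))        # placeholder ground-truth mask
predicted_mask = np.zeros((128, 128))   # placeholder predicted mask
cell = np.zeros((3, 128, 128))          # placeholder cell image; exact expected shape not shown in the record

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
plot_mask_cell(true_mask, predicted_mask, cell, "cell 0", ax1, ax2, ax3, padding=16)
plt.show()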
src-d/modelforge
modelforge/meta.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/meta.py#L73-L95
def extract_model_meta(base_meta: dict, extra_meta: dict, model_url: str) -> dict: """ Merge the metadata from the backend and the extra metadata into a dict which is suitable for \ `index.json`. :param base_meta: tree["meta"] :class:`dict` containing data from the backend. :param extra_meta: dict containing data from the user, similar to `template_meta.json`. :param model_url: public URL of the model. :return: converted dict. """ meta = {"default": {"default": base_meta["uuid"], "description": base_meta["description"], "code": extra_meta["code"]}} del base_meta["model"] del base_meta["uuid"] meta["model"] = base_meta meta["model"].update({k: extra_meta[k] for k in ("code", "datasets", "references", "tags", "extra")}) response = requests.get(model_url, stream=True) meta["model"]["size"] = humanize.naturalsize(int(response.headers["content-length"])) meta["model"]["url"] = model_url meta["model"]["created_at"] = format_datetime(meta["model"]["created_at"]) return meta
[ "def", "extract_model_meta", "(", "base_meta", ":", "dict", ",", "extra_meta", ":", "dict", ",", "model_url", ":", "str", ")", "->", "dict", ":", "meta", "=", "{", "\"default\"", ":", "{", "\"default\"", ":", "base_meta", "[", "\"uuid\"", "]", ",", "\"description\"", ":", "base_meta", "[", "\"description\"", "]", ",", "\"code\"", ":", "extra_meta", "[", "\"code\"", "]", "}", "}", "del", "base_meta", "[", "\"model\"", "]", "del", "base_meta", "[", "\"uuid\"", "]", "meta", "[", "\"model\"", "]", "=", "base_meta", "meta", "[", "\"model\"", "]", ".", "update", "(", "{", "k", ":", "extra_meta", "[", "k", "]", "for", "k", "in", "(", "\"code\"", ",", "\"datasets\"", ",", "\"references\"", ",", "\"tags\"", ",", "\"extra\"", ")", "}", ")", "response", "=", "requests", ".", "get", "(", "model_url", ",", "stream", "=", "True", ")", "meta", "[", "\"model\"", "]", "[", "\"size\"", "]", "=", "humanize", ".", "naturalsize", "(", "int", "(", "response", ".", "headers", "[", "\"content-length\"", "]", ")", ")", "meta", "[", "\"model\"", "]", "[", "\"url\"", "]", "=", "model_url", "meta", "[", "\"model\"", "]", "[", "\"created_at\"", "]", "=", "format_datetime", "(", "meta", "[", "\"model\"", "]", "[", "\"created_at\"", "]", ")", "return", "meta" ]
Merge the metadata from the backend and the extra metadata into a dict which is suitable for \ `index.json`. :param base_meta: tree["meta"] :class:`dict` containing data from the backend. :param extra_meta: dict containing data from the user, similar to `template_meta.json`. :param model_url: public URL of the model. :return: converted dict.
[ "Merge", "the", "metadata", "from", "the", "backend", "and", "the", "extra", "metadata", "into", "a", "dict", "which", "is", "suitable", "for", "\\", "index", ".", "json", "." ]
python
train
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L1544-L1557
def cherry_pick(self, branch, **kwargs): """Cherry-pick a commit into a branch. Args: branch (str): Name of target branch **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCherryPickError: If the cherry-pick could not be performed """ path = '%s/%s/cherry_pick' % (self.manager.path, self.get_id()) post_data = {'branch': branch} self.manager.gitlab.http_post(path, post_data=post_data, **kwargs)
[ "def", "cherry_pick", "(", "self", ",", "branch", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/cherry_pick'", "%", "(", "self", ".", "manager", ".", "path", ",", "self", ".", "get_id", "(", ")", ")", "post_data", "=", "{", "'branch'", ":", "branch", "}", "self", ".", "manager", ".", "gitlab", ".", "http_post", "(", "path", ",", "post_data", "=", "post_data", ",", "*", "*", "kwargs", ")" ]
Cherry-pick a commit into a branch. Args: branch (str): Name of target branch **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCherryPickError: If the cherry-pick could not be performed
[ "Cherry", "-", "pick", "a", "commit", "into", "a", "branch", "." ]
python
train
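A short, hedged usage sketch for cherry_pick via python-gitlab; the host, token, project path, commit SHA, and branch name below are placeholders, not values from the record.

import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="TOKEN")  # placeholder host and token
project = gl.projects.get("group/project")       # placeholder project path
commit = project.commits.get("6b2257ea")          # placeholder commit SHA
commit.cherry_pick(branch="release-1.0")          # cherry-picks the commit onto the target branch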
lpantano/seqcluster
seqcluster/libs/inputs.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/inputs.py#L27-L55
def parse_ma_file(seq_obj, in_file): """ read seqs.ma file and create dict with sequence object """ name = "" index = 1 total = defaultdict(int) with open(in_file) as handle_in: line = handle_in.readline().strip() cols = line.split("\t") samples = cols[2:] for line in handle_in: line = line.strip() cols = line.split("\t") name = int(cols[0].replace("seq_", "")) seq = cols[1] exp = {} for i in range(len(samples)): exp[samples[i]] = int(cols[i+2]) total[samples[i]] += int(cols[i+2]) index = index+1 if name in seq_obj: seq_obj[name].set_freq(exp) seq_obj[name].set_seq(seq) # new_s = sequence(seq, exp, index) # seq_l[name] = new_s seq_obj = _normalize_seqs(seq_obj, total) return seq_obj, total, index
[ "def", "parse_ma_file", "(", "seq_obj", ",", "in_file", ")", ":", "name", "=", "\"\"", "index", "=", "1", "total", "=", "defaultdict", "(", "int", ")", "with", "open", "(", "in_file", ")", "as", "handle_in", ":", "line", "=", "handle_in", ".", "readline", "(", ")", ".", "strip", "(", ")", "cols", "=", "line", ".", "split", "(", "\"\\t\"", ")", "samples", "=", "cols", "[", "2", ":", "]", "for", "line", "in", "handle_in", ":", "line", "=", "line", ".", "strip", "(", ")", "cols", "=", "line", ".", "split", "(", "\"\\t\"", ")", "name", "=", "int", "(", "cols", "[", "0", "]", ".", "replace", "(", "\"seq_\"", ",", "\"\"", ")", ")", "seq", "=", "cols", "[", "1", "]", "exp", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "samples", ")", ")", ":", "exp", "[", "samples", "[", "i", "]", "]", "=", "int", "(", "cols", "[", "i", "+", "2", "]", ")", "total", "[", "samples", "[", "i", "]", "]", "+=", "int", "(", "cols", "[", "i", "+", "2", "]", ")", "index", "=", "index", "+", "1", "if", "name", "in", "seq_obj", ":", "seq_obj", "[", "name", "]", ".", "set_freq", "(", "exp", ")", "seq_obj", "[", "name", "]", ".", "set_seq", "(", "seq", ")", "# new_s = sequence(seq, exp, index)", "# seq_l[name] = new_s", "seq_obj", "=", "_normalize_seqs", "(", "seq_obj", ",", "total", ")", "return", "seq_obj", ",", "total", ",", "index" ]
read seqs.ma file and create dict with sequence object
[ "read", "seqs", ".", "ma", "file", "and", "create", "dict", "with", "sequence", "object" ]
python
train
Esri/ArcREST
src/arcrest/ags/layer.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/layer.py#L1344-L1347
def asJSON(self): """ returns the data source as JSON """ self._json = json.dumps(self.asDictionary) return self._json
[ "def", "asJSON", "(", "self", ")", ":", "self", ".", "_json", "=", "json", ".", "dumps", "(", "self", ".", "asDictionary", ")", "return", "self", ".", "_json" ]
returns the data source as JSON
[ "returns", "the", "data", "source", "as", "JSON" ]
python
train
saltstack/salt
salt/modules/pkgin.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pkgin.py#L645-L660
def file_list(package, **kwargs): ''' List the files that belong to a package. CLI Examples: .. code-block:: bash salt '*' pkg.file_list nginx ''' ret = file_dict(package) files = [] for pkg_files in six.itervalues(ret['files']): files.extend(pkg_files) ret['files'] = files return ret
[ "def", "file_list", "(", "package", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "file_dict", "(", "package", ")", "files", "=", "[", "]", "for", "pkg_files", "in", "six", ".", "itervalues", "(", "ret", "[", "'files'", "]", ")", ":", "files", ".", "extend", "(", "pkg_files", ")", "ret", "[", "'files'", "]", "=", "files", "return", "ret" ]
List the files that belong to a package. CLI Examples: .. code-block:: bash salt '*' pkg.file_list nginx
[ "List", "the", "files", "that", "belong", "to", "a", "package", "." ]
python
train
delph-in/pydelphin
delphin/mrs/eds.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/eds.py#L224-L258
def from_triples(cls, triples): """ Decode triples, as from :meth:`to_triples`, into an Eds object. """ nids, nd, edges = [], {}, [] for src, rel, tgt in triples: if src not in nd: nids.append(src) nd[src] = {'pred': None, 'lnk': None, 'carg': None, 'si': []} if rel == 'predicate': nd[src]['pred'] = Pred.surface_or_abstract(tgt) elif rel == 'lnk': cfrom, cto = tgt.strip('"<>').split(':') nd[src]['lnk'] = Lnk.charspan(int(cfrom), int(cto)) elif rel == 'carg': if (tgt[0], tgt[-1]) == ('"', '"'): tgt = tgt[1:-1] nd[src]['carg'] = tgt elif rel == 'type': nd[src]['si'].append((CVARSORT, tgt)) elif rel.islower(): nd[src]['si'].append((rel, tgt)) else: edges.append((src, rel, tgt)) nodes = [ Node( nodeid=nid, pred=nd[nid]['pred'], sortinfo=nd[nid]['si'], lnk=nd[nid]['lnk'], carg=nd[nid]['carg'] ) for nid in nids ] top = nids[0] if nids else None return cls(top=top, nodes=nodes, edges=edges)
[ "def", "from_triples", "(", "cls", ",", "triples", ")", ":", "nids", ",", "nd", ",", "edges", "=", "[", "]", ",", "{", "}", ",", "[", "]", "for", "src", ",", "rel", ",", "tgt", "in", "triples", ":", "if", "src", "not", "in", "nd", ":", "nids", ".", "append", "(", "src", ")", "nd", "[", "src", "]", "=", "{", "'pred'", ":", "None", ",", "'lnk'", ":", "None", ",", "'carg'", ":", "None", ",", "'si'", ":", "[", "]", "}", "if", "rel", "==", "'predicate'", ":", "nd", "[", "src", "]", "[", "'pred'", "]", "=", "Pred", ".", "surface_or_abstract", "(", "tgt", ")", "elif", "rel", "==", "'lnk'", ":", "cfrom", ",", "cto", "=", "tgt", ".", "strip", "(", "'\"<>'", ")", ".", "split", "(", "':'", ")", "nd", "[", "src", "]", "[", "'lnk'", "]", "=", "Lnk", ".", "charspan", "(", "int", "(", "cfrom", ")", ",", "int", "(", "cto", ")", ")", "elif", "rel", "==", "'carg'", ":", "if", "(", "tgt", "[", "0", "]", ",", "tgt", "[", "-", "1", "]", ")", "==", "(", "'\"'", ",", "'\"'", ")", ":", "tgt", "=", "tgt", "[", "1", ":", "-", "1", "]", "nd", "[", "src", "]", "[", "'carg'", "]", "=", "tgt", "elif", "rel", "==", "'type'", ":", "nd", "[", "src", "]", "[", "'si'", "]", ".", "append", "(", "(", "CVARSORT", ",", "tgt", ")", ")", "elif", "rel", ".", "islower", "(", ")", ":", "nd", "[", "src", "]", "[", "'si'", "]", ".", "append", "(", "(", "rel", ",", "tgt", ")", ")", "else", ":", "edges", ".", "append", "(", "(", "src", ",", "rel", ",", "tgt", ")", ")", "nodes", "=", "[", "Node", "(", "nodeid", "=", "nid", ",", "pred", "=", "nd", "[", "nid", "]", "[", "'pred'", "]", ",", "sortinfo", "=", "nd", "[", "nid", "]", "[", "'si'", "]", ",", "lnk", "=", "nd", "[", "nid", "]", "[", "'lnk'", "]", ",", "carg", "=", "nd", "[", "nid", "]", "[", "'carg'", "]", ")", "for", "nid", "in", "nids", "]", "top", "=", "nids", "[", "0", "]", "if", "nids", "else", "None", "return", "cls", "(", "top", "=", "top", ",", "nodes", "=", "nodes", ",", "edges", "=", "edges", ")" ]
Decode triples, as from :meth:`to_triples`, into an Eds object.
[ "Decode", "triples", "as", "from", ":", "meth", ":", "to_triples", "into", "an", "Eds", "object", "." ]
python
train
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L135-L167
def register_with_google(full_name, email, oauth2_token, lang=None, timezone=None): """Register a new Todoist account by linking a Google account. :param full_name: The user's full name. :type full_name: str :param email: The user's email address. :type email: str :param oauth2_token: The oauth2 token associated with the email. :type oauth2_token: str :param lang: The user's language. :type lang: str :param timezone: The user's timezone. :type timezone: str :return: The Todoist user. :rtype: :class:`pytodoist.todoist.User` .. note:: It is up to you to obtain the valid oauth2 token. >>> from pytodoist import todoist >>> oauth2_token = 'oauth2_token' >>> user = todoist.register_with_google('John Doe', '[email protected]', ... oauth2_token) >>> print(user.full_name) John Doe """ response = API.login_with_google(email, oauth2_token, auto_signup=1, full_name=full_name, lang=lang, timezone=timezone) _fail_if_contains_errors(response) user_json = response.json() user = User(user_json) return user
[ "def", "register_with_google", "(", "full_name", ",", "email", ",", "oauth2_token", ",", "lang", "=", "None", ",", "timezone", "=", "None", ")", ":", "response", "=", "API", ".", "login_with_google", "(", "email", ",", "oauth2_token", ",", "auto_signup", "=", "1", ",", "full_name", "=", "full_name", ",", "lang", "=", "lang", ",", "timezone", "=", "timezone", ")", "_fail_if_contains_errors", "(", "response", ")", "user_json", "=", "response", ".", "json", "(", ")", "user", "=", "User", "(", "user_json", ")", "return", "user" ]
Register a new Todoist account by linking a Google account. :param full_name: The user's full name. :type full_name: str :param email: The user's email address. :type email: str :param oauth2_token: The oauth2 token associated with the email. :type oauth2_token: str :param lang: The user's language. :type lang: str :param timezone: The user's timezone. :type timezone: str :return: The Todoist user. :rtype: :class:`pytodoist.todoist.User` .. note:: It is up to you to obtain the valid oauth2 token. >>> from pytodoist import todoist >>> oauth2_token = 'oauth2_token' >>> user = todoist.register_with_google('John Doe', '[email protected]', ... oauth2_token) >>> print(user.full_name) John Doe
[ "Register", "a", "new", "Todoist", "account", "by", "linking", "a", "Google", "account", "." ]
python
train
django-treebeard/django-treebeard
treebeard/models.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/models.py#L317-L329
def get_next_sibling(self): """ :returns: The next node's sibling, or None if it was the rightmost sibling. """ siblings = self.get_siblings() ids = [obj.pk for obj in siblings] if self.pk in ids: idx = ids.index(self.pk) if idx < len(siblings) - 1: return siblings[idx + 1]
[ "def", "get_next_sibling", "(", "self", ")", ":", "siblings", "=", "self", ".", "get_siblings", "(", ")", "ids", "=", "[", "obj", ".", "pk", "for", "obj", "in", "siblings", "]", "if", "self", ".", "pk", "in", "ids", ":", "idx", "=", "ids", ".", "index", "(", "self", ".", "pk", ")", "if", "idx", "<", "len", "(", "siblings", ")", "-", "1", ":", "return", "siblings", "[", "idx", "+", "1", "]" ]
:returns: The next node's sibling, or None if it was the rightmost sibling.
[ ":", "returns", ":" ]
python
train
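A hedged illustration of get_next_sibling on a django-treebeard node; the Category model is hypothetical and the snippet assumes a configured Django project with migrations applied for treebeard's MP_Node.

from django.db import models
from treebeard.mp_tree import MP_Node

class Category(MP_Node):  # hypothetical model for illustration
    name = models.CharField(max_length=50)

# assuming a configured Django project and a migrated database
root = Category.add_root(name="root")
first = root.add_child(name="first")
second = root.add_child(name="second")

first.get_next_sibling()   # -> the "second" node
second.get_next_sibling()  # -> None, since it is the rightmost sibling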
IBMStreams/pypi.streamsx
streamsx/topology/state.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/state.py#L177-L190
def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS): """Define an operator-driven consistent region configuration. The source operator triggers drain and checkpoint cycles for the region. Args: drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration. """ return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)
[ "def", "operator_driven", "(", "drain_timeout", "=", "_DEFAULT_DRAIN", ",", "reset_timeout", "=", "_DEFAULT_RESET", ",", "max_consecutive_attempts", "=", "_DEFAULT_ATTEMPTS", ")", ":", "return", "ConsistentRegionConfig", "(", "trigger", "=", "ConsistentRegionConfig", ".", "Trigger", ".", "OPERATOR_DRIVEN", ",", "drain_timeout", "=", "drain_timeout", ",", "reset_timeout", "=", "reset_timeout", ",", "max_consecutive_attempts", "=", "max_consecutive_attempts", ")" ]
Define an operator-driven consistent region configuration. The source operator triggers drain and checkpoint cycles for the region. Args: drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration.
[ "Define", "an", "operator", "-", "driven", "consistent", "region", "configuration", ".", "The", "source", "operator", "triggers", "drain", "and", "checkpoint", "cycles", "for", "the", "region", "." ]
python
train
noahbenson/pimms
pimms/immutable.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L444-L461
def param(f): ''' The @param decorator, usable in an immutable class (see immutable), specifies that the following function is actually a transformation on an input parameter; the parameter is required, and is set to the value returned by the function decorated by the parameter; i.e., if you decorate the function abc with @param, then imm.abc = x will result in imm's abc attribute being set to the value of type(imm).abc(x). ''' (args, varargs, kwargs, dflts) = getargspec_py27like(f) if varargs is not None or kwargs is not None or dflts: raise ValueError('Params may not accept variable, variadic keyword, or default arguments') if len(args) != 1: raise ValueError('Parameter transformation functions must take exactly one argument') f._pimms_immutable_data_ = {} f._pimms_immutable_data_['is_param'] = True f._pimms_immutable_data_['name'] = f.__name__ f = staticmethod(f) return f
[ "def", "param", "(", "f", ")", ":", "(", "args", ",", "varargs", ",", "kwargs", ",", "dflts", ")", "=", "getargspec_py27like", "(", "f", ")", "if", "varargs", "is", "not", "None", "or", "kwargs", "is", "not", "None", "or", "dflts", ":", "raise", "ValueError", "(", "'Params may not accept variable, variadic keyword, or default arguments'", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Parameter transformation functions must take exactly one argument'", ")", "f", ".", "_pimms_immutable_data_", "=", "{", "}", "f", ".", "_pimms_immutable_data_", "[", "'is_param'", "]", "=", "True", "f", ".", "_pimms_immutable_data_", "[", "'name'", "]", "=", "f", ".", "__name__", "f", "=", "staticmethod", "(", "f", ")", "return", "f" ]
The @param decorator, usable in an immutable class (see immutable), specifies that the following function is actually a transformation on an input parameter; the parameter is required, and is set to the value returned by the function decorated by the parameter; i.e., if you decorate the function abc with @param, then imm.abc = x will result in imm's abc attribute being set to the value of type(imm).abc(x).
[ "The" ]
python
train
MisterWil/abodepy
abodepy/__init__.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L330-L350
def set_setting(self, setting, value, area='1', validate_value=True): """Set an abode system setting to a given value.""" setting = setting.lower() if setting not in CONST.ALL_SETTINGS: raise AbodeException(ERROR.INVALID_SETTING, CONST.ALL_SETTINGS) if setting in CONST.PANEL_SETTINGS: url = CONST.SETTINGS_URL data = self._panel_settings(setting, value, validate_value) elif setting in CONST.AREA_SETTINGS: url = CONST.AREAS_URL data = self._area_settings(area, setting, value, validate_value) elif setting in CONST.SOUND_SETTINGS: url = CONST.SOUNDS_URL data = self._sound_settings(area, setting, value, validate_value) elif setting in CONST.SIREN_SETTINGS: url = CONST.SIREN_URL data = self._siren_settings(setting, value, validate_value) return self.send_request(method="put", url=url, data=data)
[ "def", "set_setting", "(", "self", ",", "setting", ",", "value", ",", "area", "=", "'1'", ",", "validate_value", "=", "True", ")", ":", "setting", "=", "setting", ".", "lower", "(", ")", "if", "setting", "not", "in", "CONST", ".", "ALL_SETTINGS", ":", "raise", "AbodeException", "(", "ERROR", ".", "INVALID_SETTING", ",", "CONST", ".", "ALL_SETTINGS", ")", "if", "setting", "in", "CONST", ".", "PANEL_SETTINGS", ":", "url", "=", "CONST", ".", "SETTINGS_URL", "data", "=", "self", ".", "_panel_settings", "(", "setting", ",", "value", ",", "validate_value", ")", "elif", "setting", "in", "CONST", ".", "AREA_SETTINGS", ":", "url", "=", "CONST", ".", "AREAS_URL", "data", "=", "self", ".", "_area_settings", "(", "area", ",", "setting", ",", "value", ",", "validate_value", ")", "elif", "setting", "in", "CONST", ".", "SOUND_SETTINGS", ":", "url", "=", "CONST", ".", "SOUNDS_URL", "data", "=", "self", ".", "_sound_settings", "(", "area", ",", "setting", ",", "value", ",", "validate_value", ")", "elif", "setting", "in", "CONST", ".", "SIREN_SETTINGS", ":", "url", "=", "CONST", ".", "SIREN_URL", "data", "=", "self", ".", "_siren_settings", "(", "setting", ",", "value", ",", "validate_value", ")", "return", "self", ".", "send_request", "(", "method", "=", "\"put\"", ",", "url", "=", "url", ",", "data", "=", "data", ")" ]
Set an abode system setting to a given value.
[ "Set", "an", "abode", "system", "setting", "to", "a", "given", "value", "." ]
python
train
openmicroscopy/omero-marshal
omero_marshal/legacy/affinetransform.py
https://github.com/openmicroscopy/omero-marshal/blob/0f427927b471a19f14b434452de88e16d621c487/omero_marshal/legacy/affinetransform.py#L38-L83
def convert_svg_transform(self, transform): """ Converts a string representing a SVG transform into AffineTransform fields. See https://www.w3.org/TR/SVG/coords.html#TransformAttribute for the specification of the transform strings. skewX and skewY are not supported. Raises: ValueError: If transform is not a valid and supported SVG transform. """ tr, args = transform[:-1].split('(') a = map(float, args.split(' ')) # Handle various string tranformations if tr == 'matrix': pass elif tr == 'translate': a = [1.0, 0.0, 0.0, 1.0, a[0], a[1] if len(a) > 1 else 0.0] elif tr == 'scale': a = [a[0], 0.0, 0.0, a[-1], 0.0, 0.0] elif tr == 'rotate': x = a[1] if len(a) > 1 else 0.0 y = a[2] if len(a) > 1 else 0.0 rad = radians(a[0]) s = sin(rad) c = cos(rad) a = [ c, s, -s, c, x * (1 - c) + y * s, -x * s + y * (1 - c), ] else: raise ValueError('Unknown transformation "%s"' % transform) self._svg_transform = transform self._a00 = a[0] self._a10 = a[1] self._a01 = a[2] self._a11 = a[3] self._a02 = a[4] self._a12 = a[5]
[ "def", "convert_svg_transform", "(", "self", ",", "transform", ")", ":", "tr", ",", "args", "=", "transform", "[", ":", "-", "1", "]", ".", "split", "(", "'('", ")", "a", "=", "map", "(", "float", ",", "args", ".", "split", "(", "' '", ")", ")", "# Handle various string tranformations", "if", "tr", "==", "'matrix'", ":", "pass", "elif", "tr", "==", "'translate'", ":", "a", "=", "[", "1.0", ",", "0.0", ",", "0.0", ",", "1.0", ",", "a", "[", "0", "]", ",", "a", "[", "1", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "]", "elif", "tr", "==", "'scale'", ":", "a", "=", "[", "a", "[", "0", "]", ",", "0.0", ",", "0.0", ",", "a", "[", "-", "1", "]", ",", "0.0", ",", "0.0", "]", "elif", "tr", "==", "'rotate'", ":", "x", "=", "a", "[", "1", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "y", "=", "a", "[", "2", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "rad", "=", "radians", "(", "a", "[", "0", "]", ")", "s", "=", "sin", "(", "rad", ")", "c", "=", "cos", "(", "rad", ")", "a", "=", "[", "c", ",", "s", ",", "-", "s", ",", "c", ",", "x", "*", "(", "1", "-", "c", ")", "+", "y", "*", "s", ",", "-", "x", "*", "s", "+", "y", "*", "(", "1", "-", "c", ")", ",", "]", "else", ":", "raise", "ValueError", "(", "'Unknown transformation \"%s\"'", "%", "transform", ")", "self", ".", "_svg_transform", "=", "transform", "self", ".", "_a00", "=", "a", "[", "0", "]", "self", ".", "_a10", "=", "a", "[", "1", "]", "self", ".", "_a01", "=", "a", "[", "2", "]", "self", ".", "_a11", "=", "a", "[", "3", "]", "self", ".", "_a02", "=", "a", "[", "4", "]", "self", ".", "_a12", "=", "a", "[", "5", "]" ]
Converts a string representing a SVG transform into AffineTransform fields. See https://www.w3.org/TR/SVG/coords.html#TransformAttribute for the specification of the transform strings. skewX and skewY are not supported. Raises: ValueError: If transform is not a valid and supported SVG transform.
[ "Converts", "a", "string", "representing", "a", "SVG", "transform", "into", "AffineTransform", "fields", ".", "See", "https", ":", "//", "www", ".", "w3", ".", "org", "/", "TR", "/", "SVG", "/", "coords", ".", "html#TransformAttribute", "for", "the", "specification", "of", "the", "transform", "strings", ".", "skewX", "and", "skewY", "are", "not", "supported", ".", "Raises", ":", "ValueError", ":", "If", "transform", "is", "not", "a", "valid", "and", "supported", "SVG", "transform", "." ]
python
train
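To make the rotate branch of convert_svg_transform concrete, the snippet below replays its arithmetic for a sample transform, rotate(90 10 10); the sample angle and centre are arbitrary.

from math import radians, sin, cos

angle, x, y = 90.0, 10.0, 10.0          # rotate(90 10 10): 90 degrees about (10, 10)
rad = radians(angle)
s, c = sin(rad), cos(rad)
a = [c, s, -s, c, x * (1 - c) + y * s, -x * s + y * (1 - c)]
# a is approximately [0.0, 1.0, -1.0, 0.0, 20.0, 0.0],
# i.e. the same transform expressed as matrix(0 1 -1 0 20 0)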
nitmir/django-cas-server
cas_server/cas.py
https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/cas.py#L185-L188
def verify_ticket(self, ticket): """Verifies CAS 2.0+/3.0+ XML-based authentication ticket and returns extended attributes""" (response, charset) = self.get_verification_response(ticket) return self.verify_response(response, charset)
[ "def", "verify_ticket", "(", "self", ",", "ticket", ")", ":", "(", "response", ",", "charset", ")", "=", "self", ".", "get_verification_response", "(", "ticket", ")", "return", "self", ".", "verify_response", "(", "response", ",", "charset", ")" ]
Verifies CAS 2.0+/3.0+ XML-based authentication ticket and returns extended attributes
[ "Verifies", "CAS", "2", ".", "0", "+", "/", "3", ".", "0", "+", "XML", "-", "based", "authentication", "ticket", "and", "returns", "extended", "attributes" ]
python
train
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/categories.py
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L137-L234
def calc_cat_clust_order(net, inst_rc): ''' cluster category subset of data ''' from .__init__ import Network from copy import deepcopy from . import calc_clust, run_filter inst_keys = list(net.dat['node_info'][inst_rc].keys()) all_cats = [x for x in inst_keys if 'cat-' in x] if len(all_cats) > 0: for inst_name_cat in all_cats: tmp_name = 'dict_' + inst_name_cat.replace('-', '_') dict_cat = net.dat['node_info'][inst_rc][tmp_name] unordered_cats = dict_cat.keys() ordered_cats = order_categories(unordered_cats) # this is the ordering of the columns based on their category, not # including their clustering ordering within category all_cat_orders = [] tmp_names_list = [] for inst_cat in ordered_cats: inst_nodes = dict_cat[inst_cat] tmp_names_list.extend(inst_nodes) # cat_net = deepcopy(Network()) # cat_net.dat['mat'] = deepcopy(net.dat['mat']) # cat_net.dat['nodes'] = deepcopy(net.dat['nodes']) # cat_df = cat_net.dat_to_df() # sub_df = {} # if inst_rc == 'col': # sub_df['mat'] = cat_df['mat'][inst_nodes] # elif inst_rc == 'row': # # need to transpose df # cat_df['mat'] = cat_df['mat'].transpose() # sub_df['mat'] = cat_df['mat'][inst_nodes] # sub_df['mat'] = sub_df['mat'].transpose() # # filter matrix before clustering # ################################### # threshold = 0.0001 # sub_df = run_filter.df_filter_row_sum(sub_df, threshold) # sub_df = run_filter.df_filter_col_sum(sub_df, threshold) # # load back to dat # cat_net.df_to_dat(sub_df) # cat_mat_shape = cat_net.dat['mat'].shape # print('***************') # try: # if cat_mat_shape[0]>1 and cat_mat_shape[1] > 1 and all_are_numbers == False: # calc_clust.cluster_row_and_col(cat_net, 'cos') # inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust'] # else: # inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc]))) # except: # inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc]))) # prev_order_len = len(all_cat_orders) # # add prev order length to the current order number # inst_cat_order = [i + prev_order_len for i in inst_cat_order] # all_cat_orders.extend(inst_cat_order) # # generate ordered list of row/col names, which will be used to # # assign the order to specific nodes # names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders, # tmp_names_list))] names_clust_list = tmp_names_list # calc category-cluster order final_order = [] for i in range(len(net.dat['nodes'][inst_rc])): inst_node_name = net.dat['nodes'][inst_rc][i] inst_node_num = names_clust_list.index(inst_node_name) final_order.append(inst_node_num) inst_index_cat = inst_name_cat.replace('-', '_') + '_index' net.dat['node_info'][inst_rc][inst_index_cat] = final_order
[ "def", "calc_cat_clust_order", "(", "net", ",", "inst_rc", ")", ":", "from", ".", "__init__", "import", "Network", "from", "copy", "import", "deepcopy", "from", ".", "import", "calc_clust", ",", "run_filter", "inst_keys", "=", "list", "(", "net", ".", "dat", "[", "'node_info'", "]", "[", "inst_rc", "]", ".", "keys", "(", ")", ")", "all_cats", "=", "[", "x", "for", "x", "in", "inst_keys", "if", "'cat-'", "in", "x", "]", "if", "len", "(", "all_cats", ")", ">", "0", ":", "for", "inst_name_cat", "in", "all_cats", ":", "tmp_name", "=", "'dict_'", "+", "inst_name_cat", ".", "replace", "(", "'-'", ",", "'_'", ")", "dict_cat", "=", "net", ".", "dat", "[", "'node_info'", "]", "[", "inst_rc", "]", "[", "tmp_name", "]", "unordered_cats", "=", "dict_cat", ".", "keys", "(", ")", "ordered_cats", "=", "order_categories", "(", "unordered_cats", ")", "# this is the ordering of the columns based on their category, not", "# including their clustering ordering within category", "all_cat_orders", "=", "[", "]", "tmp_names_list", "=", "[", "]", "for", "inst_cat", "in", "ordered_cats", ":", "inst_nodes", "=", "dict_cat", "[", "inst_cat", "]", "tmp_names_list", ".", "extend", "(", "inst_nodes", ")", "# cat_net = deepcopy(Network())", "# cat_net.dat['mat'] = deepcopy(net.dat['mat'])", "# cat_net.dat['nodes'] = deepcopy(net.dat['nodes'])", "# cat_df = cat_net.dat_to_df()", "# sub_df = {}", "# if inst_rc == 'col':", "# sub_df['mat'] = cat_df['mat'][inst_nodes]", "# elif inst_rc == 'row':", "# # need to transpose df", "# cat_df['mat'] = cat_df['mat'].transpose()", "# sub_df['mat'] = cat_df['mat'][inst_nodes]", "# sub_df['mat'] = sub_df['mat'].transpose()", "# # filter matrix before clustering", "# ###################################", "# threshold = 0.0001", "# sub_df = run_filter.df_filter_row_sum(sub_df, threshold)", "# sub_df = run_filter.df_filter_col_sum(sub_df, threshold)", "# # load back to dat", "# cat_net.df_to_dat(sub_df)", "# cat_mat_shape = cat_net.dat['mat'].shape", "# print('***************')", "# try:", "# if cat_mat_shape[0]>1 and cat_mat_shape[1] > 1 and all_are_numbers == False:", "# calc_clust.cluster_row_and_col(cat_net, 'cos')", "# inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust']", "# else:", "# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))", "# except:", "# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))", "# prev_order_len = len(all_cat_orders)", "# # add prev order length to the current order number", "# inst_cat_order = [i + prev_order_len for i in inst_cat_order]", "# all_cat_orders.extend(inst_cat_order)", "# # generate ordered list of row/col names, which will be used to", "# # assign the order to specific nodes", "# names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders,", "# tmp_names_list))]", "names_clust_list", "=", "tmp_names_list", "# calc category-cluster order", "final_order", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", ")", ")", ":", "inst_node_name", "=", "net", ".", "dat", "[", "'nodes'", "]", "[", "inst_rc", "]", "[", "i", "]", "inst_node_num", "=", "names_clust_list", ".", "index", "(", "inst_node_name", ")", "final_order", ".", "append", "(", "inst_node_num", ")", "inst_index_cat", "=", "inst_name_cat", ".", "replace", "(", "'-'", ",", "'_'", ")", "+", "'_index'", "net", ".", "dat", "[", "'node_info'", "]", "[", "inst_rc", "]", "[", "inst_index_cat", "]", "=", "final_order" ]
cluster category subset of data
[ "cluster", "category", "subset", "of", "data" ]
python
train
mdgoldberg/sportsref
sportsref/nfl/boxscores.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/boxscores.py#L224-L236
def over_under(self): """ Returns the over/under for the game as a float, or None if not available. """ doc = self.get_doc() table = doc('table#game_info') giTable = sportsref.utils.parse_info_table(table) if 'over_under' in giTable: ou = giTable['over_under'] return float(ou.split()[0]) else: return None
[ "def", "over_under", "(", "self", ")", ":", "doc", "=", "self", ".", "get_doc", "(", ")", "table", "=", "doc", "(", "'table#game_info'", ")", "giTable", "=", "sportsref", ".", "utils", ".", "parse_info_table", "(", "table", ")", "if", "'over_under'", "in", "giTable", ":", "ou", "=", "giTable", "[", "'over_under'", "]", "return", "float", "(", "ou", ".", "split", "(", ")", "[", "0", "]", ")", "else", ":", "return", "None" ]
Returns the over/under for the game as a float, or None if not available.
[ "Returns", "the", "over", "/", "under", "for", "the", "game", "as", "a", "float", "or", "None", "if", "not", "available", "." ]
python
test
gem/oq-engine
openquake/baselib/node.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L757-L771
def context(fname, node): """ Context manager managing exceptions and adding line number of the current node and name of the current file to the error message. :param fname: the current file being processed :param node: the current node being processed """ try: yield node except Exception: etype, exc, tb = sys.exc_info() msg = 'node %s: %s, line %s of %s' % ( striptag(node.tag), exc, getattr(node, 'lineno', '?'), fname) raise_(etype, msg, tb)
[ "def", "context", "(", "fname", ",", "node", ")", ":", "try", ":", "yield", "node", "except", "Exception", ":", "etype", ",", "exc", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'node %s: %s, line %s of %s'", "%", "(", "striptag", "(", "node", ".", "tag", ")", ",", "exc", ",", "getattr", "(", "node", ",", "'lineno'", ",", "'?'", ")", ",", "fname", ")", "raise_", "(", "etype", ",", "msg", ",", "tb", ")" ]
Context manager managing exceptions and adding line number of the current node and name of the current file to the error message. :param fname: the current file being processed :param node: the current node being processed
[ "Context", "manager", "managing", "exceptions", "and", "adding", "line", "number", "of", "the", "current", "node", "and", "name", "of", "the", "current", "file", "to", "the", "error", "message", "." ]
python
train
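A small, hedged sketch of wrapping per-node processing with the context helper above so failures carry the file name and line number; FakeNode and the file name are illustrative stand-ins, while the import path is taken from the record's url field.

from openquake.baselib.node import context  # path taken from the record above

class FakeNode:
    """Minimal stand-in with the tag and lineno attributes the helper reads."""
    def __init__(self, tag, lineno):
        self.tag = tag
        self.lineno = lineno

fname = "job.xml"                       # placeholder file name
node = FakeNode("sourceModel", 12)      # placeholder node
with context(fname, node):
    raise ValueError("bad attribute")
# re-raised as: node sourceModel: bad attribute, line 12 of job.xml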
apple/turicreate
src/unity/python/turicreate/toolkits/_internal_utils.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L203-L217
def _SGraphFromJsonTree(json_str): """ Convert the Json Tree to SGraph """ g = json.loads(json_str) vertices = [_Vertex(x['id'], dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'id'])) for x in g['vertices']] edges = [_Edge(x['src'], x['dst'], dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'src' and k != 'dst'])) for x in g['edges']] sg = _SGraph().add_vertices(vertices) if len(edges) > 0: sg = sg.add_edges(edges) return sg
[ "def", "_SGraphFromJsonTree", "(", "json_str", ")", ":", "g", "=", "json", ".", "loads", "(", "json_str", ")", "vertices", "=", "[", "_Vertex", "(", "x", "[", "'id'", "]", ",", "dict", "(", "[", "(", "str", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "_six", ".", "iteritems", "(", "x", ")", "if", "k", "!=", "'id'", "]", ")", ")", "for", "x", "in", "g", "[", "'vertices'", "]", "]", "edges", "=", "[", "_Edge", "(", "x", "[", "'src'", "]", ",", "x", "[", "'dst'", "]", ",", "dict", "(", "[", "(", "str", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "_six", ".", "iteritems", "(", "x", ")", "if", "k", "!=", "'src'", "and", "k", "!=", "'dst'", "]", ")", ")", "for", "x", "in", "g", "[", "'edges'", "]", "]", "sg", "=", "_SGraph", "(", ")", ".", "add_vertices", "(", "vertices", ")", "if", "len", "(", "edges", ")", ">", "0", ":", "sg", "=", "sg", ".", "add_edges", "(", "edges", ")", "return", "sg" ]
Convert the Json Tree to SGraph
[ "Convert", "the", "Json", "Tree", "to", "SGraph" ]
python
train
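The key lookups in _SGraphFromJsonTree imply the JSON layout it expects: a top-level object with "vertices" (each carrying an "id") and "edges" (each carrying "src" and "dst"), with any remaining keys becoming vertex or edge attributes. A hedged example of such an input, with the call commented out since it relies on turicreate's private _SGraph/_Vertex/_Edge types:

json_str = '''
{
  "vertices": [{"id": 0, "label": "a"}, {"id": 1, "label": "b"}],
  "edges":    [{"src": 0, "dst": 1, "weight": 2.5}]
}
'''
# sg = _SGraphFromJsonTree(json_str)
# sg would hold two vertices (each with a "label" field) and one edge (with "weight").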
line/line-bot-sdk-python
linebot/utils.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/utils.py#L39-L47
def to_camel_case(text):
    """Convert to camel case.

    :param str text:
    :rtype: str
    :return:
    """
    split = text.split('_')
    return split[0] + "".join(x.title() for x in split[1:])
[ "def", "to_camel_case", "(", "text", ")", ":", "split", "=", "text", ".", "split", "(", "'_'", ")", "return", "split", "[", "0", "]", "+", "\"\"", ".", "join", "(", "x", ".", "title", "(", ")", "for", "x", "in", "split", "[", "1", ":", "]", ")" ]
Convert to camel case. :param str text: :rtype: str :return:
[ "Convert", "to", "camel", "case", "." ]
python
train
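A few input/output pairs make the behaviour of the snake_case-to-camelCase helper above easy to verify by hand; the function is repeated verbatim so the block runs on its own:

def to_camel_case(text):
    split = text.split('_')
    return split[0] + "".join(x.title() for x in split[1:])

print(to_camel_case('reply_token'))  # replyToken
print(to_camel_case('user_id'))      # userId
print(to_camel_case('already'))      # already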
HDI-Project/MLBlocks
mlblocks/primitives.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/primitives.py#L82-L117
def load_primitive(name):
    """Locate and load the JSON annotation of the given primitive.

    All the paths found in PRIMITIVE_PATHS will be scanned to find a JSON file
    with the given name, and as soon as a JSON with the given name is found it
    is returned.

    Args:
        name (str): name of the primitive to look for. The name should
            correspond to the primitive, not to the filename, as the `.json`
            extension will be added dynamically.

    Returns:
        dict:
            The content of the JSON annotation file loaded into a dict.

    Raises:
        ValueError: A `ValueError` will be raised if the primitive cannot be
            found.
    """
    for base_path in get_primitives_paths():
        parts = name.split('.')
        number_of_parts = len(parts)

        for folder_parts in range(number_of_parts):
            folder = os.path.join(base_path, *parts[:folder_parts])
            filename = '.'.join(parts[folder_parts:]) + '.json'
            json_path = os.path.join(folder, filename)

            if os.path.isfile(json_path):
                with open(json_path, 'r') as json_file:
                    LOGGER.debug('Loading primitive %s from %s', name, json_path)
                    return json.load(json_file)

    raise ValueError("Unknown primitive: {}".format(name))
[ "def", "load_primitive", "(", "name", ")", ":", "for", "base_path", "in", "get_primitives_paths", "(", ")", ":", "parts", "=", "name", ".", "split", "(", "'.'", ")", "number_of_parts", "=", "len", "(", "parts", ")", "for", "folder_parts", "in", "range", "(", "number_of_parts", ")", ":", "folder", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "*", "parts", "[", ":", "folder_parts", "]", ")", "filename", "=", "'.'", ".", "join", "(", "parts", "[", "folder_parts", ":", "]", ")", "+", "'.json'", "json_path", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "json_path", ")", ":", "with", "open", "(", "json_path", ",", "'r'", ")", "as", "json_file", ":", "LOGGER", ".", "debug", "(", "'Loading primitive %s from %s'", ",", "name", ",", "json_path", ")", "return", "json", ".", "load", "(", "json_file", ")", "raise", "ValueError", "(", "\"Unknown primitive: {}\"", ".", "format", "(", "name", ")", ")" ]
Locate and load the JSON annotation of the given primitive. All the paths found in PRIMITIVE_PATHS will be scanned to find a JSON file with the given name, and as soon as a JSON with the given name is found it is returned. Args: name (str): name of the primitive to look for. The name should correspond to the primitive, not to the filename, as the `.json` extension will be added dynamically. Returns: dict: The content of the JSON annotation file loaded into a dict. Raises: ValueError: A `ValueError` will be raised if the primitive cannot be found.
[ "Locate", "and", "load", "the", "JSON", "annotation", "of", "the", "given", "primitive", "." ]
python
train
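The nested loop in load_primitive tries every split of the dotted name into a folder prefix and a file name. A self-contained sketch that only enumerates the candidate paths it would probe; the base path and primitive name below are made up for illustration:

import os

def candidate_paths(base_path, name):
    # Yield every folder/filename split the loader would check, in order.
    parts = name.split('.')
    for folder_parts in range(len(parts)):
        folder = os.path.join(base_path, *parts[:folder_parts])
        filename = '.'.join(parts[folder_parts:]) + '.json'
        yield os.path.join(folder, filename)

for path in candidate_paths('/primitives', 'sklearn.preprocessing.StandardScaler'):
    print(path)
# /primitives/sklearn.preprocessing.StandardScaler.json
# /primitives/sklearn/preprocessing.StandardScaler.json
# /primitives/sklearn/preprocessing/StandardScaler.json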
prompt-toolkit/pymux
pymux/server.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/server.py#L137-L158
def _create_app(self, color_depth, term='xterm'):
    """
    Create CommandLineInterface for this client.
    Called when the client wants to attach the UI to the server.
    """
    output = Vt100_Output(_SocketStdout(self._send_packet),
                          lambda: self.size, term=term, write_binary=False)

    self.client_state = self.pymux.add_client(
        input=self._pipeinput, output=output, connection=self,
        color_depth=color_depth)

    print('Start running app...')
    future = self.client_state.app.run_async()
    print('Start running app got future...', future)

    @future.add_done_callback
    def done(_):
        print('APP DONE.........')
        print(future.result())
        self._close_connection()
[ "def", "_create_app", "(", "self", ",", "color_depth", ",", "term", "=", "'xterm'", ")", ":", "output", "=", "Vt100_Output", "(", "_SocketStdout", "(", "self", ".", "_send_packet", ")", ",", "lambda", ":", "self", ".", "size", ",", "term", "=", "term", ",", "write_binary", "=", "False", ")", "self", ".", "client_state", "=", "self", ".", "pymux", ".", "add_client", "(", "input", "=", "self", ".", "_pipeinput", ",", "output", "=", "output", ",", "connection", "=", "self", ",", "color_depth", "=", "color_depth", ")", "print", "(", "'Start running app...'", ")", "future", "=", "self", ".", "client_state", ".", "app", ".", "run_async", "(", ")", "print", "(", "'Start running app got future...'", ",", "future", ")", "@", "future", ".", "add_done_callback", "def", "done", "(", "_", ")", ":", "print", "(", "'APP DONE.........'", ")", "print", "(", "future", ".", "result", "(", ")", ")", "self", ".", "_close_connection", "(", ")" ]
Create CommandLineInterface for this client. Called when the client wants to attach the UI to the server.
[ "Create", "CommandLineInterface", "for", "this", "client", ".", "Called", "when", "the", "client", "wants", "to", "attach", "the", "UI", "to", "the", "server", "." ]
python
train
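One detail worth noting in _create_app is the decorator-style use of add_done_callback: the function is registered as a completion callback, and since add_done_callback returns None the name done ends up bound to None, which is harmless because it is never called directly. A tiny illustration with a plain asyncio future, unrelated to pymux's own Future type:

import asyncio

async def main():
    future = asyncio.get_running_loop().create_future()

    # Same decorator trick: register 'done' as a callback on the future.
    @future.add_done_callback
    def done(fut):
        print('future finished with:', fut.result())

    future.set_result(42)
    await asyncio.sleep(0)  # give the event loop a chance to run the callback

asyncio.run(main())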
kbr/fritzconnection
fritzconnection/fritzconnection.py
https://github.com/kbr/fritzconnection/blob/b183f759ef19dd1652371e912d36cfe34f6639ac/fritzconnection/fritzconnection.py#L268-L278
def _get_arguments(self, action_node):
    """
    Returns a dictionary of arguments for the given action_node.
    """
    arguments = {}
    argument_nodes = action_node.iterfind(
        r'./ns:argumentList/ns:argument',
        namespaces={'ns': self.namespace})
    for argument_node in argument_nodes:
        argument = self._get_argument(argument_node)
        arguments[argument.name] = argument
    return arguments
[ "def", "_get_arguments", "(", "self", ",", "action_node", ")", ":", "arguments", "=", "{", "}", "argument_nodes", "=", "action_node", ".", "iterfind", "(", "r'./ns:argumentList/ns:argument'", ",", "namespaces", "=", "{", "'ns'", ":", "self", ".", "namespace", "}", ")", "for", "argument_node", "in", "argument_nodes", ":", "argument", "=", "self", ".", "_get_argument", "(", "argument_node", ")", "arguments", "[", "argument", ".", "name", "]", "=", "argument", "return", "arguments" ]
Returns a dictionary of arguments for the given action_node.
[ "Returns", "a", "dictionary", "of", "arguments", "for", "the", "given", "action_node", "." ]
python
train
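The lookup in _get_arguments is a standard namespaced iterfind over a TR-064 style service description. A self-contained sketch of the same pattern using xml.etree.ElementTree directly; the XML fragment and namespace below are illustrative, not taken from the library:

import xml.etree.ElementTree as ET

NS = {'ns': 'urn:dslforum-org:service-1-0'}
xml_text = """
<action xmlns="urn:dslforum-org:service-1-0">
  <name>GetInfo</name>
  <argumentList>
    <argument><name>NewEnable</name><direction>out</direction></argument>
    <argument><name>NewStatus</name><direction>out</direction></argument>
  </argumentList>
</action>
"""

action_node = ET.fromstring(xml_text)
arguments = {}
for argument_node in action_node.iterfind('./ns:argumentList/ns:argument', namespaces=NS):
    name = argument_node.findtext('ns:name', namespaces=NS)
    arguments[name] = argument_node

print(sorted(arguments))  # ['NewEnable', 'NewStatus']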