repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
numenta/htmresearch
htmresearch/algorithms/location_modules.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/location_modules.py#L314-L346
def _learn(connections, rng, learningSegments, activeInput, potentialOverlaps, initialPermanence, sampleSize, permanenceIncrement, permanenceDecrement, maxSynapsesPerSegment): """ Adjust synapse permanences, grow new synapses, and grow new segments. @param learningActiveSegments (numpy array) @param learningMatchingSegments (numpy array) @param segmentsToPunish (numpy array) @param activeInput (numpy array) @param potentialOverlaps (numpy array) """ # Learn on existing segments connections.adjustSynapses(learningSegments, activeInput, permanenceIncrement, -permanenceDecrement) # Grow new synapses. Calculate "maxNew", the maximum number of synapses to # grow per segment. "maxNew" might be a number or it might be a list of # numbers. if sampleSize == -1: maxNew = len(activeInput) else: maxNew = sampleSize - potentialOverlaps[learningSegments] if maxSynapsesPerSegment != -1: synapseCounts = connections.mapSegmentsToSynapseCounts( learningSegments) numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts maxNew = np.where(maxNew <= numSynapsesToReachMax, maxNew, numSynapsesToReachMax) connections.growSynapsesToSample(learningSegments, activeInput, maxNew, initialPermanence, rng)
[ "def", "_learn", "(", "connections", ",", "rng", ",", "learningSegments", ",", "activeInput", ",", "potentialOverlaps", ",", "initialPermanence", ",", "sampleSize", ",", "permanenceIncrement", ",", "permanenceDecrement", ",", "maxSynapsesPerSegment", ")", ":", "# Learn on existing segments", "connections", ".", "adjustSynapses", "(", "learningSegments", ",", "activeInput", ",", "permanenceIncrement", ",", "-", "permanenceDecrement", ")", "# Grow new synapses. Calculate \"maxNew\", the maximum number of synapses to", "# grow per segment. \"maxNew\" might be a number or it might be a list of", "# numbers.", "if", "sampleSize", "==", "-", "1", ":", "maxNew", "=", "len", "(", "activeInput", ")", "else", ":", "maxNew", "=", "sampleSize", "-", "potentialOverlaps", "[", "learningSegments", "]", "if", "maxSynapsesPerSegment", "!=", "-", "1", ":", "synapseCounts", "=", "connections", ".", "mapSegmentsToSynapseCounts", "(", "learningSegments", ")", "numSynapsesToReachMax", "=", "maxSynapsesPerSegment", "-", "synapseCounts", "maxNew", "=", "np", ".", "where", "(", "maxNew", "<=", "numSynapsesToReachMax", ",", "maxNew", ",", "numSynapsesToReachMax", ")", "connections", ".", "growSynapsesToSample", "(", "learningSegments", ",", "activeInput", ",", "maxNew", ",", "initialPermanence", ",", "rng", ")" ]
Adjust synapse permanences, grow new synapses, and grow new segments. @param learningActiveSegments (numpy array) @param learningMatchingSegments (numpy array) @param segmentsToPunish (numpy array) @param activeInput (numpy array) @param potentialOverlaps (numpy array)
[ "Adjust", "synapse", "permanences", "grow", "new", "synapses", "and", "grow", "new", "segments", "." ]
python
train
42.242424
squdle/baseconvert
baseconvert/baseconvert.py
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L388-L425
def fractional_base(fractional_part, input_base=10, output_base=10, max_depth=100): """ Convert the fractional part of a number from any base to any base. Args: fractional_part(iterable container): The fractional part of a number in the following form: ( ".", int, int, int, ...) input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of decimal digits to output. Returns: The converted number as a tuple of digits. Example: >>> fractional_base((".", 6,),10,16,10) ('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9) """ fractional_part = fractional_part[1:] fractional_digits = len(fractional_part) numerator = 0 for i, value in enumerate(fractional_part, 1): numerator += value * input_base ** (fractional_digits - i) denominator = input_base ** fractional_digits i = 1 digits = [] while(i < max_depth + 1): numerator *= output_base ** i digit = numerator // denominator numerator -= digit * denominator denominator *= output_base ** i digits.append(digit) i += 1 greatest_common_divisor = gcd(numerator, denominator) numerator //= greatest_common_divisor denominator //= greatest_common_divisor return (".",) + tuple(digits)
[ "def", "fractional_base", "(", "fractional_part", ",", "input_base", "=", "10", ",", "output_base", "=", "10", ",", "max_depth", "=", "100", ")", ":", "fractional_part", "=", "fractional_part", "[", "1", ":", "]", "fractional_digits", "=", "len", "(", "fractional_part", ")", "numerator", "=", "0", "for", "i", ",", "value", "in", "enumerate", "(", "fractional_part", ",", "1", ")", ":", "numerator", "+=", "value", "*", "input_base", "**", "(", "fractional_digits", "-", "i", ")", "denominator", "=", "input_base", "**", "fractional_digits", "i", "=", "1", "digits", "=", "[", "]", "while", "(", "i", "<", "max_depth", "+", "1", ")", ":", "numerator", "*=", "output_base", "**", "i", "digit", "=", "numerator", "//", "denominator", "numerator", "-=", "digit", "*", "denominator", "denominator", "*=", "output_base", "**", "i", "digits", ".", "append", "(", "digit", ")", "i", "+=", "1", "greatest_common_divisor", "=", "gcd", "(", "numerator", ",", "denominator", ")", "numerator", "//=", "greatest_common_divisor", "denominator", "//=", "greatest_common_divisor", "return", "(", "\".\"", ",", ")", "+", "tuple", "(", "digits", ")" ]
Convert the fractional part of a number from any base to any base. Args: fractional_part(iterable container): The fractional part of a number in the following form: ( ".", int, int, int, ...) input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of decimal digits to output. Returns: The converted number as a tuple of digits. Example: >>> fractional_base((".", 6,),10,16,10) ('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
[ "Convert", "the", "fractional", "part", "of", "a", "number", "from", "any", "base", "to", "any", "base", ".", "Args", ":", "fractional_part", "(", "iterable", "container", ")", ":", "The", "fractional", "part", "of", "a", "number", "in", "the", "following", "form", ":", "(", ".", "int", "int", "int", "...", ")", "input_base", "(", "int", ")", ":", "The", "base", "to", "convert", "from", "(", "defualt", "10", ")", ".", "output_base", "(", "int", ")", ":", "The", "base", "to", "convert", "to", "(", "default", "10", ")", ".", "max_depth", "(", "int", ")", ":", "The", "maximum", "number", "of", "decimal", "digits", "to", "output", ".", "Returns", ":", "The", "converted", "number", "as", "a", "tuple", "of", "digits", ".", "Example", ":", ">>>", "fractional_base", "((", ".", "6", ")", "10", "16", "10", ")", "(", ".", "9", "9", "9", "9", "9", "9", "9", "9", "9", "9", ")" ]
python
train
37.789474
kmike/port-for
port_for/docopt.py
https://github.com/kmike/port-for/blob/f61ebf3c2caf54eabe8233b40ef67b973176a6f5/port_for/docopt.py#L59-L66
def fix_list_arguments(self): """Find arguments that should accumulate values and fix them.""" either = [list(c.children) for c in self.either.children] for case in either: case = [c for c in case if case.count(c) > 1] for a in [e for e in case if type(e) == Argument]: a.value = [] return self
[ "def", "fix_list_arguments", "(", "self", ")", ":", "either", "=", "[", "list", "(", "c", ".", "children", ")", "for", "c", "in", "self", ".", "either", ".", "children", "]", "for", "case", "in", "either", ":", "case", "=", "[", "c", "for", "c", "in", "case", "if", "case", ".", "count", "(", "c", ")", ">", "1", "]", "for", "a", "in", "[", "e", "for", "e", "in", "case", "if", "type", "(", "e", ")", "==", "Argument", "]", ":", "a", ".", "value", "=", "[", "]", "return", "self" ]
Find arguments that should accumulate values and fix them.
[ "Find", "arguments", "that", "should", "accumulate", "values", "and", "fix", "them", "." ]
python
train
44.875
push-things/wallabag_api
wallabag_api/wallabag.py
https://github.com/push-things/wallabag_api/blob/8d1e10a6ebc03d1ac9af2b38b57eb69f29b4216e/wallabag_api/wallabag.py#L94-L115
async def handle_json_response(responses): """ get the json data response :param responses: the json response :return the json data without 'root' node """ json_data = {} if responses.status != 200: err_msg = HttpProcessingError(code=responses.status, message=await responses.json()) logging.error("Wallabag: aiohttp error {err_msg}".format( err_msg=err_msg)) else: try: json_data = responses.json() except ClientResponseError as e: # sometimes json_data does not return any json() without # any error. This is due to the grabbing URL which "rejects" # the URL logging.error("Wallabag: aiohttp error {code} {message}" .format(code=e.code, message=e.message)) return await json_data
[ "async", "def", "handle_json_response", "(", "responses", ")", ":", "json_data", "=", "{", "}", "if", "responses", ".", "status", "!=", "200", ":", "err_msg", "=", "HttpProcessingError", "(", "code", "=", "responses", ".", "status", ",", "message", "=", "await", "responses", ".", "json", "(", ")", ")", "logging", ".", "error", "(", "\"Wallabag: aiohttp error {err_msg}\"", ".", "format", "(", "err_msg", "=", "err_msg", ")", ")", "else", ":", "try", ":", "json_data", "=", "responses", ".", "json", "(", ")", "except", "ClientResponseError", "as", "e", ":", "# sometimes json_data does not return any json() without", "# any error. This is due to the grabbing URL which \"rejects\"", "# the URL", "logging", ".", "error", "(", "\"Wallabag: aiohttp error {code} {message}\"", ".", "format", "(", "code", "=", "e", ".", "code", ",", "message", "=", "e", ".", "message", ")", ")", "return", "await", "json_data" ]
get the json data response :param responses: the json response :return the json data without 'root' node
[ "get", "the", "json", "data", "response", ":", "param", "responses", ":", "the", "json", "response", ":", "return", "the", "json", "data", "without", "root", "node" ]
python
train
43.090909
TeamHG-Memex/eli5
eli5/formatters/utils.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/formatters/utils.py#L99-L140
def tabulate(data, # type: List[List[Any]] header=None, # type: Optional[List[Any]] col_align=None, # type: Union[str, List[str]] ): # type: (...) -> List[str] """ Format data as a table without any fancy features. col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center Return a list of strings (lines of the table). """ if not data and not header: return [] if data: n_cols = len(data[0]) else: assert header is not None n_cols = len(header) if not all(len(row) == n_cols for row in data): raise ValueError('data is not rectangular') if col_align is None: col_align = ['l'] * n_cols elif isinstance(col_align, six.string_types) and len(col_align) == 1: col_align = [col_align] * n_cols else: col_align = list(col_align) if len(col_align) != n_cols: raise ValueError('col_align length does not match number of columns') if header and len(header) != n_cols: raise ValueError('header length does not match number of columns') if header: data = [header] + data data = [[six.text_type(x) for x in row] for row in data] col_width = [max(len(row[col_i]) for row in data) for col_i in range(n_cols)] if header: data.insert(1, ['-' * width for width in col_width]) line_tpl = u' '.join( u'{:%s%s}' % ({'l': '', 'r': '>', 'c': '^'}[align], width) for align, width in zip(col_align, col_width)) return [line_tpl.format(*row) for row in data]
[ "def", "tabulate", "(", "data", ",", "# type: List[List[Any]]", "header", "=", "None", ",", "# type: Optional[List[Any]]", "col_align", "=", "None", ",", "# type: Union[str, List[str]]", ")", ":", "# type: (...) -> List[str]", "if", "not", "data", "and", "not", "header", ":", "return", "[", "]", "if", "data", ":", "n_cols", "=", "len", "(", "data", "[", "0", "]", ")", "else", ":", "assert", "header", "is", "not", "None", "n_cols", "=", "len", "(", "header", ")", "if", "not", "all", "(", "len", "(", "row", ")", "==", "n_cols", "for", "row", "in", "data", ")", ":", "raise", "ValueError", "(", "'data is not rectangular'", ")", "if", "col_align", "is", "None", ":", "col_align", "=", "[", "'l'", "]", "*", "n_cols", "elif", "isinstance", "(", "col_align", ",", "six", ".", "string_types", ")", "and", "len", "(", "col_align", ")", "==", "1", ":", "col_align", "=", "[", "col_align", "]", "*", "n_cols", "else", ":", "col_align", "=", "list", "(", "col_align", ")", "if", "len", "(", "col_align", ")", "!=", "n_cols", ":", "raise", "ValueError", "(", "'col_align length does not match number of columns'", ")", "if", "header", "and", "len", "(", "header", ")", "!=", "n_cols", ":", "raise", "ValueError", "(", "'header length does not match number of columns'", ")", "if", "header", ":", "data", "=", "[", "header", "]", "+", "data", "data", "=", "[", "[", "six", ".", "text_type", "(", "x", ")", "for", "x", "in", "row", "]", "for", "row", "in", "data", "]", "col_width", "=", "[", "max", "(", "len", "(", "row", "[", "col_i", "]", ")", "for", "row", "in", "data", ")", "for", "col_i", "in", "range", "(", "n_cols", ")", "]", "if", "header", ":", "data", ".", "insert", "(", "1", ",", "[", "'-'", "*", "width", "for", "width", "in", "col_width", "]", ")", "line_tpl", "=", "u' '", ".", "join", "(", "u'{:%s%s}'", "%", "(", "{", "'l'", ":", "''", ",", "'r'", ":", "'>'", ",", "'c'", ":", "'^'", "}", "[", "align", "]", ",", "width", ")", "for", "align", ",", "width", "in", "zip", "(", "col_align", ",", "col_width", ")", ")", "return", "[", "line_tpl", ".", "format", "(", "*", "row", ")", "for", "row", "in", "data", "]" ]
Format data as a table without any fancy features. col_align: l/r/c or a list/string of l/r/c. l = left, r = right, c = center Return a list of strings (lines of the table).
[ "Format", "data", "as", "a", "table", "without", "any", "fancy", "features", ".", "col_align", ":", "l", "/", "r", "/", "c", "or", "a", "list", "/", "string", "of", "l", "/", "r", "/", "c", ".", "l", "=", "left", "r", "=", "right", "c", "=", "center", "Return", "a", "list", "of", "strings", "(", "lines", "of", "the", "table", ")", "." ]
python
train
37.095238
manns/pyspread
pyspread/src/gui/_chart_dialog.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_chart_dialog.py#L654-L659
def OnLabelSizeIntCtrl(self, event): """Label size IntCtrl event handler""" self.attrs["labelsize"] = event.GetValue() post_command_event(self, self.DrawChartMsg)
[ "def", "OnLabelSizeIntCtrl", "(", "self", ",", "event", ")", ":", "self", ".", "attrs", "[", "\"labelsize\"", "]", "=", "event", ".", "GetValue", "(", ")", "post_command_event", "(", "self", ",", "self", ".", "DrawChartMsg", ")" ]
Label size IntCtrl event handler
[ "Label", "size", "IntCtrl", "event", "handler" ]
python
train
30.5
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L546-L582
def lsattr(path): ''' .. versionadded:: 2018.3.0 .. versionchanged:: 2018.3.1 If ``lsattr`` is not installed on the system, ``None`` is returned. .. versionchanged:: 2018.3.4 If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX`` is not the same thing as the linux version. Obtain the modifiable attributes of the given file. If path is to a directory, an empty list is returned. path path to file to obtain attributes of. File/directory must exist. CLI Example: .. code-block:: bash salt '*' file.lsattr foo1.txt ''' if not salt.utils.path.which('lsattr') or salt.utils.platform.is_aix(): return None if not os.path.exists(path): raise SaltInvocationError("File or directory does not exist: " + path) cmd = ['lsattr', path] result = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False) results = {} for line in result.splitlines(): if not line.startswith('lsattr: '): vals = line.split(None, 1) results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) return results
[ "def", "lsattr", "(", "path", ")", ":", "if", "not", "salt", ".", "utils", ".", "path", ".", "which", "(", "'lsattr'", ")", "or", "salt", ".", "utils", ".", "platform", ".", "is_aix", "(", ")", ":", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "SaltInvocationError", "(", "\"File or directory does not exist: \"", "+", "path", ")", "cmd", "=", "[", "'lsattr'", ",", "path", "]", "result", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "ignore_retcode", "=", "True", ",", "python_shell", "=", "False", ")", "results", "=", "{", "}", "for", "line", "in", "result", ".", "splitlines", "(", ")", ":", "if", "not", "line", ".", "startswith", "(", "'lsattr: '", ")", ":", "vals", "=", "line", ".", "split", "(", "None", ",", "1", ")", "results", "[", "vals", "[", "1", "]", "]", "=", "re", ".", "findall", "(", "r\"[aAcCdDeijPsStTu]\"", ",", "vals", "[", "0", "]", ")", "return", "results" ]
.. versionadded:: 2018.3.0 .. versionchanged:: 2018.3.1 If ``lsattr`` is not installed on the system, ``None`` is returned. .. versionchanged:: 2018.3.4 If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX`` is not the same thing as the linux version. Obtain the modifiable attributes of the given file. If path is to a directory, an empty list is returned. path path to file to obtain attributes of. File/directory must exist. CLI Example: .. code-block:: bash salt '*' file.lsattr foo1.txt
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0", "..", "versionchanged", "::", "2018", ".", "3", ".", "1", "If", "lsattr", "is", "not", "installed", "on", "the", "system", "None", "is", "returned", ".", "..", "versionchanged", "::", "2018", ".", "3", ".", "4", "If", "on", "AIX", "None", "is", "returned", "even", "if", "in", "filesystem", "as", "lsattr", "on", "AIX", "is", "not", "the", "same", "thing", "as", "the", "linux", "version", "." ]
python
train
30.72973
idlesign/django-sitetree
sitetree/sitetreeapp.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/sitetreeapp.py#L581-L597
def calculate_item_depth(self, tree_alias, item_id, depth=0): """Calculates depth of the item in the tree. :param str|unicode tree_alias: :param int item_id: :param int depth: :rtype: int """ item = self.get_item_by_id(tree_alias, item_id) if hasattr(item, 'depth'): depth = item.depth + depth else: if item.parent is not None: depth = self.calculate_item_depth(tree_alias, item.parent.id, depth + 1) return depth
[ "def", "calculate_item_depth", "(", "self", ",", "tree_alias", ",", "item_id", ",", "depth", "=", "0", ")", ":", "item", "=", "self", ".", "get_item_by_id", "(", "tree_alias", ",", "item_id", ")", "if", "hasattr", "(", "item", ",", "'depth'", ")", ":", "depth", "=", "item", ".", "depth", "+", "depth", "else", ":", "if", "item", ".", "parent", "is", "not", "None", ":", "depth", "=", "self", ".", "calculate_item_depth", "(", "tree_alias", ",", "item", ".", "parent", ".", "id", ",", "depth", "+", "1", ")", "return", "depth" ]
Calculates depth of the item in the tree. :param str|unicode tree_alias: :param int item_id: :param int depth: :rtype: int
[ "Calculates", "depth", "of", "the", "item", "in", "the", "tree", "." ]
python
test
30.588235
linuxwhatelse/mapper
mapper.py
https://github.com/linuxwhatelse/mapper/blob/3481715b2a36d2da8bf5e9c6da80ceaed0d7ca59/mapper.py#L166-L237
def call(self, url, method=None, args=None): """Calls the first function matching the urls pattern and method. Args: url (str): Url for which to call a matching function. method (str, optional): The method used while registering a function. Defaults to None args (dict, optional): Additional args to be passed to the matching function. Returns: The functions return value or `None` if no function was called. """ if not args: args = {} if sys.version_info.major == 3: data = urllib.parse.urlparse(url) path = data.path.rstrip('/') + '/' _args = dict(urllib.parse.parse_qs(data.query, keep_blank_values=True)) elif sys.version_info.major == 2: data = urlparse.urlparse(url) path = data.path.rstrip('/') + '/' _args = dict(urlparse.parse_qs(data.query, keep_blank_values=True)) for elem in self._data_store: pattern = elem['pattern'] function = elem['function'] _method = elem['method'] type_cast = elem['type_cast'] result = re.match(pattern, path) # Found matching method if result and _method == method: _args = dict(_args, **result.groupdict()) # Unpack value lists (due to urllib.parse.parse_qs) in case # theres only one value available for key, val in _args.items(): if isinstance(_args[key], list) and len(_args[key]) == 1: _args[key] = _args[key][0] # Apply typ-casting if necessary for key, val in type_cast.items(): # Not within available _args, no type-cast required if key not in _args: continue # Is None or empty, no type-cast required if not _args[key]: continue # Try and cast the values if isinstance(_args[key], list): for i, _val in enumerate(_args[key]): _args[key][i] = self._cast(_val, val) else: _args[key] = self._cast(_args[key], val) requiered_args = self._get_function_args(function) for key, val in args.items(): if key in requiered_args: _args[key] = val return function(**_args) return None
[ "def", "call", "(", "self", ",", "url", ",", "method", "=", "None", ",", "args", "=", "None", ")", ":", "if", "not", "args", ":", "args", "=", "{", "}", "if", "sys", ".", "version_info", ".", "major", "==", "3", ":", "data", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "path", "=", "data", ".", "path", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "_args", "=", "dict", "(", "urllib", ".", "parse", ".", "parse_qs", "(", "data", ".", "query", ",", "keep_blank_values", "=", "True", ")", ")", "elif", "sys", ".", "version_info", ".", "major", "==", "2", ":", "data", "=", "urlparse", ".", "urlparse", "(", "url", ")", "path", "=", "data", ".", "path", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "_args", "=", "dict", "(", "urlparse", ".", "parse_qs", "(", "data", ".", "query", ",", "keep_blank_values", "=", "True", ")", ")", "for", "elem", "in", "self", ".", "_data_store", ":", "pattern", "=", "elem", "[", "'pattern'", "]", "function", "=", "elem", "[", "'function'", "]", "_method", "=", "elem", "[", "'method'", "]", "type_cast", "=", "elem", "[", "'type_cast'", "]", "result", "=", "re", ".", "match", "(", "pattern", ",", "path", ")", "# Found matching method", "if", "result", "and", "_method", "==", "method", ":", "_args", "=", "dict", "(", "_args", ",", "*", "*", "result", ".", "groupdict", "(", ")", ")", "# Unpack value lists (due to urllib.parse.parse_qs) in case", "# theres only one value available", "for", "key", ",", "val", "in", "_args", ".", "items", "(", ")", ":", "if", "isinstance", "(", "_args", "[", "key", "]", ",", "list", ")", "and", "len", "(", "_args", "[", "key", "]", ")", "==", "1", ":", "_args", "[", "key", "]", "=", "_args", "[", "key", "]", "[", "0", "]", "# Apply typ-casting if necessary", "for", "key", ",", "val", "in", "type_cast", ".", "items", "(", ")", ":", "# Not within available _args, no type-cast required", "if", "key", "not", "in", "_args", ":", "continue", "# Is None or empty, no type-cast required", "if", "not", "_args", "[", "key", "]", ":", "continue", "# Try and cast the values", "if", "isinstance", "(", "_args", "[", "key", "]", ",", "list", ")", ":", "for", "i", ",", "_val", "in", "enumerate", "(", "_args", "[", "key", "]", ")", ":", "_args", "[", "key", "]", "[", "i", "]", "=", "self", ".", "_cast", "(", "_val", ",", "val", ")", "else", ":", "_args", "[", "key", "]", "=", "self", ".", "_cast", "(", "_args", "[", "key", "]", ",", "val", ")", "requiered_args", "=", "self", ".", "_get_function_args", "(", "function", ")", "for", "key", ",", "val", "in", "args", ".", "items", "(", ")", ":", "if", "key", "in", "requiered_args", ":", "_args", "[", "key", "]", "=", "val", "return", "function", "(", "*", "*", "_args", ")", "return", "None" ]
Calls the first function matching the urls pattern and method. Args: url (str): Url for which to call a matching function. method (str, optional): The method used while registering a function. Defaults to None args (dict, optional): Additional args to be passed to the matching function. Returns: The functions return value or `None` if no function was called.
[ "Calls", "the", "first", "function", "matching", "the", "urls", "pattern", "and", "method", "." ]
python
test
37.277778
amoffat/sh
sh.py
https://github.com/amoffat/sh/blob/858adf0c682af4c40e41f34d6926696b7a5d3b12/sh.py#L522-L560
def which(program, paths=None): """ takes a program name or full path, plus an optional collection of search paths, and returns the full path of the requested executable. if paths is specified, it is the entire list of search paths, and the PATH env is not used at all. otherwise, PATH env is used to look for the program """ def is_exe(fpath): return (os.path.exists(fpath) and os.access(fpath, os.X_OK) and os.path.isfile(os.path.realpath(fpath))) found_path = None fpath, fname = os.path.split(program) # if there's a path component, then we've specified a path to the program, # and we should just test if that program is executable. if it is, return if fpath: program = os.path.abspath(os.path.expanduser(program)) if is_exe(program): found_path = program # otherwise, we've just passed in the program name, and we need to search # the paths to find where it actually lives else: paths_to_search = [] if isinstance(paths, (tuple, list)): paths_to_search.extend(paths) else: env_paths = os.environ.get("PATH", "").split(os.pathsep) paths_to_search.extend(env_paths) for path in paths_to_search: exe_file = os.path.join(path, program) if is_exe(exe_file): found_path = exe_file break return found_path
[ "def", "which", "(", "program", ",", "paths", "=", "None", ")", ":", "def", "is_exe", "(", "fpath", ")", ":", "return", "(", "os", ".", "path", ".", "exists", "(", "fpath", ")", "and", "os", ".", "access", "(", "fpath", ",", "os", ".", "X_OK", ")", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "realpath", "(", "fpath", ")", ")", ")", "found_path", "=", "None", "fpath", ",", "fname", "=", "os", ".", "path", ".", "split", "(", "program", ")", "# if there's a path component, then we've specified a path to the program,", "# and we should just test if that program is executable. if it is, return", "if", "fpath", ":", "program", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "program", ")", ")", "if", "is_exe", "(", "program", ")", ":", "found_path", "=", "program", "# otherwise, we've just passed in the program name, and we need to search", "# the paths to find where it actually lives", "else", ":", "paths_to_search", "=", "[", "]", "if", "isinstance", "(", "paths", ",", "(", "tuple", ",", "list", ")", ")", ":", "paths_to_search", ".", "extend", "(", "paths", ")", "else", ":", "env_paths", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "\"\"", ")", ".", "split", "(", "os", ".", "pathsep", ")", "paths_to_search", ".", "extend", "(", "env_paths", ")", "for", "path", "in", "paths_to_search", ":", "exe_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "program", ")", "if", "is_exe", "(", "exe_file", ")", ":", "found_path", "=", "exe_file", "break", "return", "found_path" ]
takes a program name or full path, plus an optional collection of search paths, and returns the full path of the requested executable. if paths is specified, it is the entire list of search paths, and the PATH env is not used at all. otherwise, PATH env is used to look for the program
[ "takes", "a", "program", "name", "or", "full", "path", "plus", "an", "optional", "collection", "of", "search", "paths", "and", "returns", "the", "full", "path", "of", "the", "requested", "executable", ".", "if", "paths", "is", "specified", "it", "is", "the", "entire", "list", "of", "search", "paths", "and", "the", "PATH", "env", "is", "not", "used", "at", "all", ".", "otherwise", "PATH", "env", "is", "used", "to", "look", "for", "the", "program" ]
python
train
36.512821
apache/spark
python/pyspark/mllib/stat/_statistics.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/_statistics.py#L97-L157
def corr(x, y=None, method=None): """ Compute the correlation (matrix) for the input RDD(s) using the specified method. Methods currently supported: I{pearson (default), spearman}. If a single RDD of Vectors is passed in, a correlation matrix comparing the columns in the input RDD is returned. Use C{method=} to specify the method to be used for single RDD inout. If two RDDs of floats are passed in, a single float is returned. :param x: an RDD of vector for which the correlation matrix is to be computed, or an RDD of float of the same cardinality as y when y is specified. :param y: an RDD of float of the same cardinality as x. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman` :return: Correlation matrix comparing columns in x. >>> x = sc.parallelize([1.0, 0.0, -2.0], 2) >>> y = sc.parallelize([4.0, 5.0, 3.0], 2) >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2) >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7 True >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson") True >>> Statistics.corr(x, y, "spearman") 0.5 >>> from math import isnan >>> isnan(Statistics.corr(x, zeros)) True >>> from pyspark.mllib.linalg import Vectors >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]), ... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])]) >>> pearsonCorr = Statistics.corr(rdd) >>> print(str(pearsonCorr).replace('nan', 'NaN')) [[ 1. 0.05564149 NaN 0.40047142] [ 0.05564149 1. NaN 0.91359586] [ NaN NaN 1. NaN] [ 0.40047142 0.91359586 NaN 1. ]] >>> spearmanCorr = Statistics.corr(rdd, method="spearman") >>> print(str(spearmanCorr).replace('nan', 'NaN')) [[ 1. 0.10540926 NaN 0.4 ] [ 0.10540926 1. NaN 0.9486833 ] [ NaN NaN 1. NaN] [ 0.4 0.9486833 NaN 1. ]] >>> try: ... Statistics.corr(rdd, "spearman") ... print("Method name as second argument without 'method=' shouldn't be allowed.") ... except TypeError: ... pass """ # Check inputs to determine whether a single value or a matrix is needed for output. # Since it's legal for users to use the method name as the second argument, we need to # check if y is used to specify the method name instead. if type(y) == str: raise TypeError("Use 'method=' to specify method name.") if not y: return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray() else: return callMLlibFunc("corr", x.map(float), y.map(float), method)
[ "def", "corr", "(", "x", ",", "y", "=", "None", ",", "method", "=", "None", ")", ":", "# Check inputs to determine whether a single value or a matrix is needed for output.", "# Since it's legal for users to use the method name as the second argument, we need to", "# check if y is used to specify the method name instead.", "if", "type", "(", "y", ")", "==", "str", ":", "raise", "TypeError", "(", "\"Use 'method=' to specify method name.\"", ")", "if", "not", "y", ":", "return", "callMLlibFunc", "(", "\"corr\"", ",", "x", ".", "map", "(", "_convert_to_vector", ")", ",", "method", ")", ".", "toArray", "(", ")", "else", ":", "return", "callMLlibFunc", "(", "\"corr\"", ",", "x", ".", "map", "(", "float", ")", ",", "y", ".", "map", "(", "float", ")", ",", "method", ")" ]
Compute the correlation (matrix) for the input RDD(s) using the specified method. Methods currently supported: I{pearson (default), spearman}. If a single RDD of Vectors is passed in, a correlation matrix comparing the columns in the input RDD is returned. Use C{method=} to specify the method to be used for single RDD inout. If two RDDs of floats are passed in, a single float is returned. :param x: an RDD of vector for which the correlation matrix is to be computed, or an RDD of float of the same cardinality as y when y is specified. :param y: an RDD of float of the same cardinality as x. :param method: String specifying the method to use for computing correlation. Supported: `pearson` (default), `spearman` :return: Correlation matrix comparing columns in x. >>> x = sc.parallelize([1.0, 0.0, -2.0], 2) >>> y = sc.parallelize([4.0, 5.0, 3.0], 2) >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2) >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7 True >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson") True >>> Statistics.corr(x, y, "spearman") 0.5 >>> from math import isnan >>> isnan(Statistics.corr(x, zeros)) True >>> from pyspark.mllib.linalg import Vectors >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]), ... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])]) >>> pearsonCorr = Statistics.corr(rdd) >>> print(str(pearsonCorr).replace('nan', 'NaN')) [[ 1. 0.05564149 NaN 0.40047142] [ 0.05564149 1. NaN 0.91359586] [ NaN NaN 1. NaN] [ 0.40047142 0.91359586 NaN 1. ]] >>> spearmanCorr = Statistics.corr(rdd, method="spearman") >>> print(str(spearmanCorr).replace('nan', 'NaN')) [[ 1. 0.10540926 NaN 0.4 ] [ 0.10540926 1. NaN 0.9486833 ] [ NaN NaN 1. NaN] [ 0.4 0.9486833 NaN 1. ]] >>> try: ... Statistics.corr(rdd, "spearman") ... print("Method name as second argument without 'method=' shouldn't be allowed.") ... except TypeError: ... pass
[ "Compute", "the", "correlation", "(", "matrix", ")", "for", "the", "input", "RDD", "(", "s", ")", "using", "the", "specified", "method", ".", "Methods", "currently", "supported", ":", "I", "{", "pearson", "(", "default", ")", "spearman", "}", "." ]
python
train
50.229508
pyvisa/pyvisa
pyvisa/highlevel.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/highlevel.py#L928-L943
def open(self, session, resource_name, access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE): """Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. :param session: Resource Manager session (should always be a session returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :param access_mode: Specifies the mode by which the resource is to be accessed. :type access_mode: :class:`pyvisa.constants.AccessModes` :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. :return: Unique logical identifier reference to a session, return value of the library call. :rtype: session, :class:`pyvisa.constants.StatusCode` """ raise NotImplementedError
[ "def", "open", "(", "self", ",", "session", ",", "resource_name", ",", "access_mode", "=", "constants", ".", "AccessModes", ".", "no_lock", ",", "open_timeout", "=", "constants", ".", "VI_TMO_IMMEDIATE", ")", ":", "raise", "NotImplementedError" ]
Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. :param session: Resource Manager session (should always be a session returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :param access_mode: Specifies the mode by which the resource is to be accessed. :type access_mode: :class:`pyvisa.constants.AccessModes` :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. :return: Unique logical identifier reference to a session, return value of the library call. :rtype: session, :class:`pyvisa.constants.StatusCode`
[ "Opens", "a", "session", "to", "the", "specified", "resource", "." ]
python
train
59.5625
Cue/scales
src/greplin/scales/samplestats.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/samplestats.py#L222-L233
def update(self, value): """Add a value to the sample.""" super(UniformSample, self).update(value) self.count += 1 c = self.count if c < len(self.sample): self.sample[c-1] = value else: r = random.randint(0, c) if r < len(self.sample): self.sample[r] = value
[ "def", "update", "(", "self", ",", "value", ")", ":", "super", "(", "UniformSample", ",", "self", ")", ".", "update", "(", "value", ")", "self", ".", "count", "+=", "1", "c", "=", "self", ".", "count", "if", "c", "<", "len", "(", "self", ".", "sample", ")", ":", "self", ".", "sample", "[", "c", "-", "1", "]", "=", "value", "else", ":", "r", "=", "random", ".", "randint", "(", "0", ",", "c", ")", "if", "r", "<", "len", "(", "self", ".", "sample", ")", ":", "self", ".", "sample", "[", "r", "]", "=", "value" ]
Add a value to the sample.
[ "Add", "a", "value", "to", "the", "sample", "." ]
python
train
24.833333
cloudtools/stacker
stacker/blueprints/base.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/blueprints/base.py#L153-L170
def validate_allowed_values(allowed_values, value): """Support a variable defining which values it allows. Args: allowed_values (Optional[list]): A list of allowed values from the variable definition value (obj): The object representing the value provided for the variable Returns: bool: Boolean for whether or not the value is valid. """ # ignore CFNParameter, troposphere handles these for us if not allowed_values or isinstance(value, CFNParameter): return True return value in allowed_values
[ "def", "validate_allowed_values", "(", "allowed_values", ",", "value", ")", ":", "# ignore CFNParameter, troposphere handles these for us", "if", "not", "allowed_values", "or", "isinstance", "(", "value", ",", "CFNParameter", ")", ":", "return", "True", "return", "value", "in", "allowed_values" ]
Support a variable defining which values it allows. Args: allowed_values (Optional[list]): A list of allowed values from the variable definition value (obj): The object representing the value provided for the variable Returns: bool: Boolean for whether or not the value is valid.
[ "Support", "a", "variable", "defining", "which", "values", "it", "allows", "." ]
python
train
31.444444
ncclient/ncclient
ncclient/transport/session.py
https://github.com/ncclient/ncclient/blob/2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a/ncclient/transport/session.py#L164-L169
def send(self, message): """Send the supplied *message* (xml string) to NETCONF server.""" if not self.connected: raise TransportError('Not connected to NETCONF server') self.logger.debug('queueing %s', message) self._q.put(message)
[ "def", "send", "(", "self", ",", "message", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "TransportError", "(", "'Not connected to NETCONF server'", ")", "self", ".", "logger", ".", "debug", "(", "'queueing %s'", ",", "message", ")", "self", ".", "_q", ".", "put", "(", "message", ")" ]
Send the supplied *message* (xml string) to NETCONF server.
[ "Send", "the", "supplied", "*", "message", "*", "(", "xml", "string", ")", "to", "NETCONF", "server", "." ]
python
train
45.166667
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L302-L344
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standardized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
[ "def", "add_standard_attention_hparams", "(", "hparams", ")", ":", "# All hyperparameters ending in \"dropout\" are automatically set to 0.0", "# when not in training mode.", "# hparams used and which should have been defined outside (in", "# common_hparams):", "# Global flags", "# hparams.mode", "# hparams.hidden_size", "# Pre-post processing flags", "# hparams.layer_preprocess_sequence", "# hparams.layer_postprocess_sequence", "# hparams.layer_prepostprocess_dropout", "# hparams.norm_type", "# hparams.norm_epsilon", "# Mixture-of-Expert flags", "# hparams.moe_hidden_sizes", "# hparams.moe_num_experts", "# hparams.moe_k", "# hparams.moe_loss_coef", "# Attention layers flags", "hparams", ".", "add_hparam", "(", "\"num_heads\"", ",", "8", ")", "hparams", ".", "add_hparam", "(", "\"attention_key_channels\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"attention_value_channels\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"attention_dropout\"", ",", "0.0", ")", "# Attention: Local", "hparams", ".", "add_hparam", "(", "\"attention_loc_block_length\"", ",", "256", ")", "# Attention: Local (unmasked only): How much to look left.", "hparams", ".", "add_hparam", "(", "\"attention_loc_block_width\"", ",", "128", ")", "# Attention: Memory-compressed", "hparams", ".", "add_hparam", "(", "\"attention_red_factor\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"attention_red_type\"", ",", "\"conv\"", ")", "hparams", ".", "add_hparam", "(", "\"attention_red_nonlinearity\"", ",", "\"none\"", ")", "# Fully connected layers flags", "# To be more consistent, should use filter_size to also control the MOE", "# size if moe_hidden_sizes not set.", "hparams", ".", "add_hparam", "(", "\"filter_size\"", ",", "2048", ")", "hparams", ".", "add_hparam", "(", "\"relu_dropout\"", ",", "0.0", ")", "return", "hparams" ]
Adds the hparams used by get_standardized_layers.
[ "Adds", "the", "hparams", "used", "by", "get_standardized_layers", "." ]
python
train
34.372093
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/client.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/client.py#L314-L339
def show_kernel_error(self, error): """Show kernel initialization errors in infowidget.""" # Replace end of line chars with <br> eol = sourcecode.get_eol_chars(error) if eol: error = error.replace(eol, '<br>') # Don't break lines in hyphens # From https://stackoverflow.com/q/7691569/438386 error = error.replace('-', '&#8209') # Create error page message = _("An error ocurred while starting the kernel") kernel_error_template = Template(KERNEL_ERROR) self.info_page = kernel_error_template.substitute( css_path=self.css_path, message=message, error=error) # Show error self.set_info_page() self.shellwidget.hide() self.infowidget.show() # Tell the client we're in error mode self.is_error_shown = True
[ "def", "show_kernel_error", "(", "self", ",", "error", ")", ":", "# Replace end of line chars with <br>\r", "eol", "=", "sourcecode", ".", "get_eol_chars", "(", "error", ")", "if", "eol", ":", "error", "=", "error", ".", "replace", "(", "eol", ",", "'<br>'", ")", "# Don't break lines in hyphens\r", "# From https://stackoverflow.com/q/7691569/438386\r", "error", "=", "error", ".", "replace", "(", "'-'", ",", "'&#8209'", ")", "# Create error page\r", "message", "=", "_", "(", "\"An error ocurred while starting the kernel\"", ")", "kernel_error_template", "=", "Template", "(", "KERNEL_ERROR", ")", "self", ".", "info_page", "=", "kernel_error_template", ".", "substitute", "(", "css_path", "=", "self", ".", "css_path", ",", "message", "=", "message", ",", "error", "=", "error", ")", "# Show error\r", "self", ".", "set_info_page", "(", ")", "self", ".", "shellwidget", ".", "hide", "(", ")", "self", ".", "infowidget", ".", "show", "(", ")", "# Tell the client we're in error mode\r", "self", ".", "is_error_shown", "=", "True" ]
Show kernel initialization errors in infowidget.
[ "Show", "kernel", "initialization", "errors", "in", "infowidget", "." ]
python
train
34.269231
openai/baselines
baselines/common/tf_util.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L51-L56
def get_session(config=None): """Get default session or create one with a given config""" sess = tf.get_default_session() if sess is None: sess = make_session(config=config, make_default=True) return sess
[ "def", "get_session", "(", "config", "=", "None", ")", ":", "sess", "=", "tf", ".", "get_default_session", "(", ")", "if", "sess", "is", "None", ":", "sess", "=", "make_session", "(", "config", "=", "config", ",", "make_default", "=", "True", ")", "return", "sess" ]
Get default session or create one with a given config
[ "Get", "default", "session", "or", "create", "one", "with", "a", "given", "config" ]
python
valid
37.166667
LuminosoInsight/luminoso-api-client-python
luminoso_api/v5_client.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L302-L308
def upload(self, path, docs, **params): """ A deprecated alias for post(path, docs=docs), included only for backward compatibility. """ logger.warning('The upload method is deprecated; use post instead.') return self.post(path, docs=docs)
[ "def", "upload", "(", "self", ",", "path", ",", "docs", ",", "*", "*", "params", ")", ":", "logger", ".", "warning", "(", "'The upload method is deprecated; use post instead.'", ")", "return", "self", ".", "post", "(", "path", ",", "docs", "=", "docs", ")" ]
A deprecated alias for post(path, docs=docs), included only for backward compatibility.
[ "A", "deprecated", "alias", "for", "post", "(", "path", "docs", "=", "docs", ")", "included", "only", "for", "backward", "compatibility", "." ]
python
test
40
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L767-L788
def press(self): ''' press key via name or key code. Supported key name includes: home, back, left, right, up, down, center, menu, search, enter, delete(or del), recent(recent apps), volume_up, volume_down, volume_mute, camera, power. Usage: d.press.back() # press back key d.press.menu() # press home key d.press(89) # press keycode ''' @param_to_property( key=["home", "back", "left", "right", "up", "down", "center", "menu", "search", "enter", "delete", "del", "recent", "volume_up", "volume_down", "volume_mute", "camera", "power"] ) def _press(key, meta=None): if isinstance(key, int): return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key) else: return self.server.jsonrpc.pressKey(str(key)) return _press
[ "def", "press", "(", "self", ")", ":", "@", "param_to_property", "(", "key", "=", "[", "\"home\"", ",", "\"back\"", ",", "\"left\"", ",", "\"right\"", ",", "\"up\"", ",", "\"down\"", ",", "\"center\"", ",", "\"menu\"", ",", "\"search\"", ",", "\"enter\"", ",", "\"delete\"", ",", "\"del\"", ",", "\"recent\"", ",", "\"volume_up\"", ",", "\"volume_down\"", ",", "\"volume_mute\"", ",", "\"camera\"", ",", "\"power\"", "]", ")", "def", "_press", "(", "key", ",", "meta", "=", "None", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "self", ".", "server", ".", "jsonrpc", ".", "pressKeyCode", "(", "key", ",", "meta", ")", "if", "meta", "else", "self", ".", "server", ".", "jsonrpc", ".", "pressKeyCode", "(", "key", ")", "else", ":", "return", "self", ".", "server", ".", "jsonrpc", ".", "pressKey", "(", "str", "(", "key", ")", ")", "return", "_press" ]
press key via name or key code. Supported key name includes: home, back, left, right, up, down, center, menu, search, enter, delete(or del), recent(recent apps), volume_up, volume_down, volume_mute, camera, power. Usage: d.press.back() # press back key d.press.menu() # press home key d.press(89) # press keycode
[ "press", "key", "via", "name", "or", "key", "code", ".", "Supported", "key", "name", "includes", ":", "home", "back", "left", "right", "up", "down", "center", "menu", "search", "enter", "delete", "(", "or", "del", ")", "recent", "(", "recent", "apps", ")", "volume_up", "volume_down", "volume_mute", "camera", "power", ".", "Usage", ":", "d", ".", "press", ".", "back", "()", "#", "press", "back", "key", "d", ".", "press", ".", "menu", "()", "#", "press", "home", "key", "d", ".", "press", "(", "89", ")", "#", "press", "keycode" ]
python
train
43.5
unt-libraries/edtf-validate
edtf_validate/valid_edtf.py
https://github.com/unt-libraries/edtf-validate/blob/d6d63141919a66aea4ff1c31fa0cb8ff744ef9d9/edtf_validate/valid_edtf.py#L354-L406
def zero_year_special_case(from_date, to_date, start, end): """strptime does not resolve a 0000 year, we must handle this.""" if start == 'pos' and end == 'pos': # always interval from earlier to later if from_date.startswith('0000') and not to_date.startswith('0000'): return True # always interval from later to earlier if not from_date.startswith('0000') and to_date.startswith('0000'): return False # an interval from 0000-MM-DD/0000-MM-DD ??? PARSE !!! if from_date.startswith('0000') and to_date.startswith('0000'): # fill from date assuming first subsequent date object if missing # missing m+d, assume jan 1 if len(from_date) == 4: fm, fd = 1, 1 # missing d, assume the 1st elif len(from_date) == 7: fm, fd = int(from_date[5:7]), 1 # not missing any date objects elif len(from_date) == 10: fm, fd = int(from_date[5:7]), int(from_date[8:10]) # fill to date assuming first subsequent date object if missing # missing m+d, assume jan 1 if len(to_date) == 4: tm, td = 1, 1 # missing d, assume the 1st elif len(to_date) == 7: tm, td = int(to_date[5:7]), 1 # not missing any date objects elif len(to_date) == 10: tm, td = int(to_date[5:7]), int(to_date[8:10]) # equality check if from_date == to_date: return True # compare the dates if fm <= tm: if fd <= td: return True else: return False else: return False # these cases are always one way or the other # "-0000" is an invalid edtf elif start == 'neg' and end == 'neg': return False # False unless start is not "0000" elif start == 'neg' and end == 'pos': if from_date.startswith("0000"): return False else: return True
[ "def", "zero_year_special_case", "(", "from_date", ",", "to_date", ",", "start", ",", "end", ")", ":", "if", "start", "==", "'pos'", "and", "end", "==", "'pos'", ":", "# always interval from earlier to later", "if", "from_date", ".", "startswith", "(", "'0000'", ")", "and", "not", "to_date", ".", "startswith", "(", "'0000'", ")", ":", "return", "True", "# always interval from later to earlier", "if", "not", "from_date", ".", "startswith", "(", "'0000'", ")", "and", "to_date", ".", "startswith", "(", "'0000'", ")", ":", "return", "False", "# an interval from 0000-MM-DD/0000-MM-DD ??? PARSE !!!", "if", "from_date", ".", "startswith", "(", "'0000'", ")", "and", "to_date", ".", "startswith", "(", "'0000'", ")", ":", "# fill from date assuming first subsequent date object if missing", "# missing m+d, assume jan 1", "if", "len", "(", "from_date", ")", "==", "4", ":", "fm", ",", "fd", "=", "1", ",", "1", "# missing d, assume the 1st", "elif", "len", "(", "from_date", ")", "==", "7", ":", "fm", ",", "fd", "=", "int", "(", "from_date", "[", "5", ":", "7", "]", ")", ",", "1", "# not missing any date objects", "elif", "len", "(", "from_date", ")", "==", "10", ":", "fm", ",", "fd", "=", "int", "(", "from_date", "[", "5", ":", "7", "]", ")", ",", "int", "(", "from_date", "[", "8", ":", "10", "]", ")", "# fill to date assuming first subsequent date object if missing", "# missing m+d, assume jan 1", "if", "len", "(", "to_date", ")", "==", "4", ":", "tm", ",", "td", "=", "1", ",", "1", "# missing d, assume the 1st", "elif", "len", "(", "to_date", ")", "==", "7", ":", "tm", ",", "td", "=", "int", "(", "to_date", "[", "5", ":", "7", "]", ")", ",", "1", "# not missing any date objects", "elif", "len", "(", "to_date", ")", "==", "10", ":", "tm", ",", "td", "=", "int", "(", "to_date", "[", "5", ":", "7", "]", ")", ",", "int", "(", "to_date", "[", "8", ":", "10", "]", ")", "# equality check", "if", "from_date", "==", "to_date", ":", "return", "True", "# compare the dates", "if", "fm", "<=", "tm", ":", "if", "fd", "<=", "td", ":", "return", "True", "else", ":", "return", "False", "else", ":", "return", "False", "# these cases are always one way or the other", "# \"-0000\" is an invalid edtf", "elif", "start", "==", "'neg'", "and", "end", "==", "'neg'", ":", "return", "False", "# False unless start is not \"0000\"", "elif", "start", "==", "'neg'", "and", "end", "==", "'pos'", ":", "if", "from_date", ".", "startswith", "(", "\"0000\"", ")", ":", "return", "False", "else", ":", "return", "True" ]
strptime does not resolve a 0000 year, we must handle this.
[ "strptime", "does", "not", "resolve", "a", "0000", "year", "we", "must", "handle", "this", "." ]
python
train
39.603774
openstack/networking-cisco
networking_cisco/plugins/cisco/device_manager/plugging_drivers/aci_vlan_trunking_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/plugging_drivers/aci_vlan_trunking_driver.py#L297-L362
def allocate_hosting_port(self, context, router_id, port_db, network_type, hosting_device_id): """Get the VLAN and port for this hosting device The VLAN used between the APIC and the external router is stored by the APIC driver. This calls into the APIC driver to first get the ACI VRF information associated with this port, then uses that to look up the VLAN to use for this port to the external router (kept as part of the L3 Out policy in ACI). """ # If this is a router interface, the VLAN comes from APIC. # If it's the gateway, the VLAN comes from the segment ID if port_db.get('device_owner') == DEVICE_OWNER_ROUTER_GW: ext_dict, net = self._get_external_network_dict(context, port_db) # If an OpFlex network is used on the external network, # the actual segment ID comes from the config file if net and net.get('provider:network_type') == 'opflex': if ext_dict.get('segmentation_id'): return {'allocated_port_id': port_db.id, 'allocated_vlan': ext_dict['segmentation_id']} else: raise AciDriverConfigMissingSegmentationId(ext_net=net) return super(AciVLANTrunkingPlugDriver, self).allocate_hosting_port( context, router_id, port_db, network_type, hosting_device_id) # shouldn't happen, but just in case if port_db.get('device_owner') != DEVICE_OWNER_ROUTER_INTF: return # get the external network that this port connects to. # if there isn't an external gateway yet on the router, # then don't allocate a port router = self.l3_plugin.get_router(context, router_id) gw_info = router[EXTERNAL_GW_INFO] if not gw_info: return network_id = gw_info.get('network_id') networks = self._core_plugin.get_networks( context.elevated(), {'id': [network_id]}) l3out_network = networks[0] l3out_name = self.get_ext_net_name(l3out_network['name']) # For VLAN apic driver provides VLAN tag details = self.get_vrf_context(context, router_id, port_db) if details is None: LOG.debug('aci_vlan_trunking_driver: No vrf_details') return vrf_name = details.get('vrf_name') vrf_tenant = details.get('vrf_tenant') allocated_vlan = self.apic_driver.l3out_vlan_alloc.get_vlan_allocated( l3out_name, vrf_name, vrf_tenant=vrf_tenant) if allocated_vlan is None: if not vrf_tenant: # TODO(tbachman): I can't remember why this is here return super(AciVLANTrunkingPlugDriver, self).allocate_hosting_port( context, router_id, port_db, network_type, hosting_device_id ) # Database must have been messed up if this happens ... return return {'allocated_port_id': port_db.id, 'allocated_vlan': allocated_vlan}
[ "def", "allocate_hosting_port", "(", "self", ",", "context", ",", "router_id", ",", "port_db", ",", "network_type", ",", "hosting_device_id", ")", ":", "# If this is a router interface, the VLAN comes from APIC.", "# If it's the gateway, the VLAN comes from the segment ID", "if", "port_db", ".", "get", "(", "'device_owner'", ")", "==", "DEVICE_OWNER_ROUTER_GW", ":", "ext_dict", ",", "net", "=", "self", ".", "_get_external_network_dict", "(", "context", ",", "port_db", ")", "# If an OpFlex network is used on the external network,", "# the actual segment ID comes from the config file", "if", "net", "and", "net", ".", "get", "(", "'provider:network_type'", ")", "==", "'opflex'", ":", "if", "ext_dict", ".", "get", "(", "'segmentation_id'", ")", ":", "return", "{", "'allocated_port_id'", ":", "port_db", ".", "id", ",", "'allocated_vlan'", ":", "ext_dict", "[", "'segmentation_id'", "]", "}", "else", ":", "raise", "AciDriverConfigMissingSegmentationId", "(", "ext_net", "=", "net", ")", "return", "super", "(", "AciVLANTrunkingPlugDriver", ",", "self", ")", ".", "allocate_hosting_port", "(", "context", ",", "router_id", ",", "port_db", ",", "network_type", ",", "hosting_device_id", ")", "# shouldn't happen, but just in case", "if", "port_db", ".", "get", "(", "'device_owner'", ")", "!=", "DEVICE_OWNER_ROUTER_INTF", ":", "return", "# get the external network that this port connects to.", "# if there isn't an external gateway yet on the router,", "# then don't allocate a port", "router", "=", "self", ".", "l3_plugin", ".", "get_router", "(", "context", ",", "router_id", ")", "gw_info", "=", "router", "[", "EXTERNAL_GW_INFO", "]", "if", "not", "gw_info", ":", "return", "network_id", "=", "gw_info", ".", "get", "(", "'network_id'", ")", "networks", "=", "self", ".", "_core_plugin", ".", "get_networks", "(", "context", ".", "elevated", "(", ")", ",", "{", "'id'", ":", "[", "network_id", "]", "}", ")", "l3out_network", "=", "networks", "[", "0", "]", "l3out_name", "=", "self", ".", "get_ext_net_name", "(", "l3out_network", "[", "'name'", "]", ")", "# For VLAN apic driver provides VLAN tag", "details", "=", "self", ".", "get_vrf_context", "(", "context", ",", "router_id", ",", "port_db", ")", "if", "details", "is", "None", ":", "LOG", ".", "debug", "(", "'aci_vlan_trunking_driver: No vrf_details'", ")", "return", "vrf_name", "=", "details", ".", "get", "(", "'vrf_name'", ")", "vrf_tenant", "=", "details", ".", "get", "(", "'vrf_tenant'", ")", "allocated_vlan", "=", "self", ".", "apic_driver", ".", "l3out_vlan_alloc", ".", "get_vlan_allocated", "(", "l3out_name", ",", "vrf_name", ",", "vrf_tenant", "=", "vrf_tenant", ")", "if", "allocated_vlan", "is", "None", ":", "if", "not", "vrf_tenant", ":", "# TODO(tbachman): I can't remember why this is here", "return", "super", "(", "AciVLANTrunkingPlugDriver", ",", "self", ")", ".", "allocate_hosting_port", "(", "context", ",", "router_id", ",", "port_db", ",", "network_type", ",", "hosting_device_id", ")", "# Database must have been messed up if this happens ...", "return", "return", "{", "'allocated_port_id'", ":", "port_db", ".", "id", ",", "'allocated_vlan'", ":", "allocated_vlan", "}" ]
Get the VLAN and port for this hosting device The VLAN used between the APIC and the external router is stored by the APIC driver. This calls into the APIC driver to first get the ACI VRF information associated with this port, then uses that to look up the VLAN to use for this port to the external router (kept as part of the L3 Out policy in ACI).
[ "Get", "the", "VLAN", "and", "port", "for", "this", "hosting", "device" ]
python
train
48.69697
vkorn/pyvizio
custom_components/vizio/media_player.py
https://github.com/vkorn/pyvizio/blob/7153c9ad544195c867c14f8f03c97dba416c0a7a/custom_components/vizio/media_player.py#L242-L252
def set_volume_level(self, volume): """Set volume level.""" if self._volume_level is not None: if volume > self._volume_level: num = int(self._max_volume * (volume - self._volume_level)) self._volume_level = volume self._device.vol_up(num=num) elif volume < self._volume_level: num = int(self._max_volume * (self._volume_level - volume)) self._volume_level = volume self._device.vol_down(num=num)
[ "def", "set_volume_level", "(", "self", ",", "volume", ")", ":", "if", "self", ".", "_volume_level", "is", "not", "None", ":", "if", "volume", ">", "self", ".", "_volume_level", ":", "num", "=", "int", "(", "self", ".", "_max_volume", "*", "(", "volume", "-", "self", ".", "_volume_level", ")", ")", "self", ".", "_volume_level", "=", "volume", "self", ".", "_device", ".", "vol_up", "(", "num", "=", "num", ")", "elif", "volume", "<", "self", ".", "_volume_level", ":", "num", "=", "int", "(", "self", ".", "_max_volume", "*", "(", "self", ".", "_volume_level", "-", "volume", ")", ")", "self", ".", "_volume_level", "=", "volume", "self", ".", "_device", ".", "vol_down", "(", "num", "=", "num", ")" ]
Set volume level.
[ "Set", "volume", "level", "." ]
python
test
47.454545
tradenity/python-sdk
tradenity/resources/store_credit_payment.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/store_credit_payment.py#L725-L746
def replace_store_credit_payment_by_id(cls, store_credit_payment_id, store_credit_payment, **kwargs): """Replace StoreCreditPayment Replace all attributes of StoreCreditPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to replace (required) :param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) else: (data) = cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) return data
[ "def", "replace_store_credit_payment_by_id", "(", "cls", ",", "store_credit_payment_id", ",", "store_credit_payment", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_replace_store_credit_payment_by_id_with_http_info", "(", "store_credit_payment_id", ",", "store_credit_payment", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_replace_store_credit_payment_by_id_with_http_info", "(", "store_credit_payment_id", ",", "store_credit_payment", ",", "*", "*", "kwargs", ")", "return", "data" ]
Replace StoreCreditPayment Replace all attributes of StoreCreditPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to replace (required) :param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread.
[ "Replace", "StoreCreditPayment" ]
python
train
55.454545
zqfang/GSEApy
gseapy/gsea.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L650-L691
def runSamplesPermu(self, df, gmt=None): """Single Sample GSEA workflow with permutation procedure""" assert self.min_size <= self.max_size mkdirs(self.outdir) self.resultsOnSamples = OrderedDict() outdir = self.outdir # iter throught each sample for name, ser in df.iteritems(): self.outdir = os.path.join(outdir, str(name)) self._logger.info("Run Sample: %s " % name) mkdirs(self.outdir) # sort ranking values from high to low or reverse dat2 = ser.sort_values(ascending=self.ascending) # reset integer index, or caused unwanted problems # df.reset_index(drop=True, inplace=True) # compute ES, NES, pval, FDR, RES gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt, weighted_score_type=self.weighted_score_type, permutation_type='gene_set', method=None, pheno_pos='', pheno_neg='', classes=None, ascending=self.ascending, processes=self._processes, seed=self.seed, single=True, scale=self.scale) # write file res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES) self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module, gmt=gmt, rank_metric=dat2, permutation_type="gene_sets") self.resultsOnSamples[name] = self.res2d.es # plotting if self._noplot: continue self._logger.info("Plotting Sample: %s \n" % name) self._plotting(rank_metric=dat2, results=self.results, graph_num=self.graph_num, outdir=self.outdir, figsize=self.figsize, format=self.format) # save es, nes to file self._save(outdir) return
[ "def", "runSamplesPermu", "(", "self", ",", "df", ",", "gmt", "=", "None", ")", ":", "assert", "self", ".", "min_size", "<=", "self", ".", "max_size", "mkdirs", "(", "self", ".", "outdir", ")", "self", ".", "resultsOnSamples", "=", "OrderedDict", "(", ")", "outdir", "=", "self", ".", "outdir", "# iter throught each sample", "for", "name", ",", "ser", "in", "df", ".", "iteritems", "(", ")", ":", "self", ".", "outdir", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "str", "(", "name", ")", ")", "self", ".", "_logger", ".", "info", "(", "\"Run Sample: %s \"", "%", "name", ")", "mkdirs", "(", "self", ".", "outdir", ")", "# sort ranking values from high to low or reverse", "dat2", "=", "ser", ".", "sort_values", "(", "ascending", "=", "self", ".", "ascending", ")", "# reset integer index, or caused unwanted problems", "# df.reset_index(drop=True, inplace=True)", "# compute ES, NES, pval, FDR, RES", "gsea_results", ",", "hit_ind", ",", "rank_ES", ",", "subsets", "=", "gsea_compute", "(", "data", "=", "dat2", ",", "n", "=", "self", ".", "permutation_num", ",", "gmt", "=", "gmt", ",", "weighted_score_type", "=", "self", ".", "weighted_score_type", ",", "permutation_type", "=", "'gene_set'", ",", "method", "=", "None", ",", "pheno_pos", "=", "''", ",", "pheno_neg", "=", "''", ",", "classes", "=", "None", ",", "ascending", "=", "self", ".", "ascending", ",", "processes", "=", "self", ".", "_processes", ",", "seed", "=", "self", ".", "seed", ",", "single", "=", "True", ",", "scale", "=", "self", ".", "scale", ")", "# write file", "res_zip", "=", "zip", "(", "subsets", ",", "list", "(", "gsea_results", ")", ",", "hit_ind", ",", "rank_ES", ")", "self", ".", "_save_results", "(", "zipdata", "=", "res_zip", ",", "outdir", "=", "self", ".", "outdir", ",", "module", "=", "self", ".", "module", ",", "gmt", "=", "gmt", ",", "rank_metric", "=", "dat2", ",", "permutation_type", "=", "\"gene_sets\"", ")", "self", ".", "resultsOnSamples", "[", "name", "]", "=", "self", ".", "res2d", ".", "es", "# plotting", "if", "self", ".", "_noplot", ":", "continue", "self", ".", "_logger", ".", "info", "(", "\"Plotting Sample: %s \\n\"", "%", "name", ")", "self", ".", "_plotting", "(", "rank_metric", "=", "dat2", ",", "results", "=", "self", ".", "results", ",", "graph_num", "=", "self", ".", "graph_num", ",", "outdir", "=", "self", ".", "outdir", ",", "figsize", "=", "self", ".", "figsize", ",", "format", "=", "self", ".", "format", ")", "# save es, nes to file", "self", ".", "_save", "(", "outdir", ")", "return" ]
Single Sample GSEA workflow with permutation procedure
[ "Single", "Sample", "GSEA", "workflow", "with", "permutation", "procedure" ]
python
test
52.642857
sony/nnabla
python/src/nnabla/experimental/graph_converters/sequential.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/sequential.py#L17-L29
def convert(self, vroot, entry_variables): """Convert a given graph. Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ for converter in self.converters: vroot = converter.convert(vroot, entry_variables) return vroot
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "for", "converter", "in", "self", ".", "converters", ":", "vroot", "=", "converter", ".", "convert", "(", "vroot", ",", "entry_variables", ")", "return", "vroot" ]
Convert a given graph. Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "Convert", "a", "given", "graph", "." ]
python
train
36.538462
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py#L334-L345
def elapsed(self): """elapsed time since initial submission""" if self.ready(): return self.wall_time now = submitted = datetime.now() for msg_id in self.msg_ids: if msg_id in self._client.metadata: stamp = self._client.metadata[msg_id]['submitted'] if stamp and stamp < submitted: submitted = stamp return _total_seconds(now-submitted)
[ "def", "elapsed", "(", "self", ")", ":", "if", "self", ".", "ready", "(", ")", ":", "return", "self", ".", "wall_time", "now", "=", "submitted", "=", "datetime", ".", "now", "(", ")", "for", "msg_id", "in", "self", ".", "msg_ids", ":", "if", "msg_id", "in", "self", ".", "_client", ".", "metadata", ":", "stamp", "=", "self", ".", "_client", ".", "metadata", "[", "msg_id", "]", "[", "'submitted'", "]", "if", "stamp", "and", "stamp", "<", "submitted", ":", "submitted", "=", "stamp", "return", "_total_seconds", "(", "now", "-", "submitted", ")" ]
elapsed time since initial submission
[ "elapsed", "time", "since", "initial", "submission" ]
python
test
37.5
rflamary/POT
examples/plot_otda_linear_mapping.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/examples/plot_otda_linear_mapping.py#L80-L82
def im2mat(I): """Converts and image to matrix (one pixel per line)""" return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
[ "def", "im2mat", "(", "I", ")", ":", "return", "I", ".", "reshape", "(", "(", "I", ".", "shape", "[", "0", "]", "*", "I", ".", "shape", "[", "1", "]", ",", "I", ".", "shape", "[", "2", "]", ")", ")" ]
Converts and image to matrix (one pixel per line)
[ "Converts", "and", "image", "to", "matrix", "(", "one", "pixel", "per", "line", ")" ]
python
train
44
jason-weirather/py-seq-tools
seqtools/errors.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/errors.py#L346-L359
def set_observable(self,tseq,qseq): """Set the observable sequence data :param tseq: target sequence (from the homopolymer) :param qseq: query sequence ( from the homopolymer) :type tseq: string :type qseq: string """ tnt = None qnt = None if len(tseq) > 0: tnt = tseq[0] if len(qseq) > 0: qnt = qseq[0] self._observable.set(len(tseq),len(qseq),tnt,qnt)
[ "def", "set_observable", "(", "self", ",", "tseq", ",", "qseq", ")", ":", "tnt", "=", "None", "qnt", "=", "None", "if", "len", "(", "tseq", ")", ">", "0", ":", "tnt", "=", "tseq", "[", "0", "]", "if", "len", "(", "qseq", ")", ">", "0", ":", "qnt", "=", "qseq", "[", "0", "]", "self", ".", "_observable", ".", "set", "(", "len", "(", "tseq", ")", ",", "len", "(", "qseq", ")", ",", "tnt", ",", "qnt", ")" ]
Set the observable sequence data :param tseq: target sequence (from the homopolymer) :param qseq: query sequence ( from the homopolymer) :type tseq: string :type qseq: string
[ "Set", "the", "observable", "sequence", "data" ]
python
train
27.571429
MisterWil/skybellpy
skybellpy/device.py
https://github.com/MisterWil/skybellpy/blob/ac966d9f590cda7654f6de7eecc94e2103459eef/skybellpy/device.py#L112-L131
def _update_events(self): """Update our cached list of latest activity events.""" events = self._skybell.dev_cache(self, CONST.EVENT) or {} for activity in self._activities: event = activity.get(CONST.EVENT) created_at = activity.get(CONST.CREATED_AT) old_event = events.get(event) if old_event and created_at < old_event.get(CONST.CREATED_AT): continue else: events[event] = activity self._skybell.update_dev_cache( self, { CONST.EVENT: events })
[ "def", "_update_events", "(", "self", ")", ":", "events", "=", "self", ".", "_skybell", ".", "dev_cache", "(", "self", ",", "CONST", ".", "EVENT", ")", "or", "{", "}", "for", "activity", "in", "self", ".", "_activities", ":", "event", "=", "activity", ".", "get", "(", "CONST", ".", "EVENT", ")", "created_at", "=", "activity", ".", "get", "(", "CONST", ".", "CREATED_AT", ")", "old_event", "=", "events", ".", "get", "(", "event", ")", "if", "old_event", "and", "created_at", "<", "old_event", ".", "get", "(", "CONST", ".", "CREATED_AT", ")", ":", "continue", "else", ":", "events", "[", "event", "]", "=", "activity", "self", ".", "_skybell", ".", "update_dev_cache", "(", "self", ",", "{", "CONST", ".", "EVENT", ":", "events", "}", ")" ]
Update our cached list of latest activity events.
[ "Update", "our", "cached", "list", "of", "latest", "activity", "events", "." ]
python
train
30.4
senaite/senaite.core
bika/lims/adapters/referencewidgetvocabulary.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/adapters/referencewidgetvocabulary.py#L181-L187
def is_sortable_index(self, index_name, catalog): """Returns whether the index is sortable """ index = self.get_index(index_name, catalog) if not index: return False return index.meta_type in ["FieldIndex", "DateIndex"]
[ "def", "is_sortable_index", "(", "self", ",", "index_name", ",", "catalog", ")", ":", "index", "=", "self", ".", "get_index", "(", "index_name", ",", "catalog", ")", "if", "not", "index", ":", "return", "False", "return", "index", ".", "meta_type", "in", "[", "\"FieldIndex\"", ",", "\"DateIndex\"", "]" ]
Returns whether the index is sortable
[ "Returns", "whether", "the", "index", "is", "sortable" ]
python
train
37.857143
bokeh/bokeh
bokeh/driving.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/driving.py#L73-L94
def bounce(sequence): ''' Return a driver function that can advance a "bounced" sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...] Args: sequence (seq) : a sequence of values for the driver to bounce ''' N = len(sequence) def f(i): div, mod = divmod(i, N) if div % 2 == 0: return sequence[mod] else: return sequence[N-mod-1] return partial(force, sequence=_advance(f))
[ "def", "bounce", "(", "sequence", ")", ":", "N", "=", "len", "(", "sequence", ")", "def", "f", "(", "i", ")", ":", "div", ",", "mod", "=", "divmod", "(", "i", ",", "N", ")", "if", "div", "%", "2", "==", "0", ":", "return", "sequence", "[", "mod", "]", "else", ":", "return", "sequence", "[", "N", "-", "mod", "-", "1", "]", "return", "partial", "(", "force", ",", "sequence", "=", "_advance", "(", "f", ")", ")" ]
Return a driver function that can advance a "bounced" sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...] Args: sequence (seq) : a sequence of values for the driver to bounce
[ "Return", "a", "driver", "function", "that", "can", "advance", "a", "bounced", "sequence", "of", "values", "." ]
python
train
23.681818
tensorflow/tensorboard
tensorboard/plugins/text/summary_v2.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/summary_v2.py#L63-L89
def text_pb(tag, data, description=None): """Create a text tf.Summary protobuf. Arguments: tag: String tag for the summary. data: A Python bytestring (of type bytes), a Unicode string, or a numpy data array of those types. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Raises: TypeError: If the type of the data is unsupported. Returns: A `tf.Summary` protobuf object. """ try: tensor = tensor_util.make_tensor_proto(data, dtype=np.object) except TypeError as e: raise TypeError('tensor must be of type string', e) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor) return summary
[ "def", "text_pb", "(", "tag", ",", "data", ",", "description", "=", "None", ")", ":", "try", ":", "tensor", "=", "tensor_util", ".", "make_tensor_proto", "(", "data", ",", "dtype", "=", "np", ".", "object", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "'tensor must be of type string'", ",", "e", ")", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ")", "summary", "=", "summary_pb2", ".", "Summary", "(", ")", "summary", ".", "value", ".", "add", "(", "tag", "=", "tag", ",", "metadata", "=", "summary_metadata", ",", "tensor", "=", "tensor", ")", "return", "summary" ]
Create a text tf.Summary protobuf. Arguments: tag: String tag for the summary. data: A Python bytestring (of type bytes), a Unicode string, or a numpy data array of those types. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Raises: TypeError: If the type of the data is unsupported. Returns: A `tf.Summary` protobuf object.
[ "Create", "a", "text", "tf", ".", "Summary", "protobuf", "." ]
python
train
32.666667
BeyondTheClouds/enoslib
enoslib/infra/enos_vagrant/provider.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_vagrant/provider.py#L22-L105
def init(self, force_deploy=False): """Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started """ machines = self.provider_conf.machines networks = self.provider_conf.networks _networks = [] for network in networks: ipnet = IPNetwork(network.cidr) _networks.append({ "netpool": list(ipnet)[10:-10], "cidr": network.cidr, "roles": network.roles, "gateway": ipnet.ip }) vagrant_machines = [] vagrant_roles = {} j = 0 for machine in machines: for _ in range(machine.number): vagrant_machine = { "name": "enos-%s" % j, "cpu": machine.flavour_desc["core"], "mem": machine.flavour_desc["mem"], "ips": [n["netpool"].pop() for n in _networks], } vagrant_machines.append(vagrant_machine) # Assign the machines to the right roles for role in machine.roles: vagrant_roles.setdefault(role, []).append(vagrant_machine) j = j + 1 logger.debug(vagrant_roles) loader = FileSystemLoader(searchpath=TEMPLATE_DIR) env = Environment(loader=loader, autoescape=True) template = env.get_template('Vagrantfile.j2') vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf) vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile") with open(vagrantfile_path, 'w') as f: f.write(vagrantfile) # Build env for Vagrant with a copy of env variables (needed by # subprocess opened by vagrant v_env = dict(os.environ) v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env) if force_deploy: v.destroy() v.up() v.provision() roles = {} for role, machines in vagrant_roles.items(): for machine in machines: keyfile = v.keyfile(vm_name=machine['name']) port = v.port(vm_name=machine['name']) address = v.hostname(vm_name=machine['name']) roles.setdefault(role, []).append( Host(address, alias=machine['name'], user=self.provider_conf.user, port=port, keyfile=keyfile)) networks = [{ 'cidr': str(n["cidr"]), 'start': str(n["netpool"][0]), 'end': str(n["netpool"][-1]), 'dns': '8.8.8.8', 'gateway': str(n["gateway"]), 'roles': n["roles"] } for n in _networks] logger.debug(roles) logger.debug(networks) return (roles, networks)
[ "def", "init", "(", "self", ",", "force_deploy", "=", "False", ")", ":", "machines", "=", "self", ".", "provider_conf", ".", "machines", "networks", "=", "self", ".", "provider_conf", ".", "networks", "_networks", "=", "[", "]", "for", "network", "in", "networks", ":", "ipnet", "=", "IPNetwork", "(", "network", ".", "cidr", ")", "_networks", ".", "append", "(", "{", "\"netpool\"", ":", "list", "(", "ipnet", ")", "[", "10", ":", "-", "10", "]", ",", "\"cidr\"", ":", "network", ".", "cidr", ",", "\"roles\"", ":", "network", ".", "roles", ",", "\"gateway\"", ":", "ipnet", ".", "ip", "}", ")", "vagrant_machines", "=", "[", "]", "vagrant_roles", "=", "{", "}", "j", "=", "0", "for", "machine", "in", "machines", ":", "for", "_", "in", "range", "(", "machine", ".", "number", ")", ":", "vagrant_machine", "=", "{", "\"name\"", ":", "\"enos-%s\"", "%", "j", ",", "\"cpu\"", ":", "machine", ".", "flavour_desc", "[", "\"core\"", "]", ",", "\"mem\"", ":", "machine", ".", "flavour_desc", "[", "\"mem\"", "]", ",", "\"ips\"", ":", "[", "n", "[", "\"netpool\"", "]", ".", "pop", "(", ")", "for", "n", "in", "_networks", "]", ",", "}", "vagrant_machines", ".", "append", "(", "vagrant_machine", ")", "# Assign the machines to the right roles", "for", "role", "in", "machine", ".", "roles", ":", "vagrant_roles", ".", "setdefault", "(", "role", ",", "[", "]", ")", ".", "append", "(", "vagrant_machine", ")", "j", "=", "j", "+", "1", "logger", ".", "debug", "(", "vagrant_roles", ")", "loader", "=", "FileSystemLoader", "(", "searchpath", "=", "TEMPLATE_DIR", ")", "env", "=", "Environment", "(", "loader", "=", "loader", ",", "autoescape", "=", "True", ")", "template", "=", "env", ".", "get_template", "(", "'Vagrantfile.j2'", ")", "vagrantfile", "=", "template", ".", "render", "(", "machines", "=", "vagrant_machines", ",", "provider_conf", "=", "self", ".", "provider_conf", ")", "vagrantfile_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\"Vagrantfile\"", ")", "with", "open", "(", "vagrantfile_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "vagrantfile", ")", "# Build env for Vagrant with a copy of env variables (needed by", "# subprocess opened by vagrant", "v_env", "=", "dict", "(", "os", ".", "environ", ")", "v_env", "[", "'VAGRANT_DEFAULT_PROVIDER'", "]", "=", "self", ".", "provider_conf", ".", "backend", "v", "=", "vagrant", ".", "Vagrant", "(", "root", "=", "os", ".", "getcwd", "(", ")", ",", "quiet_stdout", "=", "False", ",", "quiet_stderr", "=", "False", ",", "env", "=", "v_env", ")", "if", "force_deploy", ":", "v", ".", "destroy", "(", ")", "v", ".", "up", "(", ")", "v", ".", "provision", "(", ")", "roles", "=", "{", "}", "for", "role", ",", "machines", "in", "vagrant_roles", ".", "items", "(", ")", ":", "for", "machine", "in", "machines", ":", "keyfile", "=", "v", ".", "keyfile", "(", "vm_name", "=", "machine", "[", "'name'", "]", ")", "port", "=", "v", ".", "port", "(", "vm_name", "=", "machine", "[", "'name'", "]", ")", "address", "=", "v", ".", "hostname", "(", "vm_name", "=", "machine", "[", "'name'", "]", ")", "roles", ".", "setdefault", "(", "role", ",", "[", "]", ")", ".", "append", "(", "Host", "(", "address", ",", "alias", "=", "machine", "[", "'name'", "]", ",", "user", "=", "self", ".", "provider_conf", ".", "user", ",", "port", "=", "port", ",", "keyfile", "=", "keyfile", ")", ")", "networks", "=", "[", "{", "'cidr'", ":", "str", "(", "n", "[", "\"cidr\"", "]", ")", ",", "'start'", ":", "str", "(", "n", "[", "\"netpool\"", "]", "[", "0", "]", ")", ",", "'end'", ":", "str", "(", "n", "[", "\"netpool\"", "]", "[", "-", "1", "]", ")", ",", "'dns'", ":", "'8.8.8.8'", ",", "'gateway'", ":", "str", "(", "n", "[", "\"gateway\"", "]", ")", ",", "'roles'", ":", "n", "[", "\"roles\"", "]", "}", "for", "n", "in", "_networks", "]", "logger", ".", "debug", "(", "roles", ")", "logger", ".", "debug", "(", "networks", ")", "return", "(", "roles", ",", "networks", ")" ]
Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started
[ "Reserve", "and", "deploys", "the", "vagrant", "boxes", "." ]
python
train
36.904762
Duke-GCB/DukeDSClient
ddsc/core/projectuploader.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/projectuploader.py#L186-L194
def visit_project(self, item): """ Adds create project command to task runner if project doesn't already exist. """ if not item.remote_id: command = CreateProjectCommand(self.settings, item) self.task_runner_add(None, item, command) else: self.settings.project_id = item.remote_id
[ "def", "visit_project", "(", "self", ",", "item", ")", ":", "if", "not", "item", ".", "remote_id", ":", "command", "=", "CreateProjectCommand", "(", "self", ".", "settings", ",", "item", ")", "self", ".", "task_runner_add", "(", "None", ",", "item", ",", "command", ")", "else", ":", "self", ".", "settings", ".", "project_id", "=", "item", ".", "remote_id" ]
Adds create project command to task runner if project doesn't already exist.
[ "Adds", "create", "project", "command", "to", "task", "runner", "if", "project", "doesn", "t", "already", "exist", "." ]
python
train
38.666667
KelSolaar/Foundations
foundations/nodes.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/nodes.py#L436-L450
def list_attributes(self): """ Returns the Node attributes names. Usage:: >>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute()) >>> node_a.list_attributes() ['attributeB', 'attributeA'] :return: Attributes names. :rtype: list """ return [attribute for attribute, value in self.iteritems() if issubclass(value.__class__, Attribute)]
[ "def", "list_attributes", "(", "self", ")", ":", "return", "[", "attribute", "for", "attribute", ",", "value", "in", "self", ".", "iteritems", "(", ")", "if", "issubclass", "(", "value", ".", "__class__", ",", "Attribute", ")", "]" ]
Returns the Node attributes names. Usage:: >>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute()) >>> node_a.list_attributes() ['attributeB', 'attributeA'] :return: Attributes names. :rtype: list
[ "Returns", "the", "Node", "attributes", "names", "." ]
python
train
29.6
siznax/wptools
wptools/category.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/category.py#L104-L122
def _set_data(self, action): """ Set category member data from API response """ data = self._load_response(action) self._handle_continuations(data, 'category') if action == 'category': members = data.get('query').get('categorymembers') if members: self._add_members(members) if action == 'random': rand = data['query']['random'][0] data = {'pageid': rand.get('id'), 'title': rand.get('title')} self.data.update(data) self.params.update(data)
[ "def", "_set_data", "(", "self", ",", "action", ")", ":", "data", "=", "self", ".", "_load_response", "(", "action", ")", "self", ".", "_handle_continuations", "(", "data", ",", "'category'", ")", "if", "action", "==", "'category'", ":", "members", "=", "data", ".", "get", "(", "'query'", ")", ".", "get", "(", "'categorymembers'", ")", "if", "members", ":", "self", ".", "_add_members", "(", "members", ")", "if", "action", "==", "'random'", ":", "rand", "=", "data", "[", "'query'", "]", "[", "'random'", "]", "[", "0", "]", "data", "=", "{", "'pageid'", ":", "rand", ".", "get", "(", "'id'", ")", ",", "'title'", ":", "rand", ".", "get", "(", "'title'", ")", "}", "self", ".", "data", ".", "update", "(", "data", ")", "self", ".", "params", ".", "update", "(", "data", ")" ]
Set category member data from API response
[ "Set", "category", "member", "data", "from", "API", "response" ]
python
train
31.052632
chukysoria/pyspotify-connect
spotifyconnect/utils.py
https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L77-L86
def num_listeners(self, event=None): """Return the number of listeners for ``event``. Return the total number of listeners for all events on this object if ``event`` is :class:`None`. """ if event is not None: return len(self._listeners[event]) else: return sum(len(l) for l in self._listeners.values())
[ "def", "num_listeners", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "not", "None", ":", "return", "len", "(", "self", ".", "_listeners", "[", "event", "]", ")", "else", ":", "return", "sum", "(", "len", "(", "l", ")", "for", "l", "in", "self", ".", "_listeners", ".", "values", "(", ")", ")" ]
Return the number of listeners for ``event``. Return the total number of listeners for all events on this object if ``event`` is :class:`None`.
[ "Return", "the", "number", "of", "listeners", "for", "event", "." ]
python
train
36.7
theislab/scvelo
scvelo/tools/utils.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/utils.py#L30-L33
def prod_sum_var(A, B): """dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1) """ return A.multiply(B).sum(1).A1 if issparse(A) else np.einsum('ij, ij -> i', A, B)
[ "def", "prod_sum_var", "(", "A", ",", "B", ")", ":", "return", "A", ".", "multiply", "(", "B", ")", ".", "sum", "(", "1", ")", ".", "A1", "if", "issparse", "(", "A", ")", "else", "np", ".", "einsum", "(", "'ij, ij -> i'", ",", "A", ",", "B", ")" ]
dot product and sum over axis 1 (var) equivalent to np.sum(A * B, 1)
[ "dot", "product", "and", "sum", "over", "axis", "1", "(", "var", ")", "equivalent", "to", "np", ".", "sum", "(", "A", "*", "B", "1", ")" ]
python
train
47.5
mitsei/dlkit
dlkit/records/osid/base_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L985-L1011
def add_text(self, text, label=None): """stub""" if label is None: label = self._label_metadata['default_string_values'][0] else: if not self.my_osid_object_form._is_valid_string( label, self.get_label_metadata()) or '.' in label: raise InvalidArgument('label') if text is None: raise NullArgument('text cannot be none') if not (self.my_osid_object_form._is_valid_string( text, self.get_text_metadata()) or isinstance(text, DisplayText)): raise InvalidArgument('text') if utilities.is_string(text): self.my_osid_object_form._my_map['texts'][label] = { 'text': text, 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE) } else: self.my_osid_object_form._my_map['texts'][label] = { 'text': text.text, 'languageTypeId': str(text.language_type), 'scriptTypeId': str(text.script_type), 'formatTypeId': str(text.format_type) }
[ "def", "add_text", "(", "self", ",", "text", ",", "label", "=", "None", ")", ":", "if", "label", "is", "None", ":", "label", "=", "self", ".", "_label_metadata", "[", "'default_string_values'", "]", "[", "0", "]", "else", ":", "if", "not", "self", ".", "my_osid_object_form", ".", "_is_valid_string", "(", "label", ",", "self", ".", "get_label_metadata", "(", ")", ")", "or", "'.'", "in", "label", ":", "raise", "InvalidArgument", "(", "'label'", ")", "if", "text", "is", "None", ":", "raise", "NullArgument", "(", "'text cannot be none'", ")", "if", "not", "(", "self", ".", "my_osid_object_form", ".", "_is_valid_string", "(", "text", ",", "self", ".", "get_text_metadata", "(", ")", ")", "or", "isinstance", "(", "text", ",", "DisplayText", ")", ")", ":", "raise", "InvalidArgument", "(", "'text'", ")", "if", "utilities", ".", "is_string", "(", "text", ")", ":", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'texts'", "]", "[", "label", "]", "=", "{", "'text'", ":", "text", ",", "'languageTypeId'", ":", "str", "(", "DEFAULT_LANGUAGE_TYPE", ")", ",", "'scriptTypeId'", ":", "str", "(", "DEFAULT_SCRIPT_TYPE", ")", ",", "'formatTypeId'", ":", "str", "(", "DEFAULT_FORMAT_TYPE", ")", "}", "else", ":", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'texts'", "]", "[", "label", "]", "=", "{", "'text'", ":", "text", ".", "text", ",", "'languageTypeId'", ":", "str", "(", "text", ".", "language_type", ")", ",", "'scriptTypeId'", ":", "str", "(", "text", ".", "script_type", ")", ",", "'formatTypeId'", ":", "str", "(", "text", ".", "format_type", ")", "}" ]
stub
[ "stub" ]
python
train
44.518519
SoCo/SoCo
soco/core.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L724-L735
def bass(self): """int: The speaker's bass EQ. An integer between -10 and 10. """ response = self.renderingControl.GetBass([ ('InstanceID', 0), ('Channel', 'Master'), ]) bass = response['CurrentBass'] return int(bass)
[ "def", "bass", "(", "self", ")", ":", "response", "=", "self", ".", "renderingControl", ".", "GetBass", "(", "[", "(", "'InstanceID'", ",", "0", ")", ",", "(", "'Channel'", ",", "'Master'", ")", ",", "]", ")", "bass", "=", "response", "[", "'CurrentBass'", "]", "return", "int", "(", "bass", ")" ]
int: The speaker's bass EQ. An integer between -10 and 10.
[ "int", ":", "The", "speaker", "s", "bass", "EQ", "." ]
python
train
24
raags/ipmitool
ipmi/ipmi.py
https://github.com/raags/ipmitool/blob/830081623c0ec75d560123a559f0bb201f26cde6/ipmi/ipmi.py#L77-L83
def _subprocess_method(self, command): """Use the subprocess module to execute ipmitool commands and and set status """ p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.output, self.error = p.communicate() self.status = p.returncode
[ "def", "_subprocess_method", "(", "self", ",", "command", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "self", ".", "_ipmitool_path", "]", "+", "self", ".", "args", "+", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "self", ".", "output", ",", "self", ".", "error", "=", "p", ".", "communicate", "(", ")", "self", ".", "status", "=", "p", ".", "returncode" ]
Use the subprocess module to execute ipmitool commands and and set status
[ "Use", "the", "subprocess", "module", "to", "execute", "ipmitool", "commands", "and", "and", "set", "status" ]
python
train
49.142857
chaoss/grimoirelab-cereslib
cereslib/enrich/enrich.py
https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/enrich/enrich.py#L790-L812
def enrich(self, column1, column2): """ This method calculates the difference in seconds between the 2 columns (column2 - column1) The final result may provided negative values depending on the values from column1 and column2. :param column1: first column. Values in column1 must be datetime type :param column2: second column. Values in column2 must be datetime type :type column1: string :type column2: string :return: original dataframe with a new column with the difference between column2 - column1 :rtype: pandas.DataFrame """ if column1 not in self.data.columns or \ column2 not in self.data.columns: return self.data self.data["timedifference"] = (self.data[column2] - self.data[column1]) / np.timedelta64(1, 's') return self.data
[ "def", "enrich", "(", "self", ",", "column1", ",", "column2", ")", ":", "if", "column1", "not", "in", "self", ".", "data", ".", "columns", "or", "column2", "not", "in", "self", ".", "data", ".", "columns", ":", "return", "self", ".", "data", "self", ".", "data", "[", "\"timedifference\"", "]", "=", "(", "self", ".", "data", "[", "column2", "]", "-", "self", ".", "data", "[", "column1", "]", ")", "/", "np", ".", "timedelta64", "(", "1", ",", "'s'", ")", "return", "self", ".", "data" ]
This method calculates the difference in seconds between the 2 columns (column2 - column1) The final result may provided negative values depending on the values from column1 and column2. :param column1: first column. Values in column1 must be datetime type :param column2: second column. Values in column2 must be datetime type :type column1: string :type column2: string :return: original dataframe with a new column with the difference between column2 - column1 :rtype: pandas.DataFrame
[ "This", "method", "calculates", "the", "difference", "in", "seconds", "between", "the", "2", "columns", "(", "column2", "-", "column1", ")" ]
python
train
37.913043
EricCrosson/stump
stump/stump.py
https://github.com/EricCrosson/stump/blob/eb4d9f0dbe2642f86d47ca1b5f51fb7801bb09ab/stump/stump.py#L161-L183
def debug(f, *args, **kwargs): """Automatically log progress on function entry and exit. Default logging value: debug. *Logging with values contained in the parameters of the decorated function* Message (args[0]) may be a string to be formatted with parameters passed to the decorated function. Each '{varname}' will be replaced by the value of the parameter of the same name. *Keyword parameters* - log :: integer - Specifies a custom level of logging to pass to the active logger. - Default: DEBUG *Exceptions:* - IndexError and ValueError - will be returned if *args contains a string that does not correspond to a parameter name of the decorated function, or if there are more '{}'s than there are *args. """ kwargs.update({'log': logging.DEBUG}) return _stump(f, *args, **kwargs)
[ "def", "debug", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'log'", ":", "logging", ".", "DEBUG", "}", ")", "return", "_stump", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Automatically log progress on function entry and exit. Default logging value: debug. *Logging with values contained in the parameters of the decorated function* Message (args[0]) may be a string to be formatted with parameters passed to the decorated function. Each '{varname}' will be replaced by the value of the parameter of the same name. *Keyword parameters* - log :: integer - Specifies a custom level of logging to pass to the active logger. - Default: DEBUG *Exceptions:* - IndexError and ValueError - will be returned if *args contains a string that does not correspond to a parameter name of the decorated function, or if there are more '{}'s than there are *args.
[ "Automatically", "log", "progress", "on", "function", "entry", "and", "exit", ".", "Default", "logging", "value", ":", "debug", "." ]
python
train
37.043478
mental32/spotify.py
spotify/models/library.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/library.py#L79-L88
async def remove_albums(self, *albums): """Remove one or more albums from the current user’s ‘Your Music’ library. Parameters ---------- albums : Sequence[Union[Album, str]] A sequence of artist objects or spotify IDs """ _albums = [(obj if isinstance(obj, str) else obj.id) for obj in albums] await self.user.http.delete_saved_albums(','.join(_albums))
[ "async", "def", "remove_albums", "(", "self", ",", "*", "albums", ")", ":", "_albums", "=", "[", "(", "obj", "if", "isinstance", "(", "obj", ",", "str", ")", "else", "obj", ".", "id", ")", "for", "obj", "in", "albums", "]", "await", "self", ".", "user", ".", "http", ".", "delete_saved_albums", "(", "','", ".", "join", "(", "_albums", ")", ")" ]
Remove one or more albums from the current user’s ‘Your Music’ library. Parameters ---------- albums : Sequence[Union[Album, str]] A sequence of artist objects or spotify IDs
[ "Remove", "one", "or", "more", "albums", "from", "the", "current", "user’s", "‘Your", "Music’", "library", "." ]
python
test
41.3
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L1125-L1146
def dimension_values(self, dimension, expanded=True, flat=True): """Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values Whether to return the expanded values, behavior depends on the type of data: * Columnar: If false returns unique values * Geometry: If false returns scalar values per geometry * Gridded: If false returns 1D coordinates flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension """ val = self._cached_constants.get(dimension, None) if val: return np.array([val]) else: raise Exception("Dimension %s not found in %s." % (dimension, self.__class__.__name__))
[ "def", "dimension_values", "(", "self", ",", "dimension", ",", "expanded", "=", "True", ",", "flat", "=", "True", ")", ":", "val", "=", "self", ".", "_cached_constants", ".", "get", "(", "dimension", ",", "None", ")", "if", "val", ":", "return", "np", ".", "array", "(", "[", "val", "]", ")", "else", ":", "raise", "Exception", "(", "\"Dimension %s not found in %s.\"", "%", "(", "dimension", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values Whether to return the expanded values, behavior depends on the type of data: * Columnar: If false returns unique values * Geometry: If false returns scalar values per geometry * Gridded: If false returns 1D coordinates flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
[ "Return", "the", "values", "along", "the", "requested", "dimension", "." ]
python
train
43.181818
tensorflow/hub
tensorflow_hub/image_util.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/image_util.py#L89-L125
def get_num_image_channels(module_or_spec, signature=None, input_name=None): """Returns expected num_channels dimensions of an image input. This is for advanced users only who expect to handle modules with image inputs that might not have the 3 usual RGB channels. Args: module_or_spec: a Module or ModuleSpec that accepts image inputs. signature: a string with the key of the signature in question. If None, the default signature is used. input_name: a string with the input name for images. If None, the conventional input name `images` for the default signature is used. Returns: An integer with the number of input channels to the module. Raises: ValueError: If the channel information is missing or malformed. """ if input_name is None: input_name = "images" input_info_dict = module_or_spec.get_input_info_dict(signature) try: shape = input_info_dict[input_name].get_shape() except KeyError: raise ValueError("Module is missing input '%s' in signature '%s'." % (input_name, signature or "default")) try: _, _, _, num_channels = shape.as_list() if num_channels is None: raise ValueError except ValueError: raise ValueError( "Shape of module input is %s, " "expected [batch_size, height, width, num_channels] " "with known num_channels" % shape) return num_channels
[ "def", "get_num_image_channels", "(", "module_or_spec", ",", "signature", "=", "None", ",", "input_name", "=", "None", ")", ":", "if", "input_name", "is", "None", ":", "input_name", "=", "\"images\"", "input_info_dict", "=", "module_or_spec", ".", "get_input_info_dict", "(", "signature", ")", "try", ":", "shape", "=", "input_info_dict", "[", "input_name", "]", ".", "get_shape", "(", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Module is missing input '%s' in signature '%s'.\"", "%", "(", "input_name", ",", "signature", "or", "\"default\"", ")", ")", "try", ":", "_", ",", "_", ",", "_", ",", "num_channels", "=", "shape", ".", "as_list", "(", ")", "if", "num_channels", "is", "None", ":", "raise", "ValueError", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Shape of module input is %s, \"", "\"expected [batch_size, height, width, num_channels] \"", "\"with known num_channels\"", "%", "shape", ")", "return", "num_channels" ]
Returns expected num_channels dimensions of an image input. This is for advanced users only who expect to handle modules with image inputs that might not have the 3 usual RGB channels. Args: module_or_spec: a Module or ModuleSpec that accepts image inputs. signature: a string with the key of the signature in question. If None, the default signature is used. input_name: a string with the input name for images. If None, the conventional input name `images` for the default signature is used. Returns: An integer with the number of input channels to the module. Raises: ValueError: If the channel information is missing or malformed.
[ "Returns", "expected", "num_channels", "dimensions", "of", "an", "image", "input", "." ]
python
train
37.081081
tradenity/python-sdk
tradenity/resources/refund_operation.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/refund_operation.py#L228-L250
def list_all_refund_operations(cls, **kwargs): """List RefundOperations Return a list of RefundOperations This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_refund_operations(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[RefundOperation] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_refund_operations_with_http_info(**kwargs) else: (data) = cls._list_all_refund_operations_with_http_info(**kwargs) return data
[ "def", "list_all_refund_operations", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_refund_operations_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_refund_operations_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List RefundOperations Return a list of RefundOperations This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_refund_operations(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[RefundOperation] If the method is called asynchronously, returns the request thread.
[ "List", "RefundOperations" ]
python
train
38.782609
tensorflow/cleverhans
cleverhans/attack_bundling.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L690-L699
def filter(self, run_counts, criteria): """ Return run counts only for examples that are still correctly classified """ correctness = criteria['correctness'] assert correctness.dtype == np.bool filtered_counts = deep_copy(run_counts) for key in filtered_counts: filtered_counts[key] = filtered_counts[key][correctness] return filtered_counts
[ "def", "filter", "(", "self", ",", "run_counts", ",", "criteria", ")", ":", "correctness", "=", "criteria", "[", "'correctness'", "]", "assert", "correctness", ".", "dtype", "==", "np", ".", "bool", "filtered_counts", "=", "deep_copy", "(", "run_counts", ")", "for", "key", "in", "filtered_counts", ":", "filtered_counts", "[", "key", "]", "=", "filtered_counts", "[", "key", "]", "[", "correctness", "]", "return", "filtered_counts" ]
Return run counts only for examples that are still correctly classified
[ "Return", "run", "counts", "only", "for", "examples", "that", "are", "still", "correctly", "classified" ]
python
train
37
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L181-L269
def rebalance(self): """The genetic rebalancing algorithm runs for a fixed number of generations. Each generation has two phases: exploration and pruning. In exploration, a large set of possible states are found by randomly applying assignment changes to the existing states. In pruning, each state is given a score based on the balance of the cluster and the states with the highest scores are chosen as the starting states for the next generation. """ if self.args.num_gens < self.args.max_partition_movements: self.log.warning( "num-gens ({num_gens}) is less than max-partition-movements" " ({max_partition_movements}). max-partition-movements will" " never be reached.".format( num_gens=self.args.num_gens, max_partition_movements=self.args.max_partition_movements, ) ) if self.args.replication_groups: self.log.info("Rebalancing replicas across replication groups...") rg_movement_count, rg_movement_size = self.rebalance_replicas( max_movement_count=self.args.max_partition_movements, max_movement_size=self.args.max_movement_size, ) self.log.info( "Done rebalancing replicas. %d partitions moved.", rg_movement_count, ) else: rg_movement_size = 0 rg_movement_count = 0 # Use a fixed random seed to make results reproducible. random.seed(RANDOM_SEED) # NOTE: only active brokers are considered when rebalancing state = _State( self.cluster_topology, brokers=self.cluster_topology.active_brokers ) state.movement_size = rg_movement_size pop = {state} do_rebalance = self.args.brokers or self.args.leaders # Cannot rebalance when all partitions have zero weight because the # score function is undefined. if do_rebalance and not state.total_weight: self.log.error( "Rebalance impossible. All partitions have zero weight.", ) do_rebalance = False if do_rebalance: self.log.info("Rebalancing with genetic algorithm.") # Run the genetic algorithm for a fixed number of generations. for i in range(self.args.num_gens): start = time.time() pop_candidates = self._explore(pop) pop = self._prune(pop_candidates) end = time.time() self.log.debug( "Generation %d: keeping %d of %d assignment(s) in %f seconds", i, len(pop), len(pop_candidates), end - start, ) # Choose the state with the greatest score. state = sorted(pop, key=self._score, reverse=True)[0] self.log.info( "Done rebalancing. %d partitions moved.", state.movement_count, ) self.log.info("Total movement size: %f", state.movement_size) assignment = state.assignment # Since only active brokers are considered when rebalancing, inactive # brokers need to be added back to the new assignment. all_brokers = set(self.cluster_topology.brokers.values()) inactive_brokers = all_brokers - set(state.brokers) for partition_name, replicas in assignment: for broker in inactive_brokers: if broker in self.cluster_topology.partitions[partition_name].replicas: replicas.append(broker.id) self.cluster_topology.update_cluster_topology(assignment)
[ "def", "rebalance", "(", "self", ")", ":", "if", "self", ".", "args", ".", "num_gens", "<", "self", ".", "args", ".", "max_partition_movements", ":", "self", ".", "log", ".", "warning", "(", "\"num-gens ({num_gens}) is less than max-partition-movements\"", "\" ({max_partition_movements}). max-partition-movements will\"", "\" never be reached.\"", ".", "format", "(", "num_gens", "=", "self", ".", "args", ".", "num_gens", ",", "max_partition_movements", "=", "self", ".", "args", ".", "max_partition_movements", ",", ")", ")", "if", "self", ".", "args", ".", "replication_groups", ":", "self", ".", "log", ".", "info", "(", "\"Rebalancing replicas across replication groups...\"", ")", "rg_movement_count", ",", "rg_movement_size", "=", "self", ".", "rebalance_replicas", "(", "max_movement_count", "=", "self", ".", "args", ".", "max_partition_movements", ",", "max_movement_size", "=", "self", ".", "args", ".", "max_movement_size", ",", ")", "self", ".", "log", ".", "info", "(", "\"Done rebalancing replicas. %d partitions moved.\"", ",", "rg_movement_count", ",", ")", "else", ":", "rg_movement_size", "=", "0", "rg_movement_count", "=", "0", "# Use a fixed random seed to make results reproducible.", "random", ".", "seed", "(", "RANDOM_SEED", ")", "# NOTE: only active brokers are considered when rebalancing", "state", "=", "_State", "(", "self", ".", "cluster_topology", ",", "brokers", "=", "self", ".", "cluster_topology", ".", "active_brokers", ")", "state", ".", "movement_size", "=", "rg_movement_size", "pop", "=", "{", "state", "}", "do_rebalance", "=", "self", ".", "args", ".", "brokers", "or", "self", ".", "args", ".", "leaders", "# Cannot rebalance when all partitions have zero weight because the", "# score function is undefined.", "if", "do_rebalance", "and", "not", "state", ".", "total_weight", ":", "self", ".", "log", ".", "error", "(", "\"Rebalance impossible. All partitions have zero weight.\"", ",", ")", "do_rebalance", "=", "False", "if", "do_rebalance", ":", "self", ".", "log", ".", "info", "(", "\"Rebalancing with genetic algorithm.\"", ")", "# Run the genetic algorithm for a fixed number of generations.", "for", "i", "in", "range", "(", "self", ".", "args", ".", "num_gens", ")", ":", "start", "=", "time", ".", "time", "(", ")", "pop_candidates", "=", "self", ".", "_explore", "(", "pop", ")", "pop", "=", "self", ".", "_prune", "(", "pop_candidates", ")", "end", "=", "time", ".", "time", "(", ")", "self", ".", "log", ".", "debug", "(", "\"Generation %d: keeping %d of %d assignment(s) in %f seconds\"", ",", "i", ",", "len", "(", "pop", ")", ",", "len", "(", "pop_candidates", ")", ",", "end", "-", "start", ",", ")", "# Choose the state with the greatest score.", "state", "=", "sorted", "(", "pop", ",", "key", "=", "self", ".", "_score", ",", "reverse", "=", "True", ")", "[", "0", "]", "self", ".", "log", ".", "info", "(", "\"Done rebalancing. %d partitions moved.\"", ",", "state", ".", "movement_count", ",", ")", "self", ".", "log", ".", "info", "(", "\"Total movement size: %f\"", ",", "state", ".", "movement_size", ")", "assignment", "=", "state", ".", "assignment", "# Since only active brokers are considered when rebalancing, inactive", "# brokers need to be added back to the new assignment.", "all_brokers", "=", "set", "(", "self", ".", "cluster_topology", ".", "brokers", ".", "values", "(", ")", ")", "inactive_brokers", "=", "all_brokers", "-", "set", "(", "state", ".", "brokers", ")", "for", "partition_name", ",", "replicas", "in", "assignment", ":", "for", "broker", "in", "inactive_brokers", ":", "if", "broker", "in", "self", ".", "cluster_topology", ".", "partitions", "[", "partition_name", "]", ".", "replicas", ":", "replicas", ".", "append", "(", "broker", ".", "id", ")", "self", ".", "cluster_topology", ".", "update_cluster_topology", "(", "assignment", ")" ]
The genetic rebalancing algorithm runs for a fixed number of generations. Each generation has two phases: exploration and pruning. In exploration, a large set of possible states are found by randomly applying assignment changes to the existing states. In pruning, each state is given a score based on the balance of the cluster and the states with the highest scores are chosen as the starting states for the next generation.
[ "The", "genetic", "rebalancing", "algorithm", "runs", "for", "a", "fixed", "number", "of", "generations", ".", "Each", "generation", "has", "two", "phases", ":", "exploration", "and", "pruning", ".", "In", "exploration", "a", "large", "set", "of", "possible", "states", "are", "found", "by", "randomly", "applying", "assignment", "changes", "to", "the", "existing", "states", ".", "In", "pruning", "each", "state", "is", "given", "a", "score", "based", "on", "the", "balance", "of", "the", "cluster", "and", "the", "states", "with", "the", "highest", "scores", "are", "chosen", "as", "the", "starting", "states", "for", "the", "next", "generation", "." ]
python
train
42.134831
markrwilliams/txdarn
txdarn/protocol.py
https://github.com/markrwilliams/txdarn/blob/154d25a1ac78c4e2877c0656e3b9cea4332eda57/txdarn/protocol.py#L89-L95
def stop(self): """Permanently stop sending heartbeats.""" if not self.stopped: self.stopped = True if self.pendingHeartbeat is not None: self.pendingHeartbeat.cancel() self.pendingHeartbeat = None
[ "def", "stop", "(", "self", ")", ":", "if", "not", "self", ".", "stopped", ":", "self", ".", "stopped", "=", "True", "if", "self", ".", "pendingHeartbeat", "is", "not", "None", ":", "self", ".", "pendingHeartbeat", ".", "cancel", "(", ")", "self", ".", "pendingHeartbeat", "=", "None" ]
Permanently stop sending heartbeats.
[ "Permanently", "stop", "sending", "heartbeats", "." ]
python
train
37.571429
reiinakano/xcessiv
xcessiv/functions.py
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L94-L127
def verify_dataset(X, y): """Verifies if a dataset is valid for use i.e. scikit-learn format Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide quick and dirty check on capability of host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns: X_shape (2-tuple of int): Shape of X returned y_shape (1-tuple of int): Shape of y returned Raises: AssertionError: `X_shape` must be of length 2 and `y_shape` must be of length 1. `X` must have the same number of elements as `y` i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met, an AssertionError is raised. """ X_shape, y_shape = np.array(X).shape, np.array(y).shape if len(X_shape) != 2: raise exceptions.UserError("X must be 2-dimensional array") if len(y_shape) != 1: raise exceptions.UserError("y must be 1-dimensional array") if X_shape[0] != y_shape[0]: raise exceptions.UserError("X must have same number of elements as y") return dict( features_shape=X_shape, labels_shape=y_shape )
[ "def", "verify_dataset", "(", "X", ",", "y", ")", ":", "X_shape", ",", "y_shape", "=", "np", ".", "array", "(", "X", ")", ".", "shape", ",", "np", ".", "array", "(", "y", ")", ".", "shape", "if", "len", "(", "X_shape", ")", "!=", "2", ":", "raise", "exceptions", ".", "UserError", "(", "\"X must be 2-dimensional array\"", ")", "if", "len", "(", "y_shape", ")", "!=", "1", ":", "raise", "exceptions", ".", "UserError", "(", "\"y must be 1-dimensional array\"", ")", "if", "X_shape", "[", "0", "]", "!=", "y_shape", "[", "0", "]", ":", "raise", "exceptions", ".", "UserError", "(", "\"X must have same number of elements as y\"", ")", "return", "dict", "(", "features_shape", "=", "X_shape", ",", "labels_shape", "=", "y_shape", ")" ]
Verifies if a dataset is valid for use i.e. scikit-learn format Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide quick and dirty check on capability of host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns: X_shape (2-tuple of int): Shape of X returned y_shape (1-tuple of int): Shape of y returned Raises: AssertionError: `X_shape` must be of length 2 and `y_shape` must be of length 1. `X` must have the same number of elements as `y` i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met, an AssertionError is raised.
[ "Verifies", "if", "a", "dataset", "is", "valid", "for", "use", "i", ".", "e", ".", "scikit", "-", "learn", "format" ]
python
train
35.411765
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L574-L582
def _q_to_dcm(self, q): """ Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3 """ assert(len(q) == 4) arr = super(Quaternion, self)._q_to_dcm(q) return self._dcm_array_to_matrix3(arr)
[ "def", "_q_to_dcm", "(", "self", ",", "q", ")", ":", "assert", "(", "len", "(", "q", ")", "==", "4", ")", "arr", "=", "super", "(", "Quaternion", ",", "self", ")", ".", "_q_to_dcm", "(", "q", ")", "return", "self", ".", "_dcm_array_to_matrix3", "(", "arr", ")" ]
Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3
[ "Create", "DCM", "(", "Matrix3", ")", "from", "q", ":", "param", "q", ":", "array", "q", "which", "represents", "a", "quaternion", "[", "w", "x", "y", "z", "]", ":", "returns", ":", "Matrix3" ]
python
train
32.888889
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_rep.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_rep.py#L170-L180
def get_flow_by_id(self, flow_id): """ Gets an edge (flow) with requested ID. Returns a tuple, where first value is node ID, second - a dictionary of all node attributes. :param flow_id: string with edge ID. """ tmp_flows = self.diagram_graph.edges(data=True) for flow in tmp_flows: if flow[2][consts.Consts.id] == flow_id: return flow
[ "def", "get_flow_by_id", "(", "self", ",", "flow_id", ")", ":", "tmp_flows", "=", "self", ".", "diagram_graph", ".", "edges", "(", "data", "=", "True", ")", "for", "flow", "in", "tmp_flows", ":", "if", "flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "id", "]", "==", "flow_id", ":", "return", "flow" ]
Gets an edge (flow) with requested ID. Returns a tuple, where first value is node ID, second - a dictionary of all node attributes. :param flow_id: string with edge ID.
[ "Gets", "an", "edge", "(", "flow", ")", "with", "requested", "ID", ".", "Returns", "a", "tuple", "where", "first", "value", "is", "node", "ID", "second", "-", "a", "dictionary", "of", "all", "node", "attributes", "." ]
python
train
37.272727
crytic/slither
slither/core/declarations/contract.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/contract.py#L421-L429
def get_event_from_name(self, event_name): """ Return an event from a name Args: event_name (str): name of the event Returns: Event """ return next((e for e in self.events if e.name == event_name), None)
[ "def", "get_event_from_name", "(", "self", ",", "event_name", ")", ":", "return", "next", "(", "(", "e", "for", "e", "in", "self", ".", "events", "if", "e", ".", "name", "==", "event_name", ")", ",", "None", ")" ]
Return an event from a name Args: event_name (str): name of the event Returns: Event
[ "Return", "an", "event", "from", "a", "name", "Args", ":", "event_name", "(", "str", ")", ":", "name", "of", "the", "event", "Returns", ":", "Event" ]
python
train
30.111111
click-contrib/click-configfile
tasks/_vendor/path.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/_vendor/path.py#L799-L871
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False): r""" Write the given text to this file. The default behavior is to overwrite any existing file; to append instead, use the `append=True` keyword argument. There are two differences between :meth:`write_text` and :meth:`write_bytes`: newline handling and Unicode handling. See below. Parameters: `text` - str/unicode - The text to be written. `encoding` - str - The Unicode encoding that will be used. This is ignored if `text` isn't a Unicode string. `errors` - str - How to handle Unicode encoding errors. Default is ``'strict'``. See ``help(unicode.encode)`` for the options. This is ignored if `text` isn't a Unicode string. `linesep` - keyword argument - str/unicode - The sequence of characters to be used to mark end-of-line. The default is :data:`os.linesep`. You can also specify ``None`` to leave all newlines as they are in `text`. `append` - keyword argument - bool - Specifies what to do if the file already exists (``True``: append to the end of it; ``False``: overwrite it.) The default is ``False``. --- Newline handling. ``write_text()`` converts all standard end-of-line sequences (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default end-of-line sequence (see :data:`os.linesep`; on Windows, for example, the end-of-line marker is ``'\r\n'``). If you don't like your platform's default, you can override it using the `linesep=` keyword argument. If you specifically want ``write_text()`` to preserve the newlines as-is, use ``linesep=None``. This applies to Unicode text the same as to 8-bit text, except there are three additional standard Unicode end-of-line sequences: ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``. (This is slightly different from when you open a file for writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')`` in Python.) --- Unicode If `text` isn't Unicode, then apart from newline handling, the bytes are written verbatim to the file. The `encoding` and `errors` arguments are not used and must be omitted. If `text` is Unicode, it is first converted to :func:`bytes` using the specified `encoding` (or the default encoding if `encoding` isn't specified). The `errors` argument applies only to this conversion. """ if isinstance(text, text_type): if linesep is not None: text = U_NEWLINE.sub(linesep, text) text = text.encode(encoding or sys.getdefaultencoding(), errors) else: assert encoding is None text = NEWLINE.sub(linesep, text) self.write_bytes(text, append=append)
[ "def", "write_text", "(", "self", ",", "text", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ",", "linesep", "=", "os", ".", "linesep", ",", "append", "=", "False", ")", ":", "if", "isinstance", "(", "text", ",", "text_type", ")", ":", "if", "linesep", "is", "not", "None", ":", "text", "=", "U_NEWLINE", ".", "sub", "(", "linesep", ",", "text", ")", "text", "=", "text", ".", "encode", "(", "encoding", "or", "sys", ".", "getdefaultencoding", "(", ")", ",", "errors", ")", "else", ":", "assert", "encoding", "is", "None", "text", "=", "NEWLINE", ".", "sub", "(", "linesep", ",", "text", ")", "self", ".", "write_bytes", "(", "text", ",", "append", "=", "append", ")" ]
r""" Write the given text to this file. The default behavior is to overwrite any existing file; to append instead, use the `append=True` keyword argument. There are two differences between :meth:`write_text` and :meth:`write_bytes`: newline handling and Unicode handling. See below. Parameters: `text` - str/unicode - The text to be written. `encoding` - str - The Unicode encoding that will be used. This is ignored if `text` isn't a Unicode string. `errors` - str - How to handle Unicode encoding errors. Default is ``'strict'``. See ``help(unicode.encode)`` for the options. This is ignored if `text` isn't a Unicode string. `linesep` - keyword argument - str/unicode - The sequence of characters to be used to mark end-of-line. The default is :data:`os.linesep`. You can also specify ``None`` to leave all newlines as they are in `text`. `append` - keyword argument - bool - Specifies what to do if the file already exists (``True``: append to the end of it; ``False``: overwrite it.) The default is ``False``. --- Newline handling. ``write_text()`` converts all standard end-of-line sequences (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default end-of-line sequence (see :data:`os.linesep`; on Windows, for example, the end-of-line marker is ``'\r\n'``). If you don't like your platform's default, you can override it using the `linesep=` keyword argument. If you specifically want ``write_text()`` to preserve the newlines as-is, use ``linesep=None``. This applies to Unicode text the same as to 8-bit text, except there are three additional standard Unicode end-of-line sequences: ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``. (This is slightly different from when you open a file for writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')`` in Python.) --- Unicode If `text` isn't Unicode, then apart from newline handling, the bytes are written verbatim to the file. The `encoding` and `errors` arguments are not used and must be omitted. If `text` is Unicode, it is first converted to :func:`bytes` using the specified `encoding` (or the default encoding if `encoding` isn't specified). The `errors` argument applies only to this conversion.
[ "r", "Write", "the", "given", "text", "to", "this", "file", "." ]
python
train
41.260274
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L375-L385
def normalize_address(self, hostname): """Ensure that address returned is an IP address (i.e. not fqdn)""" if config_get('prefer-ipv6'): # TODO: add support for ipv6 dns return hostname if hostname != unit_get('private-address'): return get_host_ip(hostname, fallback=hostname) # Otherwise assume localhost return '127.0.0.1'
[ "def", "normalize_address", "(", "self", ",", "hostname", ")", ":", "if", "config_get", "(", "'prefer-ipv6'", ")", ":", "# TODO: add support for ipv6 dns", "return", "hostname", "if", "hostname", "!=", "unit_get", "(", "'private-address'", ")", ":", "return", "get_host_ip", "(", "hostname", ",", "fallback", "=", "hostname", ")", "# Otherwise assume localhost", "return", "'127.0.0.1'" ]
Ensure that address returned is an IP address (i.e. not fqdn)
[ "Ensure", "that", "address", "returned", "is", "an", "IP", "address", "(", "i", ".", "e", ".", "not", "fqdn", ")" ]
python
train
35.727273
annoviko/pyclustering
pyclustering/nnet/legion.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/legion.py#L193-L206
def allocate_sync_ensembles(self, tolerance = 0.1): """! @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster. @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators. @return (list) Grours of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ]. """ if (self.__ccore_legion_dynamic_pointer is not None): self.__output = wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer); return allocate_sync_ensembles(self.__output, tolerance);
[ "def", "allocate_sync_ensembles", "(", "self", ",", "tolerance", "=", "0.1", ")", ":", "if", "(", "self", ".", "__ccore_legion_dynamic_pointer", "is", "not", "None", ")", ":", "self", ".", "__output", "=", "wrapper", ".", "legion_dynamic_get_output", "(", "self", ".", "__ccore_legion_dynamic_pointer", ")", "return", "allocate_sync_ensembles", "(", "self", ".", "__output", ",", "tolerance", ")" ]
! @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster. @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators. @return (list) Grours of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
[ "!" ]
python
valid
53.571429
rytilahti/python-songpal
songpal/group.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/group.py#L199-L203
async def stop(self): """Stop playback?""" state = await self.state() res = await self.call("X_Stop", MasterSessionID=state.MasterSessionID) return res
[ "async", "def", "stop", "(", "self", ")", ":", "state", "=", "await", "self", ".", "state", "(", ")", "res", "=", "await", "self", ".", "call", "(", "\"X_Stop\"", ",", "MasterSessionID", "=", "state", ".", "MasterSessionID", ")", "return", "res" ]
Stop playback?
[ "Stop", "playback?" ]
python
train
35.8
yyuu/botornado
botornado/sqs/queue.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/sqs/queue.py#L139-L152
def read(self, visibility_timeout=None, callback=None): """ Read a single message from the queue. :type visibility_timeout: int :param visibility_timeout: The timeout for this message in seconds :rtype: :class:`boto.sqs.message.Message` :return: A single message or None if queue is empty """ def _read(rs): if callable(callback): callback(rs[0] if len(rs) == 1 else None) self.get_messages(1, visibility_timeout, callback=callback)
[ "def", "read", "(", "self", ",", "visibility_timeout", "=", "None", ",", "callback", "=", "None", ")", ":", "def", "_read", "(", "rs", ")", ":", "if", "callable", "(", "callback", ")", ":", "callback", "(", "rs", "[", "0", "]", "if", "len", "(", "rs", ")", "==", "1", "else", "None", ")", "self", ".", "get_messages", "(", "1", ",", "visibility_timeout", ",", "callback", "=", "callback", ")" ]
Read a single message from the queue. :type visibility_timeout: int :param visibility_timeout: The timeout for this message in seconds :rtype: :class:`boto.sqs.message.Message` :return: A single message or None if queue is empty
[ "Read", "a", "single", "message", "from", "the", "queue", ".", ":", "type", "visibility_timeout", ":", "int", ":", "param", "visibility_timeout", ":", "The", "timeout", "for", "this", "message", "in", "seconds" ]
python
train
37.785714
mrallen1/pygett
pygett/base.py
https://github.com/mrallen1/pygett/blob/1e21f8674a3634a901af054226670174b5ce2d87/pygett/base.py#L183-L230
def upload_file(self, **kwargs): """ Upload a file to the Gett service. Takes keyword arguments. Input: * ``filename`` the filename to use in the Gett service (required) * ``data`` the file contents to store in the Gett service (required) - must be a string * ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created. * ``title`` the share title to use if a new share is created (optional) Output: * A :py:mod:`pygett.files.GettFile` object Example:: file = client.upload_file(filaname="foo", data=open("foo.txt").read()) """ params = None if 'filename' not in kwargs: raise AttributeError("Parameter 'filename' must be given") else: params = { "filename": kwargs['filename'] } if 'data' not in kwargs: raise AttributeError("Parameter 'data' must be given") sharename = None if 'sharename' not in kwargs: share = None if 'title' in kwargs: share = self.create_share(title=kwargs['title']) else: share = self.create_share() sharename = share.sharename else: sharename = kwargs['sharename'] response = GettRequest().post("/files/%s/create?accesstoken=%s" % (sharename, self.user.access_token()), params) f = None if response.http_status == 200: if 'sharename' not in response.response: response.response['sharename'] = sharename f = GettFile(self.user, **response.response) if f.send_data(data=kwargs['data']): return f
[ "def", "upload_file", "(", "self", ",", "*", "*", "kwargs", ")", ":", "params", "=", "None", "if", "'filename'", "not", "in", "kwargs", ":", "raise", "AttributeError", "(", "\"Parameter 'filename' must be given\"", ")", "else", ":", "params", "=", "{", "\"filename\"", ":", "kwargs", "[", "'filename'", "]", "}", "if", "'data'", "not", "in", "kwargs", ":", "raise", "AttributeError", "(", "\"Parameter 'data' must be given\"", ")", "sharename", "=", "None", "if", "'sharename'", "not", "in", "kwargs", ":", "share", "=", "None", "if", "'title'", "in", "kwargs", ":", "share", "=", "self", ".", "create_share", "(", "title", "=", "kwargs", "[", "'title'", "]", ")", "else", ":", "share", "=", "self", ".", "create_share", "(", ")", "sharename", "=", "share", ".", "sharename", "else", ":", "sharename", "=", "kwargs", "[", "'sharename'", "]", "response", "=", "GettRequest", "(", ")", ".", "post", "(", "\"/files/%s/create?accesstoken=%s\"", "%", "(", "sharename", ",", "self", ".", "user", ".", "access_token", "(", ")", ")", ",", "params", ")", "f", "=", "None", "if", "response", ".", "http_status", "==", "200", ":", "if", "'sharename'", "not", "in", "response", ".", "response", ":", "response", ".", "response", "[", "'sharename'", "]", "=", "sharename", "f", "=", "GettFile", "(", "self", ".", "user", ",", "*", "*", "response", ".", "response", ")", "if", "f", ".", "send_data", "(", "data", "=", "kwargs", "[", "'data'", "]", ")", ":", "return", "f" ]
Upload a file to the Gett service. Takes keyword arguments. Input: * ``filename`` the filename to use in the Gett service (required) * ``data`` the file contents to store in the Gett service (required) - must be a string * ``sharename`` the name of the share in which to store the data (optional); if not given, a new share will be created. * ``title`` the share title to use if a new share is created (optional) Output: * A :py:mod:`pygett.files.GettFile` object Example:: file = client.upload_file(filaname="foo", data=open("foo.txt").read())
[ "Upload", "a", "file", "to", "the", "Gett", "service", ".", "Takes", "keyword", "arguments", "." ]
python
train
36.708333
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L513-L538
def compose (composite_property_s, component_properties_s): """ Sets the components of the given composite property. All parameters are <feature>value strings """ from . import property component_properties_s = to_seq (component_properties_s) composite_property = property.create_from_string(composite_property_s) f = composite_property.feature if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property): component_properties = component_properties_s else: component_properties = [property.create_from_string(p) for p in component_properties_s] if not f.composite: raise BaseException ("'%s' is not a composite feature" % f) if property in __composite_properties: raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property]))) if composite_property in component_properties: raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property) __composite_properties[composite_property] = component_properties
[ "def", "compose", "(", "composite_property_s", ",", "component_properties_s", ")", ":", "from", ".", "import", "property", "component_properties_s", "=", "to_seq", "(", "component_properties_s", ")", "composite_property", "=", "property", ".", "create_from_string", "(", "composite_property_s", ")", "f", "=", "composite_property", ".", "feature", "if", "len", "(", "component_properties_s", ")", ">", "0", "and", "isinstance", "(", "component_properties_s", "[", "0", "]", ",", "property", ".", "Property", ")", ":", "component_properties", "=", "component_properties_s", "else", ":", "component_properties", "=", "[", "property", ".", "create_from_string", "(", "p", ")", "for", "p", "in", "component_properties_s", "]", "if", "not", "f", ".", "composite", ":", "raise", "BaseException", "(", "\"'%s' is not a composite feature\"", "%", "f", ")", "if", "property", "in", "__composite_properties", ":", "raise", "BaseException", "(", "'components of \"%s\" already set: %s'", "%", "(", "composite_property", ",", "str", "(", "__composite_properties", "[", "composite_property", "]", ")", ")", ")", "if", "composite_property", "in", "component_properties", ":", "raise", "BaseException", "(", "'composite property \"%s\" cannot have itself as a component'", "%", "composite_property", ")", "__composite_properties", "[", "composite_property", "]", "=", "component_properties" ]
Sets the components of the given composite property. All parameters are <feature>value strings
[ "Sets", "the", "components", "of", "the", "given", "composite", "property", "." ]
python
train
43.153846
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L347-L354
def webify_file(srcfilename: str, destfilename: str) -> None: """ Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process. """ with open(srcfilename) as infile, open(destfilename, 'w') as ofile: for line_ in infile: ofile.write(escape(line_))
[ "def", "webify_file", "(", "srcfilename", ":", "str", ",", "destfilename", ":", "str", ")", "->", "None", ":", "with", "open", "(", "srcfilename", ")", "as", "infile", ",", "open", "(", "destfilename", ",", "'w'", ")", "as", "ofile", ":", "for", "line_", "in", "infile", ":", "ofile", ".", "write", "(", "escape", "(", "line_", ")", ")" ]
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process.
[ "Rewrites", "a", "file", "from", "srcfilename", "to", "destfilename", "HTML", "-", "escaping", "it", "in", "the", "process", "." ]
python
train
38.625
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L751-L762
def _replace_with_new_dims( # type: ignore self: T, variables: 'OrderedDict[Any, Variable]' = None, coord_names: set = None, attrs: 'Optional[OrderedDict]' = __default, indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default, inplace: bool = False, ) -> T: """Replace variables with recalculated dimensions.""" dims = dict(calculate_dimensions(variables)) return self._replace( variables, coord_names, dims, attrs, indexes, inplace=inplace)
[ "def", "_replace_with_new_dims", "(", "# type: ignore", "self", ":", "T", ",", "variables", ":", "'OrderedDict[Any, Variable]'", "=", "None", ",", "coord_names", ":", "set", "=", "None", ",", "attrs", ":", "'Optional[OrderedDict]'", "=", "__default", ",", "indexes", ":", "'Optional[OrderedDict[Any, pd.Index]]'", "=", "__default", ",", "inplace", ":", "bool", "=", "False", ",", ")", "->", "T", ":", "dims", "=", "dict", "(", "calculate_dimensions", "(", "variables", ")", ")", "return", "self", ".", "_replace", "(", "variables", ",", "coord_names", ",", "dims", ",", "attrs", ",", "indexes", ",", "inplace", "=", "inplace", ")" ]
Replace variables with recalculated dimensions.
[ "Replace", "variables", "with", "recalculated", "dimensions", "." ]
python
train
43.5
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/tex/lsstbib.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/tex/lsstbib.py#L178-L214
def get_url_from_entry(entry): """Get a usable URL from a pybtex entry. Parameters ---------- entry : `pybtex.database.Entry` A pybtex bibliography entry. Returns ------- url : `str` Best available URL from the ``entry``. Raises ------ NoEntryUrlError Raised when no URL can be made from the bibliography entry. Notes ----- The order of priority is: 1. ``url`` field 2. ``ls.st`` URL from the handle for ``@docushare`` entries. 3. ``adsurl`` 4. DOI """ if 'url' in entry.fields: return entry.fields['url'] elif entry.type.lower() == 'docushare': return 'https://ls.st/' + entry.fields['handle'] elif 'adsurl' in entry.fields: return entry.fields['adsurl'] elif 'doi' in entry.fields: return 'https://doi.org/' + entry.fields['doi'] else: raise NoEntryUrlError()
[ "def", "get_url_from_entry", "(", "entry", ")", ":", "if", "'url'", "in", "entry", ".", "fields", ":", "return", "entry", ".", "fields", "[", "'url'", "]", "elif", "entry", ".", "type", ".", "lower", "(", ")", "==", "'docushare'", ":", "return", "'https://ls.st/'", "+", "entry", ".", "fields", "[", "'handle'", "]", "elif", "'adsurl'", "in", "entry", ".", "fields", ":", "return", "entry", ".", "fields", "[", "'adsurl'", "]", "elif", "'doi'", "in", "entry", ".", "fields", ":", "return", "'https://doi.org/'", "+", "entry", ".", "fields", "[", "'doi'", "]", "else", ":", "raise", "NoEntryUrlError", "(", ")" ]
Get a usable URL from a pybtex entry. Parameters ---------- entry : `pybtex.database.Entry` A pybtex bibliography entry. Returns ------- url : `str` Best available URL from the ``entry``. Raises ------ NoEntryUrlError Raised when no URL can be made from the bibliography entry. Notes ----- The order of priority is: 1. ``url`` field 2. ``ls.st`` URL from the handle for ``@docushare`` entries. 3. ``adsurl`` 4. DOI
[ "Get", "a", "usable", "URL", "from", "a", "pybtex", "entry", "." ]
python
valid
23.945946
ScottDuckworth/python-anyvcs
anyvcs/svn.py
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L746-L776
def dump( self, stream, progress=None, lower=None, upper=None, incremental=False, deltas=False ): """Dump the repository to a dumpfile stream. :param stream: A file stream to which the dumpfile is written :param progress: A file stream to which progress is written :param lower: Must be a numeric version number :param upper: Must be a numeric version number See ``svnadmin help dump`` for details on the other arguments. """ cmd = [SVNADMIN, 'dump', '.'] if progress is None: cmd.append('-q') if lower is not None: cmd.append('-r') if upper is None: cmd.append(str(int(lower))) else: cmd.append('%d:%d' % (int(lower), int(upper))) if incremental: cmd.append('--incremental') if deltas: cmd.append('--deltas') p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, cmd)
[ "def", "dump", "(", "self", ",", "stream", ",", "progress", "=", "None", ",", "lower", "=", "None", ",", "upper", "=", "None", ",", "incremental", "=", "False", ",", "deltas", "=", "False", ")", ":", "cmd", "=", "[", "SVNADMIN", ",", "'dump'", ",", "'.'", "]", "if", "progress", "is", "None", ":", "cmd", ".", "append", "(", "'-q'", ")", "if", "lower", "is", "not", "None", ":", "cmd", ".", "append", "(", "'-r'", ")", "if", "upper", "is", "None", ":", "cmd", ".", "append", "(", "str", "(", "int", "(", "lower", ")", ")", ")", "else", ":", "cmd", ".", "append", "(", "'%d:%d'", "%", "(", "int", "(", "lower", ")", ",", "int", "(", "upper", ")", ")", ")", "if", "incremental", ":", "cmd", ".", "append", "(", "'--incremental'", ")", "if", "deltas", ":", "cmd", ".", "append", "(", "'--deltas'", ")", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "cwd", "=", "self", ".", "path", ",", "stdout", "=", "stream", ",", "stderr", "=", "progress", ")", "p", ".", "wait", "(", ")", "if", "p", ".", "returncode", "!=", "0", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "p", ".", "returncode", ",", "cmd", ")" ]
Dump the repository to a dumpfile stream. :param stream: A file stream to which the dumpfile is written :param progress: A file stream to which progress is written :param lower: Must be a numeric version number :param upper: Must be a numeric version number See ``svnadmin help dump`` for details on the other arguments.
[ "Dump", "the", "repository", "to", "a", "dumpfile", "stream", "." ]
python
train
35.387097
numenta/nupic
src/nupic/swarming/hypersearch_v2.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L581-L630
def getMaturedSwarmGenerations(self): """Return a list of swarm generations that have completed and the best (minimal) errScore seen for each of them. Parameters: --------------------------------------------------------------------- retval: list of tuples. Each tuple is of the form: (swarmId, genIdx, bestErrScore) """ # Return results go in this list result = [] # For each of the swarm generations which have had model result updates # since the last time we were called, see which have completed. modifiedSwarmGens = sorted(self._modifiedSwarmGens) # Walk through them in order from lowest to highest generation index for key in modifiedSwarmGens: (swarmId, genIdx) = key # Skip it if we've already reported on it. This should happen rarely, if # ever. It means that some worker has started and completed a model in # this generation after we've determined that the generation has ended. if key in self._maturedSwarmGens: self._modifiedSwarmGens.remove(key) continue # If the previous generation for this swarm is not complete yet, don't # bother evaluating this one. if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens: continue # We found a swarm generation that had some results reported since last # time, see if it's complete or not (_, _, errScores, completedFlags, maturedFlags) = \ self.getParticleInfos(swarmId, genIdx) maturedFlags = numpy.array(maturedFlags) numMatured = maturedFlags.sum() if numMatured >= self._hsObj._minParticlesPerSwarm \ and numMatured == len(maturedFlags): errScores = numpy.array(errScores) bestScore = errScores.min() self._maturedSwarmGens.add(key) self._modifiedSwarmGens.remove(key) result.append((swarmId, genIdx, bestScore)) # Return results return result
[ "def", "getMaturedSwarmGenerations", "(", "self", ")", ":", "# Return results go in this list", "result", "=", "[", "]", "# For each of the swarm generations which have had model result updates", "# since the last time we were called, see which have completed.", "modifiedSwarmGens", "=", "sorted", "(", "self", ".", "_modifiedSwarmGens", ")", "# Walk through them in order from lowest to highest generation index", "for", "key", "in", "modifiedSwarmGens", ":", "(", "swarmId", ",", "genIdx", ")", "=", "key", "# Skip it if we've already reported on it. This should happen rarely, if", "# ever. It means that some worker has started and completed a model in", "# this generation after we've determined that the generation has ended.", "if", "key", "in", "self", ".", "_maturedSwarmGens", ":", "self", ".", "_modifiedSwarmGens", ".", "remove", "(", "key", ")", "continue", "# If the previous generation for this swarm is not complete yet, don't", "# bother evaluating this one.", "if", "(", "genIdx", ">=", "1", ")", "and", "not", "(", "swarmId", ",", "genIdx", "-", "1", ")", "in", "self", ".", "_maturedSwarmGens", ":", "continue", "# We found a swarm generation that had some results reported since last", "# time, see if it's complete or not", "(", "_", ",", "_", ",", "errScores", ",", "completedFlags", ",", "maturedFlags", ")", "=", "self", ".", "getParticleInfos", "(", "swarmId", ",", "genIdx", ")", "maturedFlags", "=", "numpy", ".", "array", "(", "maturedFlags", ")", "numMatured", "=", "maturedFlags", ".", "sum", "(", ")", "if", "numMatured", ">=", "self", ".", "_hsObj", ".", "_minParticlesPerSwarm", "and", "numMatured", "==", "len", "(", "maturedFlags", ")", ":", "errScores", "=", "numpy", ".", "array", "(", "errScores", ")", "bestScore", "=", "errScores", ".", "min", "(", ")", "self", ".", "_maturedSwarmGens", ".", "add", "(", "key", ")", "self", ".", "_modifiedSwarmGens", ".", "remove", "(", "key", ")", "result", ".", "append", "(", "(", "swarmId", ",", "genIdx", ",", "bestScore", ")", ")", "# Return results", "return", "result" ]
Return a list of swarm generations that have completed and the best (minimal) errScore seen for each of them. Parameters: --------------------------------------------------------------------- retval: list of tuples. Each tuple is of the form: (swarmId, genIdx, bestErrScore)
[ "Return", "a", "list", "of", "swarm", "generations", "that", "have", "completed", "and", "the", "best", "(", "minimal", ")", "errScore", "seen", "for", "each", "of", "them", "." ]
python
valid
39
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L536-L573
def _generate_filename(cls, writer_spec, name, job_id, num, attempt=None, seg_index=None): """Generates a filename for a particular output. Args: writer_spec: specification dictionary for the output writer. name: name of the job. job_id: the ID number assigned to the job. num: shard number. attempt: the shard attempt number. seg_index: index of the seg. None means the final output. Returns: a string containing the filename. Raises: BadWriterParamsError: if the template contains any errors such as invalid syntax or contains unknown substitution placeholders. """ naming_format = cls._TMP_FILE_NAMING_FORMAT if seg_index is None: naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM, cls._DEFAULT_NAMING_FORMAT) template = string.Template(naming_format) try: # Check that template doesn't use undefined mappings and is formatted well if seg_index is None: return template.substitute(name=name, id=job_id, num=num) else: return template.substitute(name=name, id=job_id, num=num, attempt=attempt, seg=seg_index) except ValueError, error: raise errors.BadWriterParamsError("Naming template is bad, %s" % (error)) except KeyError, error: raise errors.BadWriterParamsError("Naming template '%s' has extra " "mappings, %s" % (naming_format, error))
[ "def", "_generate_filename", "(", "cls", ",", "writer_spec", ",", "name", ",", "job_id", ",", "num", ",", "attempt", "=", "None", ",", "seg_index", "=", "None", ")", ":", "naming_format", "=", "cls", ".", "_TMP_FILE_NAMING_FORMAT", "if", "seg_index", "is", "None", ":", "naming_format", "=", "writer_spec", ".", "get", "(", "cls", ".", "NAMING_FORMAT_PARAM", ",", "cls", ".", "_DEFAULT_NAMING_FORMAT", ")", "template", "=", "string", ".", "Template", "(", "naming_format", ")", "try", ":", "# Check that template doesn't use undefined mappings and is formatted well", "if", "seg_index", "is", "None", ":", "return", "template", ".", "substitute", "(", "name", "=", "name", ",", "id", "=", "job_id", ",", "num", "=", "num", ")", "else", ":", "return", "template", ".", "substitute", "(", "name", "=", "name", ",", "id", "=", "job_id", ",", "num", "=", "num", ",", "attempt", "=", "attempt", ",", "seg", "=", "seg_index", ")", "except", "ValueError", ",", "error", ":", "raise", "errors", ".", "BadWriterParamsError", "(", "\"Naming template is bad, %s\"", "%", "(", "error", ")", ")", "except", "KeyError", ",", "error", ":", "raise", "errors", ".", "BadWriterParamsError", "(", "\"Naming template '%s' has extra \"", "\"mappings, %s\"", "%", "(", "naming_format", ",", "error", ")", ")" ]
Generates a filename for a particular output. Args: writer_spec: specification dictionary for the output writer. name: name of the job. job_id: the ID number assigned to the job. num: shard number. attempt: the shard attempt number. seg_index: index of the seg. None means the final output. Returns: a string containing the filename. Raises: BadWriterParamsError: if the template contains any errors such as invalid syntax or contains unknown substitution placeholders.
[ "Generates", "a", "filename", "for", "a", "particular", "output", "." ]
python
train
40.552632
rsalmaso/django-fluo
fluo/views/decorators.py
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/views/decorators.py#L54-L78
def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME): """ Decorator for views that, if required, checks that the user is logged in and redirect to the log-in page if necessary. """ if required: if django.VERSION < (1, 11): actual_decorator = user_passes_test( lambda u: u.is_authenticated(), redirect_field_name=redirect_field_name ) else: actual_decorator = user_passes_test( lambda u: u.is_authenticated, redirect_field_name=redirect_field_name ) if function: return actual_decorator(function) return actual_decorator # login not required def decorator(view_func): def _wrapper(request, *args, **kwargs): return function(request, *args, **kwargs) return wraps(function)(_wrapper) return method_decorator(decorator)
[ "def", "login_required", "(", "function", "=", "None", ",", "required", "=", "False", ",", "redirect_field_name", "=", "REDIRECT_FIELD_NAME", ")", ":", "if", "required", ":", "if", "django", ".", "VERSION", "<", "(", "1", ",", "11", ")", ":", "actual_decorator", "=", "user_passes_test", "(", "lambda", "u", ":", "u", ".", "is_authenticated", "(", ")", ",", "redirect_field_name", "=", "redirect_field_name", ")", "else", ":", "actual_decorator", "=", "user_passes_test", "(", "lambda", "u", ":", "u", ".", "is_authenticated", ",", "redirect_field_name", "=", "redirect_field_name", ")", "if", "function", ":", "return", "actual_decorator", "(", "function", ")", "return", "actual_decorator", "# login not required", "def", "decorator", "(", "view_func", ")", ":", "def", "_wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "function", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wraps", "(", "function", ")", "(", "_wrapper", ")", "return", "method_decorator", "(", "decorator", ")" ]
Decorator for views that, if required, checks that the user is logged in and redirect to the log-in page if necessary.
[ "Decorator", "for", "views", "that", "if", "required", "checks", "that", "the", "user", "is", "logged", "in", "and", "redirect", "to", "the", "log", "-", "in", "page", "if", "necessary", "." ]
python
train
37.84
tornadoweb/tornado
tornado/httpclient.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/httpclient.py#L113-L118
def close(self) -> None: """Closes the HTTPClient, freeing any resources used.""" if not self._closed: self._async_client.close() self._io_loop.close() self._closed = True
[ "def", "close", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "_closed", ":", "self", ".", "_async_client", ".", "close", "(", ")", "self", ".", "_io_loop", ".", "close", "(", ")", "self", ".", "_closed", "=", "True" ]
Closes the HTTPClient, freeing any resources used.
[ "Closes", "the", "HTTPClient", "freeing", "any", "resources", "used", "." ]
python
train
36.333333
swisscom/cleanerversion
versions/models.py
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/models.py#L531-L548
def _set_item_querytime(self, item, type_check=True): """ Sets the time for which the query was made on the resulting item :param item: an item of type Versionable :param type_check: Check the item to be a Versionable :return: Returns the item itself with the time set """ if isinstance(item, Versionable): item._querytime = self.querytime elif isinstance(item, VersionedQuerySet): item.querytime = self.querytime else: if type_check: raise TypeError( "This item is not a Versionable, it's a " + str( type(item))) return item
[ "def", "_set_item_querytime", "(", "self", ",", "item", ",", "type_check", "=", "True", ")", ":", "if", "isinstance", "(", "item", ",", "Versionable", ")", ":", "item", ".", "_querytime", "=", "self", ".", "querytime", "elif", "isinstance", "(", "item", ",", "VersionedQuerySet", ")", ":", "item", ".", "querytime", "=", "self", ".", "querytime", "else", ":", "if", "type_check", ":", "raise", "TypeError", "(", "\"This item is not a Versionable, it's a \"", "+", "str", "(", "type", "(", "item", ")", ")", ")", "return", "item" ]
Sets the time for which the query was made on the resulting item :param item: an item of type Versionable :param type_check: Check the item to be a Versionable :return: Returns the item itself with the time set
[ "Sets", "the", "time", "for", "which", "the", "query", "was", "made", "on", "the", "resulting", "item" ]
python
train
38.055556
nschloe/orthopy
orthopy/sphere/orth.py
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/sphere/orth.py#L9-L33
def tree_sph(polar, azimuthal, n, standardization, symbolic=False): """Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`. """ cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos # Conventions from # <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>. config = { "acoustic": ("complex spherical", False), "quantum mechanic": ("complex spherical", True), "geodetic": ("complex spherical 1", False), "schmidt": ("schmidt", False), } standard, cs_phase = config[standardization] return tree_alp( cos(polar), n, phi=azimuthal, standardization=standard, with_condon_shortley_phase=cs_phase, symbolic=symbolic, )
[ "def", "tree_sph", "(", "polar", ",", "azimuthal", ",", "n", ",", "standardization", ",", "symbolic", "=", "False", ")", ":", "cos", "=", "numpy", ".", "vectorize", "(", "sympy", ".", "cos", ")", "if", "symbolic", "else", "numpy", ".", "cos", "# Conventions from", "# <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>.", "config", "=", "{", "\"acoustic\"", ":", "(", "\"complex spherical\"", ",", "False", ")", ",", "\"quantum mechanic\"", ":", "(", "\"complex spherical\"", ",", "True", ")", ",", "\"geodetic\"", ":", "(", "\"complex spherical 1\"", ",", "False", ")", ",", "\"schmidt\"", ":", "(", "\"schmidt\"", ",", "False", ")", ",", "}", "standard", ",", "cs_phase", "=", "config", "[", "standardization", "]", "return", "tree_alp", "(", "cos", "(", "polar", ")", ",", "n", ",", "phi", "=", "azimuthal", ",", "standardization", "=", "standard", ",", "with_condon_shortley_phase", "=", "cs_phase", ",", "symbolic", "=", "symbolic", ",", ")" ]
Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`.
[ "Evaluate", "all", "spherical", "harmonics", "of", "degree", "at", "most", "n", "at", "angles", "polar", "azimuthal", "." ]
python
train
31.4
inveniosoftware-contrib/invenio-classifier
invenio_classifier/engine.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/engine.py#L569-L577
def save_keywords(filename, xml): """Save keyword XML to filename.""" tmp_dir = os.path.dirname(filename) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) file_desc = open(filename, "w") file_desc.write(xml) file_desc.close()
[ "def", "save_keywords", "(", "filename", ",", "xml", ")", ":", "tmp_dir", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "tmp_dir", ")", ":", "os", ".", "mkdir", "(", "tmp_dir", ")", "file_desc", "=", "open", "(", "filename", ",", "\"w\"", ")", "file_desc", ".", "write", "(", "xml", ")", "file_desc", ".", "close", "(", ")" ]
Save keyword XML to filename.
[ "Save", "keyword", "XML", "to", "filename", "." ]
python
train
27.777778
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L1381-L1387
def identifier_md5(self): """ Return an MD5 of the identifier """ as_int = (self.identifier * 1e4).astype(np.int64) hashed = util.md5_object(as_int.tostring(order='C')) return hashed
[ "def", "identifier_md5", "(", "self", ")", ":", "as_int", "=", "(", "self", ".", "identifier", "*", "1e4", ")", ".", "astype", "(", "np", ".", "int64", ")", "hashed", "=", "util", ".", "md5_object", "(", "as_int", ".", "tostring", "(", "order", "=", "'C'", ")", ")", "return", "hashed" ]
Return an MD5 of the identifier
[ "Return", "an", "MD5", "of", "the", "identifier" ]
python
train
32
nfcpy/nfcpy
src/nfc/tag/tt3_sony.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt3_sony.py#L281-L323
def search_service_code(self, service_index): """Search for a service code that corresponds to an index. The Search Service Code command provides access to the iterable list of services and areas within the activated system. The *service_index* argument may be any value from 0 to 0xffff. As long as there is a service or area found for a given *service_index*, the information returned is a tuple with either one or two 16-bit integer elements. Two integers are returned for an area definition, the first is the area code and the second is the largest possible service index for the area. One integer, the service code, is returned for a service definition. The return value is :const:`None` if the *service_index* was not found. For example, to print all services and areas of the active system: :: for i in xrange(0x10000): area_or_service = tag.search_service_code(i) if area_or_service is None: break elif len(area_or_service) == 1: sc = area_or_service[0] print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f)) elif len(area_or_service) == 2: area_code, area_last = area_or_service print("Area {0:04x}--{0:04x}".format(area_code, area_last)) Command execution errors raise :exc:`~nfc.tag.TagCommandError`. """ log.debug("search service code index {0}".format(service_index)) # The maximum response time is given by the value of PMM[3]. # Some cards (like RC-S860 with IC RC-S915) encode a value # that is too short, thus we use at lest 2 ms. a, e = self.pmm[3] & 7, self.pmm[3] >> 6 timeout = max(302E-6 * (a + 1) * 4**e, 0.002) data = pack("<H", service_index) data = self.send_cmd_recv_rsp(0x0A, data, timeout, check_status=False) if data != "\xFF\xFF": unpack_format = "<H" if len(data) == 2 else "<HH" return unpack(unpack_format, data)
[ "def", "search_service_code", "(", "self", ",", "service_index", ")", ":", "log", ".", "debug", "(", "\"search service code index {0}\"", ".", "format", "(", "service_index", ")", ")", "# The maximum response time is given by the value of PMM[3].", "# Some cards (like RC-S860 with IC RC-S915) encode a value", "# that is too short, thus we use at lest 2 ms.", "a", ",", "e", "=", "self", ".", "pmm", "[", "3", "]", "&", "7", ",", "self", ".", "pmm", "[", "3", "]", ">>", "6", "timeout", "=", "max", "(", "302E-6", "*", "(", "a", "+", "1", ")", "*", "4", "**", "e", ",", "0.002", ")", "data", "=", "pack", "(", "\"<H\"", ",", "service_index", ")", "data", "=", "self", ".", "send_cmd_recv_rsp", "(", "0x0A", ",", "data", ",", "timeout", ",", "check_status", "=", "False", ")", "if", "data", "!=", "\"\\xFF\\xFF\"", ":", "unpack_format", "=", "\"<H\"", "if", "len", "(", "data", ")", "==", "2", "else", "\"<HH\"", "return", "unpack", "(", "unpack_format", ",", "data", ")" ]
Search for a service code that corresponds to an index. The Search Service Code command provides access to the iterable list of services and areas within the activated system. The *service_index* argument may be any value from 0 to 0xffff. As long as there is a service or area found for a given *service_index*, the information returned is a tuple with either one or two 16-bit integer elements. Two integers are returned for an area definition, the first is the area code and the second is the largest possible service index for the area. One integer, the service code, is returned for a service definition. The return value is :const:`None` if the *service_index* was not found. For example, to print all services and areas of the active system: :: for i in xrange(0x10000): area_or_service = tag.search_service_code(i) if area_or_service is None: break elif len(area_or_service) == 1: sc = area_or_service[0] print(nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f)) elif len(area_or_service) == 2: area_code, area_last = area_or_service print("Area {0:04x}--{0:04x}".format(area_code, area_last)) Command execution errors raise :exc:`~nfc.tag.TagCommandError`.
[ "Search", "for", "a", "service", "code", "that", "corresponds", "to", "an", "index", "." ]
python
train
48.953488
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py#L540-L551
def parse_html(html, cleanup=True): """ Parses an HTML fragment, returning an lxml element. Note that the HTML will be wrapped in a <div> tag that was not in the original document. If cleanup is true, make sure there's no <head> or <body>, and get rid of any <ins> and <del> tags. """ if cleanup: # This removes any extra markup or structure like <head>: html = cleanup_html(html) return fragment_fromstring(html, create_parent=True)
[ "def", "parse_html", "(", "html", ",", "cleanup", "=", "True", ")", ":", "if", "cleanup", ":", "# This removes any extra markup or structure like <head>:", "html", "=", "cleanup_html", "(", "html", ")", "return", "fragment_fromstring", "(", "html", ",", "create_parent", "=", "True", ")" ]
Parses an HTML fragment, returning an lxml element. Note that the HTML will be wrapped in a <div> tag that was not in the original document. If cleanup is true, make sure there's no <head> or <body>, and get rid of any <ins> and <del> tags.
[ "Parses", "an", "HTML", "fragment", "returning", "an", "lxml", "element", ".", "Note", "that", "the", "HTML", "will", "be", "wrapped", "in", "a", "<div", ">", "tag", "that", "was", "not", "in", "the", "original", "document", "." ]
python
test
39.333333
cmap/cmapPy
cmapPy/math/fast_corr.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/math/fast_corr.py#L11-L37
def fast_corr(x, y=None, destination=None): """calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlaton matrix between x and y (with dimensions OxP). If destination is provided, put the results there. In the language of statistics the columns are the variables and the rows are the observations. Args: x (numpy array-like) MxN in shape y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y) destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provied, shape is NxP """ if y is None: y = x r = fast_cov.fast_cov(x, y, destination) std_x = numpy.std(x, axis=0, ddof=1) std_y = numpy.std(y, axis=0, ddof=1) numpy.divide(r, std_x[:, numpy.newaxis], out=r) numpy.divide(r, std_y[numpy.newaxis, :], out=r) return r
[ "def", "fast_corr", "(", "x", ",", "y", "=", "None", ",", "destination", "=", "None", ")", ":", "if", "y", "is", "None", ":", "y", "=", "x", "r", "=", "fast_cov", ".", "fast_cov", "(", "x", ",", "y", ",", "destination", ")", "std_x", "=", "numpy", ".", "std", "(", "x", ",", "axis", "=", "0", ",", "ddof", "=", "1", ")", "std_y", "=", "numpy", ".", "std", "(", "y", ",", "axis", "=", "0", ",", "ddof", "=", "1", ")", "numpy", ".", "divide", "(", "r", ",", "std_x", "[", ":", ",", "numpy", ".", "newaxis", "]", ",", "out", "=", "r", ")", "numpy", ".", "divide", "(", "r", ",", "std_y", "[", "numpy", ".", "newaxis", ",", ":", "]", ",", "out", "=", "r", ")", "return", "r" ]
calculate the pearson correlation matrix for the columns of x (with dimensions MxN), or optionally, the pearson correlaton matrix between x and y (with dimensions OxP). If destination is provided, put the results there. In the language of statistics the columns are the variables and the rows are the observations. Args: x (numpy array-like) MxN in shape y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y) destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provied, shape is NxP
[ "calculate", "the", "pearson", "correlation", "matrix", "for", "the", "columns", "of", "x", "(", "with", "dimensions", "MxN", ")", "or", "optionally", "the", "pearson", "correlaton", "matrix", "between", "x", "and", "y", "(", "with", "dimensions", "OxP", ")", ".", "If", "destination", "is", "provided", "put", "the", "results", "there", ".", "In", "the", "language", "of", "statistics", "the", "columns", "are", "the", "variables", "and", "the", "rows", "are", "the", "observations", "." ]
python
train
40.592593
awslabs/sockeye
sockeye/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L197-L220
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder': """ Returns a Transformer encoder, consisting of an embedding layer with positional encodings and a TransformerEncoder instance. :param config: Configuration for transformer encoder. :param prefix: Prefix for variable names. :return: Encoder instance. """ encoder_seq = EncoderSequence([], dtype=config.dtype) cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type, config.model_size, config.max_seq_len_source, fixed_pos_embed_scale_up_input=True, fixed_pos_embed_scale_down_positions=False, prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX) encoder_seq.append(cls, **encoder_params) if config.conv_config is not None: encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config, prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX) encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX) return encoder_seq
[ "def", "get_transformer_encoder", "(", "config", ":", "transformer", ".", "TransformerConfig", ",", "prefix", ":", "str", ")", "->", "'Encoder'", ":", "encoder_seq", "=", "EncoderSequence", "(", "[", "]", ",", "dtype", "=", "config", ".", "dtype", ")", "cls", ",", "encoder_params", "=", "_get_positional_embedding_params", "(", "config", ".", "positional_embedding_type", ",", "config", ".", "model_size", ",", "config", ".", "max_seq_len_source", ",", "fixed_pos_embed_scale_up_input", "=", "True", ",", "fixed_pos_embed_scale_down_positions", "=", "False", ",", "prefix", "=", "prefix", "+", "C", ".", "SOURCE_POSITIONAL_EMBEDDING_PREFIX", ")", "encoder_seq", ".", "append", "(", "cls", ",", "*", "*", "encoder_params", ")", "if", "config", ".", "conv_config", "is", "not", "None", ":", "encoder_seq", ".", "append", "(", "ConvolutionalEmbeddingEncoder", ",", "config", "=", "config", ".", "conv_config", ",", "prefix", "=", "prefix", "+", "C", ".", "CHAR_SEQ_ENCODER_PREFIX", ")", "encoder_seq", ".", "append", "(", "TransformerEncoder", ",", "config", "=", "config", ",", "prefix", "=", "prefix", "+", "C", ".", "TRANSFORMER_ENCODER_PREFIX", ")", "return", "encoder_seq" ]
Returns a Transformer encoder, consisting of an embedding layer with positional encodings and a TransformerEncoder instance. :param config: Configuration for transformer encoder. :param prefix: Prefix for variable names. :return: Encoder instance.
[ "Returns", "a", "Transformer", "encoder", "consisting", "of", "an", "embedding", "layer", "with", "positional", "encodings", "and", "a", "TransformerEncoder", "instance", "." ]
python
train
56.291667
lowandrew/OLCTools
metagenomefilter/automateCLARK.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/automateCLARK.py#L80-L103
def classifymetagenome(self): """Run the classify metagenome of the CLARK package on the samples""" logging.info('Classifying metagenomes') # Define the system call self.classifycall = 'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light'\ .format(self.clarkpath, self.filelist, self.reportlist, self.cpus) # Variable to store classification state classify = True for sample in self.runmetadata.samples: try: # Define the name of the .csv classification file sample.general.classification = sample.general.combined.split('.')[0] + '.csv' # If the file exists, then set classify to False if os.path.isfile(sample.general.classification): classify = False except KeyError: pass # Run the system call if the samples have not been classified if classify: # Run the call subprocess.call(self.classifycall, shell=True, stdout=self.devnull, stderr=self.devnull)
[ "def", "classifymetagenome", "(", "self", ")", ":", "logging", ".", "info", "(", "'Classifying metagenomes'", ")", "# Define the system call", "self", ".", "classifycall", "=", "'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light'", ".", "format", "(", "self", ".", "clarkpath", ",", "self", ".", "filelist", ",", "self", ".", "reportlist", ",", "self", ".", "cpus", ")", "# Variable to store classification state", "classify", "=", "True", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "try", ":", "# Define the name of the .csv classification file", "sample", ".", "general", ".", "classification", "=", "sample", ".", "general", ".", "combined", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'.csv'", "# If the file exists, then set classify to False", "if", "os", ".", "path", ".", "isfile", "(", "sample", ".", "general", ".", "classification", ")", ":", "classify", "=", "False", "except", "KeyError", ":", "pass", "# Run the system call if the samples have not been classified", "if", "classify", ":", "# Run the call", "subprocess", ".", "call", "(", "self", ".", "classifycall", ",", "shell", "=", "True", ",", "stdout", "=", "self", ".", "devnull", ",", "stderr", "=", "self", ".", "devnull", ")" ]
Run the classify metagenome of the CLARK package on the samples
[ "Run", "the", "classify", "metagenome", "of", "the", "CLARK", "package", "on", "the", "samples" ]
python
train
47.125
click-contrib/click-configfile
tasks/_vendor/pathlib.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/_vendor/pathlib.py#L1152-L1160
def replace(self, target): """ Rename this path to the given path, clobbering the existing destination if it exists. """ if sys.version_info < (3, 3): raise NotImplementedError("replace() is only available " "with Python 3.3 and later") self._accessor.replace(self, target)
[ "def", "replace", "(", "self", ",", "target", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "3", ")", ":", "raise", "NotImplementedError", "(", "\"replace() is only available \"", "\"with Python 3.3 and later\"", ")", "self", ".", "_accessor", ".", "replace", "(", "self", ",", "target", ")" ]
Rename this path to the given path, clobbering the existing destination if it exists.
[ "Rename", "this", "path", "to", "the", "given", "path", "clobbering", "the", "existing", "destination", "if", "it", "exists", "." ]
python
train
40.333333
fronzbot/blinkpy
blinkpy/blinkpy.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L250-L285
def download_videos(self, path, since=None, camera='all', stop=10): """ Download all videos from server since specified time. :param path: Path to write files. /path/<cameraname>_<recorddate>.mp4 :param since: Date and time to get videos from. Ex: "2018/07/28 12:33:00" to retrieve videos since July 28th 2018 at 12:33:00 :param camera: Camera name to retrieve. Defaults to "all". Use a list for multiple cameras. :param stop: Page to stop on (~25 items per page. Default page 10). """ if since is None: since_epochs = self.last_refresh else: parsed_datetime = parse(since, fuzzy=True) since_epochs = parsed_datetime.timestamp() formatted_date = get_time(time_to_convert=since_epochs) _LOGGER.info("Retrieving videos since %s", formatted_date) if not isinstance(camera, list): camera = [camera] for page in range(1, stop): response = api.request_videos(self, time=since_epochs, page=page) _LOGGER.debug("Processing page %s", page) try: result = response['videos'] if not result: raise IndexError except (KeyError, IndexError): _LOGGER.info("No videos found on page %s. Exiting.", page) break self._parse_downloaded_items(result, camera, path)
[ "def", "download_videos", "(", "self", ",", "path", ",", "since", "=", "None", ",", "camera", "=", "'all'", ",", "stop", "=", "10", ")", ":", "if", "since", "is", "None", ":", "since_epochs", "=", "self", ".", "last_refresh", "else", ":", "parsed_datetime", "=", "parse", "(", "since", ",", "fuzzy", "=", "True", ")", "since_epochs", "=", "parsed_datetime", ".", "timestamp", "(", ")", "formatted_date", "=", "get_time", "(", "time_to_convert", "=", "since_epochs", ")", "_LOGGER", ".", "info", "(", "\"Retrieving videos since %s\"", ",", "formatted_date", ")", "if", "not", "isinstance", "(", "camera", ",", "list", ")", ":", "camera", "=", "[", "camera", "]", "for", "page", "in", "range", "(", "1", ",", "stop", ")", ":", "response", "=", "api", ".", "request_videos", "(", "self", ",", "time", "=", "since_epochs", ",", "page", "=", "page", ")", "_LOGGER", ".", "debug", "(", "\"Processing page %s\"", ",", "page", ")", "try", ":", "result", "=", "response", "[", "'videos'", "]", "if", "not", "result", ":", "raise", "IndexError", "except", "(", "KeyError", ",", "IndexError", ")", ":", "_LOGGER", ".", "info", "(", "\"No videos found on page %s. Exiting.\"", ",", "page", ")", "break", "self", ".", "_parse_downloaded_items", "(", "result", ",", "camera", ",", "path", ")" ]
Download all videos from server since specified time. :param path: Path to write files. /path/<cameraname>_<recorddate>.mp4 :param since: Date and time to get videos from. Ex: "2018/07/28 12:33:00" to retrieve videos since July 28th 2018 at 12:33:00 :param camera: Camera name to retrieve. Defaults to "all". Use a list for multiple cameras. :param stop: Page to stop on (~25 items per page. Default page 10).
[ "Download", "all", "videos", "from", "server", "since", "specified", "time", "." ]
python
train
41.166667
lionel/counterparts
counterparts.py
https://github.com/lionel/counterparts/blob/20db9852feff531f854972f76b412c442b2fafbf/counterparts.py#L143-L168
def _check_and_handle_includes(self, from_file): """Look for an optional INCLUDE section in the given file path. If the parser set `paths`, it is cleared so that they do not keep showing up when additional files are parsed. """ logger.debug("Check/handle includes from %s", from_file) try: paths = self._parser.get("INCLUDE", "paths") except (config_parser.NoSectionError, config_parser.NoOptionError) as exc: logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc) return paths_lines = [p.strip() for p in paths.split("\n")] logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines) self._parser.remove_option("INCLUDE", "paths") for f in paths_lines: abspath = (f if os.path.isabs(f) else os.path.abspath( os.path.join(os.path.dirname(from_file), f))) use_path = os.path.normpath(abspath) if use_path in self._parsed_files: raise RecursionInConfigFile("In %s: %s already read", from_file, use_path) self._parsed_files.append(use_path) self._handle_rc_file(use_path)
[ "def", "_check_and_handle_includes", "(", "self", ",", "from_file", ")", ":", "logger", ".", "debug", "(", "\"Check/handle includes from %s\"", ",", "from_file", ")", "try", ":", "paths", "=", "self", ".", "_parser", ".", "get", "(", "\"INCLUDE\"", ",", "\"paths\"", ")", "except", "(", "config_parser", ".", "NoSectionError", ",", "config_parser", ".", "NoOptionError", ")", "as", "exc", ":", "logger", ".", "debug", "(", "\"_check_and_handle_includes: EXCEPTION: %s\"", ",", "exc", ")", "return", "paths_lines", "=", "[", "p", ".", "strip", "(", ")", "for", "p", "in", "paths", ".", "split", "(", "\"\\n\"", ")", "]", "logger", ".", "debug", "(", "\"paths = %s (wanted just once; CLEARING)\"", ",", "paths_lines", ")", "self", ".", "_parser", ".", "remove_option", "(", "\"INCLUDE\"", ",", "\"paths\"", ")", "for", "f", "in", "paths_lines", ":", "abspath", "=", "(", "f", "if", "os", ".", "path", ".", "isabs", "(", "f", ")", "else", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "from_file", ")", ",", "f", ")", ")", ")", "use_path", "=", "os", ".", "path", ".", "normpath", "(", "abspath", ")", "if", "use_path", "in", "self", ".", "_parsed_files", ":", "raise", "RecursionInConfigFile", "(", "\"In %s: %s already read\"", ",", "from_file", ",", "use_path", ")", "self", ".", "_parsed_files", ".", "append", "(", "use_path", ")", "self", ".", "_handle_rc_file", "(", "use_path", ")" ]
Look for an optional INCLUDE section in the given file path. If the parser set `paths`, it is cleared so that they do not keep showing up when additional files are parsed.
[ "Look", "for", "an", "optional", "INCLUDE", "section", "in", "the", "given", "file", "path", ".", "If", "the", "parser", "set", "paths", "it", "is", "cleared", "so", "that", "they", "do", "not", "keep", "showing", "up", "when", "additional", "files", "are", "parsed", "." ]
python
train
48.923077
coveralls-clients/coveralls-python
coveralls/reporter.py
https://github.com/coveralls-clients/coveralls-python/blob/0d2636d029b329f8bd74cad43e04b2c8f518532a/coveralls/reporter.py#L90-L114
def get_arcs(analysis): """ Hit stats for each branch. Returns a flat list where every four values represent a branch: 1. line-number 2. block-number (not used) 3. branch-number 4. hits (we only get 1/0 from coverage.py) """ if not analysis.has_arcs(): return None branch_lines = analysis.branch_lines() branches = [] for l1, l2 in analysis.arcs_executed(): if l1 in branch_lines: branches.extend((l1, 0, abs(l2), 1)) for l1, l2 in analysis.arcs_missing(): if l1 in branch_lines: branches.extend((l1, 0, abs(l2), 0)) return branches
[ "def", "get_arcs", "(", "analysis", ")", ":", "if", "not", "analysis", ".", "has_arcs", "(", ")", ":", "return", "None", "branch_lines", "=", "analysis", ".", "branch_lines", "(", ")", "branches", "=", "[", "]", "for", "l1", ",", "l2", "in", "analysis", ".", "arcs_executed", "(", ")", ":", "if", "l1", "in", "branch_lines", ":", "branches", ".", "extend", "(", "(", "l1", ",", "0", ",", "abs", "(", "l2", ")", ",", "1", ")", ")", "for", "l1", ",", "l2", "in", "analysis", ".", "arcs_missing", "(", ")", ":", "if", "l1", "in", "branch_lines", ":", "branches", ".", "extend", "(", "(", "l1", ",", "0", ",", "abs", "(", "l2", ")", ",", "0", ")", ")", "return", "branches" ]
Hit stats for each branch. Returns a flat list where every four values represent a branch: 1. line-number 2. block-number (not used) 3. branch-number 4. hits (we only get 1/0 from coverage.py)
[ "Hit", "stats", "for", "each", "branch", "." ]
python
train
27.72
flo-compbio/genometools
genometools/ensembl/util.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/util.py#L77-L127
def get_file_checksums(url, ftp=None): """Download and parse an Ensembl CHECKSUMS file and obtain checksums. Parameters ---------- url : str The URL of the CHECKSUM file. ftp : `ftplib.FTP` or `None`, optional An FTP connection. Returns ------- `collections.OrderedDict` An ordered dictionary containing file names as keys and checksums as values. Notes ----- The checksums contains in Ensembl CHECKSUM files are obtained with the UNIX `sum` command. """ assert isinstance(url, (str, _oldstr)) if ftp is not None: assert isinstance(ftp, ftplib.FTP) # open FTP connection if necessary close_connection = False ftp_server = 'ftp.ensembl.org' ftp_user = 'anonymous' if ftp is None: ftp = ftplib.FTP(ftp_server) ftp.login(ftp_user) close_connection = True # download and parse CHECKSUM file data = [] ftp.retrbinary('RETR %s' % url, data.append) data = ''.join(d.decode('utf-8') for d in data).split('\n')[:-1] file_checksums = OrderedDict() for d in data: file_name = d[(d.rindex(' ') + 1):] sum_ = int(d[:d.index(' ')]) file_checksums[file_name] = sum_ logger.debug('Obtained checksums for %d files', len(file_checksums)) # close FTP connection if we opened it if close_connection: ftp.close() return file_checksums
[ "def", "get_file_checksums", "(", "url", ",", "ftp", "=", "None", ")", ":", "assert", "isinstance", "(", "url", ",", "(", "str", ",", "_oldstr", ")", ")", "if", "ftp", "is", "not", "None", ":", "assert", "isinstance", "(", "ftp", ",", "ftplib", ".", "FTP", ")", "# open FTP connection if necessary", "close_connection", "=", "False", "ftp_server", "=", "'ftp.ensembl.org'", "ftp_user", "=", "'anonymous'", "if", "ftp", "is", "None", ":", "ftp", "=", "ftplib", ".", "FTP", "(", "ftp_server", ")", "ftp", ".", "login", "(", "ftp_user", ")", "close_connection", "=", "True", "# download and parse CHECKSUM file", "data", "=", "[", "]", "ftp", ".", "retrbinary", "(", "'RETR %s'", "%", "url", ",", "data", ".", "append", ")", "data", "=", "''", ".", "join", "(", "d", ".", "decode", "(", "'utf-8'", ")", "for", "d", "in", "data", ")", ".", "split", "(", "'\\n'", ")", "[", ":", "-", "1", "]", "file_checksums", "=", "OrderedDict", "(", ")", "for", "d", "in", "data", ":", "file_name", "=", "d", "[", "(", "d", ".", "rindex", "(", "' '", ")", "+", "1", ")", ":", "]", "sum_", "=", "int", "(", "d", "[", ":", "d", ".", "index", "(", "' '", ")", "]", ")", "file_checksums", "[", "file_name", "]", "=", "sum_", "logger", ".", "debug", "(", "'Obtained checksums for %d files'", ",", "len", "(", "file_checksums", ")", ")", "# close FTP connection if we opened it", "if", "close_connection", ":", "ftp", ".", "close", "(", ")", "return", "file_checksums" ]
Download and parse an Ensembl CHECKSUMS file and obtain checksums. Parameters ---------- url : str The URL of the CHECKSUM file. ftp : `ftplib.FTP` or `None`, optional An FTP connection. Returns ------- `collections.OrderedDict` An ordered dictionary containing file names as keys and checksums as values. Notes ----- The checksums contains in Ensembl CHECKSUM files are obtained with the UNIX `sum` command.
[ "Download", "and", "parse", "an", "Ensembl", "CHECKSUMS", "file", "and", "obtain", "checksums", "." ]
python
train
27.588235
doraemonext/wechat-python-sdk
wechat_sdk/core/conf.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/core/conf.py#L218-L242
def grant_jsapi_ticket(self): """ 获取 jsapi ticket 并更新当前配置 :return: 返回的 JSON 数据包 (传入 jsapi_ticket_refreshfunc 参数后返回 None) """ self._check_appid_appsecret() if callable(self.__jsapi_ticket_refreshfunc): self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_refreshfunc() return response_json = self.__request.get( url="https://api.weixin.qq.com/cgi-bin/ticket/getticket", params={ "type": "jsapi", }, access_token=self.access_token, ) self.__jsapi_ticket = response_json['ticket'] self.__jsapi_ticket_expires_at = int(time.time()) + response_json['expires_in'] if callable(self.__jsapi_ticket_setfunc): self.__jsapi_ticket_setfunc(self.__jsapi_ticket, self.__jsapi_ticket_expires_at) return response_json
[ "def", "grant_jsapi_ticket", "(", "self", ")", ":", "self", ".", "_check_appid_appsecret", "(", ")", "if", "callable", "(", "self", ".", "__jsapi_ticket_refreshfunc", ")", ":", "self", ".", "__jsapi_ticket", ",", "self", ".", "__jsapi_ticket_expires_at", "=", "self", ".", "__jsapi_ticket_refreshfunc", "(", ")", "return", "response_json", "=", "self", ".", "__request", ".", "get", "(", "url", "=", "\"https://api.weixin.qq.com/cgi-bin/ticket/getticket\"", ",", "params", "=", "{", "\"type\"", ":", "\"jsapi\"", ",", "}", ",", "access_token", "=", "self", ".", "access_token", ",", ")", "self", ".", "__jsapi_ticket", "=", "response_json", "[", "'ticket'", "]", "self", ".", "__jsapi_ticket_expires_at", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "response_json", "[", "'expires_in'", "]", "if", "callable", "(", "self", ".", "__jsapi_ticket_setfunc", ")", ":", "self", ".", "__jsapi_ticket_setfunc", "(", "self", ".", "__jsapi_ticket", ",", "self", ".", "__jsapi_ticket_expires_at", ")", "return", "response_json" ]
获取 jsapi ticket 并更新当前配置 :return: 返回的 JSON 数据包 (传入 jsapi_ticket_refreshfunc 参数后返回 None)
[ "获取", "jsapi", "ticket", "并更新当前配置", ":", "return", ":", "返回的", "JSON", "数据包", "(", "传入", "jsapi_ticket_refreshfunc", "参数后返回", "None", ")" ]
python
valid
35.92
necaris/python3-openid
openid/store/memstore.py
https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/store/memstore.py#L38-L49
def cleanup(self): """Remove expired associations. @return: tuple of (removed associations, remaining associations) """ remove = [] for handle, assoc in self.assocs.items(): if assoc.expiresIn == 0: remove.append(handle) for handle in remove: del self.assocs[handle] return len(remove), len(self.assocs)
[ "def", "cleanup", "(", "self", ")", ":", "remove", "=", "[", "]", "for", "handle", ",", "assoc", "in", "self", ".", "assocs", ".", "items", "(", ")", ":", "if", "assoc", ".", "expiresIn", "==", "0", ":", "remove", ".", "append", "(", "handle", ")", "for", "handle", "in", "remove", ":", "del", "self", ".", "assocs", "[", "handle", "]", "return", "len", "(", "remove", ")", ",", "len", "(", "self", ".", "assocs", ")" ]
Remove expired associations. @return: tuple of (removed associations, remaining associations)
[ "Remove", "expired", "associations", "." ]
python
train
32.416667
galaxy-genome-annotation/python-apollo
arrow/commands/users/update_user.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/users/update_user.py#L19-L26
def cli(ctx, email, first_name, last_name, password, metadata={}): """Update an existing user Output: a dictionary containing user information """ return ctx.gi.users.update_user(email, first_name, last_name, password, metadata=metadata)
[ "def", "cli", "(", "ctx", ",", "email", ",", "first_name", ",", "last_name", ",", "password", ",", "metadata", "=", "{", "}", ")", ":", "return", "ctx", ".", "gi", ".", "users", ".", "update_user", "(", "email", ",", "first_name", ",", "last_name", ",", "password", ",", "metadata", "=", "metadata", ")" ]
Update an existing user Output: a dictionary containing user information
[ "Update", "an", "existing", "user" ]
python
train
31
inveniosoftware-contrib/invenio-classifier
invenio_classifier/engine.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/engine.py#L550-L566
def get_partial_text(fulltext): """Return a short version of the fulltext used with partial matching mode. The version is composed of 20% in the beginning and 20% in the middle of the text. """ def _get_index(x): return int(float(x) / 100 * len(fulltext)) partial_text = [ fulltext[_get_index(start):_get_index(end)] for start, end in current_app.config[ "CLASSIFIER_PARTIAL_TEXT_PERCENTAGES" ] ] return "\n".join(partial_text)
[ "def", "get_partial_text", "(", "fulltext", ")", ":", "def", "_get_index", "(", "x", ")", ":", "return", "int", "(", "float", "(", "x", ")", "/", "100", "*", "len", "(", "fulltext", ")", ")", "partial_text", "=", "[", "fulltext", "[", "_get_index", "(", "start", ")", ":", "_get_index", "(", "end", ")", "]", "for", "start", ",", "end", "in", "current_app", ".", "config", "[", "\"CLASSIFIER_PARTIAL_TEXT_PERCENTAGES\"", "]", "]", "return", "\"\\n\"", ".", "join", "(", "partial_text", ")" ]
Return a short version of the fulltext used with partial matching mode. The version is composed of 20% in the beginning and 20% in the middle of the text.
[ "Return", "a", "short", "version", "of", "the", "fulltext", "used", "with", "partial", "matching", "mode", "." ]
python
train
28.823529
onecodex/onecodex
onecodex/lib/upload.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/lib/upload.py#L225-L246
def _file_size(file_path, uncompressed=False): """Return size of a single file, compressed or uncompressed""" _, ext = os.path.splitext(file_path) if uncompressed: if ext in {".gz", ".gzip"}: with gzip.GzipFile(file_path, mode="rb") as fp: try: fp.seek(0, os.SEEK_END) return fp.tell() except ValueError: # on python2, cannot seek from end and must instead read to end fp.seek(0) while len(fp.read(8192)) != 0: pass return fp.tell() elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}: with bz2.BZ2File(file_path, mode="rb") as fp: fp.seek(0, os.SEEK_END) return fp.tell() return os.path.getsize(file_path)
[ "def", "_file_size", "(", "file_path", ",", "uncompressed", "=", "False", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file_path", ")", "if", "uncompressed", ":", "if", "ext", "in", "{", "\".gz\"", ",", "\".gzip\"", "}", ":", "with", "gzip", ".", "GzipFile", "(", "file_path", ",", "mode", "=", "\"rb\"", ")", "as", "fp", ":", "try", ":", "fp", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "return", "fp", ".", "tell", "(", ")", "except", "ValueError", ":", "# on python2, cannot seek from end and must instead read to end", "fp", ".", "seek", "(", "0", ")", "while", "len", "(", "fp", ".", "read", "(", "8192", ")", ")", "!=", "0", ":", "pass", "return", "fp", ".", "tell", "(", ")", "elif", "ext", "in", "{", "\".bz\"", ",", "\".bz2\"", ",", "\".bzip\"", ",", "\".bzip2\"", "}", ":", "with", "bz2", ".", "BZ2File", "(", "file_path", ",", "mode", "=", "\"rb\"", ")", "as", "fp", ":", "fp", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "return", "fp", ".", "tell", "(", ")", "return", "os", ".", "path", ".", "getsize", "(", "file_path", ")" ]
Return size of a single file, compressed or uncompressed
[ "Return", "size", "of", "a", "single", "file", "compressed", "or", "uncompressed" ]
python
train
38.454545
LionelAuroux/pyrser
pyrser/parsing/base.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/base.py#L99-L109
def push_rule_nodes(self) -> bool: """Push context variable to store rule nodes.""" if self.rule_nodes is None: self.rule_nodes = collections.ChainMap() self.tag_cache = collections.ChainMap() self.id_cache = collections.ChainMap() else: self.rule_nodes = self.rule_nodes.new_child() self.tag_cache = self.tag_cache.new_child() self.id_cache = self.id_cache.new_child() return True
[ "def", "push_rule_nodes", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "rule_nodes", "is", "None", ":", "self", ".", "rule_nodes", "=", "collections", ".", "ChainMap", "(", ")", "self", ".", "tag_cache", "=", "collections", ".", "ChainMap", "(", ")", "self", ".", "id_cache", "=", "collections", ".", "ChainMap", "(", ")", "else", ":", "self", ".", "rule_nodes", "=", "self", ".", "rule_nodes", ".", "new_child", "(", ")", "self", ".", "tag_cache", "=", "self", ".", "tag_cache", ".", "new_child", "(", ")", "self", ".", "id_cache", "=", "self", ".", "id_cache", ".", "new_child", "(", ")", "return", "True" ]
Push context variable to store rule nodes.
[ "Push", "context", "variable", "to", "store", "rule", "nodes", "." ]
python
test
43.181818
log2timeline/plaso
plaso/parsers/text_parser.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L587-L690
def ParseFileObject(self, parser_mediator, file_object): """Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed. """ if not self.LINE_STRUCTURES: raise errors.UnableToParseFile('Missing line structures.') encoding = self._ENCODING or parser_mediator.codepage text_reader = EncodedTextReader( encoding, buffer_size=self.BUFFER_SIZE) text_reader.Reset() try: text_reader.ReadLines(file_object) except UnicodeDecodeError as exception: raise errors.UnableToParseFile( 'Not a text file, with error: {0!s}'.format(exception)) if not self.VerifyStructure(parser_mediator, text_reader.lines): raise errors.UnableToParseFile('Wrong file structure.') # Using parseWithTabs() overrides Pyparsing's default replacement of tabs # with spaces to SkipAhead() the correct number of bytes after a match. for key, structure in self.LINE_STRUCTURES: structure.parseWithTabs() consecutive_line_failures = 0 # Read every line in the text file. while text_reader.lines: if parser_mediator.abort: break # Initialize pyparsing objects. tokens = None start = 0 end = 0 key = None index = None # Try to parse the line using all the line structures. for index, (key, structure) in enumerate(self._line_structures): try: structure_generator = structure.scanString( text_reader.lines, maxMatches=1) parsed_structure = next(structure_generator, None) except pyparsing.ParseException: parsed_structure = None if not parsed_structure: continue tokens, start, end = parsed_structure # Only want to parse the structure if it starts # at the beginning of the buffer. if start == 0: break if tokens and start == 0: # Move matching key, structure pair to the front of the list, so that # structures that are more likely to match are tried first. if index is not None and index != 0: key_structure = self._line_structures.pop(index) self._line_structures.insert(0, key_structure) try: self.ParseRecord(parser_mediator, key, tokens) consecutive_line_failures = 0 except (errors.ParseError, errors.TimestampError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse record: {0:s} with error: {1!s}'.format( key, exception)) text_reader.SkipAhead(file_object, end) else: odd_line = text_reader.ReadLine(file_object) if odd_line: if len(odd_line) > 80: odd_line = '{0:s}...'.format(odd_line[:77]) parser_mediator.ProduceExtractionWarning( 'unable to parse log line: {0:s}'.format(repr(odd_line))) consecutive_line_failures += 1 if (consecutive_line_failures > self.MAXIMUM_CONSECUTIVE_LINE_FAILURES): raise errors.UnableToParseFile( 'more than {0:d} consecutive failures to parse lines.'.format( self.MAXIMUM_CONSECUTIVE_LINE_FAILURES)) try: text_reader.ReadLines(file_object) except UnicodeDecodeError as exception: parser_mediator.ProduceExtractionWarning( 'unable to read lines with error: {0!s}'.format(exception))
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "if", "not", "self", ".", "LINE_STRUCTURES", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Missing line structures.'", ")", "encoding", "=", "self", ".", "_ENCODING", "or", "parser_mediator", ".", "codepage", "text_reader", "=", "EncodedTextReader", "(", "encoding", ",", "buffer_size", "=", "self", ".", "BUFFER_SIZE", ")", "text_reader", ".", "Reset", "(", ")", "try", ":", "text_reader", ".", "ReadLines", "(", "file_object", ")", "except", "UnicodeDecodeError", "as", "exception", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Not a text file, with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "if", "not", "self", ".", "VerifyStructure", "(", "parser_mediator", ",", "text_reader", ".", "lines", ")", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Wrong file structure.'", ")", "# Using parseWithTabs() overrides Pyparsing's default replacement of tabs", "# with spaces to SkipAhead() the correct number of bytes after a match.", "for", "key", ",", "structure", "in", "self", ".", "LINE_STRUCTURES", ":", "structure", ".", "parseWithTabs", "(", ")", "consecutive_line_failures", "=", "0", "# Read every line in the text file.", "while", "text_reader", ".", "lines", ":", "if", "parser_mediator", ".", "abort", ":", "break", "# Initialize pyparsing objects.", "tokens", "=", "None", "start", "=", "0", "end", "=", "0", "key", "=", "None", "index", "=", "None", "# Try to parse the line using all the line structures.", "for", "index", ",", "(", "key", ",", "structure", ")", "in", "enumerate", "(", "self", ".", "_line_structures", ")", ":", "try", ":", "structure_generator", "=", "structure", ".", "scanString", "(", "text_reader", ".", "lines", ",", "maxMatches", "=", "1", ")", "parsed_structure", "=", "next", "(", "structure_generator", ",", "None", ")", "except", "pyparsing", ".", "ParseException", ":", "parsed_structure", "=", "None", "if", "not", "parsed_structure", ":", "continue", "tokens", ",", "start", ",", "end", "=", "parsed_structure", "# Only want to parse the structure if it starts", "# at the beginning of the buffer.", "if", "start", "==", "0", ":", "break", "if", "tokens", "and", "start", "==", "0", ":", "# Move matching key, structure pair to the front of the list, so that", "# structures that are more likely to match are tried first.", "if", "index", "is", "not", "None", "and", "index", "!=", "0", ":", "key_structure", "=", "self", ".", "_line_structures", ".", "pop", "(", "index", ")", "self", ".", "_line_structures", ".", "insert", "(", "0", ",", "key_structure", ")", "try", ":", "self", ".", "ParseRecord", "(", "parser_mediator", ",", "key", ",", "tokens", ")", "consecutive_line_failures", "=", "0", "except", "(", "errors", ".", "ParseError", ",", "errors", ".", "TimestampError", ")", "as", "exception", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to parse record: {0:s} with error: {1!s}'", ".", "format", "(", "key", ",", "exception", ")", ")", "text_reader", ".", "SkipAhead", "(", "file_object", ",", "end", ")", "else", ":", "odd_line", "=", "text_reader", ".", "ReadLine", "(", "file_object", ")", "if", "odd_line", ":", "if", "len", "(", "odd_line", ")", ">", "80", ":", "odd_line", "=", "'{0:s}...'", ".", "format", "(", "odd_line", "[", ":", "77", "]", ")", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to parse log line: {0:s}'", ".", "format", "(", "repr", "(", "odd_line", ")", ")", ")", "consecutive_line_failures", "+=", "1", "if", "(", "consecutive_line_failures", ">", "self", ".", "MAXIMUM_CONSECUTIVE_LINE_FAILURES", ")", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'more than {0:d} consecutive failures to parse lines.'", ".", "format", "(", "self", ".", "MAXIMUM_CONSECUTIVE_LINE_FAILURES", ")", ")", "try", ":", "text_reader", ".", "ReadLines", "(", "file_object", ")", "except", "UnicodeDecodeError", "as", "exception", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to read lines with error: {0!s}'", ".", "format", "(", "exception", ")", ")" ]
Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
[ "Parses", "a", "text", "file", "-", "like", "object", "using", "a", "pyparsing", "definition", "." ]
python
train
34.673077
ramses-tech/nefertari
nefertari/authentication/models.py
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/authentication/models.py#L159-L171
def encrypt_password(**kwargs): """ Crypt :new_value: if it's not crypted yet. """ new_value = kwargs['new_value'] field = kwargs['field'] min_length = field.params['min_length'] if len(new_value) < min_length: raise ValueError( '`{}`: Value length must be more than {}'.format( field.name, field.params['min_length'])) if new_value and not crypt.match(new_value): new_value = str(crypt.encode(new_value)) return new_value
[ "def", "encrypt_password", "(", "*", "*", "kwargs", ")", ":", "new_value", "=", "kwargs", "[", "'new_value'", "]", "field", "=", "kwargs", "[", "'field'", "]", "min_length", "=", "field", ".", "params", "[", "'min_length'", "]", "if", "len", "(", "new_value", ")", "<", "min_length", ":", "raise", "ValueError", "(", "'`{}`: Value length must be more than {}'", ".", "format", "(", "field", ".", "name", ",", "field", ".", "params", "[", "'min_length'", "]", ")", ")", "if", "new_value", "and", "not", "crypt", ".", "match", "(", "new_value", ")", ":", "new_value", "=", "str", "(", "crypt", ".", "encode", "(", "new_value", ")", ")", "return", "new_value" ]
Crypt :new_value: if it's not crypted yet.
[ "Crypt", ":", "new_value", ":", "if", "it", "s", "not", "crypted", "yet", "." ]
python
train
37.153846
RPi-Distro/python-sense-hat
sense_hat/sense_hat.py
https://github.com/RPi-Distro/python-sense-hat/blob/9a37f0923ce8dbde69514c3b8d58d30de01c9ee7/sense_hat/sense_hat.py#L715-L729
def get_orientation_radians(self): """ Returns a dictionary object to represent the current orientation in radians using the aircraft principal axes of pitch, roll and yaw """ raw = self._get_raw_data('fusionPoseValid', 'fusionPose') if raw is not None: raw['roll'] = raw.pop('x') raw['pitch'] = raw.pop('y') raw['yaw'] = raw.pop('z') self._last_orientation = raw return deepcopy(self._last_orientation)
[ "def", "get_orientation_radians", "(", "self", ")", ":", "raw", "=", "self", ".", "_get_raw_data", "(", "'fusionPoseValid'", ",", "'fusionPose'", ")", "if", "raw", "is", "not", "None", ":", "raw", "[", "'roll'", "]", "=", "raw", ".", "pop", "(", "'x'", ")", "raw", "[", "'pitch'", "]", "=", "raw", ".", "pop", "(", "'y'", ")", "raw", "[", "'yaw'", "]", "=", "raw", ".", "pop", "(", "'z'", ")", "self", ".", "_last_orientation", "=", "raw", "return", "deepcopy", "(", "self", ".", "_last_orientation", ")" ]
Returns a dictionary object to represent the current orientation in radians using the aircraft principal axes of pitch, roll and yaw
[ "Returns", "a", "dictionary", "object", "to", "represent", "the", "current", "orientation", "in", "radians", "using", "the", "aircraft", "principal", "axes", "of", "pitch", "roll", "and", "yaw" ]
python
train
33.066667
Ezhil-Language-Foundation/open-tamil
solthiruthi/heuristics.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/heuristics.py#L85-L98
def apply(self,word,ctx=None): """ ignore ctx information right now """ chars = get_letters(word) flag = True #no error assumed reason = None #no reason prev_letter = None for char in chars: if prev_letter == char: flag = False break prev_letter = char # continue loop if not flag: reason = RepeatedLetters.reason return flag,reason
[ "def", "apply", "(", "self", ",", "word", ",", "ctx", "=", "None", ")", ":", "chars", "=", "get_letters", "(", "word", ")", "flag", "=", "True", "#no error assumed", "reason", "=", "None", "#no reason", "prev_letter", "=", "None", "for", "char", "in", "chars", ":", "if", "prev_letter", "==", "char", ":", "flag", "=", "False", "break", "prev_letter", "=", "char", "# continue loop", "if", "not", "flag", ":", "reason", "=", "RepeatedLetters", ".", "reason", "return", "flag", ",", "reason" ]
ignore ctx information right now
[ "ignore", "ctx", "information", "right", "now" ]
python
train
32.214286
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/ipv6/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/ipv6/__init__.py#L347-L368
def _set_proto_vrrpv3(self, v, load=False): """ Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container) If this variable is read-only (config: false) in the source YANG file, then _set_proto_vrrpv3 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_proto_vrrpv3() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """proto_vrrpv3 must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name="proto-vrrpv3", rest_name="protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)""", }) self.__proto_vrrpv3 = t if hasattr(self, '_set'): self._set()
[ "def", "_set_proto_vrrpv3", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "proto_vrrpv3", ".", "proto_vrrpv3", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"proto-vrrpv3\"", ",", "rest_name", "=", "\"protocol\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'vrrpv3GlobalConf'", ",", "u'display-when'", ":", "u'/vcsmode/vcs-mode = \"true\"'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'alt-name'", ":", "u'protocol'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vrrpv3'", ",", "defining_module", "=", "'brocade-vrrpv3'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"proto_vrrpv3 must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=proto_vrrpv3.proto_vrrpv3, is_container='container', presence=False, yang_name=\"proto-vrrpv3\", rest_name=\"protocol\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpv3GlobalConf', u'display-when': u'/vcsmode/vcs-mode = \"true\"', u'cli-incomplete-no': None, u'alt-name': u'protocol'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__proto_vrrpv3", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for proto_vrrpv3, mapped from YANG variable /rbridge_id/ipv6/proto_vrrpv3 (container) If this variable is read-only (config: false) in the source YANG file, then _set_proto_vrrpv3 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_proto_vrrpv3() directly.
[ "Setter", "method", "for", "proto_vrrpv3", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "ipv6", "/", "proto_vrrpv3", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_proto_vrrpv3", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_proto_vrrpv3", "()", "directly", "." ]
python
train
82.636364
Staffjoy/client_python
staffjoy/resource.py
https://github.com/Staffjoy/client_python/blob/e8811b0c06651a15e691c96cbfd41e7da4f7f213/staffjoy/resource.py#L210-L216
def _delay_for_ratelimits(cls, start): """If request was shorter than max request time, delay""" stop = datetime.now() duration_microseconds = (stop - start).microseconds if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS: time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds) / MICROSECONDS_PER_SECOND)
[ "def", "_delay_for_ratelimits", "(", "cls", ",", "start", ")", ":", "stop", "=", "datetime", ".", "now", "(", ")", "duration_microseconds", "=", "(", "stop", "-", "start", ")", ".", "microseconds", "if", "duration_microseconds", "<", "cls", ".", "REQUEST_TIME_MICROSECONDS", ":", "time", ".", "sleep", "(", "(", "cls", ".", "REQUEST_TIME_MICROSECONDS", "-", "duration_microseconds", ")", "/", "MICROSECONDS_PER_SECOND", ")" ]
If request was shorter than max request time, delay
[ "If", "request", "was", "shorter", "than", "max", "request", "time", "delay" ]
python
train
54.714286
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L2214-L2233
def _check_type_x_is_y(self, node, left, operator, right): """Check for expressions like type(x) == Y.""" left_func = utils.safe_infer(left.func) if not ( isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME ): return if operator in ("is", "is not") and _is_one_arg_pos_call(right): right_func = utils.safe_infer(right.func) if ( isinstance(right_func, astroid.ClassDef) and right_func.qname() == TYPE_QNAME ): # type(x) == type(a) right_arg = utils.safe_infer(right.args[0]) if not isinstance(right_arg, LITERAL_NODE_TYPES): # not e.g. type(x) == type([]) return self.add_message("unidiomatic-typecheck", node=node)
[ "def", "_check_type_x_is_y", "(", "self", ",", "node", ",", "left", ",", "operator", ",", "right", ")", ":", "left_func", "=", "utils", ".", "safe_infer", "(", "left", ".", "func", ")", "if", "not", "(", "isinstance", "(", "left_func", ",", "astroid", ".", "ClassDef", ")", "and", "left_func", ".", "qname", "(", ")", "==", "TYPE_QNAME", ")", ":", "return", "if", "operator", "in", "(", "\"is\"", ",", "\"is not\"", ")", "and", "_is_one_arg_pos_call", "(", "right", ")", ":", "right_func", "=", "utils", ".", "safe_infer", "(", "right", ".", "func", ")", "if", "(", "isinstance", "(", "right_func", ",", "astroid", ".", "ClassDef", ")", "and", "right_func", ".", "qname", "(", ")", "==", "TYPE_QNAME", ")", ":", "# type(x) == type(a)", "right_arg", "=", "utils", ".", "safe_infer", "(", "right", ".", "args", "[", "0", "]", ")", "if", "not", "isinstance", "(", "right_arg", ",", "LITERAL_NODE_TYPES", ")", ":", "# not e.g. type(x) == type([])", "return", "self", ".", "add_message", "(", "\"unidiomatic-typecheck\"", ",", "node", "=", "node", ")" ]
Check for expressions like type(x) == Y.
[ "Check", "for", "expressions", "like", "type", "(", "x", ")", "==", "Y", "." ]
python
test
42.45
hsolbrig/PyShEx
pyshex/prefixlib.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/prefixlib.py#L82-L98
def add_to_object(self, target: object, override: bool = False) -> int: """ Add the bindings to the target object :param target: target to add to :param override: override existing bindings if they are of type Namespace :return: number of items actually added """ nret = 0 for k, v in self: key = k.upper() exists = hasattr(target, key) if not exists or (override and isinstance(getattr(target, k), (Namespace, _RDFNamespace))): setattr(target, k, v) nret += 1 else: print(f"Warning: {key} is already defined in namespace {target}. Not overridden") return nret
[ "def", "add_to_object", "(", "self", ",", "target", ":", "object", ",", "override", ":", "bool", "=", "False", ")", "->", "int", ":", "nret", "=", "0", "for", "k", ",", "v", "in", "self", ":", "key", "=", "k", ".", "upper", "(", ")", "exists", "=", "hasattr", "(", "target", ",", "key", ")", "if", "not", "exists", "or", "(", "override", "and", "isinstance", "(", "getattr", "(", "target", ",", "k", ")", ",", "(", "Namespace", ",", "_RDFNamespace", ")", ")", ")", ":", "setattr", "(", "target", ",", "k", ",", "v", ")", "nret", "+=", "1", "else", ":", "print", "(", "f\"Warning: {key} is already defined in namespace {target}. Not overridden\"", ")", "return", "nret" ]
Add the bindings to the target object :param target: target to add to :param override: override existing bindings if they are of type Namespace :return: number of items actually added
[ "Add", "the", "bindings", "to", "the", "target", "object", ":", "param", "target", ":", "target", "to", "add", "to", ":", "param", "override", ":", "override", "existing", "bindings", "if", "they", "are", "of", "type", "Namespace", ":", "return", ":", "number", "of", "items", "actually", "added" ]
python
train
41.941176
salu133445/pypianoroll
pypianoroll/multitrack.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L486-L538
def merge_tracks(self, track_indices=None, mode='sum', program=0, is_drum=False, name='merged', remove_merged=False): """ Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if mode not in ('max', 'sum', 'any'): raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") merged = self[track_indices].get_merged_pianoroll(mode) merged_track = Track(merged, program, is_drum, name) self.append_track(merged_track) if remove_merged: self.remove_tracks(track_indices)
[ "def", "merge_tracks", "(", "self", ",", "track_indices", "=", "None", ",", "mode", "=", "'sum'", ",", "program", "=", "0", ",", "is_drum", "=", "False", ",", "name", "=", "'merged'", ",", "remove_merged", "=", "False", ")", ":", "if", "mode", "not", "in", "(", "'max'", ",", "'sum'", ",", "'any'", ")", ":", "raise", "ValueError", "(", "\"`mode` must be one of {'max', 'sum', 'any'}.\"", ")", "merged", "=", "self", "[", "track_indices", "]", ".", "get_merged_pianoroll", "(", "mode", ")", "merged_track", "=", "Track", "(", "merged", ",", "program", ",", "is_drum", ",", "name", ")", "self", ".", "append_track", "(", "merged_track", ")", "if", "remove_merged", ":", "self", ".", "remove_tracks", "(", "track_indices", ")" ]
Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set
[ "Merge", "pianorolls", "of", "the", "tracks", "specified", "by", "track_indices", ".", "The", "merged", "track", "will", "have", "program", "number", "as", "given", "by", "program", "and", "drum", "indicator", "as", "given", "by", "is_drum", ".", "The", "merged", "track", "will", "be", "appended", "at", "the", "end", "of", "the", "track", "list", "." ]
python
train
43.471698
python-diamond/Diamond
src/collectors/entropy/entropy.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/entropy/entropy.py#L20-L28
def get_default_config(self): """ Returns the default collector settings """ config = super(EntropyStatCollector, self).get_default_config() config.update({ 'path': 'entropy' }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "EntropyStatCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'entropy'", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
28.333333