Dataset schema (each record below repeats these fields, in this order):

repo              string (length 7 to 55)
path              string (length 4 to 223)
url               string (length 87 to 315)
code              string (length 75 to 104k)
code_tokens       list
docstring         string (length 1 to 46.9k)
docstring_tokens  list
language          string (1 distinct value)
partition         string (3 distinct values: train/valid/test)
avg_line_len      float64 (7.91 to 980)
tommikaikkonen/prettyprinter
prettyprinter/prettyprinter.py
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L462-L544
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.

    :param type: the type to register the pretty printer for, or a ``str``
                 to indicate the module and name, e.g.:
                 ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
                      and returns a boolean indicating if the value should be
                      handled by the registered pretty printer.

    Only one of ``type`` and ``predicate`` may be supplied. That means that
    ``predicate`` will be run on unregistered types only.

    The decorated function must accept exactly two positional arguments:

    - ``value`` to pretty print, and
    - ``ctx``, a context value.

    Here's an example of the pretty printer for OrderedDict:

    .. code:: python

        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call

        @register_pretty(OrderedDict)
        def pretty_ordereddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))
    """

    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )

    if type is not None and predicate is not None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument, "
            "but not both"
        )

    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )

    def decorator(fn):
        sig = inspect.signature(fn)

        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )

        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn

    return decorator
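A minimal usage sketch (``Point`` is a hypothetical class introduced here for illustration; ``register_pretty``, ``pretty_call`` and ``pformat`` are the library's public API):

```python
from prettyprinter import register_pretty, pretty_call, pformat

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

@register_pretty(Point)
def pretty_point(value, ctx):
    # Render as a constructor call: Point(x=1, y=2)
    return pretty_call(ctx, Point, x=value.x, y=value.y)

print(pformat(Point(1, 2)))
```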
[ "def", "register_pretty", "(", "type", "=", "None", ",", "predicate", "=", "None", ")", ":", "if", "type", "is", "None", "and", "predicate", "is", "None", ":", "raise", "ValueError", "(", "\"You must provide either the 'type' or 'predicate' argument.\"", ")", "if", "type", "is", "not", "None", "and", "predicate", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You must provide either the 'type' or 'predicate' argument,\"", "\"but not both\"", ")", "if", "predicate", "is", "not", "None", ":", "if", "not", "callable", "(", "predicate", ")", ":", "raise", "ValueError", "(", "\"Expected a callable for 'predicate', got {}\"", ".", "format", "(", "repr", "(", "predicate", ")", ")", ")", "def", "decorator", "(", "fn", ")", ":", "sig", "=", "inspect", ".", "signature", "(", "fn", ")", "value", "=", "None", "ctx", "=", "None", "try", ":", "sig", ".", "bind", "(", "value", ",", "ctx", ")", "except", "TypeError", ":", "fnname", "=", "'{}.{}'", ".", "format", "(", "fn", ".", "__module__", ",", "fn", ".", "__qualname__", ")", "raise", "ValueError", "(", "\"Functions decorated with register_pretty must accept \"", "\"exactly two positional parameters: 'value' and 'ctx'. \"", "\"The function signature for {} was not compatible.\"", ".", "format", "(", "fnname", ")", ")", "if", "type", ":", "if", "isinstance", "(", "type", ",", "str", ")", ":", "# We don't wrap this with _run_pretty,", "# so that when we register this printer with an actual", "# class, we can call register_pretty(cls)(fn)", "_DEFERRED_DISPATCH_BY_NAME", "[", "type", "]", "=", "fn", "else", ":", "pretty_dispatch", ".", "register", "(", "type", ",", "partial", "(", "_run_pretty", ",", "fn", ")", ")", "else", ":", "assert", "callable", "(", "predicate", ")", "_PREDICATE_REGISTRY", ".", "append", "(", "(", "predicate", ",", "fn", ")", ")", "return", "fn", "return", "decorator" ]
Returns a decorator that registers the decorated function as the pretty printer for instances of ``type``. :param type: the type to register the pretty printer for, or a ``str`` to indicate the module and name, e.g.: ``'collections.Counter'``. :param predicate: a predicate function that takes one argument and returns a boolean indicating if the value should be handled by the registered pretty printer. Only one of ``type`` and ``predicate`` may be supplied. That means that ``predicate`` will be run on unregistered types only. The decorated function must accept exactly two positional arguments: - ``value`` to pretty print, and - ``ctx``, a context value. Here's an example of the pretty printer for OrderedDict: .. code:: python from collections import OrderedDict from prettyprinter import register_pretty, pretty_call @register_pretty(OrderedDict) def pretty_orderreddict(value, ctx): return pretty_call(ctx, OrderedDict, list(value.items()))
[ "Returns", "a", "decorator", "that", "registers", "the", "decorated", "function", "as", "the", "pretty", "printer", "for", "instances", "of", "type", "." ]
python
train
33.698795
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L900-L904
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
    """Updates an IPsecSiteConnection."""
    return self.put(
        self.ipsec_site_connection_path % (ipsecsite_conn), body=body
    )
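A hedged usage sketch (the connection UUID, update body, and keystone v2-style credentials are placeholders, not values from the source):

```python
from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')
body = {'ipsec_site_connection': {'admin_state_up': False}}
neutron.update_ipsec_site_connection('IPSEC-CONN-UUID', body=body)
```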
[ "def", "update_ipsec_site_connection", "(", "self", ",", "ipsecsite_conn", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "self", ".", "ipsec_site_connection_path", "%", "(", "ipsecsite_conn", ")", ",", "body", "=", "body", ")" ]
Updates an IPsecSiteConnection.
[ "Updates", "an", "IPsecSiteConnection", "." ]
python
train
43.4
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L540-L549
def dictlist_convert_to_float(dict_list: Iterable[Dict], key: str) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, convert
    (in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
    """
    for d in dict_list:
        try:
            d[key] = float(d[key])
        except ValueError:
            d[key] = None
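A quick worked example of the in-place conversion:

```python
rows = [{'v': '3.5'}, {'v': 'n/a'}, {'v': '7'}]
dictlist_convert_to_float(rows, 'v')
print(rows)  # [{'v': 3.5}, {'v': None}, {'v': 7.0}]
```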
[ "def", "dictlist_convert_to_float", "(", "dict_list", ":", "Iterable", "[", "Dict", "]", ",", "key", ":", "str", ")", "->", "None", ":", "for", "d", "in", "dict_list", ":", "try", ":", "d", "[", "key", "]", "=", "float", "(", "d", "[", "key", "]", ")", "except", "ValueError", ":", "d", "[", "key", "]", "=", "None" ]
Process an iterable of dictionaries. For each dictionary ``d``, convert (in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
[ "Process", "an", "iterable", "of", "dictionaries", ".", "For", "each", "dictionary", "d", "convert", "(", "in", "place", ")", "d", "[", "key", "]", "to", "a", "float", ".", "If", "that", "fails", "convert", "it", "to", "None", "." ]
python
train
36
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2304-L2325
def PlayWaveFile(filePath: str = r'C:\Windows\Media\notify.wav', isAsync: bool = False, isLoop: bool = False) -> bool:
    """
    Call PlaySound from Win32.
    filePath: str, if empty, stop playing the current sound.
    isAsync: bool, if True, the sound is played asynchronously and returns immediately.
    isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.
    Return bool, True if it succeeds, otherwise False.
    """
    if filePath:
        SND_ASYNC = 0x0001
        SND_NODEFAULT = 0x0002
        SND_LOOP = 0x0008
        SND_FILENAME = 0x20000
        flags = SND_NODEFAULT | SND_FILENAME
        if isAsync:
            flags |= SND_ASYNC
        if isLoop:
            flags |= SND_LOOP
            flags |= SND_ASYNC
        return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(filePath), ctypes.c_void_p(0), flags))
    else:
        return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(0), ctypes.c_void_p(0), 0))
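A usage sketch (Windows only; notify.wav ships with Windows, and the three-second delay is arbitrary):

```python
import time
import uiautomation as auto

# Start looping playback asynchronously, then stop it after three seconds.
auto.PlayWaveFile(r'C:\Windows\Media\notify.wav', isAsync=True, isLoop=True)
time.sleep(3)
auto.PlayWaveFile(None)  # an empty filePath stops the current sound
```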
[ "def", "PlayWaveFile", "(", "filePath", ":", "str", "=", "r'C:\\Windows\\Media\\notify.wav'", ",", "isAsync", ":", "bool", "=", "False", ",", "isLoop", ":", "bool", "=", "False", ")", "->", "bool", ":", "if", "filePath", ":", "SND_ASYNC", "=", "0x0001", "SND_NODEFAULT", "=", "0x0002", "SND_LOOP", "=", "0x0008", "SND_FILENAME", "=", "0x20000", "flags", "=", "SND_NODEFAULT", "|", "SND_FILENAME", "if", "isAsync", ":", "flags", "|=", "SND_ASYNC", "if", "isLoop", ":", "flags", "|=", "SND_LOOP", "flags", "|=", "SND_ASYNC", "return", "bool", "(", "ctypes", ".", "windll", ".", "winmm", ".", "PlaySoundW", "(", "ctypes", ".", "c_wchar_p", "(", "filePath", ")", ",", "ctypes", ".", "c_void_p", "(", "0", ")", ",", "flags", ")", ")", "else", ":", "return", "bool", "(", "ctypes", ".", "windll", ".", "winmm", ".", "PlaySoundW", "(", "ctypes", ".", "c_wchar_p", "(", "0", ")", ",", "ctypes", ".", "c_void_p", "(", "0", ")", ",", "0", ")", ")" ]
Call PlaySound from Win32. filePath: str, if emtpy, stop playing the current sound. isAsync: bool, if True, the sound is played asynchronously and returns immediately. isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True. Return bool, True if succeed otherwise False.
[ "Call", "PlaySound", "from", "Win32", ".", "filePath", ":", "str", "if", "emtpy", "stop", "playing", "the", "current", "sound", ".", "isAsync", ":", "bool", "if", "True", "the", "sound", "is", "played", "asynchronously", "and", "returns", "immediately", ".", "isLoop", ":", "bool", "if", "True", "the", "sound", "plays", "repeatedly", "until", "PlayWaveFile", "(", "None", ")", "is", "called", "again", "must", "also", "set", "isAsync", "to", "True", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", "." ]
python
valid
45.045455
owncloud/pyocclient
owncloud/owncloud.py
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1440-L1467
def get_config(self):
    """Returns ownCloud config information
    :returns: array of tuples (key, value) for each information
        e.g. [('version', '1.7'), ('website', 'ownCloud'),
        ('host', 'cloud.example.com'), ('contact', ''), ('ssl', 'false')]
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    path = 'config'
    res = self._make_ocs_request(
        'GET',
        '',
        path
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree)
        values = []

        element = tree.find('data')
        if element is not None:
            keys = ['version', 'website', 'host', 'contact', 'ssl']
            for key in keys:
                text = element.find(key).text or ''
                values.append(text)
            return zip(keys, values)
        else:
            return None
    raise HTTPResponseError(res)
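A hedged usage sketch (server URL and credentials are placeholders; note that on Python 3 the returned ``zip`` is a lazy iterator, so materialize it with ``list()``):

```python
import owncloud

oc = owncloud.Client('https://cloud.example.com')
oc.login('user', 'password')
config = oc.get_config()
print(list(config) if config is not None else None)
# e.g. [('version', '1.7'), ('website', 'ownCloud'), ...]
```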
[ "def", "get_config", "(", "self", ")", ":", "path", "=", "'config'", "res", "=", "self", ".", "_make_ocs_request", "(", "'GET'", ",", "''", ",", "path", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "values", "=", "[", "]", "element", "=", "tree", ".", "find", "(", "'data'", ")", "if", "element", "is", "not", "None", ":", "keys", "=", "[", "'version'", ",", "'website'", ",", "'host'", ",", "'contact'", ",", "'ssl'", "]", "for", "key", "in", "keys", ":", "text", "=", "element", ".", "find", "(", "key", ")", ".", "text", "or", "''", "values", ".", "append", "(", "text", ")", "return", "zip", "(", "keys", ",", "values", ")", "else", ":", "return", "None", "raise", "HTTPResponseError", "(", "res", ")" ]
Returns ownCloud config information :returns: array of tuples (key, value) for each information e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'), ('contact', ''), ('ssl', 'false')] :raises: HTTPResponseError in case an HTTP error status was returned
[ "Returns", "ownCloud", "config", "information", ":", "returns", ":", "array", "of", "tuples", "(", "key", "value", ")", "for", "each", "information", "e", ".", "g", ".", "[", "(", "version", "1", ".", "7", ")", "(", "website", "ownCloud", ")", "(", "host", "cloud", ".", "example", ".", "com", ")", "(", "contact", ")", "(", "ssl", "false", ")", "]", ":", "raises", ":", "HTTPResponseError", "in", "case", "an", "HTTP", "error", "status", "was", "returned" ]
python
train
36.107143
CartoDB/cartoframes
cartoframes/context.py
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/context.py#L934-L969
def _geom_type(self, source):
    """gets geometry type(s) of specified layer"""
    if isinstance(source, AbstractLayer):
        query = source.orig_query
    else:
        query = 'SELECT * FROM "{table}"'.format(table=source)
    resp = self.sql_client.send(
        utils.minify_sql((
            'SELECT',
            '  CASE WHEN ST_GeometryType(the_geom)',
            '    in (\'ST_Point\', \'ST_MultiPoint\')',
            '  THEN \'point\'',
            '  WHEN ST_GeometryType(the_geom)',
            '    in (\'ST_LineString\', \'ST_MultiLineString\')',
            '  THEN \'line\'',
            '  WHEN ST_GeometryType(the_geom)',
            '    in (\'ST_Polygon\', \'ST_MultiPolygon\')',
            '  THEN \'polygon\'',
            '  ELSE null END AS geom_type,',
            '  count(*) as cnt',
            'FROM ({query}) AS _wrap',
            'WHERE the_geom IS NOT NULL',
            'GROUP BY 1',
            'ORDER BY 2 DESC',
        )).format(query=query),
        **DEFAULT_SQL_ARGS)
    if resp['total_rows'] > 1:
        warn('There are multiple geometry types in {query}: '
             '{geoms}. Styling by `{common_geom}`, the most common'.format(
                 query=query,
                 geoms=','.join(g['geom_type'] for g in resp['rows']),
                 common_geom=resp['rows'][0]['geom_type']))
    elif resp['total_rows'] == 0:
        raise ValueError('No geometry for layer. Check all layer tables '
                         'and queries to ensure there are geometries.')
    return resp['rows'][0]['geom_type']
[ "def", "_geom_type", "(", "self", ",", "source", ")", ":", "if", "isinstance", "(", "source", ",", "AbstractLayer", ")", ":", "query", "=", "source", ".", "orig_query", "else", ":", "query", "=", "'SELECT * FROM \"{table}\"'", ".", "format", "(", "table", "=", "source", ")", "resp", "=", "self", ".", "sql_client", ".", "send", "(", "utils", ".", "minify_sql", "(", "(", "'SELECT'", ",", "' CASE WHEN ST_GeometryType(the_geom)'", ",", "' in (\\'ST_Point\\', \\'ST_MultiPoint\\')'", ",", "' THEN \\'point\\''", ",", "' WHEN ST_GeometryType(the_geom)'", ",", "' in (\\'ST_LineString\\', \\'ST_MultiLineString\\')'", ",", "' THEN \\'line\\''", ",", "' WHEN ST_GeometryType(the_geom)'", ",", "' in (\\'ST_Polygon\\', \\'ST_MultiPolygon\\')'", ",", "' THEN \\'polygon\\''", ",", "' ELSE null END AS geom_type,'", ",", "' count(*) as cnt'", ",", "'FROM ({query}) AS _wrap'", ",", "'WHERE the_geom IS NOT NULL'", ",", "'GROUP BY 1'", ",", "'ORDER BY 2 DESC'", ",", ")", ")", ".", "format", "(", "query", "=", "query", ")", ",", "*", "*", "DEFAULT_SQL_ARGS", ")", "if", "resp", "[", "'total_rows'", "]", ">", "1", ":", "warn", "(", "'There are multiple geometry types in {query}: '", "'{geoms}. Styling by `{common_geom}`, the most common'", ".", "format", "(", "query", "=", "query", ",", "geoms", "=", "','", ".", "join", "(", "g", "[", "'geom_type'", "]", "for", "g", "in", "resp", "[", "'rows'", "]", ")", ",", "common_geom", "=", "resp", "[", "'rows'", "]", "[", "0", "]", "[", "'geom_type'", "]", ")", ")", "elif", "resp", "[", "'total_rows'", "]", "==", "0", ":", "raise", "ValueError", "(", "'No geometry for layer. Check all layer tables '", "'and queries to ensure there are geometries.'", ")", "return", "resp", "[", "'rows'", "]", "[", "0", "]", "[", "'geom_type'", "]" ]
gets geometry type(s) of specified layer
[ "gets", "geometry", "type", "(", "s", ")", "of", "specified", "layer" ]
python
train
48.527778
inveniosoftware/kwalitee
kwalitee/kwalitee.py
https://github.com/inveniosoftware/kwalitee/blob/9124f8f55b15547fef08c6c43cabced314e70674/kwalitee/kwalitee.py#L92-L126
def _check_1st_line(line, **kwargs):
    """First line check.

    Check that the first line has a known component name followed by
    a colon and then a short description of the commit.

    :param line: first line
    :type line: str
    :param components: list of known component names
    :type components: list
    :param max_first_line: maximum length of the first line
    :type max_first_line: int
    :return: errors as in (code, line number, *args)
    :rtype: list
    """
    components = kwargs.get("components", ())
    max_first_line = kwargs.get("max_first_line", 50)

    errors = []
    lineno = 1
    if len(line) > max_first_line:
        errors.append(("M190", lineno, max_first_line, len(line)))

    if line.endswith("."):
        errors.append(("M191", lineno))

    if ':' not in line:
        errors.append(("M110", lineno))
    else:
        component, msg = line.split(':', 1)
        if component not in components:
            errors.append(("M111", lineno, component))

    return errors
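A worked example, calling the (private) checker directly; the component list and commit subject are made up:

```python
errors = _check_1st_line("search: improve fuzzy match ranking.",
                         components=("auth", "ui"),
                         max_first_line=50)
print(errors)
# [('M191', 1), ('M111', 1, 'search')]  # trailing period; unknown component
```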
[ "def", "_check_1st_line", "(", "line", ",", "*", "*", "kwargs", ")", ":", "components", "=", "kwargs", ".", "get", "(", "\"components\"", ",", "(", ")", ")", "max_first_line", "=", "kwargs", ".", "get", "(", "\"max_first_line\"", ",", "50", ")", "errors", "=", "[", "]", "lineno", "=", "1", "if", "len", "(", "line", ")", ">", "max_first_line", ":", "errors", ".", "append", "(", "(", "\"M190\"", ",", "lineno", ",", "max_first_line", ",", "len", "(", "line", ")", ")", ")", "if", "line", ".", "endswith", "(", "\".\"", ")", ":", "errors", ".", "append", "(", "(", "\"M191\"", ",", "lineno", ")", ")", "if", "':'", "not", "in", "line", ":", "errors", ".", "append", "(", "(", "\"M110\"", ",", "lineno", ")", ")", "else", ":", "component", ",", "msg", "=", "line", ".", "split", "(", "':'", ",", "1", ")", "if", "component", "not", "in", "components", ":", "errors", ".", "append", "(", "(", "\"M111\"", ",", "lineno", ",", "component", ")", ")", "return", "errors" ]
First line check. Check that the first line has a known component name followed by a colon and then a short description of the commit. :param line: first line :type line: str :param components: list of known component names :type line: list :param max_first_line: maximum length of the first line :type max_first_line: int :return: errors as in (code, line number, *args) :rtype: list
[ "First", "line", "check", "." ]
python
train
28
andreafrancia/trash-cli
trashcli/put.py
https://github.com/andreafrancia/trash-cli/blob/5abecd53e1d84f2a5fd3fc60d2f5d71e518826c5/trashcli/put.py#L273-L308
def describe(path):
    """
    Return a textual description of the file pointed by this path.
    Options:
     - "symbolic link"
     - "directory"
     - "'.' directory"
     - "'..' directory"
     - "regular file"
     - "regular empty file"
     - "non existent"
     - "entry"
    """
    if os.path.islink(path):
        return 'symbolic link'
    elif os.path.isdir(path):
        if path == '.':
            return 'directory'
        elif path == '..':
            return 'directory'
        else:
            if os.path.basename(path) == '.':
                return "'.' directory"
            elif os.path.basename(path) == '..':
                return "'..' directory"
            else:
                return 'directory'
    elif os.path.isfile(path):
        if os.path.getsize(path) == 0:
            return 'regular empty file'
        else:
            return 'regular file'
    elif not os.path.exists(path):
        return 'non existent'
    else:
        return 'entry'
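A quick usage sketch (assuming ``describe`` is imported from ``trashcli.put``; the temp file name is arbitrary):

```python
import os
from trashcli.put import describe

open('empty.txt', 'w').close()
print(describe('empty.txt'))  # 'regular empty file'
print(describe('.'))          # 'directory'
print(describe('missing'))    # 'non existent'
os.remove('empty.txt')
```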
[ "def", "describe", "(", "path", ")", ":", "if", "os", ".", "path", ".", "islink", "(", "path", ")", ":", "return", "'symbolic link'", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "if", "path", "==", "'.'", ":", "return", "'directory'", "elif", "path", "==", "'..'", ":", "return", "'directory'", "else", ":", "if", "os", ".", "path", ".", "basename", "(", "path", ")", "==", "'.'", ":", "return", "\"'.' directory\"", "elif", "os", ".", "path", ".", "basename", "(", "path", ")", "==", "'..'", ":", "return", "\"'..' directory\"", "else", ":", "return", "'directory'", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "if", "os", ".", "path", ".", "getsize", "(", "path", ")", "==", "0", ":", "return", "'regular empty file'", "else", ":", "return", "'regular file'", "elif", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "'non existent'", "else", ":", "return", "'entry'" ]
Return a textual description of the file pointed by this path. Options: - "symbolic link" - "directory" - "'.' directory" - "'..' directory" - "regular file" - "regular empty file" - "non existent" - "entry"
[ "Return", "a", "textual", "description", "of", "the", "file", "pointed", "by", "this", "path", ".", "Options", ":", "-", "symbolic", "link", "-", "directory", "-", ".", "directory", "-", "..", "directory", "-", "regular", "file", "-", "regular", "empty", "file", "-", "non", "existent", "-", "entry" ]
python
valid
26.555556
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L171-L183
def get_pp_name(self):
    '''Determine the pseudopotential names from the output'''
    ppnames = []
    # Find the number of atom types
    natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])
    # Find the pseudopotential names
    with open(self.outputf) as fp:
        for line in fp:
            if "PseudoPot. #" in line:
                ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
                if len(ppnames) == natomtypes:
                    return Value(scalars=ppnames)
    raise Exception('Could not find %i pseudopotential names' % natomtypes)
[ "def", "get_pp_name", "(", "self", ")", ":", "ppnames", "=", "[", "]", "# Find the number of atom types", "natomtypes", "=", "int", "(", "self", ".", "_get_line", "(", "'number of atomic types'", ",", "self", ".", "outputf", ")", ".", "split", "(", ")", "[", "5", "]", ")", "# Find the pseudopotential names", "with", "open", "(", "self", ".", "outputf", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "if", "\"PseudoPot. #\"", "in", "line", ":", "ppnames", ".", "append", "(", "Scalar", "(", "value", "=", "next", "(", "fp", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ".", "rstrip", "(", ")", ")", ")", "if", "len", "(", "ppnames", ")", "==", "natomtypes", ":", "return", "Value", "(", "scalars", "=", "ppnames", ")", "raise", "Exception", "(", "'Could not find %i pseudopotential names'", "%", "natomtypes", ")" ]
Determine the pseudopotential names from the output
[ "Determine", "the", "pseudopotential", "names", "from", "the", "output" ]
python
train
50
google/textfsm
textfsm/clitable.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/clitable.py#L288-L313
def _ParseCmdItem(self, cmd_input, template_file=None):
    """Creates Texttable with output of command.

    Args:
      cmd_input: String, Device response.
      template_file: File object, template to parse with.

    Returns:
      TextTable containing command output.

    Raises:
      CliTableError: A template was not found for the given command.
    """
    # Build FSM machine from the template.
    fsm = textfsm.TextFSM(template_file)
    if not self._keys:
        self._keys = set(fsm.GetValuesByAttrib('Key'))

    # Pass raw data through FSM.
    table = texttable.TextTable()
    table.header = fsm.header

    # Fill TextTable from record entries.
    for record in fsm.ParseText(cmd_input):
        table.Append(record)
    return table
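The TextFSM step at the heart of this method can be exercised directly; a small sketch with a made-up inline template:

```python
import io
import textfsm

template = io.StringIO(
    'Value NAME (\\S+)\n'
    '\n'
    'Start\n'
    '  ^hostname ${NAME} -> Record\n')
fsm = textfsm.TextFSM(template)
print(fsm.header)                           # ['NAME']
print(fsm.ParseText('hostname router1\n'))  # [['router1']]
```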
[ "def", "_ParseCmdItem", "(", "self", ",", "cmd_input", ",", "template_file", "=", "None", ")", ":", "# Build FSM machine from the template.", "fsm", "=", "textfsm", ".", "TextFSM", "(", "template_file", ")", "if", "not", "self", ".", "_keys", ":", "self", ".", "_keys", "=", "set", "(", "fsm", ".", "GetValuesByAttrib", "(", "'Key'", ")", ")", "# Pass raw data through FSM.", "table", "=", "texttable", ".", "TextTable", "(", ")", "table", ".", "header", "=", "fsm", ".", "header", "# Fill TextTable from record entries.", "for", "record", "in", "fsm", ".", "ParseText", "(", "cmd_input", ")", ":", "table", ".", "Append", "(", "record", ")", "return", "table" ]
Creates Texttable with output of command. Args: cmd_input: String, Device response. template_file: File object, template to parse with. Returns: TextTable containing command output. Raises: CliTableError: A template was not found for the given command.
[ "Creates", "Texttable", "with", "output", "of", "command", "." ]
python
train
27.923077
UCL-INGI/INGInious
base-containers/base/inginious/feedback.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/base-containers/base/inginious/feedback.py#L108-L116
def tag(value):
    """ Add a tag with generated id.
    :param value: everything working with the str() function
    """
    rdict = load_feedback()
    tests = rdict.setdefault("tests", {})
    tests["*auto-tag-" + str(hash(str(value)))] = str(value)
    save_feedback(rdict)
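A usage sketch (assuming the module is importable as ``inginious.feedback`` inside a grading container, as its path in this repo suggests):

```python
from inginious import feedback

feedback.tag("needs-review")  # stored under tests["*auto-tag-<hash>"]
feedback.tag(42)              # any value accepted by str() works
```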
[ "def", "tag", "(", "value", ")", ":", "rdict", "=", "load_feedback", "(", ")", "tests", "=", "rdict", ".", "setdefault", "(", "\"tests\"", ",", "{", "}", ")", "tests", "[", "\"*auto-tag-\"", "+", "str", "(", "hash", "(", "str", "(", "value", ")", ")", ")", "]", "=", "str", "(", "value", ")", "save_feedback", "(", "rdict", ")" ]
Add a tag with generated id. :param value: everything working with the str() function
[ "Add", "a", "tag", "with", "generated", "id", ".", ":", "param", "value", ":", "everything", "working", "with", "the", "str", "()", "function" ]
python
train
30.333333
aiortc/aioice
aioice/ice.py
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L656-L661
def check_state(self, pair, state):
    """
    Updates the state of a check.
    """
    self.__log_info('Check %s %s -> %s', pair, pair.state, state)
    pair.state = state
[ "def", "check_state", "(", "self", ",", "pair", ",", "state", ")", ":", "self", ".", "__log_info", "(", "'Check %s %s -> %s'", ",", "pair", ",", "pair", ".", "state", ",", "state", ")", "pair", ".", "state", "=", "state" ]
Updates the state of a check.
[ "Updates", "the", "state", "of", "a", "check", "." ]
python
train
31.5
SmileyChris/easy-thumbnails
easy_thumbnails/files.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/files.py#L461-L488
def get_existing_thumbnail(self, thumbnail_options, high_resolution=False):
    """
    Return a ``ThumbnailFile`` containing an existing thumbnail for a set
    of thumbnail options, or ``None`` if not found.
    """
    thumbnail_options = self.get_options(thumbnail_options)
    names = [
        self.get_thumbnail_name(
            thumbnail_options, transparent=False,
            high_resolution=high_resolution)]
    transparent_name = self.get_thumbnail_name(
        thumbnail_options, transparent=True,
        high_resolution=high_resolution)
    if transparent_name not in names:
        names.append(transparent_name)

    for filename in names:
        exists = self.thumbnail_exists(filename)
        if exists:
            thumbnail_file = ThumbnailFile(
                name=filename, storage=self.thumbnail_storage,
                thumbnail_options=thumbnail_options)
            if settings.THUMBNAIL_CACHE_DIMENSIONS:
                # If this wasn't local storage, exists will be a thumbnail
                # instance so we can store the image dimensions now to save
                # a future potential query.
                thumbnail_file.set_image_dimensions(exists)
            return thumbnail_file
[ "def", "get_existing_thumbnail", "(", "self", ",", "thumbnail_options", ",", "high_resolution", "=", "False", ")", ":", "thumbnail_options", "=", "self", ".", "get_options", "(", "thumbnail_options", ")", "names", "=", "[", "self", ".", "get_thumbnail_name", "(", "thumbnail_options", ",", "transparent", "=", "False", ",", "high_resolution", "=", "high_resolution", ")", "]", "transparent_name", "=", "self", ".", "get_thumbnail_name", "(", "thumbnail_options", ",", "transparent", "=", "True", ",", "high_resolution", "=", "high_resolution", ")", "if", "transparent_name", "not", "in", "names", ":", "names", ".", "append", "(", "transparent_name", ")", "for", "filename", "in", "names", ":", "exists", "=", "self", ".", "thumbnail_exists", "(", "filename", ")", "if", "exists", ":", "thumbnail_file", "=", "ThumbnailFile", "(", "name", "=", "filename", ",", "storage", "=", "self", ".", "thumbnail_storage", ",", "thumbnail_options", "=", "thumbnail_options", ")", "if", "settings", ".", "THUMBNAIL_CACHE_DIMENSIONS", ":", "# If this wasn't local storage, exists will be a thumbnail", "# instance so we can store the image dimensions now to save", "# a future potential query.", "thumbnail_file", ".", "set_image_dimensions", "(", "exists", ")", "return", "thumbnail_file" ]
Return a ``ThumbnailFile`` containing an existing thumbnail for a set of thumbnail options, or ``None`` if not found.
[ "Return", "a", "ThumbnailFile", "containing", "an", "existing", "thumbnail", "for", "a", "set", "of", "thumbnail", "options", "or", "None", "if", "not", "found", "." ]
python
train
46.607143
benoitkugler/abstractDataLibrary
pyDLib/Core/controller.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/controller.py#L129-L153
def launch_background_job(self, job, on_error=None, on_success=None):
    """Launch the callable job in a background thread.
    Success or failure is handled by on_success and on_error, respectively.
    """
    if not self.main.mode_online:
        self.sortie_erreur_GUI(
            "Local mode activated. Can't run background task !")
        self.reset()
        return
    on_error = on_error or self.sortie_erreur_GUI
    on_success = on_success or self.sortie_standard_GUI

    def thread_end(r):
        on_success(r)
        self.update()

    def thread_error(r):
        on_error(r)
        self.reset()

    logging.info(
        f"Launching background task from interface {self.__class__.__name__} ...")
    th = threads.worker(job, thread_error, thread_end)
    self._add_thread(th)
[ "def", "launch_background_job", "(", "self", ",", "job", ",", "on_error", "=", "None", ",", "on_success", "=", "None", ")", ":", "if", "not", "self", ".", "main", ".", "mode_online", ":", "self", ".", "sortie_erreur_GUI", "(", "\"Local mode activated. Can't run background task !\"", ")", "self", ".", "reset", "(", ")", "return", "on_error", "=", "on_error", "or", "self", ".", "sortie_erreur_GUI", "on_success", "=", "on_success", "or", "self", ".", "sortie_standard_GUI", "def", "thread_end", "(", "r", ")", ":", "on_success", "(", "r", ")", "self", ".", "update", "(", ")", "def", "thread_error", "(", "r", ")", ":", "on_error", "(", "r", ")", "self", ".", "reset", "(", ")", "logging", ".", "info", "(", "f\"Launching background task from interface {self.__class__.__name__} ...\"", ")", "th", "=", "threads", ".", "worker", "(", "job", ",", "thread_error", ",", "thread_end", ")", "self", ".", "_add_thread", "(", "th", ")" ]
Launch the callable job in background thread. Succes or failure are controlled by on_error and on_success
[ "Launch", "the", "callable", "job", "in", "background", "thread", ".", "Succes", "or", "failure", "are", "controlled", "by", "on_error", "and", "on_success" ]
python
train
33.64
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_dont_trace.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_dont_trace.py#L84-L101
def clear_trace_filter_cache():
    '''
    Clear the trace filter cache.
    Call this after reloading.
    '''
    global should_trace_hook
    try:
        # Need to temporarily disable a hook because otherwise
        # _filename_to_ignored_lines.clear() will never complete.
        old_hook = should_trace_hook
        should_trace_hook = None

        # Clear the linecache
        linecache.clearcache()
        _filename_to_ignored_lines.clear()
    finally:
        should_trace_hook = old_hook
[ "def", "clear_trace_filter_cache", "(", ")", ":", "global", "should_trace_hook", "try", ":", "# Need to temporarily disable a hook because otherwise", "# _filename_to_ignored_lines.clear() will never complete.", "old_hook", "=", "should_trace_hook", "should_trace_hook", "=", "None", "# Clear the linecache", "linecache", ".", "clearcache", "(", ")", "_filename_to_ignored_lines", ".", "clear", "(", ")", "finally", ":", "should_trace_hook", "=", "old_hook" ]
Clear the trace filter cache. Call this after reloading.
[ "Clear", "the", "trace", "filter", "cache", ".", "Call", "this", "after", "reloading", "." ]
python
train
27.111111
hazelcast/hazelcast-python-client
hazelcast/proxy/transactional_multi_map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/transactional_multi_map.py#L50-L59
def remove_all(self, key):
    """
    Transactional implementation of :func:`MultiMap.remove_all(key)
    <hazelcast.proxy.multi_map.MultiMap.remove_all>`

    :param key: (object), the key of the entries to remove.
    :return: (list), the collection of the values associated with the key.
    """
    check_not_none(key, "key can't be none")
    return self._encode_invoke(transactional_multi_map_remove_codec,
                               key=self._to_data(key))
[ "def", "remove_all", "(", "self", ",", "key", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be none\"", ")", "return", "self", ".", "_encode_invoke", "(", "transactional_multi_map_remove_codec", ",", "key", "=", "self", ".", "_to_data", "(", "key", ")", ")" ]
Transactional implementation of :func:`MultiMap.remove_all(key) <hazelcast.proxy.multi_map.MultiMap.remove_all>` :param key: (object), the key of the entries to remove. :return: (list), the collection of the values associated with the key.
[ "Transactional", "implementation", "of", ":", "func", ":", "MultiMap", ".", "remove_all", "(", "key", ")", "<hazelcast", ".", "proxy", ".", "multi_map", ".", "MultiMap", ".", "remove_all", ">" ]
python
train
46
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4431-L4453
def pretty_print_counters(counters):
    """print counters hierarchically.

    Each counter is a pair of a string and a number.
    The string can have slashes, meaning that the number also counts towards
    each prefix.  e.g.  "parameters/trainable" counts towards both
    "parameters" and "parameters/trainable".

    Args:
      counters: a list of (string, number) pairs

    Returns:
      a string
    """
    totals = collections.defaultdict(int)
    for (name, val) in counters:
        prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name]
        for p in prefixes:
            totals[p] += val
    parts = []
    for name, val in sorted(six.iteritems(totals)):
        parts.append(" " * name.count("/") + "%s: %.3g" % (name, val))
    return "\n".join(parts)
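A worked example of the prefix rollup (the function relies on the module's ``xrange``/``six`` compatibility imports being in scope):

```python
counters = [("parameters/trainable", 10), ("parameters/frozen", 5)]
print(pretty_print_counters(counters))
# parameters: 15
#  parameters/frozen: 5
#  parameters/trainable: 10
```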
[ "def", "pretty_print_counters", "(", "counters", ")", ":", "totals", "=", "collections", ".", "defaultdict", "(", "int", ")", "for", "(", "name", ",", "val", ")", "in", "counters", ":", "prefixes", "=", "[", "name", "[", ":", "i", "]", "for", "i", "in", "xrange", "(", "len", "(", "name", ")", ")", "if", "name", "[", "i", "]", "==", "\"/\"", "]", "+", "[", "name", "]", "for", "p", "in", "prefixes", ":", "totals", "[", "p", "]", "+=", "val", "parts", "=", "[", "]", "for", "name", ",", "val", "in", "sorted", "(", "six", ".", "iteritems", "(", "totals", ")", ")", ":", "parts", ".", "append", "(", "\" \"", "*", "name", ".", "count", "(", "\"/\"", ")", "+", "\"%s: %.3g\"", "%", "(", "name", ",", "val", ")", ")", "return", "\"\\n\"", ".", "join", "(", "parts", ")" ]
print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string
[ "print", "counters", "hierarchically", "." ]
python
train
31.521739
JukeboxPipeline/jukebox-core
src/jukeboxcore/reftrack.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L1579-L1588
def fetch_reference_restriction(self):
    """Fetch whether referencing is restricted

    :returns: True, if referencing is restricted
    :rtype: :class:`bool`
    :raises: None
    """
    inter = self.get_refobjinter()
    restricted = self.status() is not None
    return restricted or inter.fetch_action_restriction(self, 'reference')
[ "def", "fetch_reference_restriction", "(", "self", ",", ")", ":", "inter", "=", "self", ".", "get_refobjinter", "(", ")", "restricted", "=", "self", ".", "status", "(", ")", "is", "not", "None", "return", "restricted", "or", "inter", ".", "fetch_action_restriction", "(", "self", ",", "'reference'", ")" ]
Fetch whether referencing is restricted :returns: True, if referencing is restricted :rtype: :class:`bool` :raises: None
[ "Fetch", "whether", "referencing", "is", "restricted" ]
python
train
36.5
ianmiell/shutit
shutit_pexpect.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L671-L691
def whoami(self, note=None, loglevel=logging.DEBUG):
    """Returns the current user by executing "whoami".

    @param note: See send()

    @return: the output of "whoami"
    @rtype: string
    """
    shutit = self.shutit
    shutit.handle_note(note)
    res = self.send_and_get_output(' command whoami',
                                   echo=False,
                                   loglevel=loglevel).strip()
    if res == '':
        res = self.send_and_get_output(' command id -u -n',
                                       echo=False,
                                       loglevel=loglevel).strip()
    shutit.handle_note_after(note=note)
    return res
[ "def", "whoami", "(", "self", ",", "note", "=", "None", ",", "loglevel", "=", "logging", ".", "DEBUG", ")", ":", "shutit", "=", "self", ".", "shutit", "shutit", ".", "handle_note", "(", "note", ")", "res", "=", "self", ".", "send_and_get_output", "(", "' command whoami'", ",", "echo", "=", "False", ",", "loglevel", "=", "loglevel", ")", ".", "strip", "(", ")", "if", "res", "==", "''", ":", "res", "=", "self", ".", "send_and_get_output", "(", "' command id -u -n'", ",", "echo", "=", "False", ",", "loglevel", "=", "loglevel", ")", ".", "strip", "(", ")", "shutit", ".", "handle_note_after", "(", "note", "=", "note", ")", "return", "res" ]
Returns the current user by executing "whoami". @param note: See send() @return: the output of "whoami" @rtype: string
[ "Returns", "the", "current", "user", "by", "executing", "whoami", "." ]
python
train
30.190476
saltstack/salt
salt/modules/mount.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mount.py#L1585-L1639
def _filesystems(config='/etc/filesystems', leading_key=True):
    '''
    Return the contents of the filesystems in an OrderedDict

    config
        File containing filesystem information

    leading_key
        True    return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded)
                OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }}))
        False   return dictionary keyed by 'name' and value as dictionary with all keys, values (name included)
                OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })})
    '''
    ret = OrderedDict()
    lines = []
    parsing_block = False
    if not os.path.isfile(config) or 'AIX' not in __grains__['kernel']:
        return ret

    # read in block of filesystems, block starts with '/' till empty line
    with salt.utils.files.fopen(config) as ifile:
        for line in ifile:
            line = salt.utils.stringutils.to_unicode(line)
            # skip till first entry
            if not line.startswith('/') and not parsing_block:
                continue

            if line.startswith('/'):
                parsing_block = True
                lines.append(line)
            elif not line.split():
                parsing_block = False
                try:
                    entry = _FileSystemsEntry.dict_from_lines(
                        lines,
                        _FileSystemsEntry.compatibility_keys)
                    lines = []
                    if 'opts' in entry:
                        entry['opts'] = entry['opts'].split(',')
                    while entry['name'] in ret:
                        entry['name'] += '_'

                    if leading_key:
                        ret[entry.pop('name')] = entry
                    else:
                        ret[entry['name']] = entry
                except _FileSystemsEntry.ParseError:
                    pass
            else:
                lines.append(line)

    return ret
[ "def", "_filesystems", "(", "config", "=", "'/etc/filesystems'", ",", "leading_key", "=", "True", ")", ":", "ret", "=", "OrderedDict", "(", ")", "lines", "=", "[", "]", "parsing_block", "=", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "config", ")", "or", "'AIX'", "not", "in", "__grains__", "[", "'kernel'", "]", ":", "return", "ret", "# read in block of filesystems, block starts with '/' till empty line", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "config", ")", "as", "ifile", ":", "for", "line", "in", "ifile", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", "# skip till first entry", "if", "not", "line", ".", "startswith", "(", "'/'", ")", "and", "not", "parsing_block", ":", "continue", "if", "line", ".", "startswith", "(", "'/'", ")", ":", "parsing_block", "=", "True", "lines", ".", "append", "(", "line", ")", "elif", "not", "line", ".", "split", "(", ")", ":", "parsing_block", "=", "False", "try", ":", "entry", "=", "_FileSystemsEntry", ".", "dict_from_lines", "(", "lines", ",", "_FileSystemsEntry", ".", "compatibility_keys", ")", "lines", "=", "[", "]", "if", "'opts'", "in", "entry", ":", "entry", "[", "'opts'", "]", "=", "entry", "[", "'opts'", "]", ".", "split", "(", "','", ")", "while", "entry", "[", "'name'", "]", "in", "ret", ":", "entry", "[", "'name'", "]", "+=", "'_'", "if", "leading_key", ":", "ret", "[", "entry", ".", "pop", "(", "'name'", ")", "]", "=", "entry", "else", ":", "ret", "[", "entry", "[", "'name'", "]", "]", "=", "entry", "except", "_FileSystemsEntry", ".", "ParseError", ":", "pass", "else", ":", "lines", ".", "append", "(", "line", ")", "return", "ret" ]
Return the contents of the filesystems in an OrderedDict config File containing filesystem infomation leading_key True return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded) OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }})) False return dictionary keyed by 'name' and value as dictionary with all keys, values (name included) OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })})
[ "Return", "the", "contents", "of", "the", "filesystems", "in", "an", "OrderedDict" ]
python
train
35.690909
aio-libs/aiomysql
aiomysql/cursors.py
https://github.com/aio-libs/aiomysql/blob/131fb9f914739ff01a24b402d29bfd719f2d1a8b/aiomysql/cursors.py#L323-L363
async def callproc(self, procname, args=()):
    """Execute stored procedure procname with args

    Compatibility warning: PEP-249 specifies that any modified
    parameters must be returned. This is currently impossible
    as they are only available by storing them in a server
    variable and then retrieved by a query. Since stored
    procedures return zero or more result sets, there is no
    reliable way to get at OUT or INOUT parameters via callproc.
    The server variables are named @_procname_n, where procname
    is the parameter above and n is the position of the parameter
    (from zero). Once all result sets generated by the procedure
    have been fetched, you can issue a SELECT @_procname_0, ...
    query using .execute() to get any OUT or INOUT values.

    Compatibility warning: The act of calling a stored procedure
    itself creates an empty result set. This appears after any
    result sets generated by the procedure. This is non-standard
    behavior with respect to the DB-API. Be sure to use nextset()
    to advance through all result sets; otherwise you may get
    disconnected.

    :param procname: ``str``, name of procedure to execute on server
    :param args: sequence of parameters to use with the procedure
    :returns: the original args.
    """
    conn = self._get_db()
    if self._echo:
        logger.info("CALL %s", procname)
        logger.info("%r", args)

    for index, arg in enumerate(args):
        q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
        await self._query(q)
        await self.nextset()

    _args = ','.join('@_%s_%d' % (procname, i) for i in range(len(args)))
    q = "CALL %s(%s)" % (procname, _args)
    await self._query(q)
    self._executed = q
    return args
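A usage sketch (``my_proc`` is a hypothetical stored procedure with one IN and one OUT parameter; connection details are placeholders). It follows the docstring's advice: drain all result sets with ``nextset()``, then read the OUT value back from the ``@_procname_n`` server variable:

```python
import asyncio
import aiomysql

async def main():
    conn = await aiomysql.connect(host='127.0.0.1', user='root',
                                  password='secret', db='test')
    async with conn.cursor() as cur:
        await cur.callproc('my_proc', (7, 0))
        # Drain the procedure's result sets before touching variables.
        while await cur.nextset():
            pass
        await cur.execute('SELECT @_my_proc_1')  # OUT param at position 1
        print(await cur.fetchone())
    conn.close()

asyncio.run(main())
```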
[ "async", "def", "callproc", "(", "self", ",", "procname", ",", "args", "=", "(", ")", ")", ":", "conn", "=", "self", ".", "_get_db", "(", ")", "if", "self", ".", "_echo", ":", "logger", ".", "info", "(", "\"CALL %s\"", ",", "procname", ")", "logger", ".", "info", "(", "\"%r\"", ",", "args", ")", "for", "index", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "q", "=", "\"SET @_%s_%d=%s\"", "%", "(", "procname", ",", "index", ",", "conn", ".", "escape", "(", "arg", ")", ")", "await", "self", ".", "_query", "(", "q", ")", "await", "self", ".", "nextset", "(", ")", "_args", "=", "','", ".", "join", "(", "'@_%s_%d'", "%", "(", "procname", ",", "i", ")", "for", "i", "in", "range", "(", "len", "(", "args", ")", ")", ")", "q", "=", "\"CALL %s(%s)\"", "%", "(", "procname", ",", "_args", ")", "await", "self", ".", "_query", "(", "q", ")", "self", ".", "_executed", "=", "q", "return", "args" ]
Execute stored procedure procname with args Compatibility warning: PEP-249 specifies that any modified parameters must be returned. This is currently impossible as they are only available by storing them in a server variable and then retrieved by a query. Since stored procedures return zero or more result sets, there is no reliable way to get at OUT or INOUT parameters via callproc. The server variables are named @_procname_n, where procname is the parameter above and n is the position of the parameter (from zero). Once all result sets generated by the procedure have been fetched, you can issue a SELECT @_procname_0, ... query using .execute() to get any OUT or INOUT values. Compatibility warning: The act of calling a stored procedure itself creates an empty result set. This appears after any result sets generated by the procedure. This is non-standard behavior with respect to the DB-API. Be sure to use nextset() to advance through all result sets; otherwise you may get disconnected. :param procname: ``str``, name of procedure to execute on server :param args: `sequence of parameters to use with procedure :returns: the original args.
[ "Execute", "stored", "procedure", "procname", "with", "args" ]
python
train
45.317073
benoitbryon/rst2rst
rst2rst/writer.py
https://github.com/benoitbryon/rst2rst/blob/976eef709aacb1facc8dca87cf7032f01d53adfe/rst2rst/writer.py#L156-L159
def indent(self, levels, first_line=None):
    """Increase indentation by ``levels`` levels."""
    self._indentation_levels.append(levels)
    self._indent_first_line.append(first_line)
[ "def", "indent", "(", "self", ",", "levels", ",", "first_line", "=", "None", ")", ":", "self", ".", "_indentation_levels", ".", "append", "(", "levels", ")", "self", ".", "_indent_first_line", ".", "append", "(", "first_line", ")" ]
Increase indentation by ``levels`` levels.
[ "Increase", "indentation", "by", "levels", "levels", "." ]
python
train
48.75
pandas-dev/pandas
pandas/core/computation/scope.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L243-L260
def update(self, level):
    """Update the current scope by going back `level` levels.

    Parameters
    ----------
    level : int or None, optional, default None
    """
    sl = level + 1

    # add sl frames to the scope starting with the
    # most distant and overwriting with more current
    # makes sure that we can capture variable scope
    stack = inspect.stack()

    try:
        self._get_vars(stack[:sl], scopes=['locals'])
    finally:
        del stack[:], stack
[ "def", "update", "(", "self", ",", "level", ")", ":", "sl", "=", "level", "+", "1", "# add sl frames to the scope starting with the", "# most distant and overwriting with more current", "# makes sure that we can capture variable scope", "stack", "=", "inspect", ".", "stack", "(", ")", "try", ":", "self", ".", "_get_vars", "(", "stack", "[", ":", "sl", "]", ",", "scopes", "=", "[", "'locals'", "]", ")", "finally", ":", "del", "stack", "[", ":", "]", ",", "stack" ]
Update the current scope by going back `level` levels. Parameters ---------- level : int or None, optional, default None
[ "Update", "the", "current", "scope", "by", "going", "back", "level", "levels", "." ]
python
train
28.944444
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/inputhook.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/inputhook.py#L232-L261
def enable_qt4(self, app=None):
    """Enable event loop integration with PyQt4.

    Parameters
    ----------
    app : Qt Application, optional.
        Running application to use.  If not given, we probe Qt for an
        existing application object, and create a new one if none is found.

    Notes
    -----
    This method sets the PyOS_InputHook for PyQt4, which allows
    PyQt4 to integrate with terminal based applications like
    IPython.

    If ``app`` is not given we probe for an existing one, and return it if
    found.  If no existing app is found, we create an :class:`QApplication`
    as follows::

        from PyQt4 import QtGui
        app = QtGui.QApplication(sys.argv)
    """
    from IPython.lib.inputhookqt4 import create_inputhook_qt4
    app, inputhook_qt4 = create_inputhook_qt4(self, app)
    self.set_inputhook(inputhook_qt4)

    self._current_gui = GUI_QT4
    app._in_event_loop = True
    self._apps[GUI_QT4] = app
    return app
[ "def", "enable_qt4", "(", "self", ",", "app", "=", "None", ")", ":", "from", "IPython", ".", "lib", ".", "inputhookqt4", "import", "create_inputhook_qt4", "app", ",", "inputhook_qt4", "=", "create_inputhook_qt4", "(", "self", ",", "app", ")", "self", ".", "set_inputhook", "(", "inputhook_qt4", ")", "self", ".", "_current_gui", "=", "GUI_QT4", "app", ".", "_in_event_loop", "=", "True", "self", ".", "_apps", "[", "GUI_QT4", "]", "=", "app", "return", "app" ]
Enable event loop integration with PyQt4. Parameters ---------- app : Qt Application, optional. Running application to use. If not given, we probe Qt for an existing application object, and create a new one if none is found. Notes ----- This methods sets the PyOS_InputHook for PyQt4, which allows the PyQt4 to integrate with terminal based applications like IPython. If ``app`` is not given we probe for an existing one, and return it if found. If no existing app is found, we create an :class:`QApplication` as follows:: from PyQt4 import QtCore app = QtGui.QApplication(sys.argv)
[ "Enable", "event", "loop", "integration", "with", "PyQt4", ".", "Parameters", "----------", "app", ":", "Qt", "Application", "optional", ".", "Running", "application", "to", "use", ".", "If", "not", "given", "we", "probe", "Qt", "for", "an", "existing", "application", "object", "and", "create", "a", "new", "one", "if", "none", "is", "found", "." ]
python
test
35.033333
acutesoftware/AIKIF
aikif/core_data.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L158-L166
def links_to(self, other, tpe):
    """ adds a link from this thing to other thing using type
    (is_a, has_a, uses, contains, part_of)
    """
    if self.check_type(tpe):
        self.links.append([other, tpe])
    else:
        raise Exception('aikif.core_data cannot process this object type')
[ "def", "links_to", "(", "self", ",", "other", ",", "tpe", ")", ":", "if", "self", ".", "check_type", "(", "tpe", ")", ":", "self", ".", "links", ".", "append", "(", "[", "other", ",", "tpe", "]", ")", "else", ":", "raise", "Exception", "(", "'aikif.core_data cannot process this object type'", ")" ]
adds a link from this thing to other thing using type (is_a, has_a, uses, contains, part_of)
[ "adds", "a", "link", "from", "this", "thing", "to", "other", "thing", "using", "type", "(", "is_a", "has_a", "uses", "contains", "part_of", ")" ]
python
train
36.222222
DocNow/twarc
twarc/decorators.py
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/decorators.py#L96-L108
def catch_gzip_errors(f):
    """
    A decorator to handle gzip encoding errors which have been known to
    happen during hydration.
    """
    def new_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except requests.exceptions.ContentDecodingError as e:
            log.warning("caught gzip error: %s", e)
            self.connect()
            return f(self, *args, **kwargs)
    return new_f
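A minimal sketch of the reconnect-and-retry pattern the decorator implements (``Hydrator`` is a hypothetical class; only a ``connect()`` method and a decorated instance method are assumed):

```python
import requests

class Hydrator:
    def __init__(self):
        self.connect()

    def connect(self):
        # Rebuild the HTTP session after a decoding failure.
        self.session = requests.Session()

    @catch_gzip_errors
    def get(self, url):
        return self.session.get(url)
```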
[ "def", "catch_gzip_errors", "(", "f", ")", ":", "def", "new_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "requests", ".", "exceptions", ".", "ContentDecodingError", "as", "e", ":", "log", ".", "warning", "(", "\"caught gzip error: %s\"", ",", "e", ")", "self", ".", "connect", "(", ")", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_f" ]
A decorator to handle gzip encoding errors which have been known to happen during hydration.
[ "A", "decorator", "to", "handle", "gzip", "encoding", "errors", "which", "have", "been", "known", "to", "happen", "during", "hydration", "." ]
python
train
32.846154
scanny/python-pptx
pptx/chart/xmlwriter.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xmlwriter.py#L1676-L1687
def yVal_xml(self):
    """
    Return the ``<c:yVal>`` element for this series as unicode text. This
    element contains the Y values for this series.
    """
    return self._yVal_tmpl.format(**{
        'nsdecls': '',
        'numRef_xml': self.numRef_xml(
            self._series.y_values_ref, self._series.number_format,
            self._series.y_values
        ),
    })
[ "def", "yVal_xml", "(", "self", ")", ":", "return", "self", ".", "_yVal_tmpl", ".", "format", "(", "*", "*", "{", "'nsdecls'", ":", "''", ",", "'numRef_xml'", ":", "self", ".", "numRef_xml", "(", "self", ".", "_series", ".", "y_values_ref", ",", "self", ".", "_series", ".", "number_format", ",", "self", ".", "_series", ".", "y_values", ")", ",", "}", ")" ]
Return the ``<c:yVal>`` element for this series as unicode text. This element contains the Y values for this series.
[ "Return", "the", "<c", ":", "yVal", ">", "element", "for", "this", "series", "as", "unicode", "text", ".", "This", "element", "contains", "the", "Y", "values", "for", "this", "series", "." ]
python
train
34.583333
happyleavesaoc/python-voobly
voobly/__init__.py
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L375-L378
def get_user_matches(session, user_id, from_timestamp=None, limit=None):
    """Get recent matches by user."""
    return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format(
        session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit)
[ "def", "get_user_matches", "(", "session", ",", "user_id", ",", "from_timestamp", "=", "None", ",", "limit", "=", "None", ")", ":", "return", "get_recent_matches", "(", "session", ",", "'{}{}/{}/Matches/games/matches/user/{}/0'", ".", "format", "(", "session", ".", "auth", ".", "base_url", ",", "PROFILE_URL", ",", "user_id", ",", "user_id", ")", ",", "from_timestamp", ",", "limit", ")" ]
Get recent matches by user.
[ "Get", "recent", "matches", "by", "user", "." ]
python
train
70.5
cablehead/vanilla
vanilla/core.py
https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L327-L333
def throw_to(self, target, *a):
    self.ready.append((getcurrent(), ()))
    """
    if len(a) == 1 and isinstance(a[0], preserve_exception):
        return target.throw(a[0].typ, a[0].val, a[0].tb)
    """
    return target.throw(*a)
[ "def", "throw_to", "(", "self", ",", "target", ",", "*", "a", ")", ":", "self", ".", "ready", ".", "append", "(", "(", "getcurrent", "(", ")", ",", "(", ")", ")", ")", "return", "target", ".", "throw", "(", "*", "a", ")" ]
if len(a) == 1 and isinstance(a[0], preserve_exception): return target.throw(a[0].typ, a[0].val, a[0].tb)
[ "if", "len", "(", "a", ")", "==", "1", "and", "isinstance", "(", "a", "[", "0", "]", "preserve_exception", ")", ":", "return", "target", ".", "throw", "(", "a", "[", "0", "]", ".", "typ", "a", "[", "0", "]", ".", "val", "a", "[", "0", "]", ".", "tb", ")" ]
python
train
36.142857
ladybug-tools/ladybug
ladybug/futil.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L146-L171
def bat_to_sh(file_path):
    """Convert honeybee .bat file to .sh file.

    WARNING: This is a very simple function and doesn't handle any edge cases.
    """
    sh_file = file_path[:-4] + '.sh'
    with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf:
        outf.write('#!/usr/bin/env bash\n\n')
        for line in inf:
            # pass the path lines, etc to get to the commands
            if line.strip():
                continue
            else:
                break
        for line in inf:
            if line.startswith('echo'):
                continue
            modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
            outf.write(modified_line)

    print('bash file is created at:\n\t%s' % sh_file)
    # Heroku - Make command.sh executable
    st = os.stat(sh_file)
    os.chmod(sh_file, st.st_mode | 0o111)
    return sh_file
[ "def", "bat_to_sh", "(", "file_path", ")", ":", "sh_file", "=", "file_path", "[", ":", "-", "4", "]", "+", "'.sh'", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "inf", ",", "open", "(", "sh_file", ",", "'wb'", ")", "as", "outf", ":", "outf", ".", "write", "(", "'#!/usr/bin/env bash\\n\\n'", ")", "for", "line", "in", "inf", ":", "# pass the path lines, etc to get to the commands", "if", "line", ".", "strip", "(", ")", ":", "continue", "else", ":", "break", "for", "line", "in", "inf", ":", "if", "line", ".", "startswith", "(", "'echo'", ")", ":", "continue", "modified_line", "=", "line", ".", "replace", "(", "'c:\\\\radiance\\\\bin\\\\'", ",", "''", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "outf", ".", "write", "(", "modified_line", ")", "print", "(", "'bash file is created at:\\n\\t%s'", "%", "sh_file", ")", "# Heroku - Make command.sh executable", "st", "=", "os", ".", "stat", "(", "sh_file", ")", "os", ".", "chmod", "(", "sh_file", ",", "st", ".", "st_mode", "|", "0o111", ")", "return", "sh_file" ]
Convert honeybee .bat file to .sh file. WARNING: This is a very simple function and doesn't handle any edge cases.
[ "Convert", "honeybee", ".", "bat", "file", "to", ".", "sh", "file", "." ]
python
train
33.346154
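A quick illustration (not from the dataset) of the substitution bat_to_sh applies to each command line; the .bat line below is invented for the example:

line = 'c:\\radiance\\bin\\rpict -x 512 scene\\room.oct\n'
modified = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
print(modified)  # rpict -x 512 scene/room.oct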
andrenarchy/krypy
krypy/linsys.py
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/linsys.py#L897-L905
def operations(nsteps):
    '''Returns the number of operations needed for nsteps of GMRES'''
    return {'A': 1 + nsteps,
            'M': 2 + nsteps,
            'Ml': 2 + nsteps,
            'Mr': 1 + nsteps,
            'ip_B': 2 + nsteps + nsteps*(nsteps+1)/2,
            'axpy': 4 + 2*nsteps + nsteps*(nsteps+1)/2
            }
[ "def", "operations", "(", "nsteps", ")", ":", "return", "{", "'A'", ":", "1", "+", "nsteps", ",", "'M'", ":", "2", "+", "nsteps", ",", "'Ml'", ":", "2", "+", "nsteps", ",", "'Mr'", ":", "1", "+", "nsteps", ",", "'ip_B'", ":", "2", "+", "nsteps", "+", "nsteps", "*", "(", "nsteps", "+", "1", ")", "/", "2", ",", "'axpy'", ":", "4", "+", "2", "*", "nsteps", "+", "nsteps", "*", "(", "nsteps", "+", "1", ")", "/", "2", "}" ]
Returns the number of operations needed for nsteps of GMRES
[ "Returns", "the", "number", "of", "operations", "needed", "for", "nsteps", "of", "GMRES" ]
python
train
39.777778
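As a quick sanity check of the cost dictionary above, a hand computation for nsteps = 4 using the same formulas (hypothetical call, not from the repo):

nsteps = 4
ip_B = 2 + nsteps + nsteps * (nsteps + 1) / 2      # 16.0 inner products
axpy = 4 + 2 * nsteps + nsteps * (nsteps + 1) / 2  # 22.0 axpy operations
print(ip_B, axpy)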
bcbio/bcbio-nextgen
bcbio/structural/titancna.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/titancna.py#L86-L95
def _run_select_solution(ploidy_outdirs, work_dir, data):
    """Select optimal
    """
    out_file = os.path.join(work_dir, "optimalClusters.txt")
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            ploidy_inputs = " ".join(["--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs])
            cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}"
            do.run(cmd.format(**locals()), "TitanCNA: select optimal solution")
    return out_file
[ "def", "_run_select_solution", "(", "ploidy_outdirs", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"optimalClusters.txt\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "ploidy_inputs", "=", "\" \"", ".", "join", "(", "[", "\"--ploidyRun%s=%s\"", "%", "(", "p", ",", "d", ")", "for", "p", ",", "d", "in", "ploidy_outdirs", "]", ")", "cmd", "=", "\"titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"TitanCNA: select optimal solution\"", ")", "return", "out_file" ]
Select optimal
[ "Select", "optimal" ]
python
train
52.2
fake-name/WebRequest
WebRequest/WebRequestClass.py
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L279-L290
def getFileAndName(self, *args, **kwargs):
    '''
    Given a requested page (note: the arguments for this call are forwarded
    to getpage()), return the content at the target URL and the filename for
    the target content as a 2-tuple (pgctnt, hName).

    The filename specified in the content-disposition header is used, if
    present. Otherwise, the last section of the url path segment is treated
    as the filename.
    '''
    pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs)
    return pgctnt, hName
[ "def", "getFileAndName", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pgctnt", ",", "hName", ",", "mime", "=", "self", ".", "getFileNameMime", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "pgctnt", ",", "hName" ]
Given a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename.
[ "Give", "a", "requested", "page", "(", "note", ":", "the", "arguments", "for", "this", "call", "are", "forwarded", "to", "getpage", "()", ")", "return", "the", "content", "at", "the", "target", "URL", "and", "the", "filename", "for", "the", "target", "content", "as", "a", "2", "-", "tuple", "(", "pgctnt", "hName", ")", "for", "the", "content", "at", "the", "target", "URL", "." ]
python
train
43.75
Dallinger/Dallinger
dallinger/recruiters.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/recruiters.py#L885-L913
def open_recruitment(self, n=1):
    """Return initial experiment URL list.
    """
    logger.info("Multi recruitment running for {} participants".format(n))
    recruitments = []
    messages = {}
    remaining = n
    for recruiter, count in self.recruiters(n):
        if not count:
            break
        if recruiter.nickname in messages:
            result = recruiter.recruit(count)
            recruitments.extend(result)
        else:
            result = recruiter.open_recruitment(count)
            recruitments.extend(result["items"])
            messages[recruiter.nickname] = result["message"]

        remaining -= count
        if remaining <= 0:
            break

    logger.info((
        "Multi-recruited {} out of {} participants, "
        "using {} recruiters."
    ).format(n - remaining, n, len(messages)))

    return {"items": recruitments, "message": "\n".join(messages.values())}
[ "def", "open_recruitment", "(", "self", ",", "n", "=", "1", ")", ":", "logger", ".", "info", "(", "\"Multi recruitment running for {} participants\"", ".", "format", "(", "n", ")", ")", "recruitments", "=", "[", "]", "messages", "=", "{", "}", "remaining", "=", "n", "for", "recruiter", ",", "count", "in", "self", ".", "recruiters", "(", "n", ")", ":", "if", "not", "count", ":", "break", "if", "recruiter", ".", "nickname", "in", "messages", ":", "result", "=", "recruiter", ".", "recruit", "(", "count", ")", "recruitments", ".", "extend", "(", "result", ")", "else", ":", "result", "=", "recruiter", ".", "open_recruitment", "(", "count", ")", "recruitments", ".", "extend", "(", "result", "[", "\"items\"", "]", ")", "messages", "[", "recruiter", ".", "nickname", "]", "=", "result", "[", "\"message\"", "]", "remaining", "-=", "count", "if", "remaining", "<=", "0", ":", "break", "logger", ".", "info", "(", "(", "\"Multi-recruited {} out of {} participants, \"", "\"using {} recruiters.\"", ")", ".", "format", "(", "n", "-", "remaining", ",", "n", ",", "len", "(", "messages", ")", ")", ")", "return", "{", "\"items\"", ":", "recruitments", ",", "\"message\"", ":", "\"\\n\"", ".", "join", "(", "messages", ".", "values", "(", ")", ")", "}" ]
Return initial experiment URL list.
[ "Return", "initial", "experiment", "URL", "list", "." ]
python
train
34.448276
allenai/allennlp
allennlp/training/metrics/conll_coref_scores.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metrics/conll_coref_scores.py#L188-L205
def muc(clusters, mention_to_gold):
    """
    Counts the mentions in each predicted cluster which need to be re-allocated in
    order for each predicted cluster to be contained by the respective gold cluster.
    <http://aclweb.org/anthology/M/M95/M95-1005.pdf>
    """
    true_p, all_p = 0, 0
    for cluster in clusters:
        all_p += len(cluster) - 1
        true_p += len(cluster)
        linked = set()
        for mention in cluster:
            if mention in mention_to_gold:
                linked.add(mention_to_gold[mention])
            else:
                true_p -= 1
        true_p -= len(linked)
    return true_p, all_p
[ "def", "muc", "(", "clusters", ",", "mention_to_gold", ")", ":", "true_p", ",", "all_p", "=", "0", ",", "0", "for", "cluster", "in", "clusters", ":", "all_p", "+=", "len", "(", "cluster", ")", "-", "1", "true_p", "+=", "len", "(", "cluster", ")", "linked", "=", "set", "(", ")", "for", "mention", "in", "cluster", ":", "if", "mention", "in", "mention_to_gold", ":", "linked", ".", "add", "(", "mention_to_gold", "[", "mention", "]", ")", "else", ":", "true_p", "-=", "1", "true_p", "-=", "len", "(", "linked", ")", "return", "true_p", ",", "all_p" ]
Counts the mentions in each predicted cluster which need to be re-allocated in order for each predicted cluster to be contained by the respective gold cluster. <http://aclweb.org/anthology/M/M95/M95-1005.pdf>
[ "Counts", "the", "mentions", "in", "each", "predicted", "cluster", "which", "need", "to", "be", "re", "-", "allocated", "in", "order", "for", "each", "predicted", "cluster", "to", "be", "contained", "by", "the", "respective", "gold", "cluster", ".", "<http", ":", "//", "aclweb", ".", "org", "/", "anthology", "/", "M", "/", "M95", "/", "M95", "-", "1005", ".", "pdf", ">" ]
python
train
38.555556
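A small worked example of the MUC counts, calling the muc shown above with mentions as ints and an invented gold mapping:

clusters = [(1, 2, 3), (4, 5)]
mention_to_gold = {1: 'g1', 2: 'g1', 3: 'g2', 4: 'g1', 5: 'g1'}
# (1,2,3) spans two gold clusters: 3 mentions - 2 distinct gold ids = 1 true link
# (4,5) sits inside one gold cluster: 2 mentions - 1 gold id = 1 true link
print(muc(clusters, mention_to_gold))  # (2, 3), i.e. precision 2/3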
prompt-toolkit/pymux
pymux/commands/commands.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L211-L232
def select_window(pymux, variables):
    """
    Select a window. E.g:  select-window -t :3
    """
    window_id = variables['<target-window>']

    def invalid_window():
        raise CommandException('Invalid window: %s' % window_id)

    if window_id.startswith(':'):
        try:
            number = int(window_id[1:])
        except ValueError:
            invalid_window()
        else:
            w = pymux.arrangement.get_window_by_index(number)
            if w:
                pymux.arrangement.set_active_window(w)
            else:
                invalid_window()
    else:
        invalid_window()
[ "def", "select_window", "(", "pymux", ",", "variables", ")", ":", "window_id", "=", "variables", "[", "'<target-window>'", "]", "def", "invalid_window", "(", ")", ":", "raise", "CommandException", "(", "'Invalid window: %s'", "%", "window_id", ")", "if", "window_id", ".", "startswith", "(", "':'", ")", ":", "try", ":", "number", "=", "int", "(", "window_id", "[", "1", ":", "]", ")", "except", "ValueError", ":", "invalid_window", "(", ")", "else", ":", "w", "=", "pymux", ".", "arrangement", ".", "get_window_by_index", "(", "number", ")", "if", "w", ":", "pymux", ".", "arrangement", ".", "set_active_window", "(", "w", ")", "else", ":", "invalid_window", "(", ")", "else", ":", "invalid_window", "(", ")" ]
Select a window. E.g: select-window -t :3
[ "Select", "a", "window", ".", "E", ".", "g", ":", "select", "-", "window", "-", "t", ":", "3" ]
python
train
27
pandas-dev/pandas
pandas/core/groupby/groupby.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1457-L1462
def rolling(self, *args, **kwargs):
    """
    Return a rolling grouper, providing rolling functionality per group.
    """
    from pandas.core.window import RollingGroupby
    return RollingGroupby(self, *args, **kwargs)
[ "def", "rolling", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "window", "import", "RollingGroupby", "return", "RollingGroupby", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a rolling grouper, providing rolling functionality per group.
[ "Return", "a", "rolling", "grouper", "providing", "rolling", "functionality", "per", "group", "." ]
python
train
39.666667
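Typical usage of the hook above (illustrative data; requires pandas):

import pandas as pd

df = pd.DataFrame({'g': ['a', 'a', 'a', 'b', 'b'],
                   'x': [1.0, 2.0, 3.0, 10.0, 20.0]})
# Window-2 rolling mean, computed independently within each group.
print(df.groupby('g')['x'].rolling(2).mean())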
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L573-L601
def get_search_results(portal_type=None, uid=None, **kw):
    """Search the catalog and return the results

    :returns: Catalog search results
    :rtype: iterable
    """

    # If we have a UID, return the object immediately
    if uid is not None:
        logger.info("UID '%s' found, returning the object immediately" % uid)
        return u.to_list(get_object_by_uid(uid))

    # allow to search for the Plone Site with portal_type
    include_portal = False
    if u.to_string(portal_type) == "Plone Site":
        include_portal = True

    # The request may contain a list of portal_types, e.g.
    # `?portal_type=Document&portal_type=Plone Site`
    if "Plone Site" in u.to_list(req.get("portal_type")):
        include_portal = True

    # Build and execute a catalog query
    results = search(portal_type=portal_type, uid=uid, **kw)

    if include_portal:
        results = list(results) + u.to_list(get_portal())

    return results
[ "def", "get_search_results", "(", "portal_type", "=", "None", ",", "uid", "=", "None", ",", "*", "*", "kw", ")", ":", "# If we have an UID, return the object immediately", "if", "uid", "is", "not", "None", ":", "logger", ".", "info", "(", "\"UID '%s' found, returning the object immediately\"", "%", "uid", ")", "return", "u", ".", "to_list", "(", "get_object_by_uid", "(", "uid", ")", ")", "# allow to search search for the Plone Site with portal_type", "include_portal", "=", "False", "if", "u", ".", "to_string", "(", "portal_type", ")", "==", "\"Plone Site\"", ":", "include_portal", "=", "True", "# The request may contain a list of portal_types, e.g.", "# `?portal_type=Document&portal_type=Plone Site`", "if", "\"Plone Site\"", "in", "u", ".", "to_list", "(", "req", ".", "get", "(", "\"portal_type\"", ")", ")", ":", "include_portal", "=", "True", "# Build and execute a catalog query", "results", "=", "search", "(", "portal_type", "=", "portal_type", ",", "uid", "=", "uid", ",", "*", "*", "kw", ")", "if", "include_portal", ":", "results", "=", "list", "(", "results", ")", "+", "u", ".", "to_list", "(", "get_portal", "(", ")", ")", "return", "results" ]
Search the catalog and return the results :returns: Catalog search results :rtype: iterable
[ "Search", "the", "catalog", "and", "return", "the", "results" ]
python
train
32.034483
mallamanis/experimenter
experimenter/experimentlogger.py
https://github.com/mallamanis/experimenter/blob/2ed5ce85084cc47251ccba3aae0cb3431fbe4259/experimenter/experimentlogger.py#L46-L63
def record_results(self, results):
    """
    Record the results of this experiment, by updating the tag.

    :param results: A dictionary containing the results of the experiment.
    :type results: dict
    """
    repository = Repo(self.__repository_directory, search_parent_directories=True)
    for tag in repository.tags:
        if tag.name == self.__tag_name:
            tag_object = tag
            break
    else:
        raise Exception("Experiment tag has been deleted since experiment started")
    data = json.loads(tag_object.tag.message)
    data["results"] = results
    TagReference.create(repository, self.__tag_name, message=json.dumps(data),
                        ref=tag_object.tag.object, force=True)
    self.__results_recorded = True
[ "def", "record_results", "(", "self", ",", "results", ")", ":", "repository", "=", "Repo", "(", "self", ".", "__repository_directory", ",", "search_parent_directories", "=", "True", ")", "for", "tag", "in", "repository", ".", "tags", ":", "if", "tag", ".", "name", "==", "self", ".", "__tag_name", ":", "tag_object", "=", "tag", "break", "else", ":", "raise", "Exception", "(", "\"Experiment tag has been deleted since experiment started\"", ")", "data", "=", "json", ".", "loads", "(", "tag_object", ".", "tag", ".", "message", ")", "data", "[", "\"results\"", "]", "=", "results", "TagReference", ".", "create", "(", "repository", ",", "self", ".", "__tag_name", ",", "message", "=", "json", ".", "dumps", "(", "data", ")", ",", "ref", "=", "tag_object", ".", "tag", ".", "object", ",", "force", "=", "True", ")", "self", ".", "__results_recorded", "=", "True" ]
Record the results of this experiment, by updating the tag. :param results: A dictionary containing the results of the experiment. :type results: dict
[ "Record", "the", "results", "of", "this", "experiment", "by", "updating", "the", "tag", ".", ":", "param", "results", ":", "A", "dictionary", "containing", "the", "results", "of", "the", "experiment", ".", ":", "type", "results", ":", "dict" ]
python
valid
45.166667
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L285-L316
def get_datatype_str(self, element, length):
    '''get_datatype_str

    High-level api: Produce a string that indicates the data type of a node.

    Parameters
    ----------
    element : `Element`
        A node in model tree.
    length : `int`
        String length that has been consumed.

    Returns
    -------
    str
        A string that indicates the data type of a node.
    '''
    spaces = ' '*(self.get_width(element) - length)
    type_info = element.get('type')
    ret = ''
    if type_info == 'anyxml' or type_info == 'anydata':
        ret = spaces + '<{}>'.format(type_info)
    elif element.get('datatype') is not None:
        ret = spaces + element.get('datatype')
    if element.get('if-feature') is not None:
        return ret + ' {' + element.get('if-feature') + '}?'
    else:
        return ret
[ "def", "get_datatype_str", "(", "self", ",", "element", ",", "length", ")", ":", "spaces", "=", "' '", "*", "(", "self", ".", "get_width", "(", "element", ")", "-", "length", ")", "type_info", "=", "element", ".", "get", "(", "'type'", ")", "ret", "=", "''", "if", "type_info", "==", "'anyxml'", "or", "type_info", "==", "'anydata'", ":", "ret", "=", "spaces", "+", "'<{}>'", ".", "format", "(", "type_info", ")", "elif", "element", ".", "get", "(", "'datatype'", ")", "is", "not", "None", ":", "ret", "=", "spaces", "+", "element", ".", "get", "(", "'datatype'", ")", "if", "element", ".", "get", "(", "'if-feature'", ")", "is", "not", "None", ":", "return", "ret", "+", "' {'", "+", "element", ".", "get", "(", "'if-feature'", ")", "+", "'}?'", "else", ":", "return", "ret" ]
get_datatype_str

High-level api: Produce a string that indicates the data type of a node.

Parameters
----------
element : `Element`
    A node in model tree.
length : `int`
    String length that has been consumed.

Returns
-------
str
    A string that indicates the data type of a node.
[ "get_datatype_str" ]
python
train
28.03125
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py#L32-L45
def EnableNetworkInterfaces(
        self, interfaces, logger, dhclient_script=None):
    """Enable the list of network interfaces.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
      dhclient_script: string, the path to a dhclient script used by dhclient.
    """
    interfaces_to_up = [i for i in interfaces if i != 'eth0']
    if interfaces_to_up:
        logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
        self._WriteIfcfg(interfaces_to_up, logger)
        self._Ifup(interfaces_to_up, logger)
[ "def", "EnableNetworkInterfaces", "(", "self", ",", "interfaces", ",", "logger", ",", "dhclient_script", "=", "None", ")", ":", "interfaces_to_up", "=", "[", "i", "for", "i", "in", "interfaces", "if", "i", "!=", "'eth0'", "]", "if", "interfaces_to_up", ":", "logger", ".", "info", "(", "'Enabling the Ethernet interfaces %s.'", ",", "interfaces_to_up", ")", "self", ".", "_WriteIfcfg", "(", "interfaces_to_up", ",", "logger", ")", "self", ".", "_Ifup", "(", "interfaces_to_up", ",", "logger", ")" ]
Enable the list of network interfaces.

Args:
  interfaces: list of string, the output device names to enable.
  logger: logger object, used to write to SysLog and serial port.
  dhclient_script: string, the path to a dhclient script used by dhclient.
[ "Enable", "the", "list", "of", "network", "interfaces", "." ]
python
train
43.428571
draperjames/qtpandas
qtpandas/ui/fallback/easygui/boxes/base_boxes.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/ui/fallback/easygui/boxes/base_boxes.py#L828-L916
def fileopenbox(msg=None, title=None, default='*', filetypes=None, multiple=False):
    """
    A dialog to get a file name.

    **About the "default" argument**

    The "default" argument specifies a filepath that (normally)
    contains one or more wildcards. fileopenbox will display only
    files that match the default filepath. If omitted, defaults to
    "\*" (all files in the current directory).

    WINDOWS EXAMPLE::

        ...default="c:/myjunk/*.py"

    will open in directory c:\\myjunk\\ and show all Python files.

    WINDOWS EXAMPLE::

        ...default="c:/myjunk/test*.py"

    will open in directory c:\\myjunk\\ and show all Python files
    whose names begin with "test".

    Note that on Windows, fileopenbox automatically changes the path
    separator to the Windows path separator (backslash).

    **About the "filetypes" argument**

    If specified, it should contain a list of items, where each item is
    either:

    - a string containing a filemask          # e.g. "\*.txt"
    - a list of strings, where all of the strings except the last one
      are filemasks (each beginning with "\*.", such as "\*.txt" for
      text files, "\*.py" for Python files, etc.), and the last string
      contains a filetype description

    EXAMPLE::

        filetypes = ["*.css", ["*.htm", "*.html", "HTML files"]]

    .. note:: If the filetypes list does not contain ("All files","*"),
       it will be added.

    If the filetypes list does not contain a filemask that includes
    the extension of the "default" argument, it will be added.
    For example, if default="\*abc.py" and no filetypes argument was
    specified, then "\*.py" will automatically be added to the
    filetypes argument.

    :param str msg: the msg to be displayed.
    :param str title: the window title
    :param str default: filepath with wildcards
    :param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
    :param bool multiple: If true, more than one file can be selected
    :return: the name of a file, or None if user chose to cancel
    """
    localRoot = Tk()
    localRoot.withdraw()

    initialbase, initialfile, initialdir, filetypes = fileboxSetup(
        default, filetypes)

    # ------------------------------------------------------------
    # if initialfile contains no wildcards; we don't want an
    # initial file. It won't be used anyway.
    # Also: if initialbase is simply "*", we don't want an
    # initialfile; it is not doing any useful work.
    # ------------------------------------------------------------
    if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
        initialfile = None
    elif initialbase == "*":
        initialfile = None

    func = ut.tk_FileDialog.askopenfilenames if multiple else ut.tk_FileDialog.askopenfilename
    ret_val = func(parent=localRoot,
                   title=getFileDialogTitle(msg, title),
                   initialdir=initialdir,
                   initialfile=initialfile,
                   filetypes=filetypes
                   )

    if multiple:
        f = [os.path.normpath(x) for x in localRoot.tk.splitlist(ret_val)]
    else:
        f = os.path.normpath(ret_val)
    localRoot.destroy()

    if not f:
        return None
    return f
[ "def", "fileopenbox", "(", "msg", "=", "None", ",", "title", "=", "None", ",", "default", "=", "'*'", ",", "filetypes", "=", "None", ",", "multiple", "=", "False", ")", ":", "localRoot", "=", "Tk", "(", ")", "localRoot", ".", "withdraw", "(", ")", "initialbase", ",", "initialfile", ",", "initialdir", ",", "filetypes", "=", "fileboxSetup", "(", "default", ",", "filetypes", ")", "# ------------------------------------------------------------", "# if initialfile contains no wildcards; we don't want an", "# initial file. It won't be used anyway.", "# Also: if initialbase is simply \"*\", we don't want an", "# initialfile; it is not doing any useful work.", "# ------------------------------------------------------------", "if", "(", "initialfile", ".", "find", "(", "\"*\"", ")", "<", "0", ")", "and", "(", "initialfile", ".", "find", "(", "\"?\"", ")", "<", "0", ")", ":", "initialfile", "=", "None", "elif", "initialbase", "==", "\"*\"", ":", "initialfile", "=", "None", "func", "=", "ut", ".", "tk_FileDialog", ".", "askopenfilenames", "if", "multiple", "else", "ut", ".", "tk_FileDialog", ".", "askopenfilename", "ret_val", "=", "func", "(", "parent", "=", "localRoot", ",", "title", "=", "getFileDialogTitle", "(", "msg", ",", "title", ")", ",", "initialdir", "=", "initialdir", ",", "initialfile", "=", "initialfile", ",", "filetypes", "=", "filetypes", ")", "if", "multiple", ":", "f", "=", "[", "os", ".", "path", ".", "normpath", "(", "x", ")", "for", "x", "in", "localRoot", ".", "tk", ".", "splitlist", "(", "ret_val", ")", "]", "else", ":", "f", "=", "os", ".", "path", ".", "normpath", "(", "ret_val", ")", "localRoot", ".", "destroy", "(", ")", "if", "not", "f", ":", "return", "None", "return", "f" ]
A dialog to get a file name.

**About the "default" argument**

The "default" argument specifies a filepath that (normally) contains one or more wildcards. fileopenbox will display only files that match the default filepath. If omitted, defaults to "\*" (all files in the current directory).

WINDOWS EXAMPLE::

    ...default="c:/myjunk/*.py"

will open in directory c:\\myjunk\\ and show all Python files.

WINDOWS EXAMPLE::

    ...default="c:/myjunk/test*.py"

will open in directory c:\\myjunk\\ and show all Python files whose names begin with "test".

Note that on Windows, fileopenbox automatically changes the path separator to the Windows path separator (backslash).

**About the "filetypes" argument**

If specified, it should contain a list of items, where each item is either:

- a string containing a filemask          # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one are filemasks (each beginning with "\*.", such as "\*.txt" for text files, "\*.py" for Python files, etc.), and the last string contains a filetype description

EXAMPLE::

    filetypes = ["*.css", ["*.htm", "*.html", "HTML files"]]

.. note:: If the filetypes list does not contain ("All files","*"), it will be added.

If the filetypes list does not contain a filemask that includes the extension of the "default" argument, it will be added. For example, if default="\*abc.py" and no filetypes argument was specified, then "\*.py" will automatically be added to the filetypes argument.

:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel
[ "A", "dialog", "to", "get", "a", "file", "name", "." ]
python
train
34.966292
limix/limix-core
limix_core/covar/lowrank.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/covar/lowrank.py#L83-L90
def setCovariance(self, cov):
    """ makes lowrank approximation of cov """
    assert cov.shape[0]==self.dim, 'Dimension mismatch.'
    S, U = la.eigh(cov)
    U = U[:,::-1]
    S = S[::-1]
    _X = U[:, :self.rank] * sp.sqrt(S[:self.rank])
    self.X = _X
[ "def", "setCovariance", "(", "self", ",", "cov", ")", ":", "assert", "cov", ".", "shape", "[", "0", "]", "==", "self", ".", "dim", ",", "'Dimension mismatch.'", "S", ",", "U", "=", "la", ".", "eigh", "(", "cov", ")", "U", "=", "U", "[", ":", ",", ":", ":", "-", "1", "]", "S", "=", "S", "[", ":", ":", "-", "1", "]", "_X", "=", "U", "[", ":", ",", ":", "self", ".", "rank", "]", "*", "sp", ".", "sqrt", "(", "S", "[", ":", "self", ".", "rank", "]", ")", "self", ".", "X", "=", "_X" ]
makes lowrank approximation of cov
[ "makes", "lowrank", "approximation", "of", "cov" ]
python
train
34.875
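The same rank-k truncation in plain NumPy, as a sketch (the rank and the random PSD test matrix are invented for the example):

import numpy as np

rank = 2
A = np.random.randn(5, 5)
cov = A @ A.T                         # symmetric positive semi-definite
S, U = np.linalg.eigh(cov)            # eigenvalues in ascending order
U, S = U[:, ::-1], S[::-1]            # largest first, as in setCovariance
X = U[:, :rank] * np.sqrt(S[:rank])
print(np.linalg.norm(cov - X @ X.T))  # residual of the rank-2 approximation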
tensorflow/lucid
lucid/optvis/objectives.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L165-L170
def channel(layer, n_channel, batch=None):
    """Visualize a single channel"""
    if batch is None:
        return lambda T: tf.reduce_mean(T(layer)[..., n_channel])
    else:
        return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
[ "def", "channel", "(", "layer", ",", "n_channel", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "...", ",", "n_channel", "]", ")", "else", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "batch", ",", "...", ",", "n_channel", "]", ")" ]
Visualize a single channel
[ "Visualize", "a", "single", "channel" ]
python
train
38.5
RedHatInsights/insights-core
insights/core/plugins.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L414-L425
def adjust_for_length(self, key, r, kwargs):
    """
    Converts the response to a string and compares its length to a max
    length specified in settings. If the response is too long, an error
    is logged, and an abbreviated response is returned instead.
    """
    length = len(str(kwargs))
    if length > settings.defaults["max_detail_length"]:
        self._log_length_error(key, length)
        r["max_detail_length_error"] = length
        return r
    return kwargs
[ "def", "adjust_for_length", "(", "self", ",", "key", ",", "r", ",", "kwargs", ")", ":", "length", "=", "len", "(", "str", "(", "kwargs", ")", ")", "if", "length", ">", "settings", ".", "defaults", "[", "\"max_detail_length\"", "]", ":", "self", ".", "_log_length_error", "(", "key", ",", "length", ")", "r", "[", "\"max_detail_length_error\"", "]", "=", "length", "return", "r", "return", "kwargs" ]
Converts the response to a string and compares its length to a max length specified in settings. If the response is too long, an error is logged, and an abbreviated response is returned instead.
[ "Converts", "the", "response", "to", "a", "string", "and", "compares", "its", "length", "to", "a", "max", "length", "specified", "in", "settings", ".", "If", "the", "response", "is", "too", "long", "an", "error", "is", "logged", "and", "an", "abbreviated", "response", "is", "returned", "instead", "." ]
python
train
42.583333
typemytype/booleanOperations
Lib/booleanOperations/flatten.py
https://github.com/typemytype/booleanOperations/blob/b7d9fc95c155824662f4a0020e653c77b7723d24/Lib/booleanOperations/flatten.py#L419-L447
def _convertPointsToSegments(points, willBeReversed=False):
    """
    Compile points into InputSegment objects.
    """
    # get the last on curve
    previousOnCurve = None
    for point in reversed(points):
        if point.segmentType is not None:
            previousOnCurve = point.coordinates
            break
    assert previousOnCurve is not None
    # gather the segments
    offCurves = []
    segments = []
    for point in points:
        # off curve, hold.
        if point.segmentType is None:
            offCurves.append(point)
        else:
            segment = InputSegment(
                points=offCurves + [point],
                previousOnCurve=previousOnCurve,
                willBeReversed=willBeReversed
            )
            segments.append(segment)
            offCurves = []
            previousOnCurve = point.coordinates
    assert not offCurves
    return segments
[ "def", "_convertPointsToSegments", "(", "points", ",", "willBeReversed", "=", "False", ")", ":", "# get the last on curve", "previousOnCurve", "=", "None", "for", "point", "in", "reversed", "(", "points", ")", ":", "if", "point", ".", "segmentType", "is", "not", "None", ":", "previousOnCurve", "=", "point", ".", "coordinates", "break", "assert", "previousOnCurve", "is", "not", "None", "# gather the segments", "offCurves", "=", "[", "]", "segments", "=", "[", "]", "for", "point", "in", "points", ":", "# off curve, hold.", "if", "point", ".", "segmentType", "is", "None", ":", "offCurves", ".", "append", "(", "point", ")", "else", ":", "segment", "=", "InputSegment", "(", "points", "=", "offCurves", "+", "[", "point", "]", ",", "previousOnCurve", "=", "previousOnCurve", ",", "willBeReversed", "=", "willBeReversed", ")", "segments", ".", "append", "(", "segment", ")", "offCurves", "=", "[", "]", "previousOnCurve", "=", "point", ".", "coordinates", "assert", "not", "offCurves", "return", "segments" ]
Compile points into InputSegment objects.
[ "Compile", "points", "into", "InputSegment", "objects", "." ]
python
train
30.310345
manns/pyspread
pyspread/src/gui/_main_window.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L1170-L1193
def OnApprove(self, event):
    """File approve event handler"""

    if not self.main_window.safe_mode:
        return

    msg = _(u"You are going to approve and trust a file that\n"
            u"you have not created yourself.\n"
            u"After proceeding, the file is executed.\n \n"
            u"It may harm your system as any program can.\n"
            u"Please check all cells thoroughly before\nproceeding.\n \n"
            u"Proceed and sign this file as trusted?")

    short_msg = _("Security warning")

    if self.main_window.interfaces.get_warning_choice(msg, short_msg):
        # Leave safe mode
        self.main_window.grid.actions.leave_safe_mode()

        # Display safe mode end in status bar
        statustext = _("Safe mode deactivated.")
        post_command_event(self.main_window, self.main_window.StatusBarMsg,
                           text=statustext)
[ "def", "OnApprove", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "main_window", ".", "safe_mode", ":", "return", "msg", "=", "_", "(", "u\"You are going to approve and trust a file that\\n\"", "u\"you have not created yourself.\\n\"", "u\"After proceeding, the file is executed.\\n \\n\"", "u\"It may harm your system as any program can.\\n\"", "u\"Please check all cells thoroughly before\\nproceeding.\\n \\n\"", "u\"Proceed and sign this file as trusted?\"", ")", "short_msg", "=", "_", "(", "\"Security warning\"", ")", "if", "self", ".", "main_window", ".", "interfaces", ".", "get_warning_choice", "(", "msg", ",", "short_msg", ")", ":", "# Leave safe mode", "self", ".", "main_window", ".", "grid", ".", "actions", ".", "leave_safe_mode", "(", ")", "# Display safe mode end in status bar", "statustext", "=", "_", "(", "\"Safe mode deactivated.\"", ")", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "main_window", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")" ]
File approve event handler
[ "File", "approve", "event", "handler" ]
python
train
39.041667
chaimleib/intervaltree
intervaltree/intervaltree.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L645-L708
def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):
    """
    Finds all intervals with overlapping ranges and merges them
    into a single interval. If provided, uses data_reducer and
    data_initializer with similar semantics to Python's built-in
    reduce(reducer_func[, initializer]), as follows:

    If data_reducer is set to a function, combines the data
    fields of the Intervals with
        current_reduced_data = data_reducer(current_reduced_data, new_data)
    If data_reducer is None, the merged Interval's data
    field will be set to None, ignoring all the data fields
    of the merged Intervals.

    On encountering the first Interval to merge, if
    data_initializer is None (default), uses the first
    Interval's data field as the first value for
    current_reduced_data. If data_initializer is not None,
    current_reduced_data is set to a shallow copy of
    data_initializer created with copy.copy(data_initializer).

    If strict is True (default), intervals are only merged if
    their ranges actually overlap; adjacent, touching intervals
    will not be merged. If strict is False, intervals are merged
    even if they are only end-to-end adjacent.

    Completes in O(n*logn).
    """
    if not self:
        return

    sorted_intervals = sorted(self.all_intervals)  # get sorted intervals
    merged = []
    # use mutable object to allow new_series() to modify it
    current_reduced = [None]
    higher = None  # iterating variable, which new_series() needs access to

    def new_series():
        if data_initializer is None:
            current_reduced[0] = higher.data
            merged.append(higher)
            return
        else:  # data_initializer is not None
            current_reduced[0] = copy(data_initializer)
            current_reduced[0] = data_reducer(current_reduced[0], higher.data)
            merged.append(Interval(higher.begin, higher.end, current_reduced[0]))

    for higher in sorted_intervals:
        if merged:  # series already begun
            lower = merged[-1]
            if (higher.begin < lower.end or
                    not strict and higher.begin == lower.end):  # should merge
                upper_bound = max(lower.end, higher.end)
                if data_reducer is not None:
                    current_reduced[0] = data_reducer(current_reduced[0], higher.data)
                else:  # annihilate the data, since we don't know how to merge it
                    current_reduced[0] = None
                merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])
            else:
                new_series()
        else:  # not merged; is first of Intervals to merge
            new_series()

    self.__init__(merged)
[ "def", "merge_overlaps", "(", "self", ",", "data_reducer", "=", "None", ",", "data_initializer", "=", "None", ",", "strict", "=", "True", ")", ":", "if", "not", "self", ":", "return", "sorted_intervals", "=", "sorted", "(", "self", ".", "all_intervals", ")", "# get sorted intervals", "merged", "=", "[", "]", "# use mutable object to allow new_series() to modify it", "current_reduced", "=", "[", "None", "]", "higher", "=", "None", "# iterating variable, which new_series() needs access to", "def", "new_series", "(", ")", ":", "if", "data_initializer", "is", "None", ":", "current_reduced", "[", "0", "]", "=", "higher", ".", "data", "merged", ".", "append", "(", "higher", ")", "return", "else", ":", "# data_initializer is not None", "current_reduced", "[", "0", "]", "=", "copy", "(", "data_initializer", ")", "current_reduced", "[", "0", "]", "=", "data_reducer", "(", "current_reduced", "[", "0", "]", ",", "higher", ".", "data", ")", "merged", ".", "append", "(", "Interval", "(", "higher", ".", "begin", ",", "higher", ".", "end", ",", "current_reduced", "[", "0", "]", ")", ")", "for", "higher", "in", "sorted_intervals", ":", "if", "merged", ":", "# series already begun", "lower", "=", "merged", "[", "-", "1", "]", "if", "(", "higher", ".", "begin", "<", "lower", ".", "end", "or", "not", "strict", "and", "higher", ".", "begin", "==", "lower", ".", "end", ")", ":", "# should merge", "upper_bound", "=", "max", "(", "lower", ".", "end", ",", "higher", ".", "end", ")", "if", "data_reducer", "is", "not", "None", ":", "current_reduced", "[", "0", "]", "=", "data_reducer", "(", "current_reduced", "[", "0", "]", ",", "higher", ".", "data", ")", "else", ":", "# annihilate the data, since we don't know how to merge it", "current_reduced", "[", "0", "]", "=", "None", "merged", "[", "-", "1", "]", "=", "Interval", "(", "lower", ".", "begin", ",", "upper_bound", ",", "current_reduced", "[", "0", "]", ")", "else", ":", "new_series", "(", ")", "else", ":", "# not merged; is first of Intervals to merge", "new_series", "(", ")", "self", ".", "__init__", "(", "merged", ")" ]
Finds all intervals with overlapping ranges and merges them into a single interval. If provided, uses data_reducer and data_initializer with similar semantics to Python's built-in reduce(reducer_func[, initializer]), as follows:

If data_reducer is set to a function, combines the data fields of the Intervals with current_reduced_data = data_reducer(current_reduced_data, new_data). If data_reducer is None, the merged Interval's data field will be set to None, ignoring all the data fields of the merged Intervals.

On encountering the first Interval to merge, if data_initializer is None (default), uses the first Interval's data field as the first value for current_reduced_data. If data_initializer is not None, current_reduced_data is set to a shallow copy of data_initializer created with copy.copy(data_initializer).

If strict is True (default), intervals are only merged if their ranges actually overlap; adjacent, touching intervals will not be merged. If strict is False, intervals are merged even if they are only end-to-end adjacent.

Completes in O(n*logn).
[ "Finds", "all", "intervals", "with", "overlapping", "ranges", "and", "merges", "them", "into", "a", "single", "interval", ".", "If", "provided", "uses", "data_reducer", "and", "data_initializer", "with", "similar", "semantics", "to", "Python", "s", "built", "-", "in", "reduce", "(", "reducer_func", "[", "initializer", "]", ")", "as", "follows", ":" ]
python
train
45.5
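Strict vs. non-strict merging in practice (hypothetical intervals; requires the intervaltree package):

from intervaltree import IntervalTree

t = IntervalTree.from_tuples([(1, 3), (3, 5), (4, 8)])
t.merge_overlaps()              # strict: touching at 3 does not merge
print(sorted(t))                # [Interval(1, 3), Interval(3, 8)]

t = IntervalTree.from_tuples([(1, 3), (3, 5), (4, 8)])
t.merge_overlaps(strict=False)  # end-to-end adjacency merges too
print(sorted(t))                # [Interval(1, 8)]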
thautwarm/RBNF
rbnf/auto_lexer/__init__.py
https://github.com/thautwarm/RBNF/blob/cceec88c90f7ec95c160cfda01bfc532610985e0/rbnf/auto_lexer/__init__.py#L246-L270
def regex_lexer(regex_pat):
    """
    generate token names' cache
    """
    if isinstance(regex_pat, str):
        regex_pat = re.compile(regex_pat)

        def f(inp_str, pos):
            m = regex_pat.match(inp_str, pos)
            return m.group() if m else None
    elif hasattr(regex_pat, 'match'):
        def f(inp_str, pos):
            m = regex_pat.match(inp_str, pos)
            return m.group() if m else None
    else:
        regex_pats = tuple(re.compile(e) for e in regex_pat)

        def f(inp_str, pos):
            for each_pat in regex_pats:
                m = each_pat.match(inp_str, pos)
                if m:
                    return m.group()

    return f
[ "def", "regex_lexer", "(", "regex_pat", ")", ":", "if", "isinstance", "(", "regex_pat", ",", "str", ")", ":", "regex_pat", "=", "re", ".", "compile", "(", "regex_pat", ")", "def", "f", "(", "inp_str", ",", "pos", ")", ":", "m", "=", "regex_pat", ".", "match", "(", "inp_str", ",", "pos", ")", "return", "m", ".", "group", "(", ")", "if", "m", "else", "None", "elif", "hasattr", "(", "regex_pat", ",", "'match'", ")", ":", "def", "f", "(", "inp_str", ",", "pos", ")", ":", "m", "=", "regex_pat", ".", "match", "(", "inp_str", ",", "pos", ")", "return", "m", ".", "group", "(", ")", "if", "m", "else", "None", "else", ":", "regex_pats", "=", "tuple", "(", "re", ".", "compile", "(", "e", ")", "for", "e", "in", "regex_pat", ")", "def", "f", "(", "inp_str", ",", "pos", ")", ":", "for", "each_pat", "in", "regex_pats", ":", "m", "=", "each_pat", ".", "match", "(", "inp_str", ",", "pos", ")", "if", "m", ":", "return", "m", ".", "group", "(", ")", "return", "f" ]
generate token names' cache
[ "generate", "token", "names", "cache" ]
python
train
26.76
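Usage sketch for two of the accepted pattern forms, assuming the regex_lexer above (and its module's re import) is in scope; inputs are invented:

lex_num = regex_lexer(r'\d+')               # built from a pattern string
print(lex_num('ab123', 2))                  # '123'
print(lex_num('ab123', 0))                  # None

lex_any = regex_lexer([r'[a-z]+', r'\d+'])  # built from a list of patterns
print(lex_any('42x', 0))                    # '42'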
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L257-L261
def entity_list(args):
    """ List entities in a workspace. """
    r = fapi.get_entities_with_type(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    return ['{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json()]
[ "def", "entity_list", "(", "args", ")", ":", "r", "=", "fapi", ".", "get_entities_with_type", "(", "args", ".", "project", ",", "args", ".", "workspace", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")", "return", "[", "'{0}\\t{1}'", ".", "format", "(", "e", "[", "'entityType'", "]", ",", "e", "[", "'name'", "]", ")", "for", "e", "in", "r", ".", "json", "(", ")", "]" ]
List entities in a workspace.
[ "List", "entities", "in", "a", "workspace", "." ]
python
train
48.6
tbielawa/bitmath
bitmath/__init__.py
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/__init__.py#L1351-L1391
def listdir(search_base, followlinks=False, filter='*',
            relpath=False, bestprefix=False, system=NIST):
    """This is a generator which recurses the directory tree
    `search_base`, yielding 2-tuples of:

    * The absolute/relative path to a discovered file
    * A bitmath instance representing the "apparent size" of the file.

    - `search_base` - The directory to begin walking down.
    - `followlinks` - Whether or not to follow symbolic links to directories
    - `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
      (default: ``*``, everything)
    - `relpath` - ``True`` to return the relative path from `pwd` or
      ``False`` (default) to return the fully qualified path
    - ``bestprefix`` - set to ``False`` to get ``bitmath.Byte`` instances
      back instead
    - `system` - Provide a preferred unit system by setting `system`
      to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

    .. note:: This function does NOT return tuples for directory entities.

    .. note:: Symlinks to **files** are followed automatically
    """
    for root, dirs, files in os.walk(search_base, followlinks=followlinks):
        for name in fnmatch.filter(files, filter):
            _path = os.path.join(root, name)
            if relpath:
                # RELATIVE path
                _return_path = os.path.relpath(_path, '.')
            else:
                # REAL path
                _return_path = os.path.realpath(_path)

            if followlinks:
                yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
            else:
                if os.path.isdir(_path) or os.path.islink(_path):
                    pass
                else:
                    yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
[ "def", "listdir", "(", "search_base", ",", "followlinks", "=", "False", ",", "filter", "=", "'*'", ",", "relpath", "=", "False", ",", "bestprefix", "=", "False", ",", "system", "=", "NIST", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "search_base", ",", "followlinks", "=", "followlinks", ")", ":", "for", "name", "in", "fnmatch", ".", "filter", "(", "files", ",", "filter", ")", ":", "_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", "if", "relpath", ":", "# RELATIVE path", "_return_path", "=", "os", ".", "path", ".", "relpath", "(", "_path", ",", "'.'", ")", "else", ":", "# REAL path", "_return_path", "=", "os", ".", "path", ".", "realpath", "(", "_path", ")", "if", "followlinks", ":", "yield", "(", "_return_path", ",", "getsize", "(", "_path", ",", "bestprefix", "=", "bestprefix", ",", "system", "=", "system", ")", ")", "else", ":", "if", "os", ".", "path", ".", "isdir", "(", "_path", ")", "or", "os", ".", "path", ".", "islink", "(", "_path", ")", ":", "pass", "else", ":", "yield", "(", "_return_path", ",", "getsize", "(", "_path", ",", "bestprefix", "=", "bestprefix", ",", "system", "=", "system", ")", ")" ]
This is a generator which recurses the directory tree `search_base`, yielding 2-tuples of:

* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.

- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with (default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or ``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte`` instances back instead
- `system` - Provide a preferred unit system by setting `system` to either ``bitmath.NIST`` (default) or ``bitmath.SI``.

.. note:: This function does NOT return tuples for directory entities.

.. note:: Symlinks to **files** are followed automatically
[ "This", "is", "a", "generator", "which", "recurses", "the", "directory", "tree", "search_base", "yielding", "2", "-", "tuples", "of", ":" ]
python
train
43
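Consuming the generator (the directory and glob below are illustrative):

import bitmath

for path, size in bitmath.listdir('./docs', filter='*.txt', relpath=True):
    # size is a bitmath instance (a plain Byte unless bestprefix is set)
    print(path, size)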
SeleniumHQ/selenium
py/selenium/webdriver/remote/webelement.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webelement.py#L545-L554
def size(self):
    """The size of the element."""
    size = {}
    if self._w3c:
        size = self._execute(Command.GET_ELEMENT_RECT)['value']
    else:
        size = self._execute(Command.GET_ELEMENT_SIZE)['value']
    new_size = {"height": size["height"],
                "width": size["width"]}
    return new_size
[ "def", "size", "(", "self", ")", ":", "size", "=", "{", "}", "if", "self", ".", "_w3c", ":", "size", "=", "self", ".", "_execute", "(", "Command", ".", "GET_ELEMENT_RECT", ")", "[", "'value'", "]", "else", ":", "size", "=", "self", ".", "_execute", "(", "Command", ".", "GET_ELEMENT_SIZE", ")", "[", "'value'", "]", "new_size", "=", "{", "\"height\"", ":", "size", "[", "\"height\"", "]", ",", "\"width\"", ":", "size", "[", "\"width\"", "]", "}", "return", "new_size" ]
The size of the element.
[ "The", "size", "of", "the", "element", "." ]
python
train
34.9
pandas-dev/pandas
pandas/core/sparse/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L554-L571
def combine_first(self, other):
    """
    Combine Series values, choosing the calling Series's values first.
    Result index will be the union of the two indexes

    Parameters
    ----------
    other : Series

    Returns
    -------
    y : Series
    """
    if isinstance(other, SparseSeries):
        other = other.to_dense()

    dense_combined = self.to_dense().combine_first(other)
    return dense_combined.to_sparse(fill_value=self.fill_value)
[ "def", "combine_first", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "SparseSeries", ")", ":", "other", "=", "other", ".", "to_dense", "(", ")", "dense_combined", "=", "self", ".", "to_dense", "(", ")", ".", "combine_first", "(", "other", ")", "return", "dense_combined", ".", "to_sparse", "(", "fill_value", "=", "self", ".", "fill_value", ")" ]
Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes

Parameters
----------
other : Series

Returns
-------
y : Series
[ "Combine", "Series", "values", "choosing", "the", "calling", "Series", "s", "values", "first", ".", "Result", "index", "will", "be", "the", "union", "of", "the", "two", "indexes" ]
python
train
27.611111
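The dense combine_first this delegates to behaves as follows (illustrative values):

import numpy as np
import pandas as pd

s1 = pd.Series([1.0, np.nan], index=['a', 'b'])
s2 = pd.Series([3.0, 4.0], index=['b', 'c'])
# The caller's values win; the index is the union of both indexes.
print(s1.combine_first(s2))  # a: 1.0, b: 4.0, c: 4.0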
doraemonext/wechat-python-sdk
wechat_sdk/lib/crypto/base.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/lib/crypto/base.py#L45-L75
def decrypt(self, text, appid):
    """Strip the padding from the decrypted plaintext.

    @param text: the ciphertext
    @return: the plaintext with the padding removed
    """
    try:
        cryptor = AES.new(self.key, self.mode, self.key[:16])
        # BASE64-decode the ciphertext, then decrypt with AES-CBC
        plain_text = cryptor.decrypt(base64.b64decode(text))
    except Exception as e:
        raise DecryptAESError(e)

    try:
        if six.PY2:
            pad = ord(plain_text[-1])
        else:
            pad = plain_text[-1]
        # strip the padding bytes
        # pkcs7 = PKCS7Encoder()
        # plain_text = pkcs7.encode(plain_text)
        # drop the 16-byte random prefix
        content = plain_text[16:-pad]
        xml_len = socket.ntohl(struct.unpack("I", content[: 4])[0])
        xml_content = content[4: xml_len + 4]
        from_appid = content[xml_len + 4:]
    except Exception as e:
        raise IllegalBuffer(e)

    if from_appid != appid:
        raise ValidateAppIDError()
    return xml_content
[ "def", "decrypt", "(", "self", ",", "text", ",", "appid", ")", ":", "try", ":", "cryptor", "=", "AES", ".", "new", "(", "self", ".", "key", ",", "self", ".", "mode", ",", "self", ".", "key", "[", ":", "16", "]", ")", "# 使用BASE64对密文进行解码,然后AES-CBC解密", "plain_text", "=", "cryptor", ".", "decrypt", "(", "base64", ".", "b64decode", "(", "text", ")", ")", "except", "Exception", "as", "e", ":", "raise", "DecryptAESError", "(", "e", ")", "try", ":", "if", "six", ".", "PY2", ":", "pad", "=", "ord", "(", "plain_text", "[", "-", "1", "]", ")", "else", ":", "pad", "=", "plain_text", "[", "-", "1", "]", "# 去掉补位字符串", "# pkcs7 = PKCS7Encoder()", "# plain_text = pkcs7.encode(plain_text)", "# 去除16位随机字符串", "content", "=", "plain_text", "[", "16", ":", "-", "pad", "]", "xml_len", "=", "socket", ".", "ntohl", "(", "struct", ".", "unpack", "(", "\"I\"", ",", "content", "[", ":", "4", "]", ")", "[", "0", "]", ")", "xml_content", "=", "content", "[", "4", ":", "xml_len", "+", "4", "]", "from_appid", "=", "content", "[", "xml_len", "+", "4", ":", "]", "except", "Exception", "as", "e", ":", "raise", "IllegalBuffer", "(", "e", ")", "if", "from_appid", "!=", "appid", ":", "raise", "ValidateAppIDError", "(", ")", "return", "xml_content" ]
Strip the padding from the decrypted plaintext. @param text: the ciphertext @return: the plaintext with the padding removed
[ "对解密后的明文进行补位删除" ]
python
valid
31.967742
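The pad-stripping step in isolation: PKCS#7 stores the pad length in the final byte, so the slice above can drop it directly (toy plaintext, no AES involved):

padded = b'HELLO' + bytes([3]) * 3  # three pad bytes, each valued 3
pad = padded[-1]                    # 3 on Python 3 (indexing bytes gives int)
print(padded[:-pad])                # b'HELLO'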
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py#L53-L66
def _rpartition(entity, sep):
    """Python2.4 doesn't have an rpartition method so we provide
    our own that mimics str.rpartition from later releases.

    Split the string at the last occurrence of sep, and return a
    3-tuple containing the part before the separator, the separator
    itself, and the part after the separator. If the separator is not
    found, return a 3-tuple containing two empty strings, followed
    by the string itself.
    """
    idx = entity.rfind(sep)
    if idx == -1:
        return '', '', entity
    return entity[:idx], sep, entity[idx + 1:]
[ "def", "_rpartition", "(", "entity", ",", "sep", ")", ":", "idx", "=", "entity", ".", "rfind", "(", "sep", ")", "if", "idx", "==", "-", "1", ":", "return", "''", ",", "''", ",", "entity", "return", "entity", "[", ":", "idx", "]", ",", "sep", ",", "entity", "[", "idx", "+", "1", ":", "]" ]
Python2.4 doesn't have an rpartition method so we provide our own that mimics str.rpartition from later releases. Split the string at the last occurrence of sep, and return a 3-tuple containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return a 3-tuple containing two empty strings, followed by the string itself.
[ "Python2", ".", "4", "doesn", "t", "have", "an", "rpartition", "method", "so", "we", "provide", "our", "own", "that", "mimics", "str", ".", "rpartition", "from", "later", "releases", "." ]
python
train
40.642857
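Behavior check against the built-in str.rpartition (inputs invented):

print(_rpartition('mongodb://host:27017', ':'))  # ('mongodb://host', ':', '27017')
print(_rpartition('localhost', ':'))             # ('', '', 'localhost')
print('mongodb://host:27017'.rpartition(':'))    # same triple on modern Python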
rocky/python3-trepan
trepan/processor/cmdfns.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/cmdfns.py#L103-L118
def get_int(errmsg, arg, default=1, cmdname=None):
    """If arg is an int, use that otherwise take default."""
    if arg:
        try:
            # eval() is used so we will allow arithmetic expressions,
            # variables etc.
            default = int(eval(arg))
        except (SyntaxError, NameError, ValueError):
            if cmdname:
                errmsg("Command '%s' expects an integer; got: %s."
                       % (cmdname, str(arg)))
            else:
                errmsg('Expecting an integer, got: %s.' % str(arg))
                pass
            raise ValueError
    return default
[ "def", "get_int", "(", "errmsg", ",", "arg", ",", "default", "=", "1", ",", "cmdname", "=", "None", ")", ":", "if", "arg", ":", "try", ":", "# eval() is used so we will allow arithmetic expressions,", "# variables etc.", "default", "=", "int", "(", "eval", "(", "arg", ")", ")", "except", "(", "SyntaxError", ",", "NameError", ",", "ValueError", ")", ":", "if", "cmdname", ":", "errmsg", "(", "\"Command '%s' expects an integer; got: %s.\"", "%", "(", "cmdname", ",", "str", "(", "arg", ")", ")", ")", "else", ":", "errmsg", "(", "'Expecting an integer, got: %s.'", "%", "str", "(", "arg", ")", ")", "pass", "raise", "ValueError", "return", "default" ]
If arg is an int, use that otherwise take default.
[ "If", "arg", "is", "an", "int", "use", "that", "otherwise", "take", "default", "." ]
python
test
37.625
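Hypothetical calls showing the arithmetic-expression support, with errmsg stubbed out as print:

print(get_int(print, '2 + 3', default=1, cmdname='step'))  # 5
print(get_int(print, '', default=1))                       # 1 (empty arg keeps the default)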
InfoAgeTech/django-core
django_core/utils/date_parsers.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/utils/date_parsers.py#L117-L133
def parse_date(dt, ignoretz=True, as_tz=None):
    """
    :param dt: string datetime to convert into datetime object.
    :return: date object if the string can be parsed into a date.
        Otherwise, return None.
    :see: http://labix.org/python-dateutil

    Examples:

    >>> parse_date('2011-12-30')
    datetime.date(2011, 12, 30)
    >>> parse_date('12/30/2011')
    datetime.date(2011, 12, 30)
    """
    dttm = parse_datetime(dt, ignoretz=ignoretz)
    return None if dttm is None else dttm.date()
[ "def", "parse_date", "(", "dt", ",", "ignoretz", "=", "True", ",", "as_tz", "=", "None", ")", ":", "dttm", "=", "parse_datetime", "(", "dt", ",", "ignoretz", "=", "ignoretz", ")", "return", "None", "if", "dttm", "is", "None", "else", "dttm", ".", "date", "(", ")" ]
:param dt: string datetime to convert into datetime object.
:return: date object if the string can be parsed into a date. Otherwise, return None.
:see: http://labix.org/python-dateutil

Examples:

>>> parse_date('2011-12-30')
datetime.date(2011, 12, 30)
>>> parse_date('12/30/2011')
datetime.date(2011, 12, 30)
[ ":", "param", "dt", ":", "string", "datetime", "to", "convert", "into", "datetime", "object", ".", ":", "return", ":", "date", "object", "if", "the", "string", "can", "be", "parsed", "into", "a", "date", ".", "Otherwise", "return", "None", "." ]
python
train
29.176471
pycontribs/jira
jira/client.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L2716-L2724
def delete_user_avatar(self, username, avatar):
    """Delete a user's avatar.

    :param username: the user to delete the avatar from
    :param avatar: ID of the avatar to remove
    """
    params = {'username': username}
    url = self._get_url('user/avatar/' + avatar)
    return self._session.delete(url, params=params)
[ "def", "delete_user_avatar", "(", "self", ",", "username", ",", "avatar", ")", ":", "params", "=", "{", "'username'", ":", "username", "}", "url", "=", "self", ".", "_get_url", "(", "'user/avatar/'", "+", "avatar", ")", "return", "self", ".", "_session", ".", "delete", "(", "url", ",", "params", "=", "params", ")" ]
Delete a user's avatar. :param username: the user to delete the avatar from :param avatar: ID of the avatar to remove
[ "Delete", "a", "user", "s", "avatar", "." ]
python
train
38.444444
cocagne/txdbus
doc/examples/fd_server.py
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/doc/examples/fd_server.py#L56-L63
def dbus_readBytesFD(self, fd, byte_count):
    """
    Reads byte_count bytes from fd and returns them.
    """
    f = os.fdopen(fd, 'rb')
    result = f.read(byte_count)
    f.close()
    return bytearray(result)
[ "def", "dbus_readBytesFD", "(", "self", ",", "fd", ",", "byte_count", ")", ":", "f", "=", "os", ".", "fdopen", "(", "fd", ",", "'rb'", ")", "result", "=", "f", ".", "read", "(", "byte_count", ")", "f", ".", "close", "(", ")", "return", "bytearray", "(", "result", ")" ]
Reads byte_count bytes from fd and returns them.
[ "Reads", "byte_count", "bytes", "from", "fd", "and", "returns", "them", "." ]
python
train
29.5
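A self-contained check of the fd handling using an OS pipe instead of D-Bus (values invented):

import os

r, w = os.pipe()
os.write(w, b'hello')
os.close(w)
f = os.fdopen(r, 'rb')       # takes ownership of the read descriptor
print(bytearray(f.read(5)))  # bytearray(b'hello')
f.close()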
danilobellini/audiolazy
audiolazy/lazy_stream.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_stream.py#L299-L318
def peek(self, n=None, constructor=list):
    """
    Sees/peeks the next few items in the Stream, without removing them.

    Besides that this function keeps the Stream items, it's the same as
    the ``Stream.take()`` method.

    See Also
    --------
    Stream.take :
      Returns the n first elements from the Stream, removing them.

    Note
    ----
    When applied in a StreamTeeHub, this method doesn't consume a copy.
    Data evaluation is done only once, i.e., after peeking the data is
    simply stored to be yielded again when asked for.
    """
    return self.copy().take(n=n, constructor=constructor)
[ "def", "peek", "(", "self", ",", "n", "=", "None", ",", "constructor", "=", "list", ")", ":", "return", "self", ".", "copy", "(", ")", ".", "take", "(", "n", "=", "n", ",", "constructor", "=", "constructor", ")" ]
Sees/peeks the next few items in the Stream, without removing them.

Apart from keeping the Stream items, this method behaves the same as the ``Stream.take()`` method.

See Also
--------
Stream.take :
  Returns the first n elements from the Stream, removing them.

Note
----
When applied in a StreamTeeHub, this method doesn't consume a copy. Data evaluation is done only once, i.e., after peeking the data is simply stored to be yielded again when asked for.
[ "Sees", "/", "peeks", "the", "next", "few", "items", "in", "the", "Stream", "without", "removing", "them", "." ]
python
train
30.25
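The peek-via-copy design above can be illustrated with a self-contained sketch built on itertools.tee; this is a toy, not audiolazy's actual Stream class:

import itertools

class MiniStream:
    # toy stream supporting the same copy/take/peek pattern
    def __init__(self, iterable):
        self._it = iter(iterable)

    def copy(self):
        self._it, branch = itertools.tee(self._it)
        return MiniStream(branch)

    def take(self, n, constructor=list):
        return constructor(itertools.islice(self._it, n))

    def peek(self, n, constructor=list):
        return self.copy().take(n, constructor)  # same as take(), but on a copy

s = MiniStream(range(10))
assert s.peek(3) == [0, 1, 2]
assert s.take(3) == [0, 1, 2]  # peeking consumed nothing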
openego/ding0
ding0/grid/mv_grid/solvers/savings.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/solvers/savings.py#L206-L247
def solve(self, graph, timeout, debug=False, anim=None):
    """Solves the CVRP problem using Clarke and Wright Savings methods

    Parameters
    ----------
    graph: :networkx:`NetworkX Graph Obj< >`
        A NetworkX graph is used.
    timeout: int
        max processing time in seconds
    debug: bool, defaults to False
        If True, information is printed while routing
    anim: AnimationDing0

    Returns
    -------
    SavingsSolution
        A solution
    """
    savings_list = self.compute_savings_list(graph)

    solution = SavingsSolution(graph)

    start = time.time()

    for i, j in savings_list[:]:
        if solution.is_complete():
            break
        if solution.can_process((i, j)):
            solution, inserted = solution.process((i, j))
            if inserted:
                savings_list.remove((i, j))
                if anim:
                    solution.draw_network(anim)

        if time.time() - start > timeout:
            break

    return solution
[ "def", "solve", "(", "self", ",", "graph", ",", "timeout", ",", "debug", "=", "False", ",", "anim", "=", "None", ")", ":", "savings_list", "=", "self", ".", "compute_savings_list", "(", "graph", ")", "solution", "=", "SavingsSolution", "(", "graph", ")", "start", "=", "time", ".", "time", "(", ")", "for", "i", ",", "j", "in", "savings_list", "[", ":", "]", ":", "if", "solution", ".", "is_complete", "(", ")", ":", "break", "if", "solution", ".", "can_process", "(", "(", "i", ",", "j", ")", ")", ":", "solution", ",", "inserted", "=", "solution", ".", "process", "(", "(", "i", ",", "j", ")", ")", "if", "inserted", ":", "savings_list", ".", "remove", "(", "(", "i", ",", "j", ")", ")", "if", "anim", ":", "solution", ".", "draw_network", "(", "anim", ")", "if", "time", ".", "time", "(", ")", "-", "start", ">", "timeout", ":", "break", "return", "solution" ]
Solves the CVRP problem using Clarke and Wright Savings methods

Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
    A NetworkX graph is used.
timeout: int
    max processing time in seconds
debug: bool, defaults to False
    If True, information is printed while routing
anim: AnimationDing0

Returns
-------
SavingsSolution
    A solution
[ "Solves", "the", "CVRP", "problem", "using", "Clarke", "and", "Wright", "Savings", "methods" ]
python
train
25.928571
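For context on solve() above: Clarke and Wright ranks candidate route merges by the savings s(i, j) = d(0, i) + d(0, j) - d(i, j), with node 0 the depot. A minimal sketch of building such a list (the real compute_savings_list lives elsewhere in ding0):

def savings_list(dist, depot=0):
    # dist: symmetric distance matrix; returns (i, j) pairs by descending savings,
    # the order in which solve() consumes them
    n = len(dist)
    pairs = [(i, j) for i in range(1, n) for j in range(i + 1, n)]
    key = lambda p: dist[depot][p[0]] + dist[depot][p[1]] - dist[p[0]][p[1]]
    return sorted(pairs, key=key, reverse=True)

d = [[0, 4, 4, 5],
     [4, 0, 2, 6],
     [4, 2, 0, 7],
     [5, 6, 7, 0]]
print(savings_list(d))  # [(1, 2), (1, 3), (2, 3)]: merging 1 and 2 saves 4 + 4 - 2 = 6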
miyakogi/wdom
wdom/node.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/node.py#L509-L516
def replaceWith(self, *nodes: Union[AbstractNode, str]) -> None: """Replace this node with nodes. If nodes contains ``str``, it will be converted to Text node. """ if self.parentNode: node = _to_node_list(nodes) self.parentNode.replaceChild(node, self)
[ "def", "replaceWith", "(", "self", ",", "*", "nodes", ":", "Union", "[", "AbstractNode", ",", "str", "]", ")", "->", "None", ":", "if", "self", ".", "parentNode", ":", "node", "=", "_to_node_list", "(", "nodes", ")", "self", ".", "parentNode", ".", "replaceChild", "(", "node", ",", "self", ")" ]
Replace this node with nodes. If nodes contains ``str``, it will be converted to Text node.
[ "Replace", "this", "node", "with", "nodes", "." ]
python
train
37.75
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L2647-L2690
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
    """ Sets the alignment of the ``Decimal`` field.

    :param int group_size: size of the aligned `Field` group in bytes,
        can be between ``1`` and ``8``.
    :param int bit_offset: bit offset of the `Decimal` field within
        the aligned `Field` group,
        can be between ``0`` and ``63``.
    :param bool auto_align: if ``True`` the `Decimal` field aligns itself
        to the next matching byte size according to the *size* of the
        `Decimal` field.
    """
    # Field alignment offset
    field_offset = int(bit_offset)

    # Auto alignment
    if auto_align:
        # Field alignment size
        field_size, bit_offset = divmod(field_offset, 8)
        if bit_offset != 0:
            field_size += 1
        field_size = max(field_size, 1)
    # No auto alignment
    else:
        # Field alignment size
        field_size = int(group_size)

    # Field alignment
    alignment = Alignment(field_size, field_offset)

    # Invalid field alignment size
    if field_size not in range(1, 9):
        raise FieldAlignmentError(self, self.index, alignment)

    # Invalid field alignment offset
    if not (0 <= field_offset <= 63):
        raise FieldAlignmentError(self, self.index, alignment)

    # Invalid field alignment
    if field_offset >= field_size * 8:
        raise FieldAlignmentError(self, self.index, alignment)

    # Set field alignment
    self._align_to_byte_size = alignment.byte_size
    self._align_to_bit_offset = alignment.bit_offset
[ "def", "_set_alignment", "(", "self", ",", "group_size", ",", "bit_offset", "=", "0", ",", "auto_align", "=", "False", ")", ":", "# Field alignment offset", "field_offset", "=", "int", "(", "bit_offset", ")", "# Auto alignment", "if", "auto_align", ":", "# Field alignment size", "field_size", ",", "bit_offset", "=", "divmod", "(", "field_offset", ",", "8", ")", "if", "bit_offset", "is", "not", "0", ":", "field_size", "+=", "1", "field_size", "=", "max", "(", "field_size", ",", "1", ")", "# No auto alignment", "else", ":", "# Field alignment size", "field_size", "=", "int", "(", "group_size", ")", "# Field alignment", "alignment", "=", "Alignment", "(", "field_size", ",", "field_offset", ")", "# Invalid field alignment size", "if", "field_size", "not", "in", "range", "(", "1", ",", "8", ")", ":", "raise", "FieldAlignmentError", "(", "self", ",", "self", ".", "index", ",", "alignment", ")", "# Invalid field alignment offset", "if", "not", "(", "0", "<=", "field_offset", "<=", "63", ")", ":", "raise", "FieldAlignmentError", "(", "self", ",", "self", ".", "index", ",", "alignment", ")", "# Invalid field alignment", "if", "field_offset", ">=", "field_size", "*", "8", ":", "raise", "FieldAlignmentError", "(", "self", ",", "self", ".", "index", ",", "alignment", ")", "# Set field alignment", "self", ".", "_align_to_byte_size", "=", "alignment", ".", "byte_size", "self", ".", "_align_to_bit_offset", "=", "alignment", ".", "bit_offset" ]
Sets the alignment of the ``Decimal`` field. :param int group_size: size of the aligned `Field` group in bytes, can be between ``1`` and ``8``. :param int bit_offset: bit offset of the `Decimal` field within the aligned `Field` group, can be between ``0`` and ``63``. :param bool auto_align: if ``True`` the `Decimal` field aligns itself to the next matching byte size according to the *size* of the `Decimal` field.
[ "Sets", "the", "alignment", "of", "the", "Decimal", "field", "." ]
python
train
37.681818
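The auto-align branch of _set_alignment above is just ceiling division of the bit offset by 8; a small worked check:

# bit offset 13 reaches into the second byte, so the group must span 2 bytes
field_size, rem = divmod(13, 8)   # (1, 5)
if rem != 0:
    field_size += 1
assert field_size == 2
assert 13 < field_size * 8        # the validity rule: offset falls inside the group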
apache/spark
python/pyspark/sql/session.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L382-L412
def _inferSchema(self, rdd, samplingRatio=None, names=None): """ Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType` """ first = rdd.first() if not first: raise ValueError("The first row in RDD is empty, " "can not infer schema") if type(first) is dict: warnings.warn("Using RDD of dict to inferSchema is deprecated. " "Use pyspark.sql.Row instead") if samplingRatio is None: schema = _infer_schema(first, names=names) if _has_nulltype(schema): for row in rdd.take(100)[1:]: schema = _merge_type(schema, _infer_schema(row, names=names)) if not _has_nulltype(schema): break else: raise ValueError("Some of types cannot be determined by the " "first 100 rows, please try again with sampling") else: if samplingRatio < 0.99: rdd = rdd.sample(False, float(samplingRatio)) schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type) return schema
[ "def", "_inferSchema", "(", "self", ",", "rdd", ",", "samplingRatio", "=", "None", ",", "names", "=", "None", ")", ":", "first", "=", "rdd", ".", "first", "(", ")", "if", "not", "first", ":", "raise", "ValueError", "(", "\"The first row in RDD is empty, \"", "\"can not infer schema\"", ")", "if", "type", "(", "first", ")", "is", "dict", ":", "warnings", ".", "warn", "(", "\"Using RDD of dict to inferSchema is deprecated. \"", "\"Use pyspark.sql.Row instead\"", ")", "if", "samplingRatio", "is", "None", ":", "schema", "=", "_infer_schema", "(", "first", ",", "names", "=", "names", ")", "if", "_has_nulltype", "(", "schema", ")", ":", "for", "row", "in", "rdd", ".", "take", "(", "100", ")", "[", "1", ":", "]", ":", "schema", "=", "_merge_type", "(", "schema", ",", "_infer_schema", "(", "row", ",", "names", "=", "names", ")", ")", "if", "not", "_has_nulltype", "(", "schema", ")", ":", "break", "else", ":", "raise", "ValueError", "(", "\"Some of types cannot be determined by the \"", "\"first 100 rows, please try again with sampling\"", ")", "else", ":", "if", "samplingRatio", "<", "0.99", ":", "rdd", "=", "rdd", ".", "sample", "(", "False", ",", "float", "(", "samplingRatio", ")", ")", "schema", "=", "rdd", ".", "map", "(", "lambda", "row", ":", "_infer_schema", "(", "row", ",", "names", ")", ")", ".", "reduce", "(", "_merge_type", ")", "return", "schema" ]
Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType`
[ "Infer", "schema", "from", "an", "RDD", "of", "Row", "or", "tuple", "." ]
python
train
43.741935
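_inferSchema above is normally reached through SparkSession.createDataFrame; a short sketch, assuming a local Spark session is available:

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master('local[1]').getOrCreate()
rdd = spark.sparkContext.parallelize([Row(name='a', n=1), Row(name='b', n=2)])

df = spark.createDataFrame(rdd)                      # schema inferred from the first rows
df2 = spark.createDataFrame(rdd, samplingRatio=0.5)  # exercises the sampling branch
df.printSchema()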
kgori/treeCl
treeCl/collection.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/collection.py#L598-L638
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
    """
    Scan the cache directory and launch analysis for all unscored alignments
    using associated task handler.

    Kwargs are passed to the tree calculating task managed by the
    TaskInterface in self.task_interface.

    Example kwargs:
      TreeCollectionTaskInterface: scale=1, guide_tree=None, niters=10, keep_topology=False
      RaxmlTaskInterface: partition_files=None, model=None, threads=1
      FastTreeTaskInterface: No kwargs
    """
    if jobhandler is None:
        jobhandler = SequentialJobHandler()
    files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
    #logger.debug('Files - {}'.format(files))
    records = []
    outfiles = []
    dna = self.collection[0].is_dna()  # THIS IS ONLY A GUESS AT SEQ TYPE!!
    for infile in files:
        id_ = fileIO.strip_extensions(infile)
        outfile = self.get_result_file(id_)
        #logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
        if not os.path.exists(outfile):
            record = Alignment(infile, 'phylip', True)
            records.append(record)
            outfiles.append(outfile)

    if len(records) == 0:
        return []

    args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
    # logger.debug('Args - {}'.format(args))
    with fileIO.TempFileList(to_delete):
        result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
        for (out, res) in zip(outfiles, result):
            if not os.path.exists(out) and res:
                with open(out, 'w') as outfl:
                    json.dump(res, outfl)

    return result
[ "def", "analyse_cache_dir", "(", "self", ",", "jobhandler", "=", "None", ",", "batchsize", "=", "1", ",", "*", "*", "kwargs", ")", ":", "if", "jobhandler", "is", "None", ":", "jobhandler", "=", "SequentialJobHandler", "(", ")", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "'*.phy'", ")", ")", "#logger.debug('Files - {}'.format(files))", "records", "=", "[", "]", "outfiles", "=", "[", "]", "dna", "=", "self", ".", "collection", "[", "0", "]", ".", "is_dna", "(", ")", "# THIS IS ONLY A GUESS AT SEQ TYPE!!", "for", "infile", "in", "files", ":", "id_", "=", "fileIO", ".", "strip_extensions", "(", "infile", ")", "outfile", "=", "self", ".", "get_result_file", "(", "id_", ")", "#logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))", "if", "not", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "record", "=", "Alignment", "(", "infile", ",", "'phylip'", ",", "True", ")", "records", ".", "append", "(", "record", ")", "outfiles", ".", "append", "(", "outfile", ")", "if", "len", "(", "records", ")", "==", "0", ":", "return", "[", "]", "args", ",", "to_delete", "=", "self", ".", "task_interface", ".", "scrape_args", "(", "records", ",", "outfiles", "=", "outfiles", ",", "*", "*", "kwargs", ")", "# logger.debug('Args - {}'.format(args))", "with", "fileIO", ".", "TempFileList", "(", "to_delete", ")", ":", "result", "=", "jobhandler", "(", "self", ".", "task_interface", ".", "get_task", "(", ")", ",", "args", ",", "'Cache dir analysis'", ",", "batchsize", ")", "for", "(", "out", ",", "res", ")", "in", "zip", "(", "outfiles", ",", "result", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "out", ")", "and", "res", ":", "with", "open", "(", "out", ",", "'w'", ")", "as", "outfl", ":", "json", ".", "dump", "(", "res", ",", "outfl", ")", "return", "result" ]
Scan the cache directory and launch analysis for all unscored alignments using associated task handler. Kwargs are passed to the tree calculating task managed by the TaskInterface in self.task_interface.

Example kwargs:
  TreeCollectionTaskInterface: scale=1, guide_tree=None, niters=10, keep_topology=False
  RaxmlTaskInterface: partition_files=None, model=None, threads=1
  FastTreeTaskInterface: No kwargs
[ "Scan", "the", "cache", "directory", "and", "launch", "analysis", "for", "all", "unscored", "alignments", "using", "associated", "task", "handler", ".", "KWargs", "are", "passed", "to", "the", "tree", "calculating", "task", "managed", "by", "the", "TaskInterface", "in", "self", ".", "task_interface", ".", "Example", "kwargs", ":", "TreeCollectionTaskInterface", ":", "scale", "=", "1", "guide_tree", "=", "None", "niters", "=", "10", "keep_topology", "=", "False", "RaxmlTaskInterface", ":", "--------", "partition_files", "=", "None", "model", "=", "None", "threads", "=", "1", "FastTreeTaskInterface", ":", "-----", "No", "kwargs" ]
python
train
45.341463
eclipse/unide.python
src/unide/util.py
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/util.py#L51-L65
def loads(data, validate=False, **kwargs): """Load a PPMP message from the JSON-formatted string in `data`. When `validate` is set, raise `ValidationError`. Additional keyword arguments are the same that are accepted by `json.loads`, e.g. `indent` to get a pretty output. """ d = json.loads(data, **kwargs) content_spec = d["content-spec"] Payload = CONTENT_SPECS[content_spec] payload = Payload.load(d) if validate: errors = payload.problems() if errors: raise ValidationError(errors) return payload
[ "def", "loads", "(", "data", ",", "validate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "d", "=", "json", ".", "loads", "(", "data", ",", "*", "*", "kwargs", ")", "content_spec", "=", "d", "[", "\"content-spec\"", "]", "Payload", "=", "CONTENT_SPECS", "[", "content_spec", "]", "payload", "=", "Payload", ".", "load", "(", "d", ")", "if", "validate", ":", "errors", "=", "payload", ".", "problems", "(", ")", "if", "errors", ":", "raise", "ValidationError", "(", "errors", ")", "return", "payload" ]
Load a PPMP message from the JSON-formatted string in `data`. When `validate` is set, raise `ValidationError`. Additional keyword arguments are the same that are accepted by `json.loads`, e.g. `indent` to get a pretty output.
[ "Load", "a", "PPMP", "message", "from", "the", "JSON", "-", "formatted", "string", "in", "data", ".", "When", "validate", "is", "set", "raise", "ValidationError", ".", "Additional", "keyword", "arguments", "are", "the", "same", "that", "are", "accepted", "by", "json", ".", "loads", "e", ".", "g", ".", "indent", "to", "get", "a", "pretty", "output", "." ]
python
train
37.2
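loads() above dispatches on the message's "content-spec" field via the CONTENT_SPECS table. A generic, self-contained sketch of that dispatch pattern (toy payload class and spec URI, not unide's real ones):

import json

class ToyMeasurement:
    @classmethod
    def load(cls, d):
        obj = cls()
        obj.data = d
        return obj

# maps a content-spec URI to the payload class that can parse it
SPECS = {'urn:example:measurement#v2': ToyMeasurement}

def loads_sketch(data):
    d = json.loads(data)
    return SPECS[d['content-spec']].load(d)  # KeyError on an unknown spec

msg = '{"content-spec": "urn:example:measurement#v2", "value": 1}'
print(type(loads_sketch(msg)).__name__)  # ToyMeasurement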
sdispater/cleo
cleo/commands/command.py
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L142-L146
def confirm(self, question, default=False, true_answer_regex="(?i)^y"): """ Confirm a question with the user. """ return self._io.confirm(question, default, true_answer_regex)
[ "def", "confirm", "(", "self", ",", "question", ",", "default", "=", "False", ",", "true_answer_regex", "=", "\"(?i)^y\"", ")", ":", "return", "self", ".", "_io", ".", "confirm", "(", "question", ",", "default", ",", "true_answer_regex", ")" ]
Confirm a question with the user.
[ "Confirm", "a", "question", "with", "the", "user", "." ]
python
train
40.6
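Typical use of confirm() inside a command's handle(); the command class below is illustrative and follows the docstring-declared style of the cleo 0.x line this code comes from:

from cleo import Command

class DropCommand(Command):
    """
    Drops the database (illustrative)

    drop
    """

    def handle(self):
        # default=False: a plain Enter answers no; anything matching
        # true_answer_regex (a leading y/Y) returns True
        if self.confirm('Really drop the database?', False):
            self.line('dropping...')
        else:
            self.line('aborted')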
exekias/droplet
droplet/sudo.py
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/sudo.py#L29-L56
def set_euid():
    """
    Set settings.DROPLET_USER effective UID for the current process

    This adds some security, but it is no silver bullet: an attacker can
    still gain root access. At least we only elevate privileges when
    needed.

    See the root context manager
    """
    current = os.geteuid()
    logger.debug("Current EUID is %s" % current)
    if settings.DROPLET_USER is None:
        logger.info("Not changing EUID, DROPLET_USER is None")
        return
    uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
    if current != uid:
        try:
            os.seteuid(uid)
            logger.info("Set EUID to %s (%s)" % (settings.DROPLET_USER,
                                                 os.geteuid()))
        except:
            current_user = pwd.getpwuid(os.getuid()).pw_name
            logger.error("Failed to set '%s' EUID, running as '%s'" %
                         (settings.DROPLET_USER, current_user))
    else:
        logger.debug("Didn't set EUID, it was already correct")
[ "def", "set_euid", "(", ")", ":", "current", "=", "os", ".", "geteuid", "(", ")", "logger", ".", "debug", "(", "\"Current EUID is %s\"", "%", "current", ")", "if", "settings", ".", "DROPLET_USER", "is", "None", ":", "logger", ".", "info", "(", "\"Not changing EUID, DROPLET_USER is None\"", ")", "return", "uid", "=", "int", "(", "pwd", ".", "getpwnam", "(", "settings", ".", "DROPLET_USER", ")", ".", "pw_uid", ")", "if", "current", "!=", "uid", ":", "try", ":", "os", ".", "seteuid", "(", "uid", ")", "logger", ".", "info", "(", "\"Set EUID to %s (%s)\"", "%", "(", "settings", ".", "DROPLET_USER", ",", "os", ".", "geteuid", "(", ")", ")", ")", "except", ":", "current_user", "=", "pwd", ".", "getpwuid", "(", "os", ".", "getuid", "(", ")", ")", ".", "pw_name", "logger", ".", "error", "(", "\"Failed to set '%s' EUID, running as '%s'\"", "%", "(", "settings", ".", "DROPLET_USER", ",", "current_user", ")", ")", "else", ":", "logger", ".", "debug", "(", "\"Didn't set EUID, it was already correct\"", ")" ]
Set settings.DROPLET_USER effective UID for the current process

This adds some security, but it is no silver bullet: an attacker can still gain root access. At least we only elevate privileges when needed.

See the root context manager
[ "Set", "settings", ".", "DROPLET_USER", "effective", "UID", "for", "the", "current", "process" ]
python
train
34.25
pyblish/pyblish-qml
pyblish_qml/vendor/mock.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/vendor/mock.py#L243-L258
def _instance_callable(obj): """Given an object, return True if the object is callable. For classes, return True if instances would be callable.""" if not isinstance(obj, ClassTypes): # already an instance return getattr(obj, '__call__', None) is not None klass = obj # uses __bases__ instead of __mro__ so that we work with old style classes if klass.__dict__.get('__call__') is not None: return True for base in klass.__bases__: if _instance_callable(base): return True return False
[ "def", "_instance_callable", "(", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "ClassTypes", ")", ":", "# already an instance", "return", "getattr", "(", "obj", ",", "'__call__'", ",", "None", ")", "is", "not", "None", "klass", "=", "obj", "# uses __bases__ instead of __mro__ so that we work with old style classes", "if", "klass", ".", "__dict__", ".", "get", "(", "'__call__'", ")", "is", "not", "None", ":", "return", "True", "for", "base", "in", "klass", ".", "__bases__", ":", "if", "_instance_callable", "(", "base", ")", ":", "return", "True", "return", "False" ]
Given an object, return True if the object is callable. For classes, return True if instances would be callable.
[ "Given", "an", "object", "return", "True", "if", "the", "object", "is", "callable", ".", "For", "classes", "return", "True", "if", "instances", "would", "be", "callable", "." ]
python
train
34.1875
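A quick demonstration of what _instance_callable above distinguishes (assuming the function is in scope): instances are callable only if __call__ is defined somewhere in the class hierarchy.

class Plain(object):
    pass

class Invocable(object):
    def __call__(self):
        return 42

assert not _instance_callable(Plain)     # Plain() would not be callable
assert _instance_callable(Invocable)     # Invocable() would be callable
assert _instance_callable(Invocable())   # an instance: checked via getattr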
sixty-north/cosmic-ray
scripts/cosmic_ray_tooling.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/scripts/cosmic_ray_tooling.py#L49-L54
def read_version(version_file): "Read the `(version-string, version-info)` from `version_file`." vars = {} with open(version_file) as f: exec(f.read(), {}, vars) return (vars['__version__'], vars['__version_info__'])
[ "def", "read_version", "(", "version_file", ")", ":", "vars", "=", "{", "}", "with", "open", "(", "version_file", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "{", "}", ",", "vars", ")", "return", "(", "vars", "[", "'__version__'", "]", ",", "vars", "[", "'__version_info__'", "]", ")" ]
Read the `(version-string, version-info)` from `version_file`.
[ "Read", "the", "(", "version", "-", "string", "version", "-", "info", ")", "from", "version_file", "." ]
python
train
39.166667
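Sketch of the version-file convention read_version above expects; the file name and values are illustrative:

# contents of a hypothetical version.py:
#   __version_info__ = (4, 1, 0)
#   __version__ = '4.1.0'
version_string, version_info = read_version('version.py')
assert version_string == '4.1.0'
assert version_info == (4, 1, 0)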
helixyte/everest
everest/repositories/state.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/repositories/state.py#L113-L135
def set_state_data(cls, entity, data): """ Sets the state data for the given entity to the given data. This also works for unmanaged entities. """ attr_names = get_domain_class_attribute_names(type(entity)) nested_items = [] for attr, new_attr_value in iteritems_(data): if not attr.entity_attr in attr_names: raise ValueError('Can not set attribute "%s" for entity ' '"%s".' % (attr.entity_attr, entity)) if '.' in attr.entity_attr: nested_items.append((attr, new_attr_value)) continue else: setattr(entity, attr.entity_attr, new_attr_value) for attr, new_attr_value in nested_items: try: set_nested_attribute(entity, attr.entity_attr, new_attr_value) except AttributeError as exc: if not new_attr_value is None: raise exc
[ "def", "set_state_data", "(", "cls", ",", "entity", ",", "data", ")", ":", "attr_names", "=", "get_domain_class_attribute_names", "(", "type", "(", "entity", ")", ")", "nested_items", "=", "[", "]", "for", "attr", ",", "new_attr_value", "in", "iteritems_", "(", "data", ")", ":", "if", "not", "attr", ".", "entity_attr", "in", "attr_names", ":", "raise", "ValueError", "(", "'Can not set attribute \"%s\" for entity '", "'\"%s\".'", "%", "(", "attr", ".", "entity_attr", ",", "entity", ")", ")", "if", "'.'", "in", "attr", ".", "entity_attr", ":", "nested_items", ".", "append", "(", "(", "attr", ",", "new_attr_value", ")", ")", "continue", "else", ":", "setattr", "(", "entity", ",", "attr", ".", "entity_attr", ",", "new_attr_value", ")", "for", "attr", ",", "new_attr_value", "in", "nested_items", ":", "try", ":", "set_nested_attribute", "(", "entity", ",", "attr", ".", "entity_attr", ",", "new_attr_value", ")", "except", "AttributeError", "as", "exc", ":", "if", "not", "new_attr_value", "is", "None", ":", "raise", "exc" ]
Sets the state data for the given entity to the given data. This also works for unmanaged entities.
[ "Sets", "the", "state", "data", "for", "the", "given", "entity", "to", "the", "given", "data", "." ]
python
train
42.391304
boriel/zxbasic
api/symboltable.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/api/symboltable.py#L812-L819
def check_classes(self, scope=-1):
    """ Check if pending identifiers are defined or not. If not,
    emits a syntax error. If no scope is given, the current one is
    checked.
    """
    for entry in self[scope].values():
        if entry.class_ is None:
            syntax_error(entry.lineno, "Unknown identifier '%s'" % entry.name)
[ "def", "check_classes", "(", "self", ",", "scope", "=", "-", "1", ")", ":", "for", "entry", "in", "self", "[", "scope", "]", ".", "values", "(", ")", ":", "if", "entry", ".", "class_", "is", "None", ":", "syntax_error", "(", "entry", ".", "lineno", ",", "\"Unknown identifier '%s'\"", "%", "entry", ".", "name", ")" ]
Check if pending identifiers are defined or not. If not, emits a syntax error. If no scope is given, the current one is checked.
[ "Check", "if", "pending", "identifiers", "are", "defined", "or", "not", ".", "If", "not", "returns", "a", "syntax", "error", ".", "If", "no", "scope", "is", "given", "the", "current", "one", "is", "checked", "." ]
python
train
45.125
fp12/achallonge
challonge/tournament.py
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/tournament.py#L511-L531
async def get_participant(self, p_id: int, force_update=False) -> Participant:
    """ get a participant by its id

    |methcoro|

    Args:
        p_id: participant id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Participant: None if not found

    Raises:
        APIException

    """
    found_p = self._find_participant(p_id)
    if force_update or found_p is None:
        await self.get_participants()
        found_p = self._find_participant(p_id)
    return found_p
[ "async", "def", "get_participant", "(", "self", ",", "p_id", ":", "int", ",", "force_update", "=", "False", ")", "->", "Participant", ":", "found_p", "=", "self", ".", "_find_participant", "(", "p_id", ")", "if", "force_update", "or", "found_p", "is", "None", ":", "await", "self", ".", "get_participants", "(", ")", "found_p", "=", "self", ".", "_find_participant", "(", "p_id", ")", "return", "found_p" ]
get a participant by its id

|methcoro|

Args:
    p_id: participant id
    force_update (default=False): True to force an update to the Challonge API

Returns:
    Participant: None if not found

Raises:
    APIException
[ "get", "a", "participant", "by", "its", "id" ]
python
train
27.380952
DAI-Lab/Copulas
copulas/__init__.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/__init__.py#L38-L41
def import_object(object_name): """Import an object from its Fully Qualified Name.""" package, name = object_name.rsplit('.', 1) return getattr(importlib.import_module(package), name)
[ "def", "import_object", "(", "object_name", ")", ":", "package", ",", "name", "=", "object_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "return", "getattr", "(", "importlib", ".", "import_module", "(", "package", ")", ",", "name", ")" ]
Import an object from its Fully Qualified Name.
[ "Import", "an", "object", "from", "its", "Fully", "Qualified", "Name", "." ]
python
train
48
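Usage of import_object above is straightforward, e.g. resolving a standard-library class from its fully qualified name:

OrderedDict = import_object('collections.OrderedDict')
od = OrderedDict(a=1)
assert od['a'] == 1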
mattupstate/flask-security
flask_security/utils.py
https://github.com/mattupstate/flask-security/blob/a401fb47018fbbbe0b899ea55afadfd0e3cd847a/flask_security/utils.py#L501-L517
def capture_reset_password_requests(reset_password_sent_at=None): """Testing utility for capturing password reset requests. :param reset_password_sent_at: An optional datetime object to set the user's `reset_password_sent_at` to """ reset_requests = [] def _on(app, **data): reset_requests.append(data) reset_password_instructions_sent.connect(_on) try: yield reset_requests finally: reset_password_instructions_sent.disconnect(_on)
[ "def", "capture_reset_password_requests", "(", "reset_password_sent_at", "=", "None", ")", ":", "reset_requests", "=", "[", "]", "def", "_on", "(", "app", ",", "*", "*", "data", ")", ":", "reset_requests", ".", "append", "(", "data", ")", "reset_password_instructions_sent", ".", "connect", "(", "_on", ")", "try", ":", "yield", "reset_requests", "finally", ":", "reset_password_instructions_sent", ".", "disconnect", "(", "_on", ")" ]
Testing utility for capturing password reset requests. :param reset_password_sent_at: An optional datetime object to set the user's `reset_password_sent_at` to
[ "Testing", "utility", "for", "capturing", "password", "reset", "requests", "." ]
python
train
30.058824
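capture_reset_password_requests above is a context manager (note the yield); a hedged test sketch, assuming a Flask test client and Flask-Security's default /reset endpoint:

with capture_reset_password_requests() as requests:
    client.post('/reset', data={'email': 'user@example.com'})

assert len(requests) == 1
token = requests[0]['token']  # signal payload; exact keys depend on the app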
eandersson/amqpstorm
amqpstorm/channel0.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L140-L166
def _send_start_ok(self, frame_in): """Send Start OK frame. :param specification.Connection.Start frame_in: Amqp frame. :return: """ mechanisms = try_utf8_decode(frame_in.mechanisms) if 'EXTERNAL' in mechanisms: mechanism = 'EXTERNAL' credentials = '\0\0' elif 'PLAIN' in mechanisms: mechanism = 'PLAIN' credentials = self._plain_credentials() else: exception = AMQPConnectionError( 'Unsupported Security Mechanism(s): %s' % frame_in.mechanisms ) self._connection.exceptions.append(exception) return start_ok_frame = specification.Connection.StartOk( mechanism=mechanism, client_properties=self._client_properties(), response=credentials, locale=LOCALE ) self._write_frame(start_ok_frame)
[ "def", "_send_start_ok", "(", "self", ",", "frame_in", ")", ":", "mechanisms", "=", "try_utf8_decode", "(", "frame_in", ".", "mechanisms", ")", "if", "'EXTERNAL'", "in", "mechanisms", ":", "mechanism", "=", "'EXTERNAL'", "credentials", "=", "'\\0\\0'", "elif", "'PLAIN'", "in", "mechanisms", ":", "mechanism", "=", "'PLAIN'", "credentials", "=", "self", ".", "_plain_credentials", "(", ")", "else", ":", "exception", "=", "AMQPConnectionError", "(", "'Unsupported Security Mechanism(s): %s'", "%", "frame_in", ".", "mechanisms", ")", "self", ".", "_connection", ".", "exceptions", ".", "append", "(", "exception", ")", "return", "start_ok_frame", "=", "specification", ".", "Connection", ".", "StartOk", "(", "mechanism", "=", "mechanism", ",", "client_properties", "=", "self", ".", "_client_properties", "(", ")", ",", "response", "=", "credentials", ",", "locale", "=", "LOCALE", ")", "self", ".", "_write_frame", "(", "start_ok_frame", ")" ]
Send Start OK frame. :param specification.Connection.Start frame_in: Amqp frame. :return:
[ "Send", "Start", "OK", "frame", "." ]
python
train
34.333333
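For reference on the credentials built above: SASL PLAIN encodes authzid, authcid and password as NUL-separated fields, which is presumably what _plain_credentials produces, while EXTERNAL sends an empty response. A minimal sketch:

def plain_credentials(username, password):
    # SASL PLAIN: <authzid> NUL <authcid> NUL <password>, authzid left empty
    return '\0%s\0%s' % (username, password)

assert plain_credentials('guest', 'guest') == '\0guest\0guest'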
seantis/suitable
suitable/api.py
https://github.com/seantis/suitable/blob/b056afb753f2b89909edfb6e00ec7c55691135ff/suitable/api.py#L280-L295
def valid_return_codes(self, *codes): """ Sets codes which are considered valid when returned from command modules. The default is (0, ). Should be used as a context:: with api.valid_return_codes(0, 1): api.shell('test -e /tmp/log && rm /tmp/log') """ previous_codes = self._valid_return_codes self._valid_return_codes = codes yield self._valid_return_codes = previous_codes
[ "def", "valid_return_codes", "(", "self", ",", "*", "codes", ")", ":", "previous_codes", "=", "self", ".", "_valid_return_codes", "self", ".", "_valid_return_codes", "=", "codes", "yield", "self", ".", "_valid_return_codes", "=", "previous_codes" ]
Sets codes which are considered valid when returned from command modules. The default is (0, ). Should be used as a context:: with api.valid_return_codes(0, 1): api.shell('test -e /tmp/log && rm /tmp/log')
[ "Sets", "codes", "which", "are", "considered", "valid", "when", "returned", "from", "command", "modules", ".", "The", "default", "is", "(", "0", ")", "." ]
python
train
28.5
estnltk/estnltk
estnltk/mw_verbs/utils.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/utils.py#L127-L164
def matches(self, tokenJson): '''Determines whether given token (tokenJson) satisfies all the rules listed in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is required that at least one item in the list tokenJson[ANALYSIS] satisfies all the rules (but it is not required that all the items should satisfy). Returns a boolean value. Parameters ---------- tokenJson: pyvabamorf's analysis of a single word token; ''' if self.otherRules != None: otherMatches = [] for field in self.otherRules: match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None) otherMatches.append( match ) if not otherMatches or not all(otherMatches): return False elif self.analysisRules == None and all(otherMatches): return True if self.analysisRules != None: assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson) totalMatches = [] for analysis in tokenJson[ANALYSIS]: # Check whether this analysis satisfies all the rules # (if not, discard the analysis) matches = [] for field in self.analysisRules: value = analysis[field] if field in analysis else "" match = (self.analysisRules[field]).match(value) != None matches.append( match ) if not match: break totalMatches.append( all(matches) ) # Return True iff there was at least one analysis that # satisfied all the rules; return any(totalMatches) return False
[ "def", "matches", "(", "self", ",", "tokenJson", ")", ":", "if", "self", ".", "otherRules", "!=", "None", ":", "otherMatches", "=", "[", "]", "for", "field", "in", "self", ".", "otherRules", ":", "match", "=", "field", "in", "tokenJson", "and", "(", "(", "self", ".", "otherRules", "[", "field", "]", ")", ".", "match", "(", "tokenJson", "[", "field", "]", ")", "!=", "None", ")", "otherMatches", ".", "append", "(", "match", ")", "if", "not", "otherMatches", "or", "not", "all", "(", "otherMatches", ")", ":", "return", "False", "elif", "self", ".", "analysisRules", "==", "None", "and", "all", "(", "otherMatches", ")", ":", "return", "True", "if", "self", ".", "analysisRules", "!=", "None", ":", "assert", "ANALYSIS", "in", "tokenJson", ",", "\"No ANALYSIS found within token: \"", "+", "str", "(", "tokenJson", ")", "totalMatches", "=", "[", "]", "for", "analysis", "in", "tokenJson", "[", "ANALYSIS", "]", ":", "# Check whether this analysis satisfies all the rules \r", "# (if not, discard the analysis)\r", "matches", "=", "[", "]", "for", "field", "in", "self", ".", "analysisRules", ":", "value", "=", "analysis", "[", "field", "]", "if", "field", "in", "analysis", "else", "\"\"", "match", "=", "(", "self", ".", "analysisRules", "[", "field", "]", ")", ".", "match", "(", "value", ")", "!=", "None", "matches", ".", "append", "(", "match", ")", "if", "not", "match", ":", "break", "totalMatches", ".", "append", "(", "all", "(", "matches", ")", ")", "# Return True iff there was at least one analysis that \r", "# satisfied all the rules;\r", "return", "any", "(", "totalMatches", ")", "return", "False" ]
Determines whether given token (tokenJson) satisfies all the rules listed in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is required that at least one item in the list tokenJson[ANALYSIS] satisfies all the rules (but it is not required that all the items should satisfy). Returns a boolean value. Parameters ---------- tokenJson: pyvabamorf's analysis of a single word token;
[ "Determines", "whether", "given", "token", "(", "tokenJson", ")", "satisfies", "all", "the", "rules", "listed", "in", "the", "WordTemplate", ".", "If", "the", "rules", "describe", "tokenJson", "[", "ANALYSIS", "]", "it", "is", "required", "that", "at", "least", "one", "item", "in", "the", "list", "tokenJson", "[", "ANALYSIS", "]", "satisfies", "all", "the", "rules", "(", "but", "it", "is", "not", "required", "that", "all", "the", "items", "should", "satisfy", ")", ".", "Returns", "a", "boolean", "value", ".", "Parameters", "----------", "tokenJson", ":", "pyvabamorf", "s", "analysis", "of", "a", "single", "word", "token", ";" ]
python
train
49.026316
crytic/slither
slither/core/cfg/node.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/cfg/node.py#L433-L449
def is_conditional(self, include_loop=True):
    """
        Check if the node is a conditional node
        A conditional node is either an IF or a require/assert or a RETURN bool
    Returns:
        bool: True if the node is a conditional node
    """
    if self.contains_if(include_loop) or self.contains_require_or_assert():
        return True
    if self.irs:
        last_ir = self.irs[-1]
        if last_ir:
            if isinstance(last_ir, Return):
                for r in last_ir.read:
                    if r.type == ElementaryType('bool'):
                        return True
    return False
[ "def", "is_conditional", "(", "self", ",", "include_loop", "=", "True", ")", ":", "if", "self", ".", "contains_if", "(", "include_loop", ")", "or", "self", ".", "contains_require_or_assert", "(", ")", ":", "return", "True", "if", "self", ".", "irs", ":", "last_ir", "=", "self", ".", "irs", "[", "-", "1", "]", "if", "last_ir", ":", "if", "isinstance", "(", "last_ir", ",", "Return", ")", ":", "for", "r", "in", "last_ir", ".", "read", ":", "if", "r", ".", "type", "==", "ElementaryType", "(", "'bool'", ")", ":", "return", "True", "return", "False" ]
Check if the node is a conditional node
A conditional node is either an IF or a require/assert or a RETURN bool
Returns:
    bool: True if the node is a conditional node
[ "Check", "if", "the", "node", "is", "a", "conditional", "node", "A", "conditional", "node", "is", "either", "a", "IF", "or", "a", "require", "/", "assert", "or", "a", "RETURN", "bool", "Returns", ":", "bool", ":", "True", "if", "the", "node", "is", "a", "conditional", "node" ]
python
train
38.705882
yakupadakli/python-unsplash
unsplash/photo.py
https://github.com/yakupadakli/python-unsplash/blob/6e43dce3225237e1b8111fd475fb98b1ea33972c/unsplash/photo.py#L80-L109
def search(self, query, category=None, orientation=None, page=1, per_page=10):
    """
    Get a single page from a photo search.
    Optionally limit your search to a set of categories by supplying the category ID’s.

    Note: If supplying multiple category ID’s,
    the resulting photos will be those that match all of the given categories,
    not ones that match any category.

    :param query [string]: Search terms.
    :param category [string]: Category ID(‘s) to filter search. If multiple, comma-separated. (deprecated)
    :param orientation [string]: Filter search results by photo orientation.
    Valid values are landscape, portrait, and squarish.
    :param page [integer]: Page number to retrieve. (Optional; default: 1)
    :param per_page [integer]: Number of items per page. (Optional; default: 10)
    :return: [Array]: A single page of the curated Photo list.
    :raise Exception: If the given orientation is not in the default orientation values.
    """
    if orientation and orientation not in self.orientation_values:
        raise Exception("invalid orientation value: %s" % orientation)
    params = {
        "query": query,
        "category": category,
        "orientation": orientation,
        "page": page,
        "per_page": per_page
    }
    url = "/photos/search"
    result = self._get(url, params=params)
    return PhotoModel.parse_list(result)
[ "def", "search", "(", "self", ",", "query", ",", "category", "=", "None", ",", "orientation", "=", "None", ",", "page", "=", "1", ",", "per_page", "=", "10", ")", ":", "if", "orientation", "and", "orientation", "not", "in", "self", ".", "orientation_values", ":", "raise", "Exception", "(", ")", "params", "=", "{", "\"query\"", ":", "query", ",", "\"category\"", ":", "category", ",", "\"orientation\"", ":", "orientation", ",", "\"page\"", ":", "page", ",", "\"per_page\"", ":", "per_page", "}", "url", "=", "\"/photos/search\"", "result", "=", "self", ".", "_get", "(", "url", ",", "params", "=", "params", ")", "return", "PhotoModel", ".", "parse_list", "(", "result", ")" ]
Get a single page from a photo search. Optionally limit your search to a set of categories by supplying the category ID’s.

Note: If supplying multiple category ID’s, the resulting photos will be those that match all of the given categories, not ones that match any category.

:param query [string]: Search terms.
:param category [string]: Category ID(‘s) to filter search. If multiple, comma-separated. (deprecated)
:param orientation [string]: Filter search results by photo orientation. Valid values are landscape, portrait, and squarish.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the curated Photo list.
:raise Exception: If the given orientation is not in the default orientation values.
[ "Get", "a", "single", "page", "from", "a", "photo", "search", ".", "Optionally", "limit", "your", "search", "to", "a", "set", "of", "categories", "by", "supplying", "the", "category", "ID’s", "." ]
python
train
47.633333
jmoiron/johnny-cache
johnny/transaction.py
https://github.com/jmoiron/johnny-cache/blob/d96ea94c5dfcde517ff8f65d6ba4e435d8a0168c/johnny/transaction.py#L81-L93
def set(self, key, val, timeout=None, using=None): """ Set will be using the generational key, so if another thread bumps this key, the localstore version will still be invalid. If the key is bumped during a transaction it will be new to the global cache on commit, so it will still be a bump. """ if timeout is None: timeout = self.timeout if self.is_managed(using=using) and self._patched_var: self.local[key] = val else: self.cache_backend.set(key, val, timeout)
[ "def", "set", "(", "self", ",", "key", ",", "val", ",", "timeout", "=", "None", ",", "using", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "timeout", "if", "self", ".", "is_managed", "(", "using", "=", "using", ")", "and", "self", ".", "_patched_var", ":", "self", ".", "local", "[", "key", "]", "=", "val", "else", ":", "self", ".", "cache_backend", ".", "set", "(", "key", ",", "val", ",", "timeout", ")" ]
Set will be using the generational key, so if another thread bumps this key, the localstore version will still be invalid. If the key is bumped during a transaction it will be new to the global cache on commit, so it will still be a bump.
[ "Set", "will", "be", "using", "the", "generational", "key", "so", "if", "another", "thread", "bumps", "this", "key", "the", "localstore", "version", "will", "still", "be", "invalid", ".", "If", "the", "key", "is", "bumped", "during", "a", "transaction", "it", "will", "be", "new", "to", "the", "global", "cache", "on", "commit", "so", "it", "will", "still", "be", "a", "bump", "." ]
python
train
43.153846
UCL-INGI/INGInious
inginious/frontend/user_manager.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L518-L523
def user_saw_task(self, username, courseid, taskid): """ Set in the database that the user has viewed this task """ self._database.user_tasks.update({"username": username, "courseid": courseid, "taskid": taskid}, {"$setOnInsert": {"username": username, "courseid": courseid, "taskid": taskid, "tried": 0, "succeeded": False, "grade": 0.0, "submissionid": None, "state": ""}}, upsert=True)
[ "def", "user_saw_task", "(", "self", ",", "username", ",", "courseid", ",", "taskid", ")", ":", "self", ".", "_database", ".", "user_tasks", ".", "update", "(", "{", "\"username\"", ":", "username", ",", "\"courseid\"", ":", "courseid", ",", "\"taskid\"", ":", "taskid", "}", ",", "{", "\"$setOnInsert\"", ":", "{", "\"username\"", ":", "username", ",", "\"courseid\"", ":", "courseid", ",", "\"taskid\"", ":", "taskid", ",", "\"tried\"", ":", "0", ",", "\"succeeded\"", ":", "False", ",", "\"grade\"", ":", "0.0", ",", "\"submissionid\"", ":", "None", ",", "\"state\"", ":", "\"\"", "}", "}", ",", "upsert", "=", "True", ")" ]
Set in the database that the user has viewed this task
[ "Set", "in", "the", "database", "that", "the", "user", "has", "viewed", "this", "task" ]
python
train
90
googleapis/google-cloud-python
logging/docs/snippets.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/docs/snippets.py#L104-L112
def client_list_entries_multi_project( client, to_delete ): # pylint: disable=unused-argument """List entries via client across multiple projects.""" # [START client_list_entries_multi_project] PROJECT_IDS = ["one-project", "another-project"] for entry in client.list_entries(project_ids=PROJECT_IDS): # API call(s) do_something_with(entry)
[ "def", "client_list_entries_multi_project", "(", "client", ",", "to_delete", ")", ":", "# pylint: disable=unused-argument", "# [START client_list_entries_multi_project]", "PROJECT_IDS", "=", "[", "\"one-project\"", ",", "\"another-project\"", "]", "for", "entry", "in", "client", ".", "list_entries", "(", "project_ids", "=", "PROJECT_IDS", ")", ":", "# API call(s)", "do_something_with", "(", "entry", ")" ]
List entries via client across multiple projects.
[ "List", "entries", "via", "client", "across", "multiple", "projects", "." ]
python
train
40.333333
mikedh/trimesh
trimesh/primitives.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/primitives.py#L548-L556
def is_oriented(self): """ Returns whether or not the current box is rotated at all. """ if util.is_shape(self.primitive.transform, (4, 4)): return not np.allclose(self.primitive.transform[ 0:3, 0:3], np.eye(3)) else: return False
[ "def", "is_oriented", "(", "self", ")", ":", "if", "util", ".", "is_shape", "(", "self", ".", "primitive", ".", "transform", ",", "(", "4", ",", "4", ")", ")", ":", "return", "not", "np", ".", "allclose", "(", "self", ".", "primitive", ".", "transform", "[", "0", ":", "3", ",", "0", ":", "3", "]", ",", "np", ".", "eye", "(", "3", ")", ")", "else", ":", "return", "False" ]
Returns whether or not the current box is rotated at all.
[ "Returns", "whether", "or", "not", "the", "current", "box", "is", "rotated", "at", "all", "." ]
python
train
35.666667
c0fec0de/anytree
anytree/importer/jsonimporter.py
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/importer/jsonimporter.py#L60-L62
def import_(self, data): """Read JSON from `data`.""" return self.__import(json.loads(data, **self.kwargs))
[ "def", "import_", "(", "self", ",", "data", ")", ":", "return", "self", ".", "__import", "(", "json", ".", "loads", "(", "data", ",", "*", "*", "self", ".", "kwargs", ")", ")" ]
Read JSON from `data`.
[ "Read", "JSON", "from", "data", "." ]
python
train
40.333333
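A typical round trip with the importer above; the JSON structure follows anytree's DictImporter convention, where 'children' holds nested dicts:

from anytree.importer import JsonImporter
from anytree import RenderTree

importer = JsonImporter()
root = importer.import_('{"a": "root", "children": [{"a": "sub0"}, {"a": "sub1"}]}')
for row in RenderTree(root):
    print(row.node.a)  # root, sub0, sub1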
simpleai-team/simpleai
simpleai/search/models.py
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/models.py#L102-L117
def expand(self, local_search=False): '''Create successors.''' new_nodes = [] for action in self.problem.actions(self.state): new_state = self.problem.result(self.state, action) cost = self.problem.cost(self.state, action, new_state) nodefactory = self.__class__ new_nodes.append(nodefactory(state=new_state, parent=None if local_search else self, problem=self.problem, action=action, cost=self.cost + cost, depth=self.depth + 1)) return new_nodes
[ "def", "expand", "(", "self", ",", "local_search", "=", "False", ")", ":", "new_nodes", "=", "[", "]", "for", "action", "in", "self", ".", "problem", ".", "actions", "(", "self", ".", "state", ")", ":", "new_state", "=", "self", ".", "problem", ".", "result", "(", "self", ".", "state", ",", "action", ")", "cost", "=", "self", ".", "problem", ".", "cost", "(", "self", ".", "state", ",", "action", ",", "new_state", ")", "nodefactory", "=", "self", ".", "__class__", "new_nodes", ".", "append", "(", "nodefactory", "(", "state", "=", "new_state", ",", "parent", "=", "None", "if", "local_search", "else", "self", ",", "problem", "=", "self", ".", "problem", ",", "action", "=", "action", ",", "cost", "=", "self", ".", "cost", "+", "cost", ",", "depth", "=", "self", ".", "depth", "+", "1", ")", ")", "return", "new_nodes" ]
Create successors.
[ "Create", "successors", "." ]
python
train
49.4375
rcarmo/pngcanvas
pngcanvas.py
https://github.com/rcarmo/pngcanvas/blob/e2eaa0d5ba353005b3b658f6ee453c1956340670/pngcanvas.py#L237-L251
def dump(self): """Dump the image data""" scan_lines = bytearray() for y in range(self.height): scan_lines.append(0) # filter type 0 (None) scan_lines.extend( self.canvas[(y * self.width * 4):((y + 1) * self.width * 4)] ) # image represented as RGBA tuples, no interlacing return SIGNATURE + \ self.pack_chunk(b'IHDR', struct.pack(b"!2I5B", self.width, self.height, 8, 6, 0, 0, 0)) + \ self.pack_chunk(b'IDAT', zlib.compress(bytes(scan_lines), 9)) + \ self.pack_chunk(b'IEND', b'')
[ "def", "dump", "(", "self", ")", ":", "scan_lines", "=", "bytearray", "(", ")", "for", "y", "in", "range", "(", "self", ".", "height", ")", ":", "scan_lines", ".", "append", "(", "0", ")", "# filter type 0 (None)", "scan_lines", ".", "extend", "(", "self", ".", "canvas", "[", "(", "y", "*", "self", ".", "width", "*", "4", ")", ":", "(", "(", "y", "+", "1", ")", "*", "self", ".", "width", "*", "4", ")", "]", ")", "# image represented as RGBA tuples, no interlacing", "return", "SIGNATURE", "+", "self", ".", "pack_chunk", "(", "b'IHDR'", ",", "struct", ".", "pack", "(", "b\"!2I5B\"", ",", "self", ".", "width", ",", "self", ".", "height", ",", "8", ",", "6", ",", "0", ",", "0", ",", "0", ")", ")", "+", "self", ".", "pack_chunk", "(", "b'IDAT'", ",", "zlib", ".", "compress", "(", "bytes", "(", "scan_lines", ")", ",", "9", ")", ")", "+", "self", ".", "pack_chunk", "(", "b'IEND'", ",", "b''", ")" ]
Dump the image data
[ "Dump", "the", "image", "data" ]
python
train
46.266667
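An end-to-end sketch for dump() above, writing a small PNG to disk; the drawing call is an assumption about pngcanvas' helper API, while the dump/write part follows directly from the code shown:

canvas = PNGCanvas(64, 64)             # RGBA canvas; dump() emits filter type 0 per scanline
canvas.filled_rectangle(8, 8, 55, 55)  # assumed pngcanvas drawing helper
with open('out.png', 'wb') as f:
    f.write(canvas.dump())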
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/bluez_dbus/gatt.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/bluez_dbus/gatt.py#L111-L117
def list_descriptors(self): """Return list of GATT descriptors that have been discovered for this characteristic. """ paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors') return map(BluezGattDescriptor, get_provider()._get_objects_by_path(paths))
[ "def", "list_descriptors", "(", "self", ")", ":", "paths", "=", "self", ".", "_props", ".", "Get", "(", "_CHARACTERISTIC_INTERFACE", ",", "'Descriptors'", ")", "return", "map", "(", "BluezGattDescriptor", ",", "get_provider", "(", ")", ".", "_get_objects_by_path", "(", "paths", ")", ")" ]
Return list of GATT descriptors that have been discovered for this characteristic.
[ "Return", "list", "of", "GATT", "descriptors", "that", "have", "been", "discovered", "for", "this", "characteristic", "." ]
python
valid
44.571429
user-cont/conu
conu/backend/k8s/client.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/client.py#L52-L71
def get_apps_api(): """ Create instance of Apps V1 API of kubernetes: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md :return: instance of client """ global apps_api if apps_api is None: config.load_kube_config() if API_KEY is not None: # Configure API key authorization: BearerToken configuration = client.Configuration() configuration.api_key['authorization'] = API_KEY configuration.api_key_prefix['authorization'] = 'Bearer' apps_api = client.AppsV1Api(client.ApiClient(configuration)) else: apps_api = client.AppsV1Api() return apps_api
[ "def", "get_apps_api", "(", ")", ":", "global", "apps_api", "if", "apps_api", "is", "None", ":", "config", ".", "load_kube_config", "(", ")", "if", "API_KEY", "is", "not", "None", ":", "# Configure API key authorization: BearerToken", "configuration", "=", "client", ".", "Configuration", "(", ")", "configuration", ".", "api_key", "[", "'authorization'", "]", "=", "API_KEY", "configuration", ".", "api_key_prefix", "[", "'authorization'", "]", "=", "'Bearer'", "apps_api", "=", "client", ".", "AppsV1Api", "(", "client", ".", "ApiClient", "(", "configuration", ")", ")", "else", ":", "apps_api", "=", "client", ".", "AppsV1Api", "(", ")", "return", "apps_api" ]
Create instance of Apps V1 API of kubernetes: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md :return: instance of client
[ "Create", "instance", "of", "Apps", "V1", "API", "of", "kubernetes", ":", "https", ":", "//", "github", ".", "com", "/", "kubernetes", "-", "client", "/", "python", "/", "blob", "/", "master", "/", "kubernetes", "/", "docs", "/", "AppsV1Api", ".", "md", ":", "return", ":", "instance", "of", "client" ]
python
train
34.45
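Once get_apps_api above has built the client, any AppsV1Api call works on it; a short sketch listing deployments (cluster access assumed):

apps = get_apps_api()
deployments = apps.list_deployment_for_all_namespaces(watch=False)
for item in deployments.items:
    print(item.metadata.namespace, item.metadata.name)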
tipsi/tipsi_tools
tipsi_tools/unix.py
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L143-L148
def interpolate_sysenv(line, defaults={}):
    '''
    Format line with system environment variables + defaults
    '''
    map = ChainMap(os.environ, defaults)
    return line.format(**map)
[ "def", "interpolate_sysenv", "(", "line", ",", "defaults", "=", "{", "}", ")", ":", "map", "=", "ChainMap", "(", "os", ".", "environ", ",", "defaults", ")", "return", "line", ".", "format", "(", "*", "*", "map", ")" ]
Format line with system environment variables + defaults
[ "Format", "line", "system", "environment", "variables", "+", "defaults" ]
python
train
30
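An example of the precedence in interpolate_sysenv above: os.environ wins over the defaults because it comes first in the ChainMap:

import os

os.environ['PGHOST'] = 'db.internal'
dsn = interpolate_sysenv('postgresql://{PGUSER}@{PGHOST}/{PGDATABASE}',
                         {'PGUSER': 'app', 'PGHOST': 'localhost', 'PGDATABASE': 'app'})
assert dsn == 'postgresql://app@db.internal/app'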
singularityhub/sregistry-cli
sregistry/main/google_build/query.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/google_build/query.py#L78-L105
def container_query(self, query, quiet=False): '''search for a specific container. This function would likely be similar to the above, but have different filter criteria from the user (based on the query) ''' results = self._list_containers() matches = [] for result in results: for key,val in result.metadata.items(): if query in val and result not in matches: matches.append(result) if not quiet: bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches))) for image in matches: size = round(image.size / (1024*1024.0)) bot.custom(prefix=image.name, color="CYAN") bot.custom(prefix='id: ', message=image.id) bot.custom(prefix='uri: ', message=image.metadata['name']) bot.custom(prefix='updated:', message=image.updated) bot.custom(prefix='size: ', message=' %s MB' %(size)) bot.custom(prefix='md5: ', message=image.md5_hash) if "public_url" in image.metadata: public_url = image.metadata['public_url'] bot.custom(prefix='url: ', message=public_url) bot.newline() return matches
[ "def", "container_query", "(", "self", ",", "query", ",", "quiet", "=", "False", ")", ":", "results", "=", "self", ".", "_list_containers", "(", ")", "matches", "=", "[", "]", "for", "result", "in", "results", ":", "for", "key", ",", "val", "in", "result", ".", "metadata", ".", "items", "(", ")", ":", "if", "query", "in", "val", "and", "result", "not", "in", "matches", ":", "matches", ".", "append", "(", "result", ")", "if", "not", "quiet", ":", "bot", ".", "info", "(", "\"[gs://%s] Found %s containers\"", "%", "(", "self", ".", "_bucket_name", ",", "len", "(", "matches", ")", ")", ")", "for", "image", "in", "matches", ":", "size", "=", "round", "(", "image", ".", "size", "/", "(", "1024", "*", "1024.0", ")", ")", "bot", ".", "custom", "(", "prefix", "=", "image", ".", "name", ",", "color", "=", "\"CYAN\"", ")", "bot", ".", "custom", "(", "prefix", "=", "'id: '", ",", "message", "=", "image", ".", "id", ")", "bot", ".", "custom", "(", "prefix", "=", "'uri: '", ",", "message", "=", "image", ".", "metadata", "[", "'name'", "]", ")", "bot", ".", "custom", "(", "prefix", "=", "'updated:'", ",", "message", "=", "image", ".", "updated", ")", "bot", ".", "custom", "(", "prefix", "=", "'size: '", ",", "message", "=", "' %s MB'", "%", "(", "size", ")", ")", "bot", ".", "custom", "(", "prefix", "=", "'md5: '", ",", "message", "=", "image", ".", "md5_hash", ")", "if", "\"public_url\"", "in", "image", ".", "metadata", ":", "public_url", "=", "image", ".", "metadata", "[", "'public_url'", "]", "bot", ".", "custom", "(", "prefix", "=", "'url: '", ",", "message", "=", "public_url", ")", "bot", ".", "newline", "(", ")", "return", "matches" ]
search for a specific container. This function would likely be similar to the above, but have different filter criteria from the user (based on the query)
[ "search", "for", "a", "specific", "container", ".", "This", "function", "would", "likely", "be", "similar", "to", "the", "above", "but", "have", "different", "filter", "criteria", "from", "the", "user", "(", "based", "on", "the", "query", ")" ]
python
test
43.535714
indico/indico-plugins
chat/indico_chat/notifications.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/chat/indico_chat/notifications.py#L23-L31
def notify_created(room, event, user): """Notifies about the creation of a chatroom. :param room: the chatroom :param event: the event :param user: the user performing the action """ tpl = get_plugin_template_module('emails/created.txt', chatroom=room, event=event, user=user) _send(event, tpl)
[ "def", "notify_created", "(", "room", ",", "event", ",", "user", ")", ":", "tpl", "=", "get_plugin_template_module", "(", "'emails/created.txt'", ",", "chatroom", "=", "room", ",", "event", "=", "event", ",", "user", "=", "user", ")", "_send", "(", "event", ",", "tpl", ")" ]
Notifies about the creation of a chatroom. :param room: the chatroom :param event: the event :param user: the user performing the action
[ "Notifies", "about", "the", "creation", "of", "a", "chatroom", "." ]
python
train
35
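A minimal sketch of the intended call site, assuming a chatroom was just attached to an event inside the plugin; the variable names are placeholders assumed to be in scope:

from indico_chat.notifications import notify_created

# room: the newly created chatroom; event: the event it belongs to;
# user: the user who performed the creation.
notify_created(room, event, user)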
andycasey/sick
sick/plot.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/plot.py#L150-L263
def chains(xs, labels=None, truths=None, truth_color=u"#4682b4", burn=None,
    alpha=0.5, fig=None):
    """
    Create a plot showing the walker values for each parameter at every step.

    :param xs:
        The samples. This should be a 3D :class:`numpy.ndarray` of size
        (``n_walkers``, ``n_steps``, ``n_parameters``).

    :type xs:
        :class:`numpy.ndarray`

    :param labels: [optional]
        Labels for all the parameters.

    :type labels:
        iterable of strings or None

    :param truths: [optional]
        Reference values to indicate on the plots.

    :type truths:
        iterable of floats or None

    :param truth_color: [optional]
        A ``matplotlib`` style color for the ``truths`` markers.

    :param burn: [optional]
        Reference step to indicate on the plots.

    :type burn:
        integer or None

    :param alpha: [optional]
        Transparency of individual walker lines, between zero and one.

    :type alpha:
        float

    :param fig: [optional]
        Overplot onto the provided figure object.

    :type fig:
        :class:`matplotlib.Figure` or None

    :raises ValueError:
        If a ``fig`` is provided with the incorrect number of axes.

    :returns:
        The chain figure.

    :rtype:
        :class:`matplotlib.Figure`
    """

    n_walkers, n_steps, K = xs.shape

    if labels is not None:
        assert len(labels) == K

    if truths is not None:
        assert len(truths) == K

    factor = 2.0
    lbdim = 0.5 * factor
    trdim = 0.2 * factor
    whspace = 0.10
    width = 15.
    height = factor * K + factor * (K - 1.) * whspace
    dimy = lbdim + height + trdim
    dimx = lbdim + width + trdim

    if fig is None:
        fig, axes = plt.subplots(K, 1, figsize=(dimx, dimy))

    else:
        try:
            # Flatten to one axis per parameter. (The original reshaped to
            # (1, K), which made the loop below iterate over rows of axes
            # rather than individual axes.)
            axes = np.array(fig.axes).reshape(K)
        except ValueError:
            raise ValueError("Provided figure has {0} axes, but data has "
                "parameters K={1}".format(len(fig.axes), K))

    lm = lbdim / dimx
    bm = lbdim / dimy
    trm = (lbdim + height) / dimy
    fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
        wspace=whspace, hspace=whspace)

    if K == 1:
        axes = [axes]

    for k, ax in enumerate(axes):
        for walker in range(n_walkers):
            ax.plot(xs[walker, :, k], color="k", alpha=alpha)

        if burn is not None:
            ax.axvline(burn, color="k", linestyle=":")

        if truths is not None:
            ax.axhline(truths[k], color=truth_color, lw=2)

        ax.set_xlim(0, n_steps)
        if k < K - 1:
            ax.set_xticklabels([])
        else:
            ax.set_xlabel("Step")

        ax.yaxis.set_major_locator(MaxNLocator(4))
        for tick_label in ax.get_yticklabels():
            tick_label.set_rotation(45)

        if labels is not None:
            ax.set_ylabel(labels[k])
            ax.yaxis.set_label_coords(-0.05, 0.5)

    return fig
[ "def", "chains", "(", "xs", ",", "labels", "=", "None", ",", "truths", "=", "None", ",", "truth_color", "=", "u\"#4682b4\"", ",", "burn", "=", "None", ",", "alpha", "=", "0.5", ",", "fig", "=", "None", ")", ":", "n_walkers", ",", "n_steps", ",", "K", "=", "xs", ".", "shape", "if", "labels", "is", "not", "None", ":", "assert", "len", "(", "labels", ")", "==", "K", "if", "truths", "is", "not", "None", ":", "assert", "len", "(", "truths", ")", "==", "K", "factor", "=", "2.0", "lbdim", "=", "0.5", "*", "factor", "trdim", "=", "0.2", "*", "factor", "whspace", "=", "0.10", "width", "=", "15.", "height", "=", "factor", "*", "K", "+", "factor", "*", "(", "K", "-", "1.", ")", "*", "whspace", "dimy", "=", "lbdim", "+", "height", "+", "trdim", "dimx", "=", "lbdim", "+", "width", "+", "trdim", "if", "fig", "is", "None", ":", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "K", ",", "1", ",", "figsize", "=", "(", "dimx", ",", "dimy", ")", ")", "else", ":", "try", ":", "axes", "=", "np", ".", "array", "(", "fig", ".", "axes", ")", ".", "reshape", "(", "(", "1", ",", "K", ")", ")", "except", ":", "raise", "ValueError", "(", "\"Provided figure has {0} axes, but data has \"", "\"parameters K={1}\"", ".", "format", "(", "len", "(", "fig", ".", "axes", ")", ",", "K", ")", ")", "lm", "=", "lbdim", "/", "dimx", "bm", "=", "lbdim", "/", "dimy", "trm", "=", "(", "lbdim", "+", "height", ")", "/", "dimy", "fig", ".", "subplots_adjust", "(", "left", "=", "lm", ",", "bottom", "=", "bm", ",", "right", "=", "trm", ",", "top", "=", "trm", ",", "wspace", "=", "whspace", ",", "hspace", "=", "whspace", ")", "if", "K", "==", "1", ":", "axes", "=", "[", "axes", "]", "for", "k", ",", "ax", "in", "enumerate", "(", "axes", ")", ":", "for", "walker", "in", "range", "(", "n_walkers", ")", ":", "ax", ".", "plot", "(", "xs", "[", "walker", ",", ":", ",", "k", "]", ",", "color", "=", "\"k\"", ",", "alpha", "=", "alpha", ")", "if", "burn", "is", "not", "None", ":", "ax", ".", "axvline", "(", "burn", ",", "color", "=", "\"k\"", ",", "linestyle", "=", "\":\"", ")", "if", "truths", "is", "not", "None", ":", "ax", ".", "axhline", "(", "truths", "[", "k", "]", ",", "color", "=", "truth_color", ",", "lw", "=", "2", ")", "ax", ".", "set_xlim", "(", "0", ",", "n_steps", ")", "if", "k", "<", "K", "-", "1", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "else", ":", "ax", ".", "set_xlabel", "(", "\"Step\"", ")", "ax", ".", "yaxis", ".", "set_major_locator", "(", "MaxNLocator", "(", "4", ")", ")", "[", "l", ".", "set_rotation", "(", "45", ")", "for", "l", "in", "ax", ".", "get_yticklabels", "(", ")", "]", "if", "labels", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "labels", "[", "k", "]", ")", "ax", ".", "yaxis", ".", "set_label_coords", "(", "-", "0.05", ",", "0.5", ")", "return", "fig" ]
Create a plot showing the walker values for each parameter at every step. :param xs: The samples. This should be a 3D :class:`numpy.ndarray` of size (``n_walkers``, ``n_steps``, ``n_parameters``). :type xs: :class:`numpy.ndarray` :param labels: [optional] Labels for all the parameters. :type labels: iterable of strings or None :param truths: [optional] Reference values to indicate on the plots. :type truths: iterable of floats or None :param truth_color: [optional] A ``matplotlib`` style color for the ``truths`` markers. :param burn: [optional] Reference step to indicate on the plots. :type burn: integer or None :param alpha: [optional] Transparency of individual walker lines between zero and one. :type alpha: float :param fig: [optional] Overplot onto the provided figure object. :type fig: :class:`matplotlib.Figure` or None :raises ValueError: If a ``fig`` is provided with the incorrect number of axes. :returns: The chain figure. :rtype: :class:`matplotlib.Figure`
[ "Create", "a", "plot", "showing", "the", "walker", "values", "for", "each", "parameter", "at", "every", "step", "." ]
python
train
24.719298
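A runnable sketch of chains() on synthetic samples, assuming numpy and matplotlib are installed and a non-interactive backend is acceptable; the array shape follows the documented (n_walkers, n_steps, n_parameters) convention:

import numpy as np
from sick.plot import chains

xs = np.random.randn(10, 200, 3)   # 10 walkers, 200 steps, 3 parameters
fig = chains(xs, labels=["a", "b", "c"], truths=[0.0, 0.0, 0.0], burn=50)
fig.savefig("chains.png")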
trailofbits/manticore
manticore/platforms/evm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L699-L708
def read_code(self, address, size=1):
    """
    Read `size` bytes from the bytecode starting at `address`. If fewer
    than `size` bytes are available, the result is padded with '\x00'
    bytes, as the spec requires.
    """
    assert address < len(self.bytecode)
    value = self.bytecode[address:address + size]
    if len(value) < size:
        value += '\x00' * (size - len(value))  # pad with null (spec)
    return value
[ "def", "read_code", "(", "self", ",", "address", ",", "size", "=", "1", ")", ":", "assert", "address", "<", "len", "(", "self", ".", "bytecode", ")", "value", "=", "self", ".", "bytecode", "[", "address", ":", "address", "+", "size", "]", "if", "len", "(", "value", ")", "<", "size", ":", "value", "+=", "'\\x00'", "*", "(", "size", "-", "len", "(", "value", ")", ")", "# pad with null (spec)", "return", "value" ]
Read `size` bytes from the bytecode starting at `address`. If fewer than `size` bytes are available, the result is padded with \x00 bytes, as the spec requires.
[ "Read", "size", "byte", "from", "bytecode", ".", "If", "less", "than", "size", "bytes", "are", "available", "result", "will", "be", "pad", "with", "\\", "x00" ]
python
valid
38.8
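To make the padding rule concrete, here is the same slice-and-pad logic applied to a literal (Python 2-style) byte string; the opcode bytes are arbitrary:

code = '\x60\x60\x52'                  # 3 bytes of bytecode
address, size = 1, 5
value = code[address:address + size]   # only 2 bytes remain past offset 1
value += '\x00' * (size - len(value))  # pad with null, per the spec
assert value == '\x60\x52\x00\x00\x00'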
bitesofcode/projex
projex/security.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/security.py#L228-L238
def pad(text, bits=32):
    """
    Pads the input text so that its length is a multiple of the block
    size required for encryption. N = bits - (len(text) % bits) copies
    of chr(N) are appended, PKCS#7-style (a full block of chr(bits) is
    appended when the length is already a multiple of bits).

    :param      text | <str>
                bits | <int>

    :return     <str>
    """
    return text + (bits - len(text) % bits) * chr(bits - len(text) % bits)
[ "def", "pad", "(", "text", ",", "bits", "=", "32", ")", ":", "return", "text", "+", "(", "bits", "-", "len", "(", "text", ")", "%", "bits", ")", "*", "chr", "(", "bits", "-", "len", "(", "text", ")", "%", "bits", ")" ]
Pads the input text so that its length is a multiple of the block size required for encryption. N = bits - (len(text) % bits) copies of chr(N) are appended, PKCS#7-style (a full block of chr(bits) is appended when the length is already a multiple of bits).

:param text | <str>
       bits | <int>

:return <str>
[ "Pads", "the", "inputted", "text", "to", "ensure", "it", "fits", "the", "proper", "block", "length", "for", "encryption", ".", ":", "param", "text", "|", "<str", ">", "bits", "|", "<int", ">", ":", "return", "<str", ">" ]
python
train
25.727273
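A worked example of the padding arithmetic, assuming projex is importable: 'secret' is 6 characters, so a 16-character block needs 10 characters of chr(10):

from projex.security import pad

padded = pad('secret', bits=16)
assert len(padded) % 16 == 0
assert padded == 'secret' + chr(10) * 10   # 16 - 6 = 10 pad characters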
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1207-L1211
def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Transforms the output of parse() into a Text object. The token parameter lists the order of tags in each token in the input string. """ return Text(string, token)
[ "def", "tree", "(", "string", ",", "token", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "return", "Text", "(", "string", ",", "token", ")" ]
Transforms the output of parse() into a Text object. The token parameter lists the order of tags in each token in the input string.
[ "Transforms", "the", "output", "of", "parse", "()", "into", "a", "Text", "object", ".", "The", "token", "parameter", "lists", "the", "order", "of", "tags", "in", "each", "token", "in", "the", "input", "string", "." ]
python
train
50.2
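A hedged sketch of the round trip from parse() to tree(); it assumes a pattern-style parse() lives alongside this module and that relations=True, lemmata=True produce the seven slash-separated tags the default token list expects (exact import paths vary by distribution):

# Hypothetical usage; textblob-de bundles these names under ext/_pattern.
s = parse("the cat sat on the mat", relations=True, lemmata=True)
t = tree(s, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
for sentence in t:        # a Text iterates over Sentence objects
    print(sentence.words)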
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L163-L175
def url_to_tile(url): """ Extracts tile name, date and AWS index from tile url on AWS. :param url: class input parameter 'metafiles' :type url: str :return: Name of tile, date and AWS index which uniquely identifies tile on AWS :rtype: (str, str, int) """ info = url.strip('/').split('/') name = ''.join(info[-7: -4]) date = '-'.join(info[-4: -1]) return name, date, int(info[-1])
[ "def", "url_to_tile", "(", "url", ")", ":", "info", "=", "url", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "name", "=", "''", ".", "join", "(", "info", "[", "-", "7", ":", "-", "4", "]", ")", "date", "=", "'-'", ".", "join", "(", "info", "[", "-", "4", ":", "-", "1", "]", ")", "return", "name", ",", "date", ",", "int", "(", "info", "[", "-", "1", "]", ")" ]
Extracts tile name, date and AWS index from tile url on AWS. :param url: class input parameter 'metafiles' :type url: str :return: Name of tile, date and AWS index which uniquely identifies tile on AWS :rtype: (str, str, int)
[ "Extracts", "tile", "name", "date", "and", "AWS", "index", "from", "tile", "url", "on", "AWS", "." ]
python
train
35.153846
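A worked example on a typical Sentinel-2 L1C tile URL, assuming the standard tiles/UTM_ZONE/LAT_BAND/GRID_SQUARE/YEAR/MONTH/DAY/AWS_INDEX path layout (the exact import path may differ by package version):

from sentinelhub.aws import url_to_tile

url = 'http://sentinel-s2-l1c.s3.amazonaws.com/tiles/10/S/DG/2015/12/7/0'
name, date, aws_index = url_to_tile(url)
assert (name, date, aws_index) == ('10SDG', '2015-12-7', 0)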
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L288-L314
def split_unescaped(char, string, include_empty_strings=False):
    '''
    :param char: The character on which to split the string
    :type char: string
    :param string: The string to split
    :type string: string
    :param include_empty_strings: If True, keep empty substrings in the result (default False)
    :type include_empty_strings: boolean
    :returns: List of substrings of *string*
    :rtype: list of strings

    Splits *string* at each occurrence of *char* that is not preceded by an
    odd number of backslashes ('\\'), discarding any empty string elements
    unless *include_empty_strings* is True.
    '''
    words = []
    pos = len(string)
    lastpos = pos
    while pos >= 0:
        pos = get_last_pos_of_char(char, string[:lastpos])
        if pos >= 0:
            if pos + 1 != lastpos or include_empty_strings:
                words.append(string[pos + 1: lastpos])
            lastpos = pos
    if lastpos != 0 or include_empty_strings:
        words.append(string[:lastpos])
    words.reverse()
    return words
[ "def", "split_unescaped", "(", "char", ",", "string", ",", "include_empty_strings", "=", "False", ")", ":", "words", "=", "[", "]", "pos", "=", "len", "(", "string", ")", "lastpos", "=", "pos", "while", "pos", ">=", "0", ":", "pos", "=", "get_last_pos_of_char", "(", "char", ",", "string", "[", ":", "lastpos", "]", ")", "if", "pos", ">=", "0", ":", "if", "pos", "+", "1", "!=", "lastpos", "or", "include_empty_strings", ":", "words", ".", "append", "(", "string", "[", "pos", "+", "1", ":", "lastpos", "]", ")", "lastpos", "=", "pos", "if", "lastpos", "!=", "0", "or", "include_empty_strings", ":", "words", ".", "append", "(", "string", "[", ":", "lastpos", "]", ")", "words", ".", "reverse", "(", ")", "return", "words" ]
:param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:param include_empty_strings: If True, keep empty substrings in the result (default False)
:type include_empty_strings: boolean
:returns: List of substrings of *string*
:rtype: list of strings

Splits *string* at each occurrence of *char* that is not preceded by an odd number of backslashes ('\\'), discarding any empty string elements unless *include_empty_strings* is True.
[ ":", "param", "char", ":", "The", "character", "on", "which", "to", "split", "the", "string", ":", "type", "char", ":", "string", ":", "param", "string", ":", "The", "string", "to", "split", ":", "type", "string", ":", "string", ":", "returns", ":", "List", "of", "substrings", "of", "*", "string", "*", ":", "rtype", ":", "list", "of", "strings" ]
python
train
31.148148
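The escaping rule in action, assuming dx-toolkit is installed so the helper get_last_pos_of_char is available:

from dxpy.utils.resolver import split_unescaped

# The first ':' is preceded by one backslash (odd count), so it is
# treated as escaped; only the second ':' splits the string.
assert split_unescaped(':', 'a\\:b:c') == ['a\\:b', 'c']
# Empty elements are dropped unless explicitly requested:
assert split_unescaped(':', 'a::b') == ['a', 'b']
assert split_unescaped(':', 'a::b', include_empty_strings=True) == ['a', '', 'b']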
blockstack/blockstack-core
blockstack/lib/fast_sync.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L114-L180
def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
    """
    Append a signature to the end of a snapshot path with
    the given private key.

    If first is True, then don't expect the signature trailer.

    Return True on success
    Return False on error
    """
    if not os.path.exists(snapshot_path):
        log.error("No such file or directory: {}".format(snapshot_path))
        return False

    file_size = 0
    payload_size = 0
    write_offset = 0
    try:
        sb = os.stat(snapshot_path)
        file_size = sb.st_size
        assert file_size > 8
    except Exception as e:
        log.exception(e)
        return False

    num_sigs = 0
    with open(snapshot_path, 'r+') as f:
        if not first:
            info = fast_sync_inspect(f)
            if 'error' in info:
                log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
                return False

            num_sigs = len(info['signatures'])
            write_offset = info['sig_append_offset']
            payload_size = info['payload_size']

        else:
            # no one has signed yet.
            write_offset = file_size
            num_sigs = 0
            payload_size = file_size

        # hash the file and sign the (bin-encoded) hash
        privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
        hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
        sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )

        if BLOCKSTACK_TEST:
            log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))

        # append the signature trailer: the base64 signature, then its
        # length as 8 hex digits, then the updated signature count as
        # 8 hex digits
        f.seek(write_offset, os.SEEK_SET)
        f.write(sigb64)
        f.write('{:08x}'.format(len(sigb64)))

        # append number of signatures
        num_sigs += 1
        f.write('{:08x}'.format(num_sigs))

        f.flush()
        os.fsync(f.fileno())

    return True
[ "def", "fast_sync_sign_snapshot", "(", "snapshot_path", ",", "private_key", ",", "first", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "snapshot_path", ")", ":", "log", ".", "error", "(", "\"No such file or directory: {}\"", ".", "format", "(", "snapshot_path", ")", ")", "return", "False", "file_size", "=", "0", "payload_size", "=", "0", "write_offset", "=", "0", "try", ":", "sb", "=", "os", ".", "stat", "(", "snapshot_path", ")", "file_size", "=", "sb", ".", "st_size", "assert", "file_size", ">", "8", "except", "Exception", "as", "e", ":", "log", ".", "exception", "(", "e", ")", "return", "False", "num_sigs", "=", "0", "snapshot_hash", "=", "None", "with", "open", "(", "snapshot_path", ",", "'r+'", ")", "as", "f", ":", "if", "not", "first", ":", "info", "=", "fast_sync_inspect", "(", "f", ")", "if", "'error'", "in", "info", ":", "log", ".", "error", "(", "\"Failed to inspect {}: {}\"", ".", "format", "(", "snapshot_path", ",", "info", "[", "'error'", "]", ")", ")", "return", "False", "num_sigs", "=", "len", "(", "info", "[", "'signatures'", "]", ")", "write_offset", "=", "info", "[", "'sig_append_offset'", "]", "payload_size", "=", "info", "[", "'payload_size'", "]", "else", ":", "# no one has signed yet.", "write_offset", "=", "file_size", "num_sigs", "=", "0", "payload_size", "=", "file_size", "# hash the file and sign the (bin-encoded) hash", "privkey_hex", "=", "keylib", ".", "ECPrivateKey", "(", "private_key", ")", ".", "to_hex", "(", ")", "hash_hex", "=", "get_file_hash", "(", "f", ",", "hashlib", ".", "sha256", ",", "fd_len", "=", "payload_size", ")", "sigb64", "=", "sign_digest", "(", "hash_hex", ",", "privkey_hex", ",", "hashfunc", "=", "hashlib", ".", "sha256", ")", "if", "BLOCKSTACK_TEST", ":", "log", ".", "debug", "(", "\"Signed {} with {} to make {}\"", ".", "format", "(", "hash_hex", ",", "keylib", ".", "ECPrivateKey", "(", "private_key", ")", ".", "public_key", "(", ")", ".", "to_hex", "(", ")", ",", "sigb64", ")", ")", "# append", "f", ".", "seek", "(", "write_offset", ",", "os", ".", "SEEK_SET", ")", "f", ".", "write", "(", "sigb64", ")", "f", ".", "write", "(", "'{:08x}'", ".", "format", "(", "len", "(", "sigb64", ")", ")", ")", "# append number of signatures", "num_sigs", "+=", "1", "f", ".", "write", "(", "'{:08x}'", ".", "format", "(", "num_sigs", ")", ")", "f", ".", "flush", "(", ")", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "return", "True" ]
Append a signature to the end of a snapshot path with the given private key. If first is True, then don't expect the signature trailer. Return True on success Return False on error
[ "Append", "a", "signature", "to", "the", "end", "of", "a", "snapshot", "path", "with", "the", "given", "private", "key", "." ]
python
train
29.179104
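A hedged usage sketch: the first signer passes first=True because no signature trailer exists yet, and later signers append to the existing trailer. The path and key names are placeholders, not values from the source:

# Hypothetical hex-encoded ECDSA private keys and snapshot path.
ok = fast_sync_sign_snapshot('/tmp/snapshot.bak', first_signer_privkey_hex, first=True)
assert ok
# A second signer appends another signature to the trailer created above.
ok = fast_sync_sign_snapshot('/tmp/snapshot.bak', second_signer_privkey_hex, first=False)
assert ok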
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L834-L838
def skips(self, user): """ Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__. """ return self._get(self._build_url(self.endpoint.skips(id=user)))
[ "def", "skips", "(", "self", ",", "user", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "skips", "(", "id", "=", "user", ")", ")", ")" ]
Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
[ "Skips", "for", "user", ".", "Zendesk", "API", "Reference", "<https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "ticket_skips", ">", "__", "." ]
python
train
45.8
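A hedged sketch of calling skips() through a configured Zenpy client; the credentials, the user id, and the exact attribute path to this method depend on your zenpy version, so treat all three as assumptions:

from zenpy import Zenpy

client = Zenpy(subdomain='example', email='agent@example.com', token='...')  # placeholder credentials
user = client.users(id=123)            # hypothetical user id
for skip in client.users.skips(user):  # attribute path assumed; see the zenpy docs
    print(skip.ticket.id, skip.reason)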