Dataset schema (one record per function; string fields show observed min-max lengths):

  repo              string, 7-54 chars
  path              string, 4-192 chars
  url               string, 87-284 chars
  code              string, 78-104k chars
  code_tokens       sequence
  docstring         string, 1-46.9k chars
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
bennylope/pygeocodio
geocodio/client.py
https://github.com/bennylope/pygeocodio/blob/4c33d3d34f6b63d4b8fe85fe571ae02b9f67d6c3/geocodio/client.py#L134-L149
def batch_geocode(self, addresses, **kwargs):
    """
    Returns an Address dictionary with the components of the queried
    address.
    """
    fields = ",".join(kwargs.pop("fields", []))
    response = self._req(
        "post",
        verb="geocode",
        params={"fields": fields},
        data=json.dumps(addresses),
    )
    if response.status_code != 200:
        return error_response(response)
    return LocationCollection(response.json()["results"])
[ "def", "batch_geocode", "(", "self", ",", "addresses", ",", "*", "*", "kwargs", ")", ":", "fields", "=", "\",\"", ".", "join", "(", "kwargs", ".", "pop", "(", "\"fields\"", ",", "[", "]", ")", ")", "response", "=", "self", ".", "_req", "(", "\"post\"", ",", "verb", "=", "\"geocode\"", ",", "params", "=", "{", "\"fields\"", ":", "fields", "}", ",", "data", "=", "json", ".", "dumps", "(", "addresses", ")", ",", ")", "if", "response", ".", "status_code", "!=", "200", ":", "return", "error_response", "(", "response", ")", "return", "LocationCollection", "(", "response", ".", "json", "(", ")", "[", "\"results\"", "]", ")" ]
Returns an Address dictionary with the components of the queried address.
[ "Returns", "an", "Address", "dictionary", "with", "the", "components", "of", "the", "queried", "address", "." ]
python
train
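A minimal usage sketch for the batch_geocode record above. The client construction, API key, and addresses are illustrative assumptions, not part of the record:

    # Hypothetical usage; the key and addresses are placeholders.
    from geocodio import GeocodioClient

    client = GeocodioClient("YOUR_API_KEY")
    locations = client.batch_geocode([
        "3730 N Clark St, Chicago, IL",
        "638 E 13th Ave, Denver, CO",
    ], fields=["timezone"])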
jaraco/path.py
path/__init__.py
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L1336-L1424
def in_place(
        self, mode='r', buffering=-1, encoding=None, errors=None,
        newline=None, backup_extension=None,
):
    """
    A context in which a file may be re-written in-place with new content.

    Yields a tuple of :samp:`({readable}, {writable})` file objects,
    where `writable` replaces `readable`.

    If an exception occurs, the old file is restored, removing the
    written data.

    Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only
    modes are allowed. A :exc:`ValueError` is raised on invalid modes.

    For example, to add line numbers to a file::

        p = Path(filename)
        assert p.isfile()
        with p.in_place() as (reader, writer):
            for number, line in enumerate(reader, 1):
                writer.write('{0:3}: '.format(number))
                writer.write(line)

    Thereafter, the file at `filename` will have line numbers in it.
    """
    import io

    if set(mode).intersection('wa+'):
        raise ValueError('Only read-only file modes can be used')

    # move existing file to backup, create new file with same permissions
    # borrowed extensively from the fileinput module
    backup_fn = self + (backup_extension or os.extsep + 'bak')
    try:
        os.unlink(backup_fn)
    except os.error:
        pass
    os.rename(self, backup_fn)
    readable = io.open(
        backup_fn, mode, buffering=buffering,
        encoding=encoding, errors=errors, newline=newline,
    )
    try:
        perm = os.fstat(readable.fileno()).st_mode
    except OSError:
        writable = open(
            self, 'w' + mode.replace('r', ''), buffering=buffering,
            encoding=encoding, errors=errors, newline=newline,
        )
    else:
        os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
        if hasattr(os, 'O_BINARY'):
            os_mode |= os.O_BINARY
        fd = os.open(self, os_mode, perm)
        writable = io.open(
            fd, "w" + mode.replace('r', ''), buffering=buffering,
            encoding=encoding, errors=errors, newline=newline,
        )
        try:
            if hasattr(os, 'chmod'):
                os.chmod(self, perm)
        except OSError:
            pass
    try:
        yield readable, writable
    except Exception:
        # move backup back
        readable.close()
        writable.close()
        try:
            os.unlink(self)
        except os.error:
            pass
        os.rename(backup_fn, self)
        raise
    else:
        readable.close()
        writable.close()
    finally:
        try:
            os.unlink(backup_fn)
        except os.error:
            pass
[ "def", "in_place", "(", "self", ",", "mode", "=", "'r'", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "backup_extension", "=", "None", ",", ")", ":", "import", "io", "if", "set", "(", "mode", ")", ".", "intersection", "(", "'wa+'", ")", ":", "raise", "ValueError", "(", "'Only read-only file modes can be used'", ")", "# move existing file to backup, create new file with same permissions", "# borrowed extensively from the fileinput module", "backup_fn", "=", "self", "+", "(", "backup_extension", "or", "os", ".", "extsep", "+", "'bak'", ")", "try", ":", "os", ".", "unlink", "(", "backup_fn", ")", "except", "os", ".", "error", ":", "pass", "os", ".", "rename", "(", "self", ",", "backup_fn", ")", "readable", "=", "io", ".", "open", "(", "backup_fn", ",", "mode", ",", "buffering", "=", "buffering", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ",", ")", "try", ":", "perm", "=", "os", ".", "fstat", "(", "readable", ".", "fileno", "(", ")", ")", ".", "st_mode", "except", "OSError", ":", "writable", "=", "open", "(", "self", ",", "'w'", "+", "mode", ".", "replace", "(", "'r'", ",", "''", ")", ",", "buffering", "=", "buffering", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ",", ")", "else", ":", "os_mode", "=", "os", ".", "O_CREAT", "|", "os", ".", "O_WRONLY", "|", "os", ".", "O_TRUNC", "if", "hasattr", "(", "os", ",", "'O_BINARY'", ")", ":", "os_mode", "|=", "os", ".", "O_BINARY", "fd", "=", "os", ".", "open", "(", "self", ",", "os_mode", ",", "perm", ")", "writable", "=", "io", ".", "open", "(", "fd", ",", "\"w\"", "+", "mode", ".", "replace", "(", "'r'", ",", "''", ")", ",", "buffering", "=", "buffering", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ",", ")", "try", ":", "if", "hasattr", "(", "os", ",", "'chmod'", ")", ":", "os", ".", "chmod", "(", "self", ",", "perm", ")", "except", "OSError", ":", "pass", "try", ":", "yield", "readable", ",", "writable", "except", "Exception", ":", "# move backup back", "readable", ".", "close", "(", ")", "writable", ".", "close", "(", ")", "try", ":", "os", ".", "unlink", "(", "self", ")", "except", "os", ".", "error", ":", "pass", "os", ".", "rename", "(", "backup_fn", ",", "self", ")", "raise", "else", ":", "readable", ".", "close", "(", ")", "writable", ".", "close", "(", ")", "finally", ":", "try", ":", "os", ".", "unlink", "(", "backup_fn", ")", "except", "os", ".", "error", ":", "pass" ]
A context in which a file may be re-written in-place with new content.

Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`.

If an exception occurs, the old file is restored, removing the written data.

Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only modes are allowed. A :exc:`ValueError` is raised on invalid modes.

For example, to add line numbers to a file::

    p = Path(filename)
    assert p.isfile()
    with p.in_place() as (reader, writer):
        for number, line in enumerate(reader, 1):
            writer.write('{0:3}: '.format(number))
            writer.write(line)

Thereafter, the file at `filename` will have line numbers in it.
[ "A", "context", "in", "which", "a", "file", "may", "be", "re", "-", "written", "in", "-", "place", "with", "new", "content", "." ]
python
train
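A runnable sketch of the docstring's line-numbering example, assuming the path.py package is installed; the file name and contents are made up:

    from path import Path

    p = Path('example.txt')
    p.write_text('alpha\nbeta\n')
    with p.in_place() as (reader, writer):
        for number, line in enumerate(reader, 1):
            writer.write('{0:3}: '.format(number))
            writer.write(line)
    print(p.text())  # '  1: alpha\n  2: beta\n'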
tcalmant/ipopo
pelix/rsa/__init__.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L1513-L1527
def get_dot_properties(prefix, props, remove_prefix):
    # type: (str, Dict[str, Any], bool) -> Dict[str, Any]
    """
    Gets the properties starting with the given prefix
    """
    result_props = {}
    if props:
        dot_keys = [x for x in props.keys() if x.startswith(prefix + ".")]
        for dot_key in dot_keys:
            if remove_prefix:
                new_key = dot_key[len(prefix) + 1:]
            else:
                new_key = dot_key
            result_props[new_key] = props.get(dot_key)
    return result_props
[ "def", "get_dot_properties", "(", "prefix", ",", "props", ",", "remove_prefix", ")", ":", "# type: (str, Dict[str, Any], bool) -> Dict[str, Any]", "result_props", "=", "{", "}", "if", "props", ":", "dot_keys", "=", "[", "x", "for", "x", "in", "props", ".", "keys", "(", ")", "if", "x", ".", "startswith", "(", "prefix", "+", "\".\"", ")", "]", "for", "dot_key", "in", "dot_keys", ":", "if", "remove_prefix", ":", "new_key", "=", "dot_key", "[", "len", "(", "prefix", ")", "+", "1", ":", "]", "else", ":", "new_key", "=", "dot_key", "result_props", "[", "new_key", "]", "=", "props", ".", "get", "(", "dot_key", ")", "return", "result_props" ]
Gets the properties starting with the given prefix
[ "Gets", "the", "properties", "starting", "with", "the", "given", "prefix" ]
python
train
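For illustration, the expected behavior with the function above in scope; the property names are hypothetical:

    props = {"ecf.endpoint.id": "1", "ecf.endpoint.ts": "2", "other": "3"}
    print(get_dot_properties("ecf.endpoint", props, False))
    # {'ecf.endpoint.id': '1', 'ecf.endpoint.ts': '2'}
    print(get_dot_properties("ecf.endpoint", props, True))
    # {'id': '1', 'ts': '2'}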
ARMmbed/icetea
icetea_lib/cloud.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/cloud.py#L224-L241
def _resolve_host(self):  # pylint: disable=no-self-use
    """
    Resolve cloud provider host name.

    Defaults to environment variables OPENTMI_ADDRESS_PRIVATE or
    OPENTMI_ADDRESS_PUBLIC if environment variable NODE_NAME starts
    with 'aws'. Otherwise gets ICETEA_CLOUD_HOST environment variable
    OR localhost:3000 if that one does not exist.

    :return: Cloud host information
    """
    node_name = os.environ.get('NODE_NAME', '')
    if node_name.startswith('aws'):
        _host = os.environ.get('OPENTMI_ADDRESS_PRIVATE', None)
    else:
        _host = os.environ.get('OPENTMI_ADDRESS_PUBLIC', None)
    if _host is None:
        _host = os.environ.get("ICETEA_CLOUD_HOST", "localhost:3000")
    return _host
[ "def", "_resolve_host", "(", "self", ")", ":", "# pylint: disable=no-self-use", "node_name", "=", "os", ".", "environ", ".", "get", "(", "'NODE_NAME'", ",", "''", ")", "if", "node_name", ".", "startswith", "(", "'aws'", ")", ":", "_host", "=", "os", ".", "environ", ".", "get", "(", "'OPENTMI_ADDRESS_PRIVATE'", ",", "None", ")", "else", ":", "_host", "=", "os", ".", "environ", ".", "get", "(", "'OPENTMI_ADDRESS_PUBLIC'", ",", "None", ")", "if", "_host", "is", "None", ":", "_host", "=", "os", ".", "environ", ".", "get", "(", "\"ICETEA_CLOUD_HOST\"", ",", "\"localhost:3000\"", ")", "return", "_host" ]
Resolve cloud provider host name. Defaults to environment variables OPENTMI_ADDRESS_PRIVATE or OPENTMI_ADDRESS_PUBLIC if environment variable NODE_NAME starts with 'aws'. Otherwise gets ICETEA_CLOUD_HOST environment variable OR localhost:3000 if that one does not exist. :return: Cloud host information
[ "Resolve", "cloud", "provider", "host", "name", ".", "Defaults", "to", "environment", "variables", "OPENTMI_ADDRESS_PRIVATE", "or", "OPENTMI_ADDRESS_PUBLIC", "if", "environment", "variable", "NODE_NAME", "starts", "with", "aws", ".", "Otherwise", "gets", "ICETEA_CLOUD_HOST", "environment", "variable", "OR", "localhost", ":", "3000", "if", "that", "one", "does", "not", "exist", ".", ":", "return", ":", "Cloud", "host", "information" ]
python
train
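A self-contained restatement of the lookup order used above, taking a dict instead of os.environ so it can be exercised directly; resolve_host is a hypothetical helper, not part of the record:

    def resolve_host(env):
        # NODE_NAME starting with 'aws' selects the private address
        if env.get('NODE_NAME', '').startswith('aws'):
            host = env.get('OPENTMI_ADDRESS_PRIVATE')
        else:
            host = env.get('OPENTMI_ADDRESS_PUBLIC')
        # final fallback mirrors the record: ICETEA_CLOUD_HOST or localhost:3000
        return host if host is not None else env.get('ICETEA_CLOUD_HOST', 'localhost:3000')

    print(resolve_host({}))  # localhost:3000
    print(resolve_host({'NODE_NAME': 'aws-1',
                        'OPENTMI_ADDRESS_PRIVATE': '10.0.0.5:3000'}))  # 10.0.0.5:3000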
sosy-lab/benchexec
benchexec/util.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/util.py#L109-L113
def get_list_from_xml(elem, tag="option", attributes=["name"]):
    '''
    This function searches for all "option"-tags and returns a list
    with all attributes and texts.
    '''
    return flatten(
        ([option.get(attr) for attr in attributes] + [option.text]
         for option in elem.findall(tag)),
        exclude=[None])
[ "def", "get_list_from_xml", "(", "elem", ",", "tag", "=", "\"option\"", ",", "attributes", "=", "[", "\"name\"", "]", ")", ":", "return", "flatten", "(", "(", "[", "option", ".", "get", "(", "attr", ")", "for", "attr", "in", "attributes", "]", "+", "[", "option", ".", "text", "]", "for", "option", "in", "elem", ".", "findall", "(", "tag", ")", ")", ",", "exclude", "=", "[", "None", "]", ")" ]
This function searches for all "option"-tags and returns a list with all attributes and texts.
[ "This", "function", "searches", "for", "all", "option", "-", "tags", "and", "returns", "a", "list", "with", "all", "attributes", "and", "texts", "." ]
python
train
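A small sketch of the record above in action; flatten here is a minimal stand-in for benchexec.util.flatten, written only so the example runs on its own:

    import xml.etree.ElementTree as ET
    from itertools import chain

    def flatten(iterable, exclude=[]):
        # stand-in: concatenate sublists, dropping excluded values (e.g. None)
        return [x for x in chain.from_iterable(iterable) if x not in exclude]

    elem = ET.fromstring('<x><option name="-t">60</option><option name="-v"/></x>')
    print(get_list_from_xml(elem))  # ['-t', '60', '-v']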
pyblish/pyblish-qml
pyblish_qml/util.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/util.py#L121-L144
def defer(target, args=None, kwargs=None, callback=None):
    """Perform operation in thread with callback

    Instances are cached until finished, at which point they are garbage
    collected. If we didn't do this, Python would step in and garbage
    collect the thread before having had time to finish, resulting in an
    exception.

    Arguments:
        target (callable): Method or function to call
        callback (callable, optional): Method or function to call
            once `target` has finished.

    Returns:
        None

    """
    obj = _defer(target, args, kwargs, callback)
    obj.finished.connect(lambda: _defer_cleanup(obj))
    obj.start()
    _defer_threads.append(obj)
    return obj
[ "def", "defer", "(", "target", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "callback", "=", "None", ")", ":", "obj", "=", "_defer", "(", "target", ",", "args", ",", "kwargs", ",", "callback", ")", "obj", ".", "finished", ".", "connect", "(", "lambda", ":", "_defer_cleanup", "(", "obj", ")", ")", "obj", ".", "start", "(", ")", "_defer_threads", ".", "append", "(", "obj", ")", "return", "obj" ]
Perform operation in thread with callback Instances are cached until finished, at which point they are garbage collected. If we didn't do this, Python would step in and garbage collect the thread before having had time to finish, resulting in an exception. Arguments: target (callable): Method or function to call callback (callable, optional): Method or function to call once `target` has finished. Returns: None
[ "Perform", "operation", "in", "thread", "with", "callback" ]
python
train
theislab/scanpy
scanpy/readwrite.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/readwrite.py#L188-L214
def _read_v3_10x_h5(filename):
    """
    Read hdf5 file from Cell Ranger v3 or later versions.
    """
    with tables.open_file(str(filename), 'r') as f:
        try:
            dsets = {}
            for node in f.walk_nodes('/matrix', 'Array'):
                dsets[node.name] = node.read()
            from scipy.sparse import csr_matrix
            M, N = dsets['shape']
            data = dsets['data']
            if dsets['data'].dtype == np.dtype('int32'):
                data = dsets['data'].view('float32')
                data[:] = dsets['data']
            matrix = csr_matrix((data, dsets['indices'], dsets['indptr']),
                                shape=(N, M))
            adata = AnnData(matrix,
                            {'obs_names': dsets['barcodes'].astype(str)},
                            {'var_names': dsets['name'].astype(str),
                             'gene_ids': dsets['id'].astype(str),
                             'feature_types': dsets['feature_type'].astype(str),
                             'genome': dsets['genome'].astype(str)})
            logg.info(t=True)
            return adata
        except KeyError:
            raise Exception('File is missing one or more required datasets.')
[ "def", "_read_v3_10x_h5", "(", "filename", ")", ":", "with", "tables", ".", "open_file", "(", "str", "(", "filename", ")", ",", "'r'", ")", "as", "f", ":", "try", ":", "dsets", "=", "{", "}", "for", "node", "in", "f", ".", "walk_nodes", "(", "'/matrix'", ",", "'Array'", ")", ":", "dsets", "[", "node", ".", "name", "]", "=", "node", ".", "read", "(", ")", "from", "scipy", ".", "sparse", "import", "csr_matrix", "M", ",", "N", "=", "dsets", "[", "'shape'", "]", "data", "=", "dsets", "[", "'data'", "]", "if", "dsets", "[", "'data'", "]", ".", "dtype", "==", "np", ".", "dtype", "(", "'int32'", ")", ":", "data", "=", "dsets", "[", "'data'", "]", ".", "view", "(", "'float32'", ")", "data", "[", ":", "]", "=", "dsets", "[", "'data'", "]", "matrix", "=", "csr_matrix", "(", "(", "data", ",", "dsets", "[", "'indices'", "]", ",", "dsets", "[", "'indptr'", "]", ")", ",", "shape", "=", "(", "N", ",", "M", ")", ")", "adata", "=", "AnnData", "(", "matrix", ",", "{", "'obs_names'", ":", "dsets", "[", "'barcodes'", "]", ".", "astype", "(", "str", ")", "}", ",", "{", "'var_names'", ":", "dsets", "[", "'name'", "]", ".", "astype", "(", "str", ")", ",", "'gene_ids'", ":", "dsets", "[", "'id'", "]", ".", "astype", "(", "str", ")", ",", "'feature_types'", ":", "dsets", "[", "'feature_type'", "]", ".", "astype", "(", "str", ")", ",", "'genome'", ":", "dsets", "[", "'genome'", "]", ".", "astype", "(", "str", ")", "}", ")", "logg", ".", "info", "(", "t", "=", "True", ")", "return", "adata", "except", "KeyError", ":", "raise", "Exception", "(", "'File is missing one or more required datasets.'", ")" ]
Read hdf5 file from Cell Ranger v3 or later versions.
[ "Read", "hdf5", "file", "from", "Cell", "Ranger", "v3", "or", "later", "versions", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L115-L134
def _update_header(orig_vcf, base_file, new_lines, chrom_process_fn=None):
    """Fix header with additional lines and remapping of generic sample names.
    """
    new_header = "%s-sample_header.txt" % utils.splitext_plus(base_file)[0]
    with open(new_header, "w") as out_handle:
        chrom_line = None
        with utils.open_gzipsafe(orig_vcf) as in_handle:
            for line in in_handle:
                if line.startswith("##"):
                    out_handle.write(line)
                else:
                    chrom_line = line
                    break
        assert chrom_line is not None
        for line in new_lines:
            out_handle.write(line + "\n")
        if chrom_process_fn:
            chrom_line = chrom_process_fn(chrom_line)
        out_handle.write(chrom_line)
    return new_header
[ "def", "_update_header", "(", "orig_vcf", ",", "base_file", ",", "new_lines", ",", "chrom_process_fn", "=", "None", ")", ":", "new_header", "=", "\"%s-sample_header.txt\"", "%", "utils", ".", "splitext_plus", "(", "base_file", ")", "[", "0", "]", "with", "open", "(", "new_header", ",", "\"w\"", ")", "as", "out_handle", ":", "chrom_line", "=", "None", "with", "utils", ".", "open_gzipsafe", "(", "orig_vcf", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"##\"", ")", ":", "out_handle", ".", "write", "(", "line", ")", "else", ":", "chrom_line", "=", "line", "break", "assert", "chrom_line", "is", "not", "None", "for", "line", "in", "new_lines", ":", "out_handle", ".", "write", "(", "line", "+", "\"\\n\"", ")", "if", "chrom_process_fn", ":", "chrom_line", "=", "chrom_process_fn", "(", "chrom_line", ")", "out_handle", ".", "write", "(", "chrom_line", ")", "return", "new_header" ]
Fix header with additional lines and remapping of generic sample names.
[ "Fix", "header", "with", "additional", "lines", "and", "remapping", "of", "generic", "sample", "names", "." ]
python
train
ministryofjustice/money-to-prisoners-common
mtp_common/auth/middleware.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/auth/middleware.py#L11-L18
def get_user(request):
    """
    Returns a cached copy of the user if it exists or calls
    `auth_get_user` otherwise.
    """
    if not hasattr(request, '_cached_user'):
        request._cached_user = auth_get_user(request)
    return request._cached_user
[ "def", "get_user", "(", "request", ")", ":", "if", "not", "hasattr", "(", "request", ",", "'_cached_user'", ")", ":", "request", ".", "_cached_user", "=", "auth_get_user", "(", "request", ")", "return", "request", ".", "_cached_user" ]
Returns a cached copy of the user if it exists or calls `auth_get_user` otherwise.
[ "Returns", "a", "cached", "copy", "of", "the", "user", "if", "it", "exists", "or", "calls", "auth_get_user", "otherwise", "." ]
python
train
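A quick demonstration of the per-request caching pattern above, with get_user defined in the same scope and a stand-in auth_get_user; all names below are illustrative:

    class FakeRequest(object):
        pass

    calls = []
    def auth_get_user(request):  # stand-in for the real lookup
        calls.append(request)
        return 'user-1'

    req = FakeRequest()
    assert get_user(req) == 'user-1'
    assert get_user(req) == 'user-1'
    assert len(calls) == 1  # second call served from request._cached_user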
horazont/aioxmpp
aioxmpp/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/service.py#L1520-L1530
def is_depsignal_handler(class_, signal_name, cb, *, defer=False):
    """
    Return true if `cb` has been decorated with :func:`depsignal` for the
    given signal, class and connection mode.
    """
    try:
        handlers = get_magic_attr(cb)
    except AttributeError:
        return False

    return _depsignal_spec(class_, signal_name, cb, defer) in handlers
[ "def", "is_depsignal_handler", "(", "class_", ",", "signal_name", ",", "cb", ",", "*", ",", "defer", "=", "False", ")", ":", "try", ":", "handlers", "=", "get_magic_attr", "(", "cb", ")", "except", "AttributeError", ":", "return", "False", "return", "_depsignal_spec", "(", "class_", ",", "signal_name", ",", "cb", ",", "defer", ")", "in", "handlers" ]
Return true if `cb` has been decorated with :func:`depsignal` for the given signal, class and connection mode.
[ "Return", "true", "if", "cb", "has", "been", "decorated", "with", ":", "func", ":", "depsignal", "for", "the", "given", "signal", "class", "and", "connection", "mode", "." ]
python
train
saltstack/salt
salt/utils/pkg/win.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/pkg/win.py#L382-L420
def get_product_value(self, value_name, wanted_type=None):
    '''
    For the product section of the registry return the name value.

    Args:
        value_name (str): Registry value name.
        wanted_type (str):
            The type of value wanted; if the type does not match,
            None is returned. Supported values for wanted_type are
            ``str`` ``int`` ``list`` ``bytes``.

    Returns:
        value: Value requested or ``None`` if not found.
    '''
    if not self.__reg_products_handle:
        return None
    subkey, search_value_name = os.path.split(value_name)
    try:
        if subkey:
            handle = win32api.RegOpenKeyEx(  # pylint: disable=no-member
                self.__reg_products_handle,
                subkey,
                0,
                win32con.KEY_READ | self.__reg_32bit_access)
            item_value, item_type = self.__reg_query_value(handle,
                                                           search_value_name)
            win32api.RegCloseKey(handle)  # pylint: disable=no-member
        else:
            item_value, item_type = \
                win32api.RegQueryValueEx(self.__reg_products_handle,
                                         value_name)  # pylint: disable=no-member
    except pywintypes.error as exc:  # pylint: disable=no-member
        if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
            # Not Found
            return None
        raise
    if wanted_type and item_type not in self.__reg_types[wanted_type]:
        item_value = None
    return item_value
[ "def", "get_product_value", "(", "self", ",", "value_name", ",", "wanted_type", "=", "None", ")", ":", "if", "not", "self", ".", "__reg_products_handle", ":", "return", "None", "subkey", ",", "search_value_name", "=", "os", ".", "path", ".", "split", "(", "value_name", ")", "try", ":", "if", "subkey", ":", "handle", "=", "win32api", ".", "RegOpenKeyEx", "(", "# pylint: disable=no-member", "self", ".", "__reg_products_handle", ",", "subkey", ",", "0", ",", "win32con", ".", "KEY_READ", "|", "self", ".", "__reg_32bit_access", ")", "item_value", ",", "item_type", "=", "self", ".", "__reg_query_value", "(", "handle", ",", "search_value_name", ")", "win32api", ".", "RegCloseKey", "(", "handle", ")", "# pylint: disable=no-member", "else", ":", "item_value", ",", "item_type", "=", "win32api", ".", "RegQueryValueEx", "(", "self", ".", "__reg_products_handle", ",", "value_name", ")", "# pylint: disable=no-member", "except", "pywintypes", ".", "error", "as", "exc", ":", "# pylint: disable=no-member", "if", "exc", ".", "winerror", "==", "winerror", ".", "ERROR_FILE_NOT_FOUND", ":", "# Not Found", "return", "None", "raise", "if", "wanted_type", "and", "item_type", "not", "in", "self", ".", "__reg_types", "[", "wanted_type", "]", ":", "item_value", "=", "None", "return", "item_value" ]
For the product section of the registry return the name value.

Args:
    value_name (str): Registry value name.
    wanted_type (str): The type of value wanted; if the type does not match, None is returned. Supported values for wanted_type are ``str`` ``int`` ``list`` ``bytes``.

Returns:
    value: Value requested or ``None`` if not found.
[ "For", "the", "product", "section", "of", "the", "registry", "return", "the", "name", "value", "." ]
python
train
senaite/senaite.core
bika/lims/validators.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/validators.py#L1063-L1071
def _sumLists(a, b):
    """
    Algorithm to check validity of NBI and NIF.
    Receives string with a number to validate.
    """
    val = 0
    for i in map(lambda a, b: a * b, a, b):
        val += i
    return val
[ "def", "_sumLists", "(", "a", ",", "b", ")", ":", "val", "=", "0", "for", "i", "in", "map", "(", "lambda", "a", ",", "b", ":", "a", "*", "b", ",", "a", ",", "b", ")", ":", "val", "+=", "i", "return", "val" ]
Algorithm to check validity of NBI and NIF. Receives string with a number to validate.
[ "Algorithm", "to", "check", "validity", "of", "NBI", "and", "NIF", ".", "Receives", "string", "with", "a", "umber", "to", "validate", "." ]
python
train
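With the function above in scope, the element-wise product-and-sum it computes looks like this (values illustrative of a checksum weighting):

    digits  = [5, 0, 0, 0, 0, 0, 0, 0, 0]
    weights = [9, 8, 7, 6, 5, 4, 3, 2, 1]
    print(_sumLists(digits, weights))  # 45, i.e. 5*9 + 0*8 + ... + 0*1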
pyblish/pyblish-qml
pyblish_qml/models.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/models.py#L830-L840
def _remove_rule(self, group, role, value=None):
    """Implementation detail"""
    if role not in group:
        return

    if value is None:
        group.pop(role, None)
    else:
        group[role].remove(value)

    self.invalidate()
[ "def", "_remove_rule", "(", "self", ",", "group", ",", "role", ",", "value", "=", "None", ")", ":", "if", "role", "not", "in", "group", ":", "return", "if", "value", "is", "None", ":", "group", ".", "pop", "(", "role", ",", "None", ")", "else", ":", "group", "[", "role", "]", ".", "remove", "(", "value", ")", "self", ".", "invalidate", "(", ")" ]
Implementation detail
[ "Implementation", "detail" ]
python
train
FujiMakoto/AgentML
agentml/parser/trigger/__init__.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/trigger/__init__.py#L190-L205
def _add_response(self, response, weight=1):
    """
    Add a new trigger
    :param response: The Response object
    :type  response: Response or Condition

    :param weight: The weight of the response
    :type  weight: int
    """
    # If no response with this priority level has been defined yet, create a new list
    if response.priority not in self._responses:
        self._responses[response.priority] = [(response, weight)]
        return

    # Otherwise, add this trigger to an existing priority list
    self._responses[response.priority].append((response, weight))
[ "def", "_add_response", "(", "self", ",", "response", ",", "weight", "=", "1", ")", ":", "# If no response with this priority level has been defined yet, create a new list", "if", "response", ".", "priority", "not", "in", "self", ".", "_responses", ":", "self", ".", "_responses", "[", "response", ".", "priority", "]", "=", "[", "(", "response", ",", "weight", ")", "]", "return", "# Otherwise, add this trigger to an existing priority list", "self", ".", "_responses", "[", "response", ".", "priority", "]", ".", "append", "(", "(", "response", ",", "weight", ")", ")" ]
Add a new trigger :param response: The Response object :type response: Response or Condition :param weight: The weight of the response :type weight: int
[ "Add", "a", "new", "trigger", ":", "param", "response", ":", "The", "Response", "object", ":", "type", "response", ":", "Response", "or", "Condition" ]
python
train
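The bucket-by-priority pattern above in standalone form; add is a hypothetical helper and the response strings are made up:

    responses = {}

    def add(responses, response, priority, weight=1):
        # same effect as _add_response: group (response, weight) pairs by priority
        responses.setdefault(priority, []).append((response, weight))

    add(responses, 'hello!', 1)
    add(responses, 'hi there', 1, weight=2)
    add(responses, 'fallback', 0)
    print(responses)  # {1: [('hello!', 1), ('hi there', 2)], 0: [('fallback', 1)]}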
Asana/python-asana
asana/resources/gen/teams.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/teams.py#L10-L19
def find_by_id(self, team, params={}, **options):
    """Returns the full record for a single team.

    Parameters
    ----------
    team : {Id} Globally unique identifier for the team.
    [params] : {Object} Parameters for the request
    """
    path = "/teams/%s" % (team)
    return self.client.get(path, params, **options)
[ "def", "find_by_id", "(", "self", ",", "team", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/teams/%s\"", "%", "(", "team", ")", "return", "self", ".", "client", ".", "get", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Returns the full record for a single team.

Parameters
----------
team : {Id} Globally unique identifier for the team.
[params] : {Object} Parameters for the request
[ "Returns", "the", "full", "record", "for", "a", "single", "team", "." ]
python
train
ashleysommer/sanicpluginsframework
spf/plugins/contextualize.py
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugins/contextualize.py#L173-L194
def middleware(self, *args, **kwargs):
    """Decorate and register middleware
    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    :return: The middleware function to use as the decorator
    :rtype: fn
    """
    kwargs.setdefault('priority', 5)
    kwargs.setdefault('relative', None)
    kwargs.setdefault('attach_to', None)
    kwargs['with_context'] = True  # This is the whole point of this plugin
    if len(args) == 1 and callable(args[0]):
        middle_f = args[0]
        return super(Contextualize, self).middleware(middle_f, **kwargs)

    def wrapper(middle_f):
        nonlocal self, args, kwargs
        return super(Contextualize, self).middleware(
            *args, **kwargs)(middle_f)
    return wrapper
[ "def", "middleware", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'priority'", ",", "5", ")", "kwargs", ".", "setdefault", "(", "'relative'", ",", "None", ")", "kwargs", ".", "setdefault", "(", "'attach_to'", ",", "None", ")", "kwargs", "[", "'with_context'", "]", "=", "True", "# This is the whole point of this plugin", "if", "len", "(", "args", ")", "==", "1", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "middle_f", "=", "args", "[", "0", "]", "return", "super", "(", "Contextualize", ",", "self", ")", ".", "middleware", "(", "middle_f", ",", "*", "*", "kwargs", ")", "def", "wrapper", "(", "middle_f", ")", ":", "nonlocal", "self", ",", "args", ",", "kwargs", "return", "super", "(", "Contextualize", ",", "self", ")", ".", "middleware", "(", "*", "args", ",", "*", "*", "kwargs", ")", "(", "middle_f", ")", "return", "wrapper" ]
Decorate and register middleware :param args: captures all of the positional arguments passed in :type args: tuple(Any) :param kwargs: captures the keyword arguments passed in :type kwargs: dict(Any) :return: The middleware function to use as the decorator :rtype: fn
[ "Decorate", "and", "register", "middleware", ":", "param", "args", ":", "captures", "all", "of", "the", "positional", "arguments", "passed", "in", ":", "type", "args", ":", "tuple", "(", "Any", ")", ":", "param", "kwargs", ":", "captures", "the", "keyword", "arguments", "passed", "in", ":", "type", "kwargs", ":", "dict", "(", "Any", ")", ":", "return", ":", "The", "middleware", "function", "to", "use", "as", "the", "decorator", ":", "rtype", ":", "fn" ]
python
train
bitesofcode/projex
projex/xmlutil.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/xmlutil.py#L388-L405
def save(self, data, xparent=None):
    """
    Parses the element from XML to Python.

    :param      data     | <variant>
                xparent  | <xml.etree.ElementTree.Element> || None

    :return     <xml.etree.ElementTree.Element>
    """
    if xparent is not None:
        elem = ElementTree.SubElement(xparent, 'list')
    else:
        elem = ElementTree.Element('list')

    for item in data:
        XmlDataIO.toXml(item, elem)

    return elem
[ "def", "save", "(", "self", ",", "data", ",", "xparent", "=", "None", ")", ":", "if", "xparent", "is", "not", "None", ":", "elem", "=", "ElementTree", ".", "SubElement", "(", "xparent", ",", "'list'", ")", "else", ":", "elem", "=", "ElementTree", ".", "Element", "(", "'list'", ")", "for", "item", "in", "data", ":", "XmlDataIO", ".", "toXml", "(", "item", ",", "elem", ")", "return", "elem" ]
Parses the element from XML to Python. :param data | <variant> xparent | <xml.etree.ElementTree.Element> || None :return <xml.etree.ElementTree.Element>
[ "Parses", "the", "element", "from", "XML", "to", "Python", ".", ":", "param", "data", "|", "<variant", ">", "xparent", "|", "<xml", ".", "etree", ".", "ElementTree", ".", "Element", ">", "||", "None", ":", "return", "<xml", ".", "etree", ".", "ElementTree", ".", "Element", ">" ]
python
train
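For reference, the element shape the save method above produces, built by hand with ElementTree instead of XmlDataIO.toXml (the 'str' child tag is an assumption for illustration):

    import xml.etree.ElementTree as ET

    elem = ET.Element('list')
    for value in ('a', 'b'):
        child = ET.SubElement(elem, 'str')
        child.text = value
    print(ET.tostring(elem))  # b'<list><str>a</str><str>b</str></list>'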
projectatomic/atomic-reactor
atomic_reactor/plugins/pre_resolve_composes.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/pre_resolve_composes.py#L131-L148
def adjust_for_autorebuild(self):
    """Ignore pre-filled signing_intent and compose_ids for autorebuilds

    Auto rebuilds are expected to use a known configuration. The parameters
    signing_intent and compose_ids are meant for one-off build calls. This
    method ensures that these parameters are ignored for autorebuilds.
    """
    if not is_rebuild(self.workflow):
        return

    if self.signing_intent:
        self.log.info('Autorebuild detected: Ignoring signing_intent plugin parameter')
        self.signing_intent = None

    if self.compose_ids:
        self.log.info('Autorebuild detected: Ignoring compose_ids plugin parameter')
        self.compose_ids = tuple()

    self.all_compose_ids = []
[ "def", "adjust_for_autorebuild", "(", "self", ")", ":", "if", "not", "is_rebuild", "(", "self", ".", "workflow", ")", ":", "return", "if", "self", ".", "signing_intent", ":", "self", ".", "log", ".", "info", "(", "'Autorebuild detected: Ignoring signing_intent plugin parameter'", ")", "self", ".", "signing_intent", "=", "None", "if", "self", ".", "compose_ids", ":", "self", ".", "log", ".", "info", "(", "'Autorebuild detected: Ignoring compose_ids plugin parameter'", ")", "self", ".", "compose_ids", "=", "tuple", "(", ")", "self", ".", "all_compose_ids", "=", "[", "]" ]
Ignore pre-filled signing_intent and compose_ids for autorebuilds. Auto rebuilds are expected to use a known configuration. The parameters signing_intent and compose_ids are meant for one-off build calls. This method ensures that these parameters are ignored for autorebuilds.
[ "Ignore", "pre", "-", "filled", "signing_intent", "and", "compose_ids", "for", "autorebuids" ]
python
train
sixpack/sixpack
sixpack/models.py
https://github.com/sixpack/sixpack/blob/fec044a35eea79dd7b9af73fafe1b7d15f1d9ef8/sixpack/models.py#L629-L656
def record_participation(self, client, dt=None):
    """Record a user's participation in a test along with a given variation"""
    if dt is None:
        date = datetime.now()
    else:
        date = dt

    experiment_key = self.experiment.name

    pipe = self.redis.pipeline()
    pipe.sadd(_key("p:{0}:years".format(experiment_key)), date.strftime('%Y'))
    pipe.sadd(_key("p:{0}:months".format(experiment_key)), date.strftime('%Y-%m'))
    pipe.sadd(_key("p:{0}:days".format(experiment_key)), date.strftime('%Y-%m-%d'))
    pipe.execute()

    keys = [
        _key("p:{0}:_all:all".format(experiment_key)),
        _key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y'))),
        _key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m'))),
        _key("p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m-%d'))),
        _key("p:{0}:{1}:all".format(experiment_key, self.name)),
        _key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y'))),
        _key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m'))),
        _key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime('%Y-%m-%d'))),
    ]

    msetbit(keys=keys, args=([self.experiment.sequential_id(client), 1] * len(keys)))
[ "def", "record_participation", "(", "self", ",", "client", ",", "dt", "=", "None", ")", ":", "if", "dt", "is", "None", ":", "date", "=", "datetime", ".", "now", "(", ")", "else", ":", "date", "=", "dt", "experiment_key", "=", "self", ".", "experiment", ".", "name", "pipe", "=", "self", ".", "redis", ".", "pipeline", "(", ")", "pipe", ".", "sadd", "(", "_key", "(", "\"p:{0}:years\"", ".", "format", "(", "experiment_key", ")", ")", ",", "date", ".", "strftime", "(", "'%Y'", ")", ")", "pipe", ".", "sadd", "(", "_key", "(", "\"p:{0}:months\"", ".", "format", "(", "experiment_key", ")", ")", ",", "date", ".", "strftime", "(", "'%Y-%m'", ")", ")", "pipe", ".", "sadd", "(", "_key", "(", "\"p:{0}:days\"", ".", "format", "(", "experiment_key", ")", ")", ",", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", "pipe", ".", "execute", "(", ")", "keys", "=", "[", "_key", "(", "\"p:{0}:_all:all\"", ".", "format", "(", "experiment_key", ")", ")", ",", "_key", "(", "\"p:{0}:_all:{1}\"", ".", "format", "(", "experiment_key", ",", "date", ".", "strftime", "(", "'%Y'", ")", ")", ")", ",", "_key", "(", "\"p:{0}:_all:{1}\"", ".", "format", "(", "experiment_key", ",", "date", ".", "strftime", "(", "'%Y-%m'", ")", ")", ")", ",", "_key", "(", "\"p:{0}:_all:{1}\"", ".", "format", "(", "experiment_key", ",", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", ")", ",", "_key", "(", "\"p:{0}:{1}:all\"", ".", "format", "(", "experiment_key", ",", "self", ".", "name", ")", ")", ",", "_key", "(", "\"p:{0}:{1}:{2}\"", ".", "format", "(", "experiment_key", ",", "self", ".", "name", ",", "date", ".", "strftime", "(", "'%Y'", ")", ")", ")", ",", "_key", "(", "\"p:{0}:{1}:{2}\"", ".", "format", "(", "experiment_key", ",", "self", ".", "name", ",", "date", ".", "strftime", "(", "'%Y-%m'", ")", ")", ")", ",", "_key", "(", "\"p:{0}:{1}:{2}\"", ".", "format", "(", "experiment_key", ",", "self", ".", "name", ",", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", ")", ",", "]", "msetbit", "(", "keys", "=", "keys", ",", "args", "=", "(", "[", "self", ".", "experiment", ".", "sequential_id", "(", "client", ")", ",", "1", "]", "*", "len", "(", "keys", ")", ")", ")" ]
Record a user's participation in a test along with a given variation
[ "Record", "a", "user", "s", "participation", "in", "a", "test", "along", "with", "a", "given", "variation" ]
python
train
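A sketch of the Redis key layout the method above writes, computed without a Redis connection; the experiment and alternative names are hypothetical and only a subset of the keys is shown:

    from datetime import datetime

    date = datetime(2019, 4, 1)
    experiment_key, alternative = 'button-color', 'red'
    keys = [
        "p:{0}:_all:all".format(experiment_key),
        "p:{0}:_all:{1}".format(experiment_key, date.strftime('%Y-%m-%d')),
        "p:{0}:{1}:{2}".format(experiment_key, alternative, date.strftime('%Y-%m-%d')),
    ]
    print(keys)
    # ['p:button-color:_all:all', 'p:button-color:_all:2019-04-01',
    #  'p:button-color:red:2019-04-01']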
googleapis/google-auth-library-python
google/oauth2/_client.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/oauth2/_client.py#L159-L201
def id_token_jwt_grant(request, token_uri, assertion):
    """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
    requests an OpenID Connect ID Token instead of an access token.

    This is a variant on the standard JWT Profile that is currently unique
    to Google. This was added for the benefit of authenticating to services
    that require ID Tokens instead of access tokens or JWT bearer tokens.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorization server's token endpoint
            URI.
        assertion (str): JWT token signed by a service account. The token's
            payload must include a ``target_audience`` claim.

    Returns:
        Tuple[str, Optional[datetime], Mapping[str, str]]: The (encoded)
            Open ID Connect ID Token, expiration, and additional data
            returned by the endpoint.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.
    """
    body = {
        'assertion': assertion,
        'grant_type': _JWT_GRANT_TYPE,
    }

    response_data = _token_endpoint_request(request, token_uri, body)

    try:
        id_token = response_data['id_token']
    except KeyError as caught_exc:
        new_exc = exceptions.RefreshError(
            'No ID token in response.', response_data)
        six.raise_from(new_exc, caught_exc)

    payload = jwt.decode(id_token, verify=False)
    expiry = datetime.datetime.utcfromtimestamp(payload['exp'])

    return id_token, expiry, response_data
[ "def", "id_token_jwt_grant", "(", "request", ",", "token_uri", ",", "assertion", ")", ":", "body", "=", "{", "'assertion'", ":", "assertion", ",", "'grant_type'", ":", "_JWT_GRANT_TYPE", ",", "}", "response_data", "=", "_token_endpoint_request", "(", "request", ",", "token_uri", ",", "body", ")", "try", ":", "id_token", "=", "response_data", "[", "'id_token'", "]", "except", "KeyError", "as", "caught_exc", ":", "new_exc", "=", "exceptions", ".", "RefreshError", "(", "'No ID token in response.'", ",", "response_data", ")", "six", ".", "raise_from", "(", "new_exc", ",", "caught_exc", ")", "payload", "=", "jwt", ".", "decode", "(", "id_token", ",", "verify", "=", "False", ")", "expiry", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "payload", "[", "'exp'", "]", ")", "return", "id_token", ",", "expiry", ",", "response_data" ]
Implements the JWT Profile for OAuth 2.0 Authorization Grants, but requests an OpenID Connect ID Token instead of an access token. This is a variant on the standard JWT Profile that is currently unique to Google. This was added for the benefit of authenticating to services that require ID Tokens instead of access tokens or JWT bearer tokens. Args: request (google.auth.transport.Request): A callable used to make HTTP requests. token_uri (str): The OAuth 2.0 authorization server's token endpoint URI. assertion (str): JWT token signed by a service account. The token's payload must include a ``target_audience`` claim. Returns: Tuple[str, Optional[datetime], Mapping[str, str]]: The (encoded) Open ID Connect ID Token, expiration, and additional data returned by the endpoint. Raises: google.auth.exceptions.RefreshError: If the token endpoint returned an error.
[ "Implements", "the", "JWT", "Profile", "for", "OAuth", "2", ".", "0", "Authorization", "Grants", "but", "requests", "an", "OpenID", "Connect", "ID", "Token", "instead", "of", "an", "access", "token", "." ]
python
train
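The expiry in the return value comes from the ID token's 'exp' claim, as in this small worked example (timestamp chosen arbitrarily):

    import datetime

    payload = {'exp': 1546300800}
    print(datetime.datetime.utcfromtimestamp(payload['exp']))  # 2019-01-01 00:00:00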
apache/incubator-mxnet
example/multivariate_time_series/src/metrics.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/multivariate_time_series/src/metrics.py#L31-L35
def rae(label, pred):
    """computes the relative absolute error (condensed using standard deviation formula)"""
    numerator = np.mean(np.abs(label - pred), axis=None)
    denominator = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
    return numerator / denominator
[ "def", "rae", "(", "label", ",", "pred", ")", ":", "numerator", "=", "np", ".", "mean", "(", "np", ".", "abs", "(", "label", "-", "pred", ")", ",", "axis", "=", "None", ")", "denominator", "=", "np", ".", "mean", "(", "np", ".", "abs", "(", "label", "-", "np", ".", "mean", "(", "label", ",", "axis", "=", "None", ")", ")", ",", "axis", "=", "None", ")", "return", "numerator", "/", "denominator" ]
computes the relative absolute error (condensed using standard deviation formula)
[ "computes", "the", "relative", "absolute", "error", "(", "condensed", "using", "standard", "deviation", "formula", ")" ]
python
train
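A worked numeric check of rae, restating the record so it runs standalone:

    import numpy as np

    def rae(label, pred):
        numerator = np.mean(np.abs(label - pred), axis=None)
        denominator = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
        return numerator / denominator

    label = np.array([1.0, 2.0, 3.0, 4.0])
    pred = np.array([1.5, 2.5, 2.5, 3.5])
    print(rae(label, pred))  # 0.5: mean |error| 0.5 over mean absolute deviation 1.0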
assamite/creamas
creamas/grid.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/grid.py#L203-L222
def add_to_grid(self, agent):
    '''Add agent to the next available spot in the grid.

    :returns:
        (x, y) of the agent in the grid. This is the agent's overall
        coordinate in the grand grid (i.e. the actual coordinate of the
        agent w.r.t. **origin**).

    :raises: `ValueError` if the grid is full.
    '''
    for i in range(len(self.grid)):
        for j in range(len(self.grid[0])):
            if self.grid[i][j] is None:
                x = self.origin[0] + i
                y = self.origin[1] + j
                self.grid[i][j] = agent
                return (x, y)
    raise ValueError("Trying to add an agent to a full grid."
                     .format(len(self._grid[0]), len(self._grid[1])))
[ "def", "add_to_grid", "(", "self", ",", "agent", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "grid", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "self", ".", "grid", "[", "0", "]", ")", ")", ":", "if", "self", ".", "grid", "[", "i", "]", "[", "j", "]", "is", "None", ":", "x", "=", "self", ".", "origin", "[", "0", "]", "+", "i", "y", "=", "self", ".", "origin", "[", "1", "]", "+", "j", "self", ".", "grid", "[", "i", "]", "[", "j", "]", "=", "agent", "return", "(", "x", ",", "y", ")", "raise", "ValueError", "(", "\"Trying to add an agent to a full grid.\"", ".", "format", "(", "len", "(", "self", ".", "_grid", "[", "0", "]", ")", ",", "len", "(", "self", ".", "_grid", "[", "1", "]", ")", ")", ")" ]
Add agent to the next available spot in the grid. :returns: (x,y) of the agent in the grid. This is the agent's overall coordinate in the grand grid (i.e. the actual coordinate of the agent w.r.t. **origin**). :raises: `ValueError` if the grid is full.
[ "Add", "agent", "to", "the", "next", "available", "spot", "in", "the", "grid", "." ]
python
train
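The scan order of add_to_grid in standalone form; first_free_spot is a hypothetical helper that mirrors the row-major search above:

    def first_free_spot(grid, origin):
        # row-major scan, first None wins, offset by the grid's origin
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] is None:
                    return (origin[0] + i, origin[1] + j)
        raise ValueError("grid is full")

    grid = [['a1', None], [None, None]]
    print(first_free_spot(grid, origin=(10, 20)))  # (10, 21)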
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L702-L712
def content(self, contents):
    """The content(s) of the email

    :param contents: The content(s) of the email
    :type contents: Content, list(Content)
    """
    if isinstance(contents, list):
        for c in contents:
            self.add_content(c)
    else:
        self.add_content(contents)
[ "def", "content", "(", "self", ",", "contents", ")", ":", "if", "isinstance", "(", "contents", ",", "list", ")", ":", "for", "c", "in", "contents", ":", "self", ".", "add_content", "(", "c", ")", "else", ":", "self", ".", "add_content", "(", "contents", ")" ]
The content(s) of the email :param contents: The content(s) of the email :type contents: Content, list(Content)
[ "The", "content", "(", "s", ")", "of", "the", "email" ]
python
train
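A usage sketch for the content setter above, assuming sendgrid's Mail and Content helpers; both a single object and a list route through add_content:

    from sendgrid.helpers.mail import Mail, Content

    mail = Mail()
    mail.content = Content("text/plain", "hello")
    mail.content = [Content("text/plain", "hello"),
                    Content("text/html", "<b>hello</b>")]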
ssato/python-anyconfig
src/anyconfig/backend/xml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L240-L264
def _process_children_elems(elem, dic, subdic, container=dict,
                            children="@children", **options):
    """
    :param elem: ET Element object or None
    :param dic: <container> (dict[-like]) object converted from elem
    :param subdic: Sub <container> object converted from elem
    :param container: callable to make a container object
    :param children: Tag for children nodes
    :param options:
        Keyword options, see the description of :func:`elem_to_container`
        for more details.

    :return: None but updating dic and subdic as side effects
    """
    cdics = [elem_to_container(c, container=container, **options)
             for c in elem]
    merge_attrs = options.get("merge_attrs", False)
    sdics = [container(elem.attrib) if merge_attrs else subdic] + cdics

    if _dicts_have_unique_keys(sdics):  # ex. <a><b>1</b><c>c</c></a>
        dic[elem.tag] = _merge_dicts(sdics, container)
    elif not subdic:  # There are no attrs nor text and only these children.
        dic[elem.tag] = cdics
    else:
        subdic[children] = cdics
[ "def", "_process_children_elems", "(", "elem", ",", "dic", ",", "subdic", ",", "container", "=", "dict", ",", "children", "=", "\"@children\"", ",", "*", "*", "options", ")", ":", "cdics", "=", "[", "elem_to_container", "(", "c", ",", "container", "=", "container", ",", "*", "*", "options", ")", "for", "c", "in", "elem", "]", "merge_attrs", "=", "options", ".", "get", "(", "\"merge_attrs\"", ",", "False", ")", "sdics", "=", "[", "container", "(", "elem", ".", "attrib", ")", "if", "merge_attrs", "else", "subdic", "]", "+", "cdics", "if", "_dicts_have_unique_keys", "(", "sdics", ")", ":", "# ex. <a><b>1</b><c>c</c></a>", "dic", "[", "elem", ".", "tag", "]", "=", "_merge_dicts", "(", "sdics", ",", "container", ")", "elif", "not", "subdic", ":", "# There are no attrs nor text and only these children.", "dic", "[", "elem", ".", "tag", "]", "=", "cdics", "else", ":", "subdic", "[", "children", "]", "=", "cdics" ]
:param elem: ET Element object or None :param dic: <container> (dict[-like]) object converted from elem :param subdic: Sub <container> object converted from elem :param container: callable to make a container object :param children: Tag for children nodes :param options: Keyword options, see the description of :func:`elem_to_container` for more details. :return: None but updating dic and subdic as side effects
[ ":", "param", "elem", ":", "ET", "Element", "object", "or", "None", ":", "param", "dic", ":", "<container", ">", "(", "dict", "[", "-", "like", "]", ")", "object", "converted", "from", "elem", ":", "param", "subdic", ":", "Sub", "<container", ">", "object", "converted", "from", "elem", ":", "param", "container", ":", "callble", "to", "make", "a", "container", "object", ":", "param", "children", ":", "Tag", "for", "children", "nodes", ":", "param", "options", ":", "Keyword", "options", "see", "the", "description", "of", ":", "func", ":", "elem_to_container", "for", "more", "details", "." ]
python
train
Duke-GCB/DukeDSClient
ddsc/core/fileuploader.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/fileuploader.py#L140-L155
def create_upload_and_chunk_url(self, project_id, path_data, hash_data,
                                remote_filename=None, storage_provider_id=None):
    """
    Create a non-chunked upload that returns upload id and upload url. This
    type of upload doesn't allow additional upload urls. For single chunk
    files this method is more efficient than
    create_upload/create_file_chunk_url.
    :param project_id: str: uuid of the project
    :param path_data: PathData: holds file system data about the file we are uploading
    :param hash_data: HashData: contains hash alg and value for the file we are uploading
    :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
    :param storage_provider_id: str: optional storage provider id
    :return: str, dict: uuid for the upload, upload chunk url dict
    """
    upload_response = self._create_upload(project_id, path_data, hash_data,
                                          remote_filename=remote_filename,
                                          storage_provider_id=storage_provider_id,
                                          chunked=False)
    return upload_response['id'], upload_response['signed_url']
[ "def", "create_upload_and_chunk_url", "(", "self", ",", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "None", ",", "storage_provider_id", "=", "None", ")", ":", "upload_response", "=", "self", ".", "_create_upload", "(", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "remote_filename", ",", "storage_provider_id", "=", "storage_provider_id", ",", "chunked", "=", "False", ")", "return", "upload_response", "[", "'id'", "]", ",", "upload_response", "[", "'signed_url'", "]" ]
Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow additional upload urls. For single chunk files this method is more efficient than create_upload/create_file_chunk_url. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :return: str, dict: uuid for the upload, upload chunk url dict
[ "Create", "an", "non", "-", "chunked", "upload", "that", "returns", "upload", "id", "and", "upload", "url", ".", "This", "type", "of", "upload", "doesn", "t", "allow", "additional", "upload", "urls", ".", "For", "single", "chunk", "files", "this", "method", "is", "more", "efficient", "than", "create_upload", "/", "create_file_chunk_url", ".", ":", "param", "project_id", ":", "str", ":", "uuid", "of", "the", "project", ":", "param", "path_data", ":", "PathData", ":", "holds", "file", "system", "data", "about", "the", "file", "we", "are", "uploading", ":", "param", "hash_data", ":", "HashData", ":", "contains", "hash", "alg", "and", "value", "for", "the", "file", "we", "are", "uploading", ":", "param", "remote_filename", ":", "str", ":", "name", "to", "use", "for", "our", "remote", "file", "(", "defaults", "to", "path_data", "basename", "otherwise", ")", ":", "param", "storage_provider_id", ":", "str", ":", "optional", "storage", "provider", "id", ":", "return", ":", "str", "dict", ":", "uuid", "for", "the", "upload", "upload", "chunk", "url", "dict" ]
python
train
bwohlberg/sporco
sporco/dictlrn/onlinecdl.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/onlinecdl.py#L546-L567
def xstep(self, S, W, lmbda, dimK):
    """Solve CSC problem for training data `S`."""

    if self.opt['CUDA_CBPDN']:
        Z = cuda.cbpdnmsk(self.D.squeeze(), S[..., 0], W.squeeze(), lmbda,
                          self.opt['CBPDN'])
        Z = Z.reshape(self.cri.Nv + (1, 1, self.cri.M,))
        self.Z[:] = np.asarray(Z, dtype=self.dtype)
        self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
        self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                           self.cri.axisN)
        self.xstep_itstat = None
    else:
        # Create X update object (external representation is expected!)
        xstep = cbpdn.ConvBPDNMaskDcpl(self.D.squeeze(), S, lmbda, W,
                                       self.opt['CBPDN'], dimK=dimK,
                                       dimN=self.cri.dimN)
        xstep.solve()
        self.Sf = sl.rfftn(S.reshape(self.cri.shpS), self.cri.Nv,
                           self.cri.axisN)
        self.setcoef(xstep.getcoef())
        self.xstep_itstat = xstep.itstat[-1] if xstep.itstat else None
[ "def", "xstep", "(", "self", ",", "S", ",", "W", ",", "lmbda", ",", "dimK", ")", ":", "if", "self", ".", "opt", "[", "'CUDA_CBPDN'", "]", ":", "Z", "=", "cuda", ".", "cbpdnmsk", "(", "self", ".", "D", ".", "squeeze", "(", ")", ",", "S", "[", "...", ",", "0", "]", ",", "W", ".", "squeeze", "(", ")", ",", "lmbda", ",", "self", ".", "opt", "[", "'CBPDN'", "]", ")", "Z", "=", "Z", ".", "reshape", "(", "self", ".", "cri", ".", "Nv", "+", "(", "1", ",", "1", ",", "self", ".", "cri", ".", "M", ",", ")", ")", "self", ".", "Z", "[", ":", "]", "=", "np", ".", "asarray", "(", "Z", ",", "dtype", "=", "self", ".", "dtype", ")", "self", ".", "Zf", "=", "sl", ".", "rfftn", "(", "self", ".", "Z", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "self", ".", "Sf", "=", "sl", ".", "rfftn", "(", "S", ".", "reshape", "(", "self", ".", "cri", ".", "shpS", ")", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "self", ".", "xstep_itstat", "=", "None", "else", ":", "# Create X update object (external representation is expected!)", "xstep", "=", "cbpdn", ".", "ConvBPDNMaskDcpl", "(", "self", ".", "D", ".", "squeeze", "(", ")", ",", "S", ",", "lmbda", ",", "W", ",", "self", ".", "opt", "[", "'CBPDN'", "]", ",", "dimK", "=", "dimK", ",", "dimN", "=", "self", ".", "cri", ".", "dimN", ")", "xstep", ".", "solve", "(", ")", "self", ".", "Sf", "=", "sl", ".", "rfftn", "(", "S", ".", "reshape", "(", "self", ".", "cri", ".", "shpS", ")", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "self", ".", "setcoef", "(", "xstep", ".", "getcoef", "(", ")", ")", "self", ".", "xstep_itstat", "=", "xstep", ".", "itstat", "[", "-", "1", "]", "if", "xstep", ".", "itstat", "else", "None" ]
Solve CSC problem for training data `S`.
[ "Solve", "CSC", "problem", "for", "training", "data", "S", "." ]
python
train
dslackw/slpkg
slpkg/auto_pkg.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/auto_pkg.py#L46-L72
def select(self):
    """Select Slackware command
    """
    print("\nDetected Slackware binary package for installation:\n")
    for pkg in self.packages:
        print(" " + pkg.split("/")[-1])
    print("")
    self.msg.template(78)
    print("| Choose a Slackware command:")
    self.msg.template(78)
    for com in sorted(self.commands):
        print("| {0}{1}{2}) {3}{4}{5}".format(
            self.meta.color["RED"], com, self.meta.color["ENDC"],
            self.meta.color["GREEN"], self.commands[com],
            self.meta.color["ENDC"]))
    self.msg.template(78)
    try:
        self.choice = raw_input(" > ")
    except EOFError:
        print("")
        raise SystemExit()
    if self.choice in self.commands.keys():
        sys.stdout.write(" \x1b[1A{0}{1}{2}\n\n".format(
            self.meta.color["CYAN"], self.commands[self.choice],
            self.meta.color["ENDC"]))
        sys.stdout.flush()
        self.execute()
[ "def", "select", "(", "self", ")", ":", "print", "(", "\"\\nDetected Slackware binary package for installation:\\n\"", ")", "for", "pkg", "in", "self", ".", "packages", ":", "print", "(", "\" \"", "+", "pkg", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ")", "print", "(", "\"\"", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "print", "(", "\"| Choose a Slackware command:\"", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "for", "com", "in", "sorted", "(", "self", ".", "commands", ")", ":", "print", "(", "\"| {0}{1}{2}) {3}{4}{5}\"", ".", "format", "(", "self", ".", "meta", ".", "color", "[", "\"RED\"", "]", ",", "com", ",", "self", ".", "meta", ".", "color", "[", "\"ENDC\"", "]", ",", "self", ".", "meta", ".", "color", "[", "\"GREEN\"", "]", ",", "self", ".", "commands", "[", "com", "]", ",", "self", ".", "meta", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "try", ":", "self", ".", "choice", "=", "raw_input", "(", "\" > \"", ")", "except", "EOFError", ":", "print", "(", "\"\"", ")", "raise", "SystemExit", "(", ")", "if", "self", ".", "choice", "in", "self", ".", "commands", ".", "keys", "(", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\" \\x1b[1A{0}{1}{2}\\n\\n\"", ".", "format", "(", "self", ".", "meta", ".", "color", "[", "\"CYAN\"", "]", ",", "self", ".", "commands", "[", "self", ".", "choice", "]", ",", "self", ".", "meta", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "self", ".", "execute", "(", ")" ]
Select Slackware command
[ "Select", "Slackware", "command" ]
python
train
krukas/Trionyx
trionyx/models.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/models.py#L60-L69
def get_fields(cls, inlcude_base=False, include_id=False):
    """Get model fields"""
    for field in cls._meta.fields:
        if field.name == 'deleted':
            continue
        if not include_id and field.name == 'id':
            continue
        if not inlcude_base and field.name in ['created_at', 'updated_at']:
            continue
        yield field
[ "def", "get_fields", "(", "cls", ",", "inlcude_base", "=", "False", ",", "include_id", "=", "False", ")", ":", "for", "field", "in", "cls", ".", "_meta", ".", "fields", ":", "if", "field", ".", "name", "==", "'deleted'", ":", "continue", "if", "not", "include_id", "and", "field", ".", "name", "==", "'id'", ":", "continue", "if", "not", "inlcude_base", "and", "field", ".", "name", "in", "[", "'created_at'", ",", "'updated_at'", "]", ":", "continue", "yield", "field" ]
Get model fields
[ "Get", "model", "fields" ]
python
train
McSwindler/python-milight
milight/white.py
https://github.com/McSwindler/python-milight/blob/4891b1d7d6a720901a27a64f7b0d0c208f0c291f/milight/white.py#L87-L95
def warmness(level=100, group=0): """ Assumes level is out of 100 """ if level not in range(0,101): raise Exception("Warmness must be value between 0 and 100") b = int(floor(level / 10.0)) #lights have 10 levels of warmness commands = list(coolest(group)) for i in range(0, b): commands.append(COMMANDS['WARMER']) return tuple(commands)
[ "def", "warmness", "(", "level", "=", "100", ",", "group", "=", "0", ")", ":", "if", "level", "not", "in", "range", "(", "0", ",", "101", ")", ":", "raise", "Exception", "(", "\"Warmness must be value between 0 and 100\"", ")", "b", "=", "int", "(", "floor", "(", "level", "/", "10.0", ")", ")", "#lights have 10 levels of warmness", "commands", "=", "list", "(", "coolest", "(", "group", ")", ")", "for", "i", "in", "range", "(", "0", ",", "b", ")", ":", "commands", ".", "append", "(", "COMMANDS", "[", "'WARMER'", "]", ")", "return", "tuple", "(", "commands", ")" ]
Assumes level is out of 100
[ "Assumes", "level", "is", "out", "of", "100" ]
python
valid
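A minimal sketch of the level-to-steps arithmetic used by warmness above; the library's coolest() reset and COMMANDS table are assumed from the surrounding module and left out here:

from math import floor

def warmer_steps(level):
    # The bulbs expose 10 discrete warmness steps; a 0-100 level maps
    # onto them by integer division, e.g. level=75 -> 7 WARMER commands.
    if level not in range(0, 101):
        raise ValueError("Warmness must be a value between 0 and 100")
    return int(floor(level / 10.0))

assert warmer_steps(75) == 7
assert warmer_steps(100) == 10
assert warmer_steps(9) == 0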
Erotemic/utool
utool/util_profile.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_profile.py#L141-L170
def get_summary(profile_block_list, maxlines=20): """ References: https://github.com/rkern/line_profiler """ time_list = [get_block_totaltime(block) for block in profile_block_list] time_list = [time if time is not None else -1 for time in time_list] blockid_list = [get_block_id(block) for block in profile_block_list] sortx = ut.list_argsort(time_list) sorted_time_list = ut.take(time_list, sortx) sorted_blockid_list = ut.take(blockid_list, sortx) aligned_blockid_list = ut.util_str.align_lines(sorted_blockid_list, ':') summary_lines = [('%6.2f seconds - ' % time) + line for time, line in zip(sorted_time_list, aligned_blockid_list)] #summary_header = ut.codeblock( # ''' # CLEANED PROFILE OUPUT # The Pystone timings are not from kernprof, so they may include kernprof # overhead, whereas kernprof timings do not (unless the line being # profiled is also decorated with kernrof) # The kernprof times are reported in Timer Units # ''') summary_lines_ = ut.listclip(summary_lines, maxlines, fromback=True) summary_text = '\n'.join(summary_lines_) return summary_text
[ "def", "get_summary", "(", "profile_block_list", ",", "maxlines", "=", "20", ")", ":", "time_list", "=", "[", "get_block_totaltime", "(", "block", ")", "for", "block", "in", "profile_block_list", "]", "time_list", "=", "[", "time", "if", "time", "is", "not", "None", "else", "-", "1", "for", "time", "in", "time_list", "]", "blockid_list", "=", "[", "get_block_id", "(", "block", ")", "for", "block", "in", "profile_block_list", "]", "sortx", "=", "ut", ".", "list_argsort", "(", "time_list", ")", "sorted_time_list", "=", "ut", ".", "take", "(", "time_list", ",", "sortx", ")", "sorted_blockid_list", "=", "ut", ".", "take", "(", "blockid_list", ",", "sortx", ")", "aligned_blockid_list", "=", "ut", ".", "util_str", ".", "align_lines", "(", "sorted_blockid_list", ",", "':'", ")", "summary_lines", "=", "[", "(", "'%6.2f seconds - '", "%", "time", ")", "+", "line", "for", "time", ",", "line", "in", "zip", "(", "sorted_time_list", ",", "aligned_blockid_list", ")", "]", "#summary_header = ut.codeblock(", "# '''", "# CLEANED PROFILE OUPUT", "# The Pystone timings are not from kernprof, so they may include kernprof", "# overhead, whereas kernprof timings do not (unless the line being", "# profiled is also decorated with kernrof)", "# The kernprof times are reported in Timer Units", "# ''')", "summary_lines_", "=", "ut", ".", "listclip", "(", "summary_lines", ",", "maxlines", ",", "fromback", "=", "True", ")", "summary_text", "=", "'\\n'", ".", "join", "(", "summary_lines_", ")", "return", "summary_text" ]
References: https://github.com/rkern/line_profiler
[ "References", ":", "https", ":", "//", "github", ".", "com", "/", "rkern", "/", "line_profiler" ]
python
train
googleapis/oauth2client
oauth2client/contrib/appengine.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/appengine.py#L757-L810
def callback_handler(self): """RequestHandler for the OAuth 2.0 redirect callback. Usage:: app = webapp.WSGIApplication([ ('/index', MyIndexHandler), ..., (decorator.callback_path, decorator.callback_handler()) ]) Returns: A webapp.RequestHandler that handles the redirect back from the server during the OAuth 2.0 dance. """ decorator = self class OAuth2Handler(webapp.RequestHandler): """Handler for the redirect_uri of the OAuth 2.0 dance.""" @login_required def get(self): error = self.request.get('error') if error: errormsg = self.request.get('error_description', error) self.response.out.write( 'The authorization request failed: {0}'.format( _safe_html(errormsg))) else: user = users.get_current_user() decorator._create_flow(self) credentials = decorator.flow.step2_exchange( self.request.params) decorator._storage_class( decorator._credentials_class, None, decorator._credentials_property_name, user=user).put(credentials) redirect_uri = _parse_state_value( str(self.request.get('state')), user) if redirect_uri is None: self.response.out.write( 'The authorization request failed') return if (decorator._token_response_param and credentials.token_response): resp_json = json.dumps(credentials.token_response) redirect_uri = _helpers._add_query_parameter( redirect_uri, decorator._token_response_param, resp_json) self.redirect(redirect_uri) return OAuth2Handler
[ "def", "callback_handler", "(", "self", ")", ":", "decorator", "=", "self", "class", "OAuth2Handler", "(", "webapp", ".", "RequestHandler", ")", ":", "\"\"\"Handler for the redirect_uri of the OAuth 2.0 dance.\"\"\"", "@", "login_required", "def", "get", "(", "self", ")", ":", "error", "=", "self", ".", "request", ".", "get", "(", "'error'", ")", "if", "error", ":", "errormsg", "=", "self", ".", "request", ".", "get", "(", "'error_description'", ",", "error", ")", "self", ".", "response", ".", "out", ".", "write", "(", "'The authorization request failed: {0}'", ".", "format", "(", "_safe_html", "(", "errormsg", ")", ")", ")", "else", ":", "user", "=", "users", ".", "get_current_user", "(", ")", "decorator", ".", "_create_flow", "(", "self", ")", "credentials", "=", "decorator", ".", "flow", ".", "step2_exchange", "(", "self", ".", "request", ".", "params", ")", "decorator", ".", "_storage_class", "(", "decorator", ".", "_credentials_class", ",", "None", ",", "decorator", ".", "_credentials_property_name", ",", "user", "=", "user", ")", ".", "put", "(", "credentials", ")", "redirect_uri", "=", "_parse_state_value", "(", "str", "(", "self", ".", "request", ".", "get", "(", "'state'", ")", ")", ",", "user", ")", "if", "redirect_uri", "is", "None", ":", "self", ".", "response", ".", "out", ".", "write", "(", "'The authorization request failed'", ")", "return", "if", "(", "decorator", ".", "_token_response_param", "and", "credentials", ".", "token_response", ")", ":", "resp_json", "=", "json", ".", "dumps", "(", "credentials", ".", "token_response", ")", "redirect_uri", "=", "_helpers", ".", "_add_query_parameter", "(", "redirect_uri", ",", "decorator", ".", "_token_response_param", ",", "resp_json", ")", "self", ".", "redirect", "(", "redirect_uri", ")", "return", "OAuth2Handler" ]
RequestHandler for the OAuth 2.0 redirect callback. Usage:: app = webapp.WSGIApplication([ ('/index', MyIndexHandler), ..., (decorator.callback_path, decorator.callback_handler()) ]) Returns: A webapp.RequestHandler that handles the redirect back from the server during the OAuth 2.0 dance.
[ "RequestHandler", "for", "the", "OAuth", "2", ".", "0", "redirect", "callback", "." ]
python
valid
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L596-L614
def init_defaults(self): """ Sets the default values for this instance """ self.sql = '' self.tables = [] self.joins = [] self._where = Where() self.groups = [] self.sorters = [] self._limit = None self.table_prefix = '' self.is_inner = False self.with_tables = [] self._distinct = False self.distinct_ons = [] self.field_names = [] self.field_names_pk = None self.values = []
[ "def", "init_defaults", "(", "self", ")", ":", "self", ".", "sql", "=", "''", "self", ".", "tables", "=", "[", "]", "self", ".", "joins", "=", "[", "]", "self", ".", "_where", "=", "Where", "(", ")", "self", ".", "groups", "=", "[", "]", "self", ".", "sorters", "=", "[", "]", "self", ".", "_limit", "=", "None", "self", ".", "table_prefix", "=", "''", "self", ".", "is_inner", "=", "False", "self", ".", "with_tables", "=", "[", "]", "self", ".", "_distinct", "=", "False", "self", ".", "distinct_ons", "=", "[", "]", "self", ".", "field_names", "=", "[", "]", "self", ".", "field_names_pk", "=", "None", "self", ".", "values", "=", "[", "]" ]
Sets the default values for this instance
[ "Sets", "the", "default", "values", "for", "this", "instance" ]
python
train
taborlab/FlowCal
FlowCal/transform.py
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/transform.py#L22-L79
def transform(data, channels, transform_fxn, def_channels = None): """ Apply some transformation function to flow cytometry data. This function is a template transformation function, intended to be used by other specific transformation functions. It performs basic checks on `channels` and `data`. It then applies `transform_fxn` to the specified channels. Finally, it rescales ``data.range`` if necessary. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str, optional Channels on which to perform the transformation. If `channels` is None, use def_channels. transform_fxn : function Function that performs the actual transformation. def_channels : int, str, list of int, list of str, optional Default set of channels in which to perform the transformation. If `def_channels` is None, use all channels. Returns ------- data_t : FCSData or numpy array NxD transformed flow cytometry data. """ # Copy data array data_t = data.copy().astype(np.float64) # Default if channels is None: if def_channels is None: channels = range(data_t.shape[1]) else: channels = def_channels # Convert channels to iterable if not (hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types)): channels = [channels] # Apply transformation data_t[:,channels] = transform_fxn(data_t[:,channels]) # Apply transformation to ``data.range`` if hasattr(data_t, '_range'): for channel in channels: # Transform channel name to index if necessary channel_idx = data_t._name_to_index(channel) if data_t._range[channel_idx] is not None: data_t._range[channel_idx] = \ transform_fxn(data_t._range[channel_idx]) return data_t
[ "def", "transform", "(", "data", ",", "channels", ",", "transform_fxn", ",", "def_channels", "=", "None", ")", ":", "# Copy data array", "data_t", "=", "data", ".", "copy", "(", ")", ".", "astype", "(", "np", ".", "float64", ")", "# Default", "if", "channels", "is", "None", ":", "if", "def_channels", "is", "None", ":", "channels", "=", "range", "(", "data_t", ".", "shape", "[", "1", "]", ")", "else", ":", "channels", "=", "def_channels", "# Convert channels to iterable", "if", "not", "(", "hasattr", "(", "channels", ",", "'__iter__'", ")", "and", "not", "isinstance", "(", "channels", ",", "six", ".", "string_types", ")", ")", ":", "channels", "=", "[", "channels", "]", "# Apply transformation", "data_t", "[", ":", ",", "channels", "]", "=", "transform_fxn", "(", "data_t", "[", ":", ",", "channels", "]", ")", "# Apply transformation to ``data.range``", "if", "hasattr", "(", "data_t", ",", "'_range'", ")", ":", "for", "channel", "in", "channels", ":", "# Transform channel name to index if necessary", "channel_idx", "=", "data_t", ".", "_name_to_index", "(", "channel", ")", "if", "data_t", ".", "_range", "[", "channel_idx", "]", "is", "not", "None", ":", "data_t", ".", "_range", "[", "channel_idx", "]", "=", "transform_fxn", "(", "data_t", ".", "_range", "[", "channel_idx", "]", ")", "return", "data_t" ]
Apply some transformation function to flow cytometry data. This function is a template transformation function, intended to be used by other specific transformation functions. It performs basic checks on `channels` and `data`. It then applies `transform_fxn` to the specified channels. Finally, it rescales ``data.range`` if necessary. Parameters ---------- data : FCSData or numpy array NxD flow cytometry data where N is the number of events and D is the number of parameters (aka channels). channels : int, str, list of int, list of str, optional Channels on which to perform the transformation. If `channels` is None, use def_channels. transform_fxn : function Function that performs the actual transformation. def_channels : int, str, list of int, list of str, optional Default set of channels in which to perform the transformation. If `def_channels` is None, use all channels. Returns ------- data_t : FCSData or numpy array NxD transformed flow cytometry data.
[ "Apply", "some", "transformation", "function", "to", "flow", "cytometry", "data", "." ]
python
train
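A hedged usage sketch for the transform template above, on a plain NumPy array (which the docstring says is accepted), with np.log10 standing in for transform_fxn; the import path is an assumption taken from the record's file layout:

import numpy as np
# from FlowCal.transform import transform  # assumed import path

data = np.array([[10.0, 100.0],
                 [100.0, 1000.0]])

# Log-transform channel 0 only; channel 1 passes through untouched.
# A bare ndarray has no _range attribute, so the range-rescaling
# branch at the end of transform() is skipped.
data_t = transform(data, channels=[0], transform_fxn=np.log10)
# data_t[:, 0] is now [1.0, 2.0]; data_t[:, 1] is unchanged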
openstack/quark
quark/tools/async_worker.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/async_worker.py#L115-L125
def start_api_and_rpc_workers(self): """Initializes eventlet and waits for workers to exit. Spawns the workers returned from serve_rpc """ pool = eventlet.GreenPool() quark_rpc = self.serve_rpc() pool.spawn(quark_rpc.wait) pool.waitall()
[ "def", "start_api_and_rpc_workers", "(", "self", ")", ":", "pool", "=", "eventlet", ".", "GreenPool", "(", ")", "quark_rpc", "=", "self", ".", "serve_rpc", "(", ")", "pool", ".", "spawn", "(", "quark_rpc", ".", "wait", ")", "pool", ".", "waitall", "(", ")" ]
Initializes eventlet and waits for workers to exit. Spawns the workers returned from serve_rpc
[ "Initializes", "eventlet", "and", "starts", "wait", "for", "workers", "to", "exit", "." ]
python
valid
shreyaspotnis/rampage
rampage/server.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/server.py#L616-L626
def get_digital_channels(channel_list): """Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.""" dig_ids = digital_channel_ids() dig_channels = [] for ln in dig_ids: for ch in channel_list: if ch.dct['id'] == ln: dig_channels.append(ch) break return dig_channels
[ "def", "get_digital_channels", "(", "channel_list", ")", ":", "dig_ids", "=", "digital_channel_ids", "(", ")", "dig_channels", "=", "[", "]", "for", "ln", "in", "dig_ids", ":", "for", "ch", "in", "channel_list", ":", "if", "ch", ".", "dct", "[", "'id'", "]", "==", "ln", ":", "dig_channels", ".", "append", "(", "ch", ")", "break", "return", "dig_channels" ]
Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.
[ "Goes", "through", "channel", "list", "and", "returns", "digital", "channels", "with", "ids", "Dev1", "/", "port0", "/", "line08", "Dev1", "/", "port0", "/", "line09", "...", "Dev1", "/", "port0", "/", "line30", "." ]
python
train
andrewsnowden/dota2py
dota2py/parser.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/parser.py#L100-L115
def read_vint32(self): """ This seems to be a variable length integer ala utf-8 style """ result = 0 count = 0 while True: if count > 4: raise ValueError("Corrupt VarInt32") b = self.read_byte() result = result | (b & 0x7F) << (7 * count) count += 1 if not b & 0x80: return result
[ "def", "read_vint32", "(", "self", ")", ":", "result", "=", "0", "count", "=", "0", "while", "True", ":", "if", "count", ">", "4", ":", "raise", "ValueError", "(", "\"Corrupt VarInt32\"", ")", "b", "=", "self", ".", "read_byte", "(", ")", "result", "=", "result", "|", "(", "b", "&", "0x7F", ")", "<<", "(", "7", "*", "count", ")", "count", "+=", "1", "if", "not", "b", "&", "0x80", ":", "return", "result" ]
This seems to be a variable length integer ala utf-8 style
[ "This", "seems", "to", "be", "a", "variable", "length", "integer", "ala", "utf", "-", "8", "style" ]
python
train
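A self-contained worked example of the same little-endian base-128 decoding as read_vint32 above, with the class's read_byte() replaced by iteration over an in-memory byte list:

def decode_varint32(data):
    # Each byte contributes its low 7 bits; the high bit marks
    # "more bytes follow".
    result = 0
    for count, b in enumerate(data):
        if count > 4:
            raise ValueError("Corrupt VarInt32")
        result |= (b & 0x7F) << (7 * count)
        if not b & 0x80:
            return result
    raise ValueError("Truncated VarInt32")

# 300 = 0b100101100: the low 7 bits are 0x2C (setting the continuation
# bit gives 0xAC), and the remaining bits are 0x02.
assert decode_varint32([0xAC, 0x02]) == 300
assert decode_varint32([0x7F]) == 127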
deepmind/sonnet
sonnet/python/modules/block_matrix.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/block_matrix.py#L182-L185
def _content_blocks(self, r): """Number of content blocks in block row `r`.""" return (self._block_rows - self._left_zero_blocks(r) - self._right_zero_blocks(r))
[ "def", "_content_blocks", "(", "self", ",", "r", ")", ":", "return", "(", "self", ".", "_block_rows", "-", "self", ".", "_left_zero_blocks", "(", "r", ")", "-", "self", ".", "_right_zero_blocks", "(", "r", ")", ")" ]
Number of content blocks in block row `r`.
[ "Number", "of", "content", "blocks", "in", "block", "row", "r", "." ]
python
train
avihad/twistes
twistes/client.py
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/client.py#L348-L377
def index(self, index, doc_type, body, id=None, **query_params): """ Adds or updates a typed JSON document in a specific index, making it searchable. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_ :param index: The name of the index :param doc_type: The type of the document :param body: The document :param id: Document ID :arg consistency: Explicit write consistency setting for the operation, valid choices are: 'one', 'quorum', 'all' :arg op_type: Explicit operation type, default 'index', valid choices are: 'index', 'create' :arg parent: ID of the parent document :arg refresh: Refresh the index after performing the operation :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force' """ self._es_parser.is_not_empty_params(index, doc_type, body) method = HttpMethod.POST if id in NULL_VALUES else HttpMethod.PUT path = self._es_parser.make_path(index, doc_type, id) result = yield self._perform_request(method, path, body, params=query_params) returnValue(result)
[ "def", "index", "(", "self", ",", "index", ",", "doc_type", ",", "body", ",", "id", "=", "None", ",", "*", "*", "query_params", ")", ":", "self", ".", "_es_parser", ".", "is_not_empty_params", "(", "index", ",", "doc_type", ",", "body", ")", "method", "=", "HttpMethod", ".", "POST", "if", "id", "in", "NULL_VALUES", "else", "HttpMethod", ".", "PUT", "path", "=", "self", ".", "_es_parser", ".", "make_path", "(", "index", ",", "doc_type", ",", "id", ")", "result", "=", "yield", "self", ".", "_perform_request", "(", "method", ",", "path", ",", "body", ",", "params", "=", "query_params", ")", "returnValue", "(", "result", ")" ]
Adds or updates a typed JSON document in a specific index, making it searchable. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_ :param index: The name of the index :param doc_type: The type of the document :param body: The document :param id: Document ID :arg consistency: Explicit write consistency setting for the operation, valid choices are: 'one', 'quorum', 'all' :arg op_type: Explicit operation type, default 'index', valid choices are: 'index', 'create' :arg parent: ID of the parent document :arg refresh: Refresh the index after performing the operation :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force'
[ "Adds", "or", "updates", "a", "typed", "JSON", "document", "in", "a", "specific", "index", "making", "it", "searchable", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "docs", "-", "index_", ".", "html", ">", "_" ]
python
train
synw/dataswim
dataswim/data/transform/columns.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/transform/columns.py#L80-L97
def drop(self, *cols): """ Drops columns from the main dataframe :param cols: names of the columns :type cols: str :example: ``ds.drop("Col 1", "Col 2")`` """ try: index = self.df.columns.values for col in cols: if col not in index: self.warning("Column", col, "not found. Aborting") return self.df = self.df.drop(col, axis=1) except Exception as e: self.err(e, self.drop, "Can not drop column")
[ "def", "drop", "(", "self", ",", "*", "cols", ")", ":", "try", ":", "index", "=", "self", ".", "df", ".", "columns", ".", "values", "for", "col", "in", "cols", ":", "if", "col", "not", "in", "index", ":", "self", ".", "warning", "(", "\"Column\"", ",", "col", ",", "\"not found. Aborting\"", ")", "return", "self", ".", "df", "=", "self", ".", "df", ".", "drop", "(", "col", ",", "axis", "=", "1", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "self", ".", "drop", ",", "\"Can not drop column\"", ")" ]
Drops columns from the main dataframe :param cols: names of the columns :type cols: str :example: ``ds.drop("Col 1", "Col 2")``
[ "Drops", "columns", "from", "the", "main", "dataframe" ]
python
train
vinu76jsr/pipsort
setup.py
https://github.com/vinu76jsr/pipsort/blob/71ead1269de85ee0255741390bf1da85d81b7d16/setup.py#L49-L56
def version(): """ Get the local package version. """ path = join("lib", _CONFIG["name"], "__version__.py") with open(path) as stream: exec(stream.read()) return __version__
[ "def", "version", "(", ")", ":", "path", "=", "join", "(", "\"lib\"", ",", "_CONFIG", "[", "\"name\"", "]", ",", "\"__version__.py\"", ")", "with", "open", "(", "path", ")", "as", "stream", ":", "exec", "(", "stream", ".", "read", "(", ")", ")", "return", "__version__" ]
Get the local package version.
[ "Get", "the", "local", "package", "version", "." ]
python
train
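Note that version() above relies on exec() injecting __version__ into the function's locals, which does not work reliably on Python 3. A minimal sketch of a portable variant that executes into an explicit namespace; the lib/<name>/__version__.py layout is taken from the record, while the name argument is a hypothetical parameter added for illustration:

from os.path import join

def version(name="pipsort"):
    # Execute the version module into an explicit dict instead of
    # relying on exec() to rebind function locals (Python 3 does not).
    namespace = {}
    with open(join("lib", name, "__version__.py")) as stream:
        exec(stream.read(), namespace)
    return namespace["__version__"]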
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L1940-L1960
def wb010(self, value=None): """ Corresponds to IDD Field `wb010` Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `wb010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `wb010`'.format(value)) self._wb010 = value
[ "def", "wb010", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `wb010`'", ".", "format", "(", "value", ")", ")", "self", ".", "_wb010", "=", "value" ]
Corresponds to IDD Field `wb010` Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `wb010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "wb010", "Wet", "-", "bulb", "temperature", "corresponding", "to", "1", ".", "0%", "annual", "cumulative", "frequency", "of", "occurrence" ]
python
train
MacHu-GWU/constant2-project
constant2/_constant2.py
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/_constant2.py#L77-L103
def Items(cls): """non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attribute ... b = 2 # non-class attribute ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Items() [("a", 1), ("b", 2)] .. versionadded:: 0.0.5 """ l = list() for attr, value in get_all_attributes(cls): # if it's not a class(Constant) if not inspect.isclass(value): l.append((attr, value)) return list(sorted(l, key=lambda x: x[0]))
[ "def", "Items", "(", "cls", ")", ":", "l", "=", "list", "(", ")", "for", "attr", ",", "value", "in", "get_all_attributes", "(", "cls", ")", ":", "# if it's not a class(Constant)", "if", "not", "inspect", ".", "isclass", "(", "value", ")", ":", "l", ".", "append", "(", "(", "attr", ",", "value", ")", ")", "return", "list", "(", "sorted", "(", "l", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")" ]
non-class attributes ordered by alphabetical order. :: >>> class MyClass(Constant): ... a = 1 # non-class attribute ... b = 2 # non-class attribute ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Items() [("a", 1), ("b", 2)] .. versionadded:: 0.0.5
[ "non", "-", "class", "attributes", "ordered", "by", "alphabetical", "order", "." ]
python
train
rootpy/rootpy
rootpy/plotting/style/lhcb/labels.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/style/lhcb/labels.py#L17-L61
def LHCb_label(side="L", status="final", text="", pad=None): """Add an 'LHCb (Preliminary|Unofficial)' label to the current pad.""" if pad is None: pad = ROOT.gPad with preserve_current_canvas(): pad.cd() if side == "L": l = ROOT.TPaveText(pad.GetLeftMargin() + 0.05, 0.87 - pad.GetTopMargin(), pad.GetLeftMargin() + 0.30, 0.95 - pad.GetTopMargin(), "BRNDC") elif side == "R": l = ROOT.TPaveText(0.70 - pad.GetRightMargin(), 0.75 - pad.GetTopMargin(), 0.95 - pad.GetRightMargin(), 0.85 - pad.GetTopMargin(), "BRNDC") else: raise TypeError("Unknown side '{0}'".format(side)) if status == "final": l.AddText("LHCb") elif status == "preliminary": l.AddText("#splitline{LHCb}{#scale[1.0]{Preliminary}}") elif status == "unofficial": l.AddText("#splitline{LHCb}{#scale[1.0]{Unofficial}}") elif status == "custom": l.AddText(text) else: raise TypeError("Unknown status '{0}'".format(status)) l.SetFillColor(0) l.SetTextAlign(12) l.SetBorderSize(0) l.Draw() keepalive(pad, l) pad.Modified() pad.Update() return l, None
[ "def", "LHCb_label", "(", "side", "=", "\"L\"", ",", "status", "=", "\"final\"", ",", "text", "=", "\"\"", ",", "pad", "=", "None", ")", ":", "if", "pad", "is", "None", ":", "pad", "=", "ROOT", ".", "gPad", "with", "preserve_current_canvas", "(", ")", ":", "pad", ".", "cd", "(", ")", "if", "side", "==", "\"L\"", ":", "l", "=", "ROOT", ".", "TPaveText", "(", "pad", ".", "GetLeftMargin", "(", ")", "+", "0.05", ",", "0.87", "-", "pad", ".", "GetTopMargin", "(", ")", ",", "pad", ".", "GetLeftMargin", "(", ")", "+", "0.30", ",", "0.95", "-", "pad", ".", "GetTopMargin", "(", ")", ",", "\"BRNDC\"", ")", "elif", "side", "==", "\"R\"", ":", "l", "=", "ROOT", ".", "TPaveText", "(", "0.70", "-", "pad", ".", "GetRightMargin", "(", ")", ",", "0.75", "-", "pad", ".", "GetTopMargin", "(", ")", ",", "0.95", "-", "pad", ".", "GetRightMargin", "(", ")", ",", "0.85", "-", "pad", ".", "GetTopMargin", "(", ")", ",", "\"BRNDC\"", ")", "else", ":", "raise", "TypeError", "(", "\"Unknown side '{0}'\"", ".", "format", "(", "side", ")", ")", "if", "status", "==", "\"final\"", ":", "l", ".", "AddText", "(", "\"LHCb\"", ")", "elif", "status", "==", "\"preliminary\"", ":", "l", ".", "AddText", "(", "\"#splitline{LHCb}{#scale[1.0]{Preliminary}}\"", ")", "elif", "status", "==", "\"unofficial\"", ":", "l", ".", "AddText", "(", "\"#splitline{LHCb}{#scale[1.0]{Unofficial}}\"", ")", "elif", "status", "==", "\"custom\"", ":", "l", ".", "AddText", "(", "text", ")", "else", ":", "raise", "TypeError", "(", "\"Unknown status '{0}'\"", ".", "format", "(", "status", ")", ")", "l", ".", "SetFillColor", "(", "0", ")", "l", ".", "SetTextAlign", "(", "12", ")", "l", ".", "SetBorderSize", "(", "0", ")", "l", ".", "Draw", "(", ")", "keepalive", "(", "pad", ",", "l", ")", "pad", ".", "Modified", "(", ")", "pad", ".", "Update", "(", ")", "return", "l", ",", "None" ]
Add an 'LHCb (Preliminary|Unofficial)' label to the current pad.
[ "Add", "an", "LHCb", "(", "Preliminary|Unofficial", ")", "label", "to", "the", "current", "pad", "." ]
python
train
yyuu/botornado
boto/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/__init__.py#L264-L276
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.emr.EmrConnection` :return: A connection to Elastic mapreduce """ from boto.emr import EmrConnection return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_emr", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "boto", ".", "emr", "import", "EmrConnection", "return", "EmrConnection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "*", "*", "kwargs", ")" ]
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.emr.EmrConnection` :return: A connection to Elastic mapreduce
[ ":", "type", "aws_access_key_id", ":", "string", ":", "param", "aws_access_key_id", ":", "Your", "AWS", "Access", "Key", "ID" ]
python
train
za-creature/gulpless
gulpless/handlers.py
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L128-L132
def build(self, input_path, output_paths): """Should be extended by subclasses to actually do stuff. By default this will copy `input` over every file in the `outputs` list.""" for output in output_paths: shutil.copy(input_path, output)
[ "def", "build", "(", "self", ",", "input_path", ",", "output_paths", ")", ":", "for", "output", "in", "output_paths", ":", "shutil", ".", "copy", "(", "input_path", ",", "output_paths", ")" ]
Should be extended by subclasses to actually do stuff. By default this will copy `input` over every file in the `outputs` list.
[ "Should", "be", "extended", "by", "subclasses", "to", "actually", "do", "stuff", ".", "By", "default", "this", "will", "copy", "input", "over", "every", "file", "in", "the", "outputs", "list", "." ]
python
train
wadda/gps3
gps3/gps3threaded.py
https://github.com/wadda/gps3/blob/91adcd7073b891b135b2a46d039ce2125cf09a09/gps3/gps3threaded.py#L48-L58
def run_thread(self, usnap=.2, daemon=True): """run thread with data """ # self.stream_data() # Unless other changes are made this would limit to localhost only. try: gps3_data_thread = Thread(target=self.unpack_data, args=(usnap,), daemon=daemon) except TypeError: # threading.Thread() only accepts daemon argument in Python 3.3 gps3_data_thread = Thread(target=self.unpack_data, args=(usnap,)) gps3_data_thread.setDaemon(daemon) gps3_data_thread.start()
[ "def", "run_thread", "(", "self", ",", "usnap", "=", ".2", ",", "daemon", "=", "True", ")", ":", "# self.stream_data() # Unless other changes are made this would limit to localhost only.", "try", ":", "gps3_data_thread", "=", "Thread", "(", "target", "=", "self", ".", "unpack_data", ",", "args", "=", "{", "usnap", ":", "usnap", "}", ",", "daemon", "=", "daemon", ")", "except", "TypeError", ":", "# threading.Thread() only accepts daemon argument in Python 3.3", "gps3_data_thread", "=", "Thread", "(", "target", "=", "self", ".", "unpack_data", ",", "args", "=", "{", "usnap", ":", "usnap", "}", ")", "gps3_data_thread", ".", "setDaemon", "(", "daemon", ")", "gps3_data_thread", ".", "start", "(", ")" ]
run thread with data
[ "run", "thread", "with", "data" ]
python
train
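A standalone sketch of the compatibility pattern run_thread uses: threading.Thread() only grew the daemon keyword in Python 3.3, so older interpreters fall back to setDaemon(); the target and argument here are placeholders:

import threading

def spawn(target, arg, daemon=True):
    try:
        t = threading.Thread(target=target, args=(arg,), daemon=daemon)
    except TypeError:
        # Pre-3.3 interpreters reject the daemon keyword.
        t = threading.Thread(target=target, args=(arg,))
        t.setDaemon(daemon)
    t.start()
    return t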
klmitch/bark
bark/format.py
https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/format.py#L82-L96
def add_text(self, end, next=None): """ Adds the text from string beginning to the specified ending index to the format. :param end: The ending index of the string. :param next: The next string begin index. If None, the string index will not be updated. """ if self.str_begin != end: self.fmt.append_text(self.format[self.str_begin:end]) if next is not None: self.str_begin = next
[ "def", "add_text", "(", "self", ",", "end", ",", "next", "=", "None", ")", ":", "if", "self", ".", "str_begin", "!=", "end", ":", "self", ".", "fmt", ".", "append_text", "(", "self", ".", "format", "[", "self", ".", "str_begin", ":", "end", "]", ")", "if", "next", "is", "not", "None", ":", "self", ".", "str_begin", "=", "next" ]
Adds the text from string beginning to the specified ending index to the format. :param end: The ending index of the string. :param next: The next string begin index. If None, the string index will not be updated.
[ "Adds", "the", "text", "from", "string", "beginning", "to", "the", "specified", "ending", "index", "to", "the", "format", "." ]
python
train
HDI-Project/BTB
btb/tuning/tuner.py
https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L52-L60
def fit(self, X, y): """Fit Args: X (np.array): Array of hyperparameter values with shape (n_samples, len(tunables)) y (np.array): Array of scores with shape (n_samples, ) """ self.X = X self.y = y
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "X", "=", "X", "self", ".", "y", "=", "y" ]
Fit Args: X (np.array): Array of hyperparameter values with shape (n_samples, len(tunables)) y (np.array): Array of scores with shape (n_samples, )
[ "Fit" ]
python
train
fabioz/PyDev.Debugger
_pydevd_bundle/pydevconsole_code_for_ironpython.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevconsole_code_for_ironpython.py#L343-L364
def showtraceback(self, *args, **kwargs): """Display the exception that just occurred. We remove the first stack item because it is our own code. The output is written by self.write(), below. """ try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] list = traceback.format_list(tblist) if list: list.insert(0, "Traceback (most recent call last):\n") list[len(list):] = traceback.format_exception_only(type, value) finally: tblist = tb = None map(self.write, list)
[ "def", "showtraceback", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "type", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "sys", ".", "last_type", "=", "type", "sys", ".", "last_value", "=", "value", "sys", ".", "last_traceback", "=", "tb", "tblist", "=", "traceback", ".", "extract_tb", "(", "tb", ")", "del", "tblist", "[", ":", "1", "]", "list", "=", "traceback", ".", "format_list", "(", "tblist", ")", "if", "list", ":", "list", ".", "insert", "(", "0", ",", "\"Traceback (most recent call last):\\n\"", ")", "list", "[", "len", "(", "list", ")", ":", "]", "=", "traceback", ".", "format_exception_only", "(", "type", ",", "value", ")", "finally", ":", "tblist", "=", "tb", "=", "None", "map", "(", "self", ".", "write", ",", "list", ")" ]
Display the exception that just occurred. We remove the first stack item because it is our own code. The output is written by self.write(), below.
[ "Display", "the", "exception", "that", "just", "occurred", "." ]
python
train
anthill/koala
koala/ast/__init__.py
https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L358-L386
def cell2code(cell, named_ranges): """Generate python code for the given cell""" if cell.formula: debug = False # if 'OFFSET' in cell.formula or 'INDEX' in cell.formula: # debug = True # if debug: # print 'FORMULA', cell.formula ref = parse_cell_address(cell.address()) if not cell.is_named_range else None sheet = cell.sheet e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False) ast,root = build_ast(e, debug = debug) code = root.emit(ast, context=sheet) # print 'CODE', code, ref else: ast = None if isinstance(cell.value, unicode): code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"' elif isinstance(cell.value, str): raise RuntimeError("Got unexpected non-unicode str") else: code = str(cell.value) return code,ast
[ "def", "cell2code", "(", "cell", ",", "named_ranges", ")", ":", "if", "cell", ".", "formula", ":", "debug", "=", "False", "# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:", "# debug = True", "# if debug:", "# print 'FORMULA', cell.formula", "ref", "=", "parse_cell_address", "(", "cell", ".", "address", "(", ")", ")", "if", "not", "cell", ".", "is_named_range", "else", "None", "sheet", "=", "cell", ".", "sheet", "e", "=", "shunting_yard", "(", "cell", ".", "formula", ",", "named_ranges", ",", "ref", "=", "ref", ",", "tokenize_range", "=", "False", ")", "ast", ",", "root", "=", "build_ast", "(", "e", ",", "debug", "=", "debug", ")", "code", "=", "root", ".", "emit", "(", "ast", ",", "context", "=", "sheet", ")", "# print 'CODE', code, ref", "else", ":", "ast", "=", "None", "if", "isinstance", "(", "cell", ".", "value", ",", "unicode", ")", ":", "code", "=", "u'u\"'", "+", "cell", ".", "value", ".", "replace", "(", "u'\"'", ",", "u'\\\\\"'", ")", "+", "u'\"'", "elif", "isinstance", "(", "cell", ".", "value", ",", "str", ")", ":", "raise", "RuntimeError", "(", "\"Got unexpected non-unicode str\"", ")", "else", ":", "code", "=", "str", "(", "cell", ".", "value", ")", "return", "code", ",", "ast" ]
Generate python code for the given cell
[ "Generate", "python", "code", "for", "the", "given", "cell" ]
python
train
heuer/cablemap
cablemap.tm/cablemap/tm/handler.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.tm/cablemap/tm/handler.py#L264-L281
def _handle_recipient(self, typ, recipient): """\ """ route, name, precedence, mcn = recipient.route, recipient.name, recipient.precedence, recipient.mcn if not name: return h = self._handler h.startAssociation(typ) h.role(psis.CABLE_TYPE, self._cable_psi) h.role(psis.RECIPIENT_TYPE, psis.station_psi(name, route)) if route: h.role(psis.ROUTE_TYPE, psis.route_psi(route)) if precedence: h.role(psis.PRECEDENCE_TYPE, psis.precedence_psi(precedence)) if mcn: h.role(psis.MCN_TYPE, psis.mcn_psi(mcn)) h.endAssociation()
[ "def", "_handle_recipient", "(", "self", ",", "typ", ",", "recipient", ")", ":", "route", ",", "name", ",", "precedence", ",", "mcn", "=", "recipient", ".", "route", ",", "recipient", ".", "name", ",", "recipient", ".", "precedence", ",", "recipient", ".", "mcn", "if", "not", "name", ":", "return", "h", "=", "self", ".", "_handler", "h", ".", "startAssociation", "(", "typ", ")", "h", ".", "role", "(", "psis", ".", "CABLE_TYPE", ",", "self", ".", "_cable_psi", ")", "h", ".", "role", "(", "psis", ".", "RECIPIENT_TYPE", ",", "psis", ".", "station_psi", "(", "name", ",", "route", ")", ")", "if", "route", ":", "h", ".", "role", "(", "psis", ".", "ROUTE_TYPE", ",", "psis", ".", "route_psi", "(", "route", ")", ")", "if", "precedence", ":", "h", ".", "role", "(", "psis", ".", "PRECEDENCE_TYPE", ",", "psis", ".", "precedence_psi", "(", "precedence", ")", ")", "if", "mcn", ":", "h", ".", "role", "(", "psis", ".", "MCN_TYPE", ",", "psis", ".", "mcn_psi", "(", "mcn", ")", ")", "h", ".", "endAssociation", "(", ")" ]
\
[ "\\" ]
python
train
tradenity/python-sdk
tradenity/resources/refund_transaction.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/refund_transaction.py#L454-L474
def get_refund_transaction_by_id(cls, refund_transaction_id, **kwargs): """Find RefundTransaction Return single instance of RefundTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_refund_transaction_by_id(refund_transaction_id, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to return (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) else: (data) = cls._get_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) return data
[ "def", "get_refund_transaction_by_id", "(", "cls", ",", "refund_transaction_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_refund_transaction_by_id_with_http_info", "(", "refund_transaction_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_refund_transaction_by_id_with_http_info", "(", "refund_transaction_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Find RefundTransaction Return single instance of RefundTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_refund_transaction_by_id(refund_transaction_id, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to return (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
[ "Find", "RefundTransaction" ]
python
train
ramrod-project/database-brain
schema/brain/queries/reads.py
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/queries/reads.py#L21-L44
def _jobs_cursor(plugin_name, location=None, port=None, custom=None): """ generates a reql cursor for plugin_name with status ready and prepares to sort by StartTime :param plugin_name: :param location: :param port: :return: """ cur = RBJ.get_all(READY, index=STATUS_FIELD) cur_filter = (r.row[TARGET_FIELD][PLUGIN_NAME_KEY] == plugin_name) cur_filter = cur_filter & \ (~r.row.has_fields(EXPIRE_FIELD) | r.row[EXPIRE_FIELD].ge(time())) if location: cur_filter = cur_filter & \ (r.row[TARGET_FIELD][LOCATION_FIELD] == location) if port: cur_filter = cur_filter & \ (r.row[TARGET_FIELD][PORT_FIELD] == port) if custom: cur_filter = cur_filter & custom return cur.filter(cur_filter).order_by(START_FIELD)
[ "def", "_jobs_cursor", "(", "plugin_name", ",", "location", "=", "None", ",", "port", "=", "None", ",", "custom", "=", "None", ")", ":", "cur", "=", "RBJ", ".", "get_all", "(", "READY", ",", "index", "=", "STATUS_FIELD", ")", "cur_filter", "=", "(", "r", ".", "row", "[", "TARGET_FIELD", "]", "[", "PLUGIN_NAME_KEY", "]", "==", "plugin_name", ")", "cur_filter", "=", "cur_filter", "&", "(", "~", "r", ".", "row", ".", "has_fields", "(", "EXPIRE_FIELD", ")", "|", "r", ".", "row", "[", "EXPIRE_FIELD", "]", ".", "ge", "(", "time", "(", ")", ")", ")", "if", "location", ":", "cur_filter", "=", "cur_filter", "&", "(", "r", ".", "row", "[", "TARGET_FIELD", "]", "[", "LOCATION_FIELD", "]", "==", "location", ")", "if", "port", ":", "cur_filter", "=", "cur_filter", "&", "(", "r", ".", "row", "[", "TARGET_FIELD", "]", "[", "PORT_FIELD", "]", "==", "port", ")", "if", "custom", ":", "cur_filter", "=", "cur_filter", "&", "custom", "return", "cur", ".", "filter", "(", "cur_filter", ")", ".", "order_by", "(", "START_FIELD", ")" ]
generates a reql cursor for plugin_name with status ready and prepares to sort by StartTime :param plugin_name: :param location: :param port: :return:
[ "generates", "a", "reql", "cursor", "for", "plugin_name", "with", "status", "ready", "and", "prepares", "to", "sort", "by", "StartTime", ":", "param", "plugin_name", ":", ":", "param", "location", ":", ":", "param", "port", ":", ":", "return", ":" ]
python
train
vaexio/vaex
packages/vaex-core/vaex/ext/readcol.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L270-L281
def readff(s,format): """ Fixed-format reader Pass in a single line string (s) and a format list, which needs to be a python list of string lengths """ F = numpy.array([0]+format).cumsum() bothF = zip(F[:-1],F[1:]) strarr = [s[l:u] for l,u in bothF] return strarr
[ "def", "readff", "(", "s", ",", "format", ")", ":", "F", "=", "numpy", ".", "array", "(", "[", "0", "]", "+", "format", ")", ".", "cumsum", "(", ")", "bothF", "=", "zip", "(", "F", "[", ":", "-", "1", "]", ",", "F", "[", "1", ":", "]", ")", "strarr", "=", "[", "s", "[", "l", ":", "u", "]", "for", "l", ",", "u", "in", "bothF", "]", "return", "strarr" ]
Fixed-format reader Pass in a single line string (s) and a format list, which needs to be a python list of string lengths
[ "Fixed", "-", "format", "reader", "Pass", "in", "a", "single", "line", "string", "(", "s", ")", "and", "a", "format", "list", "which", "needs", "to", "be", "a", "python", "list", "of", "string", "lengths" ]
python
test
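A worked example of the cumulative-offset slicing in readff, restated here so it runs standalone (numpy is assumed, as in the module):

import numpy

def readff(s, format):
    F = numpy.array([0] + format).cumsum()
    # Widths [4, 3, 2] become offsets [0, 4, 7, 9], i.e. the slices
    # s[0:4], s[4:7], s[7:9].
    return [s[l:u] for l, u in zip(F[:-1], F[1:])]

assert readff("2017ABC42", [4, 3, 2]) == ["2017", "ABC", "42"]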
stevearc/dql
dql/engine.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L974-L992
def execute(self, fragment, pretty_format=True): """ Run or aggregate a query fragment Concat the fragment to any stored fragments. If they form a complete query, run it and return the result. If not, store them and return None. """ self.fragments = (self.fragments + "\n" + fragment).lstrip() try: line_parser.parseString(self.fragments) except ParseException: pass else: self.last_query = self.fragments.strip() self.fragments = "" return super(FragmentEngine, self).execute(self.last_query, pretty_format) return None
[ "def", "execute", "(", "self", ",", "fragment", ",", "pretty_format", "=", "True", ")", ":", "self", ".", "fragments", "=", "(", "self", ".", "fragments", "+", "\"\\n\"", "+", "fragment", ")", ".", "lstrip", "(", ")", "try", ":", "line_parser", ".", "parseString", "(", "self", ".", "fragments", ")", "except", "ParseException", ":", "pass", "else", ":", "self", ".", "last_query", "=", "self", ".", "fragments", ".", "strip", "(", ")", "self", ".", "fragments", "=", "\"\"", "return", "super", "(", "FragmentEngine", ",", "self", ")", ".", "execute", "(", "self", ".", "last_query", ",", "pretty_format", ")", "return", "None" ]
Run or aggregate a query fragment Concat the fragment to any stored fragments. If they form a complete query, run it and return the result. If not, store them and return None.
[ "Run", "or", "aggregate", "a", "query", "fragment" ]
python
train
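A toy analogue of the accumulate-until-complete loop in FragmentEngine.execute above, with the pyparsing grammar check replaced by a hypothetical trailing-';' test:

class FragmentBuffer(object):
    def __init__(self):
        self.fragments = ""

    def feed(self, fragment):
        self.fragments = (self.fragments + "\n" + fragment).lstrip()
        if not self.fragments.rstrip().endswith(";"):
            return None  # still incomplete; keep buffering
        query, self.fragments = self.fragments.strip(), ""
        return query

buf = FragmentBuffer()
assert buf.feed("SELECT *") is None
assert buf.feed("FROM foo;") == "SELECT *\nFROM foo;"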
agoragames/kairos
kairos/mongo_backend.py
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/mongo_backend.py#L127-L150
def _batch_insert(self, inserts, intervals, **kwargs): ''' Batch insert implementation. ''' updates = {} # TODO support flush interval for interval,config in self._intervals.items(): for timestamp,names in inserts.iteritems(): timestamps = self._normalize_timestamps(timestamp, intervals, config) for name,values in names.iteritems(): for value in values: for tstamp in timestamps: query,insert = self._insert_data( name, value, tstamp, interval, config, dry_run=True) batch_key = self._batch_key(query) updates.setdefault(batch_key, {'query':query, 'interval':interval}) new_insert = self._batch(insert, updates[batch_key].get('insert')) updates[batch_key]['insert'] = new_insert # now that we've collected a bunch of updates, flush them out for spec in updates.values(): self._client[ spec['interval'] ].update( spec['query'], spec['insert'], upsert=True, check_keys=False )
[ "def", "_batch_insert", "(", "self", ",", "inserts", ",", "intervals", ",", "*", "*", "kwargs", ")", ":", "updates", "=", "{", "}", "# TODO support flush interval", "for", "interval", ",", "config", "in", "self", ".", "_intervals", ".", "items", "(", ")", ":", "for", "timestamp", ",", "names", "in", "inserts", ".", "iteritems", "(", ")", ":", "timestamps", "=", "self", ".", "_normalize_timestamps", "(", "timestamp", ",", "intervals", ",", "config", ")", "for", "name", ",", "values", "in", "names", ".", "iteritems", "(", ")", ":", "for", "value", "in", "values", ":", "for", "tstamp", "in", "timestamps", ":", "query", ",", "insert", "=", "self", ".", "_insert_data", "(", "name", ",", "value", ",", "tstamp", ",", "interval", ",", "config", ",", "dry_run", "=", "True", ")", "batch_key", "=", "self", ".", "_batch_key", "(", "query", ")", "updates", ".", "setdefault", "(", "batch_key", ",", "{", "'query'", ":", "query", ",", "'interval'", ":", "interval", "}", ")", "new_insert", "=", "self", ".", "_batch", "(", "insert", ",", "updates", "[", "batch_key", "]", ".", "get", "(", "'insert'", ")", ")", "updates", "[", "batch_key", "]", "[", "'insert'", "]", "=", "new_insert", "# now that we've collected a bunch of updates, flush them out", "for", "spec", "in", "updates", ".", "values", "(", ")", ":", "self", ".", "_client", "[", "spec", "[", "'interval'", "]", "]", ".", "update", "(", "spec", "[", "'query'", "]", ",", "spec", "[", "'insert'", "]", ",", "upsert", "=", "True", ",", "check_keys", "=", "False", ")" ]
Batch insert implementation.
[ "Batch", "insert", "implementation", "." ]
python
train
Nachtfeuer/pipeline
spline/components/bash.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/bash.py#L138-L145
def render_bash_options(self): """Rendering Bash options.""" options = '' if self.config.debug: options += "set -x\n" if self.config.strict: options += "set -euo pipefail\n" return options
[ "def", "render_bash_options", "(", "self", ")", ":", "options", "=", "''", "if", "self", ".", "config", ".", "debug", ":", "options", "+=", "\"set -x\\n\"", "if", "self", ".", "config", ".", "strict", ":", "options", "+=", "\"set -euo pipefail\\n\"", "return", "options" ]
Rendering Bash options.
[ "Rendering", "Bash", "options", "." ]
python
train
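The two config flags map directly onto Bash options; a standalone restatement with the config object flattened into plain arguments:

def render_bash_options(debug, strict):
    options = ''
    if debug:
        options += "set -x\n"             # trace each command
    if strict:
        options += "set -euo pipefail\n"  # exit on error/unset vars, fail pipes
    return options

assert render_bash_options(True, True) == "set -x\nset -euo pipefail\n"
assert render_bash_options(False, False) == ""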
roclark/sportsreference
sportsreference/ncaaf/conferences.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/conferences.py#L73-L103
def _find_conference_teams(self, conference_abbreviation, year): """ Retrieve the teams in the conference for the requested season. Find and retrieve all teams that participated in a conference for a given season. The name and abbreviation for each team are parsed and recorded to enable easy queries of conference schools. Parameters ---------- conference_abbreviation : string A string of the requested conference's abbreviation, such as 'big-12'. year : string A string of the requested year to pull conference information from. """ if not year: year = utils._find_year_for_season('ncaaf') page = self._pull_conference_page(conference_abbreviation, year) if not page: url = CONFERENCE_URL % (conference_abbreviation, year) output = ("Can't pull requested conference page. Ensure the " "following URL exists: %s" % url) raise ValueError(output) conference = page('table#standings tbody tr').items() for team in conference: team_abbreviation = self._get_team_abbreviation(team) if team_abbreviation == '': continue team_name = team('th[data-stat="school_name"]').text() self._teams[team_abbreviation] = team_name
[ "def", "_find_conference_teams", "(", "self", ",", "conference_abbreviation", ",", "year", ")", ":", "if", "not", "year", ":", "year", "=", "utils", ".", "_find_year_for_season", "(", "'ncaaf'", ")", "page", "=", "self", ".", "_pull_conference_page", "(", "conference_abbreviation", ",", "year", ")", "if", "not", "page", ":", "url", "=", "CONFERENCE_URL", "%", "(", "conference_abbreviation", ",", "year", ")", "output", "=", "(", "\"Can't pull requested conference page. Ensure the \"", "\"following URL exists: %s\"", "%", "url", ")", "raise", "ValueError", "(", "output", ")", "conference", "=", "page", "(", "'table#standings tbody tr'", ")", ".", "items", "(", ")", "for", "team", "in", "conference", ":", "team_abbreviation", "=", "self", ".", "_get_team_abbreviation", "(", "team", ")", "if", "team_abbreviation", "==", "''", ":", "continue", "team_name", "=", "team", "(", "'th[data-stat=\"school_name\"]'", ")", ".", "text", "(", ")", "self", ".", "_teams", "[", "team_abbreviation", "]", "=", "team_name" ]
Retrieve the teams in the conference for the requested season. Find and retrieve all teams that participated in a conference for a given season. The name and abbreviation for each team are parsed and recorded to enable easy queries of conference schools. Parameters ---------- conference_abbreviation : string A string of the requested conference's abbreviation, such as 'big-12'. year : string A string of the requested year to pull conference information from.
[ "Retrieve", "the", "teams", "in", "the", "conference", "for", "the", "requested", "season", "." ]
python
train
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L62-L73
def initialize(self): '''Calling this function initializes the printer. Args: None Returns: None Raises: None ''' self.fonttype = self.font_types['bitmap'] self.send(chr(27)+chr(64))
[ "def", "initialize", "(", "self", ")", ":", "self", ".", "fonttype", "=", "self", ".", "font_types", "[", "'bitmap'", "]", "self", ".", "send", "(", "chr", "(", "27", ")", "+", "chr", "(", "64", ")", ")" ]
Calling this function initializes the printer. Args: None Returns: None Raises: None
[ "Calling", "this", "function", "initializes", "the", "printer", ".", "Args", ":", "None", "Returns", ":", "None", "Raises", ":", "None" ]
python
train
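The payload chr(27) + chr(64) sent by initialize() above is the two-byte ESC @ sequence, the conventional "initialize printer" command in ESC/P-style protocols:

ESC_INIT = chr(27) + chr(64)
assert ESC_INIT == '\x1b@'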
cdriehuys/django-rest-email-auth
rest_email_auth/serializers.py
https://github.com/cdriehuys/django-rest-email-auth/blob/7e752c4d77ae02d2d046f214f56e743aa12ab23f/rest_email_auth/serializers.py#L297-L313
def validate_key(self, key): """ Validate the provided reset key. Returns: The validated key. Raises: serializers.ValidationError: If the provided key does not exist. """ if not models.PasswordResetToken.valid_tokens.filter(key=key).exists(): raise serializers.ValidationError( _("The provided reset token does not exist, or is expired.") ) return key
[ "def", "validate_key", "(", "self", ",", "key", ")", ":", "if", "not", "models", ".", "PasswordResetToken", ".", "valid_tokens", ".", "filter", "(", "key", "=", "key", ")", ".", "exists", "(", ")", ":", "raise", "serializers", ".", "ValidationError", "(", "_", "(", "\"The provided reset token does not exist, or is expired.\"", ")", ")", "return", "key" ]
Validate the provided reset key. Returns: The validated key. Raises: serializers.ValidationError: If the provided key does not exist.
[ "Validate", "the", "provided", "reset", "key", "." ]
python
valid
explosion/spaCy
spacy/pipeline/functions.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L24-L36
def merge_entities(doc): """Merge entities into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged entities. DOCS: https://spacy.io/api/pipeline-functions#merge_entities """ with doc.retokenize() as retokenizer: for ent in doc.ents: attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label} retokenizer.merge(ent, attrs=attrs) return doc
[ "def", "merge_entities", "(", "doc", ")", ":", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "ent", "in", "doc", ".", "ents", ":", "attrs", "=", "{", "\"tag\"", ":", "ent", ".", "root", ".", "tag", ",", "\"dep\"", ":", "ent", ".", "root", ".", "dep", ",", "\"ent_type\"", ":", "ent", ".", "label", "}", "retokenizer", ".", "merge", "(", "ent", ",", "attrs", "=", "attrs", ")", "return", "doc" ]
Merge entities into a single token. doc (Doc): The Doc object. RETURNS (Doc): The Doc object with merged entities. DOCS: https://spacy.io/api/pipeline-functions#merge_entities
[ "Merge", "entities", "into", "a", "single", "token", "." ]
python
train
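A usage sketch for merge_entities, assuming a spaCy v2-style pipeline (matching this commit) and an installed en_core_web_sm model; the import path is assumed from the record's file layout:

import spacy
from spacy.pipeline.functions import merge_entities  # assumed path

nlp = spacy.load("en_core_web_sm")
nlp.add_pipe(merge_entities)  # v2 API: pass the component function itself

doc = nlp("Apple is looking at buying U.K. startup for $1 billion")
# "$1 billion" (a MONEY entity) now arrives as a single token
print([t.text for t in doc])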
keon/algorithms
algorithms/tree/bst/kth_smallest.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/tree/bst/kth_smallest.py#L24-L32
def kth_smallest(self, root, k): """ :type root: TreeNode :type k: int :rtype: int """ count = [] self.helper(root, count) return count[k-1]
[ "def", "kth_smallest", "(", "self", ",", "root", ",", "k", ")", ":", "count", "=", "[", "]", "self", ".", "helper", "(", "root", ",", "count", ")", "return", "count", "[", "k", "-", "1", "]" ]
:type root: TreeNode :type k: int :rtype: int
[ ":", "type", "root", ":", "TreeNode", ":", "type", "k", ":", "int", ":", "rtype", ":", "int" ]
python
train
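The kth_smallest record above depends on a self.helper method that lies outside the extracted span. A dependency-free sketch of the same in-order idea follows; TreeNode here is a hypothetical stand-in, not the repository's class.

# In-order traversal of a BST yields keys in ascending order, so the k-th
# visited value is the k-th smallest. Self-contained illustration only.
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def kth_smallest(root, k):
    count = []
    def helper(node):
        if node is None:
            return
        helper(node.left)          # visit smaller keys first
        count.append(node.val)
        helper(node.right)
    helper(root)
    return count[k - 1]

root = TreeNode(3, TreeNode(1, right=TreeNode(2)), TreeNode(4))
assert kth_smallest(root, 2) == 2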
tensorflow/datasets
tensorflow_datasets/core/features/feature.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L468-L490
def encode_example(self, example_dict):
  """See base class for details."""
  # Flatten dict matching the tf-example features
  # Use NonMutableDict to ensure there is no collision between features keys
  tfexample_dict = utils.NonMutableDict()

  # Iterate over example fields
  for feature_key, (feature, example_value) in utils.zip_dict(
      self._feature_dict, example_dict):
    # Encode the field with the associated encoder
    encoded_feature = feature.encode_example(example_value)

    # Singleton case
    if not feature.serialized_keys:
      tfexample_dict[feature_key] = encoded_feature
    # Feature contains sub features
    else:
      _assert_keys_match(encoded_feature.keys(), feature.serialized_keys)
      tfexample_dict.update({
          posixpath.join(feature_key, k): encoded_feature[k]
          for k in feature.serialized_keys
      })
  return tfexample_dict
[ "def", "encode_example", "(", "self", ",", "example_dict", ")", ":", "# Flatten dict matching the tf-example features", "# Use NonMutableDict to ensure there is no collision between features keys", "tfexample_dict", "=", "utils", ".", "NonMutableDict", "(", ")", "# Iterate over example fields", "for", "feature_key", ",", "(", "feature", ",", "example_value", ")", "in", "utils", ".", "zip_dict", "(", "self", ".", "_feature_dict", ",", "example_dict", ")", ":", "# Encode the field with the associated encoder", "encoded_feature", "=", "feature", ".", "encode_example", "(", "example_value", ")", "# Singleton case", "if", "not", "feature", ".", "serialized_keys", ":", "tfexample_dict", "[", "feature_key", "]", "=", "encoded_feature", "# Feature contains sub features", "else", ":", "_assert_keys_match", "(", "encoded_feature", ".", "keys", "(", ")", ",", "feature", ".", "serialized_keys", ")", "tfexample_dict", ".", "update", "(", "{", "posixpath", ".", "join", "(", "feature_key", ",", "k", ")", ":", "encoded_feature", "[", "k", "]", "for", "k", "in", "feature", ".", "serialized_keys", "}", ")", "return", "tfexample_dict" ]
See base class for details.
[ "See", "base", "class", "for", "details", "." ]
python
train
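The flattening step in encode_example can be shown without any TFDS machinery. A minimal sketch, under the assumption that sub-feature values arrive as plain dicts (feature names here are invented):

# Sub-feature keys are joined to the parent feature name with '/', mirroring
# the posixpath.join(feature_key, k) call in the record above.
import posixpath

def flatten_example(example_dict):
    flat = {}
    for feature_key, value in example_dict.items():
        if isinstance(value, dict):   # feature with sub-features
            for k, v in value.items():
                flat[posixpath.join(feature_key, k)] = v
        else:                         # singleton case
            flat[feature_key] = value
    return flat

print(flatten_example({"image": {"encoded": b"...", "format": "png"}, "label": 3}))
# {'image/encoded': b'...', 'image/format': 'png', 'label': 3}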
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L963-L976
def drag(self):
    '''
    Drag the ui object to other point or ui object.
    Usage:
    d(text="Clock").drag.to(x=100, y=100)  # drag to point (x,y)
    d(text="Clock").drag.to(text="Remove") # drag to another object
    '''
    def to(obj, *args, **kwargs):
        if len(args) >= 2 or "x" in kwargs or "y" in kwargs:
            drag_to = lambda x, y, steps=100: self.jsonrpc.dragTo(self.selector, x, y, steps)
        else:
            drag_to = lambda steps=100, **kwargs: self.jsonrpc.dragTo(self.selector, Selector(**kwargs), steps)
        return drag_to(*args, **kwargs)
    return type("Drag", (object,), {"to": to})()
[ "def", "drag", "(", "self", ")", ":", "def", "to", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", ">=", "2", "or", "\"x\"", "in", "kwargs", "or", "\"y\"", "in", "kwargs", ":", "drag_to", "=", "lambda", "x", ",", "y", ",", "steps", "=", "100", ":", "self", ".", "jsonrpc", ".", "dragTo", "(", "self", ".", "selector", ",", "x", ",", "y", ",", "steps", ")", "else", ":", "drag_to", "=", "lambda", "steps", "=", "100", ",", "*", "*", "kwargs", ":", "self", ".", "jsonrpc", ".", "dragTo", "(", "self", ".", "selector", ",", "Selector", "(", "*", "*", "kwargs", ")", ",", "steps", ")", "return", "drag_to", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "type", "(", "\"Drag\"", ",", "(", "object", ",", ")", ",", "{", "\"to\"", ":", "to", "}", ")", "(", ")" ]
Drag the ui object to other point or ui object. Usage: d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y) d(text="Clock").drag.to(text="Remove") # drag to another object
[ "Drag", "the", "ui", "object", "to", "other", "point", "or", "ui", "object", ".", "Usage", ":", "d", "(", "text", "=", "Clock", ")", ".", "drag", ".", "to", "(", "x", "=", "100", "y", "=", "100", ")", "#", "drag", "to", "point", "(", "x", "y", ")", "d", "(", "text", "=", "Clock", ")", ".", "drag", ".", "to", "(", "text", "=", "Remove", ")", "#", "drag", "to", "another", "object" ]
python
train
lepture/safe
safe/__init__.py
https://github.com/lepture/safe/blob/038a72e59557caf97c1b93f66124a8f014eb032b/safe/__init__.py#L142-L185
def check(raw, length=8, freq=0, min_types=3, level=STRONG):
    """Check the safety level of the password.

    :param raw: raw text password.
    :param length: minimal length of the password.
    :param freq: minimum frequency.
    :param min_types: minimum character family.
    :param level: minimum level to validate a password.
    """
    raw = to_unicode(raw)

    if level > STRONG:
        level = STRONG

    if len(raw) < length:
        return Strength(False, 'terrible', 'password is too short')

    if is_asdf(raw) or is_by_step(raw):
        return Strength(False, 'simple', 'password has a pattern')

    if is_common_password(raw, freq=freq):
        return Strength(False, 'simple', 'password is too common')

    types = 0

    if LOWER.search(raw):
        types += 1

    if UPPER.search(raw):
        types += 1

    if NUMBER.search(raw):
        types += 1

    if MARKS.search(raw):
        types += 1

    if types < 2:
        return Strength(level <= SIMPLE, 'simple', 'password is too simple')

    if types < min_types:
        return Strength(level <= MEDIUM, 'medium',
                        'password is good enough, but not strong')

    return Strength(True, 'strong', 'password is perfect')
[ "def", "check", "(", "raw", ",", "length", "=", "8", ",", "freq", "=", "0", ",", "min_types", "=", "3", ",", "level", "=", "STRONG", ")", ":", "raw", "=", "to_unicode", "(", "raw", ")", "if", "level", ">", "STRONG", ":", "level", "=", "STRONG", "if", "len", "(", "raw", ")", "<", "length", ":", "return", "Strength", "(", "False", ",", "'terrible'", ",", "'password is too short'", ")", "if", "is_asdf", "(", "raw", ")", "or", "is_by_step", "(", "raw", ")", ":", "return", "Strength", "(", "False", ",", "'simple'", ",", "'password has a pattern'", ")", "if", "is_common_password", "(", "raw", ",", "freq", "=", "freq", ")", ":", "return", "Strength", "(", "False", ",", "'simple'", ",", "'password is too common'", ")", "types", "=", "0", "if", "LOWER", ".", "search", "(", "raw", ")", ":", "types", "+=", "1", "if", "UPPER", ".", "search", "(", "raw", ")", ":", "types", "+=", "1", "if", "NUMBER", ".", "search", "(", "raw", ")", ":", "types", "+=", "1", "if", "MARKS", ".", "search", "(", "raw", ")", ":", "types", "+=", "1", "if", "types", "<", "2", ":", "return", "Strength", "(", "level", "<=", "SIMPLE", ",", "'simple'", ",", "'password is too simple'", ")", "if", "types", "<", "min_types", ":", "return", "Strength", "(", "level", "<=", "MEDIUM", ",", "'medium'", ",", "'password is good enough, but not strong'", ")", "return", "Strength", "(", "True", ",", "'strong'", ",", "'password is perfect'", ")" ]
Check the safety level of the password. :param raw: raw text password. :param length: minimal length of the password. :param freq: minimum frequency. :param min_types: minimum character family. :param level: minimum level to validate a password.
[ "Check", "the", "safety", "level", "of", "the", "password", "." ]
python
train
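From the constructor calls in the record, Strength bundles a validity flag, a level name, and a message. A usage sketch, illustrative only; the exact classifications depend on the library's common-password list and pattern checks, so the expected levels in the comments are assumptions.

# Hypothetical session with safe.check(); comments give the expected levels.
import safe

print(safe.check('x'))                # shorter than 8 chars -> 'terrible'
print(safe.check('password123'))      # likely in the common list -> 'simple'
print(safe.check('Tr0ub4dor&3xtra'))  # several character families -> 'strong'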
kootenpv/yagmail
yagmail/validate.py
https://github.com/kootenpv/yagmail/blob/b8873299ce682193eef43314aa214e553c2b67cb/yagmail/validate.py#L100-L113
def validate_email_with_regex(email_address):
    """
    Note that this will only filter out syntax mistakes in emailaddresses.
    If a human would think it is probably a valid email, it will most likely pass.
    However, it could still very well be that the actual emailaddress has simply
    not be claimed by anyone (so then this function fails to devalidate).
    """
    if not re.match(VALID_ADDRESS_REGEXP, email_address):
        emsg = 'Emailaddress "{}" is not valid according to RFC 2822 standards'.format(
            email_address)
        raise YagInvalidEmailAddress(emsg)
    # apart from the standard, I personally do not trust email addresses without dot.
    if "." not in email_address and "localhost" not in email_address.lower():
        raise YagInvalidEmailAddress("Missing dot in emailaddress")
[ "def", "validate_email_with_regex", "(", "email_address", ")", ":", "if", "not", "re", ".", "match", "(", "VALID_ADDRESS_REGEXP", ",", "email_address", ")", ":", "emsg", "=", "'Emailaddress \"{}\" is not valid according to RFC 2822 standards'", ".", "format", "(", "email_address", ")", "raise", "YagInvalidEmailAddress", "(", "emsg", ")", "# apart from the standard, I personally do not trust email addresses without dot.", "if", "\".\"", "not", "in", "email_address", "and", "\"localhost\"", "not", "in", "email_address", ".", "lower", "(", ")", ":", "raise", "YagInvalidEmailAddress", "(", "\"Missing dot in emailaddress\"", ")" ]
Note that this will only filter out syntax mistakes in emailaddresses. If a human would think it is probably a valid email, it will most likely pass. However, it could still very well be that the actual emailaddress has simply not be claimed by anyone (so then this function fails to devalidate).
[ "Note", "that", "this", "will", "only", "filter", "out", "syntax", "mistakes", "in", "emailaddresses", ".", "If", "a", "human", "would", "think", "it", "is", "probably", "a", "valid", "email", "it", "will", "most", "likely", "pass", ".", "However", "it", "could", "still", "very", "well", "be", "that", "the", "actual", "emailaddress", "has", "simply", "not", "be", "claimed", "by", "anyone", "(", "so", "then", "this", "function", "fails", "to", "devalidate", ")", "." ]
python
train
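Since validate_email_with_regex raises rather than returning a flag, callers wrap it in try/except. A sketch with invented addresses; the location of the error class is assumed from the package layout shown in the record.

# Illustrative only: failures surface as YagInvalidEmailAddress exceptions.
from yagmail.validate import validate_email_with_regex
from yagmail.error import YagInvalidEmailAddress  # assumed import path

for address in ["user@example.com", "not-an-address"]:
    try:
        validate_email_with_regex(address)
        print(address, "passed the syntax check")
    except YagInvalidEmailAddress as exc:
        print(address, "rejected:", exc)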
spacetelescope/stsci.tools
lib/stsci/tools/swapgeis.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/swapgeis.py#L92-L515
def byteswap(input,output=None,clobber=True):
    """Input GEIS files "input" will be read and converted to a new GEIS file
    whose byte-order has been swapped from its original state.

    Parameters
    ----------
    input - str
        Full filename with path of input GEIS image header file

    output - str
        Full filename with path of output GEIS image header file
        If None, a default name will be created as input_swap.??h

    clobber - bool
        Overwrite any pre-existing output file? [Default: True]

    Notes
    -----
    This function will automatically read and write out the data file
    using the GEIS image naming conventions.
    """
    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise "Illegal input GEIS file name %s" % input

    data_file = input[:-1]+'d'

    # Create default output name if no output name was specified by the user
    if output is None:
        output = input.replace('.','_swap.')

    out_data = output[:-1]+'d'

    if os.path.exists(output) and not clobber:
        errstr = 'Output file already exists! Please remove or rename and start again...'
        raise IOError(errstr)

    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        raise "Platform %s is not supported (yet)." % _os

    end_card = 'END'+' '* (cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU so we can have access to keywords which describe
    # the number of groups and shape of each group's array
    #
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x,y:x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension header
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    _range = list(range(1, pcount+1))
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later
        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    f1.close()
    errormsg = ""

    loc = 0
    outdat = b''
    for k in range(gcount):
        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape).byteswap()
        outdat += ext_dat.tostring()

        ext_hdu = fits.hdu.ImageHDU(data=ext_dat)

        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats).byteswap()
        outdat += rec.tostring()

        loc += group_size

    if os.path.exists(output):
        os.remove(output)
    if os.path.exists(out_data):
        os.remove(out_data)

    shutil.copy(input,output)
    outfile = open(out_data,mode='wb')
    outfile.write(outdat)
    outfile.close()

    print('Finished byte-swapping ',input,' to ',output)

#-------------------------------------------------------------------------------
    """Input GEIS files "input" will be read and a HDUList object will
    be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.

    The user can use the writeto method to write the HDUList object to
    a FITS file.
    """
    # global dat # !!! (looks like this is a function missing its head)
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        raise "Illegal input GEIS file name %s" % input
    data_file = input[:-1]+'d'

    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        raise "Platform %s is not supported (yet)." % _os

    end_card = 'END'+' '* (cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    phdr.set('FILENAME', value=input, after='DATE')

    # Determine starting point for adding Group Parameter Block keywords to Primary header
    phdr_indx = phdr.index('PSIZE')

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x,y:x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    cols = []       # column definitions used for extension table
    cols_dict = {}  # provides name access to Column defs

    _range = list(range(1, pcount+1))
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later
        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))

        # Set up definitions for use in creating the group-parameter block table
        nrpt = ''
        nbits = str(int(_bytes)*8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes

        afmt = cols_fmt[_type]+ nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type]+nrpt

        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
        cols.append(cols_dict[ptype])  # This keeps the columns in order

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr)-1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True,
             comment="FITS dataset may contain extensions",
             after=_after)

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    errormsg = ""

    # Define data array for all groups
    arr_shape = _naxis[:]
    arr_shape[0] = gcount
    arr_stack = numpy.zeros(arr_shape,dtype=_code)

    loc = 0
    for k in range(gcount):
        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero

        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        # and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
           (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING: =\n"
            errormsg += "= Input image: =\n"
            errormsg += input+"[%d]\n"%(k+1)
            errormsg += "= had floating point data values =\n"
            errormsg += "= of NaN and/or Inf. =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING: =\n"
                errormsg += "= Input image: =\n"
                errormsg += input+"[%d]\n"%(k+1)
                errormsg += "= had integer data values =\n"
                errormsg += "= with maximum bitvalues. =\n"
                errormsg += "===================================\n"

        arr_stack[k] = ext_dat

        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)

        loc += group_size

        # Add data from this GPB to table
        for i in range(1, pcount+1):
            val = rec[0][i-1]
            if i in bools:
                if val:
                    val = 'T'
                else:
                    val = 'F'
            cols[i-1].array[k] = val

        # Based on the first group, add GPB keywords to PRIMARY header
        if k == 0:
            # Create separate PyFITS Card objects for each entry in 'rec'
            # and update Primary HDU with these keywords after PSIZE
            for i in range(1, pcount+1):
                #val = rec.field(i-1)[0]
                val = rec[0][i-1]
                if val.dtype.kind == 'S':
                    val = val.decode('ascii')

                if i in bools:
                    if val:
                        val = True
                    else:
                        val = False

                if i in floats:
                    # use fromstring, format in Card is deprecated in pyfits 0.9
                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
                    _card = fits.Card.fromstring(_str)
                else:
                    _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])

                phdr.insert(phdr_indx+i, _card)

            # deal with bscale/bzero
            if (_bscale != 1 or _bzero != 0):
                phdr['BSCALE'] = _bscale
                phdr['BZERO'] = _bzero

    #hdulist.append(ext_hdu)

    # Define new table based on Column definitions
    ext_table = fits.TableHDU.from_columns(cols)
    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
    # Add column descriptions to header of table extension to match stwfits output
    for i in range(len(key)):
        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))

    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "= This file may have been =\n"
        errormsg += "= written out on a platform =\n"
        errormsg += "= with a different byte-order. =\n"
        errormsg += "= =\n"
        errormsg += "= Please verify that the values =\n"
        errormsg += "= are correct or apply the =\n"
        errormsg += "= '.byteswap()' method. =\n"
        errormsg += "===================================\n"
        print(errormsg)

    f1.close()

    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
    hdulist.append(ext_table)

    return hdulist
[ "def", "byteswap", "(", "input", ",", "output", "=", "None", ",", "clobber", "=", "True", ")", ":", "global", "dat", "cardLen", "=", "fits", ".", "Card", ".", "length", "# input file(s) must be of the form *.??h and *.??d", "if", "input", "[", "-", "1", "]", "!=", "'h'", "or", "input", "[", "-", "4", "]", "!=", "'.'", ":", "raise", "\"Illegal input GEIS file name %s\"", "%", "input", "data_file", "=", "input", "[", ":", "-", "1", "]", "+", "'d'", "# Create default output name if no output name was specified by the user", "if", "output", "is", "None", ":", "output", "=", "input", ".", "replace", "(", "'.'", ",", "'_swap.'", ")", "out_data", "=", "output", "[", ":", "-", "1", "]", "+", "'d'", "if", "os", ".", "path", ".", "exists", "(", "output", ")", "and", "not", "clobber", ":", "errstr", "=", "'Output file already exists! Please remove or rename and start again...'", "raise", "IOError", "(", "errstr", ")", "_os", "=", "sys", ".", "platform", "if", "_os", "[", ":", "5", "]", "==", "'linux'", "or", "_os", "[", ":", "5", "]", "==", "'win32'", "or", "_os", "[", ":", "5", "]", "==", "'sunos'", "or", "_os", "[", ":", "3", "]", "==", "'osf'", "or", "_os", "[", ":", "6", "]", "==", "'darwin'", ":", "bytes_per_line", "=", "cardLen", "+", "1", "else", ":", "raise", "\"Platform %s is not supported (yet).\"", "%", "_os", "end_card", "=", "'END'", "+", "' '", "*", "(", "cardLen", "-", "3", ")", "# open input file", "im", "=", "open", "(", "input", ")", "# Generate the primary HDU so we can have access to keywords which describe", "# the number of groups and shape of each group's array", "#", "cards", "=", "[", "]", "while", "1", ":", "line", "=", "im", ".", "read", "(", "bytes_per_line", ")", "[", ":", "cardLen", "]", "line", "=", "line", "[", ":", "8", "]", ".", "upper", "(", ")", "+", "line", "[", "8", ":", "]", "if", "line", "==", "end_card", ":", "break", "cards", ".", "append", "(", "fits", ".", "Card", ".", "fromstring", "(", "line", ")", ")", "phdr", "=", "fits", ".", "Header", "(", "cards", ")", "im", ".", "close", "(", ")", "_naxis0", "=", "phdr", ".", "get", "(", "'NAXIS'", ",", "0", ")", "_naxis", "=", "[", "phdr", "[", "'NAXIS'", "+", "str", "(", "j", ")", "]", "for", "j", "in", "range", "(", "1", ",", "_naxis0", "+", "1", ")", "]", "_naxis", ".", "insert", "(", "0", ",", "_naxis0", ")", "_bitpix", "=", "phdr", "[", "'BITPIX'", "]", "_psize", "=", "phdr", "[", "'PSIZE'", "]", "if", "phdr", "[", "'DATATYPE'", "]", "[", ":", "4", "]", "==", "'REAL'", ":", "_bitpix", "=", "-", "_bitpix", "if", "_naxis0", ">", "0", ":", "size", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "_naxis", "[", "1", ":", "]", ")", "data_size", "=", "abs", "(", "_bitpix", ")", "*", "size", "//", "8", "else", ":", "data_size", "=", "0", "group_size", "=", "data_size", "+", "_psize", "//", "8", "# decode the group parameter definitions,", "# group parameters will become extension header", "groups", "=", "phdr", "[", "'GROUPS'", "]", "gcount", "=", "phdr", "[", "'GCOUNT'", "]", "pcount", "=", "phdr", "[", "'PCOUNT'", "]", "formats", "=", "[", "]", "bools", "=", "[", "]", "floats", "=", "[", "]", "_range", "=", "list", "(", "range", "(", "1", ",", "pcount", "+", "1", ")", ")", "key", "=", "[", "phdr", "[", "'PTYPE'", "+", "str", "(", "j", ")", "]", "for", "j", "in", "_range", "]", "comm", "=", "[", "phdr", ".", "cards", "[", "'PTYPE'", "+", "str", "(", "j", ")", "]", ".", "comment", "for", "j", "in", "_range", "]", "# delete group parameter definition header keywords", "_list", "=", "[", 
"'PTYPE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'PDTYPE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'PSIZE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'DATATYPE'", ",", "'PSIZE'", ",", "'GCOUNT'", ",", "'PCOUNT'", ",", "'BSCALE'", ",", "'BZERO'", "]", "# Construct record array formats for the group parameters", "# as interpreted from the Primary header file", "for", "i", "in", "range", "(", "1", ",", "pcount", "+", "1", ")", ":", "ptype", "=", "key", "[", "i", "-", "1", "]", "pdtype", "=", "phdr", "[", "'PDTYPE'", "+", "str", "(", "i", ")", "]", "star", "=", "pdtype", ".", "find", "(", "'*'", ")", "_type", "=", "pdtype", "[", ":", "star", "]", "_bytes", "=", "pdtype", "[", "star", "+", "1", ":", "]", "# collect boolean keywords since they need special attention later", "if", "_type", "==", "'LOGICAL'", ":", "bools", ".", "append", "(", "i", ")", "if", "pdtype", "==", "'REAL*4'", ":", "floats", ".", "append", "(", "i", ")", "fmt", "=", "geis_fmt", "[", "_type", "]", "+", "_bytes", "formats", ".", "append", "(", "(", "ptype", ",", "fmt", ")", ")", "_shape", "=", "_naxis", "[", "1", ":", "]", "_shape", ".", "reverse", "(", ")", "_code", "=", "fits", ".", "BITPIX2DTYPE", "[", "_bitpix", "]", "_bscale", "=", "phdr", ".", "get", "(", "'BSCALE'", ",", "1", ")", "_bzero", "=", "phdr", ".", "get", "(", "'BZERO'", ",", "0", ")", "if", "phdr", "[", "'DATATYPE'", "]", "[", ":", "10", "]", "==", "'UNSIGNED*2'", ":", "_uint16", "=", "1", "_bzero", "=", "32768", "else", ":", "_uint16", "=", "0", "# Use copy-on-write for all data types since byteswap may be needed", "# in some platforms.", "f1", "=", "open", "(", "data_file", ",", "mode", "=", "'rb'", ")", "dat", "=", "f1", ".", "read", "(", ")", "f1", ".", "close", "(", ")", "errormsg", "=", "\"\"", "loc", "=", "0", "outdat", "=", "b''", "for", "k", "in", "range", "(", "gcount", ")", ":", "ext_dat", "=", "numpy", ".", "fromstring", "(", "dat", "[", "loc", ":", "loc", "+", "data_size", "]", ",", "dtype", "=", "_code", ")", "ext_dat", "=", "ext_dat", ".", "reshape", "(", "_shape", ")", ".", "byteswap", "(", ")", "outdat", "+=", "ext_dat", ".", "tostring", "(", ")", "ext_hdu", "=", "fits", ".", "hdu", ".", "ImageHDU", "(", "data", "=", "ext_dat", ")", "rec", "=", "numpy", ".", "fromstring", "(", "dat", "[", "loc", "+", "data_size", ":", "loc", "+", "group_size", "]", ",", "dtype", "=", "formats", ")", ".", "byteswap", "(", ")", "outdat", "+=", "rec", ".", "tostring", "(", ")", "loc", "+=", "group_size", "if", "os", ".", "path", ".", "exists", "(", "output", ")", ":", "os", ".", "remove", "(", "output", ")", "if", "os", ".", "path", ".", "exists", "(", "out_data", ")", ":", "os", ".", "remove", "(", "out_data", ")", "shutil", ".", "copy", "(", "input", ",", "output", ")", "outfile", "=", "open", "(", "out_data", ",", "mode", "=", "'wb'", ")", "outfile", ".", "write", "(", "outdat", ")", "outfile", ".", "close", "(", ")", "print", "(", "'Finished byte-swapping '", ",", "input", ",", "' to '", ",", "output", ")", "#-------------------------------------------------------------------------------", "\"\"\"Input GEIS files \"input\" will be read and a HDUList object will\n be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.\n\n The user can use the writeto method to write the HDUList object to\n a FITS file.\n \"\"\"", "# global dat # !!! 
(looks like this is a function missing its head)", "cardLen", "=", "fits", ".", "Card", ".", "length", "# input file(s) must be of the form *.??h and *.??d", "if", "input", "[", "-", "1", "]", "!=", "'h'", "or", "input", "[", "-", "4", "]", "!=", "'.'", ":", "raise", "\"Illegal input GEIS file name %s\"", "%", "input", "data_file", "=", "input", "[", ":", "-", "1", "]", "+", "'d'", "_os", "=", "sys", ".", "platform", "if", "_os", "[", ":", "5", "]", "==", "'linux'", "or", "_os", "[", ":", "5", "]", "==", "'win32'", "or", "_os", "[", ":", "5", "]", "==", "'sunos'", "or", "_os", "[", ":", "3", "]", "==", "'osf'", "or", "_os", "[", ":", "6", "]", "==", "'darwin'", ":", "bytes_per_line", "=", "cardLen", "+", "1", "else", ":", "raise", "\"Platform %s is not supported (yet).\"", "%", "_os", "end_card", "=", "'END'", "+", "' '", "*", "(", "cardLen", "-", "3", ")", "# open input file", "im", "=", "open", "(", "input", ")", "# Generate the primary HDU", "cards", "=", "[", "]", "while", "1", ":", "line", "=", "im", ".", "read", "(", "bytes_per_line", ")", "[", ":", "cardLen", "]", "line", "=", "line", "[", ":", "8", "]", ".", "upper", "(", ")", "+", "line", "[", "8", ":", "]", "if", "line", "==", "end_card", ":", "break", "cards", ".", "append", "(", "fits", ".", "Card", ".", "fromstring", "(", "line", ")", ")", "phdr", "=", "fits", ".", "Header", "(", "cards", ")", "im", ".", "close", "(", ")", "phdr", ".", "set", "(", "'FILENAME'", ",", "value", "=", "input", ",", "after", "=", "'DATE'", ")", "# Determine starting point for adding Group Parameter Block keywords to Primary header", "phdr_indx", "=", "phdr", ".", "index", "(", "'PSIZE'", ")", "_naxis0", "=", "phdr", ".", "get", "(", "'NAXIS'", ",", "0", ")", "_naxis", "=", "[", "phdr", "[", "'NAXIS'", "+", "str", "(", "j", ")", "]", "for", "j", "in", "range", "(", "1", ",", "_naxis0", "+", "1", ")", "]", "_naxis", ".", "insert", "(", "0", ",", "_naxis0", ")", "_bitpix", "=", "phdr", "[", "'BITPIX'", "]", "_psize", "=", "phdr", "[", "'PSIZE'", "]", "if", "phdr", "[", "'DATATYPE'", "]", "[", ":", "4", "]", "==", "'REAL'", ":", "_bitpix", "=", "-", "_bitpix", "if", "_naxis0", ">", "0", ":", "size", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "_naxis", "[", "1", ":", "]", ")", "data_size", "=", "abs", "(", "_bitpix", ")", "*", "size", "//", "8", "else", ":", "data_size", "=", "0", "group_size", "=", "data_size", "+", "_psize", "//", "8", "# decode the group parameter definitions,", "# group parameters will become extension table", "groups", "=", "phdr", "[", "'GROUPS'", "]", "gcount", "=", "phdr", "[", "'GCOUNT'", "]", "pcount", "=", "phdr", "[", "'PCOUNT'", "]", "formats", "=", "[", "]", "bools", "=", "[", "]", "floats", "=", "[", "]", "cols", "=", "[", "]", "# column definitions used for extension table", "cols_dict", "=", "{", "}", "# provides name access to Column defs", "_range", "=", "list", "(", "range", "(", "1", ",", "pcount", "+", "1", ")", ")", "key", "=", "[", "phdr", "[", "'PTYPE'", "+", "str", "(", "j", ")", "]", "for", "j", "in", "_range", "]", "comm", "=", "[", "phdr", ".", "cards", "[", "'PTYPE'", "+", "str", "(", "j", ")", "]", ".", "comment", "for", "j", "in", "_range", "]", "# delete group parameter definition header keywords", "_list", "=", "[", "'PTYPE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'PDTYPE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'PSIZE'", "+", "str", "(", "j", ")", "for", "j", "in", "_range", "]", "+", "[", "'DATATYPE'", ",", 
"'PSIZE'", ",", "'GCOUNT'", ",", "'PCOUNT'", ",", "'BSCALE'", ",", "'BZERO'", "]", "# Construct record array formats for the group parameters", "# as interpreted from the Primary header file", "for", "i", "in", "range", "(", "1", ",", "pcount", "+", "1", ")", ":", "ptype", "=", "key", "[", "i", "-", "1", "]", "pdtype", "=", "phdr", "[", "'PDTYPE'", "+", "str", "(", "i", ")", "]", "star", "=", "pdtype", ".", "find", "(", "'*'", ")", "_type", "=", "pdtype", "[", ":", "star", "]", "_bytes", "=", "pdtype", "[", "star", "+", "1", ":", "]", "# collect boolean keywords since they need special attention later", "if", "_type", "==", "'LOGICAL'", ":", "bools", ".", "append", "(", "i", ")", "if", "pdtype", "==", "'REAL*4'", ":", "floats", ".", "append", "(", "i", ")", "# identify keywords which require conversion to special units", "if", "ptype", "in", "kw_DOUBLE", ":", "_type", "=", "'DOUBLE'", "fmt", "=", "geis_fmt", "[", "_type", "]", "+", "_bytes", "formats", ".", "append", "(", "(", "ptype", ",", "fmt", ")", ")", "# Set up definitions for use in creating the group-parameter block table", "nrpt", "=", "''", "nbits", "=", "str", "(", "int", "(", "_bytes", ")", "*", "8", ")", "if", "'CHAR'", "in", "_type", ":", "nrpt", "=", "_bytes", "nbits", "=", "_bytes", "afmt", "=", "cols_fmt", "[", "_type", "]", "+", "nbits", "if", "'LOGICAL'", "in", "_type", ":", "afmt", "=", "cols_fmt", "[", "_type", "]", "cfmt", "=", "cols_pfmt", "[", "_type", "]", "+", "nrpt", "#print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt", "cols_dict", "[", "ptype", "]", "=", "fits", ".", "Column", "(", "name", "=", "ptype", ",", "format", "=", "cfmt", ",", "array", "=", "numpy", ".", "zeros", "(", "gcount", ",", "dtype", "=", "afmt", ")", ")", "cols", ".", "append", "(", "cols_dict", "[", "ptype", "]", ")", "# This keeps the columns in order", "_shape", "=", "_naxis", "[", "1", ":", "]", "_shape", ".", "reverse", "(", ")", "_code", "=", "fits", ".", "BITPIX2DTYPE", "[", "_bitpix", "]", "_bscale", "=", "phdr", ".", "get", "(", "'BSCALE'", ",", "1", ")", "_bzero", "=", "phdr", ".", "get", "(", "'BZERO'", ",", "0", ")", "if", "phdr", "[", "'DATATYPE'", "]", "[", ":", "10", "]", "==", "'UNSIGNED*2'", ":", "_uint16", "=", "1", "_bzero", "=", "32768", "else", ":", "_uint16", "=", "0", "# delete from the end, so it will not conflict with previous delete", "for", "i", "in", "range", "(", "len", "(", "phdr", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "phdr", ".", "cards", "[", "i", "]", ".", "keyword", "in", "_list", ":", "del", "phdr", "[", "i", "]", "# clean up other primary header keywords", "phdr", "[", "'SIMPLE'", "]", "=", "True", "phdr", "[", "'GROUPS'", "]", "=", "False", "_after", "=", "'NAXIS'", "if", "_naxis0", ">", "0", ":", "_after", "+=", "str", "(", "_naxis0", ")", "phdr", ".", "set", "(", "'EXTEND'", ",", "value", "=", "True", ",", "comment", "=", "\"FITS dataset may contain extensions\"", ",", "after", "=", "_after", ")", "# Use copy-on-write for all data types since byteswap may be needed", "# in some platforms.", "f1", "=", "open", "(", "data_file", ",", "mode", "=", "'rb'", ")", "dat", "=", "f1", ".", "read", "(", ")", "errormsg", "=", "\"\"", "# Define data array for all groups", "arr_shape", "=", "_naxis", "[", ":", "]", "arr_shape", "[", "0", "]", "=", "gcount", "arr_stack", "=", "numpy", ".", "zeros", "(", "arr_shape", ",", "dtype", "=", "_code", ")", "loc", "=", "0", "for", "k", "in", "range", "(", "gcount", ")", ":", "ext_dat", "=", "numpy", ".", "fromstring", "(", "dat", "[", 
"loc", ":", "loc", "+", "data_size", "]", ",", "dtype", "=", "_code", ")", "ext_dat", "=", "ext_dat", ".", "reshape", "(", "_shape", ")", "if", "_uint16", ":", "ext_dat", "+=", "_bzero", "# Check to see whether there are any NaN's or infs which might indicate", "# a byte-swapping problem, such as being written out on little-endian", "# and being read in on big-endian or vice-versa.", "if", "_code", ".", "find", "(", "'float'", ")", ">=", "0", "and", "(", "numpy", ".", "any", "(", "numpy", ".", "isnan", "(", "ext_dat", ")", ")", "or", "numpy", ".", "any", "(", "numpy", ".", "isinf", "(", "ext_dat", ")", ")", ")", ":", "errormsg", "+=", "\"===================================\\n\"", "errormsg", "+=", "\"= WARNING: =\\n\"", "errormsg", "+=", "\"= Input image: =\\n\"", "errormsg", "+=", "input", "+", "\"[%d]\\n\"", "%", "(", "k", "+", "1", ")", "errormsg", "+=", "\"= had floating point data values =\\n\"", "errormsg", "+=", "\"= of NaN and/or Inf. =\\n\"", "errormsg", "+=", "\"===================================\\n\"", "elif", "_code", ".", "find", "(", "'int'", ")", ">=", "0", ":", "# Check INT data for max values", "ext_dat_frac", ",", "ext_dat_exp", "=", "numpy", ".", "frexp", "(", "ext_dat", ")", "if", "ext_dat_exp", ".", "max", "(", ")", "==", "int", "(", "_bitpix", ")", "-", "1", ":", "# Potential problems with byteswapping", "errormsg", "+=", "\"===================================\\n\"", "errormsg", "+=", "\"= WARNING: =\\n\"", "errormsg", "+=", "\"= Input image: =\\n\"", "errormsg", "+=", "input", "+", "\"[%d]\\n\"", "%", "(", "k", "+", "1", ")", "errormsg", "+=", "\"= had integer data values =\\n\"", "errormsg", "+=", "\"= with maximum bitvalues. =\\n\"", "errormsg", "+=", "\"===================================\\n\"", "arr_stack", "[", "k", "]", "=", "ext_dat", "rec", "=", "numpy", ".", "fromstring", "(", "dat", "[", "loc", "+", "data_size", ":", "loc", "+", "group_size", "]", ",", "dtype", "=", "formats", ")", "loc", "+=", "group_size", "# Add data from this GPB to table", "for", "i", "in", "range", "(", "1", ",", "pcount", "+", "1", ")", ":", "val", "=", "rec", "[", "0", "]", "[", "i", "-", "1", "]", "if", "i", "in", "bools", ":", "if", "val", ":", "val", "=", "'T'", "else", ":", "val", "=", "'F'", "cols", "[", "i", "-", "1", "]", ".", "array", "[", "k", "]", "=", "val", "# Based on the first group, add GPB keywords to PRIMARY header", "if", "k", "==", "0", ":", "# Create separate PyFITS Card objects for each entry in 'rec'", "# and update Primary HDU with these keywords after PSIZE", "for", "i", "in", "range", "(", "1", ",", "pcount", "+", "1", ")", ":", "#val = rec.field(i-1)[0]", "val", "=", "rec", "[", "0", "]", "[", "i", "-", "1", "]", "if", "val", ".", "dtype", ".", "kind", "==", "'S'", ":", "val", "=", "val", ".", "decode", "(", "'ascii'", ")", "if", "i", "in", "bools", ":", "if", "val", ":", "val", "=", "True", "else", ":", "val", "=", "False", "if", "i", "in", "floats", ":", "# use fromstring, format in Card is deprecated in pyfits 0.9", "_str", "=", "'%-8s= %20.13G / %s'", "%", "(", "key", "[", "i", "-", "1", "]", ",", "val", ",", "comm", "[", "i", "-", "1", "]", ")", "_card", "=", "fits", ".", "Card", ".", "fromstring", "(", "_str", ")", "else", ":", "_card", "=", "fits", ".", "Card", "(", "keyword", "=", "key", "[", "i", "-", "1", "]", ",", "value", "=", "val", ",", "comment", "=", "comm", "[", "i", "-", "1", "]", ")", "phdr", ".", "insert", "(", "phdr_indx", "+", "i", ",", "_card", ")", "# deal with bscale/bzero", "if", "(", "_bscale", "!=", "1", "or", "_bzero", "!=", 
"0", ")", ":", "phdr", "[", "'BSCALE'", "]", "=", "_bscale", "phdr", "[", "'BZERO'", "]", "=", "_bzero", "#hdulist.append(ext_hdu)", "# Define new table based on Column definitions", "ext_table", "=", "fits", ".", "TableHDU", ".", "from_columns", "(", "cols", ")", "ext_table", ".", "header", ".", "set", "(", "'EXTNAME'", ",", "value", "=", "input", "+", "'.tab'", ",", "after", "=", "'TFIELDS'", ")", "# Add column descriptions to header of table extension to match stwfits output", "for", "i", "in", "range", "(", "len", "(", "key", ")", ")", ":", "ext_table", ".", "header", ".", "append", "(", "fits", ".", "Card", "(", "keyword", "=", "key", "[", "i", "]", ",", "value", "=", "comm", "[", "i", "]", ")", ")", "if", "errormsg", "!=", "\"\"", ":", "errormsg", "+=", "\"===================================\\n\"", "errormsg", "+=", "\"= This file may have been =\\n\"", "errormsg", "+=", "\"= written out on a platform =\\n\"", "errormsg", "+=", "\"= with a different byte-order. =\\n\"", "errormsg", "+=", "\"= =\\n\"", "errormsg", "+=", "\"= Please verify that the values =\\n\"", "errormsg", "+=", "\"= are correct or apply the =\\n\"", "errormsg", "+=", "\"= '.byteswap()' method. =\\n\"", "errormsg", "+=", "\"===================================\\n\"", "print", "(", "errormsg", ")", "f1", ".", "close", "(", ")", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "fits", ".", "PrimaryHDU", "(", "header", "=", "phdr", ",", "data", "=", "arr_stack", ")", "]", ")", "hdulist", ".", "append", "(", "ext_table", ")", "return", "hdulist" ]
Input GEIS files "input" will be read and converted to a new GEIS file whose byte-order has been swapped from its original state. Parameters ---------- input - str Full filename with path of input GEIS image header file output - str Full filename with path of output GEIS image header file If None, a default name will be created as input_swap.??h clobber - bool Overwrite any pre-existing output file? [Default: True] Notes ----- This function will automatically read and write out the data file using the GEIS image naming conventions.
[ "Input", "GEIS", "files", "input", "will", "be", "read", "and", "converted", "to", "a", "new", "GEIS", "file", "whose", "byte", "-", "order", "has", "been", "swapped", "from", "its", "original", "state", "." ]
python
train
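Per its docstring, byteswap needs only the header filename and derives the paired data file itself. A one-line usage sketch; the GEIS filename is invented and the import path is assumed from the record's path field.

# Illustrative only: writes w0vi0101t_swap.c0h / w0vi0101t_swap.c0d alongside
# the originals, with all group data byte-swapped.
from stsci.tools import swapgeis

swapgeis.byteswap('w0vi0101t.c0h')  # hypothetical GEIS header file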
michael-lazar/rtv
rtv/packages/praw/objects.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L1232-L1245
def comments(self):  # pylint: disable=E0202
    """Return forest of comments, with top-level comments as tree roots.

    May contain instances of MoreComment objects. To easily replace these
    objects with Comment objects, use the replace_more_comments method then
    fetch this attribute. Use comment replies to walk down the tree. To get
    an unnested, flat list of comments from this attribute use
    helpers.flatten_tree.

    """
    if self._comments is None:
        self.comments = Submission.from_url(  # pylint: disable=W0212
            self.reddit_session, self._api_link, comments_only=True)
    return self._comments
[ "def", "comments", "(", "self", ")", ":", "# pylint: disable=E0202", "if", "self", ".", "_comments", "is", "None", ":", "self", ".", "comments", "=", "Submission", ".", "from_url", "(", "# pylint: disable=W0212", "self", ".", "reddit_session", ",", "self", ".", "_api_link", ",", "comments_only", "=", "True", ")", "return", "self", ".", "_comments" ]
Return forest of comments, with top-level comments as tree roots. May contain instances of MoreComment objects. To easily replace these objects with Comment objects, use the replace_more_comments method then fetch this attribute. Use comment replies to walk down the tree. To get an unnested, flat list of comments from this attribute use helpers.flatten_tree.
[ "Return", "forest", "of", "comments", "with", "top", "-", "level", "comments", "as", "tree", "roots", "." ]
python
train
devricks/soft_drf
soft_drf/api/viewsets/__init__.py
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/viewsets/__init__.py#L11-L19
def get_serializer_class(self, action=None):
    """
    Return the serializer class depending on request method.
    Attribute of proper serializer should be defined.
    """
    if action is not None:
        return getattr(self, '%s_serializer_class' % action)
    else:
        return super(GenericViewSet, self).get_serializer_class()
[ "def", "get_serializer_class", "(", "self", ",", "action", "=", "None", ")", ":", "if", "action", "is", "not", "None", ":", "return", "getattr", "(", "self", ",", "'%s_serializer_class'", "%", "action", ")", "else", ":", "return", "super", "(", "GenericViewSet", ",", "self", ")", ".", "get_serializer_class", "(", ")" ]
Return the serializer class depending on request method. Attribute of proper serializer should be defined.
[ "Return", "the", "serializer", "class", "depending", "on", "request", "method", ".", "Attribute", "of", "proper", "serializer", "should", "be", "defined", "." ]
python
train
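The getattr lookup above implies a '<action>_serializer_class' naming convention on the viewset. A dependency-free sketch of that convention, with invented class and attribute names:

# Minimal stand-in showing how per-action serializer attributes are resolved.
class ThingSerializer: pass
class ThingListSerializer: pass

class ThingViewSet:
    serializer_class = ThingSerializer            # default
    list_serializer_class = ThingListSerializer   # picked when action='list'

    def get_serializer_class(self, action=None):
        if action is not None:
            return getattr(self, '%s_serializer_class' % action)
        return self.serializer_class

print(ThingViewSet().get_serializer_class('list'))  # ThingListSerializer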
macacajs/wd.py
macaca/util.py
https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/util.py#L152-L180
def value_to_key_strokes(value):
    """Convert value to a list of key strokes
    >>> value_to_key_strokes(123)
    ['123']
    >>> value_to_key_strokes('123')
    ['123']
    >>> value_to_key_strokes([1, 2, 3])
    ['123']
    >>> value_to_key_strokes(['1', '2', '3'])
    ['123']

    Args:
        value(int|str|list)

    Returns:
        A list of string.
    """
    result = ''
    if isinstance(value, Integral):
        value = str(value)
    for v in value:
        if isinstance(v, Keys):
            result += v.value
        elif isinstance(v, Integral):
            result += str(v)
        else:
            result += v
    return [result]
[ "def", "value_to_key_strokes", "(", "value", ")", ":", "result", "=", "''", "if", "isinstance", "(", "value", ",", "Integral", ")", ":", "value", "=", "str", "(", "value", ")", "for", "v", "in", "value", ":", "if", "isinstance", "(", "v", ",", "Keys", ")", ":", "result", "+=", "v", ".", "value", "elif", "isinstance", "(", "v", ",", "Integral", ")", ":", "result", "+=", "str", "(", "v", ")", "else", ":", "result", "+=", "v", "return", "[", "result", "]" ]
Convert value to a list of key strokes >>> value_to_key_strokes(123) ['123'] >>> value_to_key_strokes('123') ['123'] >>> value_to_key_strokes([1, 2, 3]) ['123'] >>> value_to_key_strokes(['1', '2', '3']) ['123'] Args: value(int|str|list) Returns: A list of string.
[ "Convert", "value", "to", "a", "list", "of", "key", "strokes", ">>>", "value_to_key_strokes", "(", "123", ")", "[", "123", "]", ">>>", "value_to_key_strokes", "(", "123", ")", "[", "123", "]", ">>>", "value_to_key_strokes", "(", "[", "1", "2", "3", "]", ")", "[", "123", "]", ">>>", "value_to_key_strokes", "(", "[", "1", "2", "3", "]", ")", "[", "123", "]" ]
python
valid
pip-services3-python/pip-services3-commons-python
pip_services3_commons/convert/ArrayConverter.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/ArrayConverter.py#L23-L41
def to_nullable_array(value):
    """
    Converts value into array object.
    Single values are converted into arrays with a single element.

    :param value: the value to convert.

    :return: array object or None when value is None.
    """
    # Shortcuts
    if value == None:
        return None
    if type(value) == list:
        return value

    if type(value) in [tuple, set]:
        return list(value)

    return [value]
[ "def", "to_nullable_array", "(", "value", ")", ":", "# Shortcuts", "if", "value", "==", "None", ":", "return", "None", "if", "type", "(", "value", ")", "==", "list", ":", "return", "value", "if", "type", "(", "value", ")", "in", "[", "tuple", ",", "set", "]", ":", "return", "list", "(", "value", ")", "return", "[", "value", "]" ]
Converts value into array object. Single values are converted into arrays with a single element. :param value: the value to convert. :return: array object or None when value is None.
[ "Converts", "value", "into", "array", "object", ".", "Single", "values", "are", "converted", "into", "arrays", "with", "a", "single", "element", "." ]
python
train
pallets/werkzeug
src/werkzeug/wrappers/base_request.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/wrappers/base_request.py#L543-L545
def full_path(self):
    """Requested path as unicode, including the query string."""
    return self.path + u"?" + to_unicode(self.query_string, self.url_charset)
[ "def", "full_path", "(", "self", ")", ":", "return", "self", ".", "path", "+", "u\"?\"", "+", "to_unicode", "(", "self", ".", "query_string", ",", "self", ".", "url_charset", ")" ]
Requested path as unicode, including the query string.
[ "Requested", "path", "as", "unicode", "including", "the", "query", "string", "." ]
python
train
sergiocorreia/panflute
examples/pandocfilters/gabc.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/examples/pandocfilters/gabc.py#L120-L172
def gabc(key, value, fmt, meta):  # pylint:disable=I0011,W0613
    """Handle gabc file inclusion and gabc code block."""
    if key == 'Code':
        [[ident, classes, kvs], contents] = value  # pylint:disable=I0011,W0612
        kvs = {key: value for key, value in kvs}
        if "gabc" in classes:
            if fmt == "latex":
                if ident == "":
                    label = ""
                else:
                    label = '\\label{' + ident + '}'
                return latex(
                    "\n\\smallskip\n{%\n" +
                    latexsnippet('\\gregorioscore{' + contents + '}', kvs) +
                    "%\n}" +
                    label
                )
            else:
                infile = contents + (
                    '.gabc' if '.gabc' not in contents else ''
                )
                with open(infile, 'r') as doc:
                    code = doc.read().split('%%\n')[1]
                return [Image(['', [], []], [], [
                    png(
                        contents,
                        latexsnippet('\\gregorioscore', kvs)
                    ),
                    ""
                ])]
    elif key == 'CodeBlock':
        [[ident, classes, kvs], contents] = value
        kvs = {key: value for key, value in kvs}
        if "gabc" in classes:
            if fmt == "latex":
                if ident == "":
                    label = ""
                else:
                    label = '\\label{' + ident + '}'
                return [latexblock(
                    "\n\\smallskip\n{%\n" +
                    latexsnippet('\\gabcsnippet{' + contents + '}', kvs) +
                    "%\n}" +
                    label
                )]
            else:
                return Para([Image(['', [], []], [], [
                    png(
                        contents,
                        latexsnippet('\\gabcsnippet', kvs)
                    ),
                    ""
                ])])
[ "def", "gabc", "(", "key", ",", "value", ",", "fmt", ",", "meta", ")", ":", "# pylint:disable=I0011,W0613", "if", "key", "==", "'Code'", ":", "[", "[", "ident", ",", "classes", ",", "kvs", "]", ",", "contents", "]", "=", "value", "# pylint:disable=I0011,W0612", "kvs", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "kvs", "}", "if", "\"gabc\"", "in", "classes", ":", "if", "fmt", "==", "\"latex\"", ":", "if", "ident", "==", "\"\"", ":", "label", "=", "\"\"", "else", ":", "label", "=", "'\\\\label{'", "+", "ident", "+", "'}'", "return", "latex", "(", "\"\\n\\\\smallskip\\n{%\\n\"", "+", "latexsnippet", "(", "'\\\\gregorioscore{'", "+", "contents", "+", "'}'", ",", "kvs", ")", "+", "\"%\\n}\"", "+", "label", ")", "else", ":", "infile", "=", "contents", "+", "(", "'.gabc'", "if", "'.gabc'", "not", "in", "contents", "else", "''", ")", "with", "open", "(", "infile", ",", "'r'", ")", "as", "doc", ":", "code", "=", "doc", ".", "read", "(", ")", ".", "split", "(", "'%%\\n'", ")", "[", "1", "]", "return", "[", "Image", "(", "[", "''", ",", "[", "]", ",", "[", "]", "]", ",", "[", "]", ",", "[", "png", "(", "contents", ",", "latexsnippet", "(", "'\\\\gregorioscore'", ",", "kvs", ")", ")", ",", "\"\"", "]", ")", "]", "elif", "key", "==", "'CodeBlock'", ":", "[", "[", "ident", ",", "classes", ",", "kvs", "]", ",", "contents", "]", "=", "value", "kvs", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "kvs", "}", "if", "\"gabc\"", "in", "classes", ":", "if", "fmt", "==", "\"latex\"", ":", "if", "ident", "==", "\"\"", ":", "label", "=", "\"\"", "else", ":", "label", "=", "'\\\\label{'", "+", "ident", "+", "'}'", "return", "[", "latexblock", "(", "\"\\n\\\\smallskip\\n{%\\n\"", "+", "latexsnippet", "(", "'\\\\gabcsnippet{'", "+", "contents", "+", "'}'", ",", "kvs", ")", "+", "\"%\\n}\"", "+", "label", ")", "]", "else", ":", "return", "Para", "(", "[", "Image", "(", "[", "''", ",", "[", "]", ",", "[", "]", "]", ",", "[", "]", ",", "[", "png", "(", "contents", ",", "latexsnippet", "(", "'\\\\gabcsnippet'", ",", "kvs", ")", ")", ",", "\"\"", "]", ")", "]", ")" ]
Handle gabc file inclusion and gabc code block.
[ "Handle", "gabc", "file", "inclusion", "and", "gabc", "code", "block", "." ]
python
train
ska-sa/katcp-python
katcp/ioloop_manager.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/ioloop_manager.py#L199-L220
def decorate_callable(self, callable_):
    """Decorate a callable to use call_in_ioloop"""
    @wraps(callable_)
    def decorated(*args, **kwargs):
        # Extract timeout from request itself or use default for ioloop wrapper
        timeout = kwargs.get('timeout')
        return self.call_in_ioloop(callable_, args, kwargs, timeout)

    decorated.__doc__ = '\n\n'.join((
"""Wrapped async call. Will call in ioloop.

This call will block until the original callable has finished running on the ioloop, and
will pass on the return value. If the original callable returns a future, this call will
wait for the future to resolve and return the value or raise the exception that the future
resolves with.

Original Callable Docstring
---------------------------
""",
        textwrap.dedent(decorated.__doc__ or '')))

    return decorated
[ "def", "decorate_callable", "(", "self", ",", "callable_", ")", ":", "@", "wraps", "(", "callable_", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Extract timeout from request itself or use default for ioloop wrapper", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ")", "return", "self", ".", "call_in_ioloop", "(", "callable_", ",", "args", ",", "kwargs", ",", "timeout", ")", "decorated", ".", "__doc__", "=", "'\\n\\n'", ".", "join", "(", "(", "\"\"\"Wrapped async call. Will call in ioloop.\n\nThis call will block until the original callable has finished running on the ioloop, and\nwill pass on the return value. If the original callable returns a future, this call will\nwait for the future to resolve and return the value or raise the exception that the future\nresolves with.\n\nOriginal Callable Docstring\n---------------------------\n\"\"\"", ",", "textwrap", ".", "dedent", "(", "decorated", ".", "__doc__", "or", "''", ")", ")", ")", "return", "decorated" ]
Decorate a callable to use call_in_ioloop
[ "Decorate", "a", "callable", "to", "use", "call_in_ioloop" ]
python
train
geminipy/geminipy
geminipy/__init__.py
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L243-L252
def tradevolume(self):
    """Send a request to get your trade volume, return the response."""
    request = '/v1/tradevolume'
    url = self.base_url + request
    params = {
        'request': request,
        'nonce': self.get_nonce()
    }

    return requests.post(url, headers=self.prepare(params))
[ "def", "tradevolume", "(", "self", ")", ":", "request", "=", "'/v1/tradevolume'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", "}", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a request to get your trade volume, return the response.
[ "Send", "a", "request", "to", "get", "your", "trade", "volume", "return", "the", "response", "." ]
python
train
5j9/wikitextparser
wikitextparser/_wikitext.py
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L297-L300
def string(self) -> str:
    """Return str(self)."""
    start, end = self._span
    return self._lststr[0][start:end]
[ "def", "string", "(", "self", ")", "->", "str", ":", "start", ",", "end", "=", "self", ".", "_span", "return", "self", ".", "_lststr", "[", "0", "]", "[", "start", ":", "end", "]" ]
Return str(self).
[ "Return", "str", "(", "self", ")", "." ]
python
test
quadrismegistus/prosodic
prosodic/Text.py
https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/Text.py#L191-L256
def stats_positions(self,meter=None,all_parses=False):
	"""Produce statistics from the parser"""
	"""Positions
		All feats of slots
		All constraint violations


		"""
	parses = self.allParses(meter=meter) if all_parses else [[parse] for parse in self.bestParses(meter=meter)]
	dx={}
	for parselist in parses:
		for parse in parselist:
			if not parse: continue
			slot_i=0
			for pos in parse.positions:
				for slot in pos.slots:
					slot_i+=1
					feat_dicts = [slot.feats, pos.constraintScores, pos.feats]
					for feat_dict in feat_dicts:
						for k,v in feat_dict.items():
							dk = (slot_i,str(k))
							if not dk in dx: dx[dk]=[]
							dx[dk]+=[v]

	def _writegen():
		for ((slot_i,k),l) in sorted(dx.items()):
			l2=[]
			for x in l:
				if type(x)==bool:
					x=1 if x else 0
				elif type(x)==type(None):
					x=0
				elif type(x) in [str,unicode]:
					continue
				else:
					x=float(x)
					if x>1: x=1
				l2+=[x]
			#print k, l2
			#try:
			if not l2: continue
			avg=sum(l2) / float(len(l2))
			count=sum(l2)
			chances=len(l2)
			#except TypeError:
			#	continue
			odx={'slot_num':slot_i, 'statistic':k, 'average':avg, 'count':count, 'chances':chances, 'text':self.name}
			odx['header']=['slot_num', 'statistic','count','chances','average']
			#print odx
			yield odx

	name=self.name.replace('.txt','')
	ofn=os.path.join(self.dir_results, 'stats','texts',name, name+'.positions.csv')
	#print ofn
	if not os.path.exists(os.path.split(ofn)[0]): os.makedirs(os.path.split(ofn)[0])
	for dx in writegengen(ofn, _writegen): yield dx
	print '>> saved:',ofn
[ "def", "stats_positions", "(", "self", ",", "meter", "=", "None", ",", "all_parses", "=", "False", ")", ":", "\"\"\"Positions\n\t\tAll feats of slots\n\t\tAll constraint violations\n\n\n\t\t\"\"\"", "parses", "=", "self", ".", "allParses", "(", "meter", "=", "meter", ")", "if", "all_parses", "else", "[", "[", "parse", "]", "for", "parse", "in", "self", ".", "bestParses", "(", "meter", "=", "meter", ")", "]", "dx", "=", "{", "}", "for", "parselist", "in", "parses", ":", "for", "parse", "in", "parselist", ":", "if", "not", "parse", ":", "continue", "slot_i", "=", "0", "for", "pos", "in", "parse", ".", "positions", ":", "for", "slot", "in", "pos", ".", "slots", ":", "slot_i", "+=", "1", "feat_dicts", "=", "[", "slot", ".", "feats", ",", "pos", ".", "constraintScores", ",", "pos", ".", "feats", "]", "for", "feat_dict", "in", "feat_dicts", ":", "for", "k", ",", "v", "in", "feat_dict", ".", "items", "(", ")", ":", "dk", "=", "(", "slot_i", ",", "str", "(", "k", ")", ")", "if", "not", "dk", "in", "dx", ":", "dx", "[", "dk", "]", "=", "[", "]", "dx", "[", "dk", "]", "+=", "[", "v", "]", "def", "_writegen", "(", ")", ":", "for", "(", "(", "slot_i", ",", "k", ")", ",", "l", ")", "in", "sorted", "(", "dx", ".", "items", "(", ")", ")", ":", "l2", "=", "[", "]", "for", "x", "in", "l", ":", "if", "type", "(", "x", ")", "==", "bool", ":", "x", "=", "1", "if", "x", "else", "0", "elif", "type", "(", "x", ")", "==", "type", "(", "None", ")", ":", "x", "=", "0", "elif", "type", "(", "x", ")", "in", "[", "str", ",", "unicode", "]", ":", "continue", "else", ":", "x", "=", "float", "(", "x", ")", "if", "x", ">", "1", ":", "x", "=", "1", "l2", "+=", "[", "x", "]", "#print k, l2", "#try:", "if", "not", "l2", ":", "continue", "avg", "=", "sum", "(", "l2", ")", "/", "float", "(", "len", "(", "l2", ")", ")", "count", "=", "sum", "(", "l2", ")", "chances", "=", "len", "(", "l2", ")", "#except TypeError:", "#\tcontinue", "odx", "=", "{", "'slot_num'", ":", "slot_i", ",", "'statistic'", ":", "k", ",", "'average'", ":", "avg", ",", "'count'", ":", "count", ",", "'chances'", ":", "chances", ",", "'text'", ":", "self", ".", "name", "}", "odx", "[", "'header'", "]", "=", "[", "'slot_num'", ",", "'statistic'", ",", "'count'", ",", "'chances'", ",", "'average'", "]", "#print odx", "yield", "odx", "name", "=", "self", ".", "name", ".", "replace", "(", "'.txt'", ",", "''", ")", "ofn", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir_results", ",", "'stats'", ",", "'texts'", ",", "name", ",", "name", "+", "'.positions.csv'", ")", "#print ofn", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "split", "(", "ofn", ")", "[", "0", "]", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "split", "(", "ofn", ")", "[", "0", "]", ")", "for", "dx", "in", "writegengen", "(", "ofn", ",", "_writegen", ")", ":", "yield", "dx", "print", "'>> saved:'", ",", "ofn" ]
Produce statistics from the parser
[ "Produce", "statistics", "from", "the", "parser" ]
python
train
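Usage sketch for the record above (hypothetical: it assumes a text object `t` of the class defining `stats_positions`, with `dir_results` configured and parses already computed; the meter name is illustrative):

    # Drain the generator; rows are yielded back and also written to
    # <dir_results>/stats/texts/<name>/<name>.positions.csv via writegengen.
    for row in t.stats_positions(meter='default_english', all_parses=False):
        print(row['slot_num'], row['statistic'], row['average'])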
jxtech/wechatpy
wechatpy/client/api/wxa.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/wxa.py#L145-L164
def commit(self, template_id, ext_json, version, description):
        """
        Upload the code of an authorized mini program account
        For details, see
        https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4

        :param template_id: ID of the code template in the template library
        :param ext_json: third-party custom configuration
        :param version: code version number, definable by the developer
        :param description: code description, definable by the developer
        """
        return self._post(
            'wxa/commit',
            data={
                'template_id': template_id,
                'ext_json': ext_json,
                'user_version': version,
                'user_desc': description,
            },
        )
[ "def", "commit", "(", "self", ",", "template_id", ",", "ext_json", ",", "version", ",", "description", ")", ":", "return", "self", ".", "_post", "(", "'wxa/commit'", ",", "data", "=", "{", "'template_id'", ":", "template_id", ",", "'ext_json'", ":", "ext_json", ",", "'user_version'", ":", "version", ",", "'user_desc'", ":", "description", ",", "}", ",", ")" ]
Upload the code of an authorized mini program account
        For details, see
        https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4

        :param template_id: ID of the code template in the template library
        :param ext_json: third-party custom configuration
        :param version: code version number, definable by the developer
        :param description: code description, definable by the developer
[ "Upload", "the", "code", "of", "an", "authorized", "mini", "program", "account", "For", "details", "see", "https", ":", "//", "open", ".", "weixin", ".", "qq", ".", "com", "/", "cgi", "-", "bin", "/", "showdocument?action", "=", "dir_list&id", "=", "open1489140610_Uavc4" ]
python
train
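A hedged call sketch (it assumes an authenticated wechatpy client that exposes this API group as `client.wxa`; the credentials, template id, and ext_json payload are made-up values):

    import json

    from wechatpy import WeChatClient

    client = WeChatClient('appid', 'secret')   # placeholder credentials
    client.wxa.commit(
        template_id=0,                         # illustrative template id
        ext_json=json.dumps({'extAppid': 'wx1234567890abcdef'}),
        version='v1.0.0',
        description='first commit',
    )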
autokey/autokey
lib/autokey/iomediator/_iomediator.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/iomediator/_iomediator.py#L211-L216
def send_up(self, count): """ Sends the given number of up key presses. """ for i in range(count): self.interface.send_key(Key.UP)
[ "def", "send_up", "(", "self", ",", "count", ")", ":", "for", "i", "in", "range", "(", "count", ")", ":", "self", ".", "interface", ".", "send_key", "(", "Key", ".", "UP", ")" ]
Sends the given number of up key presses.
[ "Sends", "the", "given", "number", "of", "up", "key", "presses", "." ]
python
train
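Minimal usage sketch (assumes an initialized IoMediator `mediator` with a working interface backend):

    mediator.send_up(3)   # emits the Up key three times through the interface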
noobermin/lspreader
lspreader/lspreader.py
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L155-L162
def flds_firstsort(d):
    '''
    Perform a lexsort and return the sort indices and shape as a tuple.
    '''
    shape = [len(np.unique(d[l])) for l in ['xs', 'ys', 'zs']]
    si = np.lexsort((d['z'], d['y'], d['x']))
    return si, shape
[ "def", "flds_firstsort", "(", "d", ")", ":", "shape", "=", "[", "len", "(", "np", ".", "unique", "(", "d", "[", "l", "]", ")", ")", "for", "l", "in", "[", "'xs'", ",", "'ys'", ",", "'zs'", "]", "]", "si", "=", "np", ".", "lexsort", "(", "(", "d", "[", "'z'", "]", ",", "d", "[", "'y'", "]", ",", "d", "[", "'x'", "]", ")", ")", "return", "si", ",", "shape" ]
Perform a lexsort and return the sort indices and shape as a tuple.
[ "Perform", "a", "lexsort", "and", "return", "the", "sort", "indices", "and", "shape", "as", "a", "tuple", "." ]
python
train
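A runnable sketch of applying the returned indices and shape (the import path and the field name 'Ex' are assumptions; real dicts come from lspreader's dump readers):

    import numpy as np
    from lspreader.lspreader import flds_firstsort   # import path assumed

    # Tiny synthetic dump: a 2x2x1 grid plus one made-up field 'Ex'.
    d = {'x': np.array([0., 0., 1., 1.]), 'y': np.array([0., 1., 0., 1.]),
         'z': np.zeros(4),
         'xs': np.array([0., 1.]), 'ys': np.array([0., 1.]), 'zs': np.zeros(1),
         'Ex': np.arange(4.0)}
    si, shape = flds_firstsort(d)        # shape == [2, 2, 1]
    Ex = d['Ex'][si].reshape(shape)      # field reordered onto the grid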
nathankw/pulsarpy
pulsarpy/models.py
https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L969-L1005
def post(cls, payload):
        """
        A wrapper over Model.post() that handles the case where a Library has a PairedBarcode
        and the user may have supplied the PairedBarcode in the form of index1-index2, i.e.
        GATTTCCA-GGCGTCGA. This isn't the PairedBarcode's record name or a record ID, thus
        Model.post() won't be able to figure out the PairedBarcode's ID to substitute in the
        payload (via a call to cls.replace_name_with_id()). Thus, this wrapper will attempt to
        replace a PairedBarcode sequence in the payload with a PairedBarcode ID, then pass the
        payload off to Model.post().
        """
        slpk_attr_name = "sequencing_library_prep_kit_id"
        paired_bc_id_attr_name = "paired_barcode_id"
        seq_reg = re.compile("^[ACGTN]+$")
        if paired_bc_id_attr_name in payload:
            try:
                index1, index2 = payload[paired_bc_id_attr_name].upper().split("-")
            except ValueError:
                # Not in GATTTCCA-GGCGTCGA format so let it be.
                return Model.post(cls=cls, payload=payload)
            if not seq_reg.match(index1) or not seq_reg.match(index2):
                # Not in GATTTCCA-GGCGTCGA format so let it be.
                return Model.post(cls=cls, payload=payload)
            if slpk_attr_name not in payload:
                raise Exception("You need to include the " + slpk_attr_name + " attribute name.")
            slpk_id = SequencingLibraryPrepKit.replace_name_with_id(payload[slpk_attr_name])
            payload[slpk_attr_name] = slpk_id
            index1_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 1, "sequence": index1}, require=True)["id"]
            index2_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 2, "sequence": index2}, require=True)["id"]
            # Ensure that a PairedBarcode exists for this index combo, creating it if needed:
            pbc_payload = {"index1_id": index1_id, "index2_id": index2_id, slpk_attr_name: slpk_id}
            pbc_exists = PairedBarcode.find_by(payload=pbc_payload)
            if not pbc_exists:
                pbc_exists = PairedBarcode.post(payload=pbc_payload)
            pbc_id = pbc_exists["id"]
            payload[paired_bc_id_attr_name] = pbc_id
        return super().post(payload=payload)
[ "def", "post", "(", "cls", ",", "payload", ")", ":", "slpk_attr_name", "=", "\"sequencing_library_prep_kit_id\"", "paired_bc_id_attr_name", "=", "\"paired_barcode_id\"", "seq_reg", "=", "re", ".", "compile", "(", "\"^[ACGTN]+$\"", ")", "if", "paired_bc_id_attr_name", "in", "payload", ":", "try", ":", "index1", ",", "index2", "=", "payload", "[", "paired_bc_id_attr_name", "]", ".", "upper", "(", ")", ".", "split", "(", "\"-\"", ")", "except", "ValueError", ":", "# Not in GATTTCCA-GGCGTCGA format so let it be. ", "return", "Model", ".", "post", "(", "cls", "=", "cls", ",", "payload", "=", "payload", ")", "if", "not", "seq_reg", ".", "match", "(", "index1", ")", "or", "not", "seq_reg", ".", "match", "(", "index2", ")", ":", "# Not in GATTTCCA-GGCGTCGA format so let it be. ", "return", "Model", ".", "post", "(", "cls", "=", "cls", ",", "payload", "=", "payload", ")", "if", "not", "slpk_attr_name", "in", "payload", ":", "raise", "Exception", "(", "\"You need to include the \"", "+", "slpk", "+", "\" attribute name.\"", ")", "slpk_id", "=", "SequencingLibraryPrepKit", ".", "replace_name_with_id", "(", "payload", "[", "slpk_attr_name", "]", ")", "payload", "[", "slpk_attr_name", "]", "=", "slpk_id", "index1_id", "=", "Barcode", ".", "find_by", "(", "payload", "=", "{", "slpk_attr_name", ":", "slpk_id", ",", "\"index_number\"", ":", "1", ",", "\"sequence\"", ":", "index1", "}", ",", "require", "=", "True", ")", "[", "\"id\"", "]", "index2_id", "=", "Barcode", ".", "find_by", "(", "payload", "=", "{", "slpk_attr_name", ":", "slpk_id", ",", "\"index_number\"", ":", "2", ",", "\"sequence\"", ":", "index2", "}", ",", "require", "=", "True", ")", "[", "\"id\"", "]", "# Ensure that PairedBarcode for this index combo already exists:", "pbc_payload", "=", "{", "\"index1_id\"", ":", "index1_id", ",", "\"index2_id\"", ":", "index2_id", ",", "slpk_attr_name", ":", "slpk_id", "}", "pbc_exists", "=", "PairedBarcode", ".", "find_by", "(", "payload", "=", "pbc_payload", ")", "if", "not", "pbc_exists", ":", "pbc_exists", "=", "PairedBarcode", ".", "post", "(", "payload", "=", "pbc_payload", ")", "pbc_id", "=", "pbc_exists", "[", "\"id\"", "]", "payload", "[", "paired_bc_id_attr_name", "]", "=", "pbc_id", "return", "super", "(", ")", ".", "post", "(", "payload", "=", "payload", ")" ]
A wrapper over Model.post() that handles the case where a Library has a PairedBarcode and the user may have supplied the PairedBarcode in the form of index1-index2, i.e. GATTTCCA-GGCGTCGA. This isn't the PairedBarcode's record name or a record ID, thus Model.post() won't be able to figure out the PairedBarcode's ID to substitute in the payload (via a call to cls.replace_name_with_id()). Thus, this wrapper will attempt to replace a PairedBarcode sequence in the payload with a PairedBarcode ID, then pass the payload off to Model.post().
[ "A", "wrapper", "over", "Model", ".", "post", "()", "that", "handles", "the", "case", "where", "a", "Library", "has", "a", "PairedBarcode", "and", "the", "user", "may", "have", "supplied", "the", "PairedBarcode", "in", "the", "form", "of", "index1", "-", "index2", "i", ".", "e", ".", "GATTTCCA", "-", "GGCGTCGA", ".", "This", "isn", "t", "the", "PairedBarcode", "s", "record", "name", "or", "a", "record", "ID", "thus", "Model", ".", "post", "()", "won", "t", "be", "able", "to", "figure", "out", "the", "PairedBarcode", "s", "ID", "to", "substitute", "in", "the", "payload", "(", "via", "a", "call", "to", "cls", ".", "replace_name_with_id", "()", ")", ".", "Thus", "this", "wrapper", "will", "attempt", "to", "replace", "a", "PairedBarcode", "sequence", "in", "the", "payload", "with", "a", "PairedBarcode", "ID", "then", "pass", "the", "payload", "off", "to", "Model", ".", "post", "()", "." ]
python
train
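Hedged example of the sequence-form barcode path (all record values are invented; assumes the `Library` model class from this module and a configured Pulsar connection):

    payload = {
        'name': 'my-library',                             # hypothetical record
        'sequencing_library_prep_kit_id': 'My Prep Kit',  # name resolved to an ID
        'paired_barcode_id': 'GATTTCCA-GGCGTCGA',         # index1-index2 form
    }
    rec = Library.post(payload)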
spacetelescope/drizzlepac
drizzlepac/updatenpol.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/updatenpol.py#L286-L295
def run(configobj=None,editpars=False): """ Teal interface for running this code. """ if configobj is None: configobj =teal.teal(__taskname__,loadOnly=(not editpars)) update(configobj['input'],configobj['refdir'], local=configobj['local'],interactive=configobj['interactive'], wcsupdate=configobj['wcsupdate'])
[ "def", "run", "(", "configobj", "=", "None", ",", "editpars", "=", "False", ")", ":", "if", "configobj", "is", "None", ":", "configobj", "=", "teal", ".", "teal", "(", "__taskname__", ",", "loadOnly", "=", "(", "not", "editpars", ")", ")", "update", "(", "configobj", "[", "'input'", "]", ",", "configobj", "[", "'refdir'", "]", ",", "local", "=", "configobj", "[", "'local'", "]", ",", "interactive", "=", "configobj", "[", "'interactive'", "]", ",", "wcsupdate", "=", "configobj", "[", "'wcsupdate'", "]", ")" ]
Teal interface for running this code.
[ "Teal", "interface", "for", "running", "this", "code", "." ]
python
train
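Sketch of the two TEAL entry points (the parameter values are placeholders built from the config keys the function reads):

    run(editpars=True)   # open the TEAL parameter editor first
    run(configobj={'input': '*flt.fits', 'refdir': 'jref$', 'local': False,
                   'interactive': False, 'wcsupdate': True})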
Jaza/flask-restplus-patched
flask_restplus_patched/namespace.py
https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/namespace.py#L96-L192
def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
        """
        Endpoint response OpenAPI documentation decorator.

        It automatically documents HTTPError%(code)d responses with relevant
        schemas.

        Arguments:
            model (flask_marshmallow.Schema) - it can be a class or an instance
                of the class, which will be used for OpenAPI documentation
                purposes. It can be omitted if ``code`` argument is set to an
                error HTTP status code.
            code (int) - HTTP status code which is documented.
            description (str)

        Example:
        >>> @namespace.response(BaseTeamSchema(many=True))
        ... @namespace.response(code=HTTPStatus.FORBIDDEN)
        ... def get_teams():
        ...     if not user.is_admin:
        ...         abort(HTTPStatus.FORBIDDEN)
        ...     return Team.query.all()
        """
        code = HTTPStatus(code)
        if code is HTTPStatus.NO_CONTENT:
            assert model is None
        if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
            if code.value not in http_exceptions.default_exceptions:
                raise ValueError("`model` parameter is required for code %d" % code)
            model = self.model(
                name='HTTPError%d' % code,
                model=DefaultHTTPErrorSchema(http_code=code)
            )
        if description is None:
            description = code.description

        def response_serializer_decorator(func):
            """
            This decorator handles responses to serialize the returned value
            with a given model.
            """
            def dump_wrapper(*args, **kwargs):
                # pylint: disable=missing-docstring
                response = func(*args, **kwargs)
                extra_headers = None

                if response is None:
                    if model is not None:
                        raise ValueError("Response cannot be None with HTTP status %d" % code)
                    return flask.Response(status=code)
                elif isinstance(response, flask.Response) or model is None:
                    return response
                elif isinstance(response, tuple):
                    response, _code, extra_headers = unpack(response)
                else:
                    _code = code

                if HTTPStatus(_code) is code:
                    response = model.dump(response).data
                return response, _code, extra_headers

            return dump_wrapper

        def decorator(func_or_class):
            if code.value in http_exceptions.default_exceptions:
                # If the code is handled by raising an exception, it will
                # produce a response later, so we don't need to apply a useless
                # wrapper.
                decorated_func_or_class = func_or_class
            elif isinstance(func_or_class, type):
                # Handle Resource classes decoration
                # pylint: disable=protected-access
                func_or_class._apply_decorator_to_methods(response_serializer_decorator)
                decorated_func_or_class = func_or_class
            else:
                decorated_func_or_class = wraps(func_or_class)(
                    response_serializer_decorator(func_or_class)
                )

            if model is None:
                api_model = None
            else:
                if isinstance(model, Model):
                    api_model = model
                else:
                    api_model = self.model(model=model)
                if getattr(model, 'many', False):
                    api_model = [api_model]

            doc_decorator = self.doc(
                responses={
                    code.value: (description, api_model)
                }
            )
            return doc_decorator(decorated_func_or_class)

        return decorator
[ "def", "response", "(", "self", ",", "model", "=", "None", ",", "code", "=", "HTTPStatus", ".", "OK", ",", "description", "=", "None", ",", "*", "*", "kwargs", ")", ":", "code", "=", "HTTPStatus", "(", "code", ")", "if", "code", "is", "HTTPStatus", ".", "NO_CONTENT", ":", "assert", "model", "is", "None", "if", "model", "is", "None", "and", "code", "not", "in", "{", "HTTPStatus", ".", "ACCEPTED", ",", "HTTPStatus", ".", "NO_CONTENT", "}", ":", "if", "code", ".", "value", "not", "in", "http_exceptions", ".", "default_exceptions", ":", "raise", "ValueError", "(", "\"`model` parameter is required for code %d\"", "%", "code", ")", "model", "=", "self", ".", "model", "(", "name", "=", "'HTTPError%d'", "%", "code", ",", "model", "=", "DefaultHTTPErrorSchema", "(", "http_code", "=", "code", ")", ")", "if", "description", "is", "None", ":", "description", "=", "code", ".", "description", "def", "response_serializer_decorator", "(", "func", ")", ":", "\"\"\"\n This decorator handles responses to serialize the returned value\n with a given model.\n \"\"\"", "def", "dump_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "extra_headers", "=", "None", "if", "response", "is", "None", ":", "if", "model", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Response cannot not be None with HTTP status %d\"", "%", "code", ")", "return", "flask", ".", "Response", "(", "status", "=", "code", ")", "elif", "isinstance", "(", "response", ",", "flask", ".", "Response", ")", "or", "model", "is", "None", ":", "return", "response", "elif", "isinstance", "(", "response", ",", "tuple", ")", ":", "response", ",", "_code", ",", "extra_headers", "=", "unpack", "(", "response", ")", "else", ":", "_code", "=", "code", "if", "HTTPStatus", "(", "_code", ")", "is", "code", ":", "response", "=", "model", ".", "dump", "(", "response", ")", ".", "data", "return", "response", ",", "_code", ",", "extra_headers", "return", "dump_wrapper", "def", "decorator", "(", "func_or_class", ")", ":", "if", "code", ".", "value", "in", "http_exceptions", ".", "default_exceptions", ":", "# If the code is handled by raising an exception, it will", "# produce a response later, so we don't need to apply a useless", "# wrapper.", "decorated_func_or_class", "=", "func_or_class", "elif", "isinstance", "(", "func_or_class", ",", "type", ")", ":", "# Handle Resource classes decoration", "# pylint: disable=protected-access", "func_or_class", ".", "_apply_decorator_to_methods", "(", "response_serializer_decorator", ")", "decorated_func_or_class", "=", "func_or_class", "else", ":", "decorated_func_or_class", "=", "wraps", "(", "func_or_class", ")", "(", "response_serializer_decorator", "(", "func_or_class", ")", ")", "if", "model", "is", "None", ":", "api_model", "=", "None", "else", ":", "if", "isinstance", "(", "model", ",", "Model", ")", ":", "api_model", "=", "model", "else", ":", "api_model", "=", "self", ".", "model", "(", "model", "=", "model", ")", "if", "getattr", "(", "model", ",", "'many'", ",", "False", ")", ":", "api_model", "=", "[", "api_model", "]", "doc_decorator", "=", "self", ".", "doc", "(", "responses", "=", "{", "code", ".", "value", ":", "(", "description", ",", "api_model", ")", "}", ")", "return", "doc_decorator", "(", "decorated_func_or_class", ")", "return", "decorator" ]
Endpoint response OpenAPI documentation decorator. It automatically documents HTTPError%(code)d responses with relevant schemas. Arguments: model (flask_marshmallow.Schema) - it can be a class or an instance of the class, which will be used for OpenAPI documentation purposes. It can be omitted if ``code`` argument is set to an error HTTP status code. code (int) - HTTP status code which is documented. description (str) Example: >>> @namespace.response(BaseTeamSchema(many=True)) ... @namespace.response(code=HTTPStatus.FORBIDDEN) ... def get_teams(): ... if not user.is_admin: ... abort(HTTPStatus.FORBIDDEN) ... return Team.query.all()
[ "Endpoint", "response", "OpenAPI", "documentation", "decorator", "." ]
python
train
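Complementing the docstring's own example, a hedged sketch of the tuple-return path the wrapper unpacks (the schema, helper, and header value are invented):

    @namespace.response(TeamSchema(), code=HTTPStatus.CREATED)
    def post_team():
        team = create_team()   # hypothetical helper
        # (payload, status, headers) is unpacked and serialized by dump_wrapper
        return team, HTTPStatus.CREATED, {'Location': '/teams/1'}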
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/entity.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/entity.py#L846-L853
def original(self): """(:class:`Image`) The original image. It could be :const:`None` if there are no stored images yet. """ images = self.query._original_images(**self.identity_map) if images: return images[0]
[ "def", "original", "(", "self", ")", ":", "images", "=", "self", ".", "query", ".", "_original_images", "(", "*", "*", "self", ".", "identity_map", ")", "if", "images", ":", "return", "images", "[", "0", "]" ]
(:class:`Image`) The original image. It could be :const:`None` if there are no stored images yet.
[ "(", ":", "class", ":", "Image", ")", "The", "original", "image", ".", "It", "could", "be", ":", "const", ":", "None", "if", "there", "are", "no", "stored", "images", "yet", "." ]
python
train
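Usage sketch (assumes a mapped entity with an image attachment declared as `user.picture`, per this package's conventions):

    img = user.picture.original
    if img is not None:
        print(img.width, img.height)   # stored Image rows carry size metadata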
sammchardy/python-binance
binance/depthcache.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/depthcache.py#L221-L254
def _process_depth_message(self, msg, buffer=False): """Process a depth event message. :param msg: Depth event message. :return: """ if buffer and msg['u'] <= self._last_update_id: # ignore any updates before the initial update id return elif msg['U'] != self._last_update_id + 1: # if not buffered check we get sequential updates # otherwise init cache again self._init_cache() # add any bid or ask values for bid in msg['b']: self._depth_cache.add_bid(bid) for ask in msg['a']: self._depth_cache.add_ask(ask) # keeping update time self._depth_cache.update_time = msg['E'] # call the callback with the updated depth cache if self._callback: self._callback(self._depth_cache) self._last_update_id = msg['u'] # after processing event see if we need to refresh the depth cache if self._refresh_interval and int(time.time()) > self._refresh_time: self._init_cache()
[ "def", "_process_depth_message", "(", "self", ",", "msg", ",", "buffer", "=", "False", ")", ":", "if", "buffer", "and", "msg", "[", "'u'", "]", "<=", "self", ".", "_last_update_id", ":", "# ignore any updates before the initial update id", "return", "elif", "msg", "[", "'U'", "]", "!=", "self", ".", "_last_update_id", "+", "1", ":", "# if not buffered check we get sequential updates", "# otherwise init cache again", "self", ".", "_init_cache", "(", ")", "# add any bid or ask values", "for", "bid", "in", "msg", "[", "'b'", "]", ":", "self", ".", "_depth_cache", ".", "add_bid", "(", "bid", ")", "for", "ask", "in", "msg", "[", "'a'", "]", ":", "self", ".", "_depth_cache", ".", "add_ask", "(", "ask", ")", "# keeping update time", "self", ".", "_depth_cache", ".", "update_time", "=", "msg", "[", "'E'", "]", "# call the callback with the updated depth cache", "if", "self", ".", "_callback", ":", "self", ".", "_callback", "(", "self", ".", "_depth_cache", ")", "self", ".", "_last_update_id", "=", "msg", "[", "'u'", "]", "# after processing event see if we need to refresh the depth cache", "if", "self", ".", "_refresh_interval", "and", "int", "(", "time", ".", "time", "(", ")", ")", ">", "self", ".", "_refresh_time", ":", "self", ".", "_init_cache", "(", ")" ]
Process a depth event message. :param msg: Depth event message. :return:
[ "Process", "a", "depth", "event", "message", "." ]
python
train
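Setup sketch showing how this handler gets fed (credentials are placeholders; the DepthCacheManager constructor signature is assumed from this vintage of the library):

    from binance.client import Client
    from binance.depthcache import DepthCacheManager

    def on_depth(depth_cache):
        print(depth_cache.symbol, depth_cache.get_bids()[:1])

    client = Client('api_key', 'api_secret')   # placeholder credentials
    dcm = DepthCacheManager(client, 'BNBBTC', callback=on_depth)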
wavefrontHQ/python-client
wavefront_api_client/api/search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/search_api.py#L4819-L4840
def search_user_group_for_facet(self, facet, **kwargs): # noqa: E501 """Lists the values of a specific facet over the customer's user groups # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_user_group_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_user_group_for_facet_with_http_info(facet, **kwargs) # noqa: E501 else: (data) = self.search_user_group_for_facet_with_http_info(facet, **kwargs) # noqa: E501 return data
[ "def", "search_user_group_for_facet", "(", "self", ",", "facet", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_user_group_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_user_group_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Lists the values of a specific facet over the customer's user groups # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_user_group_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread.
[ "Lists", "the", "values", "of", "a", "specific", "facet", "over", "the", "customer", "s", "user", "groups", "#", "noqa", ":", "E501" ]
python
train
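Call sketch for both request styles described in the docstring (assumes an initialized SearchApi instance `api`; the facet name is illustrative):

    resp = api.search_user_group_for_facet('name')                   # synchronous
    thread = api.search_user_group_for_facet('name', async_req=True)
    resp = thread.get()                                              # asynchronous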
toomore/goristock
grs/goristock.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/goristock.py#L511-L526
def ckMAO(self,data,s=5,pm=False):
        """Determine where a positive/negative bias (deviation) turns.
           s = size of the sampling window to check
           pm = True (positive) / False (negative) bias
           return [T/F, index of the turning day (counted from the end), bias value]
        """
        c = data[-s:]
        if pm:
            ckvalue = max(c)
            preckvalue = max(c) > 0
        else:
            ckvalue = min(c)
            preckvalue = max(c) < 0
        return [s - c.index(ckvalue) < 4 and c.index(ckvalue) != s-1 and preckvalue,
                s - c.index(ckvalue) - 1,
                ckvalue]
[ "def", "ckMAO", "(", "self", ",", "data", ",", "s", "=", "5", ",", "pm", "=", "False", ")", ":", "c", "=", "data", "[", "-", "s", ":", "]", "if", "pm", ":", "ckvalue", "=", "max", "(", "c", ")", "preckvalue", "=", "max", "(", "c", ")", ">", "0", "else", ":", "ckvalue", "=", "min", "(", "c", ")", "preckvalue", "=", "max", "(", "c", ")", "<", "0", "return", "[", "s", "-", "c", ".", "index", "(", "ckvalue", ")", "<", "4", "and", "c", ".", "index", "(", "ckvalue", ")", "!=", "s", "-", "1", "and", "preckvalue", ",", "s", "-", "c", ".", "index", "(", "ckvalue", ")", "-", "1", ",", "ckvalue", "]" ]
Determine where a positive/negative bias (deviation) turns.
           s = size of the sampling window to check
           pm = True (positive) / False (negative) bias
           return [T/F, index of the turning day (counted from the end), bias value]
[ "Determine", "where", "a", "positive", "/", "negative", "bias", "(", "deviation", ")", "turns", "s", "=", "size", "of", "the", "sampling", "window", "pm", "=", "True", "(", "positive", ")", "/", "False", "(", "negative", ")", "bias", "return", "[", "T", "/", "F", "index", "of", "the", "turning", "day", "bias", "value", "]" ]
python
train
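Worked example (the bias series is invented; assumes a goristock instance `g`):

    osc = [1.2, 0.8, -0.3, -0.9, -0.4]        # last s=5 bias values
    hit, turn, value = g.ckMAO(osc, s=5, pm=False)
    # min is -0.9 at index 3, but max(c) < 0 fails -> [False, 1, -0.9]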
osrg/ryu
ryu/services/protocols/bgp/core_managers/table_manager.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/table_manager.py#L162-L172
def learn_path(self, path):
        """Inserts `path` into the correct global table.

        Since the known paths to `Destination` have changed, we queue it for
        further processing.
        """
        # Get VPN/Global table
        table = self.get_global_table_by_route_family(path.route_family)
        gpath_dest = table.insert(path)
        # Since destination was updated, we enqueue it for processing.
        self._signal_bus.dest_changed(gpath_dest)
[ "def", "learn_path", "(", "self", ",", "path", ")", ":", "# Get VPN/Global table", "table", "=", "self", ".", "get_global_table_by_route_family", "(", "path", ".", "route_family", ")", "gpath_dest", "=", "table", ".", "insert", "(", "path", ")", "# Since destination was updated, we enqueue it for processing.", "self", ".", "_signal_bus", ".", "dest_changed", "(", "gpath_dest", ")" ]
Inserts `path` into the correct global table.

        Since the known paths to `Destination` have changed, we queue it for
        further processing.
[ "Inserts", "path", "into", "the", "correct", "global", "table", "." ]
python
train
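Internal-use sketch (assumes a constructed BGP path object exposing `route_family`):

    table_manager.learn_path(new_path)   # inserts into the matching VPN/global
                                         # table and signals dest_changed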
gem/oq-engine
openquake/server/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L708-L732
def calc_datastore(request, job_id): """ Download a full datastore file. :param request: `django.http.HttpRequest` object. :param job_id: The id of the requested datastore :returns: A `django.http.HttpResponse` containing the content of the requested artifact, if present, else throws a 404 """ job = logs.dbcmd('get_job', int(job_id)) if job is None: return HttpResponseNotFound() if not utils.user_has_permission(request, job.user_name): return HttpResponseForbidden() fname = job.ds_calc_dir + '.hdf5' response = FileResponse( FileWrapper(open(fname, 'rb')), content_type=HDF5) response['Content-Disposition'] = ( 'attachment; filename=%s' % os.path.basename(fname)) response['Content-Length'] = str(os.path.getsize(fname)) return response
[ "def", "calc_datastore", "(", "request", ",", "job_id", ")", ":", "job", "=", "logs", ".", "dbcmd", "(", "'get_job'", ",", "int", "(", "job_id", ")", ")", "if", "job", "is", "None", ":", "return", "HttpResponseNotFound", "(", ")", "if", "not", "utils", ".", "user_has_permission", "(", "request", ",", "job", ".", "user_name", ")", ":", "return", "HttpResponseForbidden", "(", ")", "fname", "=", "job", ".", "ds_calc_dir", "+", "'.hdf5'", "response", "=", "FileResponse", "(", "FileWrapper", "(", "open", "(", "fname", ",", "'rb'", ")", ")", ",", "content_type", "=", "HDF5", ")", "response", "[", "'Content-Disposition'", "]", "=", "(", "'attachment; filename=%s'", "%", "os", ".", "path", ".", "basename", "(", "fname", ")", ")", "response", "[", "'Content-Length'", "]", "=", "str", "(", "os", ".", "path", ".", "getsize", "(", "fname", ")", ")", "return", "response" ]
Download a full datastore file. :param request: `django.http.HttpRequest` object. :param job_id: The id of the requested datastore :returns: A `django.http.HttpResponse` containing the content of the requested artifact, if present, else throws a 404
[ "Download", "a", "full", "datastore", "file", "." ]
python
train
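Client-side sketch against this view (host, port, and job id are placeholders, and the URL pattern is inferred from the view's role rather than confirmed from the URLconf):

    import requests

    r = requests.get('http://localhost:8800/v1/calc/123/datastore', stream=True)
    with open('calc_123.hdf5', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1 << 20):
            f.write(chunk)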
pyQode/pyqode.core
pyqode/core/panels/folding.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/folding.py#L553-L570
def leaveEvent(self, event): """ Removes scope decorations and background from the editor and the panel if highlight_caret_scope, else simply update the scope decorations to match the caret scope. """ super(FoldingPanel, self).leaveEvent(event) QtWidgets.QApplication.restoreOverrideCursor() self._highlight_runner.cancel_requests() if not self.highlight_caret_scope: self._clear_scope_decos() self._mouse_over_line = None self._current_scope = None else: self._block_nbr = -1 self._highlight_caret_scope() self.editor.repaint()
[ "def", "leaveEvent", "(", "self", ",", "event", ")", ":", "super", "(", "FoldingPanel", ",", "self", ")", ".", "leaveEvent", "(", "event", ")", "QtWidgets", ".", "QApplication", ".", "restoreOverrideCursor", "(", ")", "self", ".", "_highlight_runner", ".", "cancel_requests", "(", ")", "if", "not", "self", ".", "highlight_caret_scope", ":", "self", ".", "_clear_scope_decos", "(", ")", "self", ".", "_mouse_over_line", "=", "None", "self", ".", "_current_scope", "=", "None", "else", ":", "self", ".", "_block_nbr", "=", "-", "1", "self", ".", "_highlight_caret_scope", "(", ")", "self", ".", "editor", ".", "repaint", "(", ")" ]
Removes scope decorations and background from the editor and the panel if highlight_caret_scope, else simply update the scope decorations to match the caret scope.
[ "Removes", "scope", "decorations", "and", "background", "from", "the", "editor", "and", "the", "panel", "if", "highlight_caret_scope", "else", "simply", "update", "the", "scope", "decorations", "to", "match", "the", "caret", "scope", "." ]
python
train
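Configuration sketch for the flag this handler branches on (the panel lookup call is an assumption about the pyqode API):

    panel = editor.panels.get(FoldingPanel)   # assumed lookup by class
    panel.highlight_caret_scope = True        # keep scope decorations on leave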
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/tasks.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/tasks.py#L121-L127
def create_build_paths(context: Context): """ Creates directories needed for build outputs """ paths = [context.app.asset_build_path, context.app.screenshots_build_path, context.app.collected_assets_path] for path in filter(None, paths): os.makedirs(path, exist_ok=True)
[ "def", "create_build_paths", "(", "context", ":", "Context", ")", ":", "paths", "=", "[", "context", ".", "app", ".", "asset_build_path", ",", "context", ".", "app", ".", "screenshots_build_path", ",", "context", ".", "app", ".", "collected_assets_path", "]", "for", "path", "in", "filter", "(", "None", ",", "paths", ")", ":", "os", ".", "makedirs", "(", "path", ",", "exist_ok", "=", "True", ")" ]
Creates directories needed for build outputs
[ "Creates", "directories", "needed", "for", "build", "outputs" ]
python
train
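Usage sketch (assumes a build Context `ctx` whose app defines the three path attributes; the call is idempotent thanks to exist_ok=True):

    create_build_paths(ctx)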
jaraco/keyrings.alt
keyrings/alt/file.py
https://github.com/jaraco/keyrings.alt/blob/5b71223d12bf9ac6abd05b1b395f1efccb5ea660/keyrings/alt/file.py#L47-L54
def _create_cipher(self, password, salt, IV): """ Create the cipher object to encrypt or decrypt a payload. """ from Crypto.Protocol.KDF import PBKDF2 from Crypto.Cipher import AES pw = PBKDF2(password, salt, dkLen=self.block_size) return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
[ "def", "_create_cipher", "(", "self", ",", "password", ",", "salt", ",", "IV", ")", ":", "from", "Crypto", ".", "Protocol", ".", "KDF", "import", "PBKDF2", "from", "Crypto", ".", "Cipher", "import", "AES", "pw", "=", "PBKDF2", "(", "password", ",", "salt", ",", "dkLen", "=", "self", ".", "block_size", ")", "return", "AES", ".", "new", "(", "pw", "[", ":", "self", ".", "block_size", "]", ",", "AES", ".", "MODE_CFB", ",", "IV", ")" ]
Create the cipher object to encrypt or decrypt a payload.
[ "Create", "the", "cipher", "object", "to", "encrypt", "or", "decrypt", "a", "payload", "." ]
python
train
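Round-trip sketch (key material is generated here for illustration; assumes the EncryptedKeyring class from this module and a PyCrypto-compatible install such as pycryptodome):

    import os

    kr = EncryptedKeyring()                 # class name assumed from this module
    salt, iv = os.urandom(16), os.urandom(16)
    enc = kr._create_cipher('hunter2', salt, iv).encrypt(b'secret')
    dec = kr._create_cipher('hunter2', salt, iv).decrypt(enc)
    assert dec == b'secret'                 # same password/salt/IV round-trips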
google/openhtf
openhtf/util/timeouts.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/timeouts.py#L422-L440
def take_at_most_n_seconds(time_s, func, *args, **kwargs): """A function that returns whether a function call took less than time_s. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the function with. Returns: True if the function finished in less than time_s seconds. """ thread = threading.Thread(target=func, args=args, kwargs=kwargs) thread.start() thread.join(time_s) if thread.is_alive(): return False return True
[ "def", "take_at_most_n_seconds", "(", "time_s", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "thread", ".", "start", "(", ")", "thread", ".", "join", "(", "time_s", ")", "if", "thread", ".", "is_alive", "(", ")", ":", "return", "False", "return", "True" ]
A function that returns whether a function call took less than time_s. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the function with. Returns: True if the function finished in less than time_s seconds.
[ "A", "function", "that", "returns", "whether", "a", "function", "call", "took", "less", "than", "time_s", "." ]
python
train
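Self-contained sketch:

    import time

    assert take_at_most_n_seconds(0.5, time.sleep, 0.1) is True
    assert take_at_most_n_seconds(0.1, time.sleep, 5) is False
    # NB: the second sleeper thread is not killed and keeps running.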
barrust/pyspellchecker
spellchecker/spellchecker.py
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L79-L88
def distance(self, val): """ set the distance parameter """ tmp = 2 try: int(val) if val > 0 and val <= 2: tmp = val except (ValueError, TypeError): pass self._distance = tmp
[ "def", "distance", "(", "self", ",", "val", ")", ":", "tmp", "=", "2", "try", ":", "int", "(", "val", ")", "if", "val", ">", "0", "and", "val", "<=", "2", ":", "tmp", "=", "val", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "self", ".", "_distance", "=", "tmp" ]
set the distance parameter
[ "set", "the", "distance", "parameter" ]
python
train
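Usage sketch (assumes the matching `distance` property getter on SpellChecker):

    from spellchecker import SpellChecker

    checker = SpellChecker()
    checker.distance = 1     # accepted: integer in (0, 2]
    checker.distance = 5     # out of range, silently reset to the default
    assert checker.distance == 2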
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L934-L938
def community_topic_delete(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic" api_path = "/api/v2/community/topics/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "community_topic_delete", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/community/topics/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "help_center", "/", "topics#delete", "-", "topic" ]
python
train
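Call sketch (credentials and the topic id are placeholders):

    from zdesk import Zendesk

    zd = Zendesk('https://example.zendesk.com', 'user@example.com',
                 'api_token_value', True)   # True selects API-token auth
    zd.community_topic_delete(id=360000123456)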
apache/incubator-heron
heron/tools/ui/src/python/handlers/topology.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/ui/src/python/handlers/topology.py#L176-L195
def get(self, cluster, environ, topology, container): ''' :param cluster: :param environ: :param topology: :param container: :return: ''' path = self.get_argument("path") options = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path, baseUrl=self.baseUrl ) self.render("file.html", **options)
[ "def", "get", "(", "self", ",", "cluster", ",", "environ", ",", "topology", ",", "container", ")", ":", "path", "=", "self", ".", "get_argument", "(", "\"path\"", ")", "options", "=", "dict", "(", "cluster", "=", "cluster", ",", "environ", "=", "environ", ",", "topology", "=", "topology", ",", "container", "=", "container", ",", "path", "=", "path", ",", "baseUrl", "=", "self", ".", "baseUrl", ")", "self", ".", "render", "(", "\"file.html\"", ",", "*", "*", "options", ")" ]
:param cluster: :param environ: :param topology: :param container: :return:
[ ":", "param", "cluster", ":", ":", "param", "environ", ":", ":", "param", "topology", ":", ":", "param", "container", ":", ":", "return", ":" ]
python
valid
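Client-side sketch of the query this Tornado handler expects (the URL pattern, topology names, and log path are all illustrative; only the required `path` query argument is taken from the code):

    import requests

    requests.get('http://localhost:8889/topologies/local/default'
                 '/ExclamationTopology/container_1/file',
                 params={'path': 'log-files/container_1.log.0'})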
Workiva/furious
furious/config.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/config.py#L98-L115
def find_furious_yaml(config_file=__file__): """ Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found """ checked = set() result = _find_furious_yaml(os.path.dirname(config_file), checked) if not result: result = _find_furious_yaml(os.getcwd(), checked) return result
[ "def", "find_furious_yaml", "(", "config_file", "=", "__file__", ")", ":", "checked", "=", "set", "(", ")", "result", "=", "_find_furious_yaml", "(", "os", ".", "path", ".", "dirname", "(", "config_file", ")", ",", "checked", ")", "if", "not", "result", ":", "result", "=", "_find_furious_yaml", "(", "os", ".", "getcwd", "(", ")", ",", "checked", ")", "return", "result" ]
Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found
[ "Traverse", "directory", "trees", "to", "find", "a", "furious", ".", "yaml", "file" ]
python
train
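Usage sketch:

    path = find_furious_yaml()   # walks up from this module, then from cwd
    if path is None:
        raise RuntimeError('no furious.yaml found')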
bodylabs/lace
lace/serialization/dae.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/serialization/dae.py#L7-L12
def _dump(f, mesh): ''' Writes a mesh to collada file format. ''' dae = mesh_to_collada(mesh) dae.write(f.name)
[ "def", "_dump", "(", "f", ",", "mesh", ")", ":", "dae", "=", "mesh_to_collada", "(", "mesh", ")", "dae", ".", "write", "(", "f", ".", "name", ")" ]
Writes a mesh to collada file format.
[ "Writes", "a", "mesh", "to", "collada", "file", "format", "." ]
python
train
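Sketch (the Mesh constructor call is an assumption; callers normally reach this through the package's public serialization helpers):

    from lace.mesh import Mesh

    mesh = Mesh(filename='example.obj')   # placeholder input mesh
    with open('example.dae', 'w') as f:
        _dump(f, mesh)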
ejhigson/nestcheck
nestcheck/diagnostics_tables.py
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/diagnostics_tables.py#L231-L244
def run_list_error_summary(run_list, estimator_list, estimator_names,
                           n_simulate, **kwargs):
    """Wrapper which runs run_list_error_values then applies error_values
    summary to the resulting dataframe. See the docstrings for those two
    functions for more details and for descriptions of parameters and output.
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    error_values = run_list_error_values(run_list, estimator_list,
                                         estimator_names, n_simulate, **kwargs)
    return error_values_summary(error_values, true_values=true_values,
                                include_true_values=include_true_values,
                                include_rmse=include_rmse)
[ "def", "run_list_error_summary", "(", "run_list", ",", "estimator_list", ",", "estimator_names", ",", "n_simulate", ",", "*", "*", "kwargs", ")", ":", "true_values", "=", "kwargs", ".", "pop", "(", "'true_values'", ",", "None", ")", "include_true_values", "=", "kwargs", ".", "pop", "(", "'include_true_values'", ",", "False", ")", "include_rmse", "=", "kwargs", ".", "pop", "(", "'include_rmse'", ",", "False", ")", "error_values", "=", "run_list_error_values", "(", "run_list", ",", "estimator_list", ",", "estimator_names", ",", "n_simulate", ",", "*", "*", "kwargs", ")", "return", "error_values_summary", "(", "error_values", ",", "true_values", "=", "true_values", ",", "include_true_values", "=", "include_true_values", ",", "include_rmse", "=", "include_rmse", ")" ]
Wrapper which runs run_list_error_values then applies error_values
    summary to the resulting dataframe. See the docstrings for those two
    functions for more details and for descriptions of parameters and output.
[ "Wrapper", "which", "runs", "run_list_error_values", "then", "applies", "error_values", "summary", "to", "the", "resulting", "dataframe", ".", "See", "the", "docstrings", "for", "those", "two", "functions", "for", "more", "details", "and", "for", "descriptions", "of", "parameters", "and", "output", "." ]
python
train
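Hedged sketch (the run list is assumed to be loaded elsewhere; estimator choices are illustrative):

    import nestcheck.estimators as e

    summary_df = run_list_error_summary(
        run_list, [e.logz, e.param_mean], ['logz', 'theta1'], n_simulate=100)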
Neurita/boyle
boyle/dicom/convert.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L44-L102
def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''):
    """ Add slice duration and acquisition times to the headers of the nifti1
    files in `nii_file`. It will add the repetition time of the DICOM file
    (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as
    any other tag in `dcm_tags`.
    All selected DICOM tags values are set in the `descrip` nifti header field.
    Note that this will modify the header content of `nii_file`.

    Parameters
    ----------
    nii_file: str
       Path to the NifTI file to modify.

    dicom_file: str
       Path to the DICOM file from where to get the meta data.

    dcm_tags: list of str
       List of tags from the DICOM file to read and store in the nifti file.
    """
    # Load a dicom image
    dcmimage = dicom.read_file(dicom_file)

    # Load the nifti1 image
    image = nibabel.load(nii_file)

    # Check that we have a nifti1 format image
    if not isinstance(image, nibabel.nifti1.Nifti1Image):
        raise Exception(
            "Only Nifti1 image are supported not '{0}'.".format(
                type(image)))

    # check if dcm_tags is one string, if yes put it in a list:
    if isinstance(dcm_tags, str):
        dcm_tags = [dcm_tags]

    # Fill the nifti1 header
    header = image.get_header()

    # slice_duration: Time for 1 slice
    repetition_time = float(dcmimage[("0x0018", "0x0080")].value)
    header.set_dim_info(slice=2)
    nb_slices = header.get_n_slices()
    # Force round to 0 digit after coma. If more, nibabel completes to
    # 6 digits with random numbers...
    slice_duration = round(repetition_time / nb_slices, 0)
    header.set_slice_duration(slice_duration)

    # add free dicom fields
    if dcm_tags:
        content = ["{0}={1}".format(name, dcmimage[tag].value)
                   for name, tag in dcm_tags]
        free_field = numpy.array(";".join(content),
                                 dtype=header["descrip"].dtype)
        image.get_header()["descrip"] = free_field

    # Update the image header
    image.update_header()

    # Save the filled image
    nibabel.save(image, nii_file)
[ "def", "add_meta_to_nii", "(", "nii_file", ",", "dicom_file", ",", "dcm_tags", "=", "''", ")", ":", "# Load a dicom image", "dcmimage", "=", "dicom", ".", "read_file", "(", "dicom_file", ")", "# Load the nifti1 image", "image", "=", "nibabel", ".", "load", "(", "nii_file", ")", "# Check the we have a nifti1 format image", "if", "not", "isinstance", "(", "image", ",", "nibabel", ".", "nifti1", ".", "Nifti1Image", ")", ":", "raise", "Exception", "(", "\"Only Nifti1 image are supported not '{0}'.\"", ".", "format", "(", "type", "(", "image", ")", ")", ")", "# check if dcm_tags is one string, if yes put it in a list:", "if", "isinstance", "(", "dcm_tags", ",", "str", ")", ":", "dcm_tags", "=", "[", "dcm_tags", "]", "# Fill the nifti1 header", "header", "=", "image", ".", "get_header", "(", ")", "# slice_duration: Time for 1 slice", "repetition_time", "=", "float", "(", "dcmimage", "[", "(", "\"0x0018\"", ",", "\"0x0080\"", ")", "]", ".", "value", ")", "header", ".", "set_dim_info", "(", "slice", "=", "2", ")", "nb_slices", "=", "header", ".", "get_n_slices", "(", ")", "# Force round to 0 digit after coma. If more, nibabel completes to", "# 6 digits with random numbers...", "slice_duration", "=", "round", "(", "repetition_time", "/", "nb_slices", ",", "0", ")", "header", ".", "set_slice_duration", "(", "slice_duration", ")", "# add free dicom fields", "if", "dcm_tags", ":", "content", "=", "[", "\"{0}={1}\"", ".", "format", "(", "name", ",", "dcmimage", "[", "tag", "]", ".", "value", ")", "for", "name", ",", "tag", "in", "dcm_tags", "]", "free_field", "=", "numpy", ".", "array", "(", "\";\"", ".", "join", "(", "content", ")", ",", "dtype", "=", "header", "[", "\"descrip\"", "]", ".", "dtype", ")", "image", ".", "get_header", "(", ")", "[", "\"descrip\"", "]", "=", "free_field", "# Update the image header", "image", ".", "update_header", "(", ")", "# Save the filled image", "nibabel", ".", "save", "(", "image", ",", "nii_file", ")" ]
Add slice duration and acquisition times to the headers of the nifti1
    files in `nii_file`. It will add the repetition time of the DICOM file
    (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as
    any other tag in `dcm_tags`.
    All selected DICOM tags values are set in the `descrip` nifti header field.
    Note that this will modify the header content of `nii_file`.

    Parameters
    ----------
    nii_file: str
       Path to the NifTI file to modify.

    dicom_file: str
       Path to the DICOM file from where to get the meta data.

    dcm_tags: list of str
       List of tags from the DICOM file to read and store in the nifti file.
[ "Add", "slice", "duration", "and", "acquisition", "times", "to", "the", "headers", "of", "the", "nifti1", "files", "in", "nii_file", ".", "It", "will", "add", "the", "repetition", "time", "of", "the", "DICOM", "file", "(", "field", ":", "{", "0x0018", "0x0080", "DS", "Repetition", "Time", "}", ")", "to", "the", "NifTI", "file", "as", "well", "as", "any", "other", "tag", "in", "dcm_tags", ".", "All", "selected", "DICOM", "tags", "values", "are", "set", "in", "the", "descrip", "nifti", "header", "field", ".", "Note", "that", "this", "will", "modify", "the", "header", "content", "of", "nii_file", "." ]
python
valid
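Call sketch (file names and the extra tag are placeholders; note that the loop unpacks each dcm_tags entry as a (name, tag) pair, so pairs are passed here even though the docstring says 'list of str'):

    add_meta_to_nii('func.nii.gz', 'slice_0001.dcm',
                    dcm_tags=[('EchoTime', ('0x0018', '0x0081'))])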