Dataset columns (name: type, observed range):
repo: stringlengths, 7 to 55
path: stringlengths, 4 to 223
url: stringlengths, 87 to 315
code: stringlengths, 75 to 104k
code_tokens: list
docstring: stringlengths, 1 to 46.9k
docstring_tokens: list
language: stringclasses, 1 value
partition: stringclasses, 3 values
avg_line_len: float64, 7.91 to 980
ionelmc/python-matrix
src/matrix/__init__.py
https://github.com/ionelmc/python-matrix/blob/e1a63879a6c94c37c3883386f1d86eb7c2179a5b/src/matrix/__init__.py#L131-L156
def from_config(config): """ Generate a matrix from a configuration dictionary. """ matrix = {} variables = config.keys() for entries in product(*config.values()): combination = dict(zip(variables, entries)) include = True for value in combination.values(): for reducer in value.reducers: if reducer.pattern == '-': match = not combination[reducer.variable].value else: match = fnmatch(combination[reducer.variable].value, reducer.pattern) if match if reducer.is_exclude else not match: include = False if include: key = '-'.join(entry.alias for entry in entries if entry.alias) data = dict( zip(variables, (entry.value for entry in entries)) ) if key in matrix and data != matrix[key]: raise DuplicateEnvironment(key, data, matrix[key]) matrix[key] = data return matrix
[ "def", "from_config", "(", "config", ")", ":", "matrix", "=", "{", "}", "variables", "=", "config", ".", "keys", "(", ")", "for", "entries", "in", "product", "(", "*", "config", ".", "values", "(", ")", ")", ":", "combination", "=", "dict", "(", "zip", "(", "variables", ",", "entries", ")", ")", "include", "=", "True", "for", "value", "in", "combination", ".", "values", "(", ")", ":", "for", "reducer", "in", "value", ".", "reducers", ":", "if", "reducer", ".", "pattern", "==", "'-'", ":", "match", "=", "not", "combination", "[", "reducer", ".", "variable", "]", ".", "value", "else", ":", "match", "=", "fnmatch", "(", "combination", "[", "reducer", ".", "variable", "]", ".", "value", ",", "reducer", ".", "pattern", ")", "if", "match", "if", "reducer", ".", "is_exclude", "else", "not", "match", ":", "include", "=", "False", "if", "include", ":", "key", "=", "'-'", ".", "join", "(", "entry", ".", "alias", "for", "entry", "in", "entries", "if", "entry", ".", "alias", ")", "data", "=", "dict", "(", "zip", "(", "variables", ",", "(", "entry", ".", "value", "for", "entry", "in", "entries", ")", ")", ")", "if", "key", "in", "matrix", "and", "data", "!=", "matrix", "[", "key", "]", ":", "raise", "DuplicateEnvironment", "(", "key", ",", "data", ",", "matrix", "[", "key", "]", ")", "matrix", "[", "key", "]", "=", "data", "return", "matrix" ]
Generate a matrix from a configuration dictionary.
[ "Generate", "a", "matrix", "from", "a", "configuration", "dictionary", "." ]
python
train
39.115385
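The `from_config` function above builds every combination of the configured values with `itertools.product`. Below is a minimal sketch of just that core step, with a made-up config and without the reducer, alias, and duplicate handling of the real function.
```python
from itertools import product

# Core idea only: Cartesian product of the configured values, each combination
# keyed by joining its entries. Reducers, aliases and duplicate detection from
# the real from_config() are omitted; the config below is invented.
config = {"python": ["2.7", "3.6"], "django": ["1.11", "2.0"]}

matrix = {}
for entries in product(*config.values()):
    key = "-".join(entries)
    matrix[key] = dict(zip(config.keys(), entries))

print(matrix["3.6-2.0"])  # {'python': '3.6', 'django': '2.0'}
```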
jupyter/jupyter-drive
jupyterdrive/mixednbmanager.py
https://github.com/jupyter/jupyter-drive/blob/545813377cb901235e8ea81f83b0ac7755dbd7a9/jupyterdrive/mixednbmanager.py#L186-L208
def path_dispatch_rename(rename_like_method): """ decorator for rename-like function, that need dispatch on 2 arguments """ def _wrapper_method(self, old_path, new_path): old_path, _old_path, old_sentinel = _split_path(old_path); new_path, _new_path, new_sentinel = _split_path(new_path); if old_sentinel != new_sentinel: raise ValueError('Does not know how to move things across contents manager mountpoints') else: sentinel = new_sentinel man = self.managers.get(sentinel, None) if man is not None: rename_meth = getattr(man, rename_like_method.__name__) sub = rename_meth('/'.join(_old_path), '/'.join(_new_path)) return sub else : return rename_meth(self, old_path, new_path) return _wrapper_method
[ "def", "path_dispatch_rename", "(", "rename_like_method", ")", ":", "def", "_wrapper_method", "(", "self", ",", "old_path", ",", "new_path", ")", ":", "old_path", ",", "_old_path", ",", "old_sentinel", "=", "_split_path", "(", "old_path", ")", "new_path", ",", "_new_path", ",", "new_sentinel", "=", "_split_path", "(", "new_path", ")", "if", "old_sentinel", "!=", "new_sentinel", ":", "raise", "ValueError", "(", "'Does not know how to move things across contents manager mountpoints'", ")", "else", ":", "sentinel", "=", "new_sentinel", "man", "=", "self", ".", "managers", ".", "get", "(", "sentinel", ",", "None", ")", "if", "man", "is", "not", "None", ":", "rename_meth", "=", "getattr", "(", "man", ",", "rename_like_method", ".", "__name__", ")", "sub", "=", "rename_meth", "(", "'/'", ".", "join", "(", "_old_path", ")", ",", "'/'", ".", "join", "(", "_new_path", ")", ")", "return", "sub", "else", ":", "return", "rename_meth", "(", "self", ",", "old_path", ",", "new_path", ")", "return", "_wrapper_method" ]
decorator for rename-like function, that need dispatch on 2 arguments
[ "decorator", "for", "rename", "-", "like", "function", "that", "need", "dispatch", "on", "2", "arguments" ]
python
train
39.391304
worstcase/blockade
blockade/net.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/net.py#L235-L251
def insert_rule(self, chain, src=None, dest=None, target=None): """Insert a new rule in the chain """ if not chain: raise ValueError("Invalid chain") if not target: raise ValueError("Invalid target") if not (src or dest): raise ValueError("Need src, dest, or both") args = ["-I", chain] if src: args += ["-s", src] if dest: args += ["-d", dest] args += ["-j", target] self.call(*args)
[ "def", "insert_rule", "(", "self", ",", "chain", ",", "src", "=", "None", ",", "dest", "=", "None", ",", "target", "=", "None", ")", ":", "if", "not", "chain", ":", "raise", "ValueError", "(", "\"Invalid chain\"", ")", "if", "not", "target", ":", "raise", "ValueError", "(", "\"Invalid target\"", ")", "if", "not", "(", "src", "or", "dest", ")", ":", "raise", "ValueError", "(", "\"Need src, dest, or both\"", ")", "args", "=", "[", "\"-I\"", ",", "chain", "]", "if", "src", ":", "args", "+=", "[", "\"-s\"", ",", "src", "]", "if", "dest", ":", "args", "+=", "[", "\"-d\"", ",", "dest", "]", "args", "+=", "[", "\"-j\"", ",", "target", "]", "self", ".", "call", "(", "*", "args", ")" ]
Insert a new rule in the chain
[ "Insert", "a", "new", "rule", "in", "the", "chain" ]
python
valid
29.941176
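`insert_rule` above validates its inputs and then assembles iptables-style arguments before handing them to `self.call`. Here is a standalone sketch of the same argument assembly; no iptables command is executed, and the chain and addresses are examples.
```python
# Same -I/-s/-d/-j argument layout as insert_rule(), as a free function that
# only builds the argument list instead of invoking iptables.
def build_insert_args(chain, src=None, dest=None, target=None):
    if not chain:
        raise ValueError("Invalid chain")
    if not target:
        raise ValueError("Invalid target")
    if not (src or dest):
        raise ValueError("Need src, dest, or both")
    args = ["-I", chain]
    if src:
        args += ["-s", src]
    if dest:
        args += ["-d", dest]
    return args + ["-j", target]

print(build_insert_args("INPUT", src="10.0.0.5", target="DROP"))
# ['-I', 'INPUT', '-s', '10.0.0.5', '-j', 'DROP']
```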
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L6699-L6704
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'collections') and self.collections is not None: _dict['collections'] = [x._to_dict() for x in self.collections] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'collections'", ")", "and", "self", ".", "collections", "is", "not", "None", ":", "_dict", "[", "'collections'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "collections", "]", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
44.666667
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L105-L121
def destroy(self, deal_id, contact_id) : """ Remove an associated contact Remove a deal's associated contact If a deal with the supplied unique identifier does not exist, it returns an error This operation cannot be undone :calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}`` :param int deal_id: Unique identifier of a Deal. :param int contact_id: Unique identifier of a Contact. :return: True if the operation succeeded. :rtype: bool """ status_code, _, _ = self.http_client.delete("/deals/{deal_id}/associated_contacts/{contact_id}".format(deal_id=deal_id, contact_id=contact_id)) return status_code == 204
[ "def", "destroy", "(", "self", ",", "deal_id", ",", "contact_id", ")", ":", "status_code", ",", "_", ",", "_", "=", "self", ".", "http_client", ".", "delete", "(", "\"/deals/{deal_id}/associated_contacts/{contact_id}\"", ".", "format", "(", "deal_id", "=", "deal_id", ",", "contact_id", "=", "contact_id", ")", ")", "return", "status_code", "==", "204" ]
Remove an associated contact Remove a deal's associated contact If a deal with the supplied unique identifier does not exist, it returns an error This operation cannot be undone :calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}`` :param int deal_id: Unique identifier of a Deal. :param int contact_id: Unique identifier of a Contact. :return: True if the operation succeeded. :rtype: bool
[ "Remove", "an", "associated", "contact" ]
python
train
42.058824
linkedin/shiv
src/shiv/cli.py
https://github.com/linkedin/shiv/blob/6bda78676170b35d0877f67b71095c39ce41a74a/src/shiv/cli.py#L128-L204
def main( output_file: str, entry_point: Optional[str], console_script: Optional[str], python: Optional[str], site_packages: Optional[str], compressed: bool, compile_pyc: bool, extend_pythonpath: bool, pip_args: List[str], ) -> None: """ Shiv is a command line utility for building fully self-contained Python zipapps as outlined in PEP 441, but with all their dependencies included! """ if not pip_args and not site_packages: sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES) if output_file is None: sys.exit(NO_OUTFILE) # check for disallowed pip arguments for disallowed in DISALLOWED_ARGS: for supplied_arg in pip_args: if supplied_arg in disallowed: sys.exit( DISALLOWED_PIP_ARGS.format( arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed] ) ) with TemporaryDirectory() as working_path: tmp_site_packages = Path(working_path, "site-packages") if site_packages: shutil.copytree(site_packages, tmp_site_packages) if pip_args: # install deps into staged site-packages pip.install(["--target", str(tmp_site_packages)] + list(pip_args)) # if entry_point is a console script, get the callable if entry_point is None and console_script is not None: try: entry_point = find_entry_point(tmp_site_packages, console_script) except KeyError: if not Path(tmp_site_packages, "bin", console_script).exists(): sys.exit(NO_ENTRY_POINT.format(entry_point=console_script)) # create runtime environment metadata env = Environment( build_id=str(uuid.uuid4()), entry_point=entry_point, script=console_script, compile_pyc=compile_pyc, extend_pythonpath=extend_pythonpath, ) Path(working_path, "environment.json").write_text(env.to_json()) # create bootstrapping directory in working path bootstrap_target = Path(working_path, "_bootstrap") bootstrap_target.mkdir(parents=True, exist_ok=True) # copy bootstrap code copy_bootstrap(bootstrap_target) # create the zip builder.create_archive( Path(working_path), target=Path(output_file).expanduser(), interpreter=python or _interpreter_path(), main="_bootstrap:bootstrap", compressed=compressed, )
[ "def", "main", "(", "output_file", ":", "str", ",", "entry_point", ":", "Optional", "[", "str", "]", ",", "console_script", ":", "Optional", "[", "str", "]", ",", "python", ":", "Optional", "[", "str", "]", ",", "site_packages", ":", "Optional", "[", "str", "]", ",", "compressed", ":", "bool", ",", "compile_pyc", ":", "bool", ",", "extend_pythonpath", ":", "bool", ",", "pip_args", ":", "List", "[", "str", "]", ",", ")", "->", "None", ":", "if", "not", "pip_args", "and", "not", "site_packages", ":", "sys", ".", "exit", "(", "NO_PIP_ARGS_OR_SITE_PACKAGES", ")", "if", "output_file", "is", "None", ":", "sys", ".", "exit", "(", "NO_OUTFILE", ")", "# check for disallowed pip arguments", "for", "disallowed", "in", "DISALLOWED_ARGS", ":", "for", "supplied_arg", "in", "pip_args", ":", "if", "supplied_arg", "in", "disallowed", ":", "sys", ".", "exit", "(", "DISALLOWED_PIP_ARGS", ".", "format", "(", "arg", "=", "supplied_arg", ",", "reason", "=", "DISALLOWED_ARGS", "[", "disallowed", "]", ")", ")", "with", "TemporaryDirectory", "(", ")", "as", "working_path", ":", "tmp_site_packages", "=", "Path", "(", "working_path", ",", "\"site-packages\"", ")", "if", "site_packages", ":", "shutil", ".", "copytree", "(", "site_packages", ",", "tmp_site_packages", ")", "if", "pip_args", ":", "# install deps into staged site-packages", "pip", ".", "install", "(", "[", "\"--target\"", ",", "str", "(", "tmp_site_packages", ")", "]", "+", "list", "(", "pip_args", ")", ")", "# if entry_point is a console script, get the callable", "if", "entry_point", "is", "None", "and", "console_script", "is", "not", "None", ":", "try", ":", "entry_point", "=", "find_entry_point", "(", "tmp_site_packages", ",", "console_script", ")", "except", "KeyError", ":", "if", "not", "Path", "(", "tmp_site_packages", ",", "\"bin\"", ",", "console_script", ")", ".", "exists", "(", ")", ":", "sys", ".", "exit", "(", "NO_ENTRY_POINT", ".", "format", "(", "entry_point", "=", "console_script", ")", ")", "# create runtime environment metadata", "env", "=", "Environment", "(", "build_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ",", "entry_point", "=", "entry_point", ",", "script", "=", "console_script", ",", "compile_pyc", "=", "compile_pyc", ",", "extend_pythonpath", "=", "extend_pythonpath", ",", ")", "Path", "(", "working_path", ",", "\"environment.json\"", ")", ".", "write_text", "(", "env", ".", "to_json", "(", ")", ")", "# create bootstrapping directory in working path", "bootstrap_target", "=", "Path", "(", "working_path", ",", "\"_bootstrap\"", ")", "bootstrap_target", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "# copy bootstrap code", "copy_bootstrap", "(", "bootstrap_target", ")", "# create the zip", "builder", ".", "create_archive", "(", "Path", "(", "working_path", ")", ",", "target", "=", "Path", "(", "output_file", ")", ".", "expanduser", "(", ")", ",", "interpreter", "=", "python", "or", "_interpreter_path", "(", ")", ",", "main", "=", "\"_bootstrap:bootstrap\"", ",", "compressed", "=", "compressed", ",", ")" ]
Shiv is a command line utility for building fully self-contained Python zipapps as outlined in PEP 441, but with all their dependencies included!
[ "Shiv", "is", "a", "command", "line", "utility", "for", "building", "fully", "self", "-", "contained", "Python", "zipapps", "as", "outlined", "in", "PEP", "441", "but", "with", "all", "their", "dependencies", "included!" ]
python
train
32.779221
jaraco/jaraco.windows
jaraco/windows/dpapi.py
https://github.com/jaraco/jaraco.windows/blob/51811efed50b46ad08daa25408a1cc806bc8d519/jaraco/windows/dpapi.py#L54-L57
def get_data(self): "Get the data for this blob" array = ctypes.POINTER(ctypes.c_char * len(self)) return ctypes.cast(self.data, array).contents.raw
[ "def", "get_data", "(", "self", ")", ":", "array", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", "*", "len", "(", "self", ")", ")", "return", "ctypes", ".", "cast", "(", "self", ".", "data", ",", "array", ")", ".", "contents", ".", "raw" ]
Get the data for this blob
[ "Get", "the", "data", "for", "this", "blob" ]
python
train
37.75
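The `get_data` property above casts a raw pointer to a fixed-length `c_char` array and reads its bytes. The same ctypes pattern is shown in isolation below; the DPAPI blob itself is Windows-specific and is not reproduced.
```python
import ctypes

# Reinterpret a buffer through a POINTER(c_char * n) cast and read
# .contents.raw, mirroring the cast used by get_data() above.
buf = ctypes.create_string_buffer(b"hello world")
array_type = ctypes.POINTER(ctypes.c_char * 5)
print(ctypes.cast(buf, array_type).contents.raw)  # b'hello'
```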
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/utils/Settings.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/utils/Settings.py#L95-L132
def get_default(self, *args, **kwargs): """Get the default parameters as defined in the Settings instance. This function proceeds to seamlessly retrieve the argument to pass through, depending on either it was overidden or not: If no argument was overridden in a function of the toolbox, the default argument will be set to ``None``, and this function will retrieve the default parameters as defined by the ``cdt.SETTINGS`` 's attributes. It has two modes of processing: 1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``. 2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once. """ def retrieve_param(i): try: return self.__getattribute__(i) except AttributeError: if i == "device": return self.default_device else: return self.__getattribute__(i.upper()) if len(args) == 0: if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None: return kwargs[list(kwargs.keys())[0]] elif len(kwargs) == 1: return retrieve_param(list(kwargs.keys())[0]) else: raise TypeError("As dict is unordered, it is impossible to give" "the parameters in the correct order.") else: out = [] for i in args: if i[1] is None: out.append(retrieve_param(i[0])) else: out.append(i[1]) return out
[ "def", "get_default", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "retrieve_param", "(", "i", ")", ":", "try", ":", "return", "self", ".", "__getattribute__", "(", "i", ")", "except", "AttributeError", ":", "if", "i", "==", "\"device\"", ":", "return", "self", ".", "default_device", "else", ":", "return", "self", ".", "__getattribute__", "(", "i", ".", "upper", "(", ")", ")", "if", "len", "(", "args", ")", "==", "0", ":", "if", "len", "(", "kwargs", ")", "==", "1", "and", "kwargs", "[", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "is", "not", "None", ":", "return", "kwargs", "[", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "elif", "len", "(", "kwargs", ")", "==", "1", ":", "return", "retrieve_param", "(", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "else", ":", "raise", "TypeError", "(", "\"As dict is unordered, it is impossible to give\"", "\"the parameters in the correct order.\"", ")", "else", ":", "out", "=", "[", "]", "for", "i", "in", "args", ":", "if", "i", "[", "1", "]", "is", "None", ":", "out", ".", "append", "(", "retrieve_param", "(", "i", "[", "0", "]", ")", ")", "else", ":", "out", ".", "append", "(", "i", "[", "1", "]", ")", "return", "out" ]
Get the default parameters as defined in the Settings instance. This function proceeds to seamlessly retrieve the argument to pass through, depending on either it was overidden or not: If no argument was overridden in a function of the toolbox, the default argument will be set to ``None``, and this function will retrieve the default parameters as defined by the ``cdt.SETTINGS`` 's attributes. It has two modes of processing: 1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``. 2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
[ "Get", "the", "default", "parameters", "as", "defined", "in", "the", "Settings", "instance", "." ]
python
valid
43.947368
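A toy stand-in (not the cdt API) for the two calling modes the `get_default` docstring describes: a single keyword argument, or several `(name, value)` tuples, with `None` falling back to the stored setting.
```python
# Illustrative only: FakeSettings and its attributes are invented here to show
# the two calling modes; the real cdt Settings object works on its own attributes.
class FakeSettings:
    verbose = True
    NJOBS = 4

    def get_default(self, *args, **kwargs):
        def retrieve(name):
            if hasattr(self, name):
                return getattr(self, name)
            return getattr(self, name.upper())

        if not args:
            (name, value), = kwargs.items()
            return value if value is not None else retrieve(name)
        return [value if value is not None else retrieve(name) for name, value in args]

settings = FakeSettings()
print(settings.get_default(verbose=None))                         # True
print(settings.get_default(("verbose", False), ("njobs", None)))  # [False, 4]
```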
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L3202-L3250
def apply(self, func, keep_attrs=None, args=(), **kwargs): """Apply a function over the data variables in this dataset. Parameters ---------- func : function Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. args : tuple, optional Positional arguments passed on to `func`. **kwargs : dict Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` over each data variable. Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])}) >>> ds <xarray.Dataset> Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948 bar (x) int64 -1 2 >>> ds.apply(np.fabs) <xarray.Dataset> Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948 bar (x) float64 1.0 2.0 """ # noqa variables = OrderedDict( (k, maybe_wrap_array(v, func(v, *args, **kwargs))) for k, v in self.data_vars.items()) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self.attrs if keep_attrs else None return type(self)(variables, attrs=attrs)
[ "def", "apply", "(", "self", ",", "func", ",", "keep_attrs", "=", "None", ",", "args", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# noqa", "variables", "=", "OrderedDict", "(", "(", "k", ",", "maybe_wrap_array", "(", "v", ",", "func", "(", "v", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", ")", "for", "k", ",", "v", "in", "self", ".", "data_vars", ".", "items", "(", ")", ")", "if", "keep_attrs", "is", "None", ":", "keep_attrs", "=", "_get_keep_attrs", "(", "default", "=", "False", ")", "attrs", "=", "self", ".", "attrs", "if", "keep_attrs", "else", "None", "return", "type", "(", "self", ")", "(", "variables", ",", "attrs", "=", "attrs", ")" ]
Apply a function over the data variables in this dataset. Parameters ---------- func : function Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. args : tuple, optional Positional arguments passed on to `func`. **kwargs : dict Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` over each data variable. Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])}) >>> ds <xarray.Dataset> Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948 bar (x) int64 -1 2 >>> ds.apply(np.fabs) <xarray.Dataset> Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948 bar (x) float64 1.0 2.0
[ "Apply", "a", "function", "over", "the", "data", "variables", "in", "this", "dataset", "." ]
python
train
39.816327
LionelR/pyair
pyair/xair.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L519-L570
def get_manuelles(self, site, code_parametre, debut, fin, court=False): """ Recupération des mesures manuelles (labo) pour un site site: numéro du site (voir fonction liste_sites_prelevement) code_parametre: code ISO du paramètre à rechercher (C6H6=V4) debut: date de début du premier prélèvement fin: date de fin du dernier prélèvement court: Renvoie un tableau au format court ou long (colonnes) """ condition = "WHERE MESLA.NOPOL='%s' " % code_parametre condition += "AND SITMETH.NSIT=%s " % site condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin if court == False: select = """SELECT MESLA.LIBELLE AS MESURE, METH.LIBELLE AS METHODE, ANA.VALEUR AS VALEUR, MESLA.UNITE AS UNITE, ANA.CODE_QUALITE AS CODE_QUALITE, ANA.DATE_ANA AS DATE_ANALYSE, ANA.ID_LABO AS LABO, PRELEV.DATE_DEB AS DEBUT, PRELEV.DATE_FIN AS FIN, ANA.COMMENTAIRE AS COMMENTAIRE, SITE.LIBELLE AS SITE, SITE.AXE AS ADRESSE, COM.NOM_COMMUNE AS COMMUNE""" else: select = """SELECT MESLA.LIBELLE AS MESURE, ANA.VALEUR AS VALEUR, MESLA.UNITE AS UNITE, ANA.CODE_QUALITE AS CODE_QUALITE, PRELEV.DATE_DEB AS DEBUT, PRELEV.DATE_FIN AS FIN, SITE.AXE AS ADRESSE, COM.NOM_COMMUNE AS COMMUNE""" _sql = """%s FROM ANALYSE ANA INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP) INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP) INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP) INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P) INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT) INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE) %s ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition) return psql.read_sql(_sql, self.conn)
[ "def", "get_manuelles", "(", "self", ",", "site", ",", "code_parametre", ",", "debut", ",", "fin", ",", "court", "=", "False", ")", ":", "condition", "=", "\"WHERE MESLA.NOPOL='%s' \"", "%", "code_parametre", "condition", "+=", "\"AND SITMETH.NSIT=%s \"", "%", "site", "condition", "+=", "\"AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') \"", "%", "debut", "condition", "+=", "\"AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') \"", "%", "fin", "if", "court", "==", "False", ":", "select", "=", "\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n METH.LIBELLE AS METHODE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n ANA.DATE_ANA AS DATE_ANALYSE,\n ANA.ID_LABO AS LABO,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n ANA.COMMENTAIRE AS COMMENTAIRE,\n SITE.LIBELLE AS SITE,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"", "else", ":", "select", "=", "\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"", "_sql", "=", "\"\"\"%s\n FROM ANALYSE ANA\n INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)\n INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)\n INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)\n INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)\n INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)\n INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)\n %s\n ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB\"\"\"", "%", "(", "select", ",", "condition", ")", "return", "psql", ".", "read_sql", "(", "_sql", ",", "self", ".", "conn", ")" ]
Recupération des mesures manuelles (labo) pour un site site: numéro du site (voir fonction liste_sites_prelevement) code_parametre: code ISO du paramètre à rechercher (C6H6=V4) debut: date de début du premier prélèvement fin: date de fin du dernier prélèvement court: Renvoie un tableau au format court ou long (colonnes)
[ "Recupération", "des", "mesures", "manuelles", "(", "labo", ")", "pour", "un", "site" ]
python
valid
48.769231
log2timeline/plaso
plaso/parsers/winreg_plugins/task_scheduler.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg_plugins/task_scheduler.py#L73-L181
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): """Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. """ dynamic_info_size_error_reported = False tasks_key = registry_key.GetSubkeyByName('Tasks') tree_key = registry_key.GetSubkeyByName('Tree') if not tasks_key or not tree_key: parser_mediator.ProduceExtractionWarning( 'Task Cache is missing a Tasks or Tree sub key.') return task_guids = {} for sub_key in tree_key.GetSubkeys(): for value_key, id_value in self._GetIdValue(sub_key): # TODO: improve this check to a regex. # The GUID is in the form {%GUID%} and stored an UTF-16 little-endian # string and should be 78 bytes in size. id_value_data_size = len(id_value.data) if id_value_data_size != 78: parser_mediator.ProduceExtractionWarning( 'unsupported Id value data size: {0:d}.'.format( id_value_data_size)) continue guid_string = id_value.GetDataAsObject() task_guids[guid_string] = value_key.name dynamic_info_map = self._GetDataTypeMap('dynamic_info_record') dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record') dynamic_info_size = dynamic_info_map.GetByteSize() dynamic_info2_size = dynamic_info2_map.GetByteSize() for sub_key in tasks_key.GetSubkeys(): dynamic_info_value = sub_key.GetValueByName('DynamicInfo') if not dynamic_info_value: continue dynamic_info_record_map = None dynamic_info_value_data_size = len(dynamic_info_value.data) if dynamic_info_value_data_size == dynamic_info_size: dynamic_info_record_map = dynamic_info_map elif dynamic_info_value_data_size == dynamic_info2_size: dynamic_info_record_map = dynamic_info2_map else: if not dynamic_info_size_error_reported: parser_mediator.ProduceExtractionWarning( 'unsupported DynamicInfo value data size: {0:d}.'.format( dynamic_info_value_data_size)) dynamic_info_size_error_reported = True continue try: dynamic_info_record = self._ReadStructureFromByteStream( dynamic_info_value.data, 0, dynamic_info_record_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse DynamicInfo record with error: {0!s}.'.format( exception)) name = task_guids.get(sub_key.name, sub_key.name) values_dict = {} values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format( sub_key.name) event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) event_data = TaskCacheEventData() event_data.task_name = name event_data.task_identifier = sub_key.name last_registered_time = dynamic_info_record.last_registered_time if last_registered_time: # Note this is likely either the last registered time or # the update time. date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time) event = time_events.DateTimeValuesEvent( date_time, 'Last registered time') parser_mediator.ProduceEventWithEventData(event, event_data) launch_time = dynamic_info_record.launch_time if launch_time: # Note this is likely the launch time. date_time = dfdatetime_filetime.Filetime(timestamp=launch_time) event = time_events.DateTimeValuesEvent( date_time, 'Launch time') parser_mediator.ProduceEventWithEventData(event, event_data) unknown_time = getattr(dynamic_info_record, 'unknown_time', None) if unknown_time: date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_UNKNOWN) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ExtractEvents", "(", "self", ",", "parser_mediator", ",", "registry_key", ",", "*", "*", "kwargs", ")", ":", "dynamic_info_size_error_reported", "=", "False", "tasks_key", "=", "registry_key", ".", "GetSubkeyByName", "(", "'Tasks'", ")", "tree_key", "=", "registry_key", ".", "GetSubkeyByName", "(", "'Tree'", ")", "if", "not", "tasks_key", "or", "not", "tree_key", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'Task Cache is missing a Tasks or Tree sub key.'", ")", "return", "task_guids", "=", "{", "}", "for", "sub_key", "in", "tree_key", ".", "GetSubkeys", "(", ")", ":", "for", "value_key", ",", "id_value", "in", "self", ".", "_GetIdValue", "(", "sub_key", ")", ":", "# TODO: improve this check to a regex.", "# The GUID is in the form {%GUID%} and stored an UTF-16 little-endian", "# string and should be 78 bytes in size.", "id_value_data_size", "=", "len", "(", "id_value", ".", "data", ")", "if", "id_value_data_size", "!=", "78", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unsupported Id value data size: {0:d}.'", ".", "format", "(", "id_value_data_size", ")", ")", "continue", "guid_string", "=", "id_value", ".", "GetDataAsObject", "(", ")", "task_guids", "[", "guid_string", "]", "=", "value_key", ".", "name", "dynamic_info_map", "=", "self", ".", "_GetDataTypeMap", "(", "'dynamic_info_record'", ")", "dynamic_info2_map", "=", "self", ".", "_GetDataTypeMap", "(", "'dynamic_info2_record'", ")", "dynamic_info_size", "=", "dynamic_info_map", ".", "GetByteSize", "(", ")", "dynamic_info2_size", "=", "dynamic_info2_map", ".", "GetByteSize", "(", ")", "for", "sub_key", "in", "tasks_key", ".", "GetSubkeys", "(", ")", ":", "dynamic_info_value", "=", "sub_key", ".", "GetValueByName", "(", "'DynamicInfo'", ")", "if", "not", "dynamic_info_value", ":", "continue", "dynamic_info_record_map", "=", "None", "dynamic_info_value_data_size", "=", "len", "(", "dynamic_info_value", ".", "data", ")", "if", "dynamic_info_value_data_size", "==", "dynamic_info_size", ":", "dynamic_info_record_map", "=", "dynamic_info_map", "elif", "dynamic_info_value_data_size", "==", "dynamic_info2_size", ":", "dynamic_info_record_map", "=", "dynamic_info2_map", "else", ":", "if", "not", "dynamic_info_size_error_reported", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unsupported DynamicInfo value data size: {0:d}.'", ".", "format", "(", "dynamic_info_value_data_size", ")", ")", "dynamic_info_size_error_reported", "=", "True", "continue", "try", ":", "dynamic_info_record", "=", "self", ".", "_ReadStructureFromByteStream", "(", "dynamic_info_value", ".", "data", ",", "0", ",", "dynamic_info_record_map", ")", "except", "(", "ValueError", ",", "errors", ".", "ParseError", ")", "as", "exception", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to parse DynamicInfo record with error: {0!s}.'", ".", "format", "(", "exception", ")", ")", "name", "=", "task_guids", ".", "get", "(", "sub_key", ".", "name", ",", "sub_key", ".", "name", ")", "values_dict", "=", "{", "}", "values_dict", "[", "'Task: {0:s}'", ".", "format", "(", "name", ")", "]", "=", "'[ID: {0:s}]'", ".", "format", "(", "sub_key", ".", "name", ")", "event_data", "=", "windows_events", ".", "WindowsRegistryEventData", "(", ")", "event_data", ".", "key_path", "=", "registry_key", ".", "path", "event_data", ".", "offset", "=", "registry_key", ".", "offset", "event_data", ".", "regvalue", "=", "values_dict", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "registry_key", ".", 
"last_written_time", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "event_data", "=", "TaskCacheEventData", "(", ")", "event_data", ".", "task_name", "=", "name", "event_data", ".", "task_identifier", "=", "sub_key", ".", "name", "last_registered_time", "=", "dynamic_info_record", ".", "last_registered_time", "if", "last_registered_time", ":", "# Note this is likely either the last registered time or", "# the update time.", "date_time", "=", "dfdatetime_filetime", ".", "Filetime", "(", "timestamp", "=", "last_registered_time", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Last registered time'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "launch_time", "=", "dynamic_info_record", ".", "launch_time", "if", "launch_time", ":", "# Note this is likely the launch time.", "date_time", "=", "dfdatetime_filetime", ".", "Filetime", "(", "timestamp", "=", "launch_time", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Launch time'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "unknown_time", "=", "getattr", "(", "dynamic_info_record", ",", "'unknown_time'", ",", "None", ")", "if", "unknown_time", ":", "date_time", "=", "dfdatetime_filetime", ".", "Filetime", "(", "timestamp", "=", "unknown_time", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_UNKNOWN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
[ "Extracts", "events", "from", "a", "Windows", "Registry", "key", "." ]
python
train
40.541284
bird-house/twitcher
twitcher/__init__.py
https://github.com/bird-house/twitcher/blob/e6a36b3aeeacf44eec537434b0fb87c09ab54b5f/twitcher/__init__.py#L4-L24
def main(global_config, **settings): """ This function returns a Pyramid WSGI application. """ from pyramid.config import Configurator config = Configurator(settings=settings) # include twitcher components config.include('twitcher.config') config.include('twitcher.frontpage') config.include('twitcher.rpcinterface') config.include('twitcher.owsproxy') # tweens/middleware # TODO: maybe add tween for exception handling or use unknown_failure view config.include('twitcher.tweens') config.scan() return config.make_wsgi_app()
[ "def", "main", "(", "global_config", ",", "*", "*", "settings", ")", ":", "from", "pyramid", ".", "config", "import", "Configurator", "config", "=", "Configurator", "(", "settings", "=", "settings", ")", "# include twitcher components", "config", ".", "include", "(", "'twitcher.config'", ")", "config", ".", "include", "(", "'twitcher.frontpage'", ")", "config", ".", "include", "(", "'twitcher.rpcinterface'", ")", "config", ".", "include", "(", "'twitcher.owsproxy'", ")", "# tweens/middleware", "# TODO: maybe add tween for exception handling or use unknown_failure view", "config", ".", "include", "(", "'twitcher.tweens'", ")", "config", ".", "scan", "(", ")", "return", "config", ".", "make_wsgi_app", "(", ")" ]
This function returns a Pyramid WSGI application.
[ "This", "function", "returns", "a", "Pyramid", "WSGI", "application", "." ]
python
valid
27.142857
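`main` above is a standard Pyramid application factory: build a `Configurator`, include components, scan, and return the WSGI app. Below is a minimal self-contained app using the same flow, without the twitcher-specific `config.include()` calls; the route, view, and port are arbitrary.
```python
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response

def frontpage(request):
    return Response("hello from a minimal Pyramid app")

# Configurator -> routes/views -> make_wsgi_app(), the same shape as main().
config = Configurator(settings={})
config.add_route("frontpage", "/")
config.add_view(frontpage, route_name="frontpage")
app = config.make_wsgi_app()

if __name__ == "__main__":
    make_server("127.0.0.1", 8080, app).serve_forever()
```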
rhgrant10/Groupy
groupy/api/bots.py
https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/bots.py#L88-L97
def post(self, text, attachments=None): """Post a message as the bot. :param str text: the text of the message :param attachments: a list of attachments :type attachments: :class:`list` :return: ``True`` if successful :rtype: bool """ return self.manager.post(self.bot_id, text, attachments)
[ "def", "post", "(", "self", ",", "text", ",", "attachments", "=", "None", ")", ":", "return", "self", ".", "manager", ".", "post", "(", "self", ".", "bot_id", ",", "text", ",", "attachments", ")" ]
Post a message as the bot. :param str text: the text of the message :param attachments: a list of attachments :type attachments: :class:`list` :return: ``True`` if successful :rtype: bool
[ "Post", "a", "message", "as", "the", "bot", "." ]
python
train
34.7
Brazelton-Lab/bio_utils
bio_utils/blast_tools/filter_b6_evalue.py
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/blast_tools/filter_b6_evalue.py#L73-L77
def main(): """Open B6/M8 file, filter entries by E-Value, and write said entries""" for entry in b6_evalue_filter(args.b6, args.e_value): args.output.write(entry.write())
[ "def", "main", "(", ")", ":", "for", "entry", "in", "b6_evalue_filter", "(", "args", ".", "b6", ",", "args", ".", "e_value", ")", ":", "args", ".", "output", ".", "write", "(", "entry", ".", "write", "(", ")", ")" ]
Open B6/M8 file, filter entries by E-Value, and write said entries
[ "Open", "B6", "/", "M8", "file", "filter", "entries", "by", "E", "-", "Value", "and", "write", "said", "entries" ]
python
train
36.8
pypa/pipenv
pipenv/environment.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L481-L489
def is_installed(self, pkgname): """Given a package name, returns whether it is installed in the environment :param str pkgname: The name of a package :return: Whether the supplied package is installed in the environment :rtype: bool """ return any(d for d in self.get_distributions() if d.project_name == pkgname)
[ "def", "is_installed", "(", "self", ",", "pkgname", ")", ":", "return", "any", "(", "d", "for", "d", "in", "self", ".", "get_distributions", "(", ")", "if", "d", ".", "project_name", "==", "pkgname", ")" ]
Given a package name, returns whether it is installed in the environment :param str pkgname: The name of a package :return: Whether the supplied package is installed in the environment :rtype: bool
[ "Given", "a", "package", "name", "returns", "whether", "it", "is", "installed", "in", "the", "environment" ]
python
train
39.555556
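`is_installed` above scans the environment's distributions for a matching project name. The same membership test can be run against the current interpreter with `pkg_resources` instead of pipenv's `Environment.get_distributions()`; a small sketch:
```python
import pkg_resources

# Check the running interpreter's installed distributions by project name.
def is_installed(pkgname):
    return any(d.project_name == pkgname for d in pkg_resources.working_set)

print(is_installed("pip"))                       # True in most environments
print(is_installed("definitely-not-installed"))  # False
```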
materialsproject/pymatgen
pymatgen/core/spectrum.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/spectrum.py#L70-L87
def normalize(self, mode="max", value=1): """ Normalize the spectrum with respect to the sum of intensity Args: mode (str): Normalization mode. Supported modes are "max" (set the max y value to value, e.g., in XRD patterns), "sum" (set the sum of y to a value, i.e., like a probability density). value (float): Value to normalize to. Defaults to 1. """ if mode.lower() == "sum": factor = np.sum(self.y, axis=0) elif mode.lower() == "max": factor = np.max(self.y, axis=0) else: raise ValueError("Unsupported normalization mode %s!" % mode) self.y /= factor / value
[ "def", "normalize", "(", "self", ",", "mode", "=", "\"max\"", ",", "value", "=", "1", ")", ":", "if", "mode", ".", "lower", "(", ")", "==", "\"sum\"", ":", "factor", "=", "np", ".", "sum", "(", "self", ".", "y", ",", "axis", "=", "0", ")", "elif", "mode", ".", "lower", "(", ")", "==", "\"max\"", ":", "factor", "=", "np", ".", "max", "(", "self", ".", "y", ",", "axis", "=", "0", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported normalization mode %s!\"", "%", "mode", ")", "self", ".", "y", "/=", "factor", "/", "value" ]
Normalize the spectrum with respect to the sum of intensity Args: mode (str): Normalization mode. Supported modes are "max" (set the max y value to value, e.g., in XRD patterns), "sum" (set the sum of y to a value, i.e., like a probability density). value (float): Value to normalize to. Defaults to 1.
[ "Normalize", "the", "spectrum", "with", "respect", "to", "the", "sum", "of", "intensity" ]
python
train
39.111111
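Both modes of `normalize` reduce to dividing `y` by either its max or its sum, scaled so that statistic equals `value`. A quick numpy illustration with made-up intensities:
```python
import numpy as np

y = np.array([1.0, 3.0, 4.0])
value = 1

print(y / (np.max(y, axis=0) / value))  # [0.25 0.75 1.  ]    ("max" mode)
print(y / (np.sum(y, axis=0) / value))  # [0.125 0.375 0.5]   ("sum" mode)
```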
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L857-L872
def simxCopyPasteObjects(clientID, objectHandles, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' c_objectHandles = (ct.c_int*len(objectHandles))(*objectHandles) c_objectHandles = ct.cast(c_objectHandles,ct.POINTER(ct.c_int)) # IronPython needs this newObjectCount = ct.c_int() newObjectHandles = ct.POINTER(ct.c_int)() ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), ct.byref(newObjectHandles), ct.byref(newObjectCount), operationMode) newobj = [] if ret == 0: for i in range(newObjectCount.value): newobj.append(newObjectHandles[i]) return ret, newobj
[ "def", "simxCopyPasteObjects", "(", "clientID", ",", "objectHandles", ",", "operationMode", ")", ":", "c_objectHandles", "=", "(", "ct", ".", "c_int", "*", "len", "(", "objectHandles", ")", ")", "(", "*", "objectHandles", ")", "c_objectHandles", "=", "ct", ".", "cast", "(", "c_objectHandles", ",", "ct", ".", "POINTER", "(", "ct", ".", "c_int", ")", ")", "# IronPython needs this", "newObjectCount", "=", "ct", ".", "c_int", "(", ")", "newObjectHandles", "=", "ct", ".", "POINTER", "(", "ct", ".", "c_int", ")", "(", ")", "ret", "=", "c_CopyPasteObjects", "(", "clientID", ",", "c_objectHandles", ",", "len", "(", "objectHandles", ")", ",", "ct", ".", "byref", "(", "newObjectHandles", ")", ",", "ct", ".", "byref", "(", "newObjectCount", ")", ",", "operationMode", ")", "newobj", "=", "[", "]", "if", "ret", "==", "0", ":", "for", "i", "in", "range", "(", "newObjectCount", ".", "value", ")", ":", "newobj", ".", "append", "(", "newObjectHandles", "[", "i", "]", ")", "return", "ret", ",", "newobj" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
43.3125
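The remote-API wrapper above packs the Python handle list into a C int array before the call. That ctypes idiom is shown on its own below; the handle values are arbitrary and no V-REP call is made.
```python
import ctypes as ct

handles = [11, 42, 7]
# Build a C array of ints from a Python list, then view it through an int
# pointer, the shape the underlying C binding expects.
c_handles = (ct.c_int * len(handles))(*handles)
ptr = ct.cast(c_handles, ct.POINTER(ct.c_int))
print([ptr[i] for i in range(len(handles))])  # [11, 42, 7]
```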
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L1209-L1213
def list_subdomains(self, domain, limit=None, offset=None): """ Returns a list of all subdomains for the specified domain. """ return domain.list_subdomains(limit=limit, offset=offset)
[ "def", "list_subdomains", "(", "self", ",", "domain", ",", "limit", "=", "None", ",", "offset", "=", "None", ")", ":", "return", "domain", ".", "list_subdomains", "(", "limit", "=", "limit", ",", "offset", "=", "offset", ")" ]
Returns a list of all subdomains for the specified domain.
[ "Returns", "a", "list", "of", "all", "subdomains", "for", "the", "specified", "domain", "." ]
python
train
42.4
tensorpack/tensorpack
tensorpack/utils/viz.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/viz.py#L265-L322
def dump_dataflow_images(df, index=0, batched=True, number=1000, output_dir=None, scale=1, resize=None, viz=None, flipRGB=False): """ Dump or visualize images of a :class:`DataFlow`. Args: df (DataFlow): the DataFlow. index (int): the index of the image component. batched (bool): whether the component contains batched images (NHW or NHWC) or not (HW or HWC). number (int): how many datapoint to take from the DataFlow. output_dir (str): output directory to save images, default to not save. scale (float): scale the value, usually either 1 or 255. resize (tuple or None): tuple of (h, w) to resize the images to. viz (tuple or None): tuple of (h, w) determining the grid size to use with :func:`gen_stack_patches` for visualization. No visualization will happen by default. flipRGB (bool): apply a RGB<->BGR conversion or not. """ if output_dir: mkdir_p(output_dir) if viz is not None: viz = shape2d(viz) vizsize = viz[0] * viz[1] if resize is not None: resize = tuple(shape2d(resize)) vizlist = [] df.reset_state() cnt = 0 while True: for dp in df: if not batched: imgbatch = [dp[index]] else: imgbatch = dp[index] for img in imgbatch: cnt += 1 if cnt == number: return if scale != 1: img = img * scale if resize is not None: img = cv2.resize(img, resize) if flipRGB: img = img[:, :, ::-1] if output_dir: fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt)) cv2.imwrite(fname, img) if viz is not None: vizlist.append(img) if viz is not None and len(vizlist) >= vizsize: stack_patches( vizlist[:vizsize], nr_row=viz[0], nr_col=viz[1], viz=True) vizlist = vizlist[vizsize:]
[ "def", "dump_dataflow_images", "(", "df", ",", "index", "=", "0", ",", "batched", "=", "True", ",", "number", "=", "1000", ",", "output_dir", "=", "None", ",", "scale", "=", "1", ",", "resize", "=", "None", ",", "viz", "=", "None", ",", "flipRGB", "=", "False", ")", ":", "if", "output_dir", ":", "mkdir_p", "(", "output_dir", ")", "if", "viz", "is", "not", "None", ":", "viz", "=", "shape2d", "(", "viz", ")", "vizsize", "=", "viz", "[", "0", "]", "*", "viz", "[", "1", "]", "if", "resize", "is", "not", "None", ":", "resize", "=", "tuple", "(", "shape2d", "(", "resize", ")", ")", "vizlist", "=", "[", "]", "df", ".", "reset_state", "(", ")", "cnt", "=", "0", "while", "True", ":", "for", "dp", "in", "df", ":", "if", "not", "batched", ":", "imgbatch", "=", "[", "dp", "[", "index", "]", "]", "else", ":", "imgbatch", "=", "dp", "[", "index", "]", "for", "img", "in", "imgbatch", ":", "cnt", "+=", "1", "if", "cnt", "==", "number", ":", "return", "if", "scale", "!=", "1", ":", "img", "=", "img", "*", "scale", "if", "resize", "is", "not", "None", ":", "img", "=", "cv2", ".", "resize", "(", "img", ",", "resize", ")", "if", "flipRGB", ":", "img", "=", "img", "[", ":", ",", ":", ",", ":", ":", "-", "1", "]", "if", "output_dir", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'{:03d}.jpg'", ".", "format", "(", "cnt", ")", ")", "cv2", ".", "imwrite", "(", "fname", ",", "img", ")", "if", "viz", "is", "not", "None", ":", "vizlist", ".", "append", "(", "img", ")", "if", "viz", "is", "not", "None", "and", "len", "(", "vizlist", ")", ">=", "vizsize", ":", "stack_patches", "(", "vizlist", "[", ":", "vizsize", "]", ",", "nr_row", "=", "viz", "[", "0", "]", ",", "nr_col", "=", "viz", "[", "1", "]", ",", "viz", "=", "True", ")", "vizlist", "=", "vizlist", "[", "vizsize", ":", "]" ]
Dump or visualize images of a :class:`DataFlow`. Args: df (DataFlow): the DataFlow. index (int): the index of the image component. batched (bool): whether the component contains batched images (NHW or NHWC) or not (HW or HWC). number (int): how many datapoint to take from the DataFlow. output_dir (str): output directory to save images, default to not save. scale (float): scale the value, usually either 1 or 255. resize (tuple or None): tuple of (h, w) to resize the images to. viz (tuple or None): tuple of (h, w) determining the grid size to use with :func:`gen_stack_patches` for visualization. No visualization will happen by default. flipRGB (bool): apply a RGB<->BGR conversion or not.
[ "Dump", "or", "visualize", "images", "of", "a", ":", "class", ":", "DataFlow", "." ]
python
train
37.862069
PyCQA/pydocstyle
src/pydocstyle/violations.py
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/violations.py#L153-L168
def to_rst(cls) -> str: """Output the registry as reStructuredText, for documentation.""" sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n' blank_line = '|' + 78 * ' ' + '|\n' table = '' for group in cls.groups: table += sep_line table += blank_line table += '|' + '**{}**'.format(group.name).center(78) + '|\n' table += blank_line for error in group.errors: table += sep_line table += ('|' + error.code.center(6) + '| ' + error.short_desc.ljust(70) + '|\n') table += sep_line return table
[ "def", "to_rst", "(", "cls", ")", "->", "str", ":", "sep_line", "=", "'+'", "+", "6", "*", "'-'", "+", "'+'", "+", "'-'", "*", "71", "+", "'+\\n'", "blank_line", "=", "'|'", "+", "78", "*", "' '", "+", "'|\\n'", "table", "=", "''", "for", "group", "in", "cls", ".", "groups", ":", "table", "+=", "sep_line", "table", "+=", "blank_line", "table", "+=", "'|'", "+", "'**{}**'", ".", "format", "(", "group", ".", "name", ")", ".", "center", "(", "78", ")", "+", "'|\\n'", "table", "+=", "blank_line", "for", "error", "in", "group", ".", "errors", ":", "table", "+=", "sep_line", "table", "+=", "(", "'|'", "+", "error", ".", "code", ".", "center", "(", "6", ")", "+", "'| '", "+", "error", ".", "short_desc", ".", "ljust", "(", "70", ")", "+", "'|\\n'", ")", "table", "+=", "sep_line", "return", "table" ]
Output the registry as reStructuredText, for documentation.
[ "Output", "the", "registry", "as", "reStructuredText", "for", "documentation", "." ]
python
train
40.5
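`to_rst` builds a reStructuredText grid table out of fixed-width separator, blank, and content rows. The row-building pieces in isolation, using pydocstyle's D100 as the sample row:
```python
# Same fixed widths as to_rst(): a 6-character code column and a 70/71-character
# description column, so every row comes out 80 characters wide.
sep_line = "+" + 6 * "-" + "+" + "-" * 71 + "+\n"
blank_line = "|" + 78 * " " + "|\n"
row = "|" + "D100".center(6) + "| " + "Missing docstring in public module".ljust(70) + "|\n"
print(sep_line + blank_line + row + sep_line, end="")
```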
unixsurfer/anycast_healthchecker
anycast_healthchecker/servicecheck.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/servicecheck.py#L88-L123
def _run_check(self): """Execute a check command. Returns: True if the exit code of the command is 0 otherwise False. """ cmd = shlex.split(self.config['check_cmd']) self.log.info("running %s", ' '.join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) start_time = time.time() try: outs, errs = proc.communicate(timeout=self.config['check_timeout']) except subprocess.TimeoutExpired: self.log.error("check timed out") if proc.poll() is None: try: proc.kill() except PermissionError: self.log.warning("failed to kill check due to adequate " "access rights, check could be running " "under another user(root) via sudo") return False else: msg = "check duration {t:.3f}ms".format( t=(time.time() - start_time) * 1000) self.log.info(msg) if proc.returncode != 0: self.log.info("stderr from the check %s", errs) self.log.info("stdout from the check %s", outs) return proc.returncode == 0
[ "def", "_run_check", "(", "self", ")", ":", "cmd", "=", "shlex", ".", "split", "(", "self", ".", "config", "[", "'check_cmd'", "]", ")", "self", ".", "log", ".", "info", "(", "\"running %s\"", ",", "' '", ".", "join", "(", "cmd", ")", ")", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "start_time", "=", "time", ".", "time", "(", ")", "try", ":", "outs", ",", "errs", "=", "proc", ".", "communicate", "(", "timeout", "=", "self", ".", "config", "[", "'check_timeout'", "]", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "self", ".", "log", ".", "error", "(", "\"check timed out\"", ")", "if", "proc", ".", "poll", "(", ")", "is", "None", ":", "try", ":", "proc", ".", "kill", "(", ")", "except", "PermissionError", ":", "self", ".", "log", ".", "warning", "(", "\"failed to kill check due to adequate \"", "\"access rights, check could be running \"", "\"under another user(root) via sudo\"", ")", "return", "False", "else", ":", "msg", "=", "\"check duration {t:.3f}ms\"", ".", "format", "(", "t", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "*", "1000", ")", "self", ".", "log", ".", "info", "(", "msg", ")", "if", "proc", ".", "returncode", "!=", "0", ":", "self", ".", "log", ".", "info", "(", "\"stderr from the check %s\"", ",", "errs", ")", "self", ".", "log", ".", "info", "(", "\"stdout from the check %s\"", ",", "outs", ")", "return", "proc", ".", "returncode", "==", "0" ]
Execute a check command. Returns: True if the exit code of the command is 0 otherwise False.
[ "Execute", "a", "check", "command", "." ]
python
train
36.138889
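`_run_check` is a timeout-guarded subprocess run whose truth value is the command's exit status. The same pattern outside the class, using plain `subprocess`; the `sleep` commands assume a POSIX system.
```python
import shlex
import subprocess

def run_check(check_cmd, check_timeout):
    # Run the command, kill it if it exceeds the timeout, and report success
    # as "exit code 0", mirroring _run_check() above without the logging.
    proc = subprocess.Popen(shlex.split(check_cmd),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        proc.communicate(timeout=check_timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.communicate()
        return False
    return proc.returncode == 0

print(run_check("sleep 0.1", check_timeout=5))   # True
print(run_check("sleep 5", check_timeout=0.5))   # False (timed out, killed)
```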
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L1287-L1295
def is_unlocked(self): """``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``""" if self.is_public: return True if not self.is_protected: return True return self._key.unlocked
[ "def", "is_unlocked", "(", "self", ")", ":", "if", "self", ".", "is_public", ":", "return", "True", "if", "not", "self", ".", "is_protected", ":", "return", "True", "return", "self", ".", "_key", ".", "unlocked" ]
``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``
[ "False", "if", "this", "is", "a", "private", "key", "that", "is", "protected", "with", "a", "passphrase", "and", "has", "not", "yet", "been", "unlocked", "otherwise", "True" ]
python
train
32.666667
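A sketch of how the `is_unlocked` property is typically consumed with PGPy, assuming a passphrase-protected private key on disk; the file name and passphrase below are placeholders, not real values.
```python
import pgpy

# "private.asc" and the passphrase are placeholders for a protected private key.
key, _ = pgpy.PGPKey.from_file("private.asc")
print(key.is_unlocked)          # False: protected and not yet unlocked

with key.unlock("correct horse battery staple"):
    print(key.is_unlocked)      # True while inside the unlock context
```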
bearyinnovative/bearychat.py
bearychat/rtm_client_service.py
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client_service.py#L33-L48
def members(self): """Gets members of current team Returns: list of User Throws: RTMServiceError when request failed """ resp = self._rtm_client.get('v1/current_team.members?all=true') if resp.is_fail(): raise RTMServiceError( 'Failed to get members of current team', resp ) return resp.data['result']
[ "def", "members", "(", "self", ")", ":", "resp", "=", "self", ".", "_rtm_client", ".", "get", "(", "'v1/current_team.members?all=true'", ")", "if", "resp", ".", "is_fail", "(", ")", ":", "raise", "RTMServiceError", "(", "'Failed to get members of current team'", ",", "resp", ")", "return", "resp", ".", "data", "[", "'result'", "]" ]
Gets members of current team Returns: list of User Throws: RTMServiceError when request failed
[ "Gets", "members", "of", "current", "team" ]
python
train
26.5
wbond/oscrypto
oscrypto/_win/symmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/symmetric.py#L738-L796
def _encrypt(cipher, key, data, iv, padding): """ Encrypts plaintext :param cipher: A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key", "rc2", "rc4" :param key: The encryption key - a byte string 5-16 bytes long :param data: The plaintext - a byte string :param iv: The initialization vector - a byte string - unused for RC4 :param padding: Boolean, if padding should be used - unused for RC4 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext """ if not isinstance(key, byte_cls): raise TypeError(pretty_message( ''' key must be a byte string, not %s ''', type_name(key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if cipher != 'rc4' and not isinstance(iv, byte_cls): raise TypeError(pretty_message( ''' iv must be a byte string, not %s ''', type_name(iv) )) if cipher != 'rc4' and not padding: raise ValueError('padding must be specified') if _backend == 'winlegacy': return _advapi32_encrypt(cipher, key, data, iv, padding) return _bcrypt_encrypt(cipher, key, data, iv, padding)
[ "def", "_encrypt", "(", "cipher", ",", "key", ",", "data", ",", "iv", ",", "padding", ")", ":", "if", "not", "isinstance", "(", "key", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n key must be a byte string, not %s\n '''", ",", "type_name", "(", "key", ")", ")", ")", "if", "not", "isinstance", "(", "data", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n data must be a byte string, not %s\n '''", ",", "type_name", "(", "data", ")", ")", ")", "if", "cipher", "!=", "'rc4'", "and", "not", "isinstance", "(", "iv", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n iv must be a byte string, not %s\n '''", ",", "type_name", "(", "iv", ")", ")", ")", "if", "cipher", "!=", "'rc4'", "and", "not", "padding", ":", "raise", "ValueError", "(", "'padding must be specified'", ")", "if", "_backend", "==", "'winlegacy'", ":", "return", "_advapi32_encrypt", "(", "cipher", ",", "key", ",", "data", ",", "iv", ",", "padding", ")", "return", "_bcrypt_encrypt", "(", "cipher", ",", "key", ",", "data", ",", "iv", ",", "padding", ")" ]
Encrypts plaintext :param cipher: A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key", "rc2", "rc4" :param key: The encryption key - a byte string 5-16 bytes long :param data: The plaintext - a byte string :param iv: The initialization vector - a byte string - unused for RC4 :param padding: Boolean, if padding should be used - unused for RC4 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
[ "Encrypts", "plaintext" ]
python
valid
26.779661
wummel/linkchecker
linkcheck/colorama.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/colorama.py#L93-L96
def SetConsoleTextAttribute(stream_id, attrs): """Set a console text attribute.""" handle = handles[stream_id] return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
[ "def", "SetConsoleTextAttribute", "(", "stream_id", ",", "attrs", ")", ":", "handle", "=", "handles", "[", "stream_id", "]", "return", "windll", ".", "kernel32", ".", "SetConsoleTextAttribute", "(", "handle", ",", "attrs", ")" ]
Set a console text attribute.
[ "Set", "a", "console", "text", "attribute", "." ]
python
train
45.25
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py#L119-L187
def system(self, cmd): """Execute a command in a subshell. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- int : child's exitstatus """ # Get likely encoding for the output. enc = DEFAULT_ENCODING # Patterns to match on the output, for pexpect. We read input and # allow either a short timeout or EOF patterns = [pexpect.TIMEOUT, pexpect.EOF] # the index of the EOF pattern in the list. # even though we know it's 1, this call means we don't have to worry if # we change the above list, and forget to change this value: EOF_index = patterns.index(pexpect.EOF) # The size of the output stored so far in the process output buffer. # Since pexpect only appends to this buffer, each time we print we # record how far we've printed, so that next time we only print *new* # content from the buffer. out_size = 0 try: # Since we're not really searching the buffer for text patterns, we # can set pexpect's search window to be tiny and it won't matter. # We only search for the 'patterns' timeout or EOF, which aren't in # the text itself. #child = pexpect.spawn(pcmd, searchwindowsize=1) if hasattr(pexpect, 'spawnb'): child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U else: child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect flush = sys.stdout.flush while True: # res is the index of the pattern that caused the match, so we # know whether we've finished (if we matched EOF) or not res_idx = child.expect_list(patterns, self.read_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') flush() if res_idx==EOF_index: break # Update the pointer to what we've already printed out_size = len(child.before) except KeyboardInterrupt: # We need to send ^C to the process. The ascii code for '^C' is 3 # (the character is known as ETX for 'End of Text', see # curses.ascii.ETX). child.sendline(chr(3)) # Read and print any more output the program might produce on its # way out. try: out_size = len(child.before) child.expect_list(patterns, self.terminate_timeout) print(child.before[out_size:].decode(enc, 'replace'), end='') sys.stdout.flush() except KeyboardInterrupt: # Impatient users tend to type it multiple times pass finally: # Ensure the subprocess really is terminated child.terminate(force=True) # add isalive check, to ensure exitstatus is set: child.isalive() return child.exitstatus
[ "def", "system", "(", "self", ",", "cmd", ")", ":", "# Get likely encoding for the output.", "enc", "=", "DEFAULT_ENCODING", "# Patterns to match on the output, for pexpect. We read input and", "# allow either a short timeout or EOF", "patterns", "=", "[", "pexpect", ".", "TIMEOUT", ",", "pexpect", ".", "EOF", "]", "# the index of the EOF pattern in the list.", "# even though we know it's 1, this call means we don't have to worry if", "# we change the above list, and forget to change this value:", "EOF_index", "=", "patterns", ".", "index", "(", "pexpect", ".", "EOF", ")", "# The size of the output stored so far in the process output buffer.", "# Since pexpect only appends to this buffer, each time we print we", "# record how far we've printed, so that next time we only print *new*", "# content from the buffer.", "out_size", "=", "0", "try", ":", "# Since we're not really searching the buffer for text patterns, we", "# can set pexpect's search window to be tiny and it won't matter.", "# We only search for the 'patterns' timeout or EOF, which aren't in", "# the text itself.", "#child = pexpect.spawn(pcmd, searchwindowsize=1)", "if", "hasattr", "(", "pexpect", ",", "'spawnb'", ")", ":", "child", "=", "pexpect", ".", "spawnb", "(", "self", ".", "sh", ",", "args", "=", "[", "'-c'", ",", "cmd", "]", ")", "# Pexpect-U", "else", ":", "child", "=", "pexpect", ".", "spawn", "(", "self", ".", "sh", ",", "args", "=", "[", "'-c'", ",", "cmd", "]", ")", "# Vanilla Pexpect", "flush", "=", "sys", ".", "stdout", ".", "flush", "while", "True", ":", "# res is the index of the pattern that caused the match, so we", "# know whether we've finished (if we matched EOF) or not", "res_idx", "=", "child", ".", "expect_list", "(", "patterns", ",", "self", ".", "read_timeout", ")", "print", "(", "child", ".", "before", "[", "out_size", ":", "]", ".", "decode", "(", "enc", ",", "'replace'", ")", ",", "end", "=", "''", ")", "flush", "(", ")", "if", "res_idx", "==", "EOF_index", ":", "break", "# Update the pointer to what we've already printed", "out_size", "=", "len", "(", "child", ".", "before", ")", "except", "KeyboardInterrupt", ":", "# We need to send ^C to the process. The ascii code for '^C' is 3", "# (the character is known as ETX for 'End of Text', see", "# curses.ascii.ETX).", "child", ".", "sendline", "(", "chr", "(", "3", ")", ")", "# Read and print any more output the program might produce on its", "# way out.", "try", ":", "out_size", "=", "len", "(", "child", ".", "before", ")", "child", ".", "expect_list", "(", "patterns", ",", "self", ".", "terminate_timeout", ")", "print", "(", "child", ".", "before", "[", "out_size", ":", "]", ".", "decode", "(", "enc", ",", "'replace'", ")", ",", "end", "=", "''", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "except", "KeyboardInterrupt", ":", "# Impatient users tend to type it multiple times", "pass", "finally", ":", "# Ensure the subprocess really is terminated", "child", ".", "terminate", "(", "force", "=", "True", ")", "# add isalive check, to ensure exitstatus is set:", "child", ".", "isalive", "(", ")", "return", "child", ".", "exitstatus" ]
Execute a command in a subshell. Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- int : child's exitstatus
[ "Execute", "a", "command", "in", "a", "subshell", "." ]
python
test
44.478261
Hackerfleet/hfos
hfos/ui/auth.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/auth.py#L190-L275
def _handle_login(self, event): """Manual password based login""" # TODO: Refactor to simplify self.log("Auth request for ", event.username, 'client:', event.clientuuid) # TODO: Define the requirements for secure passwords etc. # They're also required in the Enrol module..! if (len(event.username) < 1) or (len(event.password) < 5): self.log("Illegal username or password received, login cancelled", lvl=warn) self._fail(event, 'Password or username too short') return client_config = None try: user_account = objectmodels['user'].find_one({ 'name': event.username }) # self.log("Account: %s" % user_account._fields, lvl=debug) if user_account is None: raise AuthenticationError except Exception as e: self.log("No userobject due to error: ", e, type(e), lvl=error) self._fail(event) return self.log("User found.", lvl=debug) if user_account.active is False: self.log("Account deactivated.") self._fail(event, 'Account deactivated.') return if not std_hash(event.password, self.salt) == user_account.passhash: self.log("Password was wrong!", lvl=warn) self._fail(event) return self.log("Passhash matches, checking client and profile.", lvl=debug) requested_client_uuid = event.requestedclientuuid if requested_client_uuid is not None: client_config = objectmodels['client'].find_one({ 'uuid': requested_client_uuid }) if client_config: self.log("Checking client configuration permissions", lvl=debug) # TODO: Shareable client configurations? if client_config.owner != user_account.uuid: client_config = None self.log("Unauthorized client configuration " "requested", lvl=warn) else: self.log("Unknown client configuration requested: ", requested_client_uuid, event.__dict__, lvl=warn) if not client_config: self.log("Creating new default client configuration") # Either no configuration was found or not requested # -> Create a new client configuration uuid = event.clientuuid if event.clientuuid is not None else str(uuid4()) client_config = objectmodels['client']({'uuid': uuid}) client_config.name = std_human_uid(kind='place') client_config.description = "New client configuration from " + user_account.name client_config.owner = user_account.uuid # TODO: Get client configuration storage done right, this one is too simple client_config.save() user_profile = self._get_profile(user_account) self._login(event, user_account, user_profile, client_config) self.log("Done with Login request", lvl=debug)
[ "def", "_handle_login", "(", "self", ",", "event", ")", ":", "# TODO: Refactor to simplify", "self", ".", "log", "(", "\"Auth request for \"", ",", "event", ".", "username", ",", "'client:'", ",", "event", ".", "clientuuid", ")", "# TODO: Define the requirements for secure passwords etc.", "# They're also required in the Enrol module..!", "if", "(", "len", "(", "event", ".", "username", ")", "<", "1", ")", "or", "(", "len", "(", "event", ".", "password", ")", "<", "5", ")", ":", "self", ".", "log", "(", "\"Illegal username or password received, login cancelled\"", ",", "lvl", "=", "warn", ")", "self", ".", "_fail", "(", "event", ",", "'Password or username too short'", ")", "return", "client_config", "=", "None", "try", ":", "user_account", "=", "objectmodels", "[", "'user'", "]", ".", "find_one", "(", "{", "'name'", ":", "event", ".", "username", "}", ")", "# self.log(\"Account: %s\" % user_account._fields, lvl=debug)", "if", "user_account", "is", "None", ":", "raise", "AuthenticationError", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"No userobject due to error: \"", ",", "e", ",", "type", "(", "e", ")", ",", "lvl", "=", "error", ")", "self", ".", "_fail", "(", "event", ")", "return", "self", ".", "log", "(", "\"User found.\"", ",", "lvl", "=", "debug", ")", "if", "user_account", ".", "active", "is", "False", ":", "self", ".", "log", "(", "\"Account deactivated.\"", ")", "self", ".", "_fail", "(", "event", ",", "'Account deactivated.'", ")", "return", "if", "not", "std_hash", "(", "event", ".", "password", ",", "self", ".", "salt", ")", "==", "user_account", ".", "passhash", ":", "self", ".", "log", "(", "\"Password was wrong!\"", ",", "lvl", "=", "warn", ")", "self", ".", "_fail", "(", "event", ")", "return", "self", ".", "log", "(", "\"Passhash matches, checking client and profile.\"", ",", "lvl", "=", "debug", ")", "requested_client_uuid", "=", "event", ".", "requestedclientuuid", "if", "requested_client_uuid", "is", "not", "None", ":", "client_config", "=", "objectmodels", "[", "'client'", "]", ".", "find_one", "(", "{", "'uuid'", ":", "requested_client_uuid", "}", ")", "if", "client_config", ":", "self", ".", "log", "(", "\"Checking client configuration permissions\"", ",", "lvl", "=", "debug", ")", "# TODO: Shareable client configurations?", "if", "client_config", ".", "owner", "!=", "user_account", ".", "uuid", ":", "client_config", "=", "None", "self", ".", "log", "(", "\"Unauthorized client configuration \"", "\"requested\"", ",", "lvl", "=", "warn", ")", "else", ":", "self", ".", "log", "(", "\"Unknown client configuration requested: \"", ",", "requested_client_uuid", ",", "event", ".", "__dict__", ",", "lvl", "=", "warn", ")", "if", "not", "client_config", ":", "self", ".", "log", "(", "\"Creating new default client configuration\"", ")", "# Either no configuration was found or not requested", "# -> Create a new client configuration", "uuid", "=", "event", ".", "clientuuid", "if", "event", ".", "clientuuid", "is", "not", "None", "else", "str", "(", "uuid4", "(", ")", ")", "client_config", "=", "objectmodels", "[", "'client'", "]", "(", "{", "'uuid'", ":", "uuid", "}", ")", "client_config", ".", "name", "=", "std_human_uid", "(", "kind", "=", "'place'", ")", "client_config", ".", "description", "=", "\"New client configuration from \"", "+", "user_account", ".", "name", "client_config", ".", "owner", "=", "user_account", ".", "uuid", "# TODO: Get client configuration storage done right, this one is too simple", "client_config", ".", "save", "(", ")", 
"user_profile", "=", "self", ".", "_get_profile", "(", "user_account", ")", "self", ".", "_login", "(", "event", ",", "user_account", ",", "user_profile", ",", "client_config", ")", "self", ".", "log", "(", "\"Done with Login request\"", ",", "lvl", "=", "debug", ")" ]
Manual password based login
[ "Manual", "password", "based", "login" ]
python
train
36.5
piotr-rusin/spam-lists
spam_lists/host_collections.py
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/host_collections.py#L64-L74
def add(self, host_value): """Add the given value to the collection. :param host: an ip address or a hostname :raises InvalidHostError: raised when the given value is not a valid ip address nor a hostname """ host_obj = self._host_factory(host_value) if self._get_match(host_obj) is not None: return self._add_new(host_obj)
[ "def", "add", "(", "self", ",", "host_value", ")", ":", "host_obj", "=", "self", ".", "_host_factory", "(", "host_value", ")", "if", "self", ".", "_get_match", "(", "host_obj", ")", "is", "not", "None", ":", "return", "self", ".", "_add_new", "(", "host_obj", ")" ]
Add the given value to the collection. :param host: an ip address or a hostname :raises InvalidHostError: raised when the given value is not a valid ip address nor a hostname
[ "Add", "the", "given", "value", "to", "the", "collection", "." ]
python
train
35.454545
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L632-L634
def instance_class(self): """Instance class.""" return Class(self._env, lib.EnvGetInstanceClass(self._env, self._ist))
[ "def", "instance_class", "(", "self", ")", ":", "return", "Class", "(", "self", ".", "_env", ",", "lib", ".", "EnvGetInstanceClass", "(", "self", ".", "_env", ",", "self", ".", "_ist", ")", ")" ]
Instance class.
[ "Instance", "class", "." ]
python
train
44
saghul/evergreen
evergreen/futures/_base.py
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/futures/_base.py#L180-L223
def as_completed(fs, timeout=None): """An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. """ with _AcquireFutures(fs): finished = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = set(fs) - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) timer = Timeout(timeout) timer.start() try: for future in finished: yield future while pending: waiter.event.wait() with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() for future in finished: yield future pending.remove(future) except Timeout as e: if timer is not e: raise raise TimeoutError('%d (of %d) futures unfinished' % (len(pending), len(fs))) finally: timer.cancel() for f in fs: f._waiters.remove(waiter)
[ "def", "as_completed", "(", "fs", ",", "timeout", "=", "None", ")", ":", "with", "_AcquireFutures", "(", "fs", ")", ":", "finished", "=", "set", "(", "f", "for", "f", "in", "fs", "if", "f", ".", "_state", "in", "[", "CANCELLED_AND_NOTIFIED", ",", "FINISHED", "]", ")", "pending", "=", "set", "(", "fs", ")", "-", "finished", "waiter", "=", "_create_and_install_waiters", "(", "fs", ",", "_AS_COMPLETED", ")", "timer", "=", "Timeout", "(", "timeout", ")", "timer", ".", "start", "(", ")", "try", ":", "for", "future", "in", "finished", ":", "yield", "future", "while", "pending", ":", "waiter", ".", "event", ".", "wait", "(", ")", "with", "waiter", ".", "lock", ":", "finished", "=", "waiter", ".", "finished_futures", "waiter", ".", "finished_futures", "=", "[", "]", "waiter", ".", "event", ".", "clear", "(", ")", "for", "future", "in", "finished", ":", "yield", "future", "pending", ".", "remove", "(", "future", ")", "except", "Timeout", "as", "e", ":", "if", "timer", "is", "not", "e", ":", "raise", "raise", "TimeoutError", "(", "'%d (of %d) futures unfinished'", "%", "(", "len", "(", "pending", ")", ",", "len", "(", "fs", ")", ")", ")", "finally", ":", "timer", ".", "cancel", "(", ")", "for", "f", "in", "fs", ":", "f", ".", "_waiters", ".", "remove", "(", "waiter", ")" ]
An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout.
[ "An", "iterator", "over", "the", "given", "futures", "that", "yields", "each", "as", "it", "completes", "." ]
python
train
33.136364
dshean/pygeotools
pygeotools/lib/warplib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/warplib.py#L416-L515
def warp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', warptype=memwarp, outdir=None, dst_ndv=None, verbose=True, debug=False): """This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds Parameters ---------- src_ds_list : list of gdal.Dataset objects List of original datasets to be warped res : arbitrary type Desired output resolution extent : arbitrary type Desired output extent t_srs : arbitrary type Desired output spatial reference r : str Desired resampling algorithm warptype : function Desired warp type (write to memory or disk) outdir : str Desired output directory (for disk warp) dst_ndv : float Desired output NoData Value verbose : bool Print warp parameters debug : bool Print extra information for debugging purposes Returns ------- out_ds_list : list of gdal.Dataset objects List of warped datasets (either in memory or on disk) """ #Type cast arguments as str for evaluation #Avoid path errors #res = str(res) #extent = str(extent) #t_srs = str(t_srs) #Parse the input t_srs = parse_srs(t_srs, src_ds_list) res = parse_res(res, src_ds_list, t_srs) extent = parse_extent(extent, src_ds_list, t_srs) if verbose: print("\nWarping all inputs to the following:") print("Resolution: %s" % res) print("Extent: %s" % str(extent)) print("Projection: '%s'" % t_srs.ExportToProj4()) print("Resampling alg: %s\n" % r) out_ds_list = [] for i, ds in enumerate(src_ds_list): fn_list = ds.GetFileList() fn = '[memory]' if fn_list is not None: fn = fn_list[0] if verbose: print("%i of %i: %s" % (i+1, len(src_ds_list), fn)) #If input srs are different, must warp ds_t_srs = geolib.get_ds_srs(ds) srscheck = bool(t_srs.IsSame(ds_t_srs)) if debug: print('\n%s' % ds_t_srs.ExportToWkt()) print('%s\n' % t_srs.ExportToWkt()) print('srscheck: %s\n' % srscheck) rescheck = False extentcheck = False #if srscheck: #Extract info from ds to see if warp is necessary ds_res = geolib.get_res(ds, square=True)[0] ds_extent = geolib.ds_extent(ds) #Note: these checks necessary to handle rounding and precision issues #Round extent and res to nearest mm precision = 1E-3 #Or if t_srs has units of degrees if ds_t_srs.IsGeographic(): precision = 1E-8 rescheck = (res is None) or geolib.res_compare(res, ds_res, precision=precision) extentcheck = (extent is None) or geolib.extent_compare(extent, ds_extent, precision=precision) if debug: print('\n%s, %s\n' % (ds_res, res)) print('%s' % ds_extent) print('%s\n' % extent) print('rescheck: %s' % rescheck) print('extentcheck: %s\n' % extentcheck) #If the ds passes all three, it is identical to desired output, short circuit if rescheck and extentcheck and srscheck: out_ds_list.append(ds) else: dst_ds = warptype(ds, res, extent, t_srs, r, outdir, dst_ndv=dst_ndv, verbose=verbose) out_ds_list.append(dst_ds) return out_ds_list
[ "def", "warp_multi", "(", "src_ds_list", ",", "res", "=", "'first'", ",", "extent", "=", "'intersection'", ",", "t_srs", "=", "'first'", ",", "r", "=", "'cubic'", ",", "warptype", "=", "memwarp", ",", "outdir", "=", "None", ",", "dst_ndv", "=", "None", ",", "verbose", "=", "True", ",", "debug", "=", "False", ")", ":", "#Type cast arguments as str for evaluation", "#Avoid path errors", "#res = str(res)", "#extent = str(extent)", "#t_srs = str(t_srs)", "#Parse the input", "t_srs", "=", "parse_srs", "(", "t_srs", ",", "src_ds_list", ")", "res", "=", "parse_res", "(", "res", ",", "src_ds_list", ",", "t_srs", ")", "extent", "=", "parse_extent", "(", "extent", ",", "src_ds_list", ",", "t_srs", ")", "if", "verbose", ":", "print", "(", "\"\\nWarping all inputs to the following:\"", ")", "print", "(", "\"Resolution: %s\"", "%", "res", ")", "print", "(", "\"Extent: %s\"", "%", "str", "(", "extent", ")", ")", "print", "(", "\"Projection: '%s'\"", "%", "t_srs", ".", "ExportToProj4", "(", ")", ")", "print", "(", "\"Resampling alg: %s\\n\"", "%", "r", ")", "out_ds_list", "=", "[", "]", "for", "i", ",", "ds", "in", "enumerate", "(", "src_ds_list", ")", ":", "fn_list", "=", "ds", ".", "GetFileList", "(", ")", "fn", "=", "'[memory]'", "if", "fn_list", "is", "not", "None", ":", "fn", "=", "fn_list", "[", "0", "]", "if", "verbose", ":", "print", "(", "\"%i of %i: %s\"", "%", "(", "i", "+", "1", ",", "len", "(", "src_ds_list", ")", ",", "fn", ")", ")", "#If input srs are different, must warp", "ds_t_srs", "=", "geolib", ".", "get_ds_srs", "(", "ds", ")", "srscheck", "=", "bool", "(", "t_srs", ".", "IsSame", "(", "ds_t_srs", ")", ")", "if", "debug", ":", "print", "(", "'\\n%s'", "%", "ds_t_srs", ".", "ExportToWkt", "(", ")", ")", "print", "(", "'%s\\n'", "%", "t_srs", ".", "ExportToWkt", "(", ")", ")", "print", "(", "'srscheck: %s\\n'", "%", "srscheck", ")", "rescheck", "=", "False", "extentcheck", "=", "False", "#if srscheck:", "#Extract info from ds to see if warp is necessary", "ds_res", "=", "geolib", ".", "get_res", "(", "ds", ",", "square", "=", "True", ")", "[", "0", "]", "ds_extent", "=", "geolib", ".", "ds_extent", "(", "ds", ")", "#Note: these checks necessary to handle rounding and precision issues", "#Round extent and res to nearest mm", "precision", "=", "1E-3", "#Or if t_srs has units of degrees", "if", "ds_t_srs", ".", "IsGeographic", "(", ")", ":", "precision", "=", "1E-8", "rescheck", "=", "(", "res", "is", "None", ")", "or", "geolib", ".", "res_compare", "(", "res", ",", "ds_res", ",", "precision", "=", "precision", ")", "extentcheck", "=", "(", "extent", "is", "None", ")", "or", "geolib", ".", "extent_compare", "(", "extent", ",", "ds_extent", ",", "precision", "=", "precision", ")", "if", "debug", ":", "print", "(", "'\\n%s, %s\\n'", "%", "(", "ds_res", ",", "res", ")", ")", "print", "(", "'%s'", "%", "ds_extent", ")", "print", "(", "'%s\\n'", "%", "extent", ")", "print", "(", "'rescheck: %s'", "%", "rescheck", ")", "print", "(", "'extentcheck: %s\\n'", "%", "extentcheck", ")", "#If the ds passes all three, it is identical to desired output, short circuit", "if", "rescheck", "and", "extentcheck", "and", "srscheck", ":", "out_ds_list", ".", "append", "(", "ds", ")", "else", ":", "dst_ds", "=", "warptype", "(", "ds", ",", "res", ",", "extent", ",", "t_srs", ",", "r", ",", "outdir", ",", "dst_ndv", "=", "dst_ndv", ",", "verbose", "=", "verbose", ")", "out_ds_list", ".", "append", "(", "dst_ds", ")", "return", "out_ds_list" ]
This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds Parameters ---------- src_ds_list : list of gdal.Dataset objects List of original datasets to be warped res : arbitrary type Desired output resolution extent : arbitrary type Desired output extent t_srs : arbitrary type Desired output spatial reference r : str Desired resampling algorithm warptype : function Desired warp type (write to memory or disk) outdir : str Desired output directory (for disk warp) dst_ndv : float Desired output NoData Value verbose : bool Print warp parameters debug : bool Print extra information for debugging purposes Returns ------- out_ds_list : list of gdal.Dataset objects List of warped datasets (either in memory or on disk)
[ "This", "parses", "and", "checks", "inputs", "then", "calls", "desired", "warp", "function", "with", "appropriate", "arguments", "for", "each", "input", "ds", "Parameters", "----------", "src_ds_list", ":", "list", "of", "gdal", ".", "Dataset", "objects", "List", "of", "original", "datasets", "to", "be", "warped", "res", ":", "arbitrary", "type", "Desired", "output", "resolution", "extent", ":", "arbitrary", "type", "Desired", "output", "extent", "t_srs", ":", "arbitrary", "type", "Desired", "output", "spatial", "reference", "r", ":", "str", "Desired", "resampling", "algorithm", "warptype", ":", "function", "Desired", "warp", "type", "(", "write", "to", "memory", "or", "disk", ")", "outdir", ":", "str", "Desired", "output", "directory", "(", "for", "disk", "warp", ")", "dst_ndv", ":", "float", "Desired", "output", "NoData", "Value", "verbose", ":", "bool", "Print", "warp", "parameters", "debug", ":", "bool", "Print", "extra", "information", "for", "debugging", "purposes" ]
python
train
34.01
hellosign/hellosign-python-sdk
hellosign_sdk/hsclient.py
https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/hsclient.py#L1001-L1074
def create_unclaimed_draft(self, test_mode=False, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False): ''' Creates a new Draft that can be claimed using the claim URL Creates a new Draft that can be claimed using the claim URL. The first authenticated user to access the URL will claim the Draft and will be shown either the "Sign and send" or the "Request signature" page with the Draft loaded. Subsequent access to the claim URL will result in a 404. If the type is "send_document" then only the file parameter is required. If the type is "request_signature", then the identities of the signers and optionally the location of signing elements on the page are also required. Args: test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False. files (list of str): The uploaded file(s) to send for signature file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls` draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional. subject (str, optional): The subject in the email that will be sent to the signers message (str, optional): The custom message in the email that will be sent to the signers signers (list of dict): A list of signers, which each has the following attributes: name (str): The name of the signer email_address (str): Email address of the signer order (str, optional): The order the signer is required to sign in cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign. form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest) metadata (dict, optional): Metadata to associate with the draft use_preexisting_fields (bool): Whether to use preexisting PDF fields allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0. Returns: An UnclaimedDraft object ''' self._check_required_fields({ 'draft_type': draft_type }, [{ "files": files, "file_urls": file_urls }] ) params = { 'test_mode': test_mode, 'files': files, 'file_urls': file_urls, 'draft_type': draft_type, 'subject': subject, 'message': message, 'signing_redirect_url': signing_redirect_url, 'signers': signers, 'cc_email_addresses': cc_email_addresses, 'form_fields_per_document': form_fields_per_document, 'metadata': metadata, 'use_preexisting_fields': use_preexisting_fields, 'allow_decline': allow_decline } return self._create_unclaimed_draft(**params)
[ "def", "create_unclaimed_draft", "(", "self", ",", "test_mode", "=", "False", ",", "files", "=", "None", ",", "file_urls", "=", "None", ",", "draft_type", "=", "None", ",", "subject", "=", "None", ",", "message", "=", "None", ",", "signers", "=", "None", ",", "cc_email_addresses", "=", "None", ",", "signing_redirect_url", "=", "None", ",", "form_fields_per_document", "=", "None", ",", "metadata", "=", "None", ",", "use_preexisting_fields", "=", "False", ",", "allow_decline", "=", "False", ")", ":", "self", ".", "_check_required_fields", "(", "{", "'draft_type'", ":", "draft_type", "}", ",", "[", "{", "\"files\"", ":", "files", ",", "\"file_urls\"", ":", "file_urls", "}", "]", ")", "params", "=", "{", "'test_mode'", ":", "test_mode", ",", "'files'", ":", "files", ",", "'file_urls'", ":", "file_urls", ",", "'draft_type'", ":", "draft_type", ",", "'subject'", ":", "subject", ",", "'message'", ":", "message", ",", "'signing_redirect_url'", ":", "signing_redirect_url", ",", "'signers'", ":", "signers", ",", "'cc_email_addresses'", ":", "cc_email_addresses", ",", "'form_fields_per_document'", ":", "form_fields_per_document", ",", "'metadata'", ":", "metadata", ",", "'use_preexisting_fields'", ":", "use_preexisting_fields", ",", "'allow_decline'", ":", "allow_decline", "}", "return", "self", ".", "_create_unclaimed_draft", "(", "*", "*", "params", ")" ]
Creates a new Draft that can be claimed using the claim URL Creates a new Draft that can be claimed using the claim URL. The first authenticated user to access the URL will claim the Draft and will be shown either the "Sign and send" or the "Request signature" page with the Draft loaded. Subsequent access to the claim URL will result in a 404. If the type is "send_document" then only the file parameter is required. If the type is "request_signature", then the identities of the signers and optionally the location of signing elements on the page are also required. Args: test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False. files (list of str): The uploaded file(s) to send for signature file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls` draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional. subject (str, optional): The subject in the email that will be sent to the signers message (str, optional): The custom message in the email that will be sent to the signers signers (list of dict): A list of signers, which each has the following attributes: name (str): The name of the signer email_address (str): Email address of the signer order (str, optional): The order the signer is required to sign in cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign. form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest) metadata (dict, optional): Metadata to associate with the draft use_preexisting_fields (bool): Whether to use preexisting PDF fields allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0. Returns: An UnclaimedDraft object
[ "Creates", "a", "new", "Draft", "that", "can", "be", "claimed", "using", "the", "claim", "URL" ]
python
train
53.810811
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L583-L585
def update_ext(self, path, id, body=None): """Client extension hook for update.""" return self.put(path % id, body=body)
[ "def", "update_ext", "(", "self", ",", "path", ",", "id", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "path", "%", "id", ",", "body", "=", "body", ")" ]
Client extension hook for update.
[ "Client", "extension", "hook", "for", "update", "." ]
python
train
44.666667
theelous3/asks
asks/request_object.py
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L462-L509
async def _multipart(self, files_dict): ''' Forms multipart requests from a dict with name, path k/vs. Name does not have to be the actual file name. Args: files_dict (dict): A dict of `filename:filepath`s, to be sent as multipart files. Returns: multip_pkg (str): The strings representation of the content body, multipart formatted. ''' boundary = bytes(_BOUNDARY, self.encoding) hder_format = 'Content-Disposition: form-data; name="{}"' hder_format_io = '; filename="{}"' multip_pkg = b'' num_of_parts = len(files_dict) for index, kv in enumerate(files_dict.items(), start=1): multip_pkg += (b'--' + boundary + b'\r\n') k, v = kv try: pkg_body = await self._file_manager(v) multip_pkg += bytes(hder_format.format(k) + hder_format_io.format(basename(v)), self.encoding) mime_type = mimetypes.guess_type(basename(v)) if not mime_type[1]: mime_type = 'application/octet-stream' else: mime_type = '/'.join(mime_type) multip_pkg += bytes('; Content-Type: ' + mime_type, self.encoding) multip_pkg += b'\r\n'*2 + pkg_body except (TypeError, FileNotFoundError): pkg_body = bytes(v, self.encoding) + b'\r\n' multip_pkg += bytes(hder_format.format(k) + '\r\n'*2, self.encoding) multip_pkg += pkg_body if index == num_of_parts: multip_pkg += b'--' + boundary + b'--\r\n' return multip_pkg
[ "async", "def", "_multipart", "(", "self", ",", "files_dict", ")", ":", "boundary", "=", "bytes", "(", "_BOUNDARY", ",", "self", ".", "encoding", ")", "hder_format", "=", "'Content-Disposition: form-data; name=\"{}\"'", "hder_format_io", "=", "'; filename=\"{}\"'", "multip_pkg", "=", "b''", "num_of_parts", "=", "len", "(", "files_dict", ")", "for", "index", ",", "kv", "in", "enumerate", "(", "files_dict", ".", "items", "(", ")", ",", "start", "=", "1", ")", ":", "multip_pkg", "+=", "(", "b'--'", "+", "boundary", "+", "b'\\r\\n'", ")", "k", ",", "v", "=", "kv", "try", ":", "pkg_body", "=", "await", "self", ".", "_file_manager", "(", "v", ")", "multip_pkg", "+=", "bytes", "(", "hder_format", ".", "format", "(", "k", ")", "+", "hder_format_io", ".", "format", "(", "basename", "(", "v", ")", ")", ",", "self", ".", "encoding", ")", "mime_type", "=", "mimetypes", ".", "guess_type", "(", "basename", "(", "v", ")", ")", "if", "not", "mime_type", "[", "1", "]", ":", "mime_type", "=", "'application/octet-stream'", "else", ":", "mime_type", "=", "'/'", ".", "join", "(", "mime_type", ")", "multip_pkg", "+=", "bytes", "(", "'; Content-Type: '", "+", "mime_type", ",", "self", ".", "encoding", ")", "multip_pkg", "+=", "b'\\r\\n'", "*", "2", "+", "pkg_body", "except", "(", "TypeError", ",", "FileNotFoundError", ")", ":", "pkg_body", "=", "bytes", "(", "v", ",", "self", ".", "encoding", ")", "+", "b'\\r\\n'", "multip_pkg", "+=", "bytes", "(", "hder_format", ".", "format", "(", "k", ")", "+", "'\\r\\n'", "*", "2", ",", "self", ".", "encoding", ")", "multip_pkg", "+=", "pkg_body", "if", "index", "==", "num_of_parts", ":", "multip_pkg", "+=", "b'--'", "+", "boundary", "+", "b'--\\r\\n'", "return", "multip_pkg" ]
Forms multipart requests from a dict with name, path k/vs. Name does not have to be the actual file name. Args: files_dict (dict): A dict of `filename:filepath`s, to be sent as multipart files. Returns: multip_pkg (str): The strings representation of the content body, multipart formatted.
[ "Forms", "multipart", "requests", "from", "a", "dict", "with", "name", "path", "k", "/", "vs", ".", "Name", "does", "not", "have", "to", "be", "the", "actual", "file", "name", "." ]
python
train
37.75
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1361-L1376
def create_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False): """Create the DCNM OUT partition and update the result. """ res = fw_const.DCNM_OUT_PART_CREATE_SUCCESS tenant_name = fw_dict.get('tenant_name') ret = True try: self._create_out_partition(tenant_id, tenant_name) except Exception as exc: LOG.error("Create of Out Partition failed for tenant " "%(tenant)s ,Exception %(exc)s", {'tenant': tenant_id, 'exc': str(exc)}) res = fw_const.DCNM_OUT_PART_CREATE_FAIL ret = False self.update_fw_db_result(tenant_id, dcnm_status=res) LOG.info("Out partition created") return ret
[ "def", "create_dcnm_out_part", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", "=", "False", ")", ":", "res", "=", "fw_const", ".", "DCNM_OUT_PART_CREATE_SUCCESS", "tenant_name", "=", "fw_dict", ".", "get", "(", "'tenant_name'", ")", "ret", "=", "True", "try", ":", "self", ".", "_create_out_partition", "(", "tenant_id", ",", "tenant_name", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"Create of Out Partition failed for tenant \"", "\"%(tenant)s ,Exception %(exc)s\"", ",", "{", "'tenant'", ":", "tenant_id", ",", "'exc'", ":", "str", "(", "exc", ")", "}", ")", "res", "=", "fw_const", ".", "DCNM_OUT_PART_CREATE_FAIL", "ret", "=", "False", "self", ".", "update_fw_db_result", "(", "tenant_id", ",", "dcnm_status", "=", "res", ")", "LOG", ".", "info", "(", "\"Out partition created\"", ")", "return", "ret" ]
Create the DCNM OUT partition and update the result.
[ "Create", "the", "DCNM", "OUT", "partition", "and", "update", "the", "result", "." ]
python
train
45.875
materialsproject/pymatgen
pymatgen/core/surface.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/surface.py#L614-L637
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False): """ Class method for adding a site at a specified point in a slab. Will add the corresponding site on the other side of the slab to maintain equivalent surfaces. Arg: specie (str): The specie to add point (coords): The coordinate of the site in the slab to add. coords_are_cartesian (bool): Is the point in cartesian coordinates Returns: (Slab): The modified slab """ # For now just use the species of the # surface atom as the element to add # Get the index of the corresponding site at the bottom point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian) self.append(specie, point, coords_are_cartesian=coords_are_cartesian) self.append(specie, point2, coords_are_cartesian=coords_are_cartesian)
[ "def", "symmetrically_add_atom", "(", "self", ",", "specie", ",", "point", ",", "coords_are_cartesian", "=", "False", ")", ":", "# For now just use the species of the", "# surface atom as the element to add", "# Get the index of the corresponding site at the bottom", "point2", "=", "self", ".", "get_symmetric_site", "(", "point", ",", "cartesian", "=", "coords_are_cartesian", ")", "self", ".", "append", "(", "specie", ",", "point", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ")", "self", ".", "append", "(", "specie", ",", "point2", ",", "coords_are_cartesian", "=", "coords_are_cartesian", ")" ]
Class method for adding a site at a specified point in a slab. Will add the corresponding site on the other side of the slab to maintain equivalent surfaces. Arg: specie (str): The specie to add point (coords): The coordinate of the site in the slab to add. coords_are_cartesian (bool): Is the point in cartesian coordinates Returns: (Slab): The modified slab
[ "Class", "method", "for", "adding", "a", "site", "at", "a", "specified", "point", "in", "a", "slab", ".", "Will", "add", "the", "corresponding", "site", "on", "the", "other", "side", "of", "the", "slab", "to", "maintain", "equivalent", "surfaces", "." ]
python
train
38.791667
JarryShaw/PyPCAPKit
src/protocols/internet/hip.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L380-L418
def _read_para_r1_counter(self, code, cbit, clen, *, desc, length, version): """Read HIP R1_COUNTER parameter. Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reserved, 4 bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | R1 generation counter, 8 bytes | | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 ri_counter.type Parameter Type 1 15 ri_counter.critical Critical Bit 2 16 ri_counter.length Length of Contents 4 32 - Reserved 8 64 ri_counter.count Generation of Valid Puzzles """ if clen != 12: raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format') if code == 128 and version != 1: raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid parameter') _resv = self._read_fileng(4) _genc = self._read_unpack(8) r1_counter = dict( type=desc, critical=cbit, length=clen, count=_genc, ) return r1_counter
[ "def", "_read_para_r1_counter", "(", "self", ",", "code", ",", "cbit", ",", "clen", ",", "*", ",", "desc", ",", "length", ",", "version", ")", ":", "if", "clen", "!=", "12", ":", "raise", "ProtocolError", "(", "f'HIPv{version}: [Parano {code}] invalid format'", ")", "if", "code", "==", "128", "and", "version", "!=", "1", ":", "raise", "ProtocolError", "(", "f'HIPv{version}: [Parano {code}] invalid parameter'", ")", "_resv", "=", "self", ".", "_read_fileng", "(", "4", ")", "_genc", "=", "self", ".", "_read_unpack", "(", "8", ")", "r1_counter", "=", "dict", "(", "type", "=", "desc", ",", "critical", "=", "cbit", ",", "length", "=", "clen", ",", "count", "=", "_genc", ",", ")", "return", "r1_counter" ]
Read HIP R1_COUNTER parameter. Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reserved, 4 bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | R1 generation counter, 8 bytes | | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 ri_counter.type Parameter Type 1 15 ri_counter.critical Critical Bit 2 16 ri_counter.length Length of Contents 4 32 - Reserved 8 64 ri_counter.count Generation of Valid Puzzles
[ "Read", "HIP", "R1_COUNTER", "parameter", "." ]
python
train
47.333333
mfcloud/python-zvm-sdk
zvmsdk/smtclient.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/smtclient.py#L261-L334
def _parse_vswitch_inspect_data(self, rd_list): """ Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get inspect data. """ def _parse_value(data_list, idx, keyword, offset): return idx + offset, data_list[idx].rpartition(keyword)[2].strip() vsw_dict = {} with zvmutils.expect_invalid_resp_data(): # vswitch count idx = 0 idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2) vsw_dict['vswitch_count'] = int(vsw_count) # deal with each vswitch data vsw_dict['vswitches'] = [] for i in range(vsw_dict['vswitch_count']): vsw_data = {} # skip vswitch number idx += 1 # vswitch name idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1) vsw_data['vswitch_name'] = vsw_name # uplink count idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1) # skip uplink data idx += int(up_count) * 9 # skip bridge data idx += 8 # nic count vsw_data['nics'] = [] idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1) nic_count = int(nic_count) for j in range(nic_count): nic_data = {} idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1) userid, toss, vdev = nic_id.partition(' ') nic_data['userid'] = userid nic_data['vdev'] = vdev idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx, 'nic_fr_rx:', 1 ) idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_rx_dsc:', 1 ) idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx, 'nic_fr_rx_err:', 1 ) idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx, 'nic_fr_tx:', 1 ) idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_tx_dsc:', 1 ) idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx, 'nic_fr_tx_err:', 1 ) idx, nic_data['nic_rx'] = _parse_value(rd_list, idx, 'nic_rx:', 1 ) idx, nic_data['nic_tx'] = _parse_value(rd_list, idx, 'nic_tx:', 1 ) vsw_data['nics'].append(nic_data) # vlan count idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1) # skip vlan data idx += int(vlan_count) * 3 # skip the blank line idx += 1 vsw_dict['vswitches'].append(vsw_data) return vsw_dict
[ "def", "_parse_vswitch_inspect_data", "(", "self", ",", "rd_list", ")", ":", "def", "_parse_value", "(", "data_list", ",", "idx", ",", "keyword", ",", "offset", ")", ":", "return", "idx", "+", "offset", ",", "data_list", "[", "idx", "]", ".", "rpartition", "(", "keyword", ")", "[", "2", "]", ".", "strip", "(", ")", "vsw_dict", "=", "{", "}", "with", "zvmutils", ".", "expect_invalid_resp_data", "(", ")", ":", "# vswitch count", "idx", "=", "0", "idx", ",", "vsw_count", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'vswitch count:'", ",", "2", ")", "vsw_dict", "[", "'vswitch_count'", "]", "=", "int", "(", "vsw_count", ")", "# deal with each vswitch data", "vsw_dict", "[", "'vswitches'", "]", "=", "[", "]", "for", "i", "in", "range", "(", "vsw_dict", "[", "'vswitch_count'", "]", ")", ":", "vsw_data", "=", "{", "}", "# skip vswitch number", "idx", "+=", "1", "# vswitch name", "idx", ",", "vsw_name", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'vswitch name:'", ",", "1", ")", "vsw_data", "[", "'vswitch_name'", "]", "=", "vsw_name", "# uplink count", "idx", ",", "up_count", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'uplink count:'", ",", "1", ")", "# skip uplink data", "idx", "+=", "int", "(", "up_count", ")", "*", "9", "# skip bridge data", "idx", "+=", "8", "# nic count", "vsw_data", "[", "'nics'", "]", "=", "[", "]", "idx", ",", "nic_count", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic count:'", ",", "1", ")", "nic_count", "=", "int", "(", "nic_count", ")", "for", "j", "in", "range", "(", "nic_count", ")", ":", "nic_data", "=", "{", "}", "idx", ",", "nic_id", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_id:'", ",", "1", ")", "userid", ",", "toss", ",", "vdev", "=", "nic_id", ".", "partition", "(", "' '", ")", "nic_data", "[", "'userid'", "]", "=", "userid", "nic_data", "[", "'vdev'", "]", "=", "vdev", "idx", ",", "nic_data", "[", "'nic_fr_rx'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_rx:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_fr_rx_dsc'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_rx_dsc:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_fr_rx_err'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_rx_err:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_fr_tx'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_tx:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_fr_tx_dsc'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_tx_dsc:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_fr_tx_err'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_fr_tx_err:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_rx'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_rx:'", ",", "1", ")", "idx", ",", "nic_data", "[", "'nic_tx'", "]", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'nic_tx:'", ",", "1", ")", "vsw_data", "[", "'nics'", "]", ".", "append", "(", "nic_data", ")", "# vlan count", "idx", ",", "vlan_count", "=", "_parse_value", "(", "rd_list", ",", "idx", ",", "'vlan count:'", ",", "1", ")", "# skip vlan data", "idx", "+=", "int", "(", "vlan_count", ")", "*", "3", "# skip the blank line", "idx", "+=", "1", "vsw_dict", "[", "'vswitches'", "]", ".", "append", "(", "vsw_data", ")", "return", "vsw_dict" ]
Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get inspect data.
[ "Parse", "the", "Virtual_Network_Vswitch_Query_Byte_Stats", "data", "to", "get", "inspect", "data", "." ]
python
train
50.310811
PolicyStat/docx2html
docx2html/core.py
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L816-L882
def _get_document_data(f, image_handler=None): ''' ``f`` is a ``ZipFile`` that is open Extract out the document data, numbering data and the relationship data. ''' if image_handler is None: def image_handler(image_id, relationship_dict): return relationship_dict.get(image_id) document_xml = None numbering_xml = None relationship_xml = None styles_xml = None parser = etree.XMLParser(strip_cdata=False) path, _ = os.path.split(f.filename) media = {} image_sizes = {} # Loop through the files in the zip file. for item in f.infolist(): # This file holds all the content of the document. if item.filename == 'word/document.xml': xml = f.read(item.filename) document_xml = etree.fromstring(xml, parser) # This file tells document.xml how lists should look. elif item.filename == 'word/numbering.xml': xml = f.read(item.filename) numbering_xml = etree.fromstring(xml, parser) elif item.filename == 'word/styles.xml': xml = f.read(item.filename) styles_xml = etree.fromstring(xml, parser) # This file holds the targets for hyperlinks and images. elif item.filename == 'word/_rels/document.xml.rels': xml = f.read(item.filename) try: relationship_xml = etree.fromstring(xml, parser) except XMLSyntaxError: relationship_xml = etree.fromstring('<xml></xml>', parser) if item.filename.startswith('word/media/'): # Strip off the leading word/ media[item.filename[len('word/'):]] = f.extract( item.filename, path, ) # Close the file pointer. f.close() # Get dictionaries for the numbering and the relationships. numbering_dict = get_numbering_info(numbering_xml) image_sizes = get_image_sizes(document_xml) relationship_dict = get_relationship_info( relationship_xml, media, image_sizes ) styles_dict = get_style_dict(styles_xml) font_sizes_dict = defaultdict(int) if DETECT_FONT_SIZE: font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict) meta_data = MetaData( numbering_dict=numbering_dict, relationship_dict=relationship_dict, styles_dict=styles_dict, font_sizes_dict=font_sizes_dict, image_handler=image_handler, image_sizes=image_sizes, ) return document_xml, meta_data
[ "def", "_get_document_data", "(", "f", ",", "image_handler", "=", "None", ")", ":", "if", "image_handler", "is", "None", ":", "def", "image_handler", "(", "image_id", ",", "relationship_dict", ")", ":", "return", "relationship_dict", ".", "get", "(", "image_id", ")", "document_xml", "=", "None", "numbering_xml", "=", "None", "relationship_xml", "=", "None", "styles_xml", "=", "None", "parser", "=", "etree", ".", "XMLParser", "(", "strip_cdata", "=", "False", ")", "path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "f", ".", "filename", ")", "media", "=", "{", "}", "image_sizes", "=", "{", "}", "# Loop through the files in the zip file.", "for", "item", "in", "f", ".", "infolist", "(", ")", ":", "# This file holds all the content of the document.", "if", "item", ".", "filename", "==", "'word/document.xml'", ":", "xml", "=", "f", ".", "read", "(", "item", ".", "filename", ")", "document_xml", "=", "etree", ".", "fromstring", "(", "xml", ",", "parser", ")", "# This file tells document.xml how lists should look.", "elif", "item", ".", "filename", "==", "'word/numbering.xml'", ":", "xml", "=", "f", ".", "read", "(", "item", ".", "filename", ")", "numbering_xml", "=", "etree", ".", "fromstring", "(", "xml", ",", "parser", ")", "elif", "item", ".", "filename", "==", "'word/styles.xml'", ":", "xml", "=", "f", ".", "read", "(", "item", ".", "filename", ")", "styles_xml", "=", "etree", ".", "fromstring", "(", "xml", ",", "parser", ")", "# This file holds the targets for hyperlinks and images.", "elif", "item", ".", "filename", "==", "'word/_rels/document.xml.rels'", ":", "xml", "=", "f", ".", "read", "(", "item", ".", "filename", ")", "try", ":", "relationship_xml", "=", "etree", ".", "fromstring", "(", "xml", ",", "parser", ")", "except", "XMLSyntaxError", ":", "relationship_xml", "=", "etree", ".", "fromstring", "(", "'<xml></xml>'", ",", "parser", ")", "if", "item", ".", "filename", ".", "startswith", "(", "'word/media/'", ")", ":", "# Strip off the leading word/", "media", "[", "item", ".", "filename", "[", "len", "(", "'word/'", ")", ":", "]", "]", "=", "f", ".", "extract", "(", "item", ".", "filename", ",", "path", ",", ")", "# Close the file pointer.", "f", ".", "close", "(", ")", "# Get dictionaries for the numbering and the relationships.", "numbering_dict", "=", "get_numbering_info", "(", "numbering_xml", ")", "image_sizes", "=", "get_image_sizes", "(", "document_xml", ")", "relationship_dict", "=", "get_relationship_info", "(", "relationship_xml", ",", "media", ",", "image_sizes", ")", "styles_dict", "=", "get_style_dict", "(", "styles_xml", ")", "font_sizes_dict", "=", "defaultdict", "(", "int", ")", "if", "DETECT_FONT_SIZE", ":", "font_sizes_dict", "=", "get_font_sizes_dict", "(", "document_xml", ",", "styles_dict", ")", "meta_data", "=", "MetaData", "(", "numbering_dict", "=", "numbering_dict", ",", "relationship_dict", "=", "relationship_dict", ",", "styles_dict", "=", "styles_dict", ",", "font_sizes_dict", "=", "font_sizes_dict", ",", "image_handler", "=", "image_handler", ",", "image_sizes", "=", "image_sizes", ",", ")", "return", "document_xml", ",", "meta_data" ]
``f`` is a ``ZipFile`` that is open Extract out the document data, numbering data and the relationship data.
[ "f", "is", "a", "ZipFile", "that", "is", "open", "Extract", "out", "the", "document", "data", "numbering", "data", "and", "the", "relationship", "data", "." ]
python
test
37.238806
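A minimal usage sketch for the parser above (not from the source): the function expects an already-open ZipFile over a .docx archive and closes it itself; the file name and the surrounding import context are assumptions.

from zipfile import ZipFile

f = ZipFile('example.docx')                      # hypothetical input document
document_xml, meta_data = _get_document_data(f)  # f is closed inside the call
print(meta_data.relationship_dict)               # hyperlink and image targets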
skyfielders/python-skyfield
skyfield/earthlib.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/earthlib.py#L166-L175
def refraction(alt_degrees, temperature_C, pressure_mbar): """Given an observed altitude, return how much the image is refracted. Zero refraction is returned both for objects very near the zenith, as well as for objects more than one degree below the horizon. """ r = 0.016667 / tan((alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD) d = r * (0.28 * pressure_mbar / (temperature_C + 273.0)) return where((-1.0 <= alt_degrees) & (alt_degrees <= 89.9), d, 0.0)
[ "def", "refraction", "(", "alt_degrees", ",", "temperature_C", ",", "pressure_mbar", ")", ":", "r", "=", "0.016667", "/", "tan", "(", "(", "alt_degrees", "+", "7.31", "/", "(", "alt_degrees", "+", "4.4", ")", ")", "*", "DEG2RAD", ")", "d", "=", "r", "*", "(", "0.28", "*", "pressure_mbar", "/", "(", "temperature_C", "+", "273.0", ")", ")", "return", "where", "(", "(", "-", "1.0", "<=", "alt_degrees", ")", "&", "(", "alt_degrees", "<=", "89.9", ")", ",", "d", ",", "0.0", ")" ]
Given an observed altitude, return how much the image is refracted. Zero refraction is returned both for objects very near the zenith, as well as for objects more than one degree below the horizon.
[ "Given", "an", "observed", "altitude", "return", "how", "much", "the", "image", "is", "refracted", "." ]
python
train
48.2
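A quick numerical check of the formula above (values are illustrative, not from the source): at 10 degrees observed altitude, 10 °C and 1010 mbar the correction comes out near 5.4 arcminutes, and the `where` clause passes it through because the altitude lies between -1.0 and 89.9 degrees.

from numpy import tan, radians

alt, T, P = 10.0, 10.0, 1010.0
r = 0.016667 / tan(radians(alt + 7.31 / (alt + 4.4)))
d = r * (0.28 * P / (T + 273.0))
print(round(d * 60, 1))   # ~5.4 arcminutes of refraction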
a1ezzz/wasp-general
wasp_general/datetime.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/datetime.py#L42-L61
def utc_datetime(dt=None, local_value=True): """ Convert local datetime and/or datetime without timezone information to UTC datetime with timezone information. :param dt: local datetime to convert. If is None, then system datetime value is used :param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information :return: datetime in UTC with tz set """ # TODO: rename local_value to local_tz or in_local_tz if dt is None: return datetime.now(tz=timezone.utc) result = dt if result.utcoffset() is None: if local_value is False: return result.replace(tzinfo=timezone.utc) else: result = result.replace(tzinfo=local_tz()) return result.astimezone(timezone.utc)
[ "def", "utc_datetime", "(", "dt", "=", "None", ",", "local_value", "=", "True", ")", ":", "# TODO: rename local_value to local_tz or in_local_tz", "if", "dt", "is", "None", ":", "return", "datetime", ".", "now", "(", "tz", "=", "timezone", ".", "utc", ")", "result", "=", "dt", "if", "result", ".", "utcoffset", "(", ")", "is", "None", ":", "if", "local_value", "is", "False", ":", "return", "result", ".", "replace", "(", "tzinfo", "=", "timezone", ".", "utc", ")", "else", ":", "result", "=", "result", ".", "replace", "(", "tzinfo", "=", "local_tz", "(", ")", ")", "return", "result", ".", "astimezone", "(", "timezone", ".", "utc", ")" ]
Convert local datetime and/or datetime without timezone information to UTC datetime with timezone information. :param dt: local datetime to convert. If is None, then system datetime value is used :param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information :return: datetime in UTC with tz set
[ "Convert", "local", "datetime", "and", "/", "or", "datetime", "without", "timezone", "information", "to", "UTC", "datetime", "with", "timezone", "information", "." ]
python
train
35.3
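Two illustrative calls covering the naive and timezone-aware branches (assuming this module's local_tz() helper resolves the system zone as documented):

from datetime import datetime, timezone, timedelta

# naive value that is already UTC: only the tzinfo is attached
print(utc_datetime(datetime(2020, 1, 1, 12, 0), local_value=False))
# 2020-01-01 12:00:00+00:00

# aware value in UTC+3: converted to 09:00 UTC
print(utc_datetime(datetime(2020, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=3)))))
# 2020-01-01 09:00:00+00:00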
google/grumpy
third_party/stdlib/csv.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/csv.py#L295-L397
def _guess_delimiter(self, data, delimiters): """ The delimiter /should/ occur the same number of times on each row. However, due to malformed data, it may not. We don't want an all or nothing approach, so we allow for small variations in this number. 1) build a table of the frequency of each character on every line. 2) build a table of frequencies of this frequency (meta-frequency?), e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows, 7 times in 2 rows' 3) use the mode of the meta-frequency to determine the /expected/ frequency for that character 4) find out how often the character actually meets that goal 5) the character that best meets its goal is the delimiter For performance reasons, the data is evaluated in chunks, so it can try and evaluate the smallest portion of the data possible, evaluating additional chunks as necessary. """ data = filter(None, data.split('\n')) ascii = [chr(c) for c in range(127)] # 7-bit ASCII # build frequency tables chunkLength = min(10, len(data)) iteration = 0 charFrequency = {} modes = {} delims = {} start, end = 0, min(chunkLength, len(data)) while start < len(data): iteration += 1 for line in data[start:end]: for char in ascii: metaFrequency = charFrequency.get(char, {}) # must count even if frequency is 0 freq = line.count(char) # value is the mode metaFrequency[freq] = metaFrequency.get(freq, 0) + 1 charFrequency[char] = metaFrequency for char in charFrequency.keys(): items = charFrequency[char].items() if len(items) == 1 and items[0][0] == 0: continue # get the mode of the frequencies if len(items) > 1: modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b, items) # adjust the mode - subtract the sum of all # other frequencies items.remove(modes[char]) modes[char] = (modes[char][0], modes[char][1] - reduce(lambda a, b: (0, a[1] + b[1]), items)[1]) else: modes[char] = items[0] # build a list of possible delimiters modeList = modes.items() total = float(chunkLength * iteration) # (rows of consistent data) / (number of rows) = 100% consistency = 1.0 # minimum consistency threshold threshold = 0.9 while len(delims) == 0 and consistency >= threshold: for k, v in modeList: if v[0] > 0 and v[1] > 0: if ((v[1]/total) >= consistency and (delimiters is None or k in delimiters)): delims[k] = v consistency -= 0.01 if len(delims) == 1: delim = delims.keys()[0] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace) # analyze another chunkLength lines start = end end += chunkLength if not delims: return ('', 0) # if there's more than one, fall back to a 'preferred' list if len(delims) > 1: for d in self.preferred: if d in delims.keys(): skipinitialspace = (data[0].count(d) == data[0].count("%c " % d)) return (d, skipinitialspace) # nothing else indicates a preference, pick the character that # dominates(?) items = [(v,k) for (k,v) in delims.items()] items.sort() delim = items[-1][1] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace)
[ "def", "_guess_delimiter", "(", "self", ",", "data", ",", "delimiters", ")", ":", "data", "=", "filter", "(", "None", ",", "data", ".", "split", "(", "'\\n'", ")", ")", "ascii", "=", "[", "chr", "(", "c", ")", "for", "c", "in", "range", "(", "127", ")", "]", "# 7-bit ASCII", "# build frequency tables", "chunkLength", "=", "min", "(", "10", ",", "len", "(", "data", ")", ")", "iteration", "=", "0", "charFrequency", "=", "{", "}", "modes", "=", "{", "}", "delims", "=", "{", "}", "start", ",", "end", "=", "0", ",", "min", "(", "chunkLength", ",", "len", "(", "data", ")", ")", "while", "start", "<", "len", "(", "data", ")", ":", "iteration", "+=", "1", "for", "line", "in", "data", "[", "start", ":", "end", "]", ":", "for", "char", "in", "ascii", ":", "metaFrequency", "=", "charFrequency", ".", "get", "(", "char", ",", "{", "}", ")", "# must count even if frequency is 0", "freq", "=", "line", ".", "count", "(", "char", ")", "# value is the mode", "metaFrequency", "[", "freq", "]", "=", "metaFrequency", ".", "get", "(", "freq", ",", "0", ")", "+", "1", "charFrequency", "[", "char", "]", "=", "metaFrequency", "for", "char", "in", "charFrequency", ".", "keys", "(", ")", ":", "items", "=", "charFrequency", "[", "char", "]", ".", "items", "(", ")", "if", "len", "(", "items", ")", "==", "1", "and", "items", "[", "0", "]", "[", "0", "]", "==", "0", ":", "continue", "# get the mode of the frequencies", "if", "len", "(", "items", ")", ">", "1", ":", "modes", "[", "char", "]", "=", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "[", "1", "]", ">", "b", "[", "1", "]", "and", "a", "or", "b", ",", "items", ")", "# adjust the mode - subtract the sum of all", "# other frequencies", "items", ".", "remove", "(", "modes", "[", "char", "]", ")", "modes", "[", "char", "]", "=", "(", "modes", "[", "char", "]", "[", "0", "]", ",", "modes", "[", "char", "]", "[", "1", "]", "-", "reduce", "(", "lambda", "a", ",", "b", ":", "(", "0", ",", "a", "[", "1", "]", "+", "b", "[", "1", "]", ")", ",", "items", ")", "[", "1", "]", ")", "else", ":", "modes", "[", "char", "]", "=", "items", "[", "0", "]", "# build a list of possible delimiters", "modeList", "=", "modes", ".", "items", "(", ")", "total", "=", "float", "(", "chunkLength", "*", "iteration", ")", "# (rows of consistent data) / (number of rows) = 100%", "consistency", "=", "1.0", "# minimum consistency threshold", "threshold", "=", "0.9", "while", "len", "(", "delims", ")", "==", "0", "and", "consistency", ">=", "threshold", ":", "for", "k", ",", "v", "in", "modeList", ":", "if", "v", "[", "0", "]", ">", "0", "and", "v", "[", "1", "]", ">", "0", ":", "if", "(", "(", "v", "[", "1", "]", "/", "total", ")", ">=", "consistency", "and", "(", "delimiters", "is", "None", "or", "k", "in", "delimiters", ")", ")", ":", "delims", "[", "k", "]", "=", "v", "consistency", "-=", "0.01", "if", "len", "(", "delims", ")", "==", "1", ":", "delim", "=", "delims", ".", "keys", "(", ")", "[", "0", "]", "skipinitialspace", "=", "(", "data", "[", "0", "]", ".", "count", "(", "delim", ")", "==", "data", "[", "0", "]", ".", "count", "(", "\"%c \"", "%", "delim", ")", ")", "return", "(", "delim", ",", "skipinitialspace", ")", "# analyze another chunkLength lines", "start", "=", "end", "end", "+=", "chunkLength", "if", "not", "delims", ":", "return", "(", "''", ",", "0", ")", "# if there's more than one, fall back to a 'preferred' list", "if", "len", "(", "delims", ")", ">", "1", ":", "for", "d", "in", "self", ".", "preferred", ":", "if", "d", "in", "delims", ".", "keys", "(", ")", ":", 
"skipinitialspace", "=", "(", "data", "[", "0", "]", ".", "count", "(", "d", ")", "==", "data", "[", "0", "]", ".", "count", "(", "\"%c \"", "%", "d", ")", ")", "return", "(", "d", ",", "skipinitialspace", ")", "# nothing else indicates a preference, pick the character that", "# dominates(?)", "items", "=", "[", "(", "v", ",", "k", ")", "for", "(", "k", ",", "v", ")", "in", "delims", ".", "items", "(", ")", "]", "items", ".", "sort", "(", ")", "delim", "=", "items", "[", "-", "1", "]", "[", "1", "]", "skipinitialspace", "=", "(", "data", "[", "0", "]", ".", "count", "(", "delim", ")", "==", "data", "[", "0", "]", ".", "count", "(", "\"%c \"", "%", "delim", ")", ")", "return", "(", "delim", ",", "skipinitialspace", ")" ]
The delimiter /should/ occur the same number of times on each row. However, due to malformed data, it may not. We don't want an all or nothing approach, so we allow for small variations in this number. 1) build a table of the frequency of each character on every line. 2) build a table of frequencies of this frequency (meta-frequency?), e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows, 7 times in 2 rows' 3) use the mode of the meta-frequency to determine the /expected/ frequency for that character 4) find out how often the character actually meets that goal 5) the character that best meets its goal is the delimiter For performance reasons, the data is evaluated in chunks, so it can try and evaluate the smallest portion of the data possible, evaluating additional chunks as necessary.
[ "The", "delimiter", "/", "should", "/", "occur", "the", "same", "number", "of", "times", "on", "each", "row", ".", "However", "due", "to", "malformed", "data", "it", "may", "not", ".", "We", "don", "t", "want", "an", "all", "or", "nothing", "approach", "so", "we", "allow", "for", "small", "variations", "in", "this", "number", ".", "1", ")", "build", "a", "table", "of", "the", "frequency", "of", "each", "character", "on", "every", "line", ".", "2", ")", "build", "a", "table", "of", "frequencies", "of", "this", "frequency", "(", "meta", "-", "frequency?", ")", "e", ".", "g", ".", "x", "occurred", "5", "times", "in", "10", "rows", "6", "times", "in", "1000", "rows", "7", "times", "in", "2", "rows", "3", ")", "use", "the", "mode", "of", "the", "meta", "-", "frequency", "to", "determine", "the", "/", "expected", "/", "frequency", "for", "that", "character", "4", ")", "find", "out", "how", "often", "the", "character", "actually", "meets", "that", "goal", "5", ")", "the", "character", "that", "best", "meets", "its", "goal", "is", "the", "delimiter", "For", "performance", "reasons", "the", "data", "is", "evaluated", "in", "chunks", "so", "it", "can", "try", "and", "evaluate", "the", "smallest", "portion", "of", "the", "data", "possible", "evaluating", "additional", "chunks", "as", "necessary", "." ]
python
valid
41.543689
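The method above is the private core behind the standard-library csv.Sniffer (the grumpy tree vendors that module); a hedged illustration of the public entry point that drives it:

import csv

sample = "a;b;c\n1;2;3\n4;5;6\n"
dialect = csv.Sniffer().sniff(sample, delimiters=";,\t")
print(dialect.delimiter)   # ';' -- the character whose per-row frequency is most consistent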
allenai/allennlp
allennlp/nn/util.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L132-L169
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor): """ Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first Pytorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering. """ if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor): raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.") sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True) sorted_tensor = tensor.index_select(0, permutation_index) index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device) # This is the equivalent of zipping with index, sorting by the original # sequence lengths and returning the now sorted indices. _, reverse_mapping = permutation_index.sort(0, descending=False) restoration_indices = index_range.index_select(0, reverse_mapping) return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
[ "def", "sort_batch_by_length", "(", "tensor", ":", "torch", ".", "Tensor", ",", "sequence_lengths", ":", "torch", ".", "Tensor", ")", ":", "if", "not", "isinstance", "(", "tensor", ",", "torch", ".", "Tensor", ")", "or", "not", "isinstance", "(", "sequence_lengths", ",", "torch", ".", "Tensor", ")", ":", "raise", "ConfigurationError", "(", "\"Both the tensor and sequence lengths must be torch.Tensors.\"", ")", "sorted_sequence_lengths", ",", "permutation_index", "=", "sequence_lengths", ".", "sort", "(", "0", ",", "descending", "=", "True", ")", "sorted_tensor", "=", "tensor", ".", "index_select", "(", "0", ",", "permutation_index", ")", "index_range", "=", "torch", ".", "arange", "(", "0", ",", "len", "(", "sequence_lengths", ")", ",", "device", "=", "sequence_lengths", ".", "device", ")", "# This is the equivalent of zipping with index, sorting by the original", "# sequence lengths and returning the now sorted indices.", "_", ",", "reverse_mapping", "=", "permutation_index", ".", "sort", "(", "0", ",", "descending", "=", "False", ")", "restoration_indices", "=", "index_range", ".", "index_select", "(", "0", ",", "reverse_mapping", ")", "return", "sorted_tensor", ",", "sorted_sequence_lengths", ",", "restoration_indices", ",", "permutation_index" ]
Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first Pytorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering.
[ "Sort", "a", "batch", "first", "tensor", "by", "some", "specified", "lengths", "." ]
python
train
47.342105
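A short usage sketch (shapes and values are illustrative): the restoration indices undo the sort, which is the property the docstring guarantees.

import torch

tensor = torch.randn(3, 5, 7)              # batch-first: 3 sequences, 5 steps, 7 features
lengths = torch.tensor([2, 5, 3])
sorted_t, sorted_len, restore, perm = sort_batch_by_length(tensor, lengths)
print(sorted_len)                           # tensor([5, 3, 2])
assert torch.equal(sorted_t.index_select(0, restore), tensor)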
pytest-dev/pytest-xprocess
xprocess.py
https://github.com/pytest-dev/pytest-xprocess/blob/c3ee760b02dce2d0eed960b3ab0e28379853c3ef/xprocess.py#L227-L238
def prep(self, wait, args, env=None): """ Given the return value of a preparefunc, prepare this CompatStarter. """ self.pattern = wait self.env = env self.args = args # wait is a function, supersedes the default behavior if callable(wait): self.wait = lambda lines: wait()
[ "def", "prep", "(", "self", ",", "wait", ",", "args", ",", "env", "=", "None", ")", ":", "self", ".", "pattern", "=", "wait", "self", ".", "env", "=", "env", "self", ".", "args", "=", "args", "# wait is a function, supersedes the default behavior", "if", "callable", "(", "wait", ")", ":", "self", ".", "wait", "=", "lambda", "lines", ":", "wait", "(", ")" ]
Given the return value of a preparefunc, prepare this CompatStarter.
[ "Given", "the", "return", "value", "of", "a", "preparefunc", "prepare", "this", "CompatStarter", "." ]
python
train
28.833333
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L61-L69
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
[ "def", "_load_custom_config", "(", "run_config", ")", ":", "from", "pyhocon", "import", "ConfigFactory", ",", "HOCONConverter", ",", "ConfigTree", "conf", "=", "ConfigFactory", ".", "parse_file", "(", "run_config", ")", "out", "=", "{", "}", "if", "\"database\"", "in", "conf", ":", "out", "[", "\"database\"", "]", "=", "HOCONConverter", ".", "to_hocon", "(", "ConfigTree", "(", "{", "\"database\"", ":", "conf", ".", "get_config", "(", "\"database\"", ")", "}", ")", ")", "return", "out" ]
Load custom configuration input HOCON file for cromwell.
[ "Load", "custom", "configuration", "input", "HOCON", "file", "for", "cromwell", "." ]
python
train
41.555556
doakey3/DashTable
dashtable/html2data/html2data.py
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/html2data/html2data.py#L7-L40
def html2data(html_string): """ Convert an html table to a data table and spans. Parameters ---------- html_string : str The string containing the html table Returns ------- table : list of lists of str spans : list of lists of lists of int A span is a list of [row, column] pairs that define what cells are merged in a table. use_headers : bool """ spans = extract_spans(html_string) column_count = get_html_column_count(html_string) row_count = get_html_row_count(spans) count = 0 while count < len(spans): if len(spans[count]) == 1: spans.pop(count) else: count += 1 table = extract_table(html_string, row_count, column_count) use_headers = headers_present(html_string) return table, spans, use_headers
[ "def", "html2data", "(", "html_string", ")", ":", "spans", "=", "extract_spans", "(", "html_string", ")", "column_count", "=", "get_html_column_count", "(", "html_string", ")", "row_count", "=", "get_html_row_count", "(", "spans", ")", "count", "=", "0", "while", "count", "<", "len", "(", "spans", ")", ":", "if", "len", "(", "spans", "[", "count", "]", ")", "==", "1", ":", "spans", ".", "pop", "(", "count", ")", "else", ":", "count", "+=", "1", "table", "=", "extract_table", "(", "html_string", ",", "row_count", ",", "column_count", ")", "use_headers", "=", "headers_present", "(", "html_string", ")", "return", "table", ",", "spans", ",", "use_headers" ]
Convert an html table to a data table and spans. Parameters ---------- html_string : str The string containing the html table Returns ------- table : list of lists of str spans : list of lists of lists of int A span is a list of [row, column] pairs that define what cells are merged in a table. use_headers : bool
[ "Convert", "an", "html", "table", "to", "a", "data", "table", "and", "spans", "." ]
python
train
24.088235
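A hedged sketch of the documented return shape; the exact cell padding for merged columns depends on this package's extract_table helper, so the printed values are indicative only.

html = ("<table><tr><th>a</th><th>b</th></tr>"
        "<tr><td colspan='2'>merged</td></tr></table>")
table, spans, use_headers = html2data(html)
print(use_headers)   # True -- the <th> row counts as a header
print(spans)         # e.g. [[[1, 0], [1, 1]]]: the two cells joined by the colspan
print(table)         # 2x2 list of strings with the merged text in row 1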
ISA-tools/biopy-isatab
bcbio/isatab/parser.py
https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L142-L162
def _parse_keyvals(self, line_iter): """Generate dictionary from key/value pairs. """ out = None line = None for line in line_iter: if len(line) == 1 and line[0].upper() == line[0]: break else: # setup output dictionaries, trimming off blank columns if out is None: while not line[-1]: line = line[:-1] out = [{} for _ in line[1:]] # add blank values if the line is stripped while len(line) < len(out) + 1: line.append("") for i in range(len(out)): out[i][line[0]] = line[i+1].strip() line = None return out, line
[ "def", "_parse_keyvals", "(", "self", ",", "line_iter", ")", ":", "out", "=", "None", "line", "=", "None", "for", "line", "in", "line_iter", ":", "if", "len", "(", "line", ")", "==", "1", "and", "line", "[", "0", "]", ".", "upper", "(", ")", "==", "line", "[", "0", "]", ":", "break", "else", ":", "# setup output dictionaries, trimming off blank columns", "if", "out", "is", "None", ":", "while", "not", "line", "[", "-", "1", "]", ":", "line", "=", "line", "[", ":", "-", "1", "]", "out", "=", "[", "{", "}", "for", "_", "in", "line", "[", "1", ":", "]", "]", "# add blank values if the line is stripped", "while", "len", "(", "line", ")", "<", "len", "(", "out", ")", "+", "1", ":", "line", ".", "append", "(", "\"\"", ")", "for", "i", "in", "range", "(", "len", "(", "out", ")", ")", ":", "out", "[", "i", "]", "[", "line", "[", "0", "]", "]", "=", "line", "[", "i", "+", "1", "]", ".", "strip", "(", ")", "line", "=", "None", "return", "out", ",", "line" ]
Generate dictionary from key/value pairs.
[ "Generate", "dictionary", "from", "key", "/", "value", "pairs", "." ]
python
train
37.190476
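An illustrative trace of the scan (assuming `parser` is an investigation-parser instance from this module and each line has already been split into cells): the first column becomes the key, later columns fan out into one dict per study, and an upper-case one-cell line ends the block.

lines = iter([
    ["Study Person Last Name", "Smith", "Jones"],
    ["Study Person First Name", "Ann", "Bob"],
    ["STUDY ASSAYS"],
])
out, next_line = parser._parse_keyvals(lines)
# out == [{'Study Person Last Name': 'Smith', 'Study Person First Name': 'Ann'},
#         {'Study Person Last Name': 'Jones', 'Study Person First Name': 'Bob'}]
# next_line == ['STUDY ASSAYS']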
nficano/python-lambda
aws_lambda/aws_lambda.py
https://github.com/nficano/python-lambda/blob/b0bd25404df70212d7fa057758760366406d64f2/aws_lambda/aws_lambda.py#L190-L248
def invoke( src, event_file='event.json', config_file='config.yaml', profile_name=None, verbose=False, ): """Simulates a call to your function. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str alt_event: An optional argument to override which event file to use. :param bool verbose: Whether to print out verbose details. """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Set AWS_PROFILE environment variable based on `--profile` option. if profile_name: os.environ['AWS_PROFILE'] = profile_name # Load environment variables from the config file into the actual # environment. env_vars = cfg.get('environment_variables') if env_vars: for key, value in env_vars.items(): os.environ[key] = get_environment_variable_value(value) # Load and parse event file. path_to_event_file = os.path.join(src, event_file) event = read(path_to_event_file, loader=json.loads) # Tweak to allow module to import local modules try: sys.path.index(src) except ValueError: sys.path.append(src) handler = cfg.get('handler') # Inspect the handler string (<module>.<function name>) and translate it # into a function we can execute. fn = get_callable_handler_function(src, handler) timeout = cfg.get('timeout') if timeout: context = LambdaContext(cfg.get('function_name'),timeout) else: context = LambdaContext(cfg.get('function_name')) start = time.time() results = fn(event, context) end = time.time() print('{0}'.format(results)) if verbose: print('\nexecution time: {:.8f}s\nfunction execution ' 'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15)))
[ "def", "invoke", "(", "src", ",", "event_file", "=", "'event.json'", ",", "config_file", "=", "'config.yaml'", ",", "profile_name", "=", "None", ",", "verbose", "=", "False", ",", ")", ":", "# Load and parse the config file.", "path_to_config_file", "=", "os", ".", "path", ".", "join", "(", "src", ",", "config_file", ")", "cfg", "=", "read_cfg", "(", "path_to_config_file", ",", "profile_name", ")", "# Set AWS_PROFILE environment variable based on `--profile` option.", "if", "profile_name", ":", "os", ".", "environ", "[", "'AWS_PROFILE'", "]", "=", "profile_name", "# Load environment variables from the config file into the actual", "# environment.", "env_vars", "=", "cfg", ".", "get", "(", "'environment_variables'", ")", "if", "env_vars", ":", "for", "key", ",", "value", "in", "env_vars", ".", "items", "(", ")", ":", "os", ".", "environ", "[", "key", "]", "=", "get_environment_variable_value", "(", "value", ")", "# Load and parse event file.", "path_to_event_file", "=", "os", ".", "path", ".", "join", "(", "src", ",", "event_file", ")", "event", "=", "read", "(", "path_to_event_file", ",", "loader", "=", "json", ".", "loads", ")", "# Tweak to allow module to import local modules", "try", ":", "sys", ".", "path", ".", "index", "(", "src", ")", "except", "ValueError", ":", "sys", ".", "path", ".", "append", "(", "src", ")", "handler", "=", "cfg", ".", "get", "(", "'handler'", ")", "# Inspect the handler string (<module>.<function name>) and translate it", "# into a function we can execute.", "fn", "=", "get_callable_handler_function", "(", "src", ",", "handler", ")", "timeout", "=", "cfg", ".", "get", "(", "'timeout'", ")", "if", "timeout", ":", "context", "=", "LambdaContext", "(", "cfg", ".", "get", "(", "'function_name'", ")", ",", "timeout", ")", "else", ":", "context", "=", "LambdaContext", "(", "cfg", ".", "get", "(", "'function_name'", ")", ")", "start", "=", "time", ".", "time", "(", ")", "results", "=", "fn", "(", "event", ",", "context", ")", "end", "=", "time", ".", "time", "(", ")", "print", "(", "'{0}'", ".", "format", "(", "results", ")", ")", "if", "verbose", ":", "print", "(", "'\\nexecution time: {:.8f}s\\nfunction execution '", "'timeout: {:2}s'", ".", "format", "(", "end", "-", "start", ",", "cfg", ".", "get", "(", "'timeout'", ",", "15", ")", ")", ")" ]
Simulates a call to your function. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str alt_event: An optional argument to override which event file to use. :param bool verbose: Whether to print out verbose details.
[ "Simulates", "a", "call", "to", "your", "function", "." ]
python
valid
32.677966
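A hedged invocation sketch: the handler string in config.yaml follows the `<module>.<function name>` form the comment describes, so a project whose config contains `handler: service.handler` resolves to the `handler` function in `service.py`. The project path below is an assumption.

# Hypothetical layout: path/to/project/{config.yaml, event.json, service.py}
invoke("path/to/project", event_file="event.json", verbose=True)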
hobson/aima
aima/learning.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/learning.py#L99-L102
def add_example(self, example): "Add an example to the list of examples, checking it first." self.check_example(example) self.examples.append(example)
[ "def", "add_example", "(", "self", ",", "example", ")", ":", "self", ".", "check_example", "(", "example", ")", "self", ".", "examples", ".", "append", "(", "example", ")" ]
Add an example to the list of examples, checking it first.
[ "Add", "an", "example", "to", "the", "list", "of", "examples", "checking", "it", "first", "." ]
python
valid
42.75
twilio/authy-python
authy/api/resources.py
https://github.com/twilio/authy-python/blob/7a0073b39a56bac495b10e4b4fca3f09982de6ed/authy/api/resources.py#L327-L360
def verification_start(self, phone_number, country_code, via='sms', locale=None, code_length=4): """ :param string phone_number: stored in your databse or you provided while creating new user. :param string country_code: stored in your databse or you provided while creating new user. :param string via: verification method either sms or call :param string locale: optional default none :param number code_length: optional default 4 :return: """ if via != 'sms' and via != 'call': raise AuthyFormatException("Invalid Via. Expected 'sms' or 'call'.") options = { 'phone_number': phone_number, 'country_code': country_code, 'via': via } if locale: options['locale'] = locale try: cl = int(code_length) if cl < 4 or cl > 10: raise ValueError options['code_length'] = cl except ValueError: raise AuthyFormatException( "Invalid code_length. Expected numeric value from 4-10.") resp = self.post("/protected/json/phones/verification/start", options) return Phone(self, resp)
[ "def", "verification_start", "(", "self", ",", "phone_number", ",", "country_code", ",", "via", "=", "'sms'", ",", "locale", "=", "None", ",", "code_length", "=", "4", ")", ":", "if", "via", "!=", "'sms'", "and", "via", "!=", "'call'", ":", "raise", "AuthyFormatException", "(", "\"Invalid Via. Expected 'sms' or 'call'.\"", ")", "options", "=", "{", "'phone_number'", ":", "phone_number", ",", "'country_code'", ":", "country_code", ",", "'via'", ":", "via", "}", "if", "locale", ":", "options", "[", "'locale'", "]", "=", "locale", "try", ":", "cl", "=", "int", "(", "code_length", ")", "if", "cl", "<", "4", "or", "cl", ">", "10", ":", "raise", "ValueError", "options", "[", "'code_length'", "]", "=", "cl", "except", "ValueError", ":", "raise", "AuthyFormatException", "(", "\"Invalid code_length. Expected numeric value from 4-10.\"", ")", "resp", "=", "self", ".", "post", "(", "\"/protected/json/phones/verification/start\"", ",", "options", ")", "return", "Phone", "(", "self", ",", "resp", ")" ]
:param string phone_number: stored in your databse or you provided while creating new user. :param string country_code: stored in your databse or you provided while creating new user. :param string via: verification method either sms or call :param string locale: optional default none :param number code_length: optional default 4 :return:
[ ":", "param", "string", "phone_number", ":", "stored", "in", "your", "databse", "or", "you", "provided", "while", "creating", "new", "user", ".", ":", "param", "string", "country_code", ":", "stored", "in", "your", "databse", "or", "you", "provided", "while", "creating", "new", "user", ".", ":", "param", "string", "via", ":", "verification", "method", "either", "sms", "or", "call", ":", "param", "string", "locale", ":", "optional", "default", "none", ":", "param", "number", "code_length", ":", "optional", "default", "4", ":", "return", ":" ]
python
train
36.294118
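A usage sketch against the documented parameters (assuming `authy_api` is an AuthyApiClient instance and the phone details come from your own records, as the docstring says); `via` must be 'sms' or 'call' and `code_length` must fall in 4-10 or the format exception above is raised.

phone = authy_api.phones.verification_start(
    "555-123-4567", "1", via="sms", code_length=6)   # returns a Phone resource wrapper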
bcbio/bcbio-nextgen
bcbio/rnaseq/variation.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/variation.py#L154-L184
def gatk_filter_rnaseq(vrn_file, data): """ this incorporates filters listed here, dropping clusters of variants within a 35 nucleotide window, high fischer strand values and low quality by depth https://software.broadinstitute.org/gatk/guide/article?id=3891 java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0" -filterName QD -filter "QD < 2.0" -o output.vcf """ out_file = "%s-filter%s" % utils.splitext_plus(vrn_file) if not file_exists(out_file): ref_file = dd.get_ref_file(data) with file_transaction(data, out_file) as tx_out_file: params = ["VariantFiltration", "-R", ref_file, "-V", vrn_file, "--cluster-window-size", "35", "--cluster-size", "3", "--filter-expression", "'FS > 30.0'", "--filter-name", "FS", "--filter-expression", "'QD < 2.0'", "--filter-name", "QD", "--output", tx_out_file] # Use GATK4 for filtering, tools_off is for variant calling config = utils.deepish_copy(dd.get_config(data)) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") jvm_opts = broad.get_gatk_opts(config, os.path.dirname(tx_out_file)) do.run(broad.gatk_cmd("gatk", jvm_opts, params, config), "Filter RNA-seq variants.") return out_file
[ "def", "gatk_filter_rnaseq", "(", "vrn_file", ",", "data", ")", ":", "out_file", "=", "\"%s-filter%s\"", "%", "utils", ".", "splitext_plus", "(", "vrn_file", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "params", "=", "[", "\"VariantFiltration\"", ",", "\"-R\"", ",", "ref_file", ",", "\"-V\"", ",", "vrn_file", ",", "\"--cluster-window-size\"", ",", "\"35\"", ",", "\"--cluster-size\"", ",", "\"3\"", ",", "\"--filter-expression\"", ",", "\"'FS > 30.0'\"", ",", "\"--filter-name\"", ",", "\"FS\"", ",", "\"--filter-expression\"", ",", "\"'QD < 2.0'\"", ",", "\"--filter-name\"", ",", "\"QD\"", ",", "\"--output\"", ",", "tx_out_file", "]", "# Use GATK4 for filtering, tools_off is for variant calling", "config", "=", "utils", ".", "deepish_copy", "(", "dd", ".", "get_config", "(", "data", ")", ")", "if", "\"gatk4\"", "in", "dd", ".", "get_tools_off", "(", "{", "\"config\"", ":", "config", "}", ")", ":", "config", "[", "\"algorithm\"", "]", "[", "\"tools_off\"", "]", ".", "remove", "(", "\"gatk4\"", ")", "jvm_opts", "=", "broad", ".", "get_gatk_opts", "(", "config", ",", "os", ".", "path", ".", "dirname", "(", "tx_out_file", ")", ")", "do", ".", "run", "(", "broad", ".", "gatk_cmd", "(", "\"gatk\"", ",", "jvm_opts", ",", "params", ",", "config", ")", ",", "\"Filter RNA-seq variants.\"", ")", "return", "out_file" ]
this incorporates filters listed here, dropping clusters of variants within a 35 nucleotide window, high fischer strand values and low quality by depth https://software.broadinstitute.org/gatk/guide/article?id=3891 java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0" -filterName QD -filter "QD < 2.0" -o output.vcf
[ "this", "incorporates", "filters", "listed", "here", "dropping", "clusters", "of", "variants", "within", "a", "35", "nucleotide", "window", "high", "fischer", "strand", "values", "and", "low", "quality", "by", "depth", "https", ":", "//", "software", ".", "broadinstitute", ".", "org", "/", "gatk", "/", "guide", "/", "article?id", "=", "3891", "java", "-", "jar", "GenomeAnalysisTK", ".", "jar", "-", "T", "VariantFiltration", "-", "R", "hg_19", ".", "fasta", "-", "V", "input", ".", "vcf", "-", "window", "35", "-", "cluster", "3", "-", "filterName", "FS", "-", "filter", "FS", ">", "30", ".", "0", "-", "filterName", "QD", "-", "filter", "QD", "<", "2", ".", "0", "-", "o", "output", ".", "vcf" ]
python
train
51.096774
matthewdeanmartin/jiggle_version
jiggle_version/package_info_finder.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/package_info_finder.py#L270-L290
def find_single_file_project(self): # type: () -> List[str] """ Find well formed singler file project :return: """ files = [f for f in os.listdir(".") if os.path.isfile(f)] candidates = [] setup_source = self.setup_py_source() for file in files: if file.endswith("setup.py") or not file.endswith(".py"): continue # duh if setup_source: if file.replace(".py", "") in setup_source: candidate = file.replace(".py", "") if candidate != "setup": candidates.append(candidate) return candidates
[ "def", "find_single_file_project", "(", "self", ")", ":", "# type: () -> List[str]", "files", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "\".\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", "]", "candidates", "=", "[", "]", "setup_source", "=", "self", ".", "setup_py_source", "(", ")", "for", "file", "in", "files", ":", "if", "file", ".", "endswith", "(", "\"setup.py\"", ")", "or", "not", "file", ".", "endswith", "(", "\".py\"", ")", ":", "continue", "# duh", "if", "setup_source", ":", "if", "file", ".", "replace", "(", "\".py\"", ",", "\"\"", ")", "in", "setup_source", ":", "candidate", "=", "file", ".", "replace", "(", "\".py\"", ",", "\"\"", ")", "if", "candidate", "!=", "\"setup\"", ":", "candidates", ".", "append", "(", "candidate", ")", "return", "candidates" ]
Find well formed singler file project :return:
[ "Find", "well", "formed", "singler", "file", "project", ":", "return", ":" ]
python
train
31.666667
larsyencken/csvdiff
csvdiff/patch.py
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L106-L116
def apply(diff, recs, strict=True): """ Transform the records with the patch. May fail if the records do not match those expected in the patch. """ index_columns = diff['_index'] indexed = records.index(copy.deepcopy(list(recs)), index_columns) _add_records(indexed, diff['added'], index_columns, strict=strict) _remove_records(indexed, diff['removed'], index_columns, strict=strict) _update_records(indexed, diff['changed'], strict=strict) return records.sort(indexed.values())
[ "def", "apply", "(", "diff", ",", "recs", ",", "strict", "=", "True", ")", ":", "index_columns", "=", "diff", "[", "'_index'", "]", "indexed", "=", "records", ".", "index", "(", "copy", ".", "deepcopy", "(", "list", "(", "recs", ")", ")", ",", "index_columns", ")", "_add_records", "(", "indexed", ",", "diff", "[", "'added'", "]", ",", "index_columns", ",", "strict", "=", "strict", ")", "_remove_records", "(", "indexed", ",", "diff", "[", "'removed'", "]", ",", "index_columns", ",", "strict", "=", "strict", ")", "_update_records", "(", "indexed", ",", "diff", "[", "'changed'", "]", ",", "strict", "=", "strict", ")", "return", "records", ".", "sort", "(", "indexed", ".", "values", "(", ")", ")" ]
Transform the records with the patch. May fail if the records do not match those expected in the patch.
[ "Transform", "the", "records", "with", "the", "patch", ".", "May", "fail", "if", "the", "records", "do", "not", "match", "those", "expected", "in", "the", "patch", "." ]
python
train
46.181818
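A hedged round-trip sketch of the patch shape the function reads ('_index', 'added', 'removed', 'changed'); the toy rows are assumptions.

old = [{'id': '1', 'name': 'ann'}, {'id': '2', 'name': 'bob'}]
diff = {
    '_index': ['id'],
    'added': [{'id': '3', 'name': 'cyn'}],
    'removed': [{'id': '2', 'name': 'bob'}],
    'changed': [],
}
patched = apply(diff, old)
# patched now holds the rows keyed '1' and '3'; ordering follows records.sort()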
singularityhub/sregistry-cli
sregistry/main/globus/utils.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/globus/utils.py#L57-L70
def create_endpoint_folder(self, endpoint_id, folder): '''create an endpoint folder, catching the error if it exists. Parameters ========== endpoint_id: the endpoint id parameters folder: the relative path of the folder to create ''' try: res = self.transfer_client.operation_mkdir(endpoint_id, folder) bot.info("%s --> %s" %(res['message'], folder)) except TransferAPIError: bot.info('%s already exists at endpoint' %folder)
[ "def", "create_endpoint_folder", "(", "self", ",", "endpoint_id", ",", "folder", ")", ":", "try", ":", "res", "=", "self", ".", "transfer_client", ".", "operation_mkdir", "(", "endpoint_id", ",", "folder", ")", "bot", ".", "info", "(", "\"%s --> %s\"", "%", "(", "res", "[", "'message'", "]", ",", "folder", ")", ")", "except", "TransferAPIError", ":", "bot", ".", "info", "(", "'%s already exists at endpoint'", "%", "folder", ")" ]
create an endpoint folder, catching the error if it exists. Parameters ========== endpoint_id: the endpoint id parameters folder: the relative path of the folder to create
[ "create", "an", "endpoint", "folder", "catching", "the", "error", "if", "it", "exists", "." ]
python
test
34.428571
matrix-org/matrix-python-sdk
matrix_client/api.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L915-L926
def set_join_rule(self, room_id, join_rule): """Set the rule for users wishing to join the room. Args: room_id(str): The room to set the rules for. join_rule(str): The chosen rule. One of: ["public", "knock", "invite", "private"] """ content = { "join_rule": join_rule } return self.send_state_event(room_id, "m.room.join_rules", content)
[ "def", "set_join_rule", "(", "self", ",", "room_id", ",", "join_rule", ")", ":", "content", "=", "{", "\"join_rule\"", ":", "join_rule", "}", "return", "self", ".", "send_state_event", "(", "room_id", ",", "\"m.room.join_rules\"", ",", "content", ")" ]
Set the rule for users wishing to join the room. Args: room_id(str): The room to set the rules for. join_rule(str): The chosen rule. One of: ["public", "knock", "invite", "private"]
[ "Set", "the", "rule", "for", "users", "wishing", "to", "join", "the", "room", "." ]
python
train
35.666667
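A one-line usage sketch, assuming `api` is the MatrixHttpApi client this method belongs to and the room id is hypothetical:

api.set_join_rule("!roomid:example.org", "invite")   # members must now be invited to join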
secynic/ipwhois
ipwhois/asn.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/asn.py#L306-L404
def parse_fields_http(self, response, extra_org_map=None): """ The function for parsing ASN fields from a http response. Args: response (:obj:`str`): The response from the ASN http server. extra_org_map (:obj:`dict`): Dictionary mapping org handles to RIRs. This is for limited cases where ARIN REST (ASN fallback HTTP lookup) does not show an RIR as the org handle e.g., DNIC (which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are (note the case-sensitive - this is meant to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'. Defaults to None. Returns: dict: The ASN lookup results :: { 'asn' (None) - Cannot retrieve with this method. 'asn_date' (None) - Cannot retrieve with this method. 'asn_registry' (str) - The assigned ASN registry 'asn_cidr' (None) - Cannot retrieve with this method. 'asn_country_code' (None) - Cannot retrieve with this method. 'asn_description' (None) - Cannot retrieve with this method. } Raises: ASNRegistryError: The ASN registry is not known. ASNParseError: ASN parsing failed. """ # Set the org_map. Map the orgRef handle to an RIR. org_map = self.org_map.copy() try: org_map.update(extra_org_map) except (TypeError, ValueError, IndexError, KeyError): pass try: asn_data = { 'asn_registry': None, 'asn': None, 'asn_cidr': None, 'asn_country_code': None, 'asn_date': None, 'asn_description': None } try: net_list = response['nets']['net'] if not isinstance(net_list, list): net_list = [net_list] except (KeyError, TypeError): log.debug('No networks found') net_list = [] for n in reversed(net_list): try: asn_data['asn_registry'] = ( org_map[n['orgRef']['@handle'].upper()] ) except KeyError as e: log.debug('Could not parse ASN registry via HTTP: ' '{0}'.format(str(e))) continue break if not asn_data['asn_registry']: log.debug('Could not parse ASN registry via HTTP') raise ASNRegistryError('ASN registry lookup failed.') except ASNRegistryError: raise except Exception as e: # pragma: no cover raise ASNParseError('Parsing failed for "{0}" with exception: {1}.' ''.format(response, e)[:100]) return asn_data
[ "def", "parse_fields_http", "(", "self", ",", "response", ",", "extra_org_map", "=", "None", ")", ":", "# Set the org_map. Map the orgRef handle to an RIR.", "org_map", "=", "self", ".", "org_map", ".", "copy", "(", ")", "try", ":", "org_map", ".", "update", "(", "extra_org_map", ")", "except", "(", "TypeError", ",", "ValueError", ",", "IndexError", ",", "KeyError", ")", ":", "pass", "try", ":", "asn_data", "=", "{", "'asn_registry'", ":", "None", ",", "'asn'", ":", "None", ",", "'asn_cidr'", ":", "None", ",", "'asn_country_code'", ":", "None", ",", "'asn_date'", ":", "None", ",", "'asn_description'", ":", "None", "}", "try", ":", "net_list", "=", "response", "[", "'nets'", "]", "[", "'net'", "]", "if", "not", "isinstance", "(", "net_list", ",", "list", ")", ":", "net_list", "=", "[", "net_list", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "log", ".", "debug", "(", "'No networks found'", ")", "net_list", "=", "[", "]", "for", "n", "in", "reversed", "(", "net_list", ")", ":", "try", ":", "asn_data", "[", "'asn_registry'", "]", "=", "(", "org_map", "[", "n", "[", "'orgRef'", "]", "[", "'@handle'", "]", ".", "upper", "(", ")", "]", ")", "except", "KeyError", "as", "e", ":", "log", ".", "debug", "(", "'Could not parse ASN registry via HTTP: '", "'{0}'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "continue", "break", "if", "not", "asn_data", "[", "'asn_registry'", "]", ":", "log", ".", "debug", "(", "'Could not parse ASN registry via HTTP'", ")", "raise", "ASNRegistryError", "(", "'ASN registry lookup failed.'", ")", "except", "ASNRegistryError", ":", "raise", "except", "Exception", "as", "e", ":", "# pragma: no cover", "raise", "ASNParseError", "(", "'Parsing failed for \"{0}\" with exception: {1}.'", "''", ".", "format", "(", "response", ",", "e", ")", "[", ":", "100", "]", ")", "return", "asn_data" ]
The function for parsing ASN fields from a http response. Args: response (:obj:`str`): The response from the ASN http server. extra_org_map (:obj:`dict`): Dictionary mapping org handles to RIRs. This is for limited cases where ARIN REST (ASN fallback HTTP lookup) does not show an RIR as the org handle e.g., DNIC (which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are (note the case-sensitive - this is meant to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'. Defaults to None. Returns: dict: The ASN lookup results :: { 'asn' (None) - Cannot retrieve with this method. 'asn_date' (None) - Cannot retrieve with this method. 'asn_registry' (str) - The assigned ASN registry 'asn_cidr' (None) - Cannot retrieve with this method. 'asn_country_code' (None) - Cannot retrieve with this method. 'asn_description' (None) - Cannot retrieve with this method. } Raises: ASNRegistryError: The ASN registry is not known. ASNParseError: ASN parsing failed.
[ "The", "function", "for", "parsing", "ASN", "fields", "from", "a", "http", "response", "." ]
python
train
30.777778
Unidata/MetPy
metpy/interpolate/grid.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/grid.py#L87-L108
def get_xy_steps(bbox, h_dim): r"""Return meshgrid spacing based on bounding box. bbox: dictionary Dictionary containing coordinates for corners of study area. h_dim: integer Horizontal resolution in meters. Returns ------- x_steps, (X, ) ndarray Number of grids in x dimension. y_steps: (Y, ) ndarray Number of grids in y dimension. """ x_range, y_range = get_xy_range(bbox) x_steps = np.ceil(x_range / h_dim) y_steps = np.ceil(y_range / h_dim) return int(x_steps), int(y_steps)
[ "def", "get_xy_steps", "(", "bbox", ",", "h_dim", ")", ":", "x_range", ",", "y_range", "=", "get_xy_range", "(", "bbox", ")", "x_steps", "=", "np", ".", "ceil", "(", "x_range", "/", "h_dim", ")", "y_steps", "=", "np", ".", "ceil", "(", "y_range", "/", "h_dim", ")", "return", "int", "(", "x_steps", ")", ",", "int", "(", "y_steps", ")" ]
r"""Return meshgrid spacing based on bounding box. bbox: dictionary Dictionary containing coordinates for corners of study area. h_dim: integer Horizontal resolution in meters. Returns ------- x_steps, (X, ) ndarray Number of grids in x dimension. y_steps: (Y, ) ndarray Number of grids in y dimension.
[ "r", "Return", "meshgrid", "spacing", "based", "on", "bounding", "box", "." ]
python
train
24.681818
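A worked example (the bbox keys follow the companion get_xy_range helper, assumed here to be west/east/south/north in meters):

bbox = {'west': 0, 'east': 10000, 'south': 0, 'north': 5000}
print(get_xy_steps(bbox, 2500))   # (4, 2) -- ceil(10000/2500), ceil(5000/2500)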
gesellkammer/sndfileio
sndfileio/sndfileio.py
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L228-L268
def sndwrite(samples:np.ndarray, sr:int, outfile:str, encoding:str='auto') -> None: """ samples --> Array-like. the actual samples, shape=(nframes, channels) sr --> Sampling-rate outfile --> The name of the outfile. the extension will determine the file-format. The formats supported depend on the available backends Without additional backends, only uncompressed formats are supported (wav, aif) encoding --> one of: - 'auto' or None: the encoding is determined from the format given by the extension of outfile, and from the data - 'pcm16' - 'pcm24' - 'pcm32' - 'flt32' NB: not all file formats support all encodings. Throws a SndfileError if the format does not support the given encoding If set to 'auto', an encoding will be selected based on the file-format and on the data. The bitdepth of the data is measured, and if the file-format supports it, it will be used. For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used. For a bitdepth of 32 bits, a FLOAT encoding will be used, or the next lower supported encoding """ if encoding in ('auto', None): encoding = _guessEncoding(samples, outfile) # normalize in the case where there would be clipping clipping = ((samples > 1).any() or (samples < -1).any()) if encoding.startswith('pcm') and clipping: maxvalue = max(samples.max(), abs(samples.min())) samples = samples / maxvalue backend = _getWriteBackend(outfile, encoding) if not backend: raise SndfileError("No backend found to support the given format") logger.debug(f"sndwrite: using backend {backend.name}") return backend.write(samples, sr, outfile, encoding)
[ "def", "sndwrite", "(", "samples", ":", "np", ".", "ndarray", ",", "sr", ":", "int", ",", "outfile", ":", "str", ",", "encoding", ":", "str", "=", "'auto'", ")", "->", "None", ":", "if", "encoding", "in", "(", "'auto'", ",", "None", ")", ":", "encoding", "=", "_guessEncoding", "(", "samples", ",", "outfile", ")", "# normalize in the case where there would be clipping", "clipping", "=", "(", "(", "samples", ">", "1", ")", ".", "any", "(", ")", "or", "(", "samples", "<", "-", "1", ")", ".", "any", "(", ")", ")", "if", "encoding", ".", "startswith", "(", "'pcm'", ")", "and", "clipping", ":", "maxvalue", "=", "max", "(", "samples", ".", "max", "(", ")", ",", "abs", "(", "samples", ".", "min", "(", ")", ")", ")", "samples", "=", "samples", "/", "maxvalue", "backend", "=", "_getWriteBackend", "(", "outfile", ",", "encoding", ")", "if", "not", "backend", ":", "raise", "SndfileError", "(", "\"No backend found to support the given format\"", ")", "logger", ".", "debug", "(", "f\"sndwrite: using backend {backend.name}\"", ")", "return", "backend", ".", "write", "(", "samples", ",", "sr", ",", "outfile", ",", "encoding", ")" ]
samples --> Array-like. the actual samples, shape=(nframes, channels) sr --> Sampling-rate outfile --> The name of the outfile. the extension will determine the file-format. The formats supported depend on the available backends Without additional backends, only uncompressed formats are supported (wav, aif) encoding --> one of: - 'auto' or None: the encoding is determined from the format given by the extension of outfile, and from the data - 'pcm16' - 'pcm24' - 'pcm32' - 'flt32' NB: not all file formats support all encodings. Throws a SndfileError if the format does not support the given encoding If set to 'auto', an encoding will be selected based on the file-format and on the data. The bitdepth of the data is measured, and if the file-format supports it, it will be used. For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used. For a bitdepth of 32 bits, a FLOAT encoding will be used, or the next lower supported encoding
[ "samples", "--", ">", "Array", "-", "like", ".", "the", "actual", "samples", "shape", "=", "(", "nframes", "channels", ")", "sr", "--", ">", "Sampling", "-", "rate", "outfile", "--", ">", "The", "name", "of", "the", "outfile", ".", "the", "extension", "will", "determine", "the", "file", "-", "format", ".", "The", "formats", "supported", "depend", "on", "the", "available", "backends", "Without", "additional", "backends", "only", "uncompressed", "formats", "are", "supported", "(", "wav", "aif", ")", "encoding", "--", ">", "one", "of", ":", "-", "auto", "or", "None", ":", "the", "encoding", "is", "determined", "from", "the", "format", "given", "by", "the", "extension", "of", "outfile", "and", "from", "the", "data", "-", "pcm16", "-", "pcm24", "-", "pcm32", "-", "flt32" ]
python
train
48.341463
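A minimal write sketch (a half-amplitude 440 Hz mono tone; which backends and formats are actually available depends on the installation, as the docstring notes):

import numpy as np

sr = 44100
samples = 0.5 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)   # 1 s mono tone
sndwrite(samples, sr, "tone.wav", encoding="pcm16")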
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L759-L767
def cons(o, seq) -> ISeq: """Creates a new sequence where o is the first element and seq is the rest. If seq is None, return a list containing o. If seq is not a ISeq, attempt to coerce it to a ISeq and then cons o onto the resulting sequence.""" if seq is None: return llist.l(o) if isinstance(seq, ISeq): return seq.cons(o) return Maybe(to_seq(seq)).map(lambda s: s.cons(o)).or_else(lambda: llist.l(o))
[ "def", "cons", "(", "o", ",", "seq", ")", "->", "ISeq", ":", "if", "seq", "is", "None", ":", "return", "llist", ".", "l", "(", "o", ")", "if", "isinstance", "(", "seq", ",", "ISeq", ")", ":", "return", "seq", ".", "cons", "(", "o", ")", "return", "Maybe", "(", "to_seq", "(", "seq", ")", ")", ".", "map", "(", "lambda", "s", ":", "s", ".", "cons", "(", "o", ")", ")", ".", "or_else", "(", "lambda", ":", "llist", ".", "l", "(", "o", ")", ")" ]
Creates a new sequence where o is the first element and seq is the rest. If seq is None, return a list containing o. If seq is not a ISeq, attempt to coerce it to a ISeq and then cons o onto the resulting sequence.
[ "Creates", "a", "new", "sequence", "where", "o", "is", "the", "first", "element", "and", "seq", "is", "the", "rest", ".", "If", "seq", "is", "None", "return", "a", "list", "containing", "o", ".", "If", "seq", "is", "not", "a", "ISeq", "attempt", "to", "coerce", "it", "to", "a", "ISeq", "and", "then", "cons", "o", "onto", "the", "resulting", "sequence", "." ]
python
test
48.444444
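Illustrative calls that hit the three branches above (llist is the persistent-list module already used in the source; return reprs are omitted because they depend on Basilisp's printers):

cons(1, None)            # one-element list holding 1
cons(1, llist.l(2, 3))   # prepends onto an existing ISeq
cons(1, [2, 3])          # a plain Python list is coerced with to_seq, then 1 is prepended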
edwards-lab/libGWAS
libgwas/pheno_covar.py
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/pheno_covar.py#L94-L102
def freeze_subjects(self): """Converts variable data into numpy arrays. This is required after all subjects have been added via the add_subject function, since we don't know ahead of time who is participating in the analysis due to various filtering possibilities. """ self.phenotype_data = numpy.array(self.phenotype_data) self.covariate_data = numpy.array(self.covariate_data)
[ "def", "freeze_subjects", "(", "self", ")", ":", "self", ".", "phenotype_data", "=", "numpy", ".", "array", "(", "self", ".", "phenotype_data", ")", "self", ".", "covariate_data", "=", "numpy", ".", "array", "(", "self", ".", "covariate_data", ")" ]
Converts variable data into numpy arrays. This is required after all subjects have been added via the add_subject function, since we don't know ahead of time who is participating in the analysis due to various filtering possibilities.
[ "Converts", "variable", "data", "into", "numpy", "arrays", "." ]
python
train
47.444444
google/grr
grr/server/grr_response_server/artifact_registry.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/artifact_registry.py#L40-L52
def AddDir(self, dirpath): """Adds a directory path as a source. Args: dirpath: a string representing a path to the directory. Returns: True if the directory is not an already existing source. """ if dirpath not in self._dirs: self._dirs.add(dirpath) return True return False
[ "def", "AddDir", "(", "self", ",", "dirpath", ")", ":", "if", "dirpath", "not", "in", "self", ".", "_dirs", ":", "self", ".", "_dirs", ".", "add", "(", "dirpath", ")", "return", "True", "return", "False" ]
Adds a directory path as a source. Args: dirpath: a string representing a path to the directory. Returns: True if the directory is not an already existing source.
[ "Adds", "a", "directory", "path", "as", "a", "source", "." ]
python
train
24.076923
albert12132/templar
templar/markdown.py
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L382-L408
def hash_codeblocks(text, hashes): """Hashes codeblocks (<pre> elements). Codeblocks are strictly defined to be (non-list) lines that are indented at least 4 spaces from the newline. Exactly 4 spaces will be stripped from the beginning of the line -- any leading whitespace after that is preserved. Codeblock lines that are separated only by blank lines will be included in the same codeblock (as will the intermediate newlines). Certain HTML entities (&, <, >, ", ') will always be escaped inside code blocks. Markdown defines code blocks to be <pre><code>, not just <pre>. Certain highlighting packages (like highlight.js) are designed to accomodate (and even look) for this type of conversion. """ def sub(match): block = match.group(1).rstrip('\n') block = re.sub(r'(?:(?<=\n)|(?<=\A)) {4}', '', block) block = escape(block) block = '<pre><code>{}</code></pre>'.format(block) hashed = hash_text(block, 'pre') hashes[hashed] = block return '\n\n' + hashed + '\n\n' return re_codeblock.sub(sub, text)
[ "def", "hash_codeblocks", "(", "text", ",", "hashes", ")", ":", "def", "sub", "(", "match", ")", ":", "block", "=", "match", ".", "group", "(", "1", ")", ".", "rstrip", "(", "'\\n'", ")", "block", "=", "re", ".", "sub", "(", "r'(?:(?<=\\n)|(?<=\\A)) {4}'", ",", "''", ",", "block", ")", "block", "=", "escape", "(", "block", ")", "block", "=", "'<pre><code>{}</code></pre>'", ".", "format", "(", "block", ")", "hashed", "=", "hash_text", "(", "block", ",", "'pre'", ")", "hashes", "[", "hashed", "]", "=", "block", "return", "'\\n\\n'", "+", "hashed", "+", "'\\n\\n'", "return", "re_codeblock", ".", "sub", "(", "sub", ",", "text", ")" ]
Hashes codeblocks (<pre> elements). Codeblocks are strictly defined to be (non-list) lines that are indented at least 4 spaces from the newline. Exactly 4 spaces will be stripped from the beginning of the line -- any leading whitespace after that is preserved. Codeblock lines that are separated only by blank lines will be included in the same codeblock (as will the intermediate newlines). Certain HTML entities (&, <, >, ", ') will always be escaped inside code blocks. Markdown defines code blocks to be <pre><code>, not just <pre>. Certain highlighting packages (like highlight.js) are designed to accomodate (and even look) for this type of conversion.
[ "Hashes", "codeblocks", "(", "<pre", ">", "elements", ")", "." ]
python
train
40.592593
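A simplified, self-contained sketch of the hashing strategy above: pull out 4-space-indented blocks, escape them, wrap them in <pre><code>, and swap them for an opaque placeholder recorded in hashes. The regex and placeholder format here are stand-ins, not templar's own re_codeblock/hash_text helpers.

import re
from hashlib import sha1
from html import escape

re_codeblock = re.compile(r'((?:^ {4}.*\n?)+)', re.M)   # assumed pattern for 4-space blocks

def hash_codeblocks(text, hashes):
    def sub(match):
        block = re.sub(r'(?m)^ {4}', '', match.group(1).rstrip('\n'))
        block = '<pre><code>{}</code></pre>'.format(escape(block))
        key = 'pre-' + sha1(block.encode()).hexdigest()[:8]
        hashes[key] = block
        return '\n\n' + key + '\n\n'
    return re_codeblock.sub(sub, text)

hashes = {}
print(hash_codeblocks('paragraph\n\n    print("hi")\n', hashes))
print(hashes)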
iotile/coretools
iotilecore/iotile/core/hw/transport/adapterstream.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapterstream.py#L467-L481
def close(self): """Close this adapter stream. This method may only be called once in the lifetime of an AdapterStream and it will shutdown the underlying device adapter, disconnect all devices and stop all background activity. If this stream is configured to save a record of all RPCs, the RPCs will be logged to a file at this point. """ try: self._loop.run_coroutine(self.adapter.stop()) finally: self._save_recording()
[ "def", "close", "(", "self", ")", ":", "try", ":", "self", ".", "_loop", ".", "run_coroutine", "(", "self", ".", "adapter", ".", "stop", "(", ")", ")", "finally", ":", "self", ".", "_save_recording", "(", ")" ]
Close this adapter stream. This method may only be called once in the lifetime of an AdapterStream and it will shutdown the underlying device adapter, disconnect all devices and stop all background activity. If this stream is configured to save a record of all RPCs, the RPCs will be logged to a file at this point.
[ "Close", "this", "adapter", "stream", "." ]
python
train
33.8
acutesoftware/virtual-AI-simulator
vais/examples/game_incremental.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/examples/game_incremental.py#L202-L211
def get_inventory_by_name(nme, character): """ returns the inventory index by name """ for ndx, sk in enumerate(character["inventory"]): #print("sk = ", sk, " , nme = ", nme) if sk["name"] == nme: return ndx return 0
[ "def", "get_inventory_by_name", "(", "nme", ",", "character", ")", ":", "for", "ndx", ",", "sk", "in", "enumerate", "(", "character", "[", "\"inventory\"", "]", ")", ":", "#print(\"sk = \", sk, \" , nme = \", nme)", "if", "sk", "[", "\"name\"", "]", "==", "nme", ":", "return", "ndx", "return", "0" ]
returns the inventory index by name
[ "returns", "the", "inventory", "index", "by", "name" ]
python
train
25.6
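A usage sketch for the lookup above, assuming the function is in scope; the character dict is invented but matches the structure it expects (an "inventory" list of dicts with a "name" key).

character = {"inventory": [{"name": "sword"}, {"name": "potion"}]}
print(get_inventory_by_name("potion", character))   # 1
print(get_inventory_by_name("shield", character))   # 0 (not found falls back to index 0)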
portantier/habu
habu/cli/cmd_nc.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_nc.py#L30-L118
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol): """Some kind of netcat/ncat replacement. The execution emulates the feeling of this popular tools. Example: \b $ habu.nc --crlf www.portantier.com 80 Connected to 45.77.113.133 80 HEAD / HTTP/1.0 \b HTTP/1.0 301 Moved Permanently Date: Thu, 26 Jul 2018 21:10:51 GMT Server: OpenBSD httpd Connection: close Content-Type: text/html Content-Length: 443 Location: https://www.portantier.com/ """ resolved = socket.getaddrinfo(host, port) families = { '4' : [ socket.AF_INET ], '6' : [ socket.AF_INET6 ], '46': [ socket.AF_INET, socket.AF_INET6] } address = None for r in resolved: if r[0] in families[family]: address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0)) if not address: print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr) sys.exit(1) to_send = b'' if not source_ip: source_ip = which_source_for(address[4][0]) if protocol == 'tcp': s = socket.socket(address[0], socket.SOCK_STREAM) else: s = socket.socket(address[0], socket.SOCK_DGRAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((source_ip, source_port)) if ssl_enable: ssl_context = ssl.SSLContext() s = ssl_context.wrap_socket(s, server_side=False) try: s.connect((address[4][0], port)) print('Connected to', address[4][0], port, file=sys.stderr) except Exception as e: print(e, file=sys.stderr) sys.exit(1) while True: iready, oready, eready = select.select([sys.stdin, s], [], [s]) for i in iready: if i == sys.stdin: if crlf: to_send += i.readline().replace('\n', '\r\n').encode() else: to_send += i.readline().encode() else: received = s.recv(4096) if not received: sys.exit(1) os.write(sys.stdout.fileno(), received) iready, oready, eready = select.select([], [s], [s]) for o in oready: if to_send: o.send(to_send) to_send = b'' s.close()
[ "def", "cmd_nc", "(", "host", ",", "port", ",", "family", ",", "ssl_enable", ",", "crlf", ",", "source_ip", ",", "source_port", ",", "protocol", ")", ":", "resolved", "=", "socket", ".", "getaddrinfo", "(", "host", ",", "port", ")", "families", "=", "{", "'4'", ":", "[", "socket", ".", "AF_INET", "]", ",", "'6'", ":", "[", "socket", ".", "AF_INET6", "]", ",", "'46'", ":", "[", "socket", ".", "AF_INET", ",", "socket", ".", "AF_INET6", "]", "}", "address", "=", "None", "for", "r", "in", "resolved", ":", "if", "r", "[", "0", "]", "in", "families", "[", "family", "]", ":", "address", "=", "r", "# (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))", "if", "not", "address", ":", "print", "(", "'Could not resolve {} to the ip address family selected ({})'", ".", "format", "(", "host", ",", "family", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "to_send", "=", "b''", "if", "not", "source_ip", ":", "source_ip", "=", "which_source_for", "(", "address", "[", "4", "]", "[", "0", "]", ")", "if", "protocol", "==", "'tcp'", ":", "s", "=", "socket", ".", "socket", "(", "address", "[", "0", "]", ",", "socket", ".", "SOCK_STREAM", ")", "else", ":", "s", "=", "socket", ".", "socket", "(", "address", "[", "0", "]", ",", "socket", ".", "SOCK_DGRAM", ")", "s", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "s", ".", "bind", "(", "(", "source_ip", ",", "source_port", ")", ")", "if", "ssl_enable", ":", "ssl_context", "=", "ssl", ".", "SSLContext", "(", ")", "s", "=", "ssl_context", ".", "wrap_socket", "(", "s", ",", "server_side", "=", "False", ")", "try", ":", "s", ".", "connect", "(", "(", "address", "[", "4", "]", "[", "0", "]", ",", "port", ")", ")", "print", "(", "'Connected to'", ",", "address", "[", "4", "]", "[", "0", "]", ",", "port", ",", "file", "=", "sys", ".", "stderr", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "while", "True", ":", "iready", ",", "oready", ",", "eready", "=", "select", ".", "select", "(", "[", "sys", ".", "stdin", ",", "s", "]", ",", "[", "]", ",", "[", "s", "]", ")", "for", "i", "in", "iready", ":", "if", "i", "==", "sys", ".", "stdin", ":", "if", "crlf", ":", "to_send", "+=", "i", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "'\\r\\n'", ")", ".", "encode", "(", ")", "else", ":", "to_send", "+=", "i", ".", "readline", "(", ")", ".", "encode", "(", ")", "else", ":", "received", "=", "s", ".", "recv", "(", "4096", ")", "if", "not", "received", ":", "sys", ".", "exit", "(", "1", ")", "os", ".", "write", "(", "sys", ".", "stdout", ".", "fileno", "(", ")", ",", "received", ")", "iready", ",", "oready", ",", "eready", "=", "select", ".", "select", "(", "[", "]", ",", "[", "s", "]", ",", "[", "s", "]", ")", "for", "o", "in", "oready", ":", "if", "to_send", ":", "o", ".", "send", "(", "to_send", ")", "to_send", "=", "b''", "s", ".", "close", "(", ")" ]
Some kind of netcat/ncat replacement. The execution emulates the feeling of this popular tools. Example: \b $ habu.nc --crlf www.portantier.com 80 Connected to 45.77.113.133 80 HEAD / HTTP/1.0 \b HTTP/1.0 301 Moved Permanently Date: Thu, 26 Jul 2018 21:10:51 GMT Server: OpenBSD httpd Connection: close Content-Type: text/html Content-Length: 443 Location: https://www.portantier.com/
[ "Some", "kind", "of", "netcat", "/", "ncat", "replacement", "." ]
python
train
26.730337
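The address-family filtering at the top of cmd_nc can be exercised on its own. A hedged sketch (function name invented) that mirrors just that resolution step:

import socket

def pick_address(host, port, family='46'):
    # '4', '6' or '46' selects which getaddrinfo results are acceptable,
    # exactly as in the families dict above; returns the first match or None.
    families = {
        '4': [socket.AF_INET],
        '6': [socket.AF_INET6],
        '46': [socket.AF_INET, socket.AF_INET6],
    }
    for r in socket.getaddrinfo(host, port):
        if r[0] in families[family]:
            return r
    return None

print(pick_address('www.portantier.com', 80, family='4'))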
msmbuilder/msmbuilder
msmbuilder/decomposition/ksparsetica.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/decomposition/ksparsetica.py#L129-L133
def _truncate(self, x, k): ''' given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0 ''' not_F = np.argsort(np.abs(x))[:-k] x[not_F] = 0 return x
[ "def", "_truncate", "(", "self", ",", "x", ",", "k", ")", ":", "not_F", "=", "np", ".", "argsort", "(", "np", ".", "abs", "(", "x", ")", ")", "[", ":", "-", "k", "]", "x", "[", "not_F", "]", "=", "0", "return", "x" ]
given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0
[ "given", "a", "vector", "x", "leave", "its", "top", "-", "k", "absolute", "-", "value", "entries", "alone", "and", "set", "the", "rest", "to", "0" ]
python
train
41
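The truncation is easy to check standalone; the helper below repeats the same numpy idiom without the class (keep the k largest-magnitude entries, zero the rest, in place).

import numpy as np

def truncate(x, k):
    not_top_k = np.argsort(np.abs(x))[:-k]   # indices of everything except the top-k by |value|
    x[not_top_k] = 0
    return x

print(truncate(np.array([0.1, -3.0, 0.5, 2.0]), k=2))   # [ 0. -3.  0.  2.]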
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L343-L351
def strip_spaces(x): """ Strips spaces :param x: :return: """ x = x.replace(b' ', b'') x = x.replace(b'\t', b'') return x
[ "def", "strip_spaces", "(", "x", ")", ":", "x", "=", "x", ".", "replace", "(", "b' '", ",", "b''", ")", "x", "=", "x", ".", "replace", "(", "b'\\t'", ",", "b''", ")", "return", "x" ]
Strips spaces :param x: :return:
[ "Strips", "spaces", ":", "param", "x", ":", ":", "return", ":" ]
python
train
16.111111
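A quick usage sketch, assuming the function above is in scope; note that it operates on byte strings.

print(strip_spaces(b'-----BEGIN \t PUBLIC KEY-----'))   # b'-----BEGINPUBLICKEY-----'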
gc3-uzh-ch/elasticluster
elasticluster/cluster.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/cluster.py#L1321-L1338
def stop(self, wait=False): """ Terminate the VM instance launched on the cloud for this specific node. """ if self.instance_id is not None: log.info("Shutting down node `%s` (VM instance `%s`) ...", self.name, self.instance_id) self._cloud_provider.stop_instance(self.instance_id) if wait: while self.is_alive(): time.sleep(1) # When an instance is terminated, the EC2 cloud provider will # basically return it as "running" state. Setting the # `instance_id` attribute to None will force `is_alive()` # method not to check with the cloud provider, and forever # forgetting about the instance id. self.instance_id = None
[ "def", "stop", "(", "self", ",", "wait", "=", "False", ")", ":", "if", "self", ".", "instance_id", "is", "not", "None", ":", "log", ".", "info", "(", "\"Shutting down node `%s` (VM instance `%s`) ...\"", ",", "self", ".", "name", ",", "self", ".", "instance_id", ")", "self", ".", "_cloud_provider", ".", "stop_instance", "(", "self", ".", "instance_id", ")", "if", "wait", ":", "while", "self", ".", "is_alive", "(", ")", ":", "time", ".", "sleep", "(", "1", ")", "# When an instance is terminated, the EC2 cloud provider will", "# basically return it as \"running\" state. Setting the", "# `instance_id` attribute to None will force `is_alive()`", "# method not to check with the cloud provider, and forever", "# forgetting about the instance id.", "self", ".", "instance_id", "=", "None" ]
Terminate the VM instance launched on the cloud for this specific node.
[ "Terminate", "the", "VM", "instance", "launched", "on", "the", "cloud", "for", "this", "specific", "node", "." ]
python
train
44.5
molmod/molmod
molmod/molecules.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecules.py#L191-L202
def chemical_formula(self): """the chemical formula of the molecule""" counts = {} for number in self.numbers: counts[number] = counts.get(number, 0)+1 items = [] for number, count in sorted(counts.items(), reverse=True): if count == 1: items.append(periodic[number].symbol) else: items.append("%s%i" % (periodic[number].symbol, count)) return "".join(items)
[ "def", "chemical_formula", "(", "self", ")", ":", "counts", "=", "{", "}", "for", "number", "in", "self", ".", "numbers", ":", "counts", "[", "number", "]", "=", "counts", ".", "get", "(", "number", ",", "0", ")", "+", "1", "items", "=", "[", "]", "for", "number", ",", "count", "in", "sorted", "(", "counts", ".", "items", "(", ")", ",", "reverse", "=", "True", ")", ":", "if", "count", "==", "1", ":", "items", ".", "append", "(", "periodic", "[", "number", "]", ".", "symbol", ")", "else", ":", "items", ".", "append", "(", "\"%s%i\"", "%", "(", "periodic", "[", "number", "]", ".", "symbol", ",", "count", ")", ")", "return", "\"\"", ".", "join", "(", "items", ")" ]
the chemical formula of the molecule
[ "the", "chemical", "formula", "of", "the", "molecule" ]
python
train
38.583333
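A self-contained sketch of the formula-building loop above, with a three-entry symbol table standing in for molmod's periodic data; heavier elements come first because the counts are sorted by atomic number in reverse.

from collections import Counter

SYMBOLS = {1: "H", 6: "C", 8: "O"}   # tiny stand-in for the periodic table

def chemical_formula(numbers):
    items = []
    for number, count in sorted(Counter(numbers).items(), reverse=True):
        items.append(SYMBOLS[number] if count == 1 else "%s%i" % (SYMBOLS[number], count))
    return "".join(items)

print(chemical_formula([8, 1, 1]))   # 'OH2'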
facelessuser/pyspelling
pyspelling/filters/stylesheets.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/stylesheets.py#L62-L71
def setup(self): """Setup.""" self.blocks = self.config['block_comments'] self.lines = self.config['line_comments'] self.group_comments = self.config['group_comments'] # If the style isn't found, just go with CSS, then use the appropriate prefix. self.stylesheets = STYLESHEET_TYPE.get(self.config['stylesheets'].lower(), CSS) self.prefix = [k for k, v in STYLESHEET_TYPE.items() if v == SASS][0] self.pattern = RE_CSS if self.stylesheets == CSS else RE_SCSS
[ "def", "setup", "(", "self", ")", ":", "self", ".", "blocks", "=", "self", ".", "config", "[", "'block_comments'", "]", "self", ".", "lines", "=", "self", ".", "config", "[", "'line_comments'", "]", "self", ".", "group_comments", "=", "self", ".", "config", "[", "'group_comments'", "]", "# If the style isn't found, just go with CSS, then use the appropriate prefix.", "self", ".", "stylesheets", "=", "STYLESHEET_TYPE", ".", "get", "(", "self", ".", "config", "[", "'stylesheets'", "]", ".", "lower", "(", ")", ",", "CSS", ")", "self", ".", "prefix", "=", "[", "k", "for", "k", ",", "v", "in", "STYLESHEET_TYPE", ".", "items", "(", ")", "if", "v", "==", "SASS", "]", "[", "0", "]", "self", ".", "pattern", "=", "RE_CSS", "if", "self", ".", "stylesheets", "==", "CSS", "else", "RE_SCSS" ]
Setup.
[ "Setup", "." ]
python
train
51.4
Opentrons/opentrons
api/src/opentrons/hardware_control/modules/update.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/modules/update.py#L44-L88
async def update_firmware(port: str, firmware_file_path: str, loop: Optional[asyncio.AbstractEventLoop])\ -> Tuple[str, Tuple[bool, str]]: """ Run avrdude firmware upload command. Switch back to normal module port Note: For modules with old bootloader, the kernel could assign the module a new port after the update (since the board is automatically reset). Scan for such a port change and use the appropriate port. Returns a tuple of the new port to communicate on (or None if it was not found) and a tuple of success and message from avrdude. """ ports_before_update = await _discover_ports() config_file_path = os.path.join(package_root, 'config', 'modules', 'avrdude.conf') kwargs: Dict[str, Any] = { 'stdout': asyncio.subprocess.PIPE, 'stderr': asyncio.subprocess.PIPE } if loop: kwargs['loop'] = loop proc = await asyncio.create_subprocess_exec( 'avrdude', '-C{}'.format(config_file_path), '-v', '-p{}'.format(PART_NO), '-c{}'.format(PROGRAMMER_ID), '-P{}'.format(port), '-b{}'.format(BAUDRATE), '-D', '-Uflash:w:{}:i'.format(firmware_file_path), **kwargs) await proc.wait() _result = await proc.communicate() result = _result[1].decode() avrdude_res = _format_avrdude_response(result) if avrdude_res[0]: log.debug(result) else: log.error("Failed to update module firmware for {}: {}" .format(port, avrdude_res[1])) new_port = await _port_on_mode_switch(ports_before_update) log.info("New port: {}".format(new_port)) return new_port, avrdude_res
[ "async", "def", "update_firmware", "(", "port", ":", "str", ",", "firmware_file_path", ":", "str", ",", "loop", ":", "Optional", "[", "asyncio", ".", "AbstractEventLoop", "]", ")", "->", "Tuple", "[", "str", ",", "Tuple", "[", "bool", ",", "str", "]", "]", ":", "ports_before_update", "=", "await", "_discover_ports", "(", ")", "config_file_path", "=", "os", ".", "path", ".", "join", "(", "package_root", ",", "'config'", ",", "'modules'", ",", "'avrdude.conf'", ")", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "'stdout'", ":", "asyncio", ".", "subprocess", ".", "PIPE", ",", "'stderr'", ":", "asyncio", ".", "subprocess", ".", "PIPE", "}", "if", "loop", ":", "kwargs", "[", "'loop'", "]", "=", "loop", "proc", "=", "await", "asyncio", ".", "create_subprocess_exec", "(", "'avrdude'", ",", "'-C{}'", ".", "format", "(", "config_file_path", ")", ",", "'-v'", ",", "'-p{}'", ".", "format", "(", "PART_NO", ")", ",", "'-c{}'", ".", "format", "(", "PROGRAMMER_ID", ")", ",", "'-P{}'", ".", "format", "(", "port", ")", ",", "'-b{}'", ".", "format", "(", "BAUDRATE", ")", ",", "'-D'", ",", "'-Uflash:w:{}:i'", ".", "format", "(", "firmware_file_path", ")", ",", "*", "*", "kwargs", ")", "await", "proc", ".", "wait", "(", ")", "_result", "=", "await", "proc", ".", "communicate", "(", ")", "result", "=", "_result", "[", "1", "]", ".", "decode", "(", ")", "avrdude_res", "=", "_format_avrdude_response", "(", "result", ")", "if", "avrdude_res", "[", "0", "]", ":", "log", ".", "debug", "(", "result", ")", "else", ":", "log", ".", "error", "(", "\"Failed to update module firmware for {}: {}\"", ".", "format", "(", "port", ",", "avrdude_res", "[", "1", "]", ")", ")", "new_port", "=", "await", "_port_on_mode_switch", "(", "ports_before_update", ")", "log", ".", "info", "(", "\"New port: {}\"", ".", "format", "(", "new_port", ")", ")", "return", "new_port", ",", "avrdude_res" ]
Run avrdude firmware upload command. Switch back to normal module port Note: For modules with old bootloader, the kernel could assign the module a new port after the update (since the board is automatically reset). Scan for such a port change and use the appropriate port. Returns a tuple of the new port to communicate on (or None if it was not found) and a tuple of success and message from avrdude.
[ "Run", "avrdude", "firmware", "upload", "command", ".", "Switch", "back", "to", "normal", "module", "port" ]
python
train
38.466667
tanghaibao/jcvi
jcvi/projects/ies.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/ies.py#L244-L379
def deletion(args): """ %prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed Find IES based on mapping MAC reads to MIC genome. """ p = OptionParser(deletion.__doc__) p.add_option("--mindepth", default=3, type="int", help="Minimum depth to call a deletion") p.add_option("--minspan", default=30, type="int", help="Minimum span to call a deletion") p.add_option("--split", default=False, action="store_true", help="Break at cigar N into separate parts") p.set_tmpdir() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bedfile, gapsbedfile = args if bedfile.endswith(".bam"): bamfile = bedfile bedfile = bamfile.replace(".sorted.", ".").replace(".bam", ".bed") if need_update(bamfile, bedfile): cmd = "bamToBed -i {0}".format(bamfile) if opts.split: cmd += " -split" cmd += " | cut -f1-4" sh(cmd, outfile=bedfile) sort_tmpdir = "--tmpdir={0}".format(opts.tmpdir) if bedfile.endswith(".sorted.bed"): pf = bedfile.rsplit(".", 2)[0] sortedbedfile = bedfile else: pf = bedfile.rsplit(".", 1)[0] sortedbedfile = pf + ".sorted.bed" if need_update(bedfile, sortedbedfile): sort([bedfile, "-u", "--accn", sort_tmpdir]) # Find reads that contain multiple matches ibedfile = pf + ".d.bed" if need_update(sortedbedfile, ibedfile): bed = Bed(sortedbedfile, sorted=False) fw = open(ibedfile, "w") logging.debug("Write deletions to `{0}`.".format(ibedfile)) for accn, bb in groupby(bed, key=lambda x: x.accn): bb = list(bb) branges = [(x.seqid, x.start, x.end) for x in bb] iranges = range_interleave(branges) for seqid, start, end in iranges: if end - start + 1 < opts.minspan: continue print("\t".join(str(x) for x in \ (seqid, start - 1, end, accn + '-d')), file=fw) fw.close() # Uniqify the insertions and count occurrences countbedfile = pf + ".uniq.bed" if need_update(ibedfile, countbedfile): bed = Bed(ibedfile) fw = open(countbedfile, "w") logging.debug("Write counts to `{0}`.".format(countbedfile)) registry = Counter((x.seqid, x.start, x.end) for x in bed) ies_id = 1 for (seqid, start, end), count in registry.items(): ies_name = "{0:05d}-r{1}".format(ies_id, count) if count < opts.mindepth: continue print("\t".join(str(x) for x in \ (seqid, start - 1, end, ies_name)), file=fw) ies_id += 1 fw.close() sort([countbedfile, "-i", sort_tmpdir]) # Remove deletions that contain some read depth depthbedfile = pf + ".depth.bed" if need_update((sortedbedfile, countbedfile), depthbedfile): depth([sortedbedfile, countbedfile, "--outfile={0}".format(depthbedfile)]) validbedfile = pf + ".valid.bed" if need_update(depthbedfile, validbedfile): fw = open(validbedfile, "w") logging.debug("Filter valid deletions to `{0}`.".format(validbedfile)) bed = Bed(depthbedfile) all_scores = [float(b.score) for b in bed] lb, ub = outlier_cutoff(all_scores) logging.debug("Bounds for depths: LB={0:.2f} (ignored) UB={1:.2f}".format(lb, ub)) for b in bed: if float(b.score) > ub: continue print(b, file=fw) fw.close() # Remove deletions that contain sequencing gaps on its flanks selectedbedfile = pf + ".selected.bed" if need_update(validbedfile, selectedbedfile): flanksbedfile = pf + ".flanks.bed" fw = open(flanksbedfile, "w") bed = Bed(validbedfile) flank = 100 logging.debug("Write deletion flanks to `{0}`.".format(flanksbedfile)) for b in bed: start, end = b.start, b.end b.start, b.end = start, min(start + flank - 1, end) print(b, file=fw) b.start, b.end = max(start, end - flank + 1), end print(b, file=fw) fw.close() intersectidsfile = pf + ".intersect.ids" cmd = "intersectBed -a {0} -b {1}".format(flanksbedfile, gapsbedfile) cmd 
+= " | cut -f4 | sort -u" sh(cmd, outfile=intersectidsfile) some([validbedfile, intersectidsfile, "-v", "--outfile={0}".format(selectedbedfile)]) # Find best-scoring non-overlapping set iesbedfile = pf + ".ies.bed" if need_update(selectedbedfile, iesbedfile): bed = Bed(selectedbedfile) fw = open(iesbedfile, "w") logging.debug("Write IES to `{0}`.".format(iesbedfile)) branges = [Range(x.seqid, x.start, x.end, int(x.accn.rsplit("r")[-1]), i) \ for i, x in enumerate(bed)] iranges, iscore = range_chain(branges) logging.debug("Best chain score: {0} ({1} IES)".\ format(iscore, len(iranges))) ies_id = 1 for seqid, start, end, score, id in iranges: ies_name = "IES-{0:05d}-r{1}".format(ies_id, score) span = end - start + 1 print("\t".join(str(x) for x in \ (seqid, start - 1, end, ies_name, span)), file=fw) ies_id += 1 fw.close()
[ "def", "deletion", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "deletion", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--mindepth\"", ",", "default", "=", "3", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Minimum depth to call a deletion\"", ")", "p", ".", "add_option", "(", "\"--minspan\"", ",", "default", "=", "30", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Minimum span to call a deletion\"", ")", "p", ".", "add_option", "(", "\"--split\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Break at cigar N into separate parts\"", ")", "p", ".", "set_tmpdir", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfile", ",", "gapsbedfile", "=", "args", "if", "bedfile", ".", "endswith", "(", "\".bam\"", ")", ":", "bamfile", "=", "bedfile", "bedfile", "=", "bamfile", ".", "replace", "(", "\".sorted.\"", ",", "\".\"", ")", ".", "replace", "(", "\".bam\"", ",", "\".bed\"", ")", "if", "need_update", "(", "bamfile", ",", "bedfile", ")", ":", "cmd", "=", "\"bamToBed -i {0}\"", ".", "format", "(", "bamfile", ")", "if", "opts", ".", "split", ":", "cmd", "+=", "\" -split\"", "cmd", "+=", "\" | cut -f1-4\"", "sh", "(", "cmd", ",", "outfile", "=", "bedfile", ")", "sort_tmpdir", "=", "\"--tmpdir={0}\"", ".", "format", "(", "opts", ".", "tmpdir", ")", "if", "bedfile", ".", "endswith", "(", "\".sorted.bed\"", ")", ":", "pf", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "2", ")", "[", "0", "]", "sortedbedfile", "=", "bedfile", "else", ":", "pf", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "sortedbedfile", "=", "pf", "+", "\".sorted.bed\"", "if", "need_update", "(", "bedfile", ",", "sortedbedfile", ")", ":", "sort", "(", "[", "bedfile", ",", "\"-u\"", ",", "\"--accn\"", ",", "sort_tmpdir", "]", ")", "# Find reads that contain multiple matches", "ibedfile", "=", "pf", "+", "\".d.bed\"", "if", "need_update", "(", "sortedbedfile", ",", "ibedfile", ")", ":", "bed", "=", "Bed", "(", "sortedbedfile", ",", "sorted", "=", "False", ")", "fw", "=", "open", "(", "ibedfile", ",", "\"w\"", ")", "logging", ".", "debug", "(", "\"Write deletions to `{0}`.\"", ".", "format", "(", "ibedfile", ")", ")", "for", "accn", ",", "bb", "in", "groupby", "(", "bed", ",", "key", "=", "lambda", "x", ":", "x", ".", "accn", ")", ":", "bb", "=", "list", "(", "bb", ")", "branges", "=", "[", "(", "x", ".", "seqid", ",", "x", ".", "start", ",", "x", ".", "end", ")", "for", "x", "in", "bb", "]", "iranges", "=", "range_interleave", "(", "branges", ")", "for", "seqid", ",", "start", ",", "end", "in", "iranges", ":", "if", "end", "-", "start", "+", "1", "<", "opts", ".", "minspan", ":", "continue", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "seqid", ",", "start", "-", "1", ",", "end", ",", "accn", "+", "'-d'", ")", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "# Uniqify the insertions and count occurrences", "countbedfile", "=", "pf", "+", "\".uniq.bed\"", "if", "need_update", "(", "ibedfile", ",", "countbedfile", ")", ":", "bed", "=", "Bed", "(", "ibedfile", ")", "fw", "=", "open", "(", "countbedfile", ",", "\"w\"", ")", "logging", ".", "debug", "(", "\"Write counts to `{0}`.\"", ".", "format", "(", "countbedfile", ")", ")", "registry", "=", "Counter", "(", "(", "x", ".", "seqid", ",", "x", ".", "start", ",", "x", ".", "end", ")", 
"for", "x", "in", "bed", ")", "ies_id", "=", "1", "for", "(", "seqid", ",", "start", ",", "end", ")", ",", "count", "in", "registry", ".", "items", "(", ")", ":", "ies_name", "=", "\"{0:05d}-r{1}\"", ".", "format", "(", "ies_id", ",", "count", ")", "if", "count", "<", "opts", ".", "mindepth", ":", "continue", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "seqid", ",", "start", "-", "1", ",", "end", ",", "ies_name", ")", ")", ",", "file", "=", "fw", ")", "ies_id", "+=", "1", "fw", ".", "close", "(", ")", "sort", "(", "[", "countbedfile", ",", "\"-i\"", ",", "sort_tmpdir", "]", ")", "# Remove deletions that contain some read depth", "depthbedfile", "=", "pf", "+", "\".depth.bed\"", "if", "need_update", "(", "(", "sortedbedfile", ",", "countbedfile", ")", ",", "depthbedfile", ")", ":", "depth", "(", "[", "sortedbedfile", ",", "countbedfile", ",", "\"--outfile={0}\"", ".", "format", "(", "depthbedfile", ")", "]", ")", "validbedfile", "=", "pf", "+", "\".valid.bed\"", "if", "need_update", "(", "depthbedfile", ",", "validbedfile", ")", ":", "fw", "=", "open", "(", "validbedfile", ",", "\"w\"", ")", "logging", ".", "debug", "(", "\"Filter valid deletions to `{0}`.\"", ".", "format", "(", "validbedfile", ")", ")", "bed", "=", "Bed", "(", "depthbedfile", ")", "all_scores", "=", "[", "float", "(", "b", ".", "score", ")", "for", "b", "in", "bed", "]", "lb", ",", "ub", "=", "outlier_cutoff", "(", "all_scores", ")", "logging", ".", "debug", "(", "\"Bounds for depths: LB={0:.2f} (ignored) UB={1:.2f}\"", ".", "format", "(", "lb", ",", "ub", ")", ")", "for", "b", "in", "bed", ":", "if", "float", "(", "b", ".", "score", ")", ">", "ub", ":", "continue", "print", "(", "b", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "# Remove deletions that contain sequencing gaps on its flanks", "selectedbedfile", "=", "pf", "+", "\".selected.bed\"", "if", "need_update", "(", "validbedfile", ",", "selectedbedfile", ")", ":", "flanksbedfile", "=", "pf", "+", "\".flanks.bed\"", "fw", "=", "open", "(", "flanksbedfile", ",", "\"w\"", ")", "bed", "=", "Bed", "(", "validbedfile", ")", "flank", "=", "100", "logging", ".", "debug", "(", "\"Write deletion flanks to `{0}`.\"", ".", "format", "(", "flanksbedfile", ")", ")", "for", "b", "in", "bed", ":", "start", ",", "end", "=", "b", ".", "start", ",", "b", ".", "end", "b", ".", "start", ",", "b", ".", "end", "=", "start", ",", "min", "(", "start", "+", "flank", "-", "1", ",", "end", ")", "print", "(", "b", ",", "file", "=", "fw", ")", "b", ".", "start", ",", "b", ".", "end", "=", "max", "(", "start", ",", "end", "-", "flank", "+", "1", ")", ",", "end", "print", "(", "b", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "intersectidsfile", "=", "pf", "+", "\".intersect.ids\"", "cmd", "=", "\"intersectBed -a {0} -b {1}\"", ".", "format", "(", "flanksbedfile", ",", "gapsbedfile", ")", "cmd", "+=", "\" | cut -f4 | sort -u\"", "sh", "(", "cmd", ",", "outfile", "=", "intersectidsfile", ")", "some", "(", "[", "validbedfile", ",", "intersectidsfile", ",", "\"-v\"", ",", "\"--outfile={0}\"", ".", "format", "(", "selectedbedfile", ")", "]", ")", "# Find best-scoring non-overlapping set", "iesbedfile", "=", "pf", "+", "\".ies.bed\"", "if", "need_update", "(", "selectedbedfile", ",", "iesbedfile", ")", ":", "bed", "=", "Bed", "(", "selectedbedfile", ")", "fw", "=", "open", "(", "iesbedfile", ",", "\"w\"", ")", "logging", ".", "debug", "(", "\"Write IES to `{0}`.\"", ".", "format", "(", "iesbedfile", ")", ")", "branges", "=", "[", 
"Range", "(", "x", ".", "seqid", ",", "x", ".", "start", ",", "x", ".", "end", ",", "int", "(", "x", ".", "accn", ".", "rsplit", "(", "\"r\"", ")", "[", "-", "1", "]", ")", ",", "i", ")", "for", "i", ",", "x", "in", "enumerate", "(", "bed", ")", "]", "iranges", ",", "iscore", "=", "range_chain", "(", "branges", ")", "logging", ".", "debug", "(", "\"Best chain score: {0} ({1} IES)\"", ".", "format", "(", "iscore", ",", "len", "(", "iranges", ")", ")", ")", "ies_id", "=", "1", "for", "seqid", ",", "start", ",", "end", ",", "score", ",", "id", "in", "iranges", ":", "ies_name", "=", "\"IES-{0:05d}-r{1}\"", ".", "format", "(", "ies_id", ",", "score", ")", "span", "=", "end", "-", "start", "+", "1", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "seqid", ",", "start", "-", "1", ",", "end", ",", "ies_name", ",", "span", ")", ")", ",", "file", "=", "fw", ")", "ies_id", "+=", "1", "fw", ".", "close", "(", ")" ]
%prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed Find IES based on mapping MAC reads to MIC genome.
[ "%prog", "deletion", "[", "mac", ".", "mic", ".", "bam|mac", ".", "mic", ".", "bed", "]", "mic", ".", "gaps", ".", "bed" ]
python
train
39.691176
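The heart of the pipeline above is the per-read step: the gaps between one read's aligned blocks (computed by jcvi's range_interleave) become candidate deletions when they span at least --minspan bases. A simplified, self-contained re-statement of just that step, for illustration only.

def candidate_deletions(blocks, minspan=30):
    # blocks: list of (seqid, start, end) alignments for a single read, 1-based inclusive
    blocks = sorted(blocks)
    out = []
    for (sid_a, _, end_a), (sid_b, start_b, _) in zip(blocks, blocks[1:]):
        if sid_a != sid_b:
            continue
        gap_start, gap_end = end_a + 1, start_b - 1
        if gap_end - gap_start + 1 >= minspan:
            out.append((sid_a, gap_start, gap_end))
    return out

print(candidate_deletions([("chr1", 100, 200), ("chr1", 260, 400)]))   # [('chr1', 201, 259)]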
pyviz/holoviews
holoviews/plotting/bokeh/heatmap.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/heatmap.py#L400-L420
def _get_ann_labels_data(self, order_ann, bins_ann): """Generate ColumnDataSource dictionary for annular labels. """ if self.yticks is None: return dict(x=[], y=[], text=[], angle=[]) mapping = self._compute_tick_mapping("radius", order_ann, bins_ann) values = [(label, radius[0]) for label, radius in mapping.items()] labels, radius = zip(*values) radius = np.array(radius) y_coord = np.sin(np.deg2rad(self.yrotation)) * radius + self.max_radius x_coord = np.cos(np.deg2rad(self.yrotation)) * radius + self.max_radius return dict(x=x_coord, y=y_coord, text=labels, angle=[0]*len(labels))
[ "def", "_get_ann_labels_data", "(", "self", ",", "order_ann", ",", "bins_ann", ")", ":", "if", "self", ".", "yticks", "is", "None", ":", "return", "dict", "(", "x", "=", "[", "]", ",", "y", "=", "[", "]", ",", "text", "=", "[", "]", ",", "angle", "=", "[", "]", ")", "mapping", "=", "self", ".", "_compute_tick_mapping", "(", "\"radius\"", ",", "order_ann", ",", "bins_ann", ")", "values", "=", "[", "(", "label", ",", "radius", "[", "0", "]", ")", "for", "label", ",", "radius", "in", "mapping", ".", "items", "(", ")", "]", "labels", ",", "radius", "=", "zip", "(", "*", "values", ")", "radius", "=", "np", ".", "array", "(", "radius", ")", "y_coord", "=", "np", ".", "sin", "(", "np", ".", "deg2rad", "(", "self", ".", "yrotation", ")", ")", "*", "radius", "+", "self", ".", "max_radius", "x_coord", "=", "np", ".", "cos", "(", "np", ".", "deg2rad", "(", "self", ".", "yrotation", ")", ")", "*", "radius", "+", "self", ".", "max_radius", "return", "dict", "(", "x", "=", "x_coord", ",", "y", "=", "y_coord", ",", "text", "=", "labels", ",", "angle", "=", "[", "0", "]", "*", "len", "(", "labels", ")", ")" ]
Generate ColumnDataSource dictionary for annular labels.
[ "Generate", "ColumnDataSource", "dictionary", "for", "annular", "labels", "." ]
python
train
34.571429
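The coordinate arithmetic above is plain trigonometry and can be run standalone; max_radius and yrotation below are assumed example values, not the plot defaults.

import numpy as np

max_radius, yrotation = 0.5, 90
radius = np.array([0.1, 0.3, 0.5])
y = np.sin(np.deg2rad(yrotation)) * radius + max_radius
x = np.cos(np.deg2rad(yrotation)) * radius + max_radius
print(list(zip(x.round(3), y.round(3))))   # labels stacked straight above the centre at 90 degrees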
sepandhaghighi/art
art/art.py
https://github.com/sepandhaghighi/art/blob/c5b0409de76464b0714c377f8fca17716f3a9482/art/art.py#L181-L194
def tprint(text, font=DEFAULT_FONT, chr_ignore=True): r""" Print art text (support \n). :param text: input text :type text:str :param font: input font :type font:str :param chr_ignore: ignore not supported character :type chr_ignore:bool :return: None """ result = text2art(text, font=font, chr_ignore=chr_ignore) print(result)
[ "def", "tprint", "(", "text", ",", "font", "=", "DEFAULT_FONT", ",", "chr_ignore", "=", "True", ")", ":", "result", "=", "text2art", "(", "text", ",", "font", "=", "font", ",", "chr_ignore", "=", "chr_ignore", ")", "print", "(", "result", ")" ]
r""" Print art text (support \n). :param text: input text :type text:str :param font: input font :type font:str :param chr_ignore: ignore not supported character :type chr_ignore:bool :return: None
[ "r", "Print", "art", "text", "(", "support", "\\", "n", ")", "." ]
python
train
25.928571
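A hedged usage sketch for the record above; the second call assumes "block" is among the fonts shipped with the art package.

from art import tprint

tprint("hello")               # ASCII art with the default font
tprint("hi", font="block")    # alternative font; unsupported characters are ignored by default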
happyleavesaoc/python-snapcast
snapcast/control/protocol.py
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/control/protocol.py#L71-L79
def request(self, method, params): """Send a JSONRPC request.""" identifier = random.randint(1, 1000) self._transport.write(jsonrpc_request(method, identifier, params)) self._buffer[identifier] = {'flag': asyncio.Event()} yield from self._buffer[identifier]['flag'].wait() result = self._buffer[identifier]['data'] del self._buffer[identifier]['data'] return result
[ "def", "request", "(", "self", ",", "method", ",", "params", ")", ":", "identifier", "=", "random", ".", "randint", "(", "1", ",", "1000", ")", "self", ".", "_transport", ".", "write", "(", "jsonrpc_request", "(", "method", ",", "identifier", ",", "params", ")", ")", "self", ".", "_buffer", "[", "identifier", "]", "=", "{", "'flag'", ":", "asyncio", ".", "Event", "(", ")", "}", "yield", "from", "self", ".", "_buffer", "[", "identifier", "]", "[", "'flag'", "]", ".", "wait", "(", ")", "result", "=", "self", ".", "_buffer", "[", "identifier", "]", "[", "'data'", "]", "del", "self", ".", "_buffer", "[", "identifier", "]", "[", "'data'", "]", "return", "result" ]
Send a JSONRPC request.
[ "Send", "a", "JSONRPC", "request", "." ]
python
train
46.777778
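The record above correlates JSON-RPC responses with requests by parking an asyncio.Event per request id. A self-contained sketch of that pattern with a faked transport, written with modern async/await instead of yield from; every name here is invented for illustration.

import asyncio
import random

buffer = {}

async def request(method, params, send):
    identifier = random.randint(1, 1000)
    buffer[identifier] = {'flag': asyncio.Event()}
    send(method, identifier, params)
    await buffer[identifier]['flag'].wait()      # woken when the response arrives
    return buffer.pop(identifier)['data']

async def main():
    loop = asyncio.get_running_loop()

    def send(method, identifier, params):
        # fake transport: "the server" answers shortly after the write
        def answer():
            buffer[identifier]['data'] = {'result': 'ok'}
            buffer[identifier]['flag'].set()
        loop.call_later(0.01, answer)

    print(await request('Server.GetStatus', None, send))

asyncio.run(main())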
secure-systems-lab/securesystemslib
securesystemslib/interface.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/interface.py#L823-L900
def import_ecdsa_privatekey_from_file(filepath, password=None): """ <Purpose> Import the encrypted ECDSA key file in 'filepath', decrypt it, and return the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format. The 'cryptography' library is currently supported and performs the actual cryptographic routine. <Arguments> filepath: <filepath> file, an ECDSA encrypted key file. password: The password, or passphrase, to import the private key (i.e., the encrypted key file 'filepath' must be decrypted before the ECDSA key object can be returned. <Exceptions> securesystemslib.exceptions.FormatError, if the arguments are improperly formatted or the imported key object contains an invalid key type (i.e., not 'ecdsa-sha2-nistp256'). securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted. <Side Effects> 'password' is used to decrypt the 'filepath' key file. <Returns> An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'. """ # Does 'filepath' have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. securesystemslib.formats.PATH_SCHEMA.check_match(filepath) # If the caller does not provide a password argument, prompt for one. # Password confirmation disabled here, which should ideally happen only # when creating encrypted key files (i.e., improve usability). if password is None: # pragma: no cover # It is safe to specify the full path of 'filepath' in the prompt and not # worry about leaking sensitive information about the key's location. # However, care should be taken when including the full path in exceptions # and log files. password = get_password('Enter a password for the encrypted ECDSA' ' key (' + Fore.RED + filepath + Fore.RESET + '): ', confirm=False) # Does 'password' have the correct format? securesystemslib.formats.PASSWORD_SCHEMA.check_match(password) # Store the encrypted contents of 'filepath' prior to calling the decryption # routine. encrypted_key = None with open(filepath, 'rb') as file_object: encrypted_key = file_object.read() # Decrypt the loaded key file, calling the 'cryptography' library to generate # the derived encryption key from 'password'. Raise # 'securesystemslib.exceptions.CryptoError' if the decryption fails. key_object = securesystemslib.keys.decrypt_key(encrypted_key.decode('utf-8'), password) # Raise an exception if an unexpected key type is imported. if key_object['keytype'] != 'ecdsa-sha2-nistp256': message = 'Invalid key type loaded: ' + repr(key_object['keytype']) raise securesystemslib.exceptions.FormatError(message) # Add "keyid_hash_algorithms" so that equal ecdsa keys with different keyids # can be associated using supported keyid_hash_algorithms. key_object['keyid_hash_algorithms'] = \ securesystemslib.settings.HASH_ALGORITHMS return key_object
[ "def", "import_ecdsa_privatekey_from_file", "(", "filepath", ",", "password", "=", "None", ")", ":", "# Does 'filepath' have the correct format?", "# Ensure the arguments have the appropriate number of objects and object", "# types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.", "securesystemslib", ".", "formats", ".", "PATH_SCHEMA", ".", "check_match", "(", "filepath", ")", "# If the caller does not provide a password argument, prompt for one.", "# Password confirmation disabled here, which should ideally happen only", "# when creating encrypted key files (i.e., improve usability).", "if", "password", "is", "None", ":", "# pragma: no cover", "# It is safe to specify the full path of 'filepath' in the prompt and not", "# worry about leaking sensitive information about the key's location.", "# However, care should be taken when including the full path in exceptions", "# and log files.", "password", "=", "get_password", "(", "'Enter a password for the encrypted ECDSA'", "' key ('", "+", "Fore", ".", "RED", "+", "filepath", "+", "Fore", ".", "RESET", "+", "'): '", ",", "confirm", "=", "False", ")", "# Does 'password' have the correct format?", "securesystemslib", ".", "formats", ".", "PASSWORD_SCHEMA", ".", "check_match", "(", "password", ")", "# Store the encrypted contents of 'filepath' prior to calling the decryption", "# routine.", "encrypted_key", "=", "None", "with", "open", "(", "filepath", ",", "'rb'", ")", "as", "file_object", ":", "encrypted_key", "=", "file_object", ".", "read", "(", ")", "# Decrypt the loaded key file, calling the 'cryptography' library to generate", "# the derived encryption key from 'password'. Raise", "# 'securesystemslib.exceptions.CryptoError' if the decryption fails.", "key_object", "=", "securesystemslib", ".", "keys", ".", "decrypt_key", "(", "encrypted_key", ".", "decode", "(", "'utf-8'", ")", ",", "password", ")", "# Raise an exception if an unexpected key type is imported.", "if", "key_object", "[", "'keytype'", "]", "!=", "'ecdsa-sha2-nistp256'", ":", "message", "=", "'Invalid key type loaded: '", "+", "repr", "(", "key_object", "[", "'keytype'", "]", ")", "raise", "securesystemslib", ".", "exceptions", ".", "FormatError", "(", "message", ")", "# Add \"keyid_hash_algorithms\" so that equal ecdsa keys with different keyids", "# can be associated using supported keyid_hash_algorithms.", "key_object", "[", "'keyid_hash_algorithms'", "]", "=", "securesystemslib", ".", "settings", ".", "HASH_ALGORITHMS", "return", "key_object" ]
<Purpose> Import the encrypted ECDSA key file in 'filepath', decrypt it, and return the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format. The 'cryptography' library is currently supported and performs the actual cryptographic routine. <Arguments> filepath: <filepath> file, an ECDSA encrypted key file. password: The password, or passphrase, to import the private key (i.e., the encrypted key file 'filepath' must be decrypted before the ECDSA key object can be returned. <Exceptions> securesystemslib.exceptions.FormatError, if the arguments are improperly formatted or the imported key object contains an invalid key type (i.e., not 'ecdsa-sha2-nistp256'). securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted. <Side Effects> 'password' is used to decrypt the 'filepath' key file. <Returns> An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
[ "<Purpose", ">", "Import", "the", "encrypted", "ECDSA", "key", "file", "in", "filepath", "decrypt", "it", "and", "return", "the", "key", "object", "in", "securesystemslib", ".", "formats", ".", "ECDSAKEY_SCHEMA", "format", "." ]
python
train
39.5
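A hedged usage sketch for the importer above, assuming it is importable from securesystemslib.interface (the record's path) and that an encrypted ECDSA key file already exists at the hypothetical location below; passing password skips the interactive prompt.

from securesystemslib.interface import import_ecdsa_privatekey_from_file

key = import_ecdsa_privatekey_from_file('keystore/ecdsa_key', password='correct horse')
print(key['keytype'])                 # 'ecdsa-sha2-nistp256'
print(key['keyid_hash_algorithms'])   # filled in from settings.HASH_ALGORITHMS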
CalebBell/thermo
thermo/viscosity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/viscosity.py#L690-L719
def calculate_P(self, T, P, method): r'''Method to calculate pressure-dependent liquid viscosity at temperature `T` and pressure `P` with a given method. This method has no exception handling; see `TP_dependent_property` for that. Parameters ---------- T : float Temperature at which to calculate viscosity, [K] P : float Pressure at which to calculate viscosity, [K] method : str Name of the method to use Returns ------- mu : float Viscosity of the liquid at T and P, [Pa*S] ''' if method == LUCAS: mu = self.T_dependent_property(T) Psat = self.Psat(T) if hasattr(self.Psat, '__call__') else self.Psat mu = Lucas(T, P, self.Tc, self.Pc, self.omega, Psat, mu) elif method == COOLPROP: mu = PropsSI('V', 'T', T, 'P', P, self.CASRN) elif method in self.tabular_data: mu = self.interpolate_P(T, P, method) return mu
[ "def", "calculate_P", "(", "self", ",", "T", ",", "P", ",", "method", ")", ":", "if", "method", "==", "LUCAS", ":", "mu", "=", "self", ".", "T_dependent_property", "(", "T", ")", "Psat", "=", "self", ".", "Psat", "(", "T", ")", "if", "hasattr", "(", "self", ".", "Psat", ",", "'__call__'", ")", "else", "self", ".", "Psat", "mu", "=", "Lucas", "(", "T", ",", "P", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "omega", ",", "Psat", ",", "mu", ")", "elif", "method", "==", "COOLPROP", ":", "mu", "=", "PropsSI", "(", "'V'", ",", "'T'", ",", "T", ",", "'P'", ",", "P", ",", "self", ".", "CASRN", ")", "elif", "method", "in", "self", ".", "tabular_data", ":", "mu", "=", "self", ".", "interpolate_P", "(", "T", ",", "P", ",", "method", ")", "return", "mu" ]
r'''Method to calculate pressure-dependent liquid viscosity at temperature `T` and pressure `P` with a given method. This method has no exception handling; see `TP_dependent_property` for that. Parameters ---------- T : float Temperature at which to calculate viscosity, [K] P : float Pressure at which to calculate viscosity, [K] method : str Name of the method to use Returns ------- mu : float Viscosity of the liquid at T and P, [Pa*S]
[ "r", "Method", "to", "calculate", "pressure", "-", "dependent", "liquid", "viscosity", "at", "temperature", "T", "and", "pressure", "P", "with", "a", "given", "method", "." ]
python
valid
34.366667
tensorforce/tensorforce
docs/mistune.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/docs/mistune.py#L854-L863
def autolink(self, link, is_email=False): """Rendering a given link or email address. :param link: link content or email address. :param is_email: whether this is an email or not. """ text = link = escape(link) if is_email: link = 'mailto:%s' % link return '<a href="%s">%s</a>' % (link, text)
[ "def", "autolink", "(", "self", ",", "link", ",", "is_email", "=", "False", ")", ":", "text", "=", "link", "=", "escape", "(", "link", ")", "if", "is_email", ":", "link", "=", "'mailto:%s'", "%", "link", "return", "'<a href=\"%s\">%s</a>'", "%", "(", "link", ",", "text", ")" ]
Rendering a given link or email address. :param link: link content or email address. :param is_email: whether this is an email or not.
[ "Rendering", "a", "given", "link", "or", "email", "address", "." ]
python
valid
35.3
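The renderer method above is small enough to exercise standalone; the re-statement below uses html.escape as a stand-in for mistune's own escape helper, and the addresses are made up.

from html import escape

def autolink(link, is_email=False):
    text = link = escape(link)
    if is_email:
        link = 'mailto:%s' % link
    return '<a href="%s">%s</a>' % (link, text)

print(autolink('https://example.com'))
print(autolink('[email protected]', is_email=True))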
aloetesting/aloe_webdriver
aloe_webdriver/__init__.py
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/__init__.py#L440-L448
def press_button(self, value): """ Click the button with the given label. """ button = find_button(world.browser, value) if not button: raise AssertionError( "Cannot find a button named '{}'.".format(value)) button.click()
[ "def", "press_button", "(", "self", ",", "value", ")", ":", "button", "=", "find_button", "(", "world", ".", "browser", ",", "value", ")", "if", "not", "button", ":", "raise", "AssertionError", "(", "\"Cannot find a button named '{}'.\"", ".", "format", "(", "value", ")", ")", "button", ".", "click", "(", ")" ]
Click the button with the given label.
[ "Click", "the", "button", "with", "the", "given", "label", "." ]
python
train
28.666667
MisterWil/abodepy
abodepy/event_controller.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L183-L212
def _on_timeline_update(self, event): """Timeline update broadcast from Abode SocketIO server.""" if isinstance(event, (tuple, list)): event = event[0] event_type = event.get('event_type') event_code = event.get('event_code') if not event_type or not event_code: _LOGGER.warning("Invalid timeline update event: %s", event) return _LOGGER.debug("Timeline event received: %s - %s (%s)", event.get('event_name'), event_type, event_code) # Compress our callbacks into those that match this event_code # or ones registered to get callbacks for all events codes = (event_code, TIMELINE.ALL['event_code']) all_callbacks = [self._timeline_callbacks[code] for code in codes] for callbacks in all_callbacks: for callback in callbacks: _execute_callback(callback, event) # Attempt to map the event code to a group and callback event_group = TIMELINE.map_event_code(event_code) if event_group: for callback in self._event_callbacks.get(event_group, ()): _execute_callback(callback, event)
[ "def", "_on_timeline_update", "(", "self", ",", "event", ")", ":", "if", "isinstance", "(", "event", ",", "(", "tuple", ",", "list", ")", ")", ":", "event", "=", "event", "[", "0", "]", "event_type", "=", "event", ".", "get", "(", "'event_type'", ")", "event_code", "=", "event", ".", "get", "(", "'event_code'", ")", "if", "not", "event_type", "or", "not", "event_code", ":", "_LOGGER", ".", "warning", "(", "\"Invalid timeline update event: %s\"", ",", "event", ")", "return", "_LOGGER", ".", "debug", "(", "\"Timeline event received: %s - %s (%s)\"", ",", "event", ".", "get", "(", "'event_name'", ")", ",", "event_type", ",", "event_code", ")", "# Compress our callbacks into those that match this event_code", "# or ones registered to get callbacks for all events", "codes", "=", "(", "event_code", ",", "TIMELINE", ".", "ALL", "[", "'event_code'", "]", ")", "all_callbacks", "=", "[", "self", ".", "_timeline_callbacks", "[", "code", "]", "for", "code", "in", "codes", "]", "for", "callbacks", "in", "all_callbacks", ":", "for", "callback", "in", "callbacks", ":", "_execute_callback", "(", "callback", ",", "event", ")", "# Attempt to map the event code to a group and callback", "event_group", "=", "TIMELINE", ".", "map_event_code", "(", "event_code", ")", "if", "event_group", ":", "for", "callback", "in", "self", ".", "_event_callbacks", ".", "get", "(", "event_group", ",", "(", ")", ")", ":", "_execute_callback", "(", "callback", ",", "event", ")" ]
Timeline update broadcast from Abode SocketIO server.
[ "Timeline", "update", "broadcast", "from", "Abode", "SocketIO", "server", "." ]
python
train
39.333333
cebel/pyctd
src/pyctd/manager/query.py
https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/query.py#L13-L29
def _limit_and_df(self, query, limit, as_df=False): """adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame :param bool as_df: if is set to True results return as pandas.DataFrame :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query :param int limit: maximum number of results :return: query result of pyctd.manager.models.XY objects """ if limit: query = query.limit(limit) if as_df: results = read_sql(query.statement, self.engine) else: results = query.all() return results
[ "def", "_limit_and_df", "(", "self", ",", "query", ",", "limit", ",", "as_df", "=", "False", ")", ":", "if", "limit", ":", "query", "=", "query", ".", "limit", "(", "limit", ")", "if", "as_df", ":", "results", "=", "read_sql", "(", "query", ".", "statement", ",", "self", ".", "engine", ")", "else", ":", "results", "=", "query", ".", "all", "(", ")", "return", "results" ]
adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame :param bool as_df: if is set to True results return as pandas.DataFrame :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query :param int limit: maximum number of results :return: query result of pyctd.manager.models.XY objects
[ "adds", "a", "limit", "(", "limit", "==", "None", ":", "=", "no", "limit", ")", "to", "any", "query", "and", "allow", "a", "return", "as", "pandas", ".", "DataFrame" ]
python
train
36.823529
veeti/decent
decent/validators.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L11-L21
def All(*validators): """ Combines all the given validator callables into one, running all the validators in sequence on the given value. """ @wraps(All) def built(value): for validator in validators: value = validator(value) return value return built
[ "def", "All", "(", "*", "validators", ")", ":", "@", "wraps", "(", "All", ")", "def", "built", "(", "value", ")", ":", "for", "validator", "in", "validators", ":", "value", "=", "validator", "(", "value", ")", "return", "value", "return", "built" ]
Combines all the given validator callables into one, running all the validators in sequence on the given value.
[ "Combines", "all", "the", "given", "validator", "callables", "into", "one", "running", "all", "the", "validators", "in", "sequence", "on", "the", "given", "value", "." ]
python
train
27
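A usage sketch for the combinator above, assuming All is in scope; the two toy validators are invented for illustration and run left to right.

def strip(value):
    return value.strip()

def not_empty(value):
    if not value:
        raise ValueError('empty value')
    return value

clean = All(strip, not_empty)
print(clean('  hello  '))   # 'hello'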
pythongssapi/python-gssapi
gssapi/creds.py
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L303-L386
def add(self, name, mech, usage='both', init_lifetime=None, accept_lifetime=None, impersonator=None, store=None): """Acquire more credentials to add to the current set This method works like :meth:`acquire`, except that it adds the acquired credentials for a single mechanism to a copy of the current set, instead of creating a new set for multiple mechanisms. Unlike :meth:`acquire`, you cannot pass None desired name or mechanism. If the `impersonator` argument is used, the credentials will impersonate the given name using the impersonator credentials (:requires-ext:`s4u`). If the `store` argument is used, the credentials will be acquired from the given credential store (:requires-ext:`cred_store`). Otherwise, the credentials are acquired from the default store. The credential store information is a dictionary containing mechanisms-specific keys and values pointing to a credential store or stores. Note that the `store` argument is not compatible with the `impersonator` argument. Args: name (Name): the name associated with the credentials mech (OID): the desired :class:`MechType` to be used with the credentials usage (str): the usage for the credentials -- either 'both', 'initiate', or 'accept' init_lifetime (int): the desired initiate lifetime of the credentials, or None for indefinite accept_lifetime (int): the desired accept lifetime of the credentials, or None for indefinite impersonator (Credentials): the credentials to use to impersonate the given name, or None to not acquire normally (:requires-ext:`s4u`) store (dict): the credential store information pointing to the credential store from which to acquire the credentials, or None for the default store (:requires-ext:`cred_store`) Returns: Credentials: the credentials set containing the current credentials and the newly acquired ones. Raises: BadMechanismError BadNameTypeError BadNameError DuplicateCredentialsElementError ExpiredCredentialsError MissingCredentialsError """ if store is not None and impersonator is not None: raise ValueError('You cannot use both the `impersonator` and ' '`store` arguments at the same time') if store is not None: if rcred_cred_store is None: raise NotImplementedError("Your GSSAPI implementation does " "not have support for manipulating " "credential stores") store = _encode_dict(store) res = rcred_cred_store.add_cred_from(store, self, name, mech, usage, init_lifetime, accept_lifetime) elif impersonator is not None: if rcred_s4u is None: raise NotImplementedError("Your GSSAPI implementation does " "not have support for S4U") res = rcred_s4u.add_cred_impersonate_name(self, impersonator, name, mech, usage, init_lifetime, accept_lifetime) else: res = rcreds.add_cred(self, name, mech, usage, init_lifetime, accept_lifetime) return Credentials(res.creds)
[ "def", "add", "(", "self", ",", "name", ",", "mech", ",", "usage", "=", "'both'", ",", "init_lifetime", "=", "None", ",", "accept_lifetime", "=", "None", ",", "impersonator", "=", "None", ",", "store", "=", "None", ")", ":", "if", "store", "is", "not", "None", "and", "impersonator", "is", "not", "None", ":", "raise", "ValueError", "(", "'You cannot use both the `impersonator` and '", "'`store` arguments at the same time'", ")", "if", "store", "is", "not", "None", ":", "if", "rcred_cred_store", "is", "None", ":", "raise", "NotImplementedError", "(", "\"Your GSSAPI implementation does \"", "\"not have support for manipulating \"", "\"credential stores\"", ")", "store", "=", "_encode_dict", "(", "store", ")", "res", "=", "rcred_cred_store", ".", "add_cred_from", "(", "store", ",", "self", ",", "name", ",", "mech", ",", "usage", ",", "init_lifetime", ",", "accept_lifetime", ")", "elif", "impersonator", "is", "not", "None", ":", "if", "rcred_s4u", "is", "None", ":", "raise", "NotImplementedError", "(", "\"Your GSSAPI implementation does \"", "\"not have support for S4U\"", ")", "res", "=", "rcred_s4u", ".", "add_cred_impersonate_name", "(", "self", ",", "impersonator", ",", "name", ",", "mech", ",", "usage", ",", "init_lifetime", ",", "accept_lifetime", ")", "else", ":", "res", "=", "rcreds", ".", "add_cred", "(", "self", ",", "name", ",", "mech", ",", "usage", ",", "init_lifetime", ",", "accept_lifetime", ")", "return", "Credentials", "(", "res", ".", "creds", ")" ]
Acquire more credentials to add to the current set

This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None for the desired name or
mechanism.

If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).

If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.

The credential store information is a dictionary containing
mechanism-specific keys and values pointing to a credential store
or stores.

Note that the `store` argument is not compatible with the
`impersonator` argument.

Args:
    name (Name): the name associated with the credentials
    mech (OID): the desired :class:`MechType` to be used with the
        credentials
    usage (str): the usage for the credentials -- either 'both',
        'initiate', or 'accept'
    init_lifetime (int): the desired initiate lifetime of the
        credentials, or None for indefinite
    accept_lifetime (int): the desired accept lifetime of the
        credentials, or None for indefinite
    impersonator (Credentials): the credentials to use to impersonate
        the given name, or None to acquire the credentials normally,
        without impersonation (:requires-ext:`s4u`)
    store (dict): the credential store information pointing to the
        credential store from which to acquire the credentials,
        or None for the default store (:requires-ext:`cred_store`)

Returns:
    Credentials: the credentials set containing the current credentials
    and the newly acquired ones.

Raises:
    BadMechanismError
    BadNameTypeError
    BadNameError
    DuplicateCredentialsElementError
    ExpiredCredentialsError
    MissingCredentialsError
[ "Acquire", "more", "credentials", "to", "add", "to", "the", "current", "set" ]
python
train
45.642857
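A minimal usage sketch for the `add` method above, assuming python-gssapi's high-level `gssapi` package and a working Kerberos environment; the service principal below is hypothetical:

import gssapi

# Name of the service whose credentials should be added (hypothetical host).
server_name = gssapi.Name('HTTP@server.example.com',
                          gssapi.NameType.hostbased_service)

# Start from acceptor credentials taken from the default store...
creds = gssapi.Credentials(usage='accept')

# ...and get back a copy of the set extended with Kerberos credentials
# for the named service.
expanded = creds.add(server_name, gssapi.MechType.kerberos, usage='accept')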
orbingol/NURBS-Python
geomdl/convert.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/convert.py#L34-L65
def nurbs_to_bspline(obj, **kwargs): """ Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError """ if not obj.rational: raise TypeError("The input must be a rational shape") # Get keyword arguments tol = kwargs.get('tol', 10e-8) # Test for non-rational component extraction for w in obj.weights: if abs(w - 1.0) > tol: print("Cannot extract non-rational components") return obj # NURBS -> B-Spline if isinstance(obj, NURBS.Curve): return _convert.convert_curve(obj, BSpline) elif isinstance(obj, NURBS.Surface): return _convert.convert_surface(obj, BSpline) elif isinstance(obj, NURBS.Volume): return _convert.convert_volume(obj, BSpline) else: raise TypeError("Input must be an instance of NURBS curve, surface or volume")
[ "def", "nurbs_to_bspline", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "not", "obj", ".", "rational", ":", "raise", "TypeError", "(", "\"The input must be a rational shape\"", ")", "# Get keyword arguments", "tol", "=", "kwargs", ".", "get", "(", "'tol'", ",", "10e-8", ")", "# Test for non-rational component extraction", "for", "w", "in", "obj", ".", "weights", ":", "if", "abs", "(", "w", "-", "1.0", ")", ">", "tol", ":", "print", "(", "\"Cannot extract non-rational components\"", ")", "return", "obj", "# NURBS -> B-Spline", "if", "isinstance", "(", "obj", ",", "NURBS", ".", "Curve", ")", ":", "return", "_convert", ".", "convert_curve", "(", "obj", ",", "BSpline", ")", "elif", "isinstance", "(", "obj", ",", "NURBS", ".", "Surface", ")", ":", "return", "_convert", ".", "convert_surface", "(", "obj", ",", "BSpline", ")", "elif", "isinstance", "(", "obj", ",", "NURBS", ".", "Volume", ")", ":", "return", "_convert", ".", "convert_volume", "(", "obj", ",", "BSpline", ")", "else", ":", "raise", "TypeError", "(", "\"Input must be an instance of NURBS curve, surface or volume\"", ")" ]
Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError
[ "Extracts", "the", "non", "-", "rational", "components", "from", "rational", "parametric", "shapes", "if", "possible", "." ]
python
train
35.875
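An illustrative sketch of calling `nurbs_to_bspline` on a small curve, assuming the `geomdl` package; the control points, weights and knot vector below are made up:

from geomdl import NURBS, convert

crv = NURBS.Curve()
crv.degree = 2
crv.ctrlpts = [[0, 0, 0], [1, 2, 0], [3, 2, 0], [4, 0, 0]]
crv.weights = [1.0, 1.0, 1.0, 1.0]      # all weights ~1.0, so extraction succeeds
crv.knotvector = [0, 0, 0, 0.5, 1, 1, 1]

bspline_crv = convert.nurbs_to_bspline(crv)   # BSpline.Curve with the same control points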
albertz/py_better_exchook
better_exchook.py
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L265-L343
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None): """ Spawns some interactive shell. Tries to use IPython if available. Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`. :param dict[str] user_ns: :param dict[str] user_global_ns: :param traceback: :param execWrapper: :return: nothing """ ipshell = None try: # noinspection PyPackageRequirements import IPython have_ipython = True except ImportError: have_ipython = False if not ipshell and traceback and have_ipython: # noinspection PyBroadException try: # noinspection PyPackageRequirements,PyUnresolvedReferences from IPython.core.debugger import Pdb # noinspection PyPackageRequirements,PyUnresolvedReferences from IPython.terminal.debugger import TerminalPdb # noinspection PyPackageRequirements,PyUnresolvedReferences from IPython.terminal.ipapp import TerminalIPythonApp ipapp = TerminalIPythonApp.instance() ipapp.interact = False # Avoid output (banner, prints) ipapp.initialize(argv=[]) def_colors = ipapp.shell.colors pdb_obj = TerminalPdb(def_colors) pdb_obj.botframe = None # not sure. exception otherwise at quit def ipshell(): """ Run the IPython shell. """ pdb_obj.interaction(None, traceback=traceback) except Exception: print("IPython Pdb exception:") better_exchook(*sys.exc_info(), autodebugshell=False) if not ipshell and have_ipython: # noinspection PyBroadException try: # noinspection PyPackageRequirements,PyUnresolvedReferences import IPython # noinspection PyPackageRequirements,PyUnresolvedReferences import IPython.terminal.embed class DummyMod(object): """Dummy module""" module = DummyMod() module.__dict__ = user_global_ns module.__name__ = "_DummyMod" if "__name__" not in user_ns: user_ns = user_ns.copy() user_ns["__name__"] = "_DummyUserNsMod" ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance( user_ns=user_ns, user_module=module) except Exception: print("IPython not available:") better_exchook(*sys.exc_info(), autodebugshell=False) else: if execWrapper: old = ipshell.run_code ipshell.run_code = lambda code: execWrapper(lambda: old(code)) if ipshell: ipshell() else: print("Use simple debug shell:") if traceback: import pdb pdb.post_mortem(traceback) else: simple_debug_shell(user_global_ns, user_ns)
[ "def", "debug_shell", "(", "user_ns", ",", "user_global_ns", ",", "traceback", "=", "None", ",", "execWrapper", "=", "None", ")", ":", "ipshell", "=", "None", "try", ":", "# noinspection PyPackageRequirements", "import", "IPython", "have_ipython", "=", "True", "except", "ImportError", ":", "have_ipython", "=", "False", "if", "not", "ipshell", "and", "traceback", "and", "have_ipython", ":", "# noinspection PyBroadException", "try", ":", "# noinspection PyPackageRequirements,PyUnresolvedReferences", "from", "IPython", ".", "core", ".", "debugger", "import", "Pdb", "# noinspection PyPackageRequirements,PyUnresolvedReferences", "from", "IPython", ".", "terminal", ".", "debugger", "import", "TerminalPdb", "# noinspection PyPackageRequirements,PyUnresolvedReferences", "from", "IPython", ".", "terminal", ".", "ipapp", "import", "TerminalIPythonApp", "ipapp", "=", "TerminalIPythonApp", ".", "instance", "(", ")", "ipapp", ".", "interact", "=", "False", "# Avoid output (banner, prints)", "ipapp", ".", "initialize", "(", "argv", "=", "[", "]", ")", "def_colors", "=", "ipapp", ".", "shell", ".", "colors", "pdb_obj", "=", "TerminalPdb", "(", "def_colors", ")", "pdb_obj", ".", "botframe", "=", "None", "# not sure. exception otherwise at quit", "def", "ipshell", "(", ")", ":", "\"\"\"\n Run the IPython shell.\n \"\"\"", "pdb_obj", ".", "interaction", "(", "None", ",", "traceback", "=", "traceback", ")", "except", "Exception", ":", "print", "(", "\"IPython Pdb exception:\"", ")", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ",", "autodebugshell", "=", "False", ")", "if", "not", "ipshell", "and", "have_ipython", ":", "# noinspection PyBroadException", "try", ":", "# noinspection PyPackageRequirements,PyUnresolvedReferences", "import", "IPython", "# noinspection PyPackageRequirements,PyUnresolvedReferences", "import", "IPython", ".", "terminal", ".", "embed", "class", "DummyMod", "(", "object", ")", ":", "\"\"\"Dummy module\"\"\"", "module", "=", "DummyMod", "(", ")", "module", ".", "__dict__", "=", "user_global_ns", "module", ".", "__name__", "=", "\"_DummyMod\"", "if", "\"__name__\"", "not", "in", "user_ns", ":", "user_ns", "=", "user_ns", ".", "copy", "(", ")", "user_ns", "[", "\"__name__\"", "]", "=", "\"_DummyUserNsMod\"", "ipshell", "=", "IPython", ".", "terminal", ".", "embed", ".", "InteractiveShellEmbed", ".", "instance", "(", "user_ns", "=", "user_ns", ",", "user_module", "=", "module", ")", "except", "Exception", ":", "print", "(", "\"IPython not available:\"", ")", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ",", "autodebugshell", "=", "False", ")", "else", ":", "if", "execWrapper", ":", "old", "=", "ipshell", ".", "run_code", "ipshell", ".", "run_code", "=", "lambda", "code", ":", "execWrapper", "(", "lambda", ":", "old", "(", "code", ")", ")", "if", "ipshell", ":", "ipshell", "(", ")", "else", ":", "print", "(", "\"Use simple debug shell:\"", ")", "if", "traceback", ":", "import", "pdb", "pdb", ".", "post_mortem", "(", "traceback", ")", "else", ":", "simple_debug_shell", "(", "user_global_ns", ",", "user_ns", ")" ]
Spawns some interactive shell. Tries to use IPython if available. Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`. :param dict[str] user_ns: :param dict[str] user_global_ns: :param traceback: :param execWrapper: :return: nothing
[ "Spawns", "some", "interactive", "shell", ".", "Tries", "to", "use", "IPython", "if", "available", ".", "Falls", "back", "to", ":", "func", ":", "pdb", ".", "post_mortem", "or", ":", "func", ":", "simple_debug_shell", "." ]
python
train
36.658228
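A rough usage sketch: dropping into the debug shell from an exception handler, assuming the `better_exchook` module is importable:

import sys
import better_exchook

def risky():
    return 1 / 0

try:
    risky()
except Exception:
    exc_type, exc_value, exc_tb = sys.exc_info()
    # Tries IPython's Pdb first, then an embedded IPython shell,
    # then pdb.post_mortem / simple_debug_shell.
    better_exchook.debug_shell(locals(), globals(), traceback=exc_tb)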
praekeltfoundation/seaworthy
seaworthy/helpers.py
https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/helpers.py#L414-L424
def teardown(self): """ Clean up all resources when we're done with them. """ self.containers._teardown() self.networks._teardown() self.volumes._teardown() # We need to close the underlying APIClient explicitly to avoid # ResourceWarnings from unclosed HTTP connections. self._client.api.close()
[ "def", "teardown", "(", "self", ")", ":", "self", ".", "containers", ".", "_teardown", "(", ")", "self", ".", "networks", ".", "_teardown", "(", ")", "self", ".", "volumes", ".", "_teardown", "(", ")", "# We need to close the underlying APIClient explicitly to avoid", "# ResourceWarnings from unclosed HTTP connections.", "self", ".", "_client", ".", "api", ".", "close", "(", ")" ]
Clean up all resources when we're done with them.
[ "Clean", "up", "all", "resources", "when", "we", "re", "done", "with", "them", "." ]
python
train
32.636364
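A hedged sketch of the intended call pattern; the `DockerHelper` name and its `setup()` method are assumptions about this module rather than verified API:

from seaworthy.helpers import DockerHelper

helper = DockerHelper()
helper.setup()
try:
    # ... create containers, networks and volumes through the helper ...
    pass
finally:
    # Release all Docker resources and close the underlying APIClient.
    helper.teardown()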
pymc-devs/pymc
pymc/Matplot.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L314-L381
def plotwrapper(f): """ This decorator allows for PyMC arguments of various types to be passed to the plotting functions. It identifies the type of object and locates its trace(s), then passes the data to the wrapped plotting function. """ def wrapper(pymc_obj, *args, **kwargs): start = 0 if 'start' in kwargs: start = kwargs.pop('start') # Figure out what type of object it is try: # First try Model type for variable in pymc_obj._variables_to_tally: # Plot object if variable._plot is not False: data = pymc_obj.trace(variable.__name__)[start:] if size(data[-1]) >= 10 and variable._plot != True: continue elif variable.dtype is dtype('object'): continue name = variable.__name__ if args: name = '%s_%s' % (args[0], variable.__name__) f(data, name, *args, **kwargs) return except AttributeError: pass try: # Then try Trace type data = pymc_obj()[:] name = pymc_obj.name f(data, name, *args, **kwargs) return except (AttributeError, TypeError): pass try: # Then try Node type if pymc_obj._plot is not False: data = pymc_obj.trace()[start:] # This is deprecated. DH name = pymc_obj.__name__ f(data, name, *args, **kwargs) return except AttributeError: pass if isinstance(pymc_obj, dict): # Then try dictionary for i in pymc_obj: data = pymc_obj[i][start:] if args: i = '%s_%s' % (args[0], i) elif 'name' in kwargs: i = '%s_%s' % (kwargs.pop('name'), i) f(data, i, *args, **kwargs) return # If others fail, assume that raw data is passed f(pymc_obj, *args, **kwargs) wrapper.__doc__ = f.__doc__ wrapper.__name__ = f.__name__ return wrapper
[ "def", "plotwrapper", "(", "f", ")", ":", "def", "wrapper", "(", "pymc_obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start", "=", "0", "if", "'start'", "in", "kwargs", ":", "start", "=", "kwargs", ".", "pop", "(", "'start'", ")", "# Figure out what type of object it is", "try", ":", "# First try Model type", "for", "variable", "in", "pymc_obj", ".", "_variables_to_tally", ":", "# Plot object", "if", "variable", ".", "_plot", "is", "not", "False", ":", "data", "=", "pymc_obj", ".", "trace", "(", "variable", ".", "__name__", ")", "[", "start", ":", "]", "if", "size", "(", "data", "[", "-", "1", "]", ")", ">=", "10", "and", "variable", ".", "_plot", "!=", "True", ":", "continue", "elif", "variable", ".", "dtype", "is", "dtype", "(", "'object'", ")", ":", "continue", "name", "=", "variable", ".", "__name__", "if", "args", ":", "name", "=", "'%s_%s'", "%", "(", "args", "[", "0", "]", ",", "variable", ".", "__name__", ")", "f", "(", "data", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "except", "AttributeError", ":", "pass", "try", ":", "# Then try Trace type", "data", "=", "pymc_obj", "(", ")", "[", ":", "]", "name", "=", "pymc_obj", ".", "name", "f", "(", "data", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "pass", "try", ":", "# Then try Node type", "if", "pymc_obj", ".", "_plot", "is", "not", "False", ":", "data", "=", "pymc_obj", ".", "trace", "(", ")", "[", "start", ":", "]", "# This is deprecated. DH", "name", "=", "pymc_obj", ".", "__name__", "f", "(", "data", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "except", "AttributeError", ":", "pass", "if", "isinstance", "(", "pymc_obj", ",", "dict", ")", ":", "# Then try dictionary", "for", "i", "in", "pymc_obj", ":", "data", "=", "pymc_obj", "[", "i", "]", "[", "start", ":", "]", "if", "args", ":", "i", "=", "'%s_%s'", "%", "(", "args", "[", "0", "]", ",", "i", ")", "elif", "'name'", "in", "kwargs", ":", "i", "=", "'%s_%s'", "%", "(", "kwargs", ".", "pop", "(", "'name'", ")", ",", "i", ")", "f", "(", "data", ",", "i", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "# If others fail, assume that raw data is passed", "f", "(", "pymc_obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "wrapper", ".", "__doc__", "=", "f", ".", "__doc__", "wrapper", ".", "__name__", "=", "f", ".", "__name__", "return", "wrapper" ]
This decorator allows for PyMC arguments of various types to be passed to the plotting functions. It identifies the type of object and locates its trace(s), then passes the data to the wrapped plotting function.
[ "This", "decorator", "allows", "for", "PyMC", "arguments", "of", "various", "types", "to", "be", "passed", "to", "the", "plotting", "functions", ".", "It", "identifies", "the", "type", "of", "object", "and", "locates", "its", "trace", "(", "s", ")", "then", "passes", "the", "data", "to", "the", "wrapped", "plotting", "function", "." ]
python
train
32.426471
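An illustration of the dispatch behaviour; `plotwrapper` is internal to `pymc.Matplot`, and the decorated function below is hypothetical:

from pymc.Matplot import plotwrapper

@plotwrapper
def summarize(data, name, *args, **kwargs):
    # By the time this body runs, `data` is a raw trace array and `name`
    # identifies the variable it came from.
    print(name, len(data))

# All of these forms reach the same body:
# summarize(mcmc_sampler)                # Model/MCMC: one call per tallied variable
# summarize(mcmc_sampler.trace('x'))     # a single Trace object
# summarize({'x': x_samples})            # dict of raw arrays, keyed by name
# summarize(x_samples, 'x')              # raw array plus an explicit name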
ansible/molecule
molecule/command/init/role.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/command/init/role.py#L56-L93
def execute(self): """ Execute the actions necessary to perform a `molecule init role` and returns None. :return: None """ role_name = self._command_args['role_name'] role_directory = os.getcwd() msg = 'Initializing new role {}...'.format(role_name) LOG.info(msg) if os.path.isdir(role_name): msg = ('The directory {} exists. ' 'Cannot create new role.').format(role_name) util.sysexit_with_message(msg) template_directory = '' if 'template' in self._command_args.keys(): template_directory = self._command_args['template'] else: template_directory = 'role' self._process_templates(template_directory, self._command_args, role_directory) scenario_base_directory = os.path.join(role_directory, role_name) templates = [ 'scenario/driver/{driver_name}'.format(**self._command_args), 'scenario/verifier/{verifier_name}'.format(**self._command_args), ] for template in templates: self._process_templates(template, self._command_args, scenario_base_directory) self._process_templates('molecule', self._command_args, role_directory) role_directory = os.path.join(role_directory, role_name) msg = 'Initialized role in {} successfully.'.format(role_directory) LOG.success(msg)
[ "def", "execute", "(", "self", ")", ":", "role_name", "=", "self", ".", "_command_args", "[", "'role_name'", "]", "role_directory", "=", "os", ".", "getcwd", "(", ")", "msg", "=", "'Initializing new role {}...'", ".", "format", "(", "role_name", ")", "LOG", ".", "info", "(", "msg", ")", "if", "os", ".", "path", ".", "isdir", "(", "role_name", ")", ":", "msg", "=", "(", "'The directory {} exists. '", "'Cannot create new role.'", ")", ".", "format", "(", "role_name", ")", "util", ".", "sysexit_with_message", "(", "msg", ")", "template_directory", "=", "''", "if", "'template'", "in", "self", ".", "_command_args", ".", "keys", "(", ")", ":", "template_directory", "=", "self", ".", "_command_args", "[", "'template'", "]", "else", ":", "template_directory", "=", "'role'", "self", ".", "_process_templates", "(", "template_directory", ",", "self", ".", "_command_args", ",", "role_directory", ")", "scenario_base_directory", "=", "os", ".", "path", ".", "join", "(", "role_directory", ",", "role_name", ")", "templates", "=", "[", "'scenario/driver/{driver_name}'", ".", "format", "(", "*", "*", "self", ".", "_command_args", ")", ",", "'scenario/verifier/{verifier_name}'", ".", "format", "(", "*", "*", "self", ".", "_command_args", ")", ",", "]", "for", "template", "in", "templates", ":", "self", ".", "_process_templates", "(", "template", ",", "self", ".", "_command_args", ",", "scenario_base_directory", ")", "self", ".", "_process_templates", "(", "'molecule'", ",", "self", ".", "_command_args", ",", "role_directory", ")", "role_directory", "=", "os", ".", "path", ".", "join", "(", "role_directory", ",", "role_name", ")", "msg", "=", "'Initialized role in {} successfully.'", ".", "format", "(", "role_directory", ")", "LOG", ".", "success", "(", "msg", ")" ]
Execute the actions necessary to perform a `molecule init role` and
return None.

:return: None
[ "Execute", "the", "actions", "necessary", "to", "perform", "a", "molecule", "init", "role", "and", "returns", "None", "." ]
python
train
39
osrg/ryu
ryu/services/protocols/bgp/core_managers/table_manager.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/table_manager.py#L372-L385
def get_vpnv4fs_table(self): """Returns global VPNv4 Flow Specification table. Creates the table if it does not exist. """ vpnv4fs_table = self._global_tables.get(RF_VPNv4_FLOWSPEC) # Lazy initialization of the table. if not vpnv4fs_table: vpnv4fs_table = VPNv4FlowSpecTable(self._core_service, self._signal_bus) self._global_tables[RF_VPNv4_FLOWSPEC] = vpnv4fs_table self._tables[(None, RF_VPNv4_FLOWSPEC)] = vpnv4fs_table return vpnv4fs_table
[ "def", "get_vpnv4fs_table", "(", "self", ")", ":", "vpnv4fs_table", "=", "self", ".", "_global_tables", ".", "get", "(", "RF_VPNv4_FLOWSPEC", ")", "# Lazy initialization of the table.", "if", "not", "vpnv4fs_table", ":", "vpnv4fs_table", "=", "VPNv4FlowSpecTable", "(", "self", ".", "_core_service", ",", "self", ".", "_signal_bus", ")", "self", ".", "_global_tables", "[", "RF_VPNv4_FLOWSPEC", "]", "=", "vpnv4fs_table", "self", ".", "_tables", "[", "(", "None", ",", "RF_VPNv4_FLOWSPEC", ")", "]", "=", "vpnv4fs_table", "return", "vpnv4fs_table" ]
Returns global VPNv4 Flow Specification table. Creates the table if it does not exist.
[ "Returns", "global", "VPNv4", "Flow", "Specification", "table", "." ]
python
train
40.857143
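The create-on-first-access pattern used by `get_vpnv4fs_table`, reduced to plain Python for clarity (no Ryu types; the class below is illustrative only):

class LazyTables:
    def __init__(self):
        self._global_tables = {}

    def get_table(self, route_family, factory):
        table = self._global_tables.get(route_family)
        if table is None:
            table = factory()                     # built lazily on first request
            self._global_tables[route_family] = table
        return table

tables = LazyTables()
t1 = tables.get_table('vpnv4fs', dict)
t2 = tables.get_table('vpnv4fs', dict)
assert t1 is t2                                   # same table on later lookups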
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1794-L1804
def to_json(self): """Returns an input shard state for the remaining inputs. Returns: A json-izable version of the remaining InputReader. """ return {self.BLOB_KEY_PARAM: self._blob_key, self.START_FILE_INDEX_PARAM: self._start_file_index, self.END_FILE_INDEX_PARAM: self._end_file_index, self.OFFSET_PARAM: self._next_offset()}
[ "def", "to_json", "(", "self", ")", ":", "return", "{", "self", ".", "BLOB_KEY_PARAM", ":", "self", ".", "_blob_key", ",", "self", ".", "START_FILE_INDEX_PARAM", ":", "self", ".", "_start_file_index", ",", "self", ".", "END_FILE_INDEX_PARAM", ":", "self", ".", "_end_file_index", ",", "self", ".", "OFFSET_PARAM", ":", "self", ".", "_next_offset", "(", ")", "}" ]
Returns an input shard state for the remaining inputs. Returns: A json-izable version of the remaining InputReader.
[ "Returns", "an", "input", "shard", "state", "for", "the", "remaining", "inputs", "." ]
python
train
34.363636
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L4143-L4166
def connect_post_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501 """connect_post_namespaced_pod_proxy_with_path # noqa: E501 connect POST requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 else: (data) = self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 return data
[ "def", "connect_post_namespaced_pod_proxy_with_path", "(", "self", ",", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "connect_post_namespaced_pod_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "connect_post_namespaced_pod_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
connect_post_namespaced_pod_proxy_with_path # noqa: E501 connect POST requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread.
[ "connect_post_namespaced_pod_proxy_with_path", "#", "noqa", ":", "E501" ]
python
train
56.291667
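A call-pattern sketch mirroring the docstring above; the pod name, namespace and proxy path are hypothetical, and cluster configuration (e.g. via `kubernetes_asyncio.config`) is assumed to have been loaded already:

from kubernetes_asyncio import client

api = client.CoreV1Api()

# Asynchronous form: returns a request thread whose .get() yields the body.
thread = api.connect_post_namespaced_pod_proxy_with_path(
    'my-pod', 'default', 'healthz', async_req=True)
result = thread.get()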
loli/medpy
medpy/features/utilities.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/utilities.py#L181-L221
def join(*vectors): r""" Takes an arbitrary number of aligned vectors of the same length and combines them into a single vector (vertically). E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12 feature vector is created and returned. The feature vectors are expected to have the form samples*features i.e.:: s1 s2 s3 [...] f1 f2 [...] Parameters ---------- *vectors : sequences A number of vectors with the same number of samples. Returns ------- vector : ndarray The combined vectors. """ # check supplied arguments if len(vectors) < 2: return vectors[0] # process supplied arguments vectors = list(vectors) for i in range(len(vectors)): vectors[i] = numpy.array(vectors[i], copy=False) if vectors[i].ndim == 1: vectors[i] = numpy.array([vectors[i]], copy=False).T # treat single-value cases special (no squeezing) if 1 == len(vectors[0]): return numpy.concatenate(vectors, 1) return numpy.squeeze(numpy.concatenate(vectors, 1))
[ "def", "join", "(", "*", "vectors", ")", ":", "# check supplied arguments", "if", "len", "(", "vectors", ")", "<", "2", ":", "return", "vectors", "[", "0", "]", "# process supplied arguments", "vectors", "=", "list", "(", "vectors", ")", "for", "i", "in", "range", "(", "len", "(", "vectors", ")", ")", ":", "vectors", "[", "i", "]", "=", "numpy", ".", "array", "(", "vectors", "[", "i", "]", ",", "copy", "=", "False", ")", "if", "vectors", "[", "i", "]", ".", "ndim", "==", "1", ":", "vectors", "[", "i", "]", "=", "numpy", ".", "array", "(", "[", "vectors", "[", "i", "]", "]", ",", "copy", "=", "False", ")", ".", "T", "# treat single-value cases special (no squeezing)", "if", "1", "==", "len", "(", "vectors", "[", "0", "]", ")", ":", "return", "numpy", ".", "concatenate", "(", "vectors", ",", "1", ")", "return", "numpy", ".", "squeeze", "(", "numpy", ".", "concatenate", "(", "vectors", ",", "1", ")", ")" ]
r""" Takes an arbitrary number of aligned vectors of the same length and combines them into a single vector (vertically). E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12 feature vector is created and returned. The feature vectors are expected to have the form samples*features i.e.:: s1 s2 s3 [...] f1 f2 [...] Parameters ---------- *vectors : sequences A number of vectors with the same number of samples. Returns ------- vector : ndarray The combined vectors.
[ "r", "Takes", "an", "arbitrary", "number", "of", "aligned", "vectors", "of", "the", "same", "length", "and", "combines", "them", "into", "a", "single", "vector", "(", "vertically", ")", ".", "E", ".", "g", ".", "taking", "two", "100", "-", "sample", "feature", "vectors", "of", "once", "5", "and", "once", "7", "features", "a", "100x12", "feature", "vector", "is", "created", "and", "returned", ".", "The", "feature", "vectors", "are", "expected", "to", "have", "the", "form", "samples", "*", "features", "i", ".", "e", ".", "::", "s1", "s2", "s3", "[", "...", "]", "f1", "f2", "[", "...", "]", "Parameters", "----------", "*", "vectors", ":", "sequences", "A", "number", "of", "vectors", "with", "the", "same", "number", "of", "samples", ".", "Returns", "-------", "vector", ":", "ndarray", "The", "combined", "vectors", "." ]
python
train
27.95122
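A behaviour check for `join`, following the example in its docstring (assumes numpy and medpy are installed):

import numpy
from medpy.features.utilities import join

a = numpy.zeros((100, 5))   # 100 samples x 5 features
b = numpy.ones((100, 7))    # 100 samples x 7 features

combined = join(a, b)
print(combined.shape)       # (100, 12)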
Kortemme-Lab/klab
klab/bio/ligand.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/ligand.py#L318-L326
def parse_pdb_ligand_info(self, pdb_ligand_info): '''This only parses the ligand type as all the other information should be in the .cif file. The XML file has proper capitalization whereas the .cif file uses all caps for the ligand type.''' mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL) for m in mtchs: if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1: ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL) if ligand_type: self.LigandType = ligand_type.group(1)
[ "def", "parse_pdb_ligand_info", "(", "self", ",", "pdb_ligand_info", ")", ":", "mtchs", "=", "re", ".", "findall", "(", "'(<ligand.*?</ligand>)'", ",", "pdb_ligand_info", ",", "re", ".", "DOTALL", ")", "for", "m", "in", "mtchs", ":", "if", "m", ".", "upper", "(", ")", ".", "find", "(", "'CHEMICALID=\"{0}\"'", ".", "format", "(", "self", ".", "PDBCode", ".", "upper", "(", ")", ")", ")", "!=", "-", "1", ":", "ligand_type", "=", "re", ".", "match", "(", "'<ligand.*?\\stype=\"(.*?)\".*?>'", ",", "m", ",", "re", ".", "DOTALL", ")", "if", "ligand_type", ":", "self", ".", "LigandType", "=", "ligand_type", ".", "group", "(", "1", ")" ]
This only parses the ligand type as all the other information should be in the .cif file. The XML file has proper capitalization whereas the .cif file uses all caps for the ligand type.
[ "This", "only", "parses", "the", "ligand", "type", "as", "all", "the", "other", "information", "should", "be", "in", "the", ".", "cif", "file", ".", "The", "XML", "file", "has", "proper", "capitalization", "whereas", "the", ".", "cif", "file", "uses", "all", "caps", "for", "the", "ligand", "type", "." ]
python
train
68.666667
ethereum/eth-abi
eth_abi/registry.py
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L433-L439
def unregister(self, label: str) -> None: """ Unregisters the entries in the encoder and decoder registries which have the label ``label``. """ self.unregister_encoder(label) self.unregister_decoder(label)
[ "def", "unregister", "(", "self", ",", "label", ":", "str", ")", "->", "None", ":", "self", ".", "unregister_encoder", "(", "label", ")", "self", ".", "unregister_decoder", "(", "label", ")" ]
Unregisters the entries in the encoder and decoder registries which have the label ``label``.
[ "Unregisters", "the", "entries", "in", "the", "encoder", "and", "decoder", "registries", "which", "have", "the", "label", "label", "." ]
python
train
35.285714
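A shape-of-the-API sketch of a register/unregister round trip on eth-abi's default registry; the 'null' type string and the placeholder codecs are made up, and a real codec would normally be an encoder/decoder class rather than a bare function:

from eth_abi.registry import registry

def encode_null(value):      # placeholder codec, never actually invoked here
    return b''

def decode_null(stream):     # placeholder codec, never actually invoked here
    return None

registry.register('null', encode_null, decode_null, label='null')
# ... later, remove both the encoder and the decoder entries in one call:
registry.unregister('null')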
CyberReboot/vent
vent/helpers/templates.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/helpers/templates.py#L37-L43
def option(self, section, option): """ Returns the value of the option """ if self.config.has_section(section): if self.config.has_option(section, option): return (True, self.config.get(section, option)) return (False, 'Option: ' + option + ' does not exist') return (False, 'Section: ' + section + ' does not exist')
[ "def", "option", "(", "self", ",", "section", ",", "option", ")", ":", "if", "self", ".", "config", ".", "has_section", "(", "section", ")", ":", "if", "self", ".", "config", ".", "has_option", "(", "section", ",", "option", ")", ":", "return", "(", "True", ",", "self", ".", "config", ".", "get", "(", "section", ",", "option", ")", ")", "return", "(", "False", ",", "'Option: '", "+", "option", "+", "' does not exist'", ")", "return", "(", "False", ",", "'Section: '", "+", "section", "+", "' does not exist'", ")" ]
Returns the value of the option
[ "Returns", "the", "value", "of", "the", "option" ]
python
train
53.571429
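A sketch of the (status, value) convention this method returns; the `Template` class name, its constructor argument and the section/option names are assumptions about this module:

from vent.helpers.templates import Template

template = Template(template='/path/to/vent.template')   # hypothetical path

ok, value = template.option('main', 'files')
if ok:
    print('files =', value)
else:
    print(value)   # e.g. "Option: files does not exist"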
marazt/object-mapper
mapper/casedict.py
https://github.com/marazt/object-mapper/blob/b02c6d68c5bf86462aa8080aff3e93b133afd43e/mapper/casedict.py#L56-L71
def get(self, key, default=_sentinel): """ Gets the value from the key. If the key doesn't exist, the default value is returned, otherwise None. :param key: The key :param default: The default value :return: The value """ tup = self._data.get(key.lower()) if tup is not None: return tup[1] elif default is not _sentinel: return default else: return None
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "_sentinel", ")", ":", "tup", "=", "self", ".", "_data", ".", "get", "(", "key", ".", "lower", "(", ")", ")", "if", "tup", "is", "not", "None", ":", "return", "tup", "[", "1", "]", "elif", "default", "is", "not", "_sentinel", ":", "return", "default", "else", ":", "return", "None" ]
Gets the value for the given key (lookup is case-insensitive).
If the key doesn't exist, the default value is returned when one was
supplied, otherwise None.

:param key: The key
:param default: The default value returned for a missing key
:return: The value
[ "Gets", "the", "value", "from", "the", "key", ".", "If", "the", "key", "doesn", "t", "exist", "the", "default", "value", "is", "returned", "otherwise", "None", "." ]
python
valid
28.9375
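A usage sketch assuming the class defined in mapper/casedict.py is named `CaseDict` and supports item assignment; the keys below are made up:

from mapper.casedict import CaseDict

d = CaseDict()
d['Content-Type'] = 'application/json'

print(d.get('content-type'))           # 'application/json' (lookup ignores case)
print(d.get('missing'))                # None
print(d.get('missing', 'fallback'))    # 'fallback'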
fabric-bolt/fabric-bolt
fabric_bolt/projects/models.py
https://github.com/fabric-bolt/fabric-bolt/blob/0f434783026f1b9ce16a416fa496d76921fe49ca/fabric_bolt/projects/models.py#L320-L332
def get_next_input(self): """ Returns the next line of input :return: string of input """ # TODO: could override input if we get input coming in at the same time all_input = Deployment.objects.get(pk=self.id).input or '' lines = all_input.splitlines() first_line = lines[0] if len(lines) else None lines = lines[1:] if len(lines) > 1 else [] Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines)) return first_line
[ "def", "get_next_input", "(", "self", ")", ":", "# TODO: could override input if we get input coming in at the same time", "all_input", "=", "Deployment", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "id", ")", ".", "input", "or", "''", "lines", "=", "all_input", ".", "splitlines", "(", ")", "first_line", "=", "lines", "[", "0", "]", "if", "len", "(", "lines", ")", "else", "None", "lines", "=", "lines", "[", "1", ":", "]", "if", "len", "(", "lines", ")", ">", "1", "else", "[", "]", "Deployment", ".", "objects", ".", "filter", "(", "pk", "=", "self", ".", "id", ")", ".", "update", "(", "input", "=", "'\\n'", ".", "join", "(", "lines", ")", ")", "return", "first_line" ]
Returns the next line of input :return: string of input
[ "Returns", "the", "next", "line", "of", "input", ":", "return", ":", "string", "of", "input" ]
python
train
38.846154
abe-winter/pg13-py
pg13/treepath.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/treepath.py#L9-L14
def child(self,index): "helper for __getitem__/__setitem__" if isinstance(index,tuple): attr,i = index return getattr(self,attr)[i] else: return getattr(self,index)
[ "def", "child", "(", "self", ",", "index", ")", ":", "if", "isinstance", "(", "index", ",", "tuple", ")", ":", "attr", ",", "i", "=", "index", "return", "getattr", "(", "self", ",", "attr", ")", "[", "i", "]", "else", ":", "return", "getattr", "(", "self", ",", "index", ")" ]
helper for __getitem__/__setitem__
[ "helper", "for", "__getitem__", "/", "__setitem__" ]
python
train
30.5
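A plain-Python illustration of the two index forms `child` accepts; the `Node` class below is hypothetical and only mirrors the helper's logic:

class Node:
    def __init__(self):
        self.left = 'L'
        self.items = ['a', 'b', 'c']

    def child(self, index):
        "helper for __getitem__/__setitem__"
        if isinstance(index, tuple):
            attr, i = index
            return getattr(self, attr)[i]
        else:
            return getattr(self, index)

n = Node()
assert n.child('left') == 'L'            # plain attribute name
assert n.child(('items', 1)) == 'b'      # (attribute, position) pair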
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py#L117-L131
def _get_layer_converter_fn(layer, add_custom_layers = False): """Get the right converter function for Keras """ layer_type = type(layer) if layer_type in _KERAS_LAYER_REGISTRY: convert_func = _KERAS_LAYER_REGISTRY[layer_type] if convert_func is _layers2.convert_activation: act_name = _layers2._get_activation_name_from_keras_layer(layer) if act_name == 'CUSTOM': return None return convert_func elif add_custom_layers: return None else: raise TypeError("Keras layer of type %s is not supported." % type(layer))
[ "def", "_get_layer_converter_fn", "(", "layer", ",", "add_custom_layers", "=", "False", ")", ":", "layer_type", "=", "type", "(", "layer", ")", "if", "layer_type", "in", "_KERAS_LAYER_REGISTRY", ":", "convert_func", "=", "_KERAS_LAYER_REGISTRY", "[", "layer_type", "]", "if", "convert_func", "is", "_layers2", ".", "convert_activation", ":", "act_name", "=", "_layers2", ".", "_get_activation_name_from_keras_layer", "(", "layer", ")", "if", "act_name", "==", "'CUSTOM'", ":", "return", "None", "return", "convert_func", "elif", "add_custom_layers", ":", "return", "None", "else", ":", "raise", "TypeError", "(", "\"Keras layer of type %s is not supported.\"", "%", "type", "(", "layer", ")", ")" ]
Get the right converter function for Keras
[ "Get", "the", "right", "converter", "function", "for", "Keras" ]
python
train
40.133333
andy29485/embypy
embypy/objects/object.py
https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L249-L272
async def update(self, fields=''): '''reload object info from emby |coro| Parameters ---------- fields : str additional fields to request when updating See Also -------- refresh : same thing send : post : ''' path = 'Users/{{UserId}}/Items/{}'.format(self.id) info = await self.connector.getJson(path, remote=False, Fields='Path,Overview,'+fields ) self.object_dict.update(info) self.extras = {} return self
[ "async", "def", "update", "(", "self", ",", "fields", "=", "''", ")", ":", "path", "=", "'Users/{{UserId}}/Items/{}'", ".", "format", "(", "self", ".", "id", ")", "info", "=", "await", "self", ".", "connector", ".", "getJson", "(", "path", ",", "remote", "=", "False", ",", "Fields", "=", "'Path,Overview,'", "+", "fields", ")", "self", ".", "object_dict", ".", "update", "(", "info", ")", "self", ".", "extras", "=", "{", "}", "return", "self" ]
reload object info from emby |coro| Parameters ---------- fields : str additional fields to request when updating See Also -------- refresh : same thing send : post :
[ "reload", "object", "info", "from", "emby" ]
python
train
22.791667
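A coroutine usage sketch; `movie` stands for any EmbyObject already obtained from an embypy client (client construction is omitted, and the field names are illustrative):

import asyncio

async def refresh(movie):
    refreshed = await movie.update(fields='Genres,Tags')
    print(refreshed.object_dict.get('Overview'))

# asyncio.get_event_loop().run_until_complete(refresh(movie))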