repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
---|---|---|---|---|---|---|---|---|
PredixDev/predixpy | predix/admin/uaa.py | https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/uaa.py#L86-L96 | def authenticate(self):
"""
Authenticate into the UAA instance as the admin user.
"""
# Make sure we've stored uri for use
predix.config.set_env_value(self.use_class, 'uri', self._get_uri())
self.uaac = predix.security.uaa.UserAccountAuthentication()
self.uaac.authenticate('admin', self._get_admin_secret(),
use_cache=False)
self.is_admin = True | [
"def",
"authenticate",
"(",
"self",
")",
":",
"# Make sure we've stored uri for use",
"predix",
".",
"config",
".",
"set_env_value",
"(",
"self",
".",
"use_class",
",",
"'uri'",
",",
"self",
".",
"_get_uri",
"(",
")",
")",
"self",
".",
"uaac",
"=",
"predix",
".",
"security",
".",
"uaa",
".",
"UserAccountAuthentication",
"(",
")",
"self",
".",
"uaac",
".",
"authenticate",
"(",
"'admin'",
",",
"self",
".",
"_get_admin_secret",
"(",
")",
",",
"use_cache",
"=",
"False",
")",
"self",
".",
"is_admin",
"=",
"True"
] | Authenticate into the UAA instance as the admin user. | [
"Authenticate",
"into",
"the",
"UAA",
"instance",
"as",
"the",
"admin",
"user",
"."
] | python | train |
Hackerfleet/hfos | hfos/ui/clientmanager.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/clientmanager.py#L155-L162 | def client_details(self, *args):
"""Display known details about a given client"""
self.log(_('Client details:', lang='de'))
client = self._clients[args[0]]
self.log('UUID:', client.uuid, 'IP:', client.ip, 'Name:', client.name, 'User:', self._users[client.useruuid],
pretty=True) | [
"def",
"client_details",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"log",
"(",
"_",
"(",
"'Client details:'",
",",
"lang",
"=",
"'de'",
")",
")",
"client",
"=",
"self",
".",
"_clients",
"[",
"args",
"[",
"0",
"]",
"]",
"self",
".",
"log",
"(",
"'UUID:'",
",",
"client",
".",
"uuid",
",",
"'IP:'",
",",
"client",
".",
"ip",
",",
"'Name:'",
",",
"client",
".",
"name",
",",
"'User:'",
",",
"self",
".",
"_users",
"[",
"client",
".",
"useruuid",
"]",
",",
"pretty",
"=",
"True",
")"
] | Display known details about a given client | [
"Display",
"known",
"details",
"about",
"a",
"given",
"client"
] | python | train |
radjkarl/appBase | appbase/dialogs/FirstStart.py | https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/dialogs/FirstStart.py#L56-L92 | def accept(self, evt):
"""
write setting to the preferences
"""
# determine if application is a script file or frozen exe (pyinstaller)
frozen = getattr(sys, 'frozen', False)
if frozen:
app_file = sys.executable
else:
app_file = PathStr(__main__.__file__).abspath()
if self.cb_startmenu.isChecked():
# TODO: allow only logo location
# icon = app_file.dirname().join('media', 'logo.ico')
StartMenuEntry(self.name, app_file, icon=self.icon,
console=False).create()
if self.cb_mime.isChecked():
# get admin rights
if not isAdmin():
try:
# run this file as __main__ with admin rights:
if frozen:
cmd = "from %s import embeddIntoOS\nembeddIntoOS('%s', '%s', '%s')" % (
__name__, '', self.ftype, self.name)
# in this case there is no python.exe and no moduly.py to call
# that's why we have to import the method and execute it
runAsAdmin((sys.executable, '-exec', cmd))
else:
runAsAdmin((sys.executable, __file__,
app_file, self.ftype, self.name))
except:
print('needs admin rights to work')
else:
embeddIntoOS(app_file, self.ftype, self.name)
QtWidgets.QDialog.accept(self) | [
"def",
"accept",
"(",
"self",
",",
"evt",
")",
":",
"# determine if application is a script file or frozen exe (pyinstaller)\r",
"frozen",
"=",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
"if",
"frozen",
":",
"app_file",
"=",
"sys",
".",
"executable",
"else",
":",
"app_file",
"=",
"PathStr",
"(",
"__main__",
".",
"__file__",
")",
".",
"abspath",
"(",
")",
"if",
"self",
".",
"cb_startmenu",
".",
"isChecked",
"(",
")",
":",
"# TODO: allow only logo location\r",
"# icon = app_file.dirname().join('media', 'logo.ico')\r",
"StartMenuEntry",
"(",
"self",
".",
"name",
",",
"app_file",
",",
"icon",
"=",
"self",
".",
"icon",
",",
"console",
"=",
"False",
")",
".",
"create",
"(",
")",
"if",
"self",
".",
"cb_mime",
".",
"isChecked",
"(",
")",
":",
"# get admin rights\r",
"if",
"not",
"isAdmin",
"(",
")",
":",
"try",
":",
"# run this file as __main__ with admin rights:\r",
"if",
"frozen",
":",
"cmd",
"=",
"\"from %s import embeddIntoOS\\nembeddIntoOS('%s', '%s', '%s')\"",
"%",
"(",
"__name__",
",",
"''",
",",
"self",
".",
"ftype",
",",
"self",
".",
"name",
")",
"# in this case there is no python.exe and no moduly.py to call\r",
"# thats why we have to import the method and execute it\r",
"runAsAdmin",
"(",
"(",
"sys",
".",
"executable",
",",
"'-exec'",
",",
"cmd",
")",
")",
"else",
":",
"runAsAdmin",
"(",
"(",
"sys",
".",
"executable",
",",
"__file__",
",",
"app_file",
",",
"self",
".",
"ftype",
",",
"self",
".",
"name",
")",
")",
"except",
":",
"print",
"(",
"'needs admin rights to work'",
")",
"else",
":",
"embeddIntoOS",
"(",
"app_file",
",",
"self",
".",
"ftype",
",",
"self",
".",
"name",
")",
"QtWidgets",
".",
"QDialog",
".",
"accept",
"(",
"self",
")"
] | write setting to the preferences | [
"write",
"setting",
"to",
"the",
"preferences"
] | python | train |
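For context: the `accept` handler above branches on PyInstaller's `sys.frozen` attribute to locate the running application. A minimal standalone sketch of that detection idiom (illustrative only, independent of appBase):

```python
import os
import sys

def resolve_app_file():
    """Locate the running application.

    PyInstaller sets ``sys.frozen`` and points ``sys.executable`` at the
    bundled exe; a plain script falls back to the ``__main__`` module path.
    """
    if getattr(sys, 'frozen', False):
        return sys.executable
    import __main__
    return os.path.abspath(__main__.__file__)

print(resolve_app_file())
```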
roboogle/gtkmvc3 | gtkmvco/gtkmvc3/support/utils.py | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/utils.py#L112-L136 | def __nt_relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path: raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \
% (path, start))
else: raise ValueError("path is on drive %s, start on drive %s" \
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else: i += 1
pass
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list: return os.curdir
return os.path.join(*rel_list) | [
"def",
"__nt_relpath",
"(",
"path",
",",
"start",
"=",
"os",
".",
"curdir",
")",
":",
"if",
"not",
"path",
":",
"raise",
"ValueError",
"(",
"\"no path specified\"",
")",
"start_list",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"start",
")",
".",
"split",
"(",
"os",
".",
"sep",
")",
"path_list",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
".",
"split",
"(",
"os",
".",
"sep",
")",
"if",
"start_list",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"!=",
"path_list",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"unc_path",
",",
"rest",
"=",
"os",
".",
"path",
".",
"splitunc",
"(",
"path",
")",
"unc_start",
",",
"rest",
"=",
"os",
".",
"path",
".",
"splitunc",
"(",
"start",
")",
"if",
"bool",
"(",
"unc_path",
")",
"^",
"bool",
"(",
"unc_start",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot mix UNC and non-UNC paths (%s and %s)\"",
"%",
"(",
"path",
",",
"start",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"path is on drive %s, start on drive %s\"",
"%",
"(",
"path_list",
"[",
"0",
"]",
",",
"start_list",
"[",
"0",
"]",
")",
")",
"# Work out how much of the filepath is shared by start and path.",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
"start_list",
")",
",",
"len",
"(",
"path_list",
")",
")",
")",
":",
"if",
"start_list",
"[",
"i",
"]",
".",
"lower",
"(",
")",
"!=",
"path_list",
"[",
"i",
"]",
".",
"lower",
"(",
")",
":",
"break",
"else",
":",
"i",
"+=",
"1",
"pass",
"rel_list",
"=",
"[",
"os",
".",
"pardir",
"]",
"*",
"(",
"len",
"(",
"start_list",
")",
"-",
"i",
")",
"+",
"path_list",
"[",
"i",
":",
"]",
"if",
"not",
"rel_list",
":",
"return",
"os",
".",
"curdir",
"return",
"os",
".",
"path",
".",
"join",
"(",
"*",
"rel_list",
")"
] | Return a relative version of a path | [
"Return",
"a",
"relative",
"version",
"of",
"a",
"path"
] | python | train |
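The `__nt_relpath` helper above reproduces, for Windows, what `os.path.relpath` does today: climb with `os.pardir` to the shared prefix, then descend into the target. A small usage sketch of the stdlib equivalent (POSIX paths for portability):

```python
import os.path

# Climb out of 'lib' (one os.pardir), then back down into 'app/docs' -
# the same walk __nt_relpath performs, minus the case-insensitive
# comparison and UNC checks that Windows paths require.
print(os.path.relpath('/projects/app/docs', start='/projects/lib'))
# -> '../app/docs'
```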
markovmodel/msmtools | msmtools/analysis/api.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/api.py#L1731-L1755 | def stationary_distribution_sensitivity(T, j):
r"""Sensitivity matrix of a stationary distribution element.
Parameters
----------
T : (M, M) ndarray
Transition matrix (stochastic matrix).
j : int
Index of stationary distribution element
for which sensitivity matrix is computed.
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for the specified element
of the stationary distribution.
"""
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
if _issparse(T):
_showSparseConversionWarning()
stationary_distribution_sensitivity(T.todense(), j)
else:
return dense.sensitivity.stationary_distribution_sensitivity(T, j) | [
"def",
"stationary_distribution_sensitivity",
"(",
"T",
",",
"j",
")",
":",
"T",
"=",
"_types",
".",
"ensure_ndarray_or_sparse",
"(",
"T",
",",
"ndim",
"=",
"2",
",",
"uniform",
"=",
"True",
",",
"kind",
"=",
"'numeric'",
")",
"if",
"_issparse",
"(",
"T",
")",
":",
"_showSparseConversionWarning",
"(",
")",
"stationary_distribution_sensitivity",
"(",
"T",
".",
"todense",
"(",
")",
",",
"j",
")",
"else",
":",
"return",
"dense",
".",
"sensitivity",
".",
"stationary_distribution_sensitivity",
"(",
"T",
",",
"j",
")"
] | r"""Sensitivity matrix of a stationary distribution element.
Parameters
----------
T : (M, M) ndarray
Transition matrix (stochastic matrix).
j : int
Index of stationary distribution element
for which sensitivity matrix is computed.
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for the specified element
of the stationary distribution. | [
"r",
"Sensitivity",
"matrix",
"of",
"a",
"stationary",
"distribution",
"element",
"."
] | python | train |
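The wrapper above differentiates one entry of the stationary distribution with respect to the transition matrix. For background, a short sketch of computing the stationary distribution itself as the left eigenvector of `T` for eigenvalue 1 (plain NumPy, not the msmtools API):

```python
import numpy as np

def stationary_distribution(T):
    """Left eigenvector of T for eigenvalue 1, normalized to sum to 1."""
    evals, evecs = np.linalg.eig(T.T)   # right eigenvectors of T.T = left of T
    i = np.argmin(np.abs(evals - 1.0))  # pick the eigenvalue closest to 1
    pi = np.real(evecs[:, i])
    return pi / pi.sum()

T = np.array([[0.9, 0.1],
              [0.2, 0.8]])
print(stationary_distribution(T))       # ~ [0.6667 0.3333]
```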
ntoll/microfs | microfs.py | https://github.com/ntoll/microfs/blob/11387109cfc36aaddceb018596ea75d55417ca0c/microfs.py#L175-L191 | def ls(serial=None):
"""
List the files on the micro:bit.
If no serial object is supplied, microfs will attempt to detect the
connection itself.
Returns a list of the files on the connected device or raises an IOError if
there's a problem.
"""
out, err = execute([
'import os',
'print(os.listdir())',
], serial)
if err:
raise IOError(clean_error(err))
return ast.literal_eval(out.decode('utf-8')) | [
"def",
"ls",
"(",
"serial",
"=",
"None",
")",
":",
"out",
",",
"err",
"=",
"execute",
"(",
"[",
"'import os'",
",",
"'print(os.listdir())'",
",",
"]",
",",
"serial",
")",
"if",
"err",
":",
"raise",
"IOError",
"(",
"clean_error",
"(",
"err",
")",
")",
"return",
"ast",
".",
"literal_eval",
"(",
"out",
".",
"decode",
"(",
"'utf-8'",
")",
")"
] | List the files on the micro:bit.
If no serial object is supplied, microfs will attempt to detect the
connection itself.
Returns a list of the files on the connected device or raises an IOError if
there's a problem. | [
"List",
"the",
"files",
"on",
"the",
"micro",
":",
"bit",
"."
] | python | train |
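`ls` above runs `os.listdir()` on the device and turns the printed repr back into a Python list with `ast.literal_eval`, which safely evaluates literals only. A sketch of just that parse step, with the serial round-trip replaced by a canned byte string:

```python
import ast

# What the micro:bit would print for os.listdir(), as raw serial bytes:
out = b"['main.py', 'boot.py', 'data.csv']\r\n"

files = ast.literal_eval(out.decode('utf-8'))
print(files)   # ['main.py', 'boot.py', 'data.csv']
```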
pyamg/pyamg | pyamg/relaxation/relaxation.py | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L747-L825 | def jacobi_ne(A, x, b, iterations=1, omega=1.0):
"""Perform Jacobi iterations on the linear system A A.H x = A.H b.
Also known as Cimmino relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
.. [3] Cimmino. La ricerca scientifica ser. II 1.
Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.
Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print norm(b-A*x0)
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi_ne', opts),
... postsmoother=('jacobi_ne', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])
temp = np.zeros_like(x)
# Dinv for A*A.H
Dinv = get_diagonal(A, norm_eq=2, inv=True)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
for i in range(iterations):
delta = (np.ravel(b - A*x)*np.ravel(Dinv)).astype(A.dtype)
amg_core.jacobi_ne(A.indptr, A.indices, A.data,
x, b, delta, temp, row_start,
row_stop, row_step, omega) | [
"def",
"jacobi_ne",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"omega",
"=",
"1.0",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"[",
"'csr'",
"]",
")",
"sweep",
"=",
"slice",
"(",
"None",
")",
"(",
"row_start",
",",
"row_stop",
",",
"row_step",
")",
"=",
"sweep",
".",
"indices",
"(",
"A",
".",
"shape",
"[",
"0",
"]",
")",
"temp",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"# Dinv for A*A.H",
"Dinv",
"=",
"get_diagonal",
"(",
"A",
",",
"norm_eq",
"=",
"2",
",",
"inv",
"=",
"True",
")",
"# Create uniform type, convert possibly complex scalars to length 1 arrays",
"[",
"omega",
"]",
"=",
"type_prep",
"(",
"A",
".",
"dtype",
",",
"[",
"omega",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"iterations",
")",
":",
"delta",
"=",
"(",
"np",
".",
"ravel",
"(",
"b",
"-",
"A",
"*",
"x",
")",
"*",
"np",
".",
"ravel",
"(",
"Dinv",
")",
")",
".",
"astype",
"(",
"A",
".",
"dtype",
")",
"amg_core",
".",
"jacobi_ne",
"(",
"A",
".",
"indptr",
",",
"A",
".",
"indices",
",",
"A",
".",
"data",
",",
"x",
",",
"b",
",",
"delta",
",",
"temp",
",",
"row_start",
",",
"row_stop",
",",
"row_step",
",",
"omega",
")"
] | Perform Jacobi iterations on the linear system A A.H x = A.H b.
Also known as Cimmino relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
.. [3] Cimmino. La ricerca scientifica ser. II 1.
Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.
Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print norm(b-A*x0)
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi_ne', opts),
... postsmoother=('jacobi_ne', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Jacobi",
"iterations",
"on",
"the",
"linear",
"system",
"A",
"A",
".",
"H",
"x",
"=",
"A",
".",
"H",
"b",
"."
] | python | train |
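The routine above hands the actual sweep to a compiled `amg_core` kernel; the Python layer only prepares `Dinv` (the inverse diagonal of A·Aᴴ) and the scaled residual. A dense NumPy sketch of one damped Cimmino step matching that update (illustrative, small matrices only; the real code operates on CSR data in place):

```python
import numpy as np

def jacobi_ne_step(A, x, b, omega=1.0):
    """One damped Jacobi (Cimmino) sweep on the normal equations."""
    Dinv = 1.0 / np.einsum('ij,ij->i', A, A.conj())  # 1 / diag(A @ A.H)
    delta = (b - A @ x) * Dinv                       # scaled residual
    return x + omega * (A.conj().T @ delta)

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.zeros(2)
for _ in range(50):
    x = jacobi_ne_step(A, x, b)
print(x, A @ x)   # x converges so that A @ x approaches b
```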
zeromake/aiko | aiko/application.py | https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/application.py#L143-L162 | def run(self, host: str = "0.0.0.0", port: int = 5000) -> None:
"""
debug run
:param host: the hostname to listen on, default is ``'0.0.0.0'``
:param port: the port of the server, default is ``5000``
"""
loop = cast(asyncio.AbstractEventLoop, self._loop)
listen = self.listen(host=host, port=port)
server = loop.run_until_complete(listen)
def close() -> None:
"""
Close callback.
"""
server.close()
loop.stop()
# print(type(server))
loop.add_signal_handler(SIGTERM, close)
loop.add_signal_handler(SIGINT, close)
loop.run_forever() | [
"def",
"run",
"(",
"self",
",",
"host",
":",
"str",
"=",
"\"0.0.0.0\"",
",",
"port",
":",
"int",
"=",
"5000",
")",
"->",
"None",
":",
"loop",
"=",
"cast",
"(",
"asyncio",
".",
"AbstractEventLoop",
",",
"self",
".",
"_loop",
")",
"listen",
"=",
"self",
".",
"listen",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")",
"server",
"=",
"loop",
".",
"run_until_complete",
"(",
"listen",
")",
"def",
"close",
"(",
")",
"->",
"None",
":",
"\"\"\"\n 关闭回调\n \"\"\"",
"server",
".",
"close",
"(",
")",
"loop",
".",
"stop",
"(",
")",
"# print(type(server))",
"loop",
".",
"add_signal_handler",
"(",
"SIGTERM",
",",
"close",
")",
"loop",
".",
"add_signal_handler",
"(",
"SIGINT",
",",
"close",
")",
"loop",
".",
"run_forever",
"(",
")"
] | debug run
:param host: the hostname to listen on, default is ``'0.0.0.0'``
:param port: the port of the server, default is ``5000`` | [
"debug",
"run",
":",
"param",
"host",
":",
"the",
"hostname",
"to",
"listen",
"on",
"default",
"is",
"0",
".",
"0",
".",
"0",
".",
"0",
":",
"param",
"port",
":",
"the",
"port",
"of",
"the",
"server",
"default",
"id",
"5000"
] | python | train |
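`run` above wires SIGINT and SIGTERM into the event loop so the server can stop cleanly instead of dying mid-request. A framework-free sketch of the same pattern with plain asyncio (`add_signal_handler` is Unix-only; the names here are illustrative, not aiko's API):

```python
import asyncio
import signal

async def main():
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()
    # One handler for both signals, like aiko's close() callback.
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)
    print('running; send SIGINT or SIGTERM to stop')
    await stop.wait()
    print('shutting down')

asyncio.run(main())
```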
rwl/pylon | pylon/dc_pf.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/dc_pf.py#L165-L192 | def _update_model(self, case, B, Bsrc, v_angle, p_srcinj, p_ref, ref_idx):
""" Updates the case with values computed from the voltage phase
angle solution.
"""
iref = ref_idx
base_mva = case.base_mva
buses = case.connected_buses
branches = case.online_branches
p_from = (Bsrc * v_angle + p_srcinj) * base_mva
p_to = -p_from
for i, branch in enumerate(branches):
branch.p_from = p_from[i]
branch.p_to = p_to[i]
branch.q_from = 0.0
branch.q_to = 0.0
for j, bus in enumerate(buses):
bus.v_angle = v_angle[j] * (180 / pi)
bus.v_magnitude = 1.0
# Update Pg for swing generator.
g_ref = [g for g in case.generators if g.bus == buses[iref]][0]
# Pg = Pinj + Pload + Gs
# newPg = oldPg + newPinj - oldPinj
p_inj = (B[iref, :] * v_angle - p_ref) * base_mva
g_ref.p += p_inj[0] | [
"def",
"_update_model",
"(",
"self",
",",
"case",
",",
"B",
",",
"Bsrc",
",",
"v_angle",
",",
"p_srcinj",
",",
"p_ref",
",",
"ref_idx",
")",
":",
"iref",
"=",
"ref_idx",
"base_mva",
"=",
"case",
".",
"base_mva",
"buses",
"=",
"case",
".",
"connected_buses",
"branches",
"=",
"case",
".",
"online_branches",
"p_from",
"=",
"(",
"Bsrc",
"*",
"v_angle",
"+",
"p_srcinj",
")",
"*",
"base_mva",
"p_to",
"=",
"-",
"p_from",
"for",
"i",
",",
"branch",
"in",
"enumerate",
"(",
"branches",
")",
":",
"branch",
".",
"p_from",
"=",
"p_from",
"[",
"i",
"]",
"branch",
".",
"p_to",
"=",
"p_to",
"[",
"i",
"]",
"branch",
".",
"q_from",
"=",
"0.0",
"branch",
".",
"q_to",
"=",
"0.0",
"for",
"j",
",",
"bus",
"in",
"enumerate",
"(",
"buses",
")",
":",
"bus",
".",
"v_angle",
"=",
"v_angle",
"[",
"j",
"]",
"*",
"(",
"180",
"/",
"pi",
")",
"bus",
".",
"v_magnitude",
"=",
"1.0",
"# Update Pg for swing generator.",
"g_ref",
"=",
"[",
"g",
"for",
"g",
"in",
"case",
".",
"generators",
"if",
"g",
".",
"bus",
"==",
"buses",
"[",
"iref",
"]",
"]",
"[",
"0",
"]",
"# Pg = Pinj + Pload + Gs",
"# newPg = oldPg + newPinj - oldPinj",
"p_inj",
"=",
"(",
"B",
"[",
"iref",
",",
":",
"]",
"*",
"v_angle",
"-",
"p_ref",
")",
"*",
"base_mva",
"g_ref",
".",
"p",
"+=",
"p_inj",
"[",
"0",
"]"
] | Updates the case with values computed from the voltage phase
angle solution. | [
"Updates",
"the",
"case",
"with",
"values",
"computed",
"from",
"the",
"voltage",
"phase",
"angle",
"solution",
"."
] | python | train |
craffel/mir_eval | mir_eval/transcription.py | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L570-L619 | def average_overlap_ratio(ref_intervals, est_intervals, matching):
"""Compute the Average Overlap Ratio between a reference and estimated
note transcription. Given a reference and corresponding estimated note,
their overlap ratio (OR) is defined as the ratio between the duration of
the time segment in which the two notes overlap and the time segment
spanned by the two notes combined (earliest onset to latest offset):
>>> OR = ((min(ref_offset, est_offset) - max(ref_onset, est_onset)) /
... (max(ref_offset, est_offset) - min(ref_onset, est_onset)))
The Average Overlap Ratio (AOR) is given by the mean OR computed over all
matching reference and estimated notes. The metric goes from 0 (worst) to 1
(best).
Note: this function assumes the matching of reference and estimated notes
(see :func:`match_notes`) has already been performed and is provided by the
``matching`` parameter. Furthermore, it is highly recommended to validate
the intervals (see :func:`validate_intervals`) before calling this
function, otherwise it is possible (though unlikely) for this function to
attempt a divide-by-zero operation.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
Returns
-------
avg_overlap_ratio : float
The computed Average Overlap Ratio score
"""
ratios = []
for match in matching:
ref_int = ref_intervals[match[0]]
est_int = est_intervals[match[1]]
overlap_ratio = (
(min(ref_int[1], est_int[1]) - max(ref_int[0], est_int[0])) /
(max(ref_int[1], est_int[1]) - min(ref_int[0], est_int[0])))
ratios.append(overlap_ratio)
if len(ratios) == 0:
return 0
else:
return np.mean(ratios) | [
"def",
"average_overlap_ratio",
"(",
"ref_intervals",
",",
"est_intervals",
",",
"matching",
")",
":",
"ratios",
"=",
"[",
"]",
"for",
"match",
"in",
"matching",
":",
"ref_int",
"=",
"ref_intervals",
"[",
"match",
"[",
"0",
"]",
"]",
"est_int",
"=",
"est_intervals",
"[",
"match",
"[",
"1",
"]",
"]",
"overlap_ratio",
"=",
"(",
"(",
"min",
"(",
"ref_int",
"[",
"1",
"]",
",",
"est_int",
"[",
"1",
"]",
")",
"-",
"max",
"(",
"ref_int",
"[",
"0",
"]",
",",
"est_int",
"[",
"0",
"]",
")",
")",
"/",
"(",
"max",
"(",
"ref_int",
"[",
"1",
"]",
",",
"est_int",
"[",
"1",
"]",
")",
"-",
"min",
"(",
"ref_int",
"[",
"0",
"]",
",",
"est_int",
"[",
"0",
"]",
")",
")",
")",
"ratios",
".",
"append",
"(",
"overlap_ratio",
")",
"if",
"len",
"(",
"ratios",
")",
"==",
"0",
":",
"return",
"0",
"else",
":",
"return",
"np",
".",
"mean",
"(",
"ratios",
")"
] | Compute the Average Overlap Ratio between a reference and estimated
note transcription. Given a reference and corresponding estimated note,
their overlap ratio (OR) is defined as the ratio between the duration of
the time segment in which the two notes overlap and the time segment
spanned by the two notes combined (earliest onset to latest offset):
>>> OR = ((min(ref_offset, est_offset) - max(ref_onset, est_onset)) /
... (max(ref_offset, est_offset) - min(ref_onset, est_onset)))
The Average Overlap Ratio (AOR) is given by the mean OR computed over all
matching reference and estimated notes. The metric goes from 0 (worst) to 1
(best).
Note: this function assumes the matching of reference and estimated notes
(see :func:`match_notes`) has already been performed and is provided by the
``matching`` parameter. Furthermore, it is highly recommended to validate
the intervals (see :func:`validate_intervals`) before calling this
function, otherwise it is possible (though unlikely) for this function to
attempt a divide-by-zero operation.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
Returns
-------
avg_overlap_ratio : float
The computed Average Overlap Ratio score | [
"Compute",
"the",
"Average",
"Overlap",
"Ratio",
"between",
"a",
"reference",
"and",
"estimated",
"note",
"transcription",
".",
"Given",
"a",
"reference",
"and",
"corresponding",
"estimated",
"note",
"their",
"overlap",
"ratio",
"(",
"OR",
")",
"is",
"defined",
"as",
"the",
"ratio",
"between",
"the",
"duration",
"of",
"the",
"time",
"segment",
"in",
"which",
"the",
"two",
"notes",
"overlap",
"and",
"the",
"time",
"segment",
"spanned",
"by",
"the",
"two",
"notes",
"combined",
"(",
"earliest",
"onset",
"to",
"latest",
"offset",
")",
":"
] | python | train |
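The docstring above spells out the overlap ratio as the intersection of two note intervals divided by their combined span. A worked instance of exactly that formula on one matched pair:

```python
ref = (0.50, 1.50)   # reference note: (onset, offset) in seconds
est = (0.75, 1.60)   # estimated note

overlap_ratio = ((min(ref[1], est[1]) - max(ref[0], est[0])) /
                 (max(ref[1], est[1]) - min(ref[0], est[0])))
print(round(overlap_ratio, 4))   # (1.50 - 0.75) / (1.60 - 0.50) = 0.6818
```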
mozilla/elasticutils | elasticutils/__init__.py | https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/__init__.py#L1439-L1462 | def get_es(self, default_builder=get_es):
"""Returns the Elasticsearch object to use.
:arg default_builder: The function that takes a bunch of
arguments and generates a elasticsearch Elasticsearch
object.
.. Note::
If you desire special behavior regarding building the
Elasticsearch object for this S, subclass S and override
this method.
"""
# .es() calls are incremental, so we go through them all and
# update bits that are specified.
args = {}
for action, value in self.steps:
if action == 'es':
args.update(**value)
# TODO: store the Elasticsearch on the S if we've already
# created one since we don't need to do it multiple times.
return default_builder(**args) | [
"def",
"get_es",
"(",
"self",
",",
"default_builder",
"=",
"get_es",
")",
":",
"# .es() calls are incremental, so we go through them all and",
"# update bits that are specified.",
"args",
"=",
"{",
"}",
"for",
"action",
",",
"value",
"in",
"self",
".",
"steps",
":",
"if",
"action",
"==",
"'es'",
":",
"args",
".",
"update",
"(",
"*",
"*",
"value",
")",
"# TODO: store the Elasticsearch on the S if we've already",
"# created one since we don't need to do it multiple times.",
"return",
"default_builder",
"(",
"*",
"*",
"args",
")"
] | Returns the Elasticsearch object to use.
:arg default_builder: The function that takes a bunch of
arguments and generates an elasticsearch Elasticsearch
object.
.. Note::
If you desire special behavior regarding building the
Elasticsearch object for this S, subclass S and override
this method. | [
"Returns",
"the",
"Elasticsearch",
"object",
"to",
"use",
"."
] | python | train |
hollenstein/maspy | maspy/core.py | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L344-L354 | def _addSpecfile(self, specfile, path):
"""Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
"""
datatypeStatus = {'rm': False, 'ci': False, 'smi': False, 'sai': False,
'si': False
}
self.info[specfile] = {'path': path, 'status': datatypeStatus} | [
"def",
"_addSpecfile",
"(",
"self",
",",
"specfile",
",",
"path",
")",
":",
"datatypeStatus",
"=",
"{",
"'rm'",
":",
"False",
",",
"'ci'",
":",
"False",
",",
"'smi'",
":",
"False",
",",
"'sai'",
":",
"False",
",",
"'si'",
":",
"False",
"}",
"self",
".",
"info",
"[",
"specfile",
"]",
"=",
"{",
"'path'",
":",
"path",
",",
"'status'",
":",
"datatypeStatus",
"}"
] | Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.
:param specfile: the name of an ms-run file
:param path: file directory used for loading and saving ``mrc`` files | [
"Adds",
"a",
"new",
"specfile",
"entry",
"to",
"MsrunContainer",
".",
"info",
".",
"See",
"also",
":",
"class",
":",
"MsrunContainer",
".",
"addSpecfile",
"()",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/mainwindow.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/mainwindow.py#L651-L670 | def _get_magic_menu(self,menuidentifier, menulabel=None):
"""return a submagic menu by name, and create it if needed
parameters:
-----------
menulabel : str
Label for the menu
Will infer the menu name from the identifier at creation if menulabel is not given.
To do so you have to give menuidentifier as a CamelCasedString
"""
menu = self._magic_menu_dict.get(menuidentifier,None)
if not menu :
if not menulabel:
menulabel = re.sub("([a-zA-Z]+)([A-Z][a-z])","\g<1> \g<2>",menuidentifier)
menu = QtGui.QMenu(menulabel,self.magic_menu)
self._magic_menu_dict[menuidentifier]=menu
self.magic_menu.insertMenu(self.magic_menu_separator,menu)
return menu | [
"def",
"_get_magic_menu",
"(",
"self",
",",
"menuidentifier",
",",
"menulabel",
"=",
"None",
")",
":",
"menu",
"=",
"self",
".",
"_magic_menu_dict",
".",
"get",
"(",
"menuidentifier",
",",
"None",
")",
"if",
"not",
"menu",
":",
"if",
"not",
"menulabel",
":",
"menulabel",
"=",
"re",
".",
"sub",
"(",
"\"([a-zA-Z]+)([A-Z][a-z])\"",
",",
"\"\\g<1> \\g<2>\"",
",",
"menuidentifier",
")",
"menu",
"=",
"QtGui",
".",
"QMenu",
"(",
"menulabel",
",",
"self",
".",
"magic_menu",
")",
"self",
".",
"_magic_menu_dict",
"[",
"menuidentifier",
"]",
"=",
"menu",
"self",
".",
"magic_menu",
".",
"insertMenu",
"(",
"self",
".",
"magic_menu_separator",
",",
"menu",
")",
"return",
"menu"
] | return a submagic menu by name, and create it if needed
parameters:
-----------
menulabel : str
Label for the menu
Will infer the menu name from the identifier at creation if menulabel is not given.
To do so you have to give menuidentifier as a CamelCasedString | [
"return",
"a",
"submagic",
"menu",
"by",
"name",
"and",
"create",
"it",
"if",
"needed",
"parameters",
":",
"-----------"
] | python | test |
barrust/mediawiki | mediawiki/mediawiki.py | https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L136-L140 | def rate_limit(self, rate_limit):
""" Turn on or off rate limiting """
self._rate_limit = bool(rate_limit)
self._rate_limit_last_call = None
self.clear_memoized() | [
"def",
"rate_limit",
"(",
"self",
",",
"rate_limit",
")",
":",
"self",
".",
"_rate_limit",
"=",
"bool",
"(",
"rate_limit",
")",
"self",
".",
"_rate_limit_last_call",
"=",
"None",
"self",
".",
"clear_memoized",
"(",
")"
] | Turn on or off rate limiting | [
"Turn",
"on",
"or",
"off",
"rate",
"limiting"
] | python | train |
XuShaohua/bcloud | bcloud/UploadPage.py | https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/UploadPage.py#L416-L485 | def upload_files(self, source_paths, dir_name=None):
'''Create upload tasks in batch; subdirectories are scanned and uploaded in turn.
source_path - absolute path of the local file
dir_name - the file's parent directory on the server; if None, a dialog
will pop up for the user to choose a directory.
'''
def scan_folders(folder_path):
file_list = os.listdir(folder_path)
source_paths = [os.path.join(folder_path, f) for f in file_list]
self.upload_files(source_paths,
os.path.join(dir_name, os.path.split(folder_path)[1]))
self.check_first()
if not dir_name:
folder_dialog = FolderBrowserDialog(self, self.app)
response = folder_dialog.run()
if response != Gtk.ResponseType.OK:
folder_dialog.destroy()
return
dir_name = folder_dialog.get_path()
folder_dialog.destroy()
invalid_paths = []
for source_path in source_paths:
if util.validate_pathname(source_path) != ValidatePathState.OK:
invalid_paths.append(source_path)
continue
if (os.path.split(source_path)[1].startswith('.') and
not self.app.profile['upload-hidden-files']):
continue
if os.path.isfile(source_path):
self.upload_file(source_path, dir_name)
elif os.path.isdir(source_path):
scan_folders(source_path)
self.app.blink_page(self)
self.scan_tasks()
if not invalid_paths:
return
dialog = Gtk.Dialog(_('Invalid Filepath'), self.app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.OK))
dialog.set_default_size(640, 480)
dialog.set_border_width(10)
box = dialog.get_content_area()
scrolled_window = Gtk.ScrolledWindow()
box.pack_start(scrolled_window, True, True, 0)
text_buffer = Gtk.TextBuffer()
textview = Gtk.TextView.new_with_buffer(text_buffer)
scrolled_window.add(textview)
for invalid_path in invalid_paths:
text_buffer.insert_at_cursor(invalid_path)
text_buffer.insert_at_cursor('\n')
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.ERROR)
box.pack_end(infobar, False, False, 0)
info_label= Gtk.Label()
infobar.get_content_area().pack_start(info_label, False, False, 0)
info_label.set_label(''.join([
'* ', ValidatePathStateText[1], '\n',
'* ', ValidatePathStateText[2], '\n',
'* ', ValidatePathStateText[3], '\n',
]))
box.show_all()
dialog.run()
dialog.destroy() | [
"def",
"upload_files",
"(",
"self",
",",
"source_paths",
",",
"dir_name",
"=",
"None",
")",
":",
"def",
"scan_folders",
"(",
"folder_path",
")",
":",
"file_list",
"=",
"os",
".",
"listdir",
"(",
"folder_path",
")",
"source_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"folder_path",
",",
"f",
")",
"for",
"f",
"in",
"file_list",
"]",
"self",
".",
"upload_files",
"(",
"source_paths",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"os",
".",
"path",
".",
"split",
"(",
"folder_path",
")",
"[",
"1",
"]",
")",
")",
"self",
".",
"check_first",
"(",
")",
"if",
"not",
"dir_name",
":",
"folder_dialog",
"=",
"FolderBrowserDialog",
"(",
"self",
",",
"self",
".",
"app",
")",
"response",
"=",
"folder_dialog",
".",
"run",
"(",
")",
"if",
"response",
"!=",
"Gtk",
".",
"ResponseType",
".",
"OK",
":",
"folder_dialog",
".",
"destroy",
"(",
")",
"return",
"dir_name",
"=",
"folder_dialog",
".",
"get_path",
"(",
")",
"folder_dialog",
".",
"destroy",
"(",
")",
"invalid_paths",
"=",
"[",
"]",
"for",
"source_path",
"in",
"source_paths",
":",
"if",
"util",
".",
"validate_pathname",
"(",
"source_path",
")",
"!=",
"ValidatePathState",
".",
"OK",
":",
"invalid_paths",
".",
"append",
"(",
"source_path",
")",
"continue",
"if",
"(",
"os",
".",
"path",
".",
"split",
"(",
"source_path",
")",
"[",
"1",
"]",
".",
"startswith",
"(",
"'.'",
")",
"and",
"not",
"self",
".",
"app",
".",
"profile",
"[",
"'upload-hidden-files'",
"]",
")",
":",
"continue",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"source_path",
")",
":",
"self",
".",
"upload_file",
"(",
"source_path",
",",
"dir_name",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"source_path",
")",
":",
"scan_folders",
"(",
"source_path",
")",
"self",
".",
"app",
".",
"blink_page",
"(",
"self",
")",
"self",
".",
"scan_tasks",
"(",
")",
"if",
"not",
"invalid_paths",
":",
"return",
"dialog",
"=",
"Gtk",
".",
"Dialog",
"(",
"_",
"(",
"'Invalid Filepath'",
")",
",",
"self",
".",
"app",
".",
"window",
",",
"Gtk",
".",
"DialogFlags",
".",
"MODAL",
",",
"(",
"Gtk",
".",
"STOCK_CLOSE",
",",
"Gtk",
".",
"ResponseType",
".",
"OK",
")",
")",
"dialog",
".",
"set_default_size",
"(",
"640",
",",
"480",
")",
"dialog",
".",
"set_border_width",
"(",
"10",
")",
"box",
"=",
"dialog",
".",
"get_content_area",
"(",
")",
"scrolled_window",
"=",
"Gtk",
".",
"ScrolledWindow",
"(",
")",
"box",
".",
"pack_start",
"(",
"scrolled_window",
",",
"True",
",",
"True",
",",
"0",
")",
"text_buffer",
"=",
"Gtk",
".",
"TextBuffer",
"(",
")",
"textview",
"=",
"Gtk",
".",
"TextView",
".",
"new_with_buffer",
"(",
"text_buffer",
")",
"scrolled_window",
".",
"add",
"(",
"textview",
")",
"for",
"invalid_path",
"in",
"invalid_paths",
":",
"text_buffer",
".",
"insert_at_cursor",
"(",
"invalid_path",
")",
"text_buffer",
".",
"insert_at_cursor",
"(",
"'\\n'",
")",
"infobar",
"=",
"Gtk",
".",
"InfoBar",
"(",
")",
"infobar",
".",
"set_message_type",
"(",
"Gtk",
".",
"MessageType",
".",
"ERROR",
")",
"box",
".",
"pack_end",
"(",
"infobar",
",",
"False",
",",
"False",
",",
"0",
")",
"info_label",
"=",
"Gtk",
".",
"Label",
"(",
")",
"infobar",
".",
"get_content_area",
"(",
")",
".",
"pack_start",
"(",
"info_label",
",",
"False",
",",
"False",
",",
"0",
")",
"info_label",
".",
"set_label",
"(",
"''",
".",
"join",
"(",
"[",
"'* '",
",",
"ValidatePathStateText",
"[",
"1",
"]",
",",
"'\\n'",
",",
"'* '",
",",
"ValidatePathStateText",
"[",
"2",
"]",
",",
"'\\n'",
",",
"'* '",
",",
"ValidatePathStateText",
"[",
"3",
"]",
",",
"'\\n'",
",",
"]",
")",
")",
"box",
".",
"show_all",
"(",
")",
"dialog",
".",
"run",
"(",
")",
"dialog",
".",
"destroy",
"(",
")"
] | Create upload tasks in batch; subdirectories are scanned and uploaded in turn.
source_path - absolute path of the local file
dir_name - the file's parent directory on the server; if None, a dialog
will pop up for the user to choose a directory. | [
"批量创建上传任务",
"会扫描子目录并依次上传",
"."
] | python | train |
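`upload_files` above recurses through subdirectories by re-invoking itself from the nested `scan_folders`. The same traversal can be written without recursion using `os.walk`; a standalone sketch yielding the equivalent (local file, remote directory) pairs (names are illustrative, not bcloud's API):

```python
import os

def iter_upload_pairs(source_dir, remote_root):
    """Yield (local_file, remote_dir) pairs that mirror the local tree."""
    base = os.path.dirname(source_dir.rstrip(os.sep))
    for dirpath, _dirnames, filenames in os.walk(source_dir):
        rel = os.path.relpath(dirpath, base)
        for name in filenames:
            yield os.path.join(dirpath, name), os.path.join(remote_root, rel)

for local, remote in iter_upload_pairs('/home/me/photos', '/backup'):
    print(local, '->', remote)
```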
bitesofcode/projexui | projexui/widgets/xpushbutton.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpushbutton.py#L72-L90 | def setShowRichText(self, state):
"""
Sets whether or not to display rich text for this button.
:param state | <bool>
"""
self._showRichText = state
text = self.text()
if state:
label = self.richTextLabel()
label.setText(text)
label.show()
super(XPushButton, self).setText('')
else:
if self._richTextLabel:
self._richTextLabel.hide()
super(XPushButton, self).setText(text) | [
"def",
"setShowRichText",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"_showRichText",
"=",
"state",
"text",
"=",
"self",
".",
"text",
"(",
")",
"if",
"state",
":",
"label",
"=",
"self",
".",
"richTextLabel",
"(",
")",
"label",
".",
"setText",
"(",
"text",
")",
"label",
".",
"show",
"(",
")",
"super",
"(",
"XPushButton",
",",
"self",
")",
".",
"setText",
"(",
"''",
")",
"else",
":",
"if",
"self",
".",
"_richTextLabel",
":",
"self",
".",
"_richTextLabel",
".",
"hide",
"(",
")",
"super",
"(",
"XPushButton",
",",
"self",
")",
".",
"setText",
"(",
"text",
")"
] | Sets whether or not to display rich text for this button.
:param state | <bool> | [
"Sets",
"whether",
"or",
"not",
"to",
"display",
"rich",
"text",
"for",
"this",
"button",
".",
":",
"param",
"state",
"|",
"<bool",
">"
] | python | train |
bioidiap/bob.ip.facedetect | bob/ip/facedetect/train/TrainingSet.py | https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/train/TrainingSet.py#L157-L292 | def extract(self, sampler, feature_extractor, number_of_examples_per_scale = (100, 100), similarity_thresholds = (0.5, 0.8), parallel = None, mirror = False, use_every_nth_negative_scale = 1):
"""Extracts features from **all** images in **all** scales and writes them to file.
This function iterates over all images that are present in the internally stored list, and extracts features using the given ``feature_extractor`` for every image patch that the given ``sampler`` returns.
The final features will be stored in the ``feature_directory`` that is set in the constructor.
For each image, the ``sampler`` samples patch locations, which cover the whole image in different scales.
For each patch locations is tested, how similar they are to the face bounding boxes that belong to that image, using the Jaccard :py:meth:`BoundingBox.similarity`.
The similarity is compared to the ``similarity_thresholds``.
If it is smaller than the first threshold, the patch is considered as background, when it is greater the the second threshold, it is considered as a face, otherwise it is rejected.
Depending on the image resolution and the number of bounding boxes, this will usually result in some positive and thousands of negative patches per image.
To limit the total amount of training data, for all scales, only up to a given number of positive and negative patches are kept.
Also, to further limit the number of negative samples, only every ``use_every_nth_negative_scale`` scale is considered (for the positives, always all scales are processed).
To increase the number of examples (especially positive ones), features can also be extracted from horizontally mirrored images.
Simply set the ``mirror`` parameter to ``True``.
Furthermore, this function is designed to be run using several parallel processes, e.g., using the `GridTK <https://pypi.python.org/pypi/gridtk>`_.
Each of the processes will run on a particular subset of the images, which is defined by the ``SGE_TASK_ID`` environment variable.
The ``parallel`` parameter defines the total number of parallel processes that are used.
**Parameters:**
``sampler`` : :py:class:`Sampler`
The sampler to use to sample patches of the images. Please assure that the sampler is set up such that it samples patch locations which can overlap with the face locations.
``feature_extractor`` : :py:class:`FeatureExtractor`
The feature extractor to be used to extract features from image patches
``number_of_examples_per_scale`` : (int, int)
The maximum number of positive and negative examples to extract for each scale of the image
``similarity_thresholds`` : (float, float)
The Jaccard similarity threshold, below which patch locations are considered to be negative, and above which patch locations are considered to be positive examples.
``parallel`` : int or ``None``
If given, the total number of parallel processes, which are used to extract features (the current process index is read from the ``SGE_TASK_ID`` environment variable)
``mirror`` : bool
Extract positive and negative samples also from horizontally mirrored images?
``use_every_nth_negative_scale`` : int
Skip some negative scales to decrease the number of negative examples, i.e., only extract and store negative features, when ``scale_counter % use_every_nth_negative_scale == 0``
.. note::
The ``scale_counter`` is not reset between images, so that we might get features from different scales in subsequent images.
"""
feature_file = self._feature_file(parallel)
bob.io.base.create_directories_safe(self.feature_directory)
if parallel is None or "SGE_TASK_ID" not in os.environ or os.environ["SGE_TASK_ID"] == '1':
extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
hdf5 = bob.io.base.HDF5File(extractor_file, "w")
feature_extractor.save(hdf5)
del hdf5
total_positives, total_negatives = 0, 0
indices = parallel_part(range(len(self)), parallel)
if not indices:
logger.warning("The index range for the current parallel thread is empty.")
else:
logger.info("Extracting features for images in range %d - %d of %d", indices[0], indices[-1], len(self))
hdf5 = bob.io.base.HDF5File(feature_file, "w")
for index in indices:
hdf5.create_group("Image-%d" % index)
hdf5.cd("Image-%d" % index)
logger.debug("Processing file %d of %d: %s", index+1, indices[-1]+1, self.image_paths[index])
# load image
image = bob.io.base.load(self.image_paths[index])
if image.ndim == 3:
image = bob.ip.color.rgb_to_gray(image)
# get ground_truth bounding boxes
ground_truth = self.bounding_boxes[index]
# collect image and GT for originally and mirrored image
images = [image] if not mirror else [image, bob.ip.base.flop(image)]
ground_truths = [ground_truth] if not mirror else [ground_truth, [gt.mirror_x(image.shape[1]) for gt in ground_truth]]
parts = "om"
# now, sample
scale_counter = -1
for image, ground_truth, part in zip(images, ground_truths, parts):
for scale, scaled_image_shape in sampler.scales(image):
scale_counter += 1
scaled_gt = [gt.scale(scale) for gt in ground_truth]
positives = []
negatives = []
# iterate over all possible positions in the image
for bb in sampler.sample_scaled(scaled_image_shape):
# check if the patch is a positive example
positive = False
negative = True
for gt in scaled_gt:
similarity = bb.similarity(gt)
if similarity > similarity_thresholds[1]:
positive = True
break
if similarity > similarity_thresholds[0]:
negative = False
break
if positive:
positives.append(bb)
elif negative and scale_counter % use_every_nth_negative_scale == 0:
negatives.append(bb)
# per scale, limit the number of positive and negative samples
positives = [positives[i] for i in quasi_random_indices(len(positives), number_of_examples_per_scale[0])]
negatives = [negatives[i] for i in quasi_random_indices(len(negatives), number_of_examples_per_scale[1])]
# extract features
feature_extractor.prepare(image, scale)
# .. negative features
if negatives:
negative_features = numpy.zeros((len(negatives), feature_extractor.number_of_features), numpy.uint16)
for i, bb in enumerate(negatives):
feature_extractor.extract_all(bb, negative_features, i)
hdf5.set("Negatives-%s-%.5f" % (part,scale), negative_features)
total_negatives += len(negatives)
# positive features
if positives:
positive_features = numpy.zeros((len(positives), feature_extractor.number_of_features), numpy.uint16)
for i, bb in enumerate(positives):
feature_extractor.extract_all(bb, positive_features, i)
hdf5.set("Positives-%s-%.5f" % (part,scale), positive_features)
total_positives += len(positives)
# cd backwards after each image
hdf5.cd("..")
hdf5.set("TotalPositives", total_positives)
hdf5.set("TotalNegatives", total_negatives) | [
"def",
"extract",
"(",
"self",
",",
"sampler",
",",
"feature_extractor",
",",
"number_of_examples_per_scale",
"=",
"(",
"100",
",",
"100",
")",
",",
"similarity_thresholds",
"=",
"(",
"0.5",
",",
"0.8",
")",
",",
"parallel",
"=",
"None",
",",
"mirror",
"=",
"False",
",",
"use_every_nth_negative_scale",
"=",
"1",
")",
":",
"feature_file",
"=",
"self",
".",
"_feature_file",
"(",
"parallel",
")",
"bob",
".",
"io",
".",
"base",
".",
"create_directories_safe",
"(",
"self",
".",
"feature_directory",
")",
"if",
"parallel",
"is",
"None",
"or",
"\"SGE_TASK_ID\"",
"not",
"in",
"os",
".",
"environ",
"or",
"os",
".",
"environ",
"[",
"\"SGE_TASK_ID\"",
"]",
"==",
"'1'",
":",
"extractor_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"feature_directory",
",",
"\"Extractor.hdf5\"",
")",
"hdf5",
"=",
"bob",
".",
"io",
".",
"base",
".",
"HDF5File",
"(",
"extractor_file",
",",
"\"w\"",
")",
"feature_extractor",
".",
"save",
"(",
"hdf5",
")",
"del",
"hdf5",
"total_positives",
",",
"total_negatives",
"=",
"0",
",",
"0",
"indices",
"=",
"parallel_part",
"(",
"range",
"(",
"len",
"(",
"self",
")",
")",
",",
"parallel",
")",
"if",
"not",
"indices",
":",
"logger",
".",
"warning",
"(",
"\"The index range for the current parallel thread is empty.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Extracting features for images in range %d - %d of %d\"",
",",
"indices",
"[",
"0",
"]",
",",
"indices",
"[",
"-",
"1",
"]",
",",
"len",
"(",
"self",
")",
")",
"hdf5",
"=",
"bob",
".",
"io",
".",
"base",
".",
"HDF5File",
"(",
"feature_file",
",",
"\"w\"",
")",
"for",
"index",
"in",
"indices",
":",
"hdf5",
".",
"create_group",
"(",
"\"Image-%d\"",
"%",
"index",
")",
"hdf5",
".",
"cd",
"(",
"\"Image-%d\"",
"%",
"index",
")",
"logger",
".",
"debug",
"(",
"\"Processing file %d of %d: %s\"",
",",
"index",
"+",
"1",
",",
"indices",
"[",
"-",
"1",
"]",
"+",
"1",
",",
"self",
".",
"image_paths",
"[",
"index",
"]",
")",
"# load image",
"image",
"=",
"bob",
".",
"io",
".",
"base",
".",
"load",
"(",
"self",
".",
"image_paths",
"[",
"index",
"]",
")",
"if",
"image",
".",
"ndim",
"==",
"3",
":",
"image",
"=",
"bob",
".",
"ip",
".",
"color",
".",
"rgb_to_gray",
"(",
"image",
")",
"# get ground_truth bounding boxes",
"ground_truth",
"=",
"self",
".",
"bounding_boxes",
"[",
"index",
"]",
"# collect image and GT for originally and mirrored image",
"images",
"=",
"[",
"image",
"]",
"if",
"not",
"mirror",
"else",
"[",
"image",
",",
"bob",
".",
"ip",
".",
"base",
".",
"flop",
"(",
"image",
")",
"]",
"ground_truths",
"=",
"[",
"ground_truth",
"]",
"if",
"not",
"mirror",
"else",
"[",
"ground_truth",
",",
"[",
"gt",
".",
"mirror_x",
"(",
"image",
".",
"shape",
"[",
"1",
"]",
")",
"for",
"gt",
"in",
"ground_truth",
"]",
"]",
"parts",
"=",
"\"om\"",
"# now, sample",
"scale_counter",
"=",
"-",
"1",
"for",
"image",
",",
"ground_truth",
",",
"part",
"in",
"zip",
"(",
"images",
",",
"ground_truths",
",",
"parts",
")",
":",
"for",
"scale",
",",
"scaled_image_shape",
"in",
"sampler",
".",
"scales",
"(",
"image",
")",
":",
"scale_counter",
"+=",
"1",
"scaled_gt",
"=",
"[",
"gt",
".",
"scale",
"(",
"scale",
")",
"for",
"gt",
"in",
"ground_truth",
"]",
"positives",
"=",
"[",
"]",
"negatives",
"=",
"[",
"]",
"# iterate over all possible positions in the image",
"for",
"bb",
"in",
"sampler",
".",
"sample_scaled",
"(",
"scaled_image_shape",
")",
":",
"# check if the patch is a positive example",
"positive",
"=",
"False",
"negative",
"=",
"True",
"for",
"gt",
"in",
"scaled_gt",
":",
"similarity",
"=",
"bb",
".",
"similarity",
"(",
"gt",
")",
"if",
"similarity",
">",
"similarity_thresholds",
"[",
"1",
"]",
":",
"positive",
"=",
"True",
"break",
"if",
"similarity",
">",
"similarity_thresholds",
"[",
"0",
"]",
":",
"negative",
"=",
"False",
"break",
"if",
"positive",
":",
"positives",
".",
"append",
"(",
"bb",
")",
"elif",
"negative",
"and",
"scale_counter",
"%",
"use_every_nth_negative_scale",
"==",
"0",
":",
"negatives",
".",
"append",
"(",
"bb",
")",
"# per scale, limit the number of positive and negative samples",
"positives",
"=",
"[",
"positives",
"[",
"i",
"]",
"for",
"i",
"in",
"quasi_random_indices",
"(",
"len",
"(",
"positives",
")",
",",
"number_of_examples_per_scale",
"[",
"0",
"]",
")",
"]",
"negatives",
"=",
"[",
"negatives",
"[",
"i",
"]",
"for",
"i",
"in",
"quasi_random_indices",
"(",
"len",
"(",
"negatives",
")",
",",
"number_of_examples_per_scale",
"[",
"1",
"]",
")",
"]",
"# extract features",
"feature_extractor",
".",
"prepare",
"(",
"image",
",",
"scale",
")",
"# .. negative features",
"if",
"negatives",
":",
"negative_features",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"len",
"(",
"negatives",
")",
",",
"feature_extractor",
".",
"number_of_features",
")",
",",
"numpy",
".",
"uint16",
")",
"for",
"i",
",",
"bb",
"in",
"enumerate",
"(",
"negatives",
")",
":",
"feature_extractor",
".",
"extract_all",
"(",
"bb",
",",
"negative_features",
",",
"i",
")",
"hdf5",
".",
"set",
"(",
"\"Negatives-%s-%.5f\"",
"%",
"(",
"part",
",",
"scale",
")",
",",
"negative_features",
")",
"total_negatives",
"+=",
"len",
"(",
"negatives",
")",
"# positive features",
"if",
"positives",
":",
"positive_features",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"len",
"(",
"positives",
")",
",",
"feature_extractor",
".",
"number_of_features",
")",
",",
"numpy",
".",
"uint16",
")",
"for",
"i",
",",
"bb",
"in",
"enumerate",
"(",
"positives",
")",
":",
"feature_extractor",
".",
"extract_all",
"(",
"bb",
",",
"positive_features",
",",
"i",
")",
"hdf5",
".",
"set",
"(",
"\"Positives-%s-%.5f\"",
"%",
"(",
"part",
",",
"scale",
")",
",",
"positive_features",
")",
"total_positives",
"+=",
"len",
"(",
"positives",
")",
"# cd backwards after each image",
"hdf5",
".",
"cd",
"(",
"\"..\"",
")",
"hdf5",
".",
"set",
"(",
"\"TotalPositives\"",
",",
"total_positives",
")",
"hdf5",
".",
"set",
"(",
"\"TotalNegatives\"",
",",
"total_negatives",
")"
] | Extracts features from **all** images in **all** scales and writes them to file.
This function iterates over all images that are present in the internally stored list, and extracts features using the given ``feature_extractor`` for every image patch that the given ``sampler`` returns.
The final features will be stored in the ``feature_directory`` that is set in the constructor.
For each image, the ``sampler`` samples patch locations, which cover the whole image in different scales.
Each patch location is tested for how similar it is to the face bounding boxes that belong to that image, using the Jaccard :py:meth:`BoundingBox.similarity`.
The similarity is compared to the ``similarity_thresholds``.
If it is smaller than the first threshold, the patch is considered background; when it is greater than the second threshold, it is considered a face; otherwise it is rejected.
Depending on the image resolution and the number of bounding boxes, this will usually result in some positive and thousands of negative patches per image.
To limit the total amount of training data, for all scales, only up to a given number of positive and negative patches are kept.
Also, to further limit the number of negative samples, only every ``use_every_nth_negative_scale`` scale is considered (for the positives, always all scales are processed).
To increase the number of examples (especially positive ones), features can also be extracted from horizontally mirrored images.
Simply set the ``mirror`` parameter to ``True``.
Furthermore, this function is designed to be run using several parallel processes, e.g., using the `GridTK <https://pypi.python.org/pypi/gridtk>`_.
Each of the processes will run on a particular subset of the images, which is defined by the ``SGE_TASK_ID`` environment variable.
The ``parallel`` parameter defines the total number of parallel processes that are used.
**Parameters:**
``sampler`` : :py:class:`Sampler`
The sampler to use to sample patches of the images. Please assure that the sampler is set up such that it samples patch locations which can overlap with the face locations.
``feature_extractor`` : :py:class:`FeatureExtractor`
The feature extractor to be used to extract features from image patches
``number_of_examples_per_scale`` : (int, int)
The maximum number of positive and negative examples to extract for each scale of the image
``similarity_thresholds`` : (float, float)
The Jaccard similarity threshold, below which patch locations are considered to be negative, and above which patch locations are considered to be positive examples.
``parallel`` : int or ``None``
If given, the total number of parallel processes, which are used to extract features (the current process index is read from the ``SGE_TASK_ID`` environment variable)
``mirror`` : bool
Extract positive and negative samples also from horizontally mirrored images?
``use_every_nth_negative_scale`` : int
Skip some negative scales to decrease the number of negative examples, i.e., only extract and store negative features, when ``scale_counter % use_every_nth_negative_scale == 0``
.. note::
The ``scale_counter`` is not reset between images, so that we might get features from different scales in subsequent images. | [
"Extracts",
"features",
"from",
"**",
"all",
"**",
"images",
"in",
"**",
"all",
"**",
"scales",
"and",
"writes",
"them",
"to",
"file",
"."
] | python | train |
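Patch labelling above hinges on `BoundingBox.similarity`, the Jaccard overlap between a sampled patch and a ground-truth box. A small sketch of that intersection-over-union computation on plain tuples (not the actual `BoundingBox` class):

```python
def jaccard(a, b):
    """IoU of two axis-aligned boxes given as (top, left, bottom, right)."""
    top, left = max(a[0], b[0]), max(a[1], b[1])
    bottom, right = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, bottom - top) * max(0.0, right - left)
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union else 0.0

gt = (10, 10, 110, 110)      # ground-truth face box
patch = (30, 30, 130, 130)   # sampled patch
print(jaccard(gt, patch))    # ~0.47 -> negative under the (0.5, 0.8) thresholds
```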
inveniosoftware/invenio-search | invenio_search/ext.py | https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/ext.py#L285-L314 | def put_templates(self, ignore=None):
"""Yield tuple with registered template and response from client."""
ignore = ignore or []
def _replace_prefix(template_path, body):
"""Replace index prefix in template request body."""
pattern = '__SEARCH_INDEX_PREFIX__'
prefix = self.app.config['SEARCH_INDEX_PREFIX'] or ''
if prefix:
assert pattern in body, "You are using the prefix `{0}`, "
"but the template `{1}` does not contain the "
"pattern `{2}`.".format(prefix, template_path, pattern)
return body.replace(pattern, prefix)
def _put_template(template):
"""Put template in search client."""
with open(self.templates[template], 'r') as fp:
body = fp.read()
replaced_body = _replace_prefix(self.templates[template], body)
return self.templates[template],\
current_search_client.indices.put_template(
name=template,
body=json.loads(replaced_body),
ignore=ignore,
)
for template in self.templates:
yield _put_template(template) | [
"def",
"put_templates",
"(",
"self",
",",
"ignore",
"=",
"None",
")",
":",
"ignore",
"=",
"ignore",
"or",
"[",
"]",
"def",
"_replace_prefix",
"(",
"template_path",
",",
"body",
")",
":",
"\"\"\"Replace index prefix in template request body.\"\"\"",
"pattern",
"=",
"'__SEARCH_INDEX_PREFIX__'",
"prefix",
"=",
"self",
".",
"app",
".",
"config",
"[",
"'SEARCH_INDEX_PREFIX'",
"]",
"or",
"''",
"if",
"prefix",
":",
"assert",
"pattern",
"in",
"body",
",",
"\"You are using the prefix `{0}`, \"",
"\"but the template `{1}` does not contain the \"",
"\"pattern `{2}`.\"",
".",
"format",
"(",
"prefix",
",",
"template_path",
",",
"pattern",
")",
"return",
"body",
".",
"replace",
"(",
"pattern",
",",
"prefix",
")",
"def",
"_put_template",
"(",
"template",
")",
":",
"\"\"\"Put template in search client.\"\"\"",
"with",
"open",
"(",
"self",
".",
"templates",
"[",
"template",
"]",
",",
"'r'",
")",
"as",
"fp",
":",
"body",
"=",
"fp",
".",
"read",
"(",
")",
"replaced_body",
"=",
"_replace_prefix",
"(",
"self",
".",
"templates",
"[",
"template",
"]",
",",
"body",
")",
"return",
"self",
".",
"templates",
"[",
"template",
"]",
",",
"current_search_client",
".",
"indices",
".",
"put_template",
"(",
"name",
"=",
"template",
",",
"body",
"=",
"json",
".",
"loads",
"(",
"replaced_body",
")",
",",
"ignore",
"=",
"ignore",
",",
")",
"for",
"template",
"in",
"self",
".",
"templates",
":",
"yield",
"_put_template",
"(",
"template",
")"
] | Yield tuple with registered template and response from client. | [
"Yield",
"tuple",
"with",
"registered",
"template",
"and",
"response",
"from",
"client",
"."
] | python | train |
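A standalone sketch of the prefix substitution that put_templates performs before registering each template body; the pattern string is taken from the code above, the sample body is invented.

import json

PATTERN = '__SEARCH_INDEX_PREFIX__'

def replace_prefix(body, prefix):
    # Mirrors the helper above: the pattern must be present when a prefix is set.
    if prefix:
        assert PATTERN in body, 'template body is missing the prefix pattern'
    return body.replace(PATTERN, prefix or '')

body = json.dumps({'index_patterns': ['__SEARCH_INDEX_PREFIX__records-*']})
print(json.loads(replace_prefix(body, 'myapp-')))  # {'index_patterns': ['myapp-records-*']}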
timkpaine/pyEX | pyEX/stocks.py | https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L130-L176 | def bulkBatch(symbols, fields=None, range_='1m', last=10, token='', version=''):
'''Optimized batch to fetch as much as possible at once
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list): List of tickers to request
fields (list): List of fields to request
range_ (string): Date range for chart
last (int):
token (string): Access token
version (string): API version
Returns:
dict: results in json
'''
fields = fields or _BATCH_TYPES
args = []
empty_data = []
list_orig = empty_data.__class__
if not isinstance(symbols, list_orig):
raise PyEXception('Symbols must be of type list')
for i in range(0, len(symbols), 99):
args.append((symbols[i:i+99], fields, range_, last, token, version))
pool = ThreadPool(20)
rets = pool.starmap(batch, args)
pool.close()
ret = {}
for i, d in enumerate(rets):
symbols_subset = args[i][0]
if len(d) != len(symbols_subset):
empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))
ret.update(d)
for k in empty_data:
if k not in ret:
if isinstance(fields, str):
ret[k] = {}
else:
ret[k] = {x: {} for x in fields}
return ret | [
"def",
"bulkBatch",
"(",
"symbols",
",",
"fields",
"=",
"None",
",",
"range_",
"=",
"'1m'",
",",
"last",
"=",
"10",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"fields",
"=",
"fields",
"or",
"_BATCH_TYPES",
"args",
"=",
"[",
"]",
"empty_data",
"=",
"[",
"]",
"list_orig",
"=",
"empty_data",
".",
"__class__",
"if",
"not",
"isinstance",
"(",
"symbols",
",",
"list_orig",
")",
":",
"raise",
"PyEXception",
"(",
"'Symbols must be of type list'",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"symbols",
")",
",",
"99",
")",
":",
"args",
".",
"append",
"(",
"(",
"symbols",
"[",
"i",
":",
"i",
"+",
"99",
"]",
",",
"fields",
",",
"range_",
",",
"last",
",",
"token",
",",
"version",
")",
")",
"pool",
"=",
"ThreadPool",
"(",
"20",
")",
"rets",
"=",
"pool",
".",
"starmap",
"(",
"batch",
",",
"args",
")",
"pool",
".",
"close",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"rets",
")",
":",
"symbols_subset",
"=",
"args",
"[",
"i",
"]",
"[",
"0",
"]",
"if",
"len",
"(",
"d",
")",
"!=",
"len",
"(",
"symbols_subset",
")",
":",
"empty_data",
".",
"extend",
"(",
"list_orig",
"(",
"set",
"(",
"symbols_subset",
")",
"-",
"set",
"(",
"d",
".",
"keys",
"(",
")",
")",
")",
")",
"ret",
".",
"update",
"(",
"d",
")",
"for",
"k",
"in",
"empty_data",
":",
"if",
"k",
"not",
"in",
"ret",
":",
"if",
"isinstance",
"(",
"fields",
",",
"str",
")",
":",
"ret",
"[",
"k",
"]",
"=",
"{",
"}",
"else",
":",
"ret",
"[",
"k",
"]",
"=",
"{",
"x",
":",
"{",
"}",
"for",
"x",
"in",
"fields",
"}",
"return",
"ret"
] | Optimized batch to fetch as much as possible at once
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list): List of tickers to request
fields (list): List of fields to request
range_ (string): Date range for chart
last (int):
token (string): Access token
version (string): API version
Returns:
dict: results in json | [
"Optimized",
"batch",
"to",
"fetch",
"as",
"much",
"as",
"possible",
"at",
"once"
] | python | valid |
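The core of bulkBatch is chunking the symbol list into 99-element slices and fanning the requests out over a thread pool; a self-contained sketch with a stubbed fetch function standing in for the real batch() call.

from multiprocessing.pool import ThreadPool

def fake_batch(symbols):
    # Stand-in for the real batch() request; returns a dict keyed by symbol.
    return {s: {'quote': {}} for s in symbols}

symbols = ['SYM%d' % i for i in range(250)]
chunks = [symbols[i:i + 99] for i in range(0, len(symbols), 99)]
pool = ThreadPool(20)
results = pool.map(fake_batch, chunks)
pool.close()
merged = {}
for d in results:
    merged.update(d)
print(len(merged))  # 250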
marshmallow-code/marshmallow | src/marshmallow/schema.py | https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/schema.py#L776-L870 | def _do_load(
self, data, many=None, partial=None, unknown=None,
postprocess=True,
):
"""Deserialize `data`, returning the deserialized result.
:param data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param bool|tuple partial: Whether to validate required fields. If its value is an iterable,
only fields listed in that iterable will be allowed missing.
If `True`, all fields will be allowed missing.
If `None`, the value for `self.partial` is used.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:param bool postprocess: Whether to run post_load methods.
:return: A dict of deserialized data
:rtype: dict
"""
error_store = ErrorStore()
errors = {}
many = self.many if many is None else bool(many)
unknown = unknown or self.unknown
if partial is None:
partial = self.partial
# Run preprocessors
if self._has_processors(PRE_LOAD):
try:
processed_data = self._invoke_load_processors(
PRE_LOAD,
data,
many,
original_data=data,
)
except ValidationError as err:
errors = err.normalized_messages()
result = None
else:
processed_data = data
if not errors:
# Deserialize data
result = self._deserialize(
processed_data,
self.fields,
error_store,
many=many,
partial=partial,
unknown=unknown,
dict_class=self.dict_class,
index_errors=self.opts.index_errors,
)
# Run field-level validation
self._invoke_field_validators(error_store, data=result, many=many)
# Run schema-level validation
if self._has_processors(VALIDATES_SCHEMA):
field_errors = bool(error_store.errors)
self._invoke_schema_validators(
error_store,
pass_many=True,
data=result,
original_data=data,
many=many,
field_errors=field_errors,
)
self._invoke_schema_validators(
error_store,
pass_many=False,
data=result,
original_data=data,
many=many,
field_errors=field_errors,
)
errors = error_store.errors
# Run post processors
if not errors and postprocess and self._has_processors(POST_LOAD):
try:
result = self._invoke_load_processors(
POST_LOAD,
result,
many,
original_data=data,
)
except ValidationError as err:
errors = err.normalized_messages()
if errors:
exc = ValidationError(
errors,
data=data,
valid_data=result,
)
self.handle_error(exc, data)
raise exc
return result | [
"def",
"_do_load",
"(",
"self",
",",
"data",
",",
"many",
"=",
"None",
",",
"partial",
"=",
"None",
",",
"unknown",
"=",
"None",
",",
"postprocess",
"=",
"True",
",",
")",
":",
"error_store",
"=",
"ErrorStore",
"(",
")",
"errors",
"=",
"{",
"}",
"many",
"=",
"self",
".",
"many",
"if",
"many",
"is",
"None",
"else",
"bool",
"(",
"many",
")",
"unknown",
"=",
"unknown",
"or",
"self",
".",
"unknown",
"if",
"partial",
"is",
"None",
":",
"partial",
"=",
"self",
".",
"partial",
"# Run preprocessors",
"if",
"self",
".",
"_has_processors",
"(",
"PRE_LOAD",
")",
":",
"try",
":",
"processed_data",
"=",
"self",
".",
"_invoke_load_processors",
"(",
"PRE_LOAD",
",",
"data",
",",
"many",
",",
"original_data",
"=",
"data",
",",
")",
"except",
"ValidationError",
"as",
"err",
":",
"errors",
"=",
"err",
".",
"normalized_messages",
"(",
")",
"result",
"=",
"None",
"else",
":",
"processed_data",
"=",
"data",
"if",
"not",
"errors",
":",
"# Deserialize data",
"result",
"=",
"self",
".",
"_deserialize",
"(",
"processed_data",
",",
"self",
".",
"fields",
",",
"error_store",
",",
"many",
"=",
"many",
",",
"partial",
"=",
"partial",
",",
"unknown",
"=",
"unknown",
",",
"dict_class",
"=",
"self",
".",
"dict_class",
",",
"index_errors",
"=",
"self",
".",
"opts",
".",
"index_errors",
",",
")",
"# Run field-level validation",
"self",
".",
"_invoke_field_validators",
"(",
"error_store",
",",
"data",
"=",
"result",
",",
"many",
"=",
"many",
")",
"# Run schema-level validation",
"if",
"self",
".",
"_has_processors",
"(",
"VALIDATES_SCHEMA",
")",
":",
"field_errors",
"=",
"bool",
"(",
"error_store",
".",
"errors",
")",
"self",
".",
"_invoke_schema_validators",
"(",
"error_store",
",",
"pass_many",
"=",
"True",
",",
"data",
"=",
"result",
",",
"original_data",
"=",
"data",
",",
"many",
"=",
"many",
",",
"field_errors",
"=",
"field_errors",
",",
")",
"self",
".",
"_invoke_schema_validators",
"(",
"error_store",
",",
"pass_many",
"=",
"False",
",",
"data",
"=",
"result",
",",
"original_data",
"=",
"data",
",",
"many",
"=",
"many",
",",
"field_errors",
"=",
"field_errors",
",",
")",
"errors",
"=",
"error_store",
".",
"errors",
"# Run post processors",
"if",
"not",
"errors",
"and",
"postprocess",
"and",
"self",
".",
"_has_processors",
"(",
"POST_LOAD",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"_invoke_load_processors",
"(",
"POST_LOAD",
",",
"result",
",",
"many",
",",
"original_data",
"=",
"data",
",",
")",
"except",
"ValidationError",
"as",
"err",
":",
"errors",
"=",
"err",
".",
"normalized_messages",
"(",
")",
"if",
"errors",
":",
"exc",
"=",
"ValidationError",
"(",
"errors",
",",
"data",
"=",
"data",
",",
"valid_data",
"=",
"result",
",",
")",
"self",
".",
"handle_error",
"(",
"exc",
",",
"data",
")",
"raise",
"exc",
"return",
"result"
] | Deserialize `data`, returning the deserialized result.
:param data: The data to deserialize.
:param bool many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param bool|tuple partial: Whether to validate required fields. If its value is an iterable,
only fields listed in that iterable will be allowed missing.
If `True`, all fields will be allowed missing.
If `None`, the value for `self.partial` is used.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:param bool postprocess: Whether to run post_load methods.
:return: A dict of deserialized data
:rtype: dict | [
"Deserialize",
"data",
"returning",
"the",
"deserialized",
"result",
"."
] | python | train |
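How the partial/unknown knobs documented above surface through the public Schema.load() API; a sketch assuming marshmallow 3 is installed.

from marshmallow import Schema, fields, EXCLUDE

class UserSchema(Schema):
    name = fields.Str(required=True)
    email = fields.Str(required=True)

schema = UserSchema()
# `partial` lets `email` be missing; `unknown=EXCLUDE` drops the extra key.
result = schema.load({'name': 'Ada', 'extra': 1},
                     partial=('email',), unknown=EXCLUDE)
print(result)  # {'name': 'Ada'}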
xolox/python-vcs-repo-mgr | vcs_repo_mgr/backends/git.py | https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/git.py#L213-L220 | def find_branches(self):
"""Find information about the branches in the repository."""
for prefix, name, revision_id in self.find_branches_raw():
yield Revision(
branch=name,
repository=self,
revision_id=revision_id,
) | [
"def",
"find_branches",
"(",
"self",
")",
":",
"for",
"prefix",
",",
"name",
",",
"revision_id",
"in",
"self",
".",
"find_branches_raw",
"(",
")",
":",
"yield",
"Revision",
"(",
"branch",
"=",
"name",
",",
"repository",
"=",
"self",
",",
"revision_id",
"=",
"revision_id",
",",
")"
] | Find information about the branches in the repository. | [
"Find",
"information",
"about",
"the",
"branches",
"in",
"the",
"repository",
"."
] | python | train |
gitpython-developers/GitPython | git/refs/log.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/log.py#L47-L57 | def format(self):
""":return: a string suitable to be placed in a reflog file"""
act = self.actor
time = self.time
return u"{} {} {} <{}> {!s} {}\t{}\n".format(self.oldhexsha,
self.newhexsha,
act.name,
act.email,
time[0],
altz_to_utctz_str(time[1]),
self.message) | [
"def",
"format",
"(",
"self",
")",
":",
"act",
"=",
"self",
".",
"actor",
"time",
"=",
"self",
".",
"time",
"return",
"u\"{} {} {} <{}> {!s} {}\\t{}\\n\"",
".",
"format",
"(",
"self",
".",
"oldhexsha",
",",
"self",
".",
"newhexsha",
",",
"act",
".",
"name",
",",
"act",
".",
"email",
",",
"time",
"[",
"0",
"]",
",",
"altz_to_utctz_str",
"(",
"time",
"[",
"1",
"]",
")",
",",
"self",
".",
"message",
")"
] | :return: a string suitable to be placed in a reflog file | [
":",
"return",
":",
"a",
"string",
"suitable",
"to",
"be",
"placed",
"in",
"a",
"reflog",
"file"
] | python | train |
NoviceLive/intellicoder | intellicoder/msbuild/locators.py | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/msbuild/locators.py#L103-L116 | def get_lib(self, arch='x86'):
"""
Get lib directories of Visual C++.
"""
if arch == 'x86':
arch = ''
if arch == 'x64':
arch = 'amd64'
lib = os.path.join(self.vc_dir, 'lib', arch)
if os.path.isdir(lib):
logging.info(_('using lib: %s'), lib)
return [lib]
logging.debug(_('lib not found: %s'), lib)
return [] | [
"def",
"get_lib",
"(",
"self",
",",
"arch",
"=",
"'x86'",
")",
":",
"if",
"arch",
"==",
"'x86'",
":",
"arch",
"=",
"''",
"if",
"arch",
"==",
"'x64'",
":",
"arch",
"=",
"'amd64'",
"lib",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"vc_dir",
",",
"'lib'",
",",
"arch",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"lib",
")",
":",
"logging",
".",
"info",
"(",
"_",
"(",
"'using lib: %s'",
")",
",",
"lib",
")",
"return",
"[",
"lib",
"]",
"logging",
".",
"debug",
"(",
"_",
"(",
"'lib not found: %s'",
")",
",",
"lib",
")",
"return",
"[",
"]"
] | Get lib directories of Visual C++. | [
"Get",
"lib",
"directories",
"of",
"Visual",
"C",
"++",
"."
] | python | train |
regebro/svg.path | src/svg/path/path.py | https://github.com/regebro/svg.path/blob/cb58e104e5aa3472be205c75da59690db30aecc9/src/svg/path/path.py#L126-L132 | def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start | [
"def",
"is_smooth_from",
"(",
"self",
",",
"previous",
")",
":",
"if",
"isinstance",
"(",
"previous",
",",
"QuadraticBezier",
")",
":",
"return",
"(",
"self",
".",
"start",
"==",
"previous",
".",
"end",
"and",
"(",
"self",
".",
"control",
"-",
"self",
".",
"start",
")",
"==",
"(",
"previous",
".",
"end",
"-",
"previous",
".",
"control",
")",
")",
"else",
":",
"return",
"self",
".",
"control",
"==",
"self",
".",
"start"
] | Checks if this segment would be a smooth segment following the previous | [
"Checks",
"if",
"this",
"segment",
"would",
"be",
"a",
"smooth",
"segment",
"following",
"the",
"previous"
] | python | train |
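svg.path represents points as complex numbers, so the smoothness test above is a reflection check; a standalone sketch of the same condition.

# A quadratic segment is "smooth from" the previous one when its control point
# mirrors the previous control point across the shared endpoint.
def is_smooth_from(prev_end, prev_control, start, control):
    return start == prev_end and (control - start) == (prev_end - prev_control)

print(is_smooth_from(prev_end=4 + 2j, prev_control=3 + 1j,
                     start=4 + 2j, control=5 + 3j))  # True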
HHammond/PrettyPandas | prettypandas/summarizer.py | https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L162-L190 | def _apply_summaries(self):
"""Add all summary rows and columns."""
def as_frame(r):
if isinstance(r, pd.Series):
return r.to_frame()
else:
return r
df = self.data
if df.index.nlevels > 1:
raise ValueError(
"You cannot currently have both summary rows and columns on a "
"MultiIndex."
)
_df = df
if self.summary_rows:
rows = pd.concat([agg.apply(_df)
for agg in self._cleaned_summary_rows], axis=1).T
df = pd.concat([df, as_frame(rows)], axis=0)
if self.summary_cols:
cols = pd.concat([agg.apply(_df)
for agg in self._cleaned_summary_cols], axis=1)
df = pd.concat([df, as_frame(cols)], axis=1)
return df | [
"def",
"_apply_summaries",
"(",
"self",
")",
":",
"def",
"as_frame",
"(",
"r",
")",
":",
"if",
"isinstance",
"(",
"r",
",",
"pd",
".",
"Series",
")",
":",
"return",
"r",
".",
"to_frame",
"(",
")",
"else",
":",
"return",
"r",
"df",
"=",
"self",
".",
"data",
"if",
"df",
".",
"index",
".",
"nlevels",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"You cannot currently have both summary rows and columns on a \"",
"\"MultiIndex.\"",
")",
"_df",
"=",
"df",
"if",
"self",
".",
"summary_rows",
":",
"rows",
"=",
"pd",
".",
"concat",
"(",
"[",
"agg",
".",
"apply",
"(",
"_df",
")",
"for",
"agg",
"in",
"self",
".",
"_cleaned_summary_rows",
"]",
",",
"axis",
"=",
"1",
")",
".",
"T",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"as_frame",
"(",
"rows",
")",
"]",
",",
"axis",
"=",
"0",
")",
"if",
"self",
".",
"summary_cols",
":",
"cols",
"=",
"pd",
".",
"concat",
"(",
"[",
"agg",
".",
"apply",
"(",
"_df",
")",
"for",
"agg",
"in",
"self",
".",
"_cleaned_summary_cols",
"]",
",",
"axis",
"=",
"1",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"as_frame",
"(",
"cols",
")",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"df"
] | Add all summary rows and columns. | [
"Add",
"all",
"summary",
"rows",
"and",
"columns",
"."
] | python | train |
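The concat pattern used by _apply_summaries, reduced to plain pandas: compute an aggregate row and glue it under the frame (assumes pandas is installed).

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
total = df.sum().to_frame().T          # one summary row
total.index = ['Total']
print(pd.concat([df, total], axis=0))  # axis=0 appends rows, axis=1 would append columns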
venmo/slouch | slouch/__init__.py | https://github.com/venmo/slouch/blob/000b03bc220a0d7aa5b06f59caf423e2b63a81d7/slouch/__init__.py#L258-L301 | def _handle_long_response(self, res):
"""Splits messages that are too long into multiple events
:param res: a slack response string or dict
"""
is_rtm_message = isinstance(res, basestring)
is_api_message = isinstance(res, dict)
if is_rtm_message:
text = res
elif is_api_message:
text = res['text']
message_length = len(text)
if message_length <= SLACK_MESSAGE_LIMIT:
return [res]
remaining_str = text
responses = []
while remaining_str:
less_than_limit = len(remaining_str) < SLACK_MESSAGE_LIMIT
if less_than_limit:
last_line_break = None
else:
last_line_break = remaining_str[:SLACK_MESSAGE_LIMIT].rfind('\n')
if is_rtm_message:
responses.append(remaining_str[:last_line_break])
elif is_api_message:
template = res.copy()
template['text'] = remaining_str[:last_line_break]
responses.append(template)
if less_than_limit:
remaining_str = None
else:
remaining_str = remaining_str[last_line_break:]
self.log.debug("_handle_long_response: splitting long response %s, returns: \n %s",
pprint.pformat(res), pprint.pformat(responses))
return responses | [
"def",
"_handle_long_response",
"(",
"self",
",",
"res",
")",
":",
"is_rtm_message",
"=",
"isinstance",
"(",
"res",
",",
"basestring",
")",
"is_api_message",
"=",
"isinstance",
"(",
"res",
",",
"dict",
")",
"if",
"is_rtm_message",
":",
"text",
"=",
"res",
"elif",
"is_api_message",
":",
"text",
"=",
"res",
"[",
"'text'",
"]",
"message_length",
"=",
"len",
"(",
"text",
")",
"if",
"message_length",
"<=",
"SLACK_MESSAGE_LIMIT",
":",
"return",
"[",
"res",
"]",
"remaining_str",
"=",
"text",
"responses",
"=",
"[",
"]",
"while",
"remaining_str",
":",
"less_than_limit",
"=",
"len",
"(",
"remaining_str",
")",
"<",
"SLACK_MESSAGE_LIMIT",
"if",
"less_than_limit",
":",
"last_line_break",
"=",
"None",
"else",
":",
"last_line_break",
"=",
"remaining_str",
"[",
":",
"SLACK_MESSAGE_LIMIT",
"]",
".",
"rfind",
"(",
"'\\n'",
")",
"if",
"is_rtm_message",
":",
"responses",
".",
"append",
"(",
"remaining_str",
"[",
":",
"last_line_break",
"]",
")",
"elif",
"is_api_message",
":",
"template",
"=",
"res",
".",
"copy",
"(",
")",
"template",
"[",
"'text'",
"]",
"=",
"remaining_str",
"[",
":",
"last_line_break",
"]",
"responses",
".",
"append",
"(",
"template",
")",
"if",
"less_than_limit",
":",
"remaining_str",
"=",
"None",
"else",
":",
"remaining_str",
"=",
"remaining_str",
"[",
"last_line_break",
":",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"_handle_long_response: splitting long response %s, returns: \\n %s\"",
",",
"pprint",
".",
"pformat",
"(",
"res",
")",
",",
"pprint",
".",
"pformat",
"(",
"responses",
")",
")",
"return",
"responses"
] | Splits messages that are too long into multiple events
:param res: a slack response string or dict | [
"Splits",
"messages",
"that",
"are",
"too",
"long",
"into",
"multiple",
"events",
":",
"param",
"res",
":",
"a",
"slack",
"response",
"string",
"or",
"dict"
] | python | train |
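A self-contained version of the newline-boundary splitting above, with a small limit so the behaviour is easy to see; the real SLACK_MESSAGE_LIMIT is much larger, the 20 here is just for the demo.

LIMIT = 20  # stand-in for SLACK_MESSAGE_LIMIT

def split_message(text):
    parts = []
    while text:
        if len(text) < LIMIT:
            parts.append(text)
            break
        cut = text[:LIMIT].rfind('\n')  # break on the last newline that fits
        parts.append(text[:cut])
        text = text[cut:]
    return parts

print(split_message('line one\nline two\nline three\n'))
# ['line one\nline two', '\nline three\n']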
materialsproject/pymatgen | pymatgen/analysis/elasticity/elastic.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L1020-L1044 | def get_symbol_list(rank, dim=6):
"""
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
"""
indices = list(
itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros([dim]*rank, dtype=object)
for n, idx in enumerate(indices):
c_vec[n] = sp.Symbol('c_'+''.join([str(i) for i in idx]))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return c_vec, c_arr | [
"def",
"get_symbol_list",
"(",
"rank",
",",
"dim",
"=",
"6",
")",
":",
"indices",
"=",
"list",
"(",
"itertools",
".",
"combinations_with_replacement",
"(",
"range",
"(",
"dim",
")",
",",
"r",
"=",
"rank",
")",
")",
"c_vec",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"indices",
")",
",",
"dtype",
"=",
"object",
")",
"c_arr",
"=",
"np",
".",
"zeros",
"(",
"[",
"dim",
"]",
"*",
"rank",
",",
"dtype",
"=",
"object",
")",
"for",
"n",
",",
"idx",
"in",
"enumerate",
"(",
"indices",
")",
":",
"c_vec",
"[",
"n",
"]",
"=",
"sp",
".",
"Symbol",
"(",
"'c_'",
"+",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"idx",
"]",
")",
")",
"for",
"perm",
"in",
"itertools",
".",
"permutations",
"(",
"idx",
")",
":",
"c_arr",
"[",
"perm",
"]",
"=",
"c_vec",
"[",
"n",
"]",
"return",
"c_vec",
",",
"c_arr"
] | Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above | [
"Returns",
"a",
"symbolic",
"representation",
"of",
"the",
"voigt",
"-",
"notation",
"tensor",
"that",
"places",
"identical",
"symbols",
"for",
"entries",
"related",
"by",
"index",
"transposition",
"i",
".",
"e",
".",
"C_1121",
"=",
"C_1211",
"etc",
"."
] | python | train |
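The index-symmetrization idea above in miniature, using a rank-2 tensor over a 3x3 matrix instead of the elastic-tensor dimensions; assumes numpy and sympy are installed.

import itertools
import numpy as np
import sympy as sp

dim, rank = 3, 2
indices = list(itertools.combinations_with_replacement(range(dim), r=rank))
c_arr = np.zeros([dim] * rank, dtype=object)
for idx in indices:
    sym = sp.Symbol('c_' + ''.join(str(i) for i in idx))
    for perm in itertools.permutations(idx):
        c_arr[perm] = sym   # c_01 and c_10 share one symbol
print(c_arr)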
nerdvegas/rez | src/rezplugins/build_system/custom.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezplugins/build_system/custom.py#L87-L176 | def build(self, context, variant, build_path, install_path, install=False,
build_type=BuildType.local):
"""Perform the build.
Note that most of the func args aren't used here - that's because this
info is already passed to the custom build command via environment
variables.
"""
ret = {}
if self.write_build_scripts:
# write out the script that places the user in a build env, where
# they can run bez directly themselves.
build_env_script = os.path.join(build_path, "build-env")
create_forwarding_script(build_env_script,
module=("build_system", "custom"),
func_name="_FWD__spawn_build_shell",
working_dir=self.working_dir,
build_path=build_path,
variant_index=variant.index,
install=install,
install_path=install_path)
ret["success"] = True
ret["build_env_script"] = build_env_script
return ret
# get build command
command = self.package.build_command
# False just means no build command
if command is False:
ret["success"] = True
return ret
def expand(txt):
root = self.package.root
install_ = "install" if install else ''
return txt.format(root=root, install=install_).strip()
if isinstance(command, basestring):
if self.build_args:
command = command + ' ' + ' '.join(map(quote, self.build_args))
command = expand(command)
cmd_str = command
else: # list
command = command + self.build_args
command = map(expand, command)
cmd_str = ' '.join(map(quote, command))
if self.verbose:
pr = Printer(sys.stdout)
pr("Running build command: %s" % cmd_str, heading)
# run the build command
def _callback(executor):
self._add_build_actions(executor,
context=context,
package=self.package,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path)
if self.opts:
# write args defined in ./parse_build_args.py out as env vars
extra_args = getattr(self.opts.parser, "_rezbuild_extra_args", [])
for key, value in vars(self.opts).iteritems():
if key in extra_args:
varname = "__PARSE_ARG_%s" % key.upper()
# do some value conversions
if isinstance(value, bool):
value = 1 if value else 0
elif isinstance(value, (list, tuple)):
value = map(str, value)
value = map(quote, value)
value = ' '.join(value)
executor.env[varname] = value
retcode, _, _ = context.execute_shell(command=command,
block=True,
cwd=build_path,
actions_callback=_callback)
ret["success"] = (not retcode)
return ret | [
"def",
"build",
"(",
"self",
",",
"context",
",",
"variant",
",",
"build_path",
",",
"install_path",
",",
"install",
"=",
"False",
",",
"build_type",
"=",
"BuildType",
".",
"local",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"self",
".",
"write_build_scripts",
":",
"# write out the script that places the user in a build env, where",
"# they can run bez directly themselves.",
"build_env_script",
"=",
"os",
".",
"path",
".",
"join",
"(",
"build_path",
",",
"\"build-env\"",
")",
"create_forwarding_script",
"(",
"build_env_script",
",",
"module",
"=",
"(",
"\"build_system\"",
",",
"\"custom\"",
")",
",",
"func_name",
"=",
"\"_FWD__spawn_build_shell\"",
",",
"working_dir",
"=",
"self",
".",
"working_dir",
",",
"build_path",
"=",
"build_path",
",",
"variant_index",
"=",
"variant",
".",
"index",
",",
"install",
"=",
"install",
",",
"install_path",
"=",
"install_path",
")",
"ret",
"[",
"\"success\"",
"]",
"=",
"True",
"ret",
"[",
"\"build_env_script\"",
"]",
"=",
"build_env_script",
"return",
"ret",
"# get build command",
"command",
"=",
"self",
".",
"package",
".",
"build_command",
"# False just means no build command",
"if",
"command",
"is",
"False",
":",
"ret",
"[",
"\"success\"",
"]",
"=",
"True",
"return",
"ret",
"def",
"expand",
"(",
"txt",
")",
":",
"root",
"=",
"self",
".",
"package",
".",
"root",
"install_",
"=",
"\"install\"",
"if",
"install",
"else",
"''",
"return",
"txt",
".",
"format",
"(",
"root",
"=",
"root",
",",
"install",
"=",
"install_",
")",
".",
"strip",
"(",
")",
"if",
"isinstance",
"(",
"command",
",",
"basestring",
")",
":",
"if",
"self",
".",
"build_args",
":",
"command",
"=",
"command",
"+",
"' '",
"+",
"' '",
".",
"join",
"(",
"map",
"(",
"quote",
",",
"self",
".",
"build_args",
")",
")",
"command",
"=",
"expand",
"(",
"command",
")",
"cmd_str",
"=",
"command",
"else",
":",
"# list",
"command",
"=",
"command",
"+",
"self",
".",
"build_args",
"command",
"=",
"map",
"(",
"expand",
",",
"command",
")",
"cmd_str",
"=",
"' '",
".",
"join",
"(",
"map",
"(",
"quote",
",",
"command",
")",
")",
"if",
"self",
".",
"verbose",
":",
"pr",
"=",
"Printer",
"(",
"sys",
".",
"stdout",
")",
"pr",
"(",
"\"Running build command: %s\"",
"%",
"cmd_str",
",",
"heading",
")",
"# run the build command",
"def",
"_callback",
"(",
"executor",
")",
":",
"self",
".",
"_add_build_actions",
"(",
"executor",
",",
"context",
"=",
"context",
",",
"package",
"=",
"self",
".",
"package",
",",
"variant",
"=",
"variant",
",",
"build_type",
"=",
"build_type",
",",
"install",
"=",
"install",
",",
"build_path",
"=",
"build_path",
",",
"install_path",
"=",
"install_path",
")",
"if",
"self",
".",
"opts",
":",
"# write args defined in ./parse_build_args.py out as env vars",
"extra_args",
"=",
"getattr",
"(",
"self",
".",
"opts",
".",
"parser",
",",
"\"_rezbuild_extra_args\"",
",",
"[",
"]",
")",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"self",
".",
"opts",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"in",
"extra_args",
":",
"varname",
"=",
"\"__PARSE_ARG_%s\"",
"%",
"key",
".",
"upper",
"(",
")",
"# do some value conversions",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"1",
"if",
"value",
"else",
"0",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"value",
"=",
"map",
"(",
"str",
",",
"value",
")",
"value",
"=",
"map",
"(",
"quote",
",",
"value",
")",
"value",
"=",
"' '",
".",
"join",
"(",
"value",
")",
"executor",
".",
"env",
"[",
"varname",
"]",
"=",
"value",
"retcode",
",",
"_",
",",
"_",
"=",
"context",
".",
"execute_shell",
"(",
"command",
"=",
"command",
",",
"block",
"=",
"True",
",",
"cwd",
"=",
"build_path",
",",
"actions_callback",
"=",
"_callback",
")",
"ret",
"[",
"\"success\"",
"]",
"=",
"(",
"not",
"retcode",
")",
"return",
"ret"
] | Perform the build.
Note that most of the func args aren't used here - that's because this
info is already passed to the custom build command via environment
variables. | [
"Perform",
"the",
"build",
"."
] | python | train |
allenai/allennlp | allennlp/semparse/domain_languages/wikitables_language.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L676-L686 | def max_date(self, rows: List[Row], column: DateColumn) -> Date:
"""
Takes a list of rows and a column and returns the max of the values under that column in
those rows.
"""
cell_values = [row.values[column.name] for row in rows]
if not cell_values:
return Date(-1, -1, -1)
if not all([isinstance(value, Date) for value in cell_values]):
raise ExecutionError(f"Invalid values for date selection function: {cell_values}")
return max(cell_values) | [
"def",
"max_date",
"(",
"self",
",",
"rows",
":",
"List",
"[",
"Row",
"]",
",",
"column",
":",
"DateColumn",
")",
"->",
"Date",
":",
"cell_values",
"=",
"[",
"row",
".",
"values",
"[",
"column",
".",
"name",
"]",
"for",
"row",
"in",
"rows",
"]",
"if",
"not",
"cell_values",
":",
"return",
"Date",
"(",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"value",
",",
"Date",
")",
"for",
"value",
"in",
"cell_values",
"]",
")",
":",
"raise",
"ExecutionError",
"(",
"f\"Invalid values for date selection function: {cell_values}\"",
")",
"return",
"max",
"(",
"cell_values",
")"
] | Takes a list of rows and a column and returns the max of the values under that column in
those rows. | [
"Takes",
"a",
"list",
"of",
"rows",
"and",
"a",
"column",
"and",
"returns",
"the",
"max",
"of",
"the",
"values",
"under",
"that",
"column",
"in",
"those",
"rows",
"."
] | python | train |
doconix/django-mako-plus | django_mako_plus/management/commands/dmp_collectstatic.py | https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/management/commands/dmp_collectstatic.py#L31-L38 | def match(self, fname, flevel, ftype):
'''Returns the result score if the file matches this rule'''
# if filetype is the same
# and level isn't set or level is the same
# and pattern matches the filename
if self.filetype == ftype and (self.level is None or self.level == flevel) and fnmatch.fnmatch(fname, self.pattern):
return self.score
return 0 | [
"def",
"match",
"(",
"self",
",",
"fname",
",",
"flevel",
",",
"ftype",
")",
":",
"# if filetype is the same",
"# and level isn't set or level is the same",
"# and pattern matche the filename",
"if",
"self",
".",
"filetype",
"==",
"ftype",
"and",
"(",
"self",
".",
"level",
"is",
"None",
"or",
"self",
".",
"level",
"==",
"flevel",
")",
"and",
"fnmatch",
".",
"fnmatch",
"(",
"fname",
",",
"self",
".",
"pattern",
")",
":",
"return",
"self",
".",
"score",
"return",
"0"
] | Returns the result score if the file matches this rule | [
"Returns",
"the",
"result",
"score",
"if",
"the",
"file",
"matches",
"this",
"rule"
] | python | train |
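A tiny harness exercising the scoring rule above; Rule here is a minimal stand-in with the same four attributes as the real class.

import fnmatch
from collections import namedtuple

Rule = namedtuple('Rule', 'pattern filetype level score')

def match(rule, fname, flevel, ftype):
    # Same checks as the method above: filetype, optional level, glob pattern.
    if rule.filetype == ftype and (rule.level is None or rule.level == flevel) \
            and fnmatch.fnmatch(fname, rule.pattern):
        return rule.score
    return 0

rule = Rule(pattern='*.css', filetype='static', level=None, score=5)
print(match(rule, 'site.css', 1, 'static'))  # 5
print(match(rule, 'site.js', 1, 'static'))   # 0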
TissueMAPS/TmClient | src/python/tmclient/api.py | https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L486-L510 | def delete_plate(self, name):
'''Deletes a plate.
Parameters
----------
name: str
name of the plate that should be deleted
See also
--------
:func:`tmserver.api.plate.delete_plate`
:class:`tmlib.models.plate.Plate`
'''
logger.info(
'delete plate "%s" of experiment "%s"',
name, self.experiment_name
)
plate_id = self._get_plate_id(name)
url = self._build_api_url(
'/experiments/{experiment_id}/plates/{plate_id}'.format(
experiment_id=self._experiment_id, plate_id=plate_id
)
)
res = self._session.delete(url)
res.raise_for_status() | [
"def",
"delete_plate",
"(",
"self",
",",
"name",
")",
":",
"logger",
".",
"info",
"(",
"'delete plate \"%s\" of experiment \"%s\"'",
",",
"name",
",",
"self",
".",
"experiment_name",
")",
"plate_id",
"=",
"self",
".",
"_get_plate_id",
"(",
"name",
")",
"url",
"=",
"self",
".",
"_build_api_url",
"(",
"'/experiments/{experiment_id}/plates/{plate_id}'",
".",
"format",
"(",
"experiment_id",
"=",
"self",
".",
"_experiment_id",
",",
"plate_id",
"=",
"plate_id",
")",
")",
"res",
"=",
"self",
".",
"_session",
".",
"delete",
"(",
"url",
")",
"res",
".",
"raise_for_status",
"(",
")"
] | Deletes a plate.
Parameters
----------
name: str
name of the plate that should be deleted
See also
--------
:func:`tmserver.api.plate.delete_plate`
:class:`tmlib.models.plate.Plate` | [
"Deletes",
"a",
"plate",
"."
] | python | train |
Kaggle/kaggle-api | kaggle/api/kaggle_api_extended.py | https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L740-L749 | def competition_leaderboard_view(self, competition):
""" view a leaderboard based on a competition name
Parameters
==========
competition: the competition name to view leadboard for
"""
result = self.process_response(
self.competition_view_leaderboard_with_http_info(competition))
return [LeaderboardEntry(e) for e in result['submissions']] | [
"def",
"competition_leaderboard_view",
"(",
"self",
",",
"competition",
")",
":",
"result",
"=",
"self",
".",
"process_response",
"(",
"self",
".",
"competition_view_leaderboard_with_http_info",
"(",
"competition",
")",
")",
"return",
"[",
"LeaderboardEntry",
"(",
"e",
")",
"for",
"e",
"in",
"result",
"[",
"'submissions'",
"]",
"]"
] | view a leaderboard based on a competition name
Parameters
==========
competition: the competition name to view leaderboard for | [
"view",
"a",
"leaderboard",
"based",
"on",
"a",
"competition",
"name"
] | python | train |
twisted/axiom | axiom/batch.py | https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/batch.py#L462-L518 | def processor(forType):
"""
Create an Axiom Item type which is suitable to use as a batch processor for
the given Axiom Item type.
Processors created this way depend on a L{iaxiom.IScheduler} powerup on the
on which store they are installed.
@type forType: L{item.MetaItem}
@param forType: The Axiom Item type for which to create a batch processor
type.
@rtype: L{item.MetaItem}
@return: An Axiom Item type suitable for use as a batch processor. If such
a type previously existed, it will be returned. Otherwise, a new type is
created.
"""
MILLI = 1000
try:
processor = _processors[forType]
except KeyError:
def __init__(self, *a, **kw):
item.Item.__init__(self, *a, **kw)
self.store.powerUp(self, iaxiom.IBatchProcessor)
attrs = {
'__name__': 'Batch_' + forType.__name__,
'__module__': forType.__module__,
'__init__': __init__,
'__repr__': lambda self: '<Batch of %s #%d>' % (reflect.qual(self.workUnitType), self.storeID),
'schemaVersion': 2,
'workUnitType': forType,
'scheduled': attributes.timestamp(doc="""
The next time at which this processor is scheduled to run.
""", default=None),
# MAGIC NUMBERS AREN'T THEY WONDERFUL?
'busyInterval': attributes.integer(doc="", default=MILLI / 10),
}
_processors[forType] = processor = item.MetaItem(
attrs['__name__'],
(item.Item, _BatchProcessorMixin),
attrs)
registerUpgrader(
upgradeProcessor1to2,
_processors[forType].typeName,
1, 2)
return processor | [
"def",
"processor",
"(",
"forType",
")",
":",
"MILLI",
"=",
"1000",
"try",
":",
"processor",
"=",
"_processors",
"[",
"forType",
"]",
"except",
"KeyError",
":",
"def",
"__init__",
"(",
"self",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"item",
".",
"Item",
".",
"__init__",
"(",
"self",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"self",
".",
"store",
".",
"powerUp",
"(",
"self",
",",
"iaxiom",
".",
"IBatchProcessor",
")",
"attrs",
"=",
"{",
"'__name__'",
":",
"'Batch_'",
"+",
"forType",
".",
"__name__",
",",
"'__module__'",
":",
"forType",
".",
"__module__",
",",
"'__init__'",
":",
"__init__",
",",
"'__repr__'",
":",
"lambda",
"self",
":",
"'<Batch of %s #%d>'",
"%",
"(",
"reflect",
".",
"qual",
"(",
"self",
".",
"workUnitType",
")",
",",
"self",
".",
"storeID",
")",
",",
"'schemaVersion'",
":",
"2",
",",
"'workUnitType'",
":",
"forType",
",",
"'scheduled'",
":",
"attributes",
".",
"timestamp",
"(",
"doc",
"=",
"\"\"\"\n The next time at which this processor is scheduled to run.\n \"\"\"",
",",
"default",
"=",
"None",
")",
",",
"# MAGIC NUMBERS AREN'T THEY WONDERFUL?",
"'busyInterval'",
":",
"attributes",
".",
"integer",
"(",
"doc",
"=",
"\"\"",
",",
"default",
"=",
"MILLI",
"/",
"10",
")",
",",
"}",
"_processors",
"[",
"forType",
"]",
"=",
"processor",
"=",
"item",
".",
"MetaItem",
"(",
"attrs",
"[",
"'__name__'",
"]",
",",
"(",
"item",
".",
"Item",
",",
"_BatchProcessorMixin",
")",
",",
"attrs",
")",
"registerUpgrader",
"(",
"upgradeProcessor1to2",
",",
"_processors",
"[",
"forType",
"]",
".",
"typeName",
",",
"1",
",",
"2",
")",
"return",
"processor"
] | Create an Axiom Item type which is suitable to use as a batch processor for
the given Axiom Item type.
Processors created this way depend on a L{iaxiom.IScheduler} powerup on the
on which store they are installed.
@type forType: L{item.MetaItem}
@param forType: The Axiom Item type for which to create a batch processor
type.
@rtype: L{item.MetaItem}
@return: An Axiom Item type suitable for use as a batch processor. If such
a type previously existed, it will be returned. Otherwise, a new type is
created. | [
"Create",
"an",
"Axiom",
"Item",
"type",
"which",
"is",
"suitable",
"to",
"use",
"as",
"a",
"batch",
"processor",
"for",
"the",
"given",
"Axiom",
"Item",
"type",
"."
] | python | train |
epfl-idevelop/epfl-ldap | epflldap/ldap_search.py | https://github.com/epfl-idevelop/epfl-ldap/blob/bebb94da3609d358bd83f31672eeaddcda872c5d/epflldap/ldap_search.py#L7-L15 | def _get_LDAP_connection():
"""
Return a LDAP connection
"""
server = ldap3.Server('ldap://' + get_optional_env('EPFL_LDAP_SERVER_FOR_SEARCH'))
connection = ldap3.Connection(server)
connection.open()
return connection, get_optional_env('EPFL_LDAP_BASE_DN_FOR_SEARCH') | [
"def",
"_get_LDAP_connection",
"(",
")",
":",
"server",
"=",
"ldap3",
".",
"Server",
"(",
"'ldap://'",
"+",
"get_optional_env",
"(",
"'EPFL_LDAP_SERVER_FOR_SEARCH'",
")",
")",
"connection",
"=",
"ldap3",
".",
"Connection",
"(",
"server",
")",
"connection",
".",
"open",
"(",
")",
"return",
"connection",
",",
"get_optional_env",
"(",
"'EPFL_LDAP_BASE_DN_FOR_SEARCH'",
")"
] | Return a LDAP connection | [
"Return",
"a",
"LDAP",
"connection"
] | python | train |
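What a caller might do with the (connection, base_dn) pair returned above; a hedged sketch where the host, base DN and filter are placeholders, assuming ldap3 is installed.

import ldap3

server = ldap3.Server('ldap://ldap.example.org')   # placeholder host
connection = ldap3.Connection(server)
connection.open()
connection.search('o=example,c=org',               # placeholder base DN
                  '(uid=jdoe)', attributes=['cn', 'mail'])
print(connection.entries)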
jorahn/icy | icy/icy.py | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L469-L541 | def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smallest to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels | [
"def",
"merge",
"(",
"data",
",",
"cfg",
"=",
"None",
")",
":",
"# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)",
"# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)",
"# should be easy to iterate from normalized tables to a fully joined set of dataframes",
"if",
"type",
"(",
"cfg",
")",
"==",
"str",
":",
"cfg",
"=",
"_read_yaml",
"(",
"cfg",
")",
"if",
"cfg",
"==",
"None",
":",
"cfg",
"=",
"_read_yaml",
"(",
"'local/merge.yml'",
")",
"if",
"cfg",
"==",
"None",
":",
"print",
"(",
"'creating merge.yml config file draft ...'",
")",
"cfg",
"=",
"{",
"}",
"# find all tables with identical column names",
"# if no common key-col",
"# concat along rows, add col (src)",
"# e.g. chimps",
"# find all tables with same length",
"# if no duplicate column names",
"# concat along columns",
"# find master table (by length?)",
"# from smalles to biggest table",
"# find possible key-cols by uniques == len",
"# find bigger tables with common column names -> cands",
"# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)",
"# join table on best cand",
"# if ratio below treshold put table on unidentified list",
"for",
"key",
"in",
"data",
":",
"cfg",
"[",
"key",
"]",
"=",
"list",
"(",
"data",
"[",
"key",
"]",
".",
"columns",
")",
"with",
"open",
"(",
"'local/merge.yml'",
",",
"'xt'",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"cfg",
",",
"f",
")",
"cfg",
"=",
"_read_yaml",
"(",
"'local/merge.yml'",
")",
"# if cfg == None:",
"# if not os.path.exists(default_cfg):",
"# create default_cfg draft",
"# else:",
"# join on default_cfg",
"# report join_result",
"# else:",
"# join on cfg",
"# report join_result",
"labels",
"=",
"None",
"return",
"data",
",",
"labels"
] | WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
----- | [
"WORK",
"IN",
"PROGRESS",
"Concat",
"merge",
"join",
"drop",
"keys",
"in",
"dictionary",
"of",
"pandas",
".",
"DataFrames",
"into",
"one",
"pandas",
".",
"DataFrame",
"(",
"data",
")",
"and",
"a",
"pandas",
".",
"Series",
"(",
"labels",
")",
".",
"Parameters",
"----------",
"data",
":",
"dict",
"of",
"pandas",
".",
"DataFrames",
"Result",
"of",
"icy",
".",
"read",
"()",
"cfg",
":",
"dict",
"or",
"str",
"optional",
"Dictionary",
"of",
"actions",
"to",
"perform",
"on",
"data",
"or",
"str",
"with",
"path",
"to",
"YAML",
"that",
"will",
"be",
"parsed",
".",
"Returns",
"-------",
"data",
":",
"pandas",
".",
"DataFrame",
"The",
"aggregated",
"dataset",
"labels",
":",
"pandas",
".",
"Series",
"The",
"target",
"variable",
"for",
"analysis",
"of",
"the",
"dataset",
"can",
"have",
"fewer",
"samples",
"than",
"the",
"aggregated",
"dataset",
"Notes",
"-----"
] | python | train |
phodge/homely | homely/_utils.py | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_utils.py#L112-L197 | def run(cmd, stdout=None, stderr=None, **kwargs):
"""
A blocking wrapper around subprocess.Popen(), but with a simpler interface
for the stdout/stderr arguments:
stdout=False / stderr=False
stdout/stderr will be redirected to /dev/null (or discarded in some
other suitable manner)
stdout=True / stderr=True
stdout/stderr will be captured and returned as a list of lines.
stdout=None
stdout will be redirected to the python process's stdout, which may be
a tty (same as using stdout=subprocess.None)
stderr=None:
stderr will be redirected to the python process's stderr, which may be
a tty (same as using stderr=subprocess.None)
stderr="STDOUT"
Same as using stderr=subprocess.STDOUT
The return value will be a tuple of (exitcode, stdout, stderr)
If stdout and/or stderr were not captured, they will be None instead.
"""
devnull = None
try:
stdoutfilter = None
stderrfilter = None
wantstdout = False
wantstderr = False
if stdout is False:
devnull = open('/dev/null', 'w')
stdout = devnull
elif stdout is True:
stdout = subprocess.PIPE
wantstdout = True
elif callable(stdout):
stdoutfilter = partial(stdout)
stdout = subprocess.PIPE
else:
assert stdout is None, "Invalid stdout %r" % stdout
if stderr is False:
if devnull is None:
devnull = open('/dev/null', 'w')
stderr = devnull
elif stderr is True:
stderr = subprocess.PIPE
wantstderr = True
elif stderr == "STDOUT":
stderr = subprocess.STDOUT
elif callable(stderr):
stderrfilter = partial(stderr)
stderr = subprocess.PIPE
else:
assert stderr is None, "Invalid stderr %r" % stderr
if (stdoutfilter or stderrfilter) and asyncio:
# run background process asynchronously and filter output as
# it is running
exitcode, out, err, = _runasync(stdoutfilter,
stderrfilter,
cmd,
stdout=stdout,
stderr=stderr,
**kwargs)
if not wantstdout:
out = None
if not wantstderr:
err = None
return exitcode, out, err
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
out, err = proc.communicate()
if not wantstdout:
if stdoutfilter:
stdoutfilter(out, True)
out = None
if not wantstderr:
if stderrfilter:
stderrfilter(err, True)
err = None
return proc.returncode, out, err
finally:
if devnull is not None:
devnull.close() | [
"def",
"run",
"(",
"cmd",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"devnull",
"=",
"None",
"try",
":",
"stdoutfilter",
"=",
"None",
"stderrfilter",
"=",
"None",
"wantstdout",
"=",
"False",
"wantstderr",
"=",
"False",
"if",
"stdout",
"is",
"False",
":",
"devnull",
"=",
"open",
"(",
"'/dev/null'",
",",
"'w'",
")",
"stdout",
"=",
"devnull",
"elif",
"stdout",
"is",
"True",
":",
"stdout",
"=",
"subprocess",
".",
"PIPE",
"wantstdout",
"=",
"True",
"elif",
"callable",
"(",
"stdout",
")",
":",
"stdoutfilter",
"=",
"partial",
"(",
"stdout",
")",
"stdout",
"=",
"subprocess",
".",
"PIPE",
"else",
":",
"assert",
"stdout",
"is",
"None",
",",
"\"Invalid stdout %r\"",
"%",
"stdout",
"if",
"stderr",
"is",
"False",
":",
"if",
"devnull",
"is",
"None",
":",
"devnull",
"=",
"open",
"(",
"'/dev/null'",
",",
"'w'",
")",
"stderr",
"=",
"devnull",
"elif",
"stderr",
"is",
"True",
":",
"stderr",
"=",
"subprocess",
".",
"PIPE",
"wantstderr",
"=",
"True",
"elif",
"stderr",
"==",
"\"STDOUT\"",
":",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
"elif",
"callable",
"(",
"stderr",
")",
":",
"stderrfilter",
"=",
"partial",
"(",
"stderr",
")",
"stderr",
"=",
"subprocess",
".",
"PIPE",
"else",
":",
"assert",
"stderr",
"is",
"None",
",",
"\"Invalid stderr %r\"",
"%",
"stderr",
"if",
"(",
"stdoutfilter",
"or",
"stderrfilter",
")",
"and",
"asyncio",
":",
"# run background process asynchronously and filter output as",
"# it is running",
"exitcode",
",",
"out",
",",
"err",
",",
"=",
"_runasync",
"(",
"stdoutfilter",
",",
"stderrfilter",
",",
"cmd",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"wantstdout",
":",
"out",
"=",
"None",
"if",
"not",
"wantstderr",
":",
"err",
"=",
"None",
"return",
"exitcode",
",",
"out",
",",
"err",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"*",
"*",
"kwargs",
")",
"out",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"not",
"wantstdout",
":",
"if",
"stdoutfilter",
":",
"stdoutfilter",
"(",
"out",
",",
"True",
")",
"out",
"=",
"None",
"if",
"not",
"wantstderr",
":",
"if",
"stderrfilter",
":",
"stderrfilter",
"(",
"err",
",",
"True",
")",
"err",
"=",
"None",
"return",
"proc",
".",
"returncode",
",",
"out",
",",
"err",
"finally",
":",
"if",
"devnull",
"is",
"not",
"None",
":",
"devnull",
".",
"close",
"(",
")"
] | A blocking wrapper around subprocess.Popen(), but with a simpler interface
for the stdout/stderr arguments:
stdout=False / stderr=False
stdout/stderr will be redirected to /dev/null (or discarded in some
other suitable manner)
stdout=True / stderr=True
stdout/stderr will be captured and returned as a list of lines.
stdout=None
stdout will be redirected to the python process's stdout, which may be
a tty (same as passing stdout=None to subprocess.Popen)
stderr=None:
stderr will be redirected to the python process's stderr, which may be
a tty (same as passing stderr=None to subprocess.Popen)
stderr="STDOUT"
Same as using stderr=subprocess.STDOUT
The return value will be a tuple of (exitcode, stdout, stderr)
If stdout and/or stderr were not captured, they will be None instead. | [
"A",
"blocking",
"wrapper",
"around",
"subprocess",
".",
"Popen",
"()",
"but",
"with",
"a",
"simpler",
"interface",
"for",
"the",
"stdout",
"/",
"stderr",
"arguments",
":"
] | python | train |
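Example calls against the interface documented above, using harmless shell commands; assumes a POSIX environment and that the homely package is importable.

from homely._utils import run   # module path taken from the record above

# exit code only; stdout/stderr stay on the terminal
code, out, err = run(['true'])
assert (code, out, err) == (0, None, None)

# capture stdout as bytes, discard stderr
code, out, err = run(['echo', 'hi'], stdout=True, stderr=False)
print(code, out)  # 0 b'hi\n'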
google/mobly | mobly/controllers/monsoon.py | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L422-L437 | def _FlushInput(self):
""" Flush all read data until no more available. """
self.ser.flush()
flushed = 0
while True:
ready_r, ready_w, ready_x = select.select([self.ser], [],
[self.ser], 0)
if len(ready_x) > 0:
logging.error("Exception from serial port.")
return None
elif len(ready_r) > 0:
flushed += 1
self.ser.read(1) # This may cause underlying buffering.
self.ser.flush() # Flush the underlying buffer too.
else:
break | [
"def",
"_FlushInput",
"(",
"self",
")",
":",
"self",
".",
"ser",
".",
"flush",
"(",
")",
"flushed",
"=",
"0",
"while",
"True",
":",
"ready_r",
",",
"ready_w",
",",
"ready_x",
"=",
"select",
".",
"select",
"(",
"[",
"self",
".",
"ser",
"]",
",",
"[",
"]",
",",
"[",
"self",
".",
"ser",
"]",
",",
"0",
")",
"if",
"len",
"(",
"ready_x",
")",
">",
"0",
":",
"logging",
".",
"error",
"(",
"\"Exception from serial port.\"",
")",
"return",
"None",
"elif",
"len",
"(",
"ready_r",
")",
">",
"0",
":",
"flushed",
"+=",
"1",
"self",
".",
"ser",
".",
"read",
"(",
"1",
")",
"# This may cause underlying buffering.",
"self",
".",
"ser",
".",
"flush",
"(",
")",
"# Flush the underlying buffer too.",
"else",
":",
"break"
] | Flush all read data until no more available. | [
"Flush",
"all",
"read",
"data",
"until",
"no",
"more",
"available",
"."
] | python | train |
bierschenk/ode | ode/integrators.py | https://github.com/bierschenk/ode/blob/01fb714874926f0988a4bb250d2a0c8a2429e4f0/ode/integrators.py#L72-L88 | def euler(dfun, xzero, timerange, timestep):
'''Euler method integration. This function wraps the Euler class.
:param dfun:
derivative function of the system.
The differential system arranged as a series of first-order
equations: \\dot{X} = dfun(t, x)
:param xzero:
the initial condition of the system
:param timerange:
the start and end times as (starttime, endtime)
:param timestep:
the timestep
:returns: t, x:
as lists
'''
return zip(*list(Euler(dfun, xzero, timerange, timestep))) | [
"def",
"euler",
"(",
"dfun",
",",
"xzero",
",",
"timerange",
",",
"timestep",
")",
":",
"return",
"zip",
"(",
"*",
"list",
"(",
"Euler",
"(",
"dfun",
",",
"xzero",
",",
"timerange",
",",
"timestep",
")",
")",
")"
] | Euler method integration. This function wraps the Euler class.
:param dfun:
derivative function of the system.
The differential system arranged as a series of first-order
equations: \\dot{X} = dfun(t, x)
:param xzero:
the initial condition of the system
:param timerange:
the start and end times as (starttime, endtime)
:param timestep:
the timestep
:returns: t, x:
as lists | [
"Euler",
"method",
"integration",
".",
"This",
"function",
"wraps",
"the",
"Euler",
"class",
"."
] | python | train |
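A worked call matching the documented signature, integrating exponential decay; assumes the ode package above is installed, and treating the state as a one-element list is an assumption about the Euler class, not something stated in the record.

from ode.integrators import euler   # module path taken from the record above

def dfun(t, x):
    return [-x[0]]                  # \dot{x} = -x

t, x = euler(dfun, xzero=[1.0], timerange=(0.0, 1.0), timestep=0.1)
print(t[-1], x[-1])                 # x(1) should be near exp(-1), about 0.368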
senaite/senaite.core | bika/lims/browser/partition_magic.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/partition_magic.py#L162-L167 | def get_container_data(self):
"""Returns a list of Container data
"""
for obj in self.get_containers():
info = self.get_base_info(obj)
yield info | [
"def",
"get_container_data",
"(",
"self",
")",
":",
"for",
"obj",
"in",
"self",
".",
"get_containers",
"(",
")",
":",
"info",
"=",
"self",
".",
"get_base_info",
"(",
"obj",
")",
"yield",
"info"
] | Returns a list of Container data | [
"Returns",
"a",
"list",
"of",
"Container",
"data"
] | python | train |
uuazed/numerapi | numerapi/numerapi.py | https://github.com/uuazed/numerapi/blob/fc9dcc53b32ede95bfda1ceeb62aec1d67d26697/numerapi/numerapi.py#L944-L1003 | def get_payments(self):
"""Get all your payments.
Returns:
list of dicts: payments
For each payout in the list, a dict contains the following items:
* nmrAmount (`decimal.Decimal`)
* usdAmount (`decimal.Decimal`)
* tournament (`str`)
* round (`dict`)
* number (`int`)
* openTime (`datetime`)
* resolveTime (`datetime`)
* resolvedGeneral (`bool`)
* resolvedStaking (`bool`)
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.get_payments()
[{'nmrAmount': Decimal('0.00'),
'round': {'number': 84,
'openTime': datetime.datetime(2017, 12, 2, 18, 0, tzinfo=tzutc()),
'resolveTime': datetime.datetime(2018, 1, 1, 18, 0, tzinfo=tzutc()),
'resolvedGeneral': True,
'resolvedStaking': True},
'tournament': 'staking',
'usdAmount': Decimal('17.44')},
...
]
"""
query = """
query {
user {
payments {
nmrAmount
round {
number
openTime
resolveTime
resolvedGeneral
resolvedStaking
}
tournament
usdAmount
}
}
}
"""
data = self.raw_query(query, authorization=True)['data']
payments = data['user']['payments']
# convert strings to python objects
for p in payments:
utils.replace(p['round'], "openTime", utils.parse_datetime_string)
utils.replace(p['round'], "resolveTime",
utils.parse_datetime_string)
utils.replace(p, "usdAmount", utils.parse_float_string)
utils.replace(p, "nmrAmount", utils.parse_float_string)
return payments | [
"def",
"get_payments",
"(",
"self",
")",
":",
"query",
"=",
"\"\"\"\n query {\n user {\n payments {\n nmrAmount\n round {\n number\n openTime\n resolveTime\n resolvedGeneral\n resolvedStaking\n }\n tournament\n usdAmount\n }\n }\n }\n \"\"\"",
"data",
"=",
"self",
".",
"raw_query",
"(",
"query",
",",
"authorization",
"=",
"True",
")",
"[",
"'data'",
"]",
"payments",
"=",
"data",
"[",
"'user'",
"]",
"[",
"'payments'",
"]",
"# convert strings to python objects",
"for",
"p",
"in",
"payments",
":",
"utils",
".",
"replace",
"(",
"p",
"[",
"'round'",
"]",
",",
"\"openTime\"",
",",
"utils",
".",
"parse_datetime_string",
")",
"utils",
".",
"replace",
"(",
"p",
"[",
"'round'",
"]",
",",
"\"resolveTime\"",
",",
"utils",
".",
"parse_datetime_string",
")",
"utils",
".",
"replace",
"(",
"p",
",",
"\"usdAmount\"",
",",
"utils",
".",
"parse_float_string",
")",
"utils",
".",
"replace",
"(",
"p",
",",
"\"nmrAmount\"",
",",
"utils",
".",
"parse_float_string",
")",
"return",
"payments"
] | Get all your payments.
Returns:
list of dicts: payments
For each payout in the list, a dict contains the following items:
* nmrAmount (`decimal.Decimal`)
* usdAmount (`decimal.Decimal`)
* tournament (`str`)
* round (`dict`)
* number (`int`)
* openTime (`datetime`)
* resolveTime (`datetime`)
* resolvedGeneral (`bool`)
* resolvedStaking (`bool`)
Example:
>>> api = NumerAPI(secret_key="..", public_id="..")
>>> api.get_payments()
[{'nmrAmount': Decimal('0.00'),
'round': {'number': 84,
'openTime': datetime.datetime(2017, 12, 2, 18, 0, tzinfo=tzutc()),
'resolveTime': datetime.datetime(2018, 1, 1, 18, 0, tzinfo=tzutc()),
'resolvedGeneral': True,
'resolvedStaking': True},
'tournament': 'staking',
'usdAmount': Decimal('17.44')},
...
] | [
"Get",
"all",
"your",
"payments",
"."
] | python | train |
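Calling the method above and summing the payouts; the credentials are placeholders and the Decimal amounts follow the example output in the docstring.

from numerapi import NumerAPI   # assumes the numerapi package is installed

api = NumerAPI(secret_key='..', public_id='..')  # placeholder credentials
payments = api.get_payments()
total_usd = sum(p['usdAmount'] for p in payments)
print(len(payments), total_usd)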
ejeschke/ginga | ginga/rv/plugins/Cuts.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Cuts.py#L995-L1001 | def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_cuts()
return True | [
"def",
"set_mode_cb",
"(",
"self",
",",
"mode",
",",
"tf",
")",
":",
"if",
"tf",
":",
"self",
".",
"canvas",
".",
"set_draw_mode",
"(",
"mode",
")",
"if",
"mode",
"==",
"'edit'",
":",
"self",
".",
"edit_select_cuts",
"(",
")",
"return",
"True"
] | Called when one of the Move/Draw/Edit radio buttons is selected. | [
"Called",
"when",
"one",
"of",
"the",
"Move",
"/",
"Draw",
"/",
"Edit",
"radio",
"buttons",
"is",
"selected",
"."
] | python | train |
klen/zeta-library | zetalibrary/scss/__init__.py | https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1019-L1072 | def _do_functions(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @mixin and @function
"""
if name:
funct, params, _ = name.partition('(')
funct = funct.strip()
params = split_params(depar(params + _))
defaults = {}
new_params = []
for param in params:
param, _, default = param.partition(':')
param = param.strip()
default = default.strip()
if param:
new_params.append(param)
if default:
default = self.apply_vars(
default, rule[CONTEXT], None, rule)
defaults[param] = default
context = rule[CONTEXT].copy()
for p in new_params:
context.pop(p, None)
mixin = [list(new_params), defaults, self.
apply_vars(c_codestr, context, None, rule)]
if code == '@function':
def _call(mixin):
def __call(R, *args, **kwargs):
m_params = mixin[0]
m_vars = rule[CONTEXT].copy()
m_vars.update(mixin[1])
m_codestr = mixin[2]
for i, a in enumerate(args):
m_vars[m_params[i]] = a
m_vars.update(kwargs)
_options = rule[OPTIONS].copy()
_rule = spawn_rule(R, codestr=m_codestr, context=m_vars, options=_options, deps=set(), properties=[], final=False, lineno=c_lineno)
self.manage_children(_rule, p_selectors, p_parents,
p_children, (scope or '') + '', R[MEDIA])
ret = _rule[OPTIONS].pop('@return', '')
return ret
return __call
_mixin = _call(mixin)
_mixin.mixin = mixin
mixin = _mixin
# Insert as many @mixin options as the default parameters:
while len(new_params):
rule[OPTIONS]['%s %s:%d' % (code, funct,
len(new_params))] = mixin
param = new_params.pop()
if param not in defaults:
break
if not new_params:
rule[OPTIONS][code + ' ' + funct + ':0'] = mixin | [
"def",
"_do_functions",
"(",
"self",
",",
"rule",
",",
"p_selectors",
",",
"p_parents",
",",
"p_children",
",",
"scope",
",",
"media",
",",
"c_lineno",
",",
"c_property",
",",
"c_codestr",
",",
"code",
",",
"name",
")",
":",
"if",
"name",
":",
"funct",
",",
"params",
",",
"_",
"=",
"name",
".",
"partition",
"(",
"'('",
")",
"funct",
"=",
"funct",
".",
"strip",
"(",
")",
"params",
"=",
"split_params",
"(",
"depar",
"(",
"params",
"+",
"_",
")",
")",
"defaults",
"=",
"{",
"}",
"new_params",
"=",
"[",
"]",
"for",
"param",
"in",
"params",
":",
"param",
",",
"_",
",",
"default",
"=",
"param",
".",
"partition",
"(",
"':'",
")",
"param",
"=",
"param",
".",
"strip",
"(",
")",
"default",
"=",
"default",
".",
"strip",
"(",
")",
"if",
"param",
":",
"new_params",
".",
"append",
"(",
"param",
")",
"if",
"default",
":",
"default",
"=",
"self",
".",
"apply_vars",
"(",
"default",
",",
"rule",
"[",
"CONTEXT",
"]",
",",
"None",
",",
"rule",
")",
"defaults",
"[",
"param",
"]",
"=",
"default",
"context",
"=",
"rule",
"[",
"CONTEXT",
"]",
".",
"copy",
"(",
")",
"for",
"p",
"in",
"new_params",
":",
"context",
".",
"pop",
"(",
"p",
",",
"None",
")",
"mixin",
"=",
"[",
"list",
"(",
"new_params",
")",
",",
"defaults",
",",
"self",
".",
"apply_vars",
"(",
"c_codestr",
",",
"context",
",",
"None",
",",
"rule",
")",
"]",
"if",
"code",
"==",
"'@function'",
":",
"def",
"_call",
"(",
"mixin",
")",
":",
"def",
"__call",
"(",
"R",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"m_params",
"=",
"mixin",
"[",
"0",
"]",
"m_vars",
"=",
"rule",
"[",
"CONTEXT",
"]",
".",
"copy",
"(",
")",
"m_vars",
".",
"update",
"(",
"mixin",
"[",
"1",
"]",
")",
"m_codestr",
"=",
"mixin",
"[",
"2",
"]",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"args",
")",
":",
"m_vars",
"[",
"m_params",
"[",
"i",
"]",
"]",
"=",
"a",
"m_vars",
".",
"update",
"(",
"kwargs",
")",
"_options",
"=",
"rule",
"[",
"OPTIONS",
"]",
".",
"copy",
"(",
")",
"_rule",
"=",
"spawn_rule",
"(",
"R",
",",
"codestr",
"=",
"m_codestr",
",",
"context",
"=",
"m_vars",
",",
"options",
"=",
"_options",
",",
"deps",
"=",
"set",
"(",
")",
",",
"properties",
"=",
"[",
"]",
",",
"final",
"=",
"False",
",",
"lineno",
"=",
"c_lineno",
")",
"self",
".",
"manage_children",
"(",
"_rule",
",",
"p_selectors",
",",
"p_parents",
",",
"p_children",
",",
"(",
"scope",
"or",
"''",
")",
"+",
"''",
",",
"R",
"[",
"MEDIA",
"]",
")",
"ret",
"=",
"_rule",
"[",
"OPTIONS",
"]",
".",
"pop",
"(",
"'@return'",
",",
"''",
")",
"return",
"ret",
"return",
"__call",
"_mixin",
"=",
"_call",
"(",
"mixin",
")",
"_mixin",
".",
"mixin",
"=",
"mixin",
"mixin",
"=",
"_mixin",
"# Insert as many @mixin options as the default parameters:",
"while",
"len",
"(",
"new_params",
")",
":",
"rule",
"[",
"OPTIONS",
"]",
"[",
"'%s %s:%d'",
"%",
"(",
"code",
",",
"funct",
",",
"len",
"(",
"new_params",
")",
")",
"]",
"=",
"mixin",
"param",
"=",
"new_params",
".",
"pop",
"(",
")",
"if",
"param",
"not",
"in",
"defaults",
":",
"break",
"if",
"not",
"new_params",
":",
"rule",
"[",
"OPTIONS",
"]",
"[",
"code",
"+",
"' '",
"+",
"funct",
"+",
"':0'",
"]",
"=",
"mixin"
] | Implements @mixin and @function | [
"Implements"
] | python | train |
MillionIntegrals/vel | vel/rl/env/classic_control.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/env/classic_control.py#L62-L68 | def create(game, settings=None, presets=None):
""" Vel factory function """
return ClassicControlEnv(
envname=game,
settings=settings,
presets=presets
) | [
"def",
"create",
"(",
"game",
",",
"settings",
"=",
"None",
",",
"presets",
"=",
"None",
")",
":",
"return",
"ClassicControlEnv",
"(",
"envname",
"=",
"game",
",",
"settings",
"=",
"settings",
",",
"presets",
"=",
"presets",
")"
] | Vel factory function | [
"Vel",
"factory",
"function"
] | python | train |
rs/domcheck | domcheck/strategies.py | https://github.com/rs/domcheck/blob/43e10c345320564a1236778e8577e2b8ef825925/domcheck/strategies.py#L16-L30 | def check_dns_txt(domain, prefix, code):
"""
Validates a domain by checking that {prefix}={code} is present in the TXT DNS record
of the domain to check.
Returns true if verification succeeded.
"""
token = '{}={}'.format(prefix, code)
try:
for rr in dns.resolver.query(domain, 'TXT'):
if token in rr.to_text():
return True
except:
logger.debug('', exc_info=True)
return False | [
"def",
"check_dns_txt",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"token",
"=",
"'{}={}'",
".",
"format",
"(",
"prefix",
",",
"code",
")",
"try",
":",
"for",
"rr",
"in",
"dns",
".",
"resolver",
".",
"query",
"(",
"domain",
",",
"'TXT'",
")",
":",
"if",
"token",
"in",
"rr",
".",
"to_text",
"(",
")",
":",
"return",
"True",
"except",
":",
"logger",
".",
"debug",
"(",
"''",
",",
"exc_info",
"=",
"True",
")",
"return",
"False"
] | Validates a domain by checking that {prefix}={code} is present in the TXT DNS record
of the domain to check.
Returns true if verification succeeded. | [
"Validates",
"a",
"domain",
"by",
"checking",
"that",
"{",
"prefix",
"}",
"=",
"{",
"code",
"}",
"is",
"present",
"in",
"the",
"TXT",
"DNS",
"record",
"of",
"the",
"domain",
"to",
"check",
"."
] | python | train |
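A minimal sketch of the DNS-TXT strategy above; the domain, prefix, and code values are illustrative only:

    from domcheck.strategies import check_dns_txt  # import path taken from the record's path field

    # True only if example.com publishes a TXT record containing "verify=abc123"
    if check_dns_txt("example.com", "verify", "abc123"):
        print("domain verified")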
tjvr/kurt | kurt/__init__.py | https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L1596-L1599 | def has_conversion(self, plugin):
"""Return True if the plugin supports this block."""
plugin = kurt.plugin.Kurt.get_plugin(plugin)
return plugin.name in self._plugins | [
"def",
"has_conversion",
"(",
"self",
",",
"plugin",
")",
":",
"plugin",
"=",
"kurt",
".",
"plugin",
".",
"Kurt",
".",
"get_plugin",
"(",
"plugin",
")",
"return",
"plugin",
".",
"name",
"in",
"self",
".",
"_plugins"
] | Return True if the plugin supports this block. | [
"Return",
"True",
"if",
"the",
"plugin",
"supports",
"this",
"block",
"."
] | python | train |
MycroftAI/adapt | adapt/entity_tagger.py | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/entity_tagger.py#L33-L45 | def _iterate_subsequences(self, tokens):
"""
Using regex invokes this function, which significantly impacts performance of adapt. It is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
"""
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx | [
"def",
"_iterate_subsequences",
"(",
"self",
",",
"tokens",
")",
":",
"for",
"start_idx",
"in",
"xrange",
"(",
"len",
"(",
"tokens",
")",
")",
":",
"for",
"end_idx",
"in",
"xrange",
"(",
"start_idx",
"+",
"1",
",",
"len",
"(",
"tokens",
")",
"+",
"1",
")",
":",
"yield",
"' '",
".",
"join",
"(",
"tokens",
"[",
"start_idx",
":",
"end_idx",
"]",
")",
",",
"start_idx"
] | Using regex invokes this function, which significantly impacts performance of adapt. It is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ? | [
"Using",
"regex",
"invokes",
"this",
"function",
"which",
"significantly",
"impacts",
"performance",
"of",
"adapt",
".",
"it",
"is",
"an",
"N!",
"operation",
"."
] | python | train |
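The generator above walks every contiguous token span together with its start index; a standalone sketch of the same loop, with range substituted for Python 2's xrange and illustrative tokens:

    tokens = ["turn", "on", "the", "light"]
    for start_idx in range(len(tokens)):
        for end_idx in range(start_idx + 1, len(tokens) + 1):
            print(" ".join(tokens[start_idx:end_idx]), start_idx)
    # emits "turn" 0, "turn on" 0, ..., "light" 3: N*(N+1)/2 spans for N tokens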
SeattleTestbed/seash | pyreadline/modes/emacs.py | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/modes/emacs.py#L196-L212 | def _init_digit_argument(self, keyinfo):
"""Initialize search prompt
"""
c = self.console
line = self.l_buffer.get_line_text()
self._digit_argument_oldprompt = self.prompt
queue = self.process_keyevent_queue
queue = self.process_keyevent_queue
queue.append(self._process_digit_argument_keyevent)
if keyinfo.char == "-":
self.argument = -1
elif keyinfo.char in u"0123456789":
self.argument = int(keyinfo.char)
log(u"<%s> %s"%(self.argument, type(self.argument)))
self.prompt = u"(arg: %s) "%self.argument
log(u"arg-init %s %s"%(self.argument, keyinfo.char)) | [
"def",
"_init_digit_argument",
"(",
"self",
",",
"keyinfo",
")",
":",
"c",
"=",
"self",
".",
"console",
"line",
"=",
"self",
".",
"l_buffer",
".",
"get_line_text",
"(",
")",
"self",
".",
"_digit_argument_oldprompt",
"=",
"self",
".",
"prompt",
"queue",
"=",
"self",
".",
"process_keyevent_queue",
"queue",
"=",
"self",
".",
"process_keyevent_queue",
"queue",
".",
"append",
"(",
"self",
".",
"_process_digit_argument_keyevent",
")",
"if",
"keyinfo",
".",
"char",
"==",
"\"-\"",
":",
"self",
".",
"argument",
"=",
"-",
"1",
"elif",
"keyinfo",
".",
"char",
"in",
"u\"0123456789\"",
":",
"self",
".",
"argument",
"=",
"int",
"(",
"keyinfo",
".",
"char",
")",
"log",
"(",
"u\"<%s> %s\"",
"%",
"(",
"self",
".",
"argument",
",",
"type",
"(",
"self",
".",
"argument",
")",
")",
")",
"self",
".",
"prompt",
"=",
"u\"(arg: %s) \"",
"%",
"self",
".",
"argument",
"log",
"(",
"u\"arg-init %s %s\"",
"%",
"(",
"self",
".",
"argument",
",",
"keyinfo",
".",
"char",
")",
")"
] | Initialize search prompt | [
"Initialize",
"search",
"prompt"
] | python | train |
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_cfg_syncer.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_cfg_syncer.py#L326-L336 | def get_running_config(self, conn):
"""Get the ASR1k's current running config.
:return: Current IOS running config as multiline string
"""
config = conn.get_config(source="running")
if config:
root = ET.fromstring(config._raw)
running_config = root[0][0]
rgx = re.compile("\r*\n+")
ioscfg = rgx.split(running_config.text)
return ioscfg | [
"def",
"get_running_config",
"(",
"self",
",",
"conn",
")",
":",
"config",
"=",
"conn",
".",
"get_config",
"(",
"source",
"=",
"\"running\"",
")",
"if",
"config",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"config",
".",
"_raw",
")",
"running_config",
"=",
"root",
"[",
"0",
"]",
"[",
"0",
"]",
"rgx",
"=",
"re",
".",
"compile",
"(",
"\"\\r*\\n+\"",
")",
"ioscfg",
"=",
"rgx",
".",
"split",
"(",
"running_config",
".",
"text",
")",
"return",
"ioscfg"
] | Get the ASR1k's current running config.
:return: Current IOS running config as multiline string | [
"Get",
"the",
"ASR1k",
"s",
"current",
"running",
"config",
".",
":",
"return",
":",
"Current",
"IOS",
"running",
"config",
"as",
"multiline",
"string"
] | python | train |
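A small sketch of the splitting step the method applies to the raw get-config payload; the sample IOS text is illustrative:

    import re

    raw = "interface GigabitEthernet1\r\n ip address 10.0.0.1 255.255.255.0\r\n!"
    rgx = re.compile("\r*\n+")  # the same pattern the method compiles
    print(rgx.split(raw))
    # ['interface GigabitEthernet1', ' ip address 10.0.0.1 255.255.255.0', '!']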
quantmind/pulsar | pulsar/apps/wsgi/content.py | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/content.py#L210-L222 | def html_factory(tag, **defaults):
'''Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla')
'''
def html_input(*children, **params):
p = defaults.copy()
p.update(params)
return Html(tag, *children, **p)
return html_input | [
"def",
"html_factory",
"(",
"tag",
",",
"*",
"*",
"defaults",
")",
":",
"def",
"html_input",
"(",
"*",
"children",
",",
"*",
"*",
"params",
")",
":",
"p",
"=",
"defaults",
".",
"copy",
"(",
")",
"p",
".",
"update",
"(",
"params",
")",
"return",
"Html",
"(",
"tag",
",",
"*",
"children",
",",
"*",
"*",
"p",
")",
"return",
"html_input"
] | Returns an :class:`Html` factory function for ``tag`` and a given
dictionary of ``defaults`` parameters. For example::
>>> input_factory = html_factory('input', type='text')
>>> html = input_factory(value='bla') | [
"Returns",
"an",
":",
"class",
":",
"Html",
"factory",
"function",
"for",
"tag",
"and",
"a",
"given",
"dictionary",
"of",
"defaults",
"parameters",
".",
"For",
"example",
"::"
] | python | train |
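The record's own docstring example, expanded into a runnable sketch; Html is the class defined in the same module:

    from pulsar.apps.wsgi.content import Html, html_factory  # path taken from the record

    input_factory = html_factory('input', type='text')  # defaults captured once
    html = input_factory(value='bla')                    # per-call params merged over them
    # equivalent to Html('input', type='text', value='bla')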
Azure/msrest-for-python | msrest/service_client.py | https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/service_client.py#L172-L182 | def put(self, url, params=None, headers=None, content=None, form_content=None):
# type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest
"""Create a PUT request object.
:param str url: The request URL.
:param dict params: Request URL parameters.
:param dict headers: Headers
:param dict form_content: Form content
"""
request = self._request('PUT', url, params, headers, content, form_content)
return request | [
"def",
"put",
"(",
"self",
",",
"url",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"content",
"=",
"None",
",",
"form_content",
"=",
"None",
")",
":",
"# type: (str, Optional[Dict[str, str]], Optional[Dict[str, str]], Any, Optional[Dict[str, Any]]) -> ClientRequest",
"request",
"=",
"self",
".",
"_request",
"(",
"'PUT'",
",",
"url",
",",
"params",
",",
"headers",
",",
"content",
",",
"form_content",
")",
"return",
"request"
] | Create a PUT request object.
:param str url: The request URL.
:param dict params: Request URL parameters.
:param dict headers: Headers
:param dict form_content: Form content | [
"Create",
"a",
"PUT",
"request",
"object",
"."
] | python | train |
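A hedged sketch of building a PUT request with the method above; constructing the service client itself is elided, and the URL and parameter values are placeholders:

    # 'client' is an already-configured msrest ServiceClient instance (assumed)
    request = client.put(
        "/resource/42",
        params={"api-version": "2018-01-01"},
        headers={"x-ms-client-request-id": "abc"},
    )
    # the returned ClientRequest still has to be sent through the client's pipeline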
pennlabs/penn-sdk-python | penn/wharton.py | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L142-L184 | def switch_format(self, gsr):
""" Convert the Wharton GSR format into the studyspaces API format. """
if "error" in gsr:
return gsr
categories = {
"cid": 1,
"name": "Huntsman Hall",
"rooms": []
}
for time in gsr["times"]:
for entry in time:
entry["name"] = entry["room_number"]
del entry["room_number"]
start_time_str = entry["start_time"]
end_time = datetime.datetime.strptime(start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + "-{}".format(self.get_dst_gmt_timezone())
time = {
"available": not entry["reserved"],
"start": entry["start_time"],
"end": end_time_str,
}
exists = False
for room in categories["rooms"]:
if room["name"] == entry["name"]:
room["times"].append(time)
exists = True
if not exists:
del entry["booked_by_user"]
del entry["building"]
if "reservation_id" in entry:
del entry["reservation_id"]
entry["lid"] = 1
entry["gid"] = 1
entry["capacity"] = 5
entry["room_id"] = int(entry["id"])
del entry["id"]
entry["times"] = [time]
del entry["reserved"]
del entry["end_time"]
del entry["start_time"]
categories["rooms"].append(entry)
return {"categories": [categories], "rooms": categories["rooms"]} | [
"def",
"switch_format",
"(",
"self",
",",
"gsr",
")",
":",
"if",
"\"error\"",
"in",
"gsr",
":",
"return",
"gsr",
"categories",
"=",
"{",
"\"cid\"",
":",
"1",
",",
"\"name\"",
":",
"\"Huntsman Hall\"",
",",
"\"rooms\"",
":",
"[",
"]",
"}",
"for",
"time",
"in",
"gsr",
"[",
"\"times\"",
"]",
":",
"for",
"entry",
"in",
"time",
":",
"entry",
"[",
"\"name\"",
"]",
"=",
"entry",
"[",
"\"room_number\"",
"]",
"del",
"entry",
"[",
"\"room_number\"",
"]",
"start_time_str",
"=",
"entry",
"[",
"\"start_time\"",
"]",
"end_time",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"start_time_str",
"[",
":",
"-",
"6",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"30",
")",
"end_time_str",
"=",
"end_time",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"+",
"\"-{}\"",
".",
"format",
"(",
"self",
".",
"get_dst_gmt_timezone",
"(",
")",
")",
"time",
"=",
"{",
"\"available\"",
":",
"not",
"entry",
"[",
"\"reserved\"",
"]",
",",
"\"start\"",
":",
"entry",
"[",
"\"start_time\"",
"]",
",",
"\"end\"",
":",
"end_time_str",
",",
"}",
"exists",
"=",
"False",
"for",
"room",
"in",
"categories",
"[",
"\"rooms\"",
"]",
":",
"if",
"room",
"[",
"\"name\"",
"]",
"==",
"entry",
"[",
"\"name\"",
"]",
":",
"room",
"[",
"\"times\"",
"]",
".",
"append",
"(",
"time",
")",
"exists",
"=",
"True",
"if",
"not",
"exists",
":",
"del",
"entry",
"[",
"\"booked_by_user\"",
"]",
"del",
"entry",
"[",
"\"building\"",
"]",
"if",
"\"reservation_id\"",
"in",
"entry",
":",
"del",
"entry",
"[",
"\"reservation_id\"",
"]",
"entry",
"[",
"\"lid\"",
"]",
"=",
"1",
"entry",
"[",
"\"gid\"",
"]",
"=",
"1",
"entry",
"[",
"\"capacity\"",
"]",
"=",
"5",
"entry",
"[",
"\"room_id\"",
"]",
"=",
"int",
"(",
"entry",
"[",
"\"id\"",
"]",
")",
"del",
"entry",
"[",
"\"id\"",
"]",
"entry",
"[",
"\"times\"",
"]",
"=",
"[",
"time",
"]",
"del",
"entry",
"[",
"\"reserved\"",
"]",
"del",
"entry",
"[",
"\"end_time\"",
"]",
"del",
"entry",
"[",
"\"start_time\"",
"]",
"categories",
"[",
"\"rooms\"",
"]",
".",
"append",
"(",
"entry",
")",
"return",
"{",
"\"categories\"",
":",
"[",
"categories",
"]",
",",
"\"rooms\"",
":",
"categories",
"[",
"\"rooms\"",
"]",
"}"
] | Convert the Wharton GSR format into the studyspaces API format. | [
"Convert",
"the",
"Wharton",
"GSR",
"format",
"into",
"the",
"studyspaces",
"API",
"format",
"."
] | python | train |
mangalam-research/selenic | selenic/builder.py | https://github.com/mangalam-research/selenic/blob/2284c68e15fa3d34b88aa2eec1a2e8ecd37f44ad/selenic/builder.py#L260-L295 | def chromedriver_element_center_patch():
"""
Patch move_to_element on ActionChains to work around a bug present
in Chromedriver 2.14 to 2.20.
Calling this function multiple times in the same process will
install the patch once, and just once.
"""
patch_name = "_selenic_chromedriver_element_center_patched"
if getattr(ActionChains, patch_name, None):
return # We've patched ActionChains already!!
# This is the patched method, which uses getBoundingClientRect
# to get the location of the center.
def move_to_element(self, el):
pos = self._driver.execute_script("""
var rect = arguments[0].getBoundingClientRect();
return { x: rect.width / 2, y: rect.height / 2};
""", el)
self.move_to_element_with_offset(el, pos["x"], pos["y"])
return self
old_init = ActionChains.__init__
def init(self, driver):
old_init(self, driver)
# Patch the instance, only if the driver needs it.
if getattr(driver, CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG, None):
self.move_to_element = types.MethodType(move_to_element, self)
ActionChains.__init__ = init
# Mark ActionChains as patched!
setattr(ActionChains, patch_name, True) | [
"def",
"chromedriver_element_center_patch",
"(",
")",
":",
"patch_name",
"=",
"\"_selenic_chromedriver_element_center_patched\"",
"if",
"getattr",
"(",
"ActionChains",
",",
"patch_name",
",",
"None",
")",
":",
"return",
"# We've patched ActionChains already!!",
"# This is the patched method, which uses getBoundingClientRect",
"# to get the location of the center.",
"def",
"move_to_element",
"(",
"self",
",",
"el",
")",
":",
"pos",
"=",
"self",
".",
"_driver",
".",
"execute_script",
"(",
"\"\"\"\n var rect = arguments[0].getBoundingClientRect();\n return { x: rect.width / 2, y: rect.height / 2};\n \"\"\"",
",",
"el",
")",
"self",
".",
"move_to_element_with_offset",
"(",
"el",
",",
"pos",
"[",
"\"x\"",
"]",
",",
"pos",
"[",
"\"y\"",
"]",
")",
"return",
"self",
"old_init",
"=",
"ActionChains",
".",
"__init__",
"def",
"init",
"(",
"self",
",",
"driver",
")",
":",
"old_init",
"(",
"self",
",",
"driver",
")",
"# Patch the instance, only if the driver needs it.",
"if",
"getattr",
"(",
"driver",
",",
"CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG",
",",
"None",
")",
":",
"self",
".",
"move_to_element",
"=",
"types",
".",
"MethodType",
"(",
"move_to_element",
",",
"self",
")",
"ActionChains",
".",
"__init__",
"=",
"init",
"# Mark ActionChains as patched!",
"setattr",
"(",
"ActionChains",
",",
"patch_name",
",",
"True",
")"
] | Patch move_to_element on ActionChains to work around a bug present
in Chromedriver 2.14 to 2.20.
Calling this function multiple times in the same process will
install the patch once, and just once. | [
"Patch",
"move_to_element",
"on",
"ActionChains",
"to",
"work",
"around",
"a",
"bug",
"present",
"in",
"Chromedriver",
"2",
".",
"14",
"to",
"2",
".",
"20",
"."
] | python | train |
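Usage sketch: install the patch once at startup, then drive ActionChains as usual. The element locator is illustrative, and as the code above shows, the patched move_to_element only activates for drivers carrying the patch flag:

    from selenium import webdriver
    from selenium.webdriver.common.action_chains import ActionChains
    from selenic.builder import chromedriver_element_center_patch  # path from the record

    chromedriver_element_center_patch()  # safe to call repeatedly; patches only once
    driver = webdriver.Chrome()
    el = driver.find_element_by_id("target")
    ActionChains(driver).move_to_element(el).click().perform()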
i3visio/osrframework | osrframework/checkfy.py | https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/checkfy.py#L39-L63 | def createEmails(nicks=None, nicksFile=None):
"""
Method that globally permits generating the emails to be checked.
Args:
-----
nicks: List of aliases.
nicksFile: The filepath to the aliases file.
Returns:
--------
list: list of emails to be checked.
"""
candidate_emails = set()
if nicks != None:
for n in nicks:
for e in email_providers.domains:
candidate_emails.add("{}@{}".format(n, e))
elif nicksFile != None:
with open(nicksFile, "r") as iF:
nicks = iF.read().splitlines()
for n in nicks:
for e in email_providers.domains:
candidate_emails.add("{}@{}".format(n, e))
return candidate_emails | [
"def",
"createEmails",
"(",
"nicks",
"=",
"None",
",",
"nicksFile",
"=",
"None",
")",
":",
"candidate_emails",
"=",
"set",
"(",
")",
"if",
"nicks",
"!=",
"None",
":",
"for",
"n",
"in",
"nicks",
":",
"for",
"e",
"in",
"email_providers",
".",
"domains",
":",
"candidate_emails",
".",
"add",
"(",
"\"{}@{}\"",
".",
"format",
"(",
"n",
",",
"e",
")",
")",
"elif",
"nicksFile",
"!=",
"None",
":",
"with",
"open",
"(",
"nicksFile",
",",
"\"r\"",
")",
"as",
"iF",
":",
"nicks",
"=",
"iF",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"for",
"n",
"in",
"nicks",
":",
"for",
"e",
"in",
"email_providers",
".",
"domains",
":",
"candidate_emails",
".",
"add",
"(",
"\"{}@{}\"",
".",
"format",
"(",
"n",
",",
"e",
")",
")",
"return",
"candidate_emails"
] | Method that globally permits to generate the emails to be checked.
Args:
-----
nicks: List of aliases.
nicksFile: The filepath to the aliases file.
Returns:
--------
list: list of emails to be checked. | [
"Method",
"that",
"globally",
"permits",
"to",
"generate",
"the",
"emails",
"to",
"be",
"checked",
"."
] | python | train |
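A quick sketch of the alias expansion above; it builds one candidate address per (alias, provider-domain) pair, with the domain list coming from email_providers.domains:

    from osrframework.checkfy import createEmails  # import path taken from the record

    candidates = createEmails(nicks=["jdoe", "johndoe"])
    print(len(candidates))  # up to len(nicks) * len(email_providers.domains) addresses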
ekmmetering/ekmmeters | ekmmeters.py | https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L1574-L1609 | def setMaxDemandPeriod(self, period, password="00000000"):
""" Serial call to set max demand period.
Args:
period (int): : as int.
password (str): Optional password.
Returns:
bool: True on completion with ACK.
"""
result = False
self.setContext("setMaxDemandPeriod")
try:
if period < 1 or period > 3:
self.writeCmdMsg("Correct parameter: 1 = 15 minute, 2 = 30 minute, 3 = hour")
self.setContext("")
return result
if not self.request(False):
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_str = "015731023030353028" + binascii.hexlify(str(period)).zfill(2) + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setMaxDemandPeriod): 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result | [
"def",
"setMaxDemandPeriod",
"(",
"self",
",",
"period",
",",
"password",
"=",
"\"00000000\"",
")",
":",
"result",
"=",
"False",
"self",
".",
"setContext",
"(",
"\"setMaxDemandPeriod\"",
")",
"try",
":",
"if",
"period",
"<",
"1",
"or",
"period",
">",
"3",
":",
"self",
".",
"writeCmdMsg",
"(",
"\"Correct parameter: 1 = 15 minute, 2 = 30 minute, 3 = hour\"",
")",
"self",
".",
"setContext",
"(",
"\"\"",
")",
"return",
"result",
"if",
"not",
"self",
".",
"request",
"(",
"False",
")",
":",
"self",
".",
"writeCmdMsg",
"(",
"\"Bad read CRC on setting\"",
")",
"else",
":",
"if",
"not",
"self",
".",
"serialCmdPwdAuth",
"(",
"password",
")",
":",
"self",
".",
"writeCmdMsg",
"(",
"\"Password failure\"",
")",
"else",
":",
"req_str",
"=",
"\"015731023030353028\"",
"+",
"binascii",
".",
"hexlify",
"(",
"str",
"(",
"period",
")",
")",
".",
"zfill",
"(",
"2",
")",
"+",
"\"2903\"",
"req_str",
"+=",
"self",
".",
"calc_crc16",
"(",
"req_str",
"[",
"2",
":",
"]",
".",
"decode",
"(",
"\"hex\"",
")",
")",
"self",
".",
"m_serial_port",
".",
"write",
"(",
"req_str",
".",
"decode",
"(",
"\"hex\"",
")",
")",
"if",
"self",
".",
"m_serial_port",
".",
"getResponse",
"(",
"self",
".",
"getContext",
"(",
")",
")",
".",
"encode",
"(",
"\"hex\"",
")",
"==",
"\"06\"",
":",
"self",
".",
"writeCmdMsg",
"(",
"\"Success(setMaxDemandPeriod): 06 returned.\"",
")",
"result",
"=",
"True",
"self",
".",
"serialPostEnd",
"(",
")",
"except",
":",
"ekm_log",
"(",
"traceback",
".",
"format_exc",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
"self",
".",
"setContext",
"(",
"\"\"",
")",
"return",
"result"
] | Serial call to set max demand period.
Args:
period (int): demand period as int; 1 = 15 minute, 2 = 30 minute, 3 = hour.
password (str): Optional password.
Returns:
bool: True on completion with ACK. | [
"Serial",
"call",
"to",
"set",
"max",
"demand",
"period",
"."
] | python | test |
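A usage sketch for the serial setter above; the meter class, meter address, and port wiring are assumptions about the surrounding library, shown only to situate the call:

    meter = V3Meter("300001162")     # class name and meter address are assumed/illustrative
    meter.attachPort(serial_port)    # an already-initialized serial port object is assumed
    if meter.setMaxDemandPeriod(2):  # 1 = 15 minute, 2 = 30 minute, 3 = hour, per the guard above
        print("demand period updated")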
Karaage-Cluster/karaage | karaage/people/emails.py | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/people/emails.py#L67-L83 | def send_reset_password_email(person):
"""Sends an email to user allowing them to set their password."""
uid = urlsafe_base64_encode(force_bytes(person.pk)).decode("ascii")
token = default_token_generator.make_token(person)
url = '%s/persons/reset/%s/%s/' % (
settings.REGISTRATION_BASE_URL, uid, token)
context = CONTEXT.copy()
context.update({
'url': url,
'receiver': person,
})
to_email = person.email
subject, body = render_email('reset_password', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) | [
"def",
"send_reset_password_email",
"(",
"person",
")",
":",
"uid",
"=",
"urlsafe_base64_encode",
"(",
"force_bytes",
"(",
"person",
".",
"pk",
")",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"token",
"=",
"default_token_generator",
".",
"make_token",
"(",
"person",
")",
"url",
"=",
"'%s/persons/reset/%s/%s/'",
"%",
"(",
"settings",
".",
"REGISTRATION_BASE_URL",
",",
"uid",
",",
"token",
")",
"context",
"=",
"CONTEXT",
".",
"copy",
"(",
")",
"context",
".",
"update",
"(",
"{",
"'url'",
":",
"url",
",",
"'receiver'",
":",
"person",
",",
"}",
")",
"to_email",
"=",
"person",
".",
"email",
"subject",
",",
"body",
"=",
"render_email",
"(",
"'reset_password'",
",",
"context",
")",
"send_mail",
"(",
"subject",
",",
"body",
",",
"settings",
".",
"ACCOUNTS_EMAIL",
",",
"[",
"to_email",
"]",
")"
] | Sends an email to user allowing them to set their password. | [
"Sends",
"an",
"email",
"to",
"user",
"allowing",
"them",
"to",
"set",
"their",
"password",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/storage/linux/ceph.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L392-L413 | def get_mon_map(service):
"""
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: dict. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(['ceph', '--id', service,
'mon_status', '--format=json'])
if six.PY3:
mon_status = mon_status.decode('UTF-8')
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}"
.format(mon_status, str(v)))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}"
.format(str(e)))
raise | [
"def",
"get_mon_map",
"(",
"service",
")",
":",
"try",
":",
"mon_status",
"=",
"check_output",
"(",
"[",
"'ceph'",
",",
"'--id'",
",",
"service",
",",
"'mon_status'",
",",
"'--format=json'",
"]",
")",
"if",
"six",
".",
"PY3",
":",
"mon_status",
"=",
"mon_status",
".",
"decode",
"(",
"'UTF-8'",
")",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"mon_status",
")",
"except",
"ValueError",
"as",
"v",
":",
"log",
"(",
"\"Unable to parse mon_status json: {}. Error: {}\"",
".",
"format",
"(",
"mon_status",
",",
"str",
"(",
"v",
")",
")",
")",
"raise",
"except",
"CalledProcessError",
"as",
"e",
":",
"log",
"(",
"\"mon_status command failed with message: {}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"raise"
] | Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: dict. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails | [
"Returns",
"the",
"current",
"monitor",
"map",
".",
":",
"param",
"service",
":",
"six",
".",
"string_types",
".",
"The",
"Ceph",
"user",
"name",
"to",
"run",
"the",
"command",
"under",
":",
"return",
":",
"json",
"string",
".",
":",
"raise",
":",
"ValueError",
"if",
"the",
"monmap",
"fails",
"to",
"parse",
".",
"Also",
"raises",
"CalledProcessError",
"if",
"our",
"ceph",
"command",
"fails"
] | python | train |
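A short sketch of calling the helper above as the 'admin' Ceph user; it requires a reachable cluster with the ceph CLI on PATH:

    monmap = get_mon_map('admin')
    # the dict layout follows 'ceph mon_status --format=json'; 'quorum' is one such key
    print(monmap.get('quorum'))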
pypa/pipenv | pipenv/cli/command.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/cli/command.py#L593-L618 | def sync(
ctx,
state,
bare=False,
user=False,
unused=False,
**kwargs
):
"""Installs all packages specified in Pipfile.lock."""
from ..core import do_sync
retcode = do_sync(
ctx=ctx,
dev=state.installstate.dev,
three=state.three,
python=state.python,
bare=bare,
dont_upgrade=(not state.installstate.keep_outdated),
user=user,
clear=state.clear,
unused=unused,
sequential=state.installstate.sequential,
pypi_mirror=state.pypi_mirror,
)
if retcode:
ctx.abort() | [
"def",
"sync",
"(",
"ctx",
",",
"state",
",",
"bare",
"=",
"False",
",",
"user",
"=",
"False",
",",
"unused",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
".",
"core",
"import",
"do_sync",
"retcode",
"=",
"do_sync",
"(",
"ctx",
"=",
"ctx",
",",
"dev",
"=",
"state",
".",
"installstate",
".",
"dev",
",",
"three",
"=",
"state",
".",
"three",
",",
"python",
"=",
"state",
".",
"python",
",",
"bare",
"=",
"bare",
",",
"dont_upgrade",
"=",
"(",
"not",
"state",
".",
"installstate",
".",
"keep_outdated",
")",
",",
"user",
"=",
"user",
",",
"clear",
"=",
"state",
".",
"clear",
",",
"unused",
"=",
"unused",
",",
"sequential",
"=",
"state",
".",
"installstate",
".",
"sequential",
",",
"pypi_mirror",
"=",
"state",
".",
"pypi_mirror",
",",
")",
"if",
"retcode",
":",
"ctx",
".",
"abort",
"(",
")"
] | Installs all packages specified in Pipfile.lock. | [
"Installs",
"all",
"packages",
"specified",
"in",
"Pipfile",
".",
"lock",
"."
] | python | train |
saltstack/salt | salt/utils/msgpack.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/msgpack.py#L76-L87 | def unpackb(packed, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
return msgpack_module.unpackb(packed, **kwargs) | [
"def",
"unpackb",
"(",
"packed",
",",
"*",
"*",
"kwargs",
")",
":",
"msgpack_module",
"=",
"kwargs",
".",
"pop",
"(",
"'_msgpack_module'",
",",
"msgpack",
")",
"return",
"msgpack_module",
".",
"unpackb",
"(",
"packed",
",",
"*",
"*",
"kwargs",
")"
] | .. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument. | [
"..",
"versionadded",
"::",
"2018",
".",
"3",
".",
"4"
] | python | train |
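A round-trip sketch for the wrapper above, packing with the upstream msgpack module and unpacking through the Salt wrapper:

    import msgpack
    import salt.utils.msgpack

    packed = msgpack.packb([1, 2, 3])
    print(salt.utils.msgpack.unpackb(packed))  # [1, 2, 3]; delegates to msgpack.unpackb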
marcomusy/vtkplotter | vtkplotter/shapes.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/shapes.py#L588-L632 | def Polygon(pos=(0, 0, 0), normal=(0, 0, 1), nsides=6, r=1, c="coral",
bc="darkgreen", lw=1, alpha=1, followcam=False):
"""
Build a 2D polygon of `nsides` of radius `r` oriented as `normal`.
:param followcam: if `True` the polygon will auto-orient itself to the active camera.
A ``vtkCamera`` object can also be passed.
:type followcam: bool, vtkCamera
|Polygon|
"""
ps = vtk.vtkRegularPolygonSource()
ps.SetNumberOfSides(nsides)
ps.SetRadius(r)
ps.SetNormal(-np.array(normal))
ps.Update()
tf = vtk.vtkTriangleFilter()
tf.SetInputConnection(ps.GetOutputPort())
tf.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tf.GetOutputPort())
if followcam:
actor = vtk.vtkFollower()
if isinstance(followcam, vtk.vtkCamera):
actor.SetCamera(followcam)
else:
actor.SetCamera(settings.plotter_instance.camera)
else:
actor = Actor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.getColor(c))
actor.GetProperty().SetOpacity(alpha)
actor.GetProperty().SetLineWidth(lw)
actor.GetProperty().SetInterpolationToFlat()
if bc: # defines a specific color for the backface
backProp = vtk.vtkProperty()
backProp.SetDiffuseColor(colors.getColor(bc))
backProp.SetOpacity(alpha)
actor.SetBackfaceProperty(backProp)
actor.SetPosition(pos)
settings.collectable_actors.append(actor)
return actor | [
"def",
"Polygon",
"(",
"pos",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"normal",
"=",
"(",
"0",
",",
"0",
",",
"1",
")",
",",
"nsides",
"=",
"6",
",",
"r",
"=",
"1",
",",
"c",
"=",
"\"coral\"",
",",
"bc",
"=",
"\"darkgreen\"",
",",
"lw",
"=",
"1",
",",
"alpha",
"=",
"1",
",",
"followcam",
"=",
"False",
")",
":",
"ps",
"=",
"vtk",
".",
"vtkRegularPolygonSource",
"(",
")",
"ps",
".",
"SetNumberOfSides",
"(",
"nsides",
")",
"ps",
".",
"SetRadius",
"(",
"r",
")",
"ps",
".",
"SetNormal",
"(",
"-",
"np",
".",
"array",
"(",
"normal",
")",
")",
"ps",
".",
"Update",
"(",
")",
"tf",
"=",
"vtk",
".",
"vtkTriangleFilter",
"(",
")",
"tf",
".",
"SetInputConnection",
"(",
"ps",
".",
"GetOutputPort",
"(",
")",
")",
"tf",
".",
"Update",
"(",
")",
"mapper",
"=",
"vtk",
".",
"vtkPolyDataMapper",
"(",
")",
"mapper",
".",
"SetInputConnection",
"(",
"tf",
".",
"GetOutputPort",
"(",
")",
")",
"if",
"followcam",
":",
"actor",
"=",
"vtk",
".",
"vtkFollower",
"(",
")",
"if",
"isinstance",
"(",
"followcam",
",",
"vtk",
".",
"vtkCamera",
")",
":",
"actor",
".",
"SetCamera",
"(",
"followcam",
")",
"else",
":",
"actor",
".",
"SetCamera",
"(",
"settings",
".",
"plotter_instance",
".",
"camera",
")",
"else",
":",
"actor",
"=",
"Actor",
"(",
")",
"actor",
".",
"SetMapper",
"(",
"mapper",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetColor",
"(",
"colors",
".",
"getColor",
"(",
"c",
")",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetOpacity",
"(",
"alpha",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetLineWidth",
"(",
"lw",
")",
"actor",
".",
"GetProperty",
"(",
")",
".",
"SetInterpolationToFlat",
"(",
")",
"if",
"bc",
":",
"# defines a specific color for the backface",
"backProp",
"=",
"vtk",
".",
"vtkProperty",
"(",
")",
"backProp",
".",
"SetDiffuseColor",
"(",
"colors",
".",
"getColor",
"(",
"bc",
")",
")",
"backProp",
".",
"SetOpacity",
"(",
"alpha",
")",
"actor",
".",
"SetBackfaceProperty",
"(",
"backProp",
")",
"actor",
".",
"SetPosition",
"(",
"pos",
")",
"settings",
".",
"collectable_actors",
".",
"append",
"(",
"actor",
")",
"return",
"actor"
] | Build a 2D polygon of `nsides` of radius `r` oriented as `normal`.
:param followcam: if `True` the polygon will auto-orient itself to the active camera.
A ``vtkCamera`` object can also be passed.
:type followcam: bool, vtkCamera
|Polygon| | [
"Build",
"a",
"2D",
"polygon",
"of",
"nsides",
"of",
"radius",
"r",
"oriented",
"as",
"normal",
"."
] | python | train |
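A minimal sketch of building and rendering the polygon actor above; color and radius are illustrative, and the top-level names are assumed to be re-exported by the package:

    from vtkplotter import Polygon, show  # top-level re-exports assumed

    pol = Polygon(pos=(0, 0, 0), nsides=5, r=2, c="tomato")
    show(pol)  # opens an interactive render window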
mozilla/configman | configman/value_sources/for_getopt.py | https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/value_sources/for_getopt.py#L171-L210 | def getopt_with_ignore(args, shortopts, longopts=[]):
"""my_getopt(args, options[, long_options]) -> opts, args
This function works like gnu_getopt(), except that unknown parameters
are ignored rather than raising an error.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0].startswith('--'):
try:
opts, args = getopt.do_longs(
opts,
args[0][2:],
longopts,
args[1:]
)
except getopt.GetoptError:
args = args[1:]
elif args[0][0] == '-':
try:
opts, args = getopt.do_shorts(
opts,
args[0][1:],
shortopts,
args[1:]
)
except getopt.GetoptError:
args = args[1:]
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args | [
"def",
"getopt_with_ignore",
"(",
"args",
",",
"shortopts",
",",
"longopts",
"=",
"[",
"]",
")",
":",
"opts",
"=",
"[",
"]",
"prog_args",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"longopts",
",",
"str",
")",
":",
"longopts",
"=",
"[",
"longopts",
"]",
"else",
":",
"longopts",
"=",
"list",
"(",
"longopts",
")",
"while",
"args",
":",
"if",
"args",
"[",
"0",
"]",
"==",
"'--'",
":",
"prog_args",
"+=",
"args",
"[",
"1",
":",
"]",
"break",
"if",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"'--'",
")",
":",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"do_longs",
"(",
"opts",
",",
"args",
"[",
"0",
"]",
"[",
"2",
":",
"]",
",",
"longopts",
",",
"args",
"[",
"1",
":",
"]",
")",
"except",
"getopt",
".",
"GetoptError",
":",
"args",
"=",
"args",
"[",
"1",
":",
"]",
"elif",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'-'",
":",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"do_shorts",
"(",
"opts",
",",
"args",
"[",
"0",
"]",
"[",
"1",
":",
"]",
",",
"shortopts",
",",
"args",
"[",
"1",
":",
"]",
")",
"except",
"getopt",
".",
"GetoptError",
":",
"args",
"=",
"args",
"[",
"1",
":",
"]",
"else",
":",
"prog_args",
".",
"append",
"(",
"args",
"[",
"0",
"]",
")",
"args",
"=",
"args",
"[",
"1",
":",
"]",
"return",
"opts",
",",
"prog_args"
] | my_getopt(args, options[, long_options]) -> opts, args
This function works like gnu_getopt(), except that unknown parameters
are ignored rather than raising an error. | [
"my_getopt",
"(",
"args",
"options",
"[",
"long_options",
"]",
")",
"-",
">",
"opts",
"args"
] | python | train |
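A behavioral sketch of the lenient parser above; unlike getopt.gnu_getopt, unknown switches are silently dropped (argv values are illustrative):

    argv = ["--admin", "--unknown-flag", "-x", "positional"]
    opts, prog_args = getopt_with_ignore(argv, "", ["admin"])
    print(opts)       # [('--admin', '')]
    print(prog_args)  # ['positional']; '--unknown-flag' and '-x' were ignored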
dpkp/kafka-python | kafka/admin/client.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/admin/client.py#L646-L693 | def list_consumer_groups(self, broker_ids=None):
"""List all consumer groups known to the cluster.
This returns a list of Consumer Group tuples. The tuples are
composed of the consumer group name and the consumer group protocol
type.
Only consumer groups that store their offsets in Kafka are returned.
The protocol type will be an empty string for groups created using
Kafka < 0.9 APIs because, although they store their offsets in Kafka,
they don't use Kafka for group coordination. For groups created using
Kafka >= 0.9, the protocol type will typically be "consumer".
As soon as any error is encountered, it is immediately raised.
:param broker_ids: A list of broker node_ids to query for consumer
groups. If set to None, will query all brokers in the cluster.
Explicitly specifying broker(s) can be useful for determining which
consumer groups are coordinated by those broker(s). Default: None
:return list: List of tuples of Consumer Groups.
:exception GroupCoordinatorNotAvailableError: The coordinator is not
available, so cannot process requests.
:exception GroupLoadInProgressError: The coordinator is loading and
hence can't process requests.
"""
# While we return a list, internally use a set to prevent duplicates
# because if a group coordinator fails after being queried, and its
# consumer groups move to new brokers that haven't yet been queried,
# then the same group could be returned by multiple brokers.
consumer_groups = set()
if broker_ids is None:
broker_ids = [broker.nodeId for broker in self._client.cluster.brokers()]
version = self._matching_api_version(ListGroupsRequest)
if version <= 2:
request = ListGroupsRequest[version]()
for broker_id in broker_ids:
response = self._send_request_to_node(broker_id, request)
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
raise error_type(
"Request '{}' failed with response '{}'."
.format(request, response))
consumer_groups.update(response.groups)
else:
raise NotImplementedError(
"Support for ListGroups v{} has not yet been added to KafkaAdminClient."
.format(version))
return list(consumer_groups) | [
"def",
"list_consumer_groups",
"(",
"self",
",",
"broker_ids",
"=",
"None",
")",
":",
"# While we return a list, internally use a set to prevent duplicates",
"# because if a group coordinator fails after being queried, and its",
"# consumer groups move to new brokers that haven't yet been queried,",
"# then the same group could be returned by multiple brokers.",
"consumer_groups",
"=",
"set",
"(",
")",
"if",
"broker_ids",
"is",
"None",
":",
"broker_ids",
"=",
"[",
"broker",
".",
"nodeId",
"for",
"broker",
"in",
"self",
".",
"_client",
".",
"cluster",
".",
"brokers",
"(",
")",
"]",
"version",
"=",
"self",
".",
"_matching_api_version",
"(",
"ListGroupsRequest",
")",
"if",
"version",
"<=",
"2",
":",
"request",
"=",
"ListGroupsRequest",
"[",
"version",
"]",
"(",
")",
"for",
"broker_id",
"in",
"broker_ids",
":",
"response",
"=",
"self",
".",
"_send_request_to_node",
"(",
"broker_id",
",",
"request",
")",
"error_type",
"=",
"Errors",
".",
"for_code",
"(",
"response",
".",
"error_code",
")",
"if",
"error_type",
"is",
"not",
"Errors",
".",
"NoError",
":",
"raise",
"error_type",
"(",
"\"Request '{}' failed with response '{}'.\"",
".",
"format",
"(",
"request",
",",
"response",
")",
")",
"consumer_groups",
".",
"update",
"(",
"response",
".",
"groups",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Support for ListGroups v{} has not yet been added to KafkaAdminClient.\"",
".",
"format",
"(",
"version",
")",
")",
"return",
"list",
"(",
"consumer_groups",
")"
] | List all consumer groups known to the cluster.
This returns a list of Consumer Group tuples. The tuples are
composed of the consumer group name and the consumer group protocol
type.
Only consumer groups that store their offsets in Kafka are returned.
The protocol type will be an empty string for groups created using
Kafka < 0.9 APIs because, although they store their offsets in Kafka,
they don't use Kafka for group coordination. For groups created using
Kafka >= 0.9, the protocol type will typically be "consumer".
As soon as any error is encountered, it is immediately raised.
:param broker_ids: A list of broker node_ids to query for consumer
groups. If set to None, will query all brokers in the cluster.
Explicitly specifying broker(s) can be useful for determining which
consumer groups are coordinated by those broker(s). Default: None
:return list: List of tuples of Consumer Groups.
:exception GroupCoordinatorNotAvailableError: The coordinator is not
available, so cannot process requests.
:exception GroupLoadInProgressError: The coordinator is loading and
hence can't process requests. | [
"List",
"all",
"consumer",
"groups",
"known",
"to",
"the",
"cluster",
"."
] | python | train |
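Usage sketch through the public admin client that hosts the method above; the bootstrap address is a placeholder:

    from kafka.admin import KafkaAdminClient

    admin = KafkaAdminClient(bootstrap_servers="localhost:9092")
    for group, protocol_type in admin.list_consumer_groups():
        # an empty protocol_type marks a group created with pre-0.9 APIs (see docstring)
        print(group, protocol_type or "<pre-0.9 group>")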
ianlini/bistiming | bistiming/stopwatch.py | https://github.com/ianlini/bistiming/blob/46a78ec647723c3516fc4fc73f2619ab41f647f2/bistiming/stopwatch.py#L157-L164 | def reset(self):
"""Reset the stopwatch.
"""
self._start_time = None
self._end_time = None
self._elapsed_time = datetime.timedelta()
self._cumulative_elapsed_time = datetime.timedelta()
self.split_elapsed_time = [] | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_start_time",
"=",
"None",
"self",
".",
"_end_time",
"=",
"None",
"self",
".",
"_elapsed_time",
"=",
"datetime",
".",
"timedelta",
"(",
")",
"self",
".",
"_cumulative_elapsed_time",
"=",
"datetime",
".",
"timedelta",
"(",
")",
"self",
".",
"split_elapsed_time",
"=",
"[",
"]"
] | Reset the stopwatch. | [
"Reset",
"the",
"stopwatch",
"."
] | python | train |
mattupstate/flask-security | flask_security/views.py | https://github.com/mattupstate/flask-security/blob/a401fb47018fbbbe0b899ea55afadfd0e3cd847a/flask_security/views.py#L273-L303 | def reset_password(token):
"""View function that handles a reset password request."""
expired, invalid, user = reset_password_token_status(token)
if not user or invalid:
invalid = True
do_flash(*get_message('INVALID_RESET_PASSWORD_TOKEN'))
if expired:
send_reset_password_instructions(user)
do_flash(*get_message('PASSWORD_RESET_EXPIRED', email=user.email,
within=_security.reset_password_within))
if invalid or expired:
return redirect(url_for('forgot_password'))
form = _security.reset_password_form()
if form.validate_on_submit():
after_this_request(_commit)
update_password(user, form.password.data)
do_flash(*get_message('PASSWORD_RESET'))
return redirect(get_url(_security.post_reset_view) or
get_url(_security.login_url))
return _security.render_template(
config_value('RESET_PASSWORD_TEMPLATE'),
reset_password_form=form,
reset_password_token=token,
**_ctx('reset_password')
) | [
"def",
"reset_password",
"(",
"token",
")",
":",
"expired",
",",
"invalid",
",",
"user",
"=",
"reset_password_token_status",
"(",
"token",
")",
"if",
"not",
"user",
"or",
"invalid",
":",
"invalid",
"=",
"True",
"do_flash",
"(",
"*",
"get_message",
"(",
"'INVALID_RESET_PASSWORD_TOKEN'",
")",
")",
"if",
"expired",
":",
"send_reset_password_instructions",
"(",
"user",
")",
"do_flash",
"(",
"*",
"get_message",
"(",
"'PASSWORD_RESET_EXPIRED'",
",",
"email",
"=",
"user",
".",
"email",
",",
"within",
"=",
"_security",
".",
"reset_password_within",
")",
")",
"if",
"invalid",
"or",
"expired",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"'forgot_password'",
")",
")",
"form",
"=",
"_security",
".",
"reset_password_form",
"(",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"after_this_request",
"(",
"_commit",
")",
"update_password",
"(",
"user",
",",
"form",
".",
"password",
".",
"data",
")",
"do_flash",
"(",
"*",
"get_message",
"(",
"'PASSWORD_RESET'",
")",
")",
"return",
"redirect",
"(",
"get_url",
"(",
"_security",
".",
"post_reset_view",
")",
"or",
"get_url",
"(",
"_security",
".",
"login_url",
")",
")",
"return",
"_security",
".",
"render_template",
"(",
"config_value",
"(",
"'RESET_PASSWORD_TEMPLATE'",
")",
",",
"reset_password_form",
"=",
"form",
",",
"reset_password_token",
"=",
"token",
",",
"*",
"*",
"_ctx",
"(",
"'reset_password'",
")",
")"
] | View function that handles a reset password request. | [
"View",
"function",
"that",
"handles",
"a",
"reset",
"password",
"request",
"."
] | python | train |
bmcfee/pumpp | pumpp/feature/base.py | https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/base.py#L151-L167 | def n_frames(self, duration):
'''Get the number of frames for a given duration
Parameters
----------
duration : number >= 0
The duration, in seconds
Returns
-------
n_frames : int >= 0
The number of frames at this extractor's sampling rate and
hop length
'''
return int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length)) | [
"def",
"n_frames",
"(",
"self",
",",
"duration",
")",
":",
"return",
"int",
"(",
"time_to_frames",
"(",
"duration",
",",
"sr",
"=",
"self",
".",
"sr",
",",
"hop_length",
"=",
"self",
".",
"hop_length",
")",
")"
] | Get the number of frames for a given duration
Parameters
----------
duration : number >= 0
The duration, in seconds
Returns
-------
n_frames : int >= 0
The number of frames at this extractor's sampling rate and
hop length | [
"Get",
"the",
"number",
"of",
"frames",
"for",
"a",
"given",
"duration"
] | python | train |
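The conversion above is a thin wrapper over librosa's time_to_frames; a standalone sketch of the same arithmetic with illustrative sr and hop values:

    from librosa import time_to_frames

    sr, hop_length = 22050, 512
    print(int(time_to_frames(5.0, sr=sr, hop_length=hop_length)))
    # floor(5.0 * 22050 / 512) = 215 frames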
jenisys/parse_type | parse_type/parse_util.py | https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/parse_type/parse_util.py#L105-L150 | def extract_format_spec(cls, format):
"""Pull apart the format: [[fill]align][0][width][.precision][type]"""
# -- BASED-ON: parse.extract_format()
# pylint: disable=redefined-builtin, unsubscriptable-object
if not format:
raise ValueError("INVALID-FORMAT: %s (empty-string)" % format)
orig_format = format
fill = align = None
if format[0] in cls.ALIGN_CHARS:
align = format[0]
format = format[1:]
elif len(format) > 1 and format[1] in cls.ALIGN_CHARS:
fill = format[0]
align = format[1]
format = format[2:]
zero = False
if format and format[0] == '0':
zero = True
format = format[1:]
width = ''
while format:
if not format[0].isdigit():
break
width += format[0]
format = format[1:]
precision = None
if format.startswith('.'):
# Precision isn't needed but we need to capture it so that
# the ValueError isn't raised.
format = format[1:] # drop the '.'
precision = ''
while format:
if not format[0].isdigit():
break
precision += format[0]
format = format[1:]
# the rest is the type, if present
type = format
if not type:
raise ValueError("INVALID-FORMAT: %s (without type)" % orig_format)
return FormatSpec(type, width, zero, align, fill, precision) | [
"def",
"extract_format_spec",
"(",
"cls",
",",
"format",
")",
":",
"# -- BASED-ON: parse.extract_format()",
"# pylint: disable=redefined-builtin, unsubscriptable-object",
"if",
"not",
"format",
":",
"raise",
"ValueError",
"(",
"\"INVALID-FORMAT: %s (empty-string)\"",
"%",
"format",
")",
"orig_format",
"=",
"format",
"fill",
"=",
"align",
"=",
"None",
"if",
"format",
"[",
"0",
"]",
"in",
"cls",
".",
"ALIGN_CHARS",
":",
"align",
"=",
"format",
"[",
"0",
"]",
"format",
"=",
"format",
"[",
"1",
":",
"]",
"elif",
"len",
"(",
"format",
")",
">",
"1",
"and",
"format",
"[",
"1",
"]",
"in",
"cls",
".",
"ALIGN_CHARS",
":",
"fill",
"=",
"format",
"[",
"0",
"]",
"align",
"=",
"format",
"[",
"1",
"]",
"format",
"=",
"format",
"[",
"2",
":",
"]",
"zero",
"=",
"False",
"if",
"format",
"and",
"format",
"[",
"0",
"]",
"==",
"'0'",
":",
"zero",
"=",
"True",
"format",
"=",
"format",
"[",
"1",
":",
"]",
"width",
"=",
"''",
"while",
"format",
":",
"if",
"not",
"format",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"break",
"width",
"+=",
"format",
"[",
"0",
"]",
"format",
"=",
"format",
"[",
"1",
":",
"]",
"precision",
"=",
"None",
"if",
"format",
".",
"startswith",
"(",
"'.'",
")",
":",
"# Precision isn't needed but we need to capture it so that",
"# the ValueError isn't raised.",
"format",
"=",
"format",
"[",
"1",
":",
"]",
"# drop the '.'",
"precision",
"=",
"''",
"while",
"format",
":",
"if",
"not",
"format",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"break",
"precision",
"+=",
"format",
"[",
"0",
"]",
"format",
"=",
"format",
"[",
"1",
":",
"]",
"# the rest is the type, if present",
"type",
"=",
"format",
"if",
"not",
"type",
":",
"raise",
"ValueError",
"(",
"\"INVALID-FORMAT: %s (without type)\"",
"%",
"orig_format",
")",
"return",
"FormatSpec",
"(",
"type",
",",
"width",
",",
"zero",
",",
"align",
",",
"fill",
",",
"precision",
")"
] | Pull apart the format: [[fill]align][0][width][.precision][type] | [
"Pull",
"apart",
"the",
"format",
":",
"[[",
"fill",
"]",
"align",
"]",
"[",
"0",
"]",
"[",
"width",
"]",
"[",
".",
"precision",
"]",
"[",
"type",
"]"
] | python | train |
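A sketch of what the classmethod above yields for a full spec. The owning class is not shown in the record, so 'SomeParser' is a stand-in name, and the FormatSpec field names are assumed to match the constructor's positional arguments:

    spec = SomeParser.extract_format_spec("0>10.2f")  # 'SomeParser' is a stand-in
    # fill='0', align='>', zero=False, width='10', precision='2', type='f'
    assert (spec.fill, spec.align, spec.width, spec.precision, spec.type) == \
           ("0", ">", "10", "2", "f")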
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L293-L415 | def attack_batch(self, imgs, labs):
"""
Run the attack on a batch of instances and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
oimgs = np.clip(imgs, self.clip_min, self.clip_max)
# re-scale instances to be within range [0, 1]
imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
imgs = np.clip(imgs, 0, 1)
# now convert to [-1, 1]
imgs = (imgs * 2) - 1
# convert to tanh-space
imgs = np.arctanh(imgs * .999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best l2, score, and instance attack found so far
o_bestl2 = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step %s of %s",
outer_step, self.BINARY_SEARCH_STEPS)
# The last iteration (if we run many steps) repeat the search once.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(
self.setup, {
self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST
})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([
self.train, self.loss, self.l2dist, self.output,
self.newimg
])
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} f={:.3g}").format(
iteration, self.MAX_ITERATIONS, l,
np.mean(l2s), np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
lab = np.argmax(batchlab[e])
if l2 < bestl2[e] and compare(sc, lab):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if l2 < o_bestl2[e] and compare(sc, lab):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".format(
sum(upper_bound < 1e9), batch_size))
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack | [
"def",
"attack_batch",
"(",
"self",
",",
"imgs",
",",
"labs",
")",
":",
"def",
"compare",
"(",
"x",
",",
"y",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"(",
"float",
",",
"int",
",",
"np",
".",
"int64",
")",
")",
":",
"x",
"=",
"np",
".",
"copy",
"(",
"x",
")",
"if",
"self",
".",
"TARGETED",
":",
"x",
"[",
"y",
"]",
"-=",
"self",
".",
"CONFIDENCE",
"else",
":",
"x",
"[",
"y",
"]",
"+=",
"self",
".",
"CONFIDENCE",
"x",
"=",
"np",
".",
"argmax",
"(",
"x",
")",
"if",
"self",
".",
"TARGETED",
":",
"return",
"x",
"==",
"y",
"else",
":",
"return",
"x",
"!=",
"y",
"batch_size",
"=",
"self",
".",
"batch_size",
"oimgs",
"=",
"np",
".",
"clip",
"(",
"imgs",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
")",
"# re-scale instances to be within range [0, 1]",
"imgs",
"=",
"(",
"imgs",
"-",
"self",
".",
"clip_min",
")",
"/",
"(",
"self",
".",
"clip_max",
"-",
"self",
".",
"clip_min",
")",
"imgs",
"=",
"np",
".",
"clip",
"(",
"imgs",
",",
"0",
",",
"1",
")",
"# now convert to [-1, 1]",
"imgs",
"=",
"(",
"imgs",
"*",
"2",
")",
"-",
"1",
"# convert to tanh-space",
"imgs",
"=",
"np",
".",
"arctanh",
"(",
"imgs",
"*",
".999999",
")",
"# set the lower and upper bounds accordingly",
"lower_bound",
"=",
"np",
".",
"zeros",
"(",
"batch_size",
")",
"CONST",
"=",
"np",
".",
"ones",
"(",
"batch_size",
")",
"*",
"self",
".",
"initial_const",
"upper_bound",
"=",
"np",
".",
"ones",
"(",
"batch_size",
")",
"*",
"1e10",
"# placeholders for the best l2, score, and instance attack found so far",
"o_bestl2",
"=",
"[",
"1e10",
"]",
"*",
"batch_size",
"o_bestscore",
"=",
"[",
"-",
"1",
"]",
"*",
"batch_size",
"o_bestattack",
"=",
"np",
".",
"copy",
"(",
"oimgs",
")",
"for",
"outer_step",
"in",
"range",
"(",
"self",
".",
"BINARY_SEARCH_STEPS",
")",
":",
"# completely reset adam's internal state.",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"init",
")",
"batch",
"=",
"imgs",
"[",
":",
"batch_size",
"]",
"batchlab",
"=",
"labs",
"[",
":",
"batch_size",
"]",
"bestl2",
"=",
"[",
"1e10",
"]",
"*",
"batch_size",
"bestscore",
"=",
"[",
"-",
"1",
"]",
"*",
"batch_size",
"_logger",
".",
"debug",
"(",
"\" Binary search step %s of %s\"",
",",
"outer_step",
",",
"self",
".",
"BINARY_SEARCH_STEPS",
")",
"# The last iteration (if we run many steps) repeat the search once.",
"if",
"self",
".",
"repeat",
"and",
"outer_step",
"==",
"self",
".",
"BINARY_SEARCH_STEPS",
"-",
"1",
":",
"CONST",
"=",
"upper_bound",
"# set the variables so that we don't have to send them over again",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"setup",
",",
"{",
"self",
".",
"assign_timg",
":",
"batch",
",",
"self",
".",
"assign_tlab",
":",
"batchlab",
",",
"self",
".",
"assign_const",
":",
"CONST",
"}",
")",
"prev",
"=",
"1e6",
"for",
"iteration",
"in",
"range",
"(",
"self",
".",
"MAX_ITERATIONS",
")",
":",
"# perform the attack",
"_",
",",
"l",
",",
"l2s",
",",
"scores",
",",
"nimg",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"train",
",",
"self",
".",
"loss",
",",
"self",
".",
"l2dist",
",",
"self",
".",
"output",
",",
"self",
".",
"newimg",
"]",
")",
"if",
"iteration",
"%",
"(",
"(",
"self",
".",
"MAX_ITERATIONS",
"//",
"10",
")",
"or",
"1",
")",
"==",
"0",
":",
"_logger",
".",
"debug",
"(",
"(",
"\" Iteration {} of {}: loss={:.3g} \"",
"+",
"\"l2={:.3g} f={:.3g}\"",
")",
".",
"format",
"(",
"iteration",
",",
"self",
".",
"MAX_ITERATIONS",
",",
"l",
",",
"np",
".",
"mean",
"(",
"l2s",
")",
",",
"np",
".",
"mean",
"(",
"scores",
")",
")",
")",
"# check if we should abort search if we're getting nowhere.",
"if",
"self",
".",
"ABORT_EARLY",
"and",
"iteration",
"%",
"(",
"(",
"self",
".",
"MAX_ITERATIONS",
"//",
"10",
")",
"or",
"1",
")",
"==",
"0",
":",
"if",
"l",
">",
"prev",
"*",
".9999",
":",
"msg",
"=",
"\" Failed to make progress; stop early\"",
"_logger",
".",
"debug",
"(",
"msg",
")",
"break",
"prev",
"=",
"l",
"# adjust the best result found so far",
"for",
"e",
",",
"(",
"l2",
",",
"sc",
",",
"ii",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"l2s",
",",
"scores",
",",
"nimg",
")",
")",
":",
"lab",
"=",
"np",
".",
"argmax",
"(",
"batchlab",
"[",
"e",
"]",
")",
"if",
"l2",
"<",
"bestl2",
"[",
"e",
"]",
"and",
"compare",
"(",
"sc",
",",
"lab",
")",
":",
"bestl2",
"[",
"e",
"]",
"=",
"l2",
"bestscore",
"[",
"e",
"]",
"=",
"np",
".",
"argmax",
"(",
"sc",
")",
"if",
"l2",
"<",
"o_bestl2",
"[",
"e",
"]",
"and",
"compare",
"(",
"sc",
",",
"lab",
")",
":",
"o_bestl2",
"[",
"e",
"]",
"=",
"l2",
"o_bestscore",
"[",
"e",
"]",
"=",
"np",
".",
"argmax",
"(",
"sc",
")",
"o_bestattack",
"[",
"e",
"]",
"=",
"ii",
"# adjust the constant as needed",
"for",
"e",
"in",
"range",
"(",
"batch_size",
")",
":",
"if",
"compare",
"(",
"bestscore",
"[",
"e",
"]",
",",
"np",
".",
"argmax",
"(",
"batchlab",
"[",
"e",
"]",
")",
")",
"and",
"bestscore",
"[",
"e",
"]",
"!=",
"-",
"1",
":",
"# success, divide const by two",
"upper_bound",
"[",
"e",
"]",
"=",
"min",
"(",
"upper_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"# failure, either multiply by 10 if no solution found yet",
"# or do binary search with the known upper bound",
"lower_bound",
"[",
"e",
"]",
"=",
"max",
"(",
"lower_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"CONST",
"[",
"e",
"]",
"*=",
"10",
"_logger",
".",
"debug",
"(",
"\" Successfully generated adversarial examples \"",
"+",
"\"on {} of {} instances.\"",
".",
"format",
"(",
"sum",
"(",
"upper_bound",
"<",
"1e9",
")",
",",
"batch_size",
")",
")",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sqrt",
"(",
"o_bestl2",
"[",
"o_bestl2",
"<",
"1e9",
"]",
")",
")",
"_logger",
".",
"debug",
"(",
"\" Mean successful distortion: {:.4g}\"",
".",
"format",
"(",
"mean",
")",
")",
"# return the best solution found",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"return",
"o_bestattack"
] | Run the attack on a batch of instances and labels. | [
"Run",
"the",
"attack",
"on",
"a",
"batch",
"of",
"instance",
"and",
"labels",
"."
] | python | train |
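
The constant update above implements a per-example binary search over the Carlini-Wagner trade-off constant c. A minimal standalone sketch of that schedule, reduced to scalars; `attack_succeeded` is a hypothetical stand-in for the compare() check in the code:

def update_const(const, lower, upper, attack_succeeded):
    if attack_succeeded:
        # Success: c was large enough, so shrink the upper bound.
        upper = min(upper, const)
    else:
        # Failure: c was too small, so raise the lower bound.
        lower = max(lower, const)
    if upper < 1e9:
        # A success has been seen before: bisect between the bounds.
        const = (lower + upper) / 2
    else:
        # No success yet: grow c geometrically.
        const *= 10
    return const, lower, upper
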
jschaf/pylint-flask | pylint_flask/__init__.py | https://github.com/jschaf/pylint-flask/blob/3851d142679facbc60b4755dc7fb5428aafdebe7/pylint_flask/__init__.py#L112-L132 | def transform_flask_bare_import(node):
'''Translates a flask.ext.* bare import into a non-magical import.
Translates:
import flask.ext.admin as admin
Into:
import flask_admin as admin
'''
new_names = []
for (name, as_name) in node.names:
match = re.match(r'flask\.ext\.(.*)', name)
from_name = match.group(1)
actual_module_name = 'flask_{}'.format(from_name)
new_names.append((actual_module_name, as_name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node | [
"def",
"transform_flask_bare_import",
"(",
"node",
")",
":",
"new_names",
"=",
"[",
"]",
"for",
"(",
"name",
",",
"as_name",
")",
"in",
"node",
".",
"names",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'flask\\.ext\\.(.*)'",
",",
"name",
")",
"from_name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"actual_module_name",
"=",
"'flask_{}'",
".",
"format",
"(",
"from_name",
")",
"new_names",
".",
"append",
"(",
"(",
"actual_module_name",
",",
"as_name",
")",
")",
"new_node",
"=",
"nodes",
".",
"Import",
"(",
")",
"copy_node_info",
"(",
"node",
",",
"new_node",
")",
"new_node",
".",
"names",
"=",
"new_names",
"mark_transformed",
"(",
"new_node",
")",
"return",
"new_node"
] | Translates a flask.ext.wtf bare import into a non-magical import.
] | Translates a flask.ext.* bare import into a non-magical import.
import flask.ext.admin as admin
Into:
import flask_admin as admin | [
"Translates",
"a",
"flask",
".",
"ext",
".",
"wtf",
"bare",
"import",
"into",
"a",
"non",
"-",
"magical",
"import",
"."
] | python | train |
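
A transform like this only takes effect once it is registered with astroid so pylint applies it while building the AST. A hedged sketch of the wiring; `is_flask_ext_import` is a hypothetical predicate, not part of the snippet above:

from astroid import MANAGER, nodes

def is_flask_ext_import(node):
    # Only rewrite plain `import flask.ext.*` statements.
    return any(name.startswith('flask.ext.') for name, _ in node.names)

MANAGER.register_transform(nodes.Import, transform_flask_bare_import,
                           is_flask_ext_import)
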
pantsbuild/pants | src/python/pants/goal/products.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/products.py#L452-L472 | def get_only(self, product_type, target):
"""If there is exactly one product for the given product type and target, returns the
full filepath of said product.
Otherwise, raises a ProductError.
Useful for retrieving the filepath for the executable of a binary target.
:API: public
"""
product_mapping = self.get(product_type).get(target)
if len(product_mapping) != 1:
raise ProductError('{} directories in product mapping: requires exactly 1.'
.format(len(product_mapping)))
for _, files in product_mapping.items():
if len(files) != 1:
raise ProductError('{} files in target directory: requires exactly 1.'
.format(len(files)))
return files[0] | [
"def",
"get_only",
"(",
"self",
",",
"product_type",
",",
"target",
")",
":",
"product_mapping",
"=",
"self",
".",
"get",
"(",
"product_type",
")",
".",
"get",
"(",
"target",
")",
"if",
"len",
"(",
"product_mapping",
")",
"!=",
"1",
":",
"raise",
"ProductError",
"(",
"'{} directories in product mapping: requires exactly 1.'",
".",
"format",
"(",
"len",
"(",
"product_mapping",
")",
")",
")",
"for",
"_",
",",
"files",
"in",
"product_mapping",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"files",
")",
"!=",
"1",
":",
"raise",
"ProductError",
"(",
"'{} files in target directory: requires exactly 1.'",
".",
"format",
"(",
"len",
"(",
"files",
")",
")",
")",
"return",
"files",
"[",
"0",
"]"
] | If there is exactly one product for the given product type and target, returns the
full filepath of said product.
Otherwise, raises a ProductError.
Useful for retrieving the filepath for the executable of a binary target.
:API: public | [
"If",
"there",
"is",
"exactly",
"one",
"product",
"for",
"the",
"given",
"product",
"type",
"and",
"target",
"returns",
"the",
"full",
"filepath",
"of",
"said",
"product",
"."
] | python | train |
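
A hypothetical call site, assuming `products` is the task's Products instance and `binary_target` produced exactly one artifact of the requested type:

binary_path = products.get_only('deployable_archives', binary_target)
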
Hackerfleet/hfos | hfos/ui/auth.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/auth.py#L145-L188 | def _handle_autologin(self, event):
"""Automatic logins for client configurations that allow it"""
self.log("Verifying automatic login request")
# TODO: Check for a common secret
# noinspection PyBroadException
try:
client_config = objectmodels['client'].find_one({
'uuid': event.requestedclientuuid
})
except Exception:
client_config = None
if client_config is None or client_config.autologin is False:
self.log("Autologin failed:", event.requestedclientuuid,
lvl=error)
self._fail(event)
return
try:
user_account = objectmodels['user'].find_one({
'uuid': client_config.owner
})
if user_account is None:
raise AuthenticationError
self.log("Autologin for", user_account.name, lvl=debug)
except Exception as e:
self.log("No user object due to error: ", e, type(e),
lvl=error)
self._fail(event)
return
if user_account.active is False:
self.log("Account deactivated.")
self._fail(event, 'Account deactivated.')
return
user_profile = self._get_profile(user_account)
self._login(event, user_account, user_profile, client_config)
self.log("Autologin successful!", lvl=warn) | [
"def",
"_handle_autologin",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"log",
"(",
"\"Verifying automatic login request\"",
")",
"# TODO: Check for a common secret",
"# noinspection PyBroadException",
"try",
":",
"client_config",
"=",
"objectmodels",
"[",
"'client'",
"]",
".",
"find_one",
"(",
"{",
"'uuid'",
":",
"event",
".",
"requestedclientuuid",
"}",
")",
"except",
"Exception",
":",
"client_config",
"=",
"None",
"if",
"client_config",
"is",
"None",
"or",
"client_config",
".",
"autologin",
"is",
"False",
":",
"self",
".",
"log",
"(",
"\"Autologin failed:\"",
",",
"event",
".",
"requestedclientuuid",
",",
"lvl",
"=",
"error",
")",
"self",
".",
"_fail",
"(",
"event",
")",
"return",
"try",
":",
"user_account",
"=",
"objectmodels",
"[",
"'user'",
"]",
".",
"find_one",
"(",
"{",
"'uuid'",
":",
"client_config",
".",
"owner",
"}",
")",
"if",
"user_account",
"is",
"None",
":",
"raise",
"AuthenticationError",
"self",
".",
"log",
"(",
"\"Autologin for\"",
",",
"user_account",
".",
"name",
",",
"lvl",
"=",
"debug",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"No user object due to error: \"",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"lvl",
"=",
"error",
")",
"self",
".",
"_fail",
"(",
"event",
")",
"return",
"if",
"user_account",
".",
"active",
"is",
"False",
":",
"self",
".",
"log",
"(",
"\"Account deactivated.\"",
")",
"self",
".",
"_fail",
"(",
"event",
",",
"'Account deactivated.'",
")",
"return",
"user_profile",
"=",
"self",
".",
"_get_profile",
"(",
"user_account",
")",
"self",
".",
"_login",
"(",
"event",
",",
"user_account",
",",
"user_profile",
",",
"client_config",
")",
"self",
".",
"log",
"(",
"\"Autologin successful!\"",
",",
"lvl",
"=",
"warn",
")"
] | Automatic logins for client configurations that allow it | [
"Automatic",
"logins",
"for",
"client",
"configurations",
"that",
"allow",
"it"
] | python | train |
pandas-dev/pandas | pandas/core/strings.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L2564-L2625 | def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = str_pad(self._parent, width, side='left', fillchar='0')
return self._wrap_result(result) | [
"def",
"zfill",
"(",
"self",
",",
"width",
")",
":",
"result",
"=",
"str_pad",
"(",
"self",
".",
"_parent",
",",
"width",
",",
"side",
"=",
"'left'",
",",
"fillchar",
"=",
"'0'",
")",
"return",
"self",
".",
"_wrap_result",
"(",
"result",
")"
] | Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object | [
"Pad",
"strings",
"in",
"the",
"Series",
"/",
"Index",
"by",
"prepending",
"0",
"characters",
"."
] | python | train |
ungarj/mapchete | mapchete/io/raster.py | https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L487-L556 | def resample_from_array(
in_raster=None,
in_affine=None,
out_tile=None,
in_crs=None,
resampling="nearest",
nodataval=0
):
"""
Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array
"""
# TODO rename function
if isinstance(in_raster, ma.MaskedArray):
pass
elif isinstance(in_raster, np.ndarray):
in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
elif isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_crs = in_raster.crs
in_raster = in_raster.data
elif isinstance(in_raster, tuple):
in_raster = ma.MaskedArray(
data=np.stack(in_raster),
mask=np.stack([
band.mask
if isinstance(band, ma.masked_array)
else np.where(band == nodataval, True, False)
for band in in_raster
]),
fill_value=nodataval
)
else:
raise TypeError("wrong input data type: %s" % type(in_raster))
if in_raster.ndim == 2:
in_raster = ma.expand_dims(in_raster, axis=0)
elif in_raster.ndim == 3:
pass
else:
raise TypeError("input array must have 2 or 3 dimensions")
if in_raster.fill_value != nodataval:
ma.set_fill_value(in_raster, nodataval)
out_shape = (in_raster.shape[0], ) + out_tile.shape
dst_data = np.empty(out_shape, in_raster.dtype)
in_raster = ma.masked_array(
data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval
)
reproject(
in_raster,
dst_data,
src_transform=in_affine,
src_crs=in_crs if in_crs else out_tile.crs,
dst_transform=out_tile.affine,
dst_crs=out_tile.crs,
resampling=Resampling[resampling]
)
return ma.MaskedArray(dst_data, mask=dst_data == nodataval) | [
"def",
"resample_from_array",
"(",
"in_raster",
"=",
"None",
",",
"in_affine",
"=",
"None",
",",
"out_tile",
"=",
"None",
",",
"in_crs",
"=",
"None",
",",
"resampling",
"=",
"\"nearest\"",
",",
"nodataval",
"=",
"0",
")",
":",
"# TODO rename function",
"if",
"isinstance",
"(",
"in_raster",
",",
"ma",
".",
"MaskedArray",
")",
":",
"pass",
"if",
"isinstance",
"(",
"in_raster",
",",
"np",
".",
"ndarray",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"in_raster",
",",
"mask",
"=",
"in_raster",
"==",
"nodataval",
")",
"elif",
"isinstance",
"(",
"in_raster",
",",
"ReferencedRaster",
")",
":",
"in_affine",
"=",
"in_raster",
".",
"affine",
"in_crs",
"=",
"in_raster",
".",
"crs",
"in_raster",
"=",
"in_raster",
".",
"data",
"elif",
"isinstance",
"(",
"in_raster",
",",
"tuple",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"data",
"=",
"np",
".",
"stack",
"(",
"in_raster",
")",
",",
"mask",
"=",
"np",
".",
"stack",
"(",
"[",
"band",
".",
"mask",
"if",
"isinstance",
"(",
"band",
",",
"ma",
".",
"masked_array",
")",
"else",
"np",
".",
"where",
"(",
"band",
"==",
"nodataval",
",",
"True",
",",
"False",
")",
"for",
"band",
"in",
"in_raster",
"]",
")",
",",
"fill_value",
"=",
"nodataval",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"wrong input data type: %s\"",
"%",
"type",
"(",
"in_raster",
")",
")",
"if",
"in_raster",
".",
"ndim",
"==",
"2",
":",
"in_raster",
"=",
"ma",
".",
"expand_dims",
"(",
"in_raster",
",",
"axis",
"=",
"0",
")",
"elif",
"in_raster",
".",
"ndim",
"==",
"3",
":",
"pass",
"else",
":",
"raise",
"TypeError",
"(",
"\"input array must have 2 or 3 dimensions\"",
")",
"if",
"in_raster",
".",
"fill_value",
"!=",
"nodataval",
":",
"ma",
".",
"set_fill_value",
"(",
"in_raster",
",",
"nodataval",
")",
"out_shape",
"=",
"(",
"in_raster",
".",
"shape",
"[",
"0",
"]",
",",
")",
"+",
"out_tile",
".",
"shape",
"dst_data",
"=",
"np",
".",
"empty",
"(",
"out_shape",
",",
"in_raster",
".",
"dtype",
")",
"in_raster",
"=",
"ma",
".",
"masked_array",
"(",
"data",
"=",
"in_raster",
".",
"filled",
"(",
")",
",",
"mask",
"=",
"in_raster",
".",
"mask",
",",
"fill_value",
"=",
"nodataval",
")",
"reproject",
"(",
"in_raster",
",",
"dst_data",
",",
"src_transform",
"=",
"in_affine",
",",
"src_crs",
"=",
"in_crs",
"if",
"in_crs",
"else",
"out_tile",
".",
"crs",
",",
"dst_transform",
"=",
"out_tile",
".",
"affine",
",",
"dst_crs",
"=",
"out_tile",
".",
"crs",
",",
"resampling",
"=",
"Resampling",
"[",
"resampling",
"]",
")",
"return",
"ma",
".",
"MaskedArray",
"(",
"dst_data",
",",
"mask",
"=",
"dst_data",
"==",
"nodataval",
")"
] | Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array | [
"Extract",
"and",
"resample",
"from",
"array",
"to",
"target",
"tile",
"."
] | python | valid |
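
A sketch of a typical call, assuming `src` is a rasterio dataset opened elsewhere and `tile` is a BufferedTile:

resampled = resample_from_array(
    in_raster=src.read(masked=True),  # handled by the MaskedArray branch
    in_affine=src.transform,
    out_tile=tile,
    in_crs=src.crs,
    resampling="bilinear",
)
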
tensorpack/tensorpack | examples/FasterRCNN/data.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L30-L50 | def print_class_histogram(roidbs):
"""
Args:
roidbs (list[dict]): the same format as the output of `load_training_roidbs`.
"""
dataset = DetectionDataset()
hist_bins = np.arange(dataset.num_classes + 1)
# Histogram of ground-truth objects
gt_hist = np.zeros((dataset.num_classes,), dtype=np.int)
for entry in roidbs:
# filter crowd?
gt_inds = np.where(
(entry['class'] > 0) & (entry['is_crowd'] == 0))[0]
gt_classes = entry['class'][gt_inds]
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
data = [[dataset.class_names[i], v] for i, v in enumerate(gt_hist)]
data.append(['total', sum(x[1] for x in data)])
# the first line is BG
table = tabulate(data[1:], headers=['class', '#box'], tablefmt='pipe')
logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan')) | [
"def",
"print_class_histogram",
"(",
"roidbs",
")",
":",
"dataset",
"=",
"DetectionDataset",
"(",
")",
"hist_bins",
"=",
"np",
".",
"arange",
"(",
"dataset",
".",
"num_classes",
"+",
"1",
")",
"# Histogram of ground-truth objects",
"gt_hist",
"=",
"np",
".",
"zeros",
"(",
"(",
"dataset",
".",
"num_classes",
",",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"entry",
"in",
"roidbs",
":",
"# filter crowd?",
"gt_inds",
"=",
"np",
".",
"where",
"(",
"(",
"entry",
"[",
"'class'",
"]",
">",
"0",
")",
"&",
"(",
"entry",
"[",
"'is_crowd'",
"]",
"==",
"0",
")",
")",
"[",
"0",
"]",
"gt_classes",
"=",
"entry",
"[",
"'class'",
"]",
"[",
"gt_inds",
"]",
"gt_hist",
"+=",
"np",
".",
"histogram",
"(",
"gt_classes",
",",
"bins",
"=",
"hist_bins",
")",
"[",
"0",
"]",
"data",
"=",
"[",
"[",
"dataset",
".",
"class_names",
"[",
"i",
"]",
",",
"v",
"]",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"gt_hist",
")",
"]",
"data",
".",
"append",
"(",
"[",
"'total'",
",",
"sum",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"data",
")",
"]",
")",
"# the first line is BG",
"table",
"=",
"tabulate",
"(",
"data",
"[",
"1",
":",
"]",
",",
"headers",
"=",
"[",
"'class'",
",",
"'#box'",
"]",
",",
"tablefmt",
"=",
"'pipe'",
")",
"logger",
".",
"info",
"(",
"\"Ground-Truth Boxes:\\n\"",
"+",
"colored",
"(",
"table",
",",
"'cyan'",
")",
")"
] | Args:
roidbs (list[dict]): the same format as the output of `load_training_roidbs`. | [
"Args",
":",
"roidbs",
"(",
"list",
"[",
"dict",
"]",
")",
":",
"the",
"same",
"format",
"as",
"the",
"output",
"of",
"load_training_roidbs",
"."
] | python | train |
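
A minimal illustration of the roidb format the function expects; all values below are made up:

import numpy as np

roidbs = [{
    'class': np.array([1, 2, 2]),     # per-box category ids, 0 is BG
    'is_crowd': np.array([0, 0, 1]),  # crowd boxes are excluded
}]
print_class_histogram(roidbs)         # logs a per-class box count table
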
tensorforce/tensorforce | tensorforce/models/model.py | https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/models/model.py#L237-L368 | def setup(self):
"""
Sets up the TensorFlow model graph, starts the servers (distributed mode), creates summarizers
and savers, initializes (and enters) the TensorFlow session.
"""
# Create/get our graph, setup local model/global model links, set scope and device.
graph_default_context = self.setup_graph()
# Start a tf Server (in case of distributed setup). Only start once.
if self.execution_type == "distributed" and self.server is None and self.is_local_model:
self.start_server()
# build the graph
with tf.device(device_name_or_function=self.device):
with tf.variable_scope(name_or_scope=self.scope, reuse=False):
# Variables and summaries
self.variables = dict()
self.all_variables = dict()
self.registered_variables = set()
# Build the graph's placeholders, tf_functions, etc
self.setup_placeholders()
# Create model's "external" components.
# Create tensorflow functions from "tf_"-methods.
self.setup_components_and_tf_funcs()
# Create core variables (timestep, episode counters, buffers for states/actions/internals).
self.fn_initialize()
if self.summarizer_spec is not None:
with tf.name_scope(name='summarizer'):
self.summarizer = tf.contrib.summary.create_file_writer(
logdir=self.summarizer_spec['directory'],
max_queue=None,
flush_millis=(self.summarizer_spec.get('flush', 10) * 1000),
filename_suffix=None,
name=None
)
default_summarizer = self.summarizer.as_default()
# Problem: not all parts of the graph are called on every step
assert 'steps' not in self.summarizer_spec
# if 'steps' in self.summarizer_spec:
# record_summaries = tf.contrib.summary.record_summaries_every_n_global_steps(
# n=self.summarizer_spec['steps'],
# global_step=self.global_timestep
# )
# else:
record_summaries = tf.contrib.summary.always_record_summaries()
default_summarizer.__enter__()
record_summaries.__enter__()
# Input tensors
states = util.map_tensors(fn=tf.identity, tensors=self.states_input)
internals = util.map_tensors(fn=tf.identity, tensors=self.internals_input)
actions = util.map_tensors(fn=tf.identity, tensors=self.actions_input)
terminal = tf.identity(input=self.terminal_input)
reward = tf.identity(input=self.reward_input)
# Probably both deterministic and independent should be the same at some point.
deterministic = tf.identity(input=self.deterministic_input)
independent = tf.identity(input=self.independent_input)
episode_index = tf.identity(input=self.episode_index_input)
states, actions, reward = self.fn_preprocess(states=states, actions=actions, reward=reward)
self.create_operations(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
deterministic=deterministic,
independent=independent,
index=episode_index
)
# Add all summaries specified in summary_labels
if 'inputs' in self.summary_labels or 'states' in self.summary_labels:
for name in sorted(states):
tf.contrib.summary.histogram(name=('states-' + name), tensor=states[name])
if 'inputs' in self.summary_labels or 'actions' in self.summary_labels:
for name in sorted(actions):
tf.contrib.summary.histogram(name=('actions-' + name), tensor=actions[name])
if 'inputs' in self.summary_labels or 'reward' in self.summary_labels:
tf.contrib.summary.histogram(name='reward', tensor=reward)
if 'graph' in self.summary_labels:
with tf.name_scope(name='summarizer'):
graph_def = self.graph.as_graph_def()
graph_str = tf.constant(
value=graph_def.SerializeToString(),
dtype=tf.string,
shape=()
)
self.graph_summary = tf.contrib.summary.graph(
param=graph_str,
step=self.global_timestep
)
if 'meta_param_recorder_class' in self.summarizer_spec:
self.graph_summary = tf.group(
self.graph_summary,
*self.summarizer_spec['meta_param_recorder_class'].build_metagraph_list()
)
if self.summarizer_spec is not None:
record_summaries.__exit__(None, None, None)
default_summarizer.__exit__(None, None, None)
with tf.name_scope(name='summarizer'):
self.flush_summarizer = tf.contrib.summary.flush()
self.summarizer_init_op = tf.contrib.summary.summary_writer_initializer_op()
assert len(self.summarizer_init_op) == 1
self.summarizer_init_op = self.summarizer_init_op[0]
# If we are a global model -> return here.
# Saving, syncing, finalizing graph, session is done by local replica model.
if self.execution_type == "distributed" and not self.is_local_model:
return
# Saver/Summary -> Scaffold.
self.setup_saver()
self.setup_scaffold()
# Create necessary hooks for the upcoming session.
hooks = self.setup_hooks()
# We are done constructing: Finalize our graph, create and enter the session.
self.setup_session(self.server, hooks, graph_default_context) | [
"def",
"setup",
"(",
"self",
")",
":",
"# Create/get our graph, setup local model/global model links, set scope and device.",
"graph_default_context",
"=",
"self",
".",
"setup_graph",
"(",
")",
"# Start a tf Server (in case of distributed setup). Only start once.",
"if",
"self",
".",
"execution_type",
"==",
"\"distributed\"",
"and",
"self",
".",
"server",
"is",
"None",
"and",
"self",
".",
"is_local_model",
":",
"self",
".",
"start_server",
"(",
")",
"# build the graph",
"with",
"tf",
".",
"device",
"(",
"device_name_or_function",
"=",
"self",
".",
"device",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name_or_scope",
"=",
"self",
".",
"scope",
",",
"reuse",
"=",
"False",
")",
":",
"# Variables and summaries",
"self",
".",
"variables",
"=",
"dict",
"(",
")",
"self",
".",
"all_variables",
"=",
"dict",
"(",
")",
"self",
".",
"registered_variables",
"=",
"set",
"(",
")",
"# Build the graph's placeholders, tf_functions, etc",
"self",
".",
"setup_placeholders",
"(",
")",
"# Create model's \"external\" components.",
"# Create tensorflow functions from \"tf_\"-methods.",
"self",
".",
"setup_components_and_tf_funcs",
"(",
")",
"# Create core variables (timestep, episode counters, buffers for states/actions/internals).",
"self",
".",
"fn_initialize",
"(",
")",
"if",
"self",
".",
"summarizer_spec",
"is",
"not",
"None",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"=",
"'summarizer'",
")",
":",
"self",
".",
"summarizer",
"=",
"tf",
".",
"contrib",
".",
"summary",
".",
"create_file_writer",
"(",
"logdir",
"=",
"self",
".",
"summarizer_spec",
"[",
"'directory'",
"]",
",",
"max_queue",
"=",
"None",
",",
"flush_millis",
"=",
"(",
"self",
".",
"summarizer_spec",
".",
"get",
"(",
"'flush'",
",",
"10",
")",
"*",
"1000",
")",
",",
"filename_suffix",
"=",
"None",
",",
"name",
"=",
"None",
")",
"default_summarizer",
"=",
"self",
".",
"summarizer",
".",
"as_default",
"(",
")",
"# Problem: not all parts of the graph are called on every step",
"assert",
"'steps'",
"not",
"in",
"self",
".",
"summarizer_spec",
"# if 'steps' in self.summarizer_spec:",
"# record_summaries = tf.contrib.summary.record_summaries_every_n_global_steps(",
"# n=self.summarizer_spec['steps'],",
"# global_step=self.global_timestep",
"# )",
"# else:",
"record_summaries",
"=",
"tf",
".",
"contrib",
".",
"summary",
".",
"always_record_summaries",
"(",
")",
"default_summarizer",
".",
"__enter__",
"(",
")",
"record_summaries",
".",
"__enter__",
"(",
")",
"# Input tensors",
"states",
"=",
"util",
".",
"map_tensors",
"(",
"fn",
"=",
"tf",
".",
"identity",
",",
"tensors",
"=",
"self",
".",
"states_input",
")",
"internals",
"=",
"util",
".",
"map_tensors",
"(",
"fn",
"=",
"tf",
".",
"identity",
",",
"tensors",
"=",
"self",
".",
"internals_input",
")",
"actions",
"=",
"util",
".",
"map_tensors",
"(",
"fn",
"=",
"tf",
".",
"identity",
",",
"tensors",
"=",
"self",
".",
"actions_input",
")",
"terminal",
"=",
"tf",
".",
"identity",
"(",
"input",
"=",
"self",
".",
"terminal_input",
")",
"reward",
"=",
"tf",
".",
"identity",
"(",
"input",
"=",
"self",
".",
"reward_input",
")",
"# Probably both deterministic and independent should be the same at some point.",
"deterministic",
"=",
"tf",
".",
"identity",
"(",
"input",
"=",
"self",
".",
"deterministic_input",
")",
"independent",
"=",
"tf",
".",
"identity",
"(",
"input",
"=",
"self",
".",
"independent_input",
")",
"episode_index",
"=",
"tf",
".",
"identity",
"(",
"input",
"=",
"self",
".",
"episode_index_input",
")",
"states",
",",
"actions",
",",
"reward",
"=",
"self",
".",
"fn_preprocess",
"(",
"states",
"=",
"states",
",",
"actions",
"=",
"actions",
",",
"reward",
"=",
"reward",
")",
"self",
".",
"create_operations",
"(",
"states",
"=",
"states",
",",
"internals",
"=",
"internals",
",",
"actions",
"=",
"actions",
",",
"terminal",
"=",
"terminal",
",",
"reward",
"=",
"reward",
",",
"deterministic",
"=",
"deterministic",
",",
"independent",
"=",
"independent",
",",
"index",
"=",
"episode_index",
")",
"# Add all summaries specified in summary_labels",
"if",
"'inputs'",
"in",
"self",
".",
"summary_labels",
"or",
"'states'",
"in",
"self",
".",
"summary_labels",
":",
"for",
"name",
"in",
"sorted",
"(",
"states",
")",
":",
"tf",
".",
"contrib",
".",
"summary",
".",
"histogram",
"(",
"name",
"=",
"(",
"'states-'",
"+",
"name",
")",
",",
"tensor",
"=",
"states",
"[",
"name",
"]",
")",
"if",
"'inputs'",
"in",
"self",
".",
"summary_labels",
"or",
"'actions'",
"in",
"self",
".",
"summary_labels",
":",
"for",
"name",
"in",
"sorted",
"(",
"actions",
")",
":",
"tf",
".",
"contrib",
".",
"summary",
".",
"histogram",
"(",
"name",
"=",
"(",
"'actions-'",
"+",
"name",
")",
",",
"tensor",
"=",
"actions",
"[",
"name",
"]",
")",
"if",
"'inputs'",
"in",
"self",
".",
"summary_labels",
"or",
"'reward'",
"in",
"self",
".",
"summary_labels",
":",
"tf",
".",
"contrib",
".",
"summary",
".",
"histogram",
"(",
"name",
"=",
"'reward'",
",",
"tensor",
"=",
"reward",
")",
"if",
"'graph'",
"in",
"self",
".",
"summary_labels",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"=",
"'summarizer'",
")",
":",
"graph_def",
"=",
"self",
".",
"graph",
".",
"as_graph_def",
"(",
")",
"graph_str",
"=",
"tf",
".",
"constant",
"(",
"value",
"=",
"graph_def",
".",
"SerializeToString",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"string",
",",
"shape",
"=",
"(",
")",
")",
"self",
".",
"graph_summary",
"=",
"tf",
".",
"contrib",
".",
"summary",
".",
"graph",
"(",
"param",
"=",
"graph_str",
",",
"step",
"=",
"self",
".",
"global_timestep",
")",
"if",
"'meta_param_recorder_class'",
"in",
"self",
".",
"summarizer_spec",
":",
"self",
".",
"graph_summary",
"=",
"tf",
".",
"group",
"(",
"self",
".",
"graph_summary",
",",
"*",
"self",
".",
"summarizer_spec",
"[",
"'meta_param_recorder_class'",
"]",
".",
"build_metagraph_list",
"(",
")",
")",
"if",
"self",
".",
"summarizer_spec",
"is",
"not",
"None",
":",
"record_summaries",
".",
"__exit__",
"(",
"None",
",",
"None",
",",
"None",
")",
"default_summarizer",
".",
"__exit__",
"(",
"None",
",",
"None",
",",
"None",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"=",
"'summarizer'",
")",
":",
"self",
".",
"flush_summarizer",
"=",
"tf",
".",
"contrib",
".",
"summary",
".",
"flush",
"(",
")",
"self",
".",
"summarizer_init_op",
"=",
"tf",
".",
"contrib",
".",
"summary",
".",
"summary_writer_initializer_op",
"(",
")",
"assert",
"len",
"(",
"self",
".",
"summarizer_init_op",
")",
"==",
"1",
"self",
".",
"summarizer_init_op",
"=",
"self",
".",
"summarizer_init_op",
"[",
"0",
"]",
"# If we are a global model -> return here.",
"# Saving, syncing, finalizing graph, session is done by local replica model.",
"if",
"self",
".",
"execution_type",
"==",
"\"distributed\"",
"and",
"not",
"self",
".",
"is_local_model",
":",
"return",
"# Saver/Summary -> Scaffold.",
"self",
".",
"setup_saver",
"(",
")",
"self",
".",
"setup_scaffold",
"(",
")",
"# Create necessary hooks for the upcoming session.",
"hooks",
"=",
"self",
".",
"setup_hooks",
"(",
")",
"# We are done constructing: Finalize our graph, create and enter the session.",
"self",
".",
"setup_session",
"(",
"self",
".",
"server",
",",
"hooks",
",",
"graph_default_context",
")"
] | Sets up the TensorFlow model graph, starts the servers (distributed mode), creates summarizers
and savers, initializes (and enters) the TensorFlow session. | [
"Sets",
"up",
"the",
"TensorFlow",
"model",
"graph",
"starts",
"the",
"servers",
"(",
"distributed",
"mode",
")",
"creates",
"summarizers",
"and",
"savers",
"initializes",
"(",
"and",
"enters",
")",
"the",
"TensorFlow",
"session",
"."
] | python | valid |
podio/podio-py | pypodio2/encode.py | https://github.com/podio/podio-py/blob/5ce956034a06c98b0ef18fcd940b36da0908ad6c/pypodio2/encode.py#L195-L219 | def encode_hdr(self, boundary):
"""Returns the header of the encoding of this parameter"""
boundary = encode_and_quote(boundary)
headers = ["--%s" % boundary]
if self.filename:
disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
self.filename)
else:
disposition = 'form-data; name="%s"' % self.name
headers.append("Content-Disposition: %s" % disposition)
if self.filetype:
filetype = self.filetype
else:
filetype = "text/plain; charset=utf-8"
headers.append("Content-Type: %s" % filetype)
headers.append("")
headers.append("")
return "\r\n".join(headers) | [
"def",
"encode_hdr",
"(",
"self",
",",
"boundary",
")",
":",
"boundary",
"=",
"encode_and_quote",
"(",
"boundary",
")",
"headers",
"=",
"[",
"\"--%s\"",
"%",
"boundary",
"]",
"if",
"self",
".",
"filename",
":",
"disposition",
"=",
"'form-data; name=\"%s\"; filename=\"%s\"'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"filename",
")",
"else",
":",
"disposition",
"=",
"'form-data; name=\"%s\"'",
"%",
"self",
".",
"name",
"headers",
".",
"append",
"(",
"\"Content-Disposition: %s\"",
"%",
"disposition",
")",
"if",
"self",
".",
"filetype",
":",
"filetype",
"=",
"self",
".",
"filetype",
"else",
":",
"filetype",
"=",
"\"text/plain; charset=utf-8\"",
"headers",
".",
"append",
"(",
"\"Content-Type: %s\"",
"%",
"filetype",
")",
"headers",
".",
"append",
"(",
"\"\"",
")",
"headers",
".",
"append",
"(",
"\"\"",
")",
"return",
"\"\\r\\n\"",
".",
"join",
"(",
"headers",
")"
] | Returns the header of the encoding of this parameter | [
"Returns",
"the",
"header",
"of",
"the",
"encoding",
"of",
"this",
"parameter"
] | python | train |
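
For illustration, a parameter with name 'file', filename 'report.pdf' and filetype 'application/pdf' (made-up values) yields this header block from encode_hdr('BOUNDARY'), terminated by a blank line so the body can follow:

--BOUNDARY
Content-Disposition: form-data; name="file"; filename="report.pdf"
Content-Type: application/pdf
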
mitodl/PyLmod | pylmod/gradebook.py | https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/gradebook.py#L282-L333 | def get_assignment_by_name(self, assignment_name, assignments=None):
"""Get assignment by name.
Get an assignment by name. It works by retrieving all assignments
and returning the first assignment with a matching name. If the
optional parameter ``assignments`` is provided, it uses this
collection rather than retrieving all assignments from the service.
Args:
assignment_name (str): name of assignment
assignments (list): assignments to search, default: None
When ``assignments`` is unspecified, all assignments
are retrieved from the service.
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
tuple: tuple of assignment id and assignment dictionary
.. code-block:: python
(
16708850,
{
u'assignmentId': 16708850,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1383541200000,
u'dueDateString': u'11-04-2013',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 16708851,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'midterm1',
u'shortName': u'mid1',
u'userDeleted': False,
u'weight': 1.0
}
)
"""
if assignments is None:
assignments = self.get_assignments()
for assignment in assignments:
if assignment['name'] == assignment_name:
return assignment['assignmentId'], assignment
return None, None | [
"def",
"get_assignment_by_name",
"(",
"self",
",",
"assignment_name",
",",
"assignments",
"=",
"None",
")",
":",
"if",
"assignments",
"is",
"None",
":",
"assignments",
"=",
"self",
".",
"get_assignments",
"(",
")",
"for",
"assignment",
"in",
"assignments",
":",
"if",
"assignment",
"[",
"'name'",
"]",
"==",
"assignment_name",
":",
"return",
"assignment",
"[",
"'assignmentId'",
"]",
",",
"assignment",
"return",
"None",
",",
"None"
] | Get assignment by name.
Get an assignment by name. It works by retrieving all assignments
and returning the first assignment with a matching name. If the
optional parameter ``assignments`` is provided, it uses this
collection rather than retrieving all assignments from the service.
Args:
assignment_name (str): name of assignment
assignments (list): assignments to search, default: None
When ``assignments`` is unspecified, all assignments
are retrieved from the service.
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
tuple: tuple of assignment id and assignment dictionary
.. code-block:: python
(
16708850,
{
u'assignmentId': 16708850,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1383541200000,
u'dueDateString': u'11-04-2013',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 16708851,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'midterm1',
u'shortName': u'mid1',
u'userDeleted': False,
u'weight': 1.0
}
) | [
"Get",
"assignment",
"by",
"name",
"."
] | python | train |
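
A hypothetical call; `gradebook` stands for an authenticated GradeBook client:

assignment_id, assignment = gradebook.get_assignment_by_name('midterm1')
if assignment_id is None:
    print('no assignment named midterm1')
else:
    print(assignment['maxPointsTotal'])
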
auth0/auth0-python | auth0/v3/management/logs.py | https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/logs.py#L27-L71 | def search(self, page=0, per_page=50, sort=None, q=None,
include_totals=True, fields=None, from_param=None, take=None,
include_fields=True):
"""Search log events.
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
sort (str, optional): The field to use for sorting.
1 == ascending and -1 == descending. (e.g: date:1)
q (str, optional): Query in Lucene query string syntax.
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
from_param (str, optional): Log Event Id to start retrieving logs. You can
limit the number of logs using the take parameter.
take (int, optional): The total amount of entries to retrieve when
using the from parameter.
https://auth0.com/docs/api/management/v2#!/Logs/get_logs
"""
params = {
'per_page': per_page,
'page': page,
'include_totals': str(include_totals).lower(),
'sort': sort,
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'q': q,
'from': from_param,
'take': take
}
return self.client.get(self._url(), params=params) | [
"def",
"search",
"(",
"self",
",",
"page",
"=",
"0",
",",
"per_page",
"=",
"50",
",",
"sort",
"=",
"None",
",",
"q",
"=",
"None",
",",
"include_totals",
"=",
"True",
",",
"fields",
"=",
"None",
",",
"from_param",
"=",
"None",
",",
"take",
"=",
"None",
",",
"include_fields",
"=",
"True",
")",
":",
"params",
"=",
"{",
"'per_page'",
":",
"per_page",
",",
"'page'",
":",
"page",
",",
"'include_totals'",
":",
"str",
"(",
"include_totals",
")",
".",
"lower",
"(",
")",
",",
"'sort'",
":",
"sort",
",",
"'fields'",
":",
"fields",
"and",
"','",
".",
"join",
"(",
"fields",
")",
"or",
"None",
",",
"'include_fields'",
":",
"str",
"(",
"include_fields",
")",
".",
"lower",
"(",
")",
",",
"'q'",
":",
"q",
",",
"'from'",
":",
"from_param",
",",
"'take'",
":",
"take",
"}",
"return",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"_url",
"(",
")",
",",
"params",
"=",
"params",
")"
] | Search log events.
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
sort (str, optional): The field to use for sorting.
1 == ascending and -1 == descending. (e.g: date:1)
q (str, optional): Query in Lucene query string syntax.
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
from_param (str, optional): Log Event Id to start retrieving logs. You can
limit the number of logs using the take parameter.
take (int, optional): The total amount of entries to retrieve when
using the from parameter.
https://auth0.com/docs/api/management/v2#!/Logs/get_logs | [
"Search",
"log",
"events",
"."
] | python | train |
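
A sketch of typical usage; `auth0` stands for an authenticated management client, and the response shape assumes include_totals=True wraps the entries under a 'logs' key:

result = auth0.logs.search(q='type:f', per_page=25, sort='date:-1')
for entry in result.get('logs', []):
    print(entry.get('date'), entry.get('type'))
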
lambdamusic/Ontospy | ontospy/extras/hacks/matcher.py | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/matcher.py#L69-L122 | def matcher(graph1, graph2, confidence=0.5, output_file="matching_results.csv", class_or_prop="classes", verbose=False):
"""
takes two graphs and matches their classes based on qname, label, etc.
@todo extend to properties and skos etc..
"""
printDebug("----------\nNow matching...")
f = open(output_file, 'wt')
counter = 0
try:
writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow( ('name 1', 'name 2', 'uri 1', 'uri 2') )
# a) match classes
if class_or_prop == "classes":
for x in graph1.all_classes:
l1 = unicode(x.bestLabel(qname_allowed=True))
for y in graph2.all_classes:
l2 = unicode(y.bestLabel(qname_allowed=True))
if similar(l1, l2) > confidence:
counter += 1
row = [l1, l2, x.uri, y.uri]
writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row])
if verbose:
print("%s ==~== %s" % (l1, l2))
# b) match properties
elif class_or_prop == "properties":
for x in graph1.all_properties:
l1 = unicode(x.bestLabel(qname_allowed=True))
for y in graph2.all_properties:
l2 = unicode(y.bestLabel(qname_allowed=True))
if similar(l1, l2) > confidence:
counter += 1
row = [l1, l2, x.uri, y.uri]
writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row])
if verbose:
print("%s ==~== %s" % (l1, l2))
finally:
f.close()
printDebug("%d candidates found." % counter) | [
"def",
"matcher",
"(",
"graph1",
",",
"graph2",
",",
"confidence",
"=",
"0.5",
",",
"output_file",
"=",
"\"matching_results.csv\"",
",",
"class_or_prop",
"=",
"\"classes\"",
",",
"verbose",
"=",
"False",
")",
":",
"printDebug",
"(",
"\"----------\\nNow matching...\"",
")",
"f",
"=",
"open",
"(",
"output_file",
",",
"'wt'",
")",
"counter",
"=",
"0",
"try",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_NONNUMERIC",
")",
"writer",
".",
"writerow",
"(",
"(",
"'name 1'",
",",
"'name 2'",
",",
"'uri 1'",
",",
"'uri 2'",
")",
")",
"# a) match classes",
"if",
"class_or_prop",
"==",
"\"classes\"",
":",
"for",
"x",
"in",
"graph1",
".",
"all_classes",
":",
"l1",
"=",
"unicode",
"(",
"x",
".",
"bestLabel",
"(",
"qname_allowed",
"=",
"True",
")",
")",
"for",
"y",
"in",
"graph2",
".",
"all_classes",
":",
"l2",
"=",
"unicode",
"(",
"y",
".",
"bestLabel",
"(",
"qname_allowed",
"=",
"True",
")",
")",
"if",
"similar",
"(",
"l1",
",",
"l2",
")",
">",
"confidence",
":",
"counter",
"+=",
"1",
"row",
"=",
"[",
"l1",
",",
"l2",
",",
"x",
".",
"uri",
",",
"y",
".",
"uri",
"]",
"writer",
".",
"writerow",
"(",
"[",
"s",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"type",
"(",
"s",
")",
"is",
"unicode",
"else",
"s",
"for",
"s",
"in",
"row",
"]",
")",
"if",
"verbose",
":",
"print",
"(",
"\"%s ==~== %s\"",
"%",
"(",
"l1",
",",
"l2",
")",
")",
"# b) match properties",
"elif",
"class_or_prop",
"==",
"\"properties\"",
":",
"for",
"x",
"in",
"graph1",
".",
"all_properties",
":",
"l1",
"=",
"unicode",
"(",
"x",
".",
"bestLabel",
"(",
"qname_allowed",
"=",
"True",
")",
")",
"for",
"y",
"in",
"graph2",
".",
"all_properties",
":",
"l2",
"=",
"unicode",
"(",
"y",
".",
"bestLabel",
"(",
"qname_allowed",
"=",
"True",
")",
")",
"if",
"similar",
"(",
"l1",
",",
"l2",
")",
">",
"confidence",
":",
"counter",
"+=",
"1",
"row",
"=",
"[",
"l1",
",",
"l2",
",",
"x",
".",
"uri",
",",
"y",
".",
"uri",
"]",
"writer",
".",
"writerow",
"(",
"[",
"s",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"type",
"(",
"s",
")",
"is",
"unicode",
"else",
"s",
"for",
"s",
"in",
"row",
"]",
")",
"if",
"verbose",
":",
"print",
"(",
"\"%s ==~== %s\"",
"%",
"(",
"l1",
",",
"l2",
")",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"printDebug",
"(",
"\"%d candidates found.\"",
"%",
"counter",
")"
] | takes two graphs and matches their classes based on qname, label, etc.
@todo extend to properties and skos etc.. | [
"takes",
"two",
"graphs",
"and",
"matches",
"its",
"classes",
"based",
"on",
"qname",
"label",
"etc",
".."
] | python | train |
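
Hypothetical usage, loading two ontologies with Ontospy first; the file names are placeholders:

g1 = Ontospy('ontology_a.ttl')
g2 = Ontospy('ontology_b.ttl')
matcher(g1, g2, confidence=0.7, output_file='class_matches.csv',
        class_or_prop='classes', verbose=True)
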
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L26670-L26688 | def get_registers(self, cpu_id):
"""Gets all the registers for the given CPU.
in cpu_id of type int
The identifier of the Virtual CPU.
out names of type str
Array containing the lowercase register names.
out values of type str
Array parallel to the names holding the register values as if the
register was returned by :py:func:`IMachineDebugger.get_register` .
"""
if not isinstance(cpu_id, baseinteger):
raise TypeError("cpu_id can only be an instance of type baseinteger")
(names, values) = self._call("getRegisters",
in_p=[cpu_id])
return (names, values) | [
"def",
"get_registers",
"(",
"self",
",",
"cpu_id",
")",
":",
"if",
"not",
"isinstance",
"(",
"cpu_id",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"cpu_id can only be an instance of type baseinteger\"",
")",
"(",
"names",
",",
"values",
")",
"=",
"self",
".",
"_call",
"(",
"\"getRegisters\"",
",",
"in_p",
"=",
"[",
"cpu_id",
"]",
")",
"return",
"(",
"names",
",",
"values",
")"
] | Gets all the registers for the given CPU.
in cpu_id of type int
The identifier of the Virtual CPU.
out names of type str
Array containing the lowercase register names.
out values of type str
Array parallel to the names holding the register values as if the
register was returned by :py:func:`IMachineDebugger.get_register` . | [
"Gets",
"all",
"the",
"registers",
"for",
"the",
"given",
"CPU",
"."
] | python | train |
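
A hypothetical call; `debugger` is the IMachineDebugger of a running session:

names, values = debugger.get_registers(0)  # virtual CPU 0
registers = dict(zip(names, values))
print(registers.get('rip'))
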
foobarbecue/afterflight | afterflight/af_utils.py | https://github.com/foobarbecue/afterflight/blob/7085f719593f88999dce93f35caec5f15d2991b6/afterflight/af_utils.py#L24-L29 | def logpath2dt(filepath):
"""
given a dataflashlog in the format produced by Mission Planner,
return a datetime which says when the file was downloaded from the APM
"""
return datetime.datetime.strptime(re.match(r'.*/(.*) .*$',filepath).groups()[0],'%Y-%m-%d %H-%M') | [
"def",
"logpath2dt",
"(",
"filepath",
")",
":",
"return",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"re",
".",
"match",
"(",
"r'.*/(.*) .*$'",
",",
"filepath",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
",",
"'%Y-%m-%d %H-%M'",
")"
] | given a dataflashlog in the format produced by Mission Planner,
return a datetime which says when the file was downloaded from the APM | [
"given",
"a",
"dataflashlog",
"in",
"the",
"format",
"produced",
"by",
"Mission",
"Planner",
"return",
"a",
"datetime",
"which",
"says",
"when",
"the",
"file",
"was",
"downloaded",
"from",
"the",
"APM"
] | python | train |
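
A worked example with a made-up Mission Planner-style path: the regex keeps everything between the last '/' and the last space, which must then parse as '%Y-%m-%d %H-%M':

logpath2dt('/logs/2013-11-04 10-30 copter.log')
# -> datetime.datetime(2013, 11, 4, 10, 30)
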
zagaran/mongolia | mongolia/mongo_connection.py | https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L180-L203 | def add_user(name, password=None, read_only=None, db=None, **kwargs):
"""
Adds a user that can be used for authentication.
@param name: the name of the user to create
@param passowrd: the password of the user to create. Can not be used with
the userSource argument.
@param read_only: if True the user will be read only
@param db: the database the user is authenticated to access. Passing None
(the default) means add the user to the admin database, which gives the
user access to all databases
@param **kwargs: forwarded to pymongo.database.add_user
Example; adding a user with full database access:
add_user("username", "password")
Example; adding a user with read-only privilege on a particular database:
add_user("username", "password", read_only=True, db="somedb")
NOTE: This function will only work if mongo is being run unauthenticated
or you have already authenticated with another user with appropriate
privileges to add a user to the specified database.
"""
return CONNECTION.add_user(name, password=password, read_only=read_only, db=db, **kwargs) | [
"def",
"add_user",
"(",
"name",
",",
"password",
"=",
"None",
",",
"read_only",
"=",
"None",
",",
"db",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"CONNECTION",
".",
"add_user",
"(",
"name",
",",
"password",
"=",
"password",
",",
"read_only",
"=",
"read_only",
",",
"db",
"=",
"db",
",",
"*",
"*",
"kwargs",
")"
] | Adds a user that can be used for authentication.
@param name: the name of the user to create
@param password: the password of the user to create. Cannot be used with
the userSource argument.
@param read_only: if True the user will be read only
@param db: the database the user is authenticated to access. Passing None
(the default) means add the user to the admin database, which gives the
user access to all databases
@param **kwargs: forwarded to pymongo.database.add_user
Example; adding a user with full database access:
add_user("username", "password")
Example; adding a user with read-only privilege on a particular database:
add_user("username", "password", read_only=True, db="somedb")
NOTE: This function will only work if mongo is being run unauthenticated
or you have already authenticated with another user with appropriate
privileges to add a user to the specified database. | [
"Adds",
"a",
"user",
"that",
"can",
"be",
"used",
"for",
"authentication",
"."
] | python | train |
sveetch/py-css-styleguide | py_css_styleguide/serializer.py | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L71-L94 | def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | [
"def",
"validate_variable_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
":",
"raise",
"SerializerError",
"(",
"\"Variable name is empty\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"name",
"[",
"0",
"]",
"not",
"in",
"PROPERTY_ALLOWED_START",
":",
"msg",
"=",
"\"Variable name '{}' must starts with a letter\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"for",
"item",
"in",
"name",
":",
"if",
"item",
"not",
"in",
"PROPERTY_ALLOWED_CHARS",
":",
"msg",
"=",
"(",
"\"Invalid variable name '{}': it must only contains \"",
"\"letters, numbers and '_' character\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"return",
"True"
] | Validate variable name.
Arguments:
name (string): Property name.
Returns:
bool: ``True`` if variable name is valid. | [
"Validate",
"variable",
"name",
"."
] | python | train |
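
A quick demonstration of the rules; `serializer` stands for an instance of the owning class:

serializer.validate_variable_name('color_1')  # -> True
serializer.validate_variable_name('1color')   # raises SerializerError
serializer.validate_variable_name('color-1')  # raises SerializerError
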
the01/python-flotils | flotils/runable.py | https://github.com/the01/python-flotils/blob/5954712776bb590107e5b2f4362d010bf74f77a1/flotils/runable.py#L118-L135 | def start(self, blocking=False):
"""
Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None
"""
super(StartStopable, self).start()
self._is_running = True
# blocking
try:
while blocking and self._is_running:
time.sleep(self._start_block_timeout)
except IOError as e:
if not str(e).lower().startswith("[errno 4]"):
raise | [
"def",
"start",
"(",
"self",
",",
"blocking",
"=",
"False",
")",
":",
"super",
"(",
"StartStopable",
",",
"self",
")",
".",
"start",
"(",
")",
"self",
".",
"_is_running",
"=",
"True",
"# blocking",
"try",
":",
"while",
"blocking",
"and",
"self",
".",
"_is_running",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"_start_block_timeout",
")",
"except",
"IOError",
"as",
"e",
":",
"if",
"not",
"str",
"(",
"e",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"[errno 4]\"",
")",
":",
"raise"
] | Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None | [
"Start",
"the",
"interface"
] | python | train |
niloch/iplotter | iplotter/c3_plotter.py | https://github.com/niloch/iplotter/blob/0403486d8633f601a33c4d2b9c9fa3ec88e9327b/iplotter/c3_plotter.py#L34-L44 | def render(self, data, div_id="chart", head=""):
"""Render the data in HTML template."""
if not self.is_valid_name(div_id):
raise ValueError(
"Name {} is invalid. Only letters, numbers, '_', and '-' are permitted ".format(
div_id))
return Template(head + self.template).render(
div_id=div_id.replace(" ", "_"),
data=json.dumps(
data, indent=4).replace("'", "\\'").replace('"', "'")) | [
"def",
"render",
"(",
"self",
",",
"data",
",",
"div_id",
"=",
"\"chart\"",
",",
"head",
"=",
"\"\"",
")",
":",
"if",
"not",
"self",
".",
"is_valid_name",
"(",
"div_id",
")",
":",
"raise",
"ValueError",
"(",
"\"Name {} is invalid. Only letters, numbers, '_', and '-' are permitted \"",
".",
"format",
"(",
"div_id",
")",
")",
"return",
"Template",
"(",
"head",
"+",
"self",
".",
"template",
")",
".",
"render",
"(",
"div_id",
"=",
"div_id",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"indent",
"=",
"4",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\\\\'\"",
")",
".",
"replace",
"(",
"'\"'",
",",
"\"'\"",
")",
")"
] | Render the data in HTML template. | [
"Render",
"the",
"data",
"in",
"HTML",
"template",
"."
] | python | train |
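A hedged usage sketch, assuming the method belongs to iplotter's C3Plotter class and that the class supplies the is_valid_name check and the Jinja template; the chart data follows C3.js conventions:

from iplotter import C3Plotter   # assumed import path

plotter = C3Plotter()
data = {
    "data": {
        "columns": [
            ["series 1", 30, 200, 100],
            ["series 2", 50, 20, 10],
        ],
        "type": "bar",
    }
}
html = plotter.render(data, div_id="my chart")   # spaces become underscores in the div id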
miyakogi/wdom | wdom/server/base.py | https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/server/base.py#L64-L74 | def open_browser(url: str, browser: str = None) -> None:
"""Open web browser."""
if '--open-browser' in sys.argv:
# Remove open browser to prevent making new tab on autoreload
sys.argv.remove('--open-browser')
if browser is None:
browser = config.browser
if browser in _browsers:
webbrowser.get(browser).open(url)
else:
webbrowser.open(url) | [
"def",
"open_browser",
"(",
"url",
":",
"str",
",",
"browser",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"if",
"'--open-browser'",
"in",
"sys",
".",
"argv",
":",
"# Remove open browser to prevent making new tab on autoreload",
"sys",
".",
"argv",
".",
"remove",
"(",
"'--open-browser'",
")",
"if",
"browser",
"is",
"None",
":",
"browser",
"=",
"config",
".",
"browser",
"if",
"browser",
"in",
"_browsers",
":",
"webbrowser",
".",
"get",
"(",
"browser",
")",
".",
"open",
"(",
"url",
")",
"else",
":",
"webbrowser",
".",
"open",
"(",
"url",
")"
] | Open web browser. | [
"Open",
"web",
"browser",
"."
] | python | train |
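Because the '--open-browser' flag is removed from sys.argv on first use, a second call in the same process is a no-op; this is what prevents autoreload from opening a new tab on every restart. A small sketch (the URL is illustrative):

import sys
from wdom.server.base import open_browser   # module shown above

sys.argv.append('--open-browser')        # simulate passing the CLI flag
open_browser('http://localhost:8888')    # opens the configured (or default) browser
open_browser('http://localhost:8888')    # no-op: the flag was consumed above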
koehlma/pygrooveshark | src/grooveshark/classes/album.py | https://github.com/koehlma/pygrooveshark/blob/17673758ac12f54dc26ac879c30ea44f13b81057/src/grooveshark/classes/album.py#L76-L82 | def cover(self):
"""
album cover as :class:`Picture` object
"""
if not self._cover:
self._cover = Picture(self._cover_url, self._connection)
return self._cover | [
"def",
"cover",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cover",
":",
"self",
".",
"_cover",
"=",
"Picture",
"(",
"self",
".",
"_cover_url",
",",
"self",
".",
"_connection",
")",
"return",
"self",
".",
"_cover"
] | album cover as :class:`Picture` object | [
"album",
"cover",
"as",
":",
"class",
":",
"Picture",
"object"
] | python | train |
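cover is a lazily cached property: the Picture object is built from _cover_url on first access and the cached instance is returned afterwards. The same pattern in a self-contained sketch (a dict stands in for the Picture class):

class Album(object):
    def __init__(self, cover_url):
        self._cover_url = cover_url
        self._cover = None

    @property
    def cover(self):
        if self._cover is None:
            print("building cover once")
            self._cover = {"url": self._cover_url}   # stands in for Picture(url, connection)
        return self._cover

album = Album("http://images.example/cover.jpg")
album.cover    # prints "building cover once"
album.cover    # cached: no second build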
tensorflow/probability | tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py#L666-L730 | def _satisfies_wolfe(val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
df(a) the derivative along the search direction evaluated a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: A namedtuple, as returned by value_and_gradients_function
evaluated at 0.
val_c: A namedtuple, as returned by value_and_gradients_function
evaluated at the point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
"""
exact_wolfe_suff_dec = (sufficient_decrease_param * val_0.df >=
(val_c.f - val_0.f) / val_c.x)
wolfe_curvature = val_c.df >= curvature_param * val_0.df
exact_wolfe = exact_wolfe_suff_dec & wolfe_curvature
approx_wolfe_applies = val_c.f <= f_lim
approx_wolfe_suff_dec = ((2 * sufficient_decrease_param - 1) * val_0.df
>= val_c.df)
approx_wolfe = approx_wolfe_applies & approx_wolfe_suff_dec & wolfe_curvature
is_satisfied = exact_wolfe | approx_wolfe
return is_satisfied | [
"def",
"_satisfies_wolfe",
"(",
"val_0",
",",
"val_c",
",",
"f_lim",
",",
"sufficient_decrease_param",
",",
"curvature_param",
")",
":",
"exact_wolfe_suff_dec",
"=",
"(",
"sufficient_decrease_param",
"*",
"val_0",
".",
"df",
">=",
"(",
"val_c",
".",
"f",
"-",
"val_0",
".",
"f",
")",
"/",
"val_c",
".",
"x",
")",
"wolfe_curvature",
"=",
"val_c",
".",
"df",
">=",
"curvature_param",
"*",
"val_0",
".",
"df",
"exact_wolfe",
"=",
"exact_wolfe_suff_dec",
"&",
"wolfe_curvature",
"approx_wolfe_applies",
"=",
"val_c",
".",
"f",
"<=",
"f_lim",
"approx_wolfe_suff_dec",
"=",
"(",
"(",
"2",
"*",
"sufficient_decrease_param",
"-",
"1",
")",
"*",
"val_0",
".",
"df",
">=",
"val_c",
".",
"df",
")",
"approx_wolfe",
"=",
"approx_wolfe_applies",
"&",
"approx_wolfe_suff_dec",
"&",
"wolfe_curvature",
"is_satisfied",
"=",
"exact_wolfe",
"|",
"approx_wolfe",
"return",
"is_satisfied"
] | Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
df(a) the derivative along the search direction evaluated a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: A namedtuple, as returned by value_and_gradients_function
evaluated at 0.
val_c: A namedtuple, as returned by value_and_gradients_function
evaluated at the point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied. | [
"Checks",
"whether",
"the",
"Wolfe",
"or",
"approx",
"Wolfe",
"conditions",
"are",
"satisfied",
"."
] | python | test |
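A scalar, pure-Python sketch of the same test, using the namedtuple layout the function assumes (x = step length, f = function value, df = directional derivative); delta and sigma are the sufficient-decrease and curvature parameters:

import collections

Point = collections.namedtuple('Point', ['x', 'f', 'df'])

def satisfies_wolfe(val_0, val_c, f_lim, delta=0.1, sigma=0.9):
    exact_suff_dec = delta * val_0.df >= (val_c.f - val_0.f) / val_c.x
    curvature = val_c.df >= sigma * val_0.df
    exact_wolfe = exact_suff_dec and curvature
    approx_applies = val_c.f <= f_lim
    approx_suff_dec = (2 * delta - 1) * val_0.df >= val_c.df
    return exact_wolfe or (approx_applies and approx_suff_dec and curvature)

f = lambda a: (a - 1.0) ** 2          # minimised at a = 1 along the search direction
df = lambda a: 2.0 * (a - 1.0)
val_0 = Point(x=0.0, f=f(0.0), df=df(0.0))
val_c = Point(x=0.99, f=f(0.99), df=df(0.99))
print(satisfies_wolfe(val_0, val_c, f_lim=f(0.0) + 1e-6 * abs(f(0.0))))   # True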
nkgilley/python-ecobee-api | pyecobee/__init__.py | https://github.com/nkgilley/python-ecobee-api/blob/cc8d90d20abcb9ef5b66ec9cb035bae2f06ba174/pyecobee/__init__.py#L295-L305 | def send_message(self, index, message="Hello from python-ecobee!"):
''' Send a message to the thermostat '''
body = {"selection": {
"selectionType": "thermostats",
"selectionMatch": self.thermostats[index]['identifier']},
"functions": [{"type": "sendMessage", "params": {
"text": message[0:500]
}}]}
log_msg_action = "send message"
return self.make_request(body, log_msg_action) | [
"def",
"send_message",
"(",
"self",
",",
"index",
",",
"message",
"=",
"\"Hello from python-ecobee!\"",
")",
":",
"body",
"=",
"{",
"\"selection\"",
":",
"{",
"\"selectionType\"",
":",
"\"thermostats\"",
",",
"\"selectionMatch\"",
":",
"self",
".",
"thermostats",
"[",
"index",
"]",
"[",
"'identifier'",
"]",
"}",
",",
"\"functions\"",
":",
"[",
"{",
"\"type\"",
":",
"\"sendMessage\"",
",",
"\"params\"",
":",
"{",
"\"text\"",
":",
"message",
"[",
"0",
":",
"500",
"]",
"}",
"}",
"]",
"}",
"log_msg_action",
"=",
"\"send message\"",
"return",
"self",
".",
"make_request",
"(",
"body",
",",
"log_msg_action",
")"
] | Send a message to the thermostat | [
"Send",
"a",
"message",
"to",
"the",
"thermostat"
] | python | test |
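For reference, this is the JSON body the call assembles before handing it to make_request; the identifier value below is illustrative, and the text is truncated to 500 characters by message[0:500]:

body = {
    "selection": {
        "selectionType": "thermostats",
        "selectionMatch": "318324702718",   # self.thermostats[index]['identifier']
    },
    "functions": [
        {"type": "sendMessage", "params": {"text": "Hello from python-ecobee!"}},
    ],
}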
mwickert/scikit-dsp-comm | sk_dsp_comm/fec_conv.py | https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fec_conv.py#L790-L847 | def conv_Pb_bound(R,dfree,Ck,SNRdB,hard_soft,M=2):
"""
Coded bit error probability
Convolution coding bit error probability upper bound
according to Ziemer & Peterson 7-16, p. 507
Mark Wickert November 2014
Parameters
----------
R: Code rate
dfree: Free distance of the code
Ck: Weight coefficient
SNRdB: Signal to noise ratio in dB
hard_soft: 0 hard, 1 soft, 2 uncoded
M: M-ary
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm import fec_conv as fec
>>> import matplotlib.pyplot as plt
>>> SNRdB = np.arange(2,12,.1)
>>> Pb = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,2)
>>> Pb_1_2 = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,1)
>>> Pb_3_4 = fec.conv_Pb_bound(3./4,4,[164, 0, 5200, 0, 151211, 0, 3988108],SNRdB,1)
>>> plt.semilogy(SNRdB,Pb)
>>> plt.semilogy(SNRdB,Pb_1_2)
>>> plt.semilogy(SNRdB,Pb_3_4)
>>> plt.axis([2,12,1e-7,1e0])
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/2, K=7, Soft','R=3/4 (punc), K=7, Soft'),loc='best')
>>> plt.grid();
>>> plt.show()
Notes
-----
The code rate R is given by :math:`R_{s} = \\frac{k}{n}`.
Mark Wickert and Andrew Smit 2018
"""
Pb = np.zeros_like(SNRdB)
SNR = 10.**(SNRdB/10.)
for n,SNRn in enumerate(SNR):
for k in range(dfree,len(Ck)+dfree):
if hard_soft == 0: # Evaluate hard decision bound
Pb[n] += Ck[k-dfree]*hard_Pk(k,R,SNRn,M)
elif hard_soft == 1: # Evaluate soft decision bound
Pb[n] += Ck[k-dfree]*soft_Pk(k,R,SNRn,M)
else: # Compute Uncoded Pe
if M == 2:
Pb[n] = Q_fctn(np.sqrt(2.*SNRn))
else:
Pb[n] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn));
return Pb | [
"def",
"conv_Pb_bound",
"(",
"R",
",",
"dfree",
",",
"Ck",
",",
"SNRdB",
",",
"hard_soft",
",",
"M",
"=",
"2",
")",
":",
"Pb",
"=",
"np",
".",
"zeros_like",
"(",
"SNRdB",
")",
"SNR",
"=",
"10.",
"**",
"(",
"SNRdB",
"/",
"10.",
")",
"for",
"n",
",",
"SNRn",
"in",
"enumerate",
"(",
"SNR",
")",
":",
"for",
"k",
"in",
"range",
"(",
"dfree",
",",
"len",
"(",
"Ck",
")",
"+",
"dfree",
")",
":",
"if",
"hard_soft",
"==",
"0",
":",
"# Evaluate hard decision bound\r",
"Pb",
"[",
"n",
"]",
"+=",
"Ck",
"[",
"k",
"-",
"dfree",
"]",
"*",
"hard_Pk",
"(",
"k",
",",
"R",
",",
"SNRn",
",",
"M",
")",
"elif",
"hard_soft",
"==",
"1",
":",
"# Evaluate soft decision bound\r",
"Pb",
"[",
"n",
"]",
"+=",
"Ck",
"[",
"k",
"-",
"dfree",
"]",
"*",
"soft_Pk",
"(",
"k",
",",
"R",
",",
"SNRn",
",",
"M",
")",
"else",
":",
"# Compute Uncoded Pe\r",
"if",
"M",
"==",
"2",
":",
"Pb",
"[",
"n",
"]",
"=",
"Q_fctn",
"(",
"np",
".",
"sqrt",
"(",
"2.",
"*",
"SNRn",
")",
")",
"else",
":",
"Pb",
"[",
"n",
"]",
"=",
"4.",
"/",
"np",
".",
"log2",
"(",
"M",
")",
"*",
"(",
"1",
"-",
"1",
"/",
"np",
".",
"sqrt",
"(",
"M",
")",
")",
"*",
"np",
".",
"gaussQ",
"(",
"np",
".",
"sqrt",
"(",
"3",
"*",
"np",
".",
"log2",
"(",
"M",
")",
"/",
"(",
"M",
"-",
"1",
")",
"*",
"SNRn",
")",
")",
"return",
"Pb"
] | Coded bit error probability
Convolution coding bit error probability upper bound
according to Ziemer & Peterson 7-16, p. 507
Mark Wickert November 2014
Parameters
----------
R: Code rate
dfree: Free distance of the code
Ck: Weight coefficient
SNRdB: Signal to noise ratio in dB
hard_soft: 0 hard, 1 soft, 2 uncoded
M: M-ary
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm import fec_conv as fec
>>> import matplotlib.pyplot as plt
>>> SNRdB = np.arange(2,12,.1)
>>> Pb = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,2)
>>> Pb_1_2 = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,1)
>>> Pb_3_4 = fec.conv_Pb_bound(3./4,4,[164, 0, 5200, 0, 151211, 0, 3988108],SNRdB,1)
>>> plt.semilogy(SNRdB,Pb)
>>> plt.semilogy(SNRdB,Pb_1_2)
>>> plt.semilogy(SNRdB,Pb_3_4)
>>> plt.axis([2,12,1e-7,1e0])
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/2, K=7, Soft','R=3/4 (punc), K=7, Soft'),loc='best')
>>> plt.grid();
>>> plt.show()
Notes
-----
The code rate R is given by :math:`R_{s} = \\frac{k}{n}`.
Mark Wickert and Andrew Smit 2018 | [
"Coded",
"bit",
"error",
"probabilty",
"Convolution",
"coding",
"bit",
"error",
"probability",
"upper",
"bound",
"according",
"to",
"Ziemer",
"&",
"Peterson",
"7",
"-",
"16",
"p",
".",
"507",
"Mark",
"Wickert",
"November",
"2014",
"Parameters",
"----------",
"R",
":",
"Code",
"rate",
"dfree",
":",
"Free",
"distance",
"of",
"the",
"code",
"Ck",
":",
"Weight",
"coefficient",
"SNRdB",
":",
"Signal",
"to",
"noise",
"ratio",
"in",
"dB",
"hard_soft",
":",
"0",
"hard",
"1",
"soft",
"2",
"uncoded",
"M",
":",
"M",
"-",
"ary",
"Examples",
"--------",
">>>",
"import",
"numpy",
"as",
"np",
">>>",
"from",
"sk_dsp_comm",
"import",
"fec_conv",
"as",
"fec",
">>>",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
">>>",
"SNRdB",
"=",
"np",
".",
"arange",
"(",
"2",
"12",
".",
"1",
")",
">>>",
"Pb",
"=",
"fec",
".",
"conv_Pb_bound",
"(",
"1",
".",
"/",
"2",
"10",
"[",
"36",
"0",
"211",
"0",
"1404",
"0",
"11633",
"]",
"SNRdB",
"2",
")",
">>>",
"Pb_1_2",
"=",
"fec",
".",
"conv_Pb_bound",
"(",
"1",
".",
"/",
"2",
"10",
"[",
"36",
"0",
"211",
"0",
"1404",
"0",
"11633",
"]",
"SNRdB",
"1",
")",
">>>",
"Pb_3_4",
"=",
"fec",
".",
"conv_Pb_bound",
"(",
"3",
".",
"/",
"4",
"4",
"[",
"164",
"0",
"5200",
"0",
"151211",
"0",
"3988108",
"]",
"SNRdB",
"1",
")",
">>>",
"plt",
".",
"semilogy",
"(",
"SNRdB",
"Pb",
")",
">>>",
"plt",
".",
"semilogy",
"(",
"SNRdB",
"Pb_1_2",
")",
">>>",
"plt",
".",
"semilogy",
"(",
"SNRdB",
"Pb_3_4",
")",
">>>",
"plt",
".",
"axis",
"(",
"[",
"2",
"12",
"1e",
"-",
"7",
"1e0",
"]",
")",
">>>",
"plt",
".",
"xlabel",
"(",
"r",
"$E_b",
"/",
"N_0$",
"(",
"dB",
")",
")",
">>>",
"plt",
".",
"ylabel",
"(",
"r",
"Symbol",
"Error",
"Probability",
")",
">>>",
"plt",
".",
"legend",
"((",
"Uncoded",
"BPSK",
"R",
"=",
"1",
"/",
"2",
"K",
"=",
"7",
"Soft",
"R",
"=",
"3",
"/",
"4",
"(",
"punc",
")",
"K",
"=",
"7",
"Soft",
")",
"loc",
"=",
"best",
")",
">>>",
"plt",
".",
"grid",
"()",
";",
">>>",
"plt",
".",
"show",
"()",
"Notes",
"-----",
"The",
"code",
"rate",
"R",
"is",
"given",
"by",
":",
"math",
":",
"R_",
"{",
"s",
"}",
"=",
"\\\\",
"frac",
"{",
"k",
"}",
"{",
"n",
"}",
".",
"Mark",
"Wickert",
"and",
"Andrew",
"Smit",
"2018"
] | python | valid |
Telefonica/toolium | toolium/selenoid.py | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/selenoid.py#L206-L220 | def download_file(self, filename, timeout=5):
"""
download a file from remote selenoid and remove the file from the server.
request: http://<username>:<password>@<ggr_host>:<ggr_port>/download/<ggr_session_id>/<filename>
:param filename: file name with extension to download
:param timeout: threshold until the video file is downloaded
:return: downloaded file path or None
"""
path_file = os.path.join(self.output_directory, DOWNLOADS_PATH, self.session_id[-8:], filename)
file_url = '{}/download/{}/{}'.format(self.server_url, self.session_id, filename)
# download the file
if self.browser_remote:
self.__download_file(file_url, path_file, timeout)
return path_file
return None | [
"def",
"download_file",
"(",
"self",
",",
"filename",
",",
"timeout",
"=",
"5",
")",
":",
"path_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_directory",
",",
"DOWNLOADS_PATH",
",",
"self",
".",
"session_id",
"[",
"-",
"8",
":",
"]",
",",
"filename",
")",
"file_url",
"=",
"'{}/download/{}/{}'",
".",
"format",
"(",
"self",
".",
"server_url",
",",
"self",
".",
"session_id",
",",
"filename",
")",
"# download the file",
"if",
"self",
".",
"browser_remote",
":",
"self",
".",
"__download_file",
"(",
"file_url",
",",
"path_file",
",",
"timeout",
")",
"return",
"path_file",
"return",
"None"
] | download a file from remote selenoid and remove the file from the server.
request: http://<username>:<password>@<ggr_host>:<ggr_port>/download/<ggr_session_id>/<filename>
:param filename: file name with extension to download
:param timeout: threshold until the video file is downloaded
:return: downloaded file path or None | [
"download",
"a",
"file",
"from",
"remote",
"selenoid",
"and",
"removing",
"the",
"file",
"in",
"the",
"server",
".",
"request",
":",
"http",
":",
"//",
"<username",
">",
":",
"<password",
">"
] | python | train |
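The local path is built from the configured output directory, the DOWNLOADS_PATH constant and the last eight characters of the Selenoid session id. A sketch of the layout with illustrative values (the constant's exact value lives elsewhere in the package):

import os

DOWNLOADS_PATH = 'downloads'     # assumed value of the constant
output_directory = '/tmp/toolium-output'
session_id = 'a0b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5'

path_file = os.path.join(output_directory, DOWNLOADS_PATH, session_id[-8:], 'report.pdf')
print(path_file)   # /tmp/toolium-output/downloads/a2b3c4d5/report.pdf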
oceanprotocol/aquarius | aquarius/app/assets.py | https://github.com/oceanprotocol/aquarius/blob/9fb094b1ac01f0604d0c854166dd324e476a010e/aquarius/app/assets.py#L631-L649 | def retire_all():
"""Retire metadata of all the assets.
---
tags:
- ddo
responses:
200:
description: successfully deleted
500:
description: Error
"""
try:
all_ids = [a['id'] for a in dao.get_all_assets()]
for i in all_ids:
dao.delete(i)
return 'All ddo successfully deleted', 200
except Exception as e:
logger.error(e)
return 'An error was found', 500 | [
"def",
"retire_all",
"(",
")",
":",
"try",
":",
"all_ids",
"=",
"[",
"a",
"[",
"'id'",
"]",
"for",
"a",
"in",
"dao",
".",
"get_all_assets",
"(",
")",
"]",
"for",
"i",
"in",
"all_ids",
":",
"dao",
".",
"delete",
"(",
"i",
")",
"return",
"'All ddo successfully deleted'",
",",
"200",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"'An error was found'",
",",
"500"
] | Retire metadata of all the assets.
---
tags:
- ddo
responses:
200:
description: successfully deleted
500:
description: Error | [
"Retire",
"metadata",
"of",
"all",
"the",
"assets",
".",
"---",
"tags",
":",
"-",
"ddo",
"responses",
":",
"200",
":",
"description",
":",
"successfully",
"deleted",
"500",
":",
"description",
":",
"Error"
] | python | train |
saltstack/salt | salt/states/keystone.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/keystone.py#L98-L298 | def user_present(name,
password,
email,
tenant=None,
enabled=True,
roles=None,
profile=None,
password_reset=True,
project=None,
**connection_args):
'''
Ensure that the keystone user is present with the specified properties.
name
The name of the user to manage
password
The password to use for this user.
.. note::
If the user already exists and a different password was set for
the user than the one specified here, the password for the user
will be updated. Please set the ``password_reset`` option to
``False`` if this is not the desired behavior.
password_reset
Whether or not to reset password after initial set. Defaults to
``True``.
email
The email address for this user
tenant
The tenant (name) for this user
project
The project (name) for this user (overrides tenant in api v3)
enabled
Availability state for this user
roles
The roles the user should have under given tenants.
Passed as a dictionary mapping tenant names to a list
of roles in this tenant, i.e.::
roles:
admin: # tenant
- admin # role
service:
- admin
- Member
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User "{0}" will be updated'.format(name)}
_api_version(profile=profile, **connection_args)
if project and not tenant:
tenant = project
# Validate tenant if set
if tenant is not None:
tenantdata = __salt__['keystone.tenant_get'](name=tenant,
profile=profile,
**connection_args)
if 'Error' in tenantdata:
ret['result'] = False
ret['comment'] = 'Tenant / project "{0}" does not exist'.format(tenant)
return ret
tenant_id = tenantdata[tenant]['id']
else:
tenant_id = None
# Check if user is already present
user = __salt__['keystone.user_get'](name=name, profile=profile,
**connection_args)
if 'Error' not in user:
change_email = False
change_enabled = False
change_tenant = False
change_password = False
if user[name].get('email', None) != email:
change_email = True
if user[name].get('enabled', None) != enabled:
change_enabled = True
if tenant and (_TENANT_ID not in user[name] or
user[name].get(_TENANT_ID, None) != tenant_id):
change_tenant = True
if (password_reset is True and
not __salt__['keystone.user_verify_password'](name=name,
password=password,
profile=profile,
**connection_args)):
change_password = True
if __opts__.get('test') and (change_email or change_enabled or change_tenant or change_password):
ret['result'] = None
ret['comment'] = 'User "{0}" will be updated'.format(name)
if change_email is True:
ret['changes']['Email'] = 'Will be updated'
if change_enabled is True:
ret['changes']['Enabled'] = 'Will be True'
if change_tenant is True:
ret['changes']['Tenant'] = 'Will be added to "{0}" tenant'.format(tenant)
if change_password is True:
ret['changes']['Password'] = 'Will be updated'
return ret
ret['comment'] = 'User "{0}" is already present'.format(name)
if change_email:
__salt__['keystone.user_update'](name=name, email=email, profile=profile, **connection_args)
ret['comment'] = 'User "{0}" has been updated'.format(name)
ret['changes']['Email'] = 'Updated'
if change_enabled:
__salt__['keystone.user_update'](name=name, enabled=enabled, profile=profile, **connection_args)
ret['comment'] = 'User "{0}" has been updated'.format(name)
ret['changes']['Enabled'] = 'Now {0}'.format(enabled)
if change_tenant:
__salt__['keystone.user_update'](name=name, tenant=tenant, profile=profile, **connection_args)
ret['comment'] = 'User "{0}" has been updated'.format(name)
ret['changes']['Tenant'] = 'Added to "{0}" tenant'.format(tenant)
if change_password:
__salt__['keystone.user_password_update'](name=name, password=password, profile=profile,
**connection_args)
ret['comment'] = 'User "{0}" has been updated'.format(name)
ret['changes']['Password'] = 'Updated'
if roles:
for tenant in roles:
args = dict({'user_name': name, 'tenant_name':
tenant, 'profile': profile}, **connection_args)
tenant_roles = __salt__['keystone.user_role_list'](**args)
for role in roles[tenant]:
if role not in tenant_roles:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'User roles "{0}" will be updated'.format(name)
return ret
addargs = dict({'user': name, 'role': role,
'tenant': tenant,
'profile': profile},
**connection_args)
newrole = __salt__['keystone.user_role_add'](**addargs)
if 'roles' in ret['changes']:
ret['changes']['roles'].append(newrole)
else:
ret['changes']['roles'] = [newrole]
roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
for role in roles_to_remove:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'User roles "{0}" will be updated'.format(name)
return ret
addargs = dict({'user': name, 'role': role,
'tenant': tenant,
'profile': profile},
**connection_args)
oldrole = __salt__['keystone.user_role_remove'](**addargs)
if 'roles' in ret['changes']:
ret['changes']['roles'].append(oldrole)
else:
ret['changes']['roles'] = [oldrole]
else:
# Create that user!
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Keystone user "{0}" will be added'.format(name)
ret['changes']['User'] = 'Will be created'
return ret
__salt__['keystone.user_create'](name=name,
password=password,
email=email,
tenant_id=tenant_id,
enabled=enabled,
profile=profile,
**connection_args)
if roles:
for tenant in roles:
for role in roles[tenant]:
__salt__['keystone.user_role_add'](user=name,
role=role,
tenant=tenant,
profile=profile,
**connection_args)
ret['comment'] = 'Keystone user {0} has been added'.format(name)
ret['changes']['User'] = 'Created'
return ret | [
"def",
"user_present",
"(",
"name",
",",
"password",
",",
"email",
",",
"tenant",
"=",
"None",
",",
"enabled",
"=",
"True",
",",
"roles",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"password_reset",
"=",
"True",
",",
"project",
"=",
"None",
",",
"*",
"*",
"connection_args",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'User \"{0}\" will be updated'",
".",
"format",
"(",
"name",
")",
"}",
"_api_version",
"(",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"if",
"project",
"and",
"not",
"tenant",
":",
"tenant",
"=",
"project",
"# Validate tenant if set",
"if",
"tenant",
"is",
"not",
"None",
":",
"tenantdata",
"=",
"__salt__",
"[",
"'keystone.tenant_get'",
"]",
"(",
"name",
"=",
"tenant",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"if",
"'Error'",
"in",
"tenantdata",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Tenant / project \"{0}\" does not exist'",
".",
"format",
"(",
"tenant",
")",
"return",
"ret",
"tenant_id",
"=",
"tenantdata",
"[",
"tenant",
"]",
"[",
"'id'",
"]",
"else",
":",
"tenant_id",
"=",
"None",
"# Check if user is already present",
"user",
"=",
"__salt__",
"[",
"'keystone.user_get'",
"]",
"(",
"name",
"=",
"name",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"if",
"'Error'",
"not",
"in",
"user",
":",
"change_email",
"=",
"False",
"change_enabled",
"=",
"False",
"change_tenant",
"=",
"False",
"change_password",
"=",
"False",
"if",
"user",
"[",
"name",
"]",
".",
"get",
"(",
"'email'",
",",
"None",
")",
"!=",
"email",
":",
"change_email",
"=",
"True",
"if",
"user",
"[",
"name",
"]",
".",
"get",
"(",
"'enabled'",
",",
"None",
")",
"!=",
"enabled",
":",
"change_enabled",
"=",
"True",
"if",
"tenant",
"and",
"(",
"_TENANT_ID",
"not",
"in",
"user",
"[",
"name",
"]",
"or",
"user",
"[",
"name",
"]",
".",
"get",
"(",
"_TENANT_ID",
",",
"None",
")",
"!=",
"tenant_id",
")",
":",
"change_tenant",
"=",
"True",
"if",
"(",
"password_reset",
"is",
"True",
"and",
"not",
"__salt__",
"[",
"'keystone.user_verify_password'",
"]",
"(",
"name",
"=",
"name",
",",
"password",
"=",
"password",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
")",
":",
"change_password",
"=",
"True",
"if",
"__opts__",
".",
"get",
"(",
"'test'",
")",
"and",
"(",
"change_email",
"or",
"change_enabled",
"or",
"change_tenant",
"or",
"change_password",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" will be updated'",
".",
"format",
"(",
"name",
")",
"if",
"change_email",
"is",
"True",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'Email'",
"]",
"=",
"'Will be updated'",
"if",
"change_enabled",
"is",
"True",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'Enabled'",
"]",
"=",
"'Will be True'",
"if",
"change_tenant",
"is",
"True",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'Tenant'",
"]",
"=",
"'Will be added to \"{0}\" tenant'",
".",
"format",
"(",
"tenant",
")",
"if",
"change_password",
"is",
"True",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'Password'",
"]",
"=",
"'Will be updated'",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" is already present'",
".",
"format",
"(",
"name",
")",
"if",
"change_email",
":",
"__salt__",
"[",
"'keystone.user_update'",
"]",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"email",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" has been updated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'Email'",
"]",
"=",
"'Updated'",
"if",
"change_enabled",
":",
"__salt__",
"[",
"'keystone.user_update'",
"]",
"(",
"name",
"=",
"name",
",",
"enabled",
"=",
"enabled",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" has been updated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'Enabled'",
"]",
"=",
"'Now {0}'",
".",
"format",
"(",
"enabled",
")",
"if",
"change_tenant",
":",
"__salt__",
"[",
"'keystone.user_update'",
"]",
"(",
"name",
"=",
"name",
",",
"tenant",
"=",
"tenant",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" has been updated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'Tenant'",
"]",
"=",
"'Added to \"{0}\" tenant'",
".",
"format",
"(",
"tenant",
")",
"if",
"change_password",
":",
"__salt__",
"[",
"'keystone.user_password_update'",
"]",
"(",
"name",
"=",
"name",
",",
"password",
"=",
"password",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'User \"{0}\" has been updated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'Password'",
"]",
"=",
"'Updated'",
"if",
"roles",
":",
"for",
"tenant",
"in",
"roles",
":",
"args",
"=",
"dict",
"(",
"{",
"'user_name'",
":",
"name",
",",
"'tenant_name'",
":",
"tenant",
",",
"'profile'",
":",
"profile",
"}",
",",
"*",
"*",
"connection_args",
")",
"tenant_roles",
"=",
"__salt__",
"[",
"'keystone.user_role_list'",
"]",
"(",
"*",
"*",
"args",
")",
"for",
"role",
"in",
"roles",
"[",
"tenant",
"]",
":",
"if",
"role",
"not",
"in",
"tenant_roles",
":",
"if",
"__opts__",
".",
"get",
"(",
"'test'",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'User roles \"{0}\" will been updated'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"addargs",
"=",
"dict",
"(",
"{",
"'user'",
":",
"name",
",",
"'role'",
":",
"role",
",",
"'tenant'",
":",
"tenant",
",",
"'profile'",
":",
"profile",
"}",
",",
"*",
"*",
"connection_args",
")",
"newrole",
"=",
"__salt__",
"[",
"'keystone.user_role_add'",
"]",
"(",
"*",
"*",
"addargs",
")",
"if",
"'roles'",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
".",
"append",
"(",
"newrole",
")",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"=",
"[",
"newrole",
"]",
"roles_to_remove",
"=",
"list",
"(",
"set",
"(",
"tenant_roles",
")",
"-",
"set",
"(",
"roles",
"[",
"tenant",
"]",
")",
")",
"for",
"role",
"in",
"roles_to_remove",
":",
"if",
"__opts__",
".",
"get",
"(",
"'test'",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'User roles \"{0}\" will been updated'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"addargs",
"=",
"dict",
"(",
"{",
"'user'",
":",
"name",
",",
"'role'",
":",
"role",
",",
"'tenant'",
":",
"tenant",
",",
"'profile'",
":",
"profile",
"}",
",",
"*",
"*",
"connection_args",
")",
"oldrole",
"=",
"__salt__",
"[",
"'keystone.user_role_remove'",
"]",
"(",
"*",
"*",
"addargs",
")",
"if",
"'roles'",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
".",
"append",
"(",
"oldrole",
")",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"=",
"[",
"oldrole",
"]",
"else",
":",
"# Create that user!",
"if",
"__opts__",
".",
"get",
"(",
"'test'",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Keystone user \"{0}\" will be added'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'User'",
"]",
"=",
"'Will be created'",
"return",
"ret",
"__salt__",
"[",
"'keystone.user_create'",
"]",
"(",
"name",
"=",
"name",
",",
"password",
"=",
"password",
",",
"email",
"=",
"email",
",",
"tenant_id",
"=",
"tenant_id",
",",
"enabled",
"=",
"enabled",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"if",
"roles",
":",
"for",
"tenant",
"in",
"roles",
":",
"for",
"role",
"in",
"roles",
"[",
"tenant",
"]",
":",
"__salt__",
"[",
"'keystone.user_role_add'",
"]",
"(",
"user",
"=",
"name",
",",
"role",
"=",
"role",
",",
"tenant",
"=",
"tenant",
",",
"profile",
"=",
"profile",
",",
"*",
"*",
"connection_args",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'Keystone user {0} has been added'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'User'",
"]",
"=",
"'Created'",
"return",
"ret"
] | Ensure that the keystone user is present with the specified properties.
name
The name of the user to manage
password
The password to use for this user.
.. note::
If the user already exists and a different password was set for
the user than the one specified here, the password for the user
will be updated. Please set the ``password_reset`` option to
``False`` if this is not the desired behavior.
password_reset
Whether or not to reset password after initial set. Defaults to
``True``.
email
The email address for this user
tenant
The tenant (name) for this user
project
The project (name) for this user (overrides tenant in api v3)
enabled
Availability state for this user
roles
The roles the user should have under given tenants.
Passed as a dictionary mapping tenant names to a list
of roles in this tenant, i.e.::
roles:
admin: # tenant
- admin # role
service:
- admin
- Member | [
"Ensure",
"that",
"the",
"keystone",
"user",
"is",
"present",
"with",
"the",
"specified",
"properties",
"."
] | python | train |
anthill/koala | koala/reader.py | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L231-L244 | def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | [
"def",
"read_rels",
"(",
"archive",
")",
":",
"xml_source",
"=",
"archive",
".",
"read",
"(",
"ARC_WORKBOOK_RELS",
")",
"tree",
"=",
"fromstring",
"(",
"xml_source",
")",
"for",
"element",
"in",
"safe_iterator",
"(",
"tree",
",",
"'{%s}Relationship'",
"%",
"PKG_REL_NS",
")",
":",
"rId",
"=",
"element",
".",
"get",
"(",
"'Id'",
")",
"pth",
"=",
"element",
".",
"get",
"(",
"\"Target\"",
")",
"typ",
"=",
"element",
".",
"get",
"(",
"'Type'",
")",
"# normalise path",
"if",
"pth",
".",
"startswith",
"(",
"\"/xl\"",
")",
":",
"pth",
"=",
"pth",
".",
"replace",
"(",
"\"/xl\"",
",",
"\"xl\"",
")",
"elif",
"not",
"pth",
".",
"startswith",
"(",
"\"xl\"",
")",
"and",
"not",
"pth",
".",
"startswith",
"(",
"\"..\"",
")",
":",
"pth",
"=",
"\"xl/\"",
"+",
"pth",
"yield",
"rId",
",",
"{",
"'path'",
":",
"pth",
",",
"'type'",
":",
"typ",
"}"
] | Read relationships for a workbook | [
"Read",
"relationships",
"for",
"a",
"workbook"
] | python | train |
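Since an .xlsx workbook is an ordinary zip archive, the generator can be driven directly from zipfile; each yielded pair is an rId string and a dict holding the normalised part path and relationship type. A hedged sketch:

from zipfile import ZipFile
from koala.reader import read_rels   # module shown above

archive = ZipFile('workbook.xlsx')   # any .xlsx file is a zip archive
for r_id, rel in read_rels(archive):
    print(r_id, rel['type'], rel['path'])
# e.g. rId1 http://schemas.openxmlformats.org/.../worksheet xl/worksheets/sheet1.xml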
bitprophet/ssh | ssh/file.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/file.py#L391-L429 | def _set_mode(self, mode='r', bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
"""
# set bufsize in any event, because it's used for readline().
self._bufsize = self._DEFAULT_BUFSIZE
if bufsize < 0:
# do no buffering by default, because otherwise writes will get
# buffered in a way that will probably confuse people.
bufsize = 0
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= self.FLAG_BUFFERED
self._flags &= ~self.FLAG_LINE_BUFFERED
elif bufsize == 0:
# unbuffered
self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
if ('r' in mode) or ('+' in mode):
self._flags |= self.FLAG_READ
if ('w' in mode) or ('+' in mode):
self._flags |= self.FLAG_WRITE
if ('a' in mode):
self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if ('b' in mode):
self._flags |= self.FLAG_BINARY
if ('U' in mode):
self._flags |= self.FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None | [
"def",
"_set_mode",
"(",
"self",
",",
"mode",
"=",
"'r'",
",",
"bufsize",
"=",
"-",
"1",
")",
":",
"# set bufsize in any event, because it's used for readline().",
"self",
".",
"_bufsize",
"=",
"self",
".",
"_DEFAULT_BUFSIZE",
"if",
"bufsize",
"<",
"0",
":",
"# do no buffering by default, because otherwise writes will get",
"# buffered in a way that will probably confuse people.",
"bufsize",
"=",
"0",
"if",
"bufsize",
"==",
"1",
":",
"# apparently, line buffering only affects writes. reads are only",
"# buffered if you call readline (directly or indirectly: iterating",
"# over a file will indirectly call readline).",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_BUFFERED",
"|",
"self",
".",
"FLAG_LINE_BUFFERED",
"elif",
"bufsize",
">",
"1",
":",
"self",
".",
"_bufsize",
"=",
"bufsize",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_BUFFERED",
"self",
".",
"_flags",
"&=",
"~",
"self",
".",
"FLAG_LINE_BUFFERED",
"elif",
"bufsize",
"==",
"0",
":",
"# unbuffered",
"self",
".",
"_flags",
"&=",
"~",
"(",
"self",
".",
"FLAG_BUFFERED",
"|",
"self",
".",
"FLAG_LINE_BUFFERED",
")",
"if",
"(",
"'r'",
"in",
"mode",
")",
"or",
"(",
"'+'",
"in",
"mode",
")",
":",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_READ",
"if",
"(",
"'w'",
"in",
"mode",
")",
"or",
"(",
"'+'",
"in",
"mode",
")",
":",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_WRITE",
"if",
"(",
"'a'",
"in",
"mode",
")",
":",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_WRITE",
"|",
"self",
".",
"FLAG_APPEND",
"self",
".",
"_size",
"=",
"self",
".",
"_get_size",
"(",
")",
"self",
".",
"_pos",
"=",
"self",
".",
"_realpos",
"=",
"self",
".",
"_size",
"if",
"(",
"'b'",
"in",
"mode",
")",
":",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_BINARY",
"if",
"(",
"'U'",
"in",
"mode",
")",
":",
"self",
".",
"_flags",
"|=",
"self",
".",
"FLAG_UNIVERSAL_NEWLINE",
"# built-in file objects have this attribute to store which kinds of",
"# line terminations they've seen:",
"# <http://www.python.org/doc/current/lib/built-in-funcs.html>",
"self",
".",
"newlines",
"=",
"None"
] | Subclasses call this method to initialize the BufferedFile. | [
"Subclasses",
"call",
"this",
"method",
"to",
"initialize",
"the",
"BufferedFile",
"."
] | python | train |
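The mode-string handling reduces to a few flag operations; a self-contained sketch with illustrative flag values (the real constants are class attributes of BufferedFile):

FLAG_READ, FLAG_WRITE, FLAG_APPEND = 0x1, 0x2, 0x4   # illustrative values
FLAG_BINARY = 0x10

def flags_for_mode(mode):
    flags = 0
    if 'r' in mode or '+' in mode:
        flags |= FLAG_READ
    if 'w' in mode or '+' in mode:
        flags |= FLAG_WRITE
    if 'a' in mode:
        flags |= FLAG_WRITE | FLAG_APPEND
    if 'b' in mode:
        flags |= FLAG_BINARY
    return flags

assert flags_for_mode('r+b') == FLAG_READ | FLAG_WRITE | FLAG_BINARY
assert flags_for_mode('a') == FLAG_WRITE | FLAG_APPEND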
zhelev/python-afsapi | afsapi/__init__.py | https://github.com/zhelev/python-afsapi/blob/bb1990cf1460ae42f2dde75f2291625ddac2c0e4/afsapi/__init__.py#L228-L232 | def set_power(self, value=False):
"""Power on or off the device."""
power = (yield from self.handle_set(
self.API.get('power'), int(value)))
return bool(power) | [
"def",
"set_power",
"(",
"self",
",",
"value",
"=",
"False",
")",
":",
"power",
"=",
"(",
"yield",
"from",
"self",
".",
"handle_set",
"(",
"self",
".",
"API",
".",
"get",
"(",
"'power'",
")",
",",
"int",
"(",
"value",
")",
")",
")",
"return",
"bool",
"(",
"power",
")"
] | Power on or off the device. | [
"Power",
"on",
"or",
"off",
"the",
"device",
"."
] | python | valid |
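set_power is a coroutine, so it must be awaited inside an event loop; a hedged sketch (the class name, constructor arguments and device URL are illustrative, not taken from this record):

import asyncio
from afsapi import AFSAPI   # assumed public entry point of the package

async def main():
    fs = AFSAPI('http://192.168.1.50:80/device', 1234)   # URL and PIN are illustrative
    print(await fs.set_power(True))   # True once the device reports power on

asyncio.get_event_loop().run_until_complete(main())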
mehmetg/streak_client | streak_client/streak_client.py | https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L561-L583 | def _create_field(self, uri , name, field_type, **kwargs):
'''Creates a field with the provided attributes.
Args:
uri base uri for the field (pipeline or box uri)
name required name string
field_type required type string [TEXT_INPUT, DATE or PERSON]
kwargs {}
return (status code, field dict)
'''
#req sanity check
if not (name and (field_type in ['TEXT_INPUT', 'DATE', 'PERSON'])):
return requests.codes.bad_request, {'success' : 'False',
'error': 'name needs to be provided and field_type needs to be \'TEXT_INPUT\', \'DATE\' or \'PERSON\''}
kwargs.update({'name':name, 'type':field_type})
new_box = StreakField(**kwargs)
#print(new_pl.attributes)
#print(new_pl.to_dict())
#raw_input()
code, data = self._req('put', uri, new_box.to_dict(rw = True))
return code, data | [
"def",
"_create_field",
"(",
"self",
",",
"uri",
",",
"name",
",",
"field_type",
",",
"*",
"*",
"kwargs",
")",
":",
"#req sanity check",
"if",
"not",
"(",
"name",
"and",
"(",
"field_type",
"in",
"[",
"'TEXT_INPUT'",
",",
"'DATE'",
",",
"'PERSON'",
"]",
")",
")",
":",
"return",
"requests",
".",
"codes",
".",
"bad_request",
",",
"{",
"'success'",
":",
"'False'",
",",
"'error'",
":",
"'name needs to be provided and field_type needs to be \\'TEXT_INPUT\\', \\'DATE\\' or \\'PERSON\\''",
"}",
"kwargs",
".",
"update",
"(",
"{",
"'name'",
":",
"name",
",",
"'type'",
":",
"field_type",
"}",
")",
"new_box",
"=",
"StreakField",
"(",
"*",
"*",
"kwargs",
")",
"#print(new_pl.attributes)",
"#print(new_pl.to_dict())",
"#raw_input()",
"code",
",",
"data",
"=",
"self",
".",
"_req",
"(",
"'put'",
",",
"uri",
",",
"new_box",
".",
"to_dict",
"(",
"rw",
"=",
"True",
")",
")",
"return",
"code",
",",
"data"
] | Creates a field with the provided attributes.
Args:
uri base uri for the field (pipeline or box uri)
name required name string
field_type required type string [TEXT_INPUT, DATE or PERSON]
kwargs {}
return (status code, field dict) | [
"Creates",
"a",
"field",
"with",
"the",
"provided",
"attributes",
".",
"Args",
":",
"uri",
"base",
"uri",
"for",
"the",
"field",
"(",
"pipeline",
"or",
"box",
"uri",
")",
"name",
"required",
"name",
"string",
"field_type",
"required",
"type",
"string",
"[",
"TEXT_INPUT",
"DATE",
"or",
"PERSON",
"]",
"kwargs",
"{}",
"return",
"(",
"status",
"code",
"field",
"dict",
")"
] | python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9423-L9440 | def pcpool(name, cvals):
"""
This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str
"""
name = stypes.stringToCharP(name)
lenvals = ctypes.c_int(len(max(cvals, key=len)) + 1)
n = ctypes.c_int(len(cvals))
cvals = stypes.listToCharArray(cvals, lenvals, n)
libspice.pcpool_c(name, n, lenvals, cvals) | [
"def",
"pcpool",
"(",
"name",
",",
"cvals",
")",
":",
"name",
"=",
"stypes",
".",
"stringToCharP",
"(",
"name",
")",
"lenvals",
"=",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"max",
"(",
"cvals",
",",
"key",
"=",
"len",
")",
")",
"+",
"1",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"cvals",
")",
")",
"cvals",
"=",
"stypes",
".",
"listToCharArray",
"(",
"cvals",
",",
"lenvals",
",",
"n",
")",
"libspice",
".",
"pcpool_c",
"(",
"name",
",",
"n",
",",
"lenvals",
",",
"cvals",
")"
] | This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str | [
"This",
"entry",
"point",
"provides",
"toolkit",
"programmers",
"a",
"method",
"for",
"programmatically",
"inserting",
"character",
"data",
"into",
"the",
"kernel",
"pool",
"."
] | python | train |
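A short round trip: insert strings with pcpool, then read them back with gcpool (assuming gcpool's default output-string-length argument is large enough for these values):

import spiceypy as spice

spice.pcpool('MY_STRINGS', ['alpha', 'beta', 'gamma'])
# read the values back out of the kernel pool
print(spice.gcpool('MY_STRINGS', 0, 10))   # ['alpha', 'beta', 'gamma']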
hydraplatform/hydra-base | hydra_base/lib/data.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/data.py#L406-L462 | def update_dataset(dataset_id, name, data_type, val, unit_id, metadata={}, flush=True, **kwargs):
"""
Update an existing dataset
"""
if dataset_id is None:
raise HydraError("Dataset must have an ID to be updated.")
user_id = kwargs.get('user_id')
dataset = db.DBSession.query(Dataset).filter(Dataset.id==dataset_id).one()
#This dataset has been seen before, so it may be attached
#to other scenarios, which may be locked. If they are locked, we must
#not change their data, so new data must be created for the unlocked scenarios
locked_scenarios = []
unlocked_scenarios = []
for dataset_rs in dataset.resourcescenarios:
if dataset_rs.scenario.locked == 'Y':
locked_scenarios.append(dataset_rs)
else:
unlocked_scenarios.append(dataset_rs)
#Are any of these scenarios locked?
if len(locked_scenarios) > 0:
#If so, create a new dataset and assign to all unlocked datasets.
dataset = add_dataset(data_type,
val,
unit_id,
metadata=metadata,
name=name,
user_id=kwargs['user_id'])
for unlocked_rs in unlocked_scenarios:
unlocked_rs.dataset = dataset
else:
dataset.type = data_type
dataset.value = val
dataset.set_metadata(metadata)
dataset.unit_id = unit_id
dataset.name = name
dataset.created_by = kwargs['user_id']
dataset.hash = dataset.set_hash()
#Is there a dataset in the DB already which is identical to the updated dataset?
existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash==dataset.hash, Dataset.id != dataset.id).first()
if existing_dataset is not None and existing_dataset.check_user(user_id):
log.warning("An identical dataset %s has been found to dataset %s."
" Deleting dataset and returning dataset %s",
existing_dataset.id, dataset.id, existing_dataset.id)
db.DBSession.delete(dataset)
dataset = existing_dataset
if flush==True:
db.DBSession.flush()
return dataset | [
"def",
"update_dataset",
"(",
"dataset_id",
",",
"name",
",",
"data_type",
",",
"val",
",",
"unit_id",
",",
"metadata",
"=",
"{",
"}",
",",
"flush",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"dataset_id",
"is",
"None",
":",
"raise",
"HydraError",
"(",
"\"Dataset must have an ID to be updated.\"",
")",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"dataset",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Dataset",
")",
".",
"filter",
"(",
"Dataset",
".",
"id",
"==",
"dataset_id",
")",
".",
"one",
"(",
")",
"#This dataset been seen before, so it may be attached",
"#to other scenarios, which may be locked. If they are locked, we must",
"#not change their data, so new data must be created for the unlocked scenarios",
"locked_scenarios",
"=",
"[",
"]",
"unlocked_scenarios",
"=",
"[",
"]",
"for",
"dataset_rs",
"in",
"dataset",
".",
"resourcescenarios",
":",
"if",
"dataset_rs",
".",
"scenario",
".",
"locked",
"==",
"'Y'",
":",
"locked_scenarios",
".",
"append",
"(",
"dataset_rs",
")",
"else",
":",
"unlocked_scenarios",
".",
"append",
"(",
"dataset_rs",
")",
"#Are any of these scenarios locked?",
"if",
"len",
"(",
"locked_scenarios",
")",
">",
"0",
":",
"#If so, create a new dataset and assign to all unlocked datasets.",
"dataset",
"=",
"add_dataset",
"(",
"data_type",
",",
"val",
",",
"unit_id",
",",
"metadata",
"=",
"metadata",
",",
"name",
"=",
"name",
",",
"user_id",
"=",
"kwargs",
"[",
"'user_id'",
"]",
")",
"for",
"unlocked_rs",
"in",
"unlocked_scenarios",
":",
"unlocked_rs",
".",
"dataset",
"=",
"dataset",
"else",
":",
"dataset",
".",
"type",
"=",
"data_type",
"dataset",
".",
"value",
"=",
"val",
"dataset",
".",
"set_metadata",
"(",
"metadata",
")",
"dataset",
".",
"unit_id",
"=",
"unit_id",
"dataset",
".",
"name",
"=",
"name",
"dataset",
".",
"created_by",
"=",
"kwargs",
"[",
"'user_id'",
"]",
"dataset",
".",
"hash",
"=",
"dataset",
".",
"set_hash",
"(",
")",
"#Is there a dataset in the DB already which is identical to the updated dataset?",
"existing_dataset",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Dataset",
")",
".",
"filter",
"(",
"Dataset",
".",
"hash",
"==",
"dataset",
".",
"hash",
",",
"Dataset",
".",
"id",
"!=",
"dataset",
".",
"id",
")",
".",
"first",
"(",
")",
"if",
"existing_dataset",
"is",
"not",
"None",
"and",
"existing_dataset",
".",
"check_user",
"(",
"user_id",
")",
":",
"log",
".",
"warning",
"(",
"\"An identical dataset %s has been found to dataset %s.\"",
"\" Deleting dataset and returning dataset %s\"",
",",
"existing_dataset",
".",
"id",
",",
"dataset",
".",
"id",
",",
"existing_dataset",
".",
"id",
")",
"db",
".",
"DBSession",
".",
"delete",
"(",
"dataset",
")",
"dataset",
"=",
"existing_dataset",
"if",
"flush",
"==",
"True",
":",
"db",
".",
"DBSession",
".",
"flush",
"(",
")",
"return",
"dataset"
] | Update an existing dataset | [
"Update",
"an",
"existing",
"dataset"
] | python | train |
refinery29/chassis | chassis/util/encoders.py | https://github.com/refinery29/chassis/blob/1238d5214cbb8f3e1fe7c0dc2fa72f45bf085192/chassis/util/encoders.py#L9-L18 | def default(self, obj): # pylint: disable=method-hidden
"""Use the default behavior unless the object to be encoded has a
`strftime` attribute."""
if hasattr(obj, 'strftime'):
return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
elif hasattr(obj, 'get_public_dict'):
return obj.get_public_dict()
else:
return json.JSONEncoder.default(self, obj) | [
"def",
"default",
"(",
"self",
",",
"obj",
")",
":",
"# pylint: disable=method-hidden",
"if",
"hasattr",
"(",
"obj",
",",
"'strftime'",
")",
":",
"return",
"obj",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'get_public_dict'",
")",
":",
"return",
"obj",
".",
"get_public_dict",
"(",
")",
"else",
":",
"return",
"json",
".",
"JSONEncoder",
".",
"default",
"(",
"self",
",",
"obj",
")"
] | Use the default behavior unless the object to be encoded has a
`strftime` attribute. | [
"Use",
"the",
"default",
"behavior",
"unless",
"the",
"object",
"to",
"be",
"encoded",
"has",
"a",
"strftime",
"attribute",
"."
] | python | train |
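The encoder is meant to be passed to json.dumps via cls; anything with a strftime method (dates, datetimes) becomes an ISO-like string, and objects exposing get_public_dict serialise to that dict. A self-contained sketch:

import datetime
import json

class Encoder(json.JSONEncoder):
    def default(self, obj):   # same logic as the method above
        if hasattr(obj, 'strftime'):
            return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
        elif hasattr(obj, 'get_public_dict'):
            return obj.get_public_dict()
        return json.JSONEncoder.default(self, obj)

payload = {'created': datetime.datetime(2016, 1, 2, 3, 4, 5)}
print(json.dumps(payload, cls=Encoder))
# {"created": "2016-01-02T03:04:05Z"}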
hyperledger/indy-sdk | wrappers/python/indy/crypto.py | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/crypto.py#L453-L501 | async def unpack_message(wallet_handle: int,
jwe: bytes) -> bytes:
"""
Unpacks a JWE-like formatted message outputted by pack_message (Experimental)
#Params
command_handle: command handle to map callback to user context.
wallet_handle: wallet handler (created by open_wallet)
message: the output of a pack message
#Returns -> See HIPE 0028 for details
(Authcrypt mode)
{
"message": <decrypted message>,
"recipient_verkey": <recipient verkey used to decrypt>,
"sender_verkey": <sender verkey used to encrypt>
}
(Anoncrypt mode)
{
"message": <decrypted message>,
"recipient_verkey": <recipient verkey used to decrypt>,
}
"""
logger = logging.getLogger(__name__)
logger.debug("unpack_message: >>> wallet_handle: %r, jwe: %r",
wallet_handle,
jwe)
def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
return bytes(arr_ptr[:arr_len]),
if not hasattr(unpack_message, "cb"):
logger.debug("unpack_message: Creating callback")
unpack_message.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
c_wallet_handle = c_int32(wallet_handle)
c_jwe_len = c_uint32(len(jwe))
res = await do_call('indy_unpack_message',
c_wallet_handle,
jwe,
c_jwe_len,
unpack_message.cb)
logger.debug("unpack_message: <<< res: %r", res)
return res | [
"async",
"def",
"unpack_message",
"(",
"wallet_handle",
":",
"int",
",",
"jwe",
":",
"bytes",
")",
"->",
"bytes",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"unpack_message: >>> wallet_handle: %r, jwe: %r\"",
",",
"wallet_handle",
",",
"jwe",
")",
"def",
"transform_cb",
"(",
"arr_ptr",
":",
"POINTER",
"(",
"c_uint8",
")",
",",
"arr_len",
":",
"c_uint32",
")",
":",
"return",
"bytes",
"(",
"arr_ptr",
"[",
":",
"arr_len",
"]",
")",
",",
"if",
"not",
"hasattr",
"(",
"unpack_message",
",",
"\"cb\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"unpack_message: Creating callback\"",
")",
"unpack_message",
".",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_int32",
",",
"c_int32",
",",
"POINTER",
"(",
"c_uint8",
")",
",",
"c_uint32",
")",
",",
"transform_cb",
")",
"c_wallet_handle",
"=",
"c_int32",
"(",
"wallet_handle",
")",
"c_jwe_len",
"=",
"c_uint32",
"(",
"len",
"(",
"jwe",
")",
")",
"res",
"=",
"await",
"do_call",
"(",
"'indy_unpack_message'",
",",
"c_wallet_handle",
",",
"jwe",
",",
"c_jwe_len",
",",
"unpack_message",
".",
"cb",
")",
"logger",
".",
"debug",
"(",
"\"unpack_message: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] | Unpacks a JWE-like formatted message outputted by pack_message (Experimental)
#Params
command_handle: command handle to map callback to user context.
wallet_handle: wallet handler (created by open_wallet)
message: the output of a pack message
#Returns -> See HIPE 0028 for details
(Authcrypt mode)
{
"message": <decrypted message>,
"recipient_verkey": <recipient verkey used to decrypt>,
"sender_verkey": <sender verkey used to encrypt>
}
(Anoncrypt mode)
{
"message": <decrypted message>,
"recipient_verkey": <recipient verkey used to decrypt>,
} | [
"Unpacks",
"a",
"JWE",
"-",
"like",
"formatted",
"message",
"outputted",
"by",
"pack_message",
"(",
"Experimental",
")"
] | python | train |
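The returned bytes hold the JSON structure described in the docstring; a hedged usage sketch (the wallet handle comes from wallet.open_wallet, and libindy must be installed for the python3-indy bindings to load):

import json
from indy import crypto

async def read_packed(wallet_handle, jwe_bytes):
    unpacked = await crypto.unpack_message(wallet_handle, jwe_bytes)
    msg = json.loads(unpacked.decode('utf-8'))
    return msg['message'], msg['recipient_verkey']   # 'sender_verkey' only in authcrypt mode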
Jajcus/pyxmpp2 | pyxmpp2/ext/muc/muc.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L909-L921 | def forget(self,rs):
"""
Remove a room from the list of managed rooms.
:Parameters:
- `rs`: the state object of the room.
:Types:
- `rs`: `MucRoomState`
"""
try:
del self.rooms[rs.room_jid.bare().as_unicode()]
except KeyError:
pass | [
"def",
"forget",
"(",
"self",
",",
"rs",
")",
":",
"try",
":",
"del",
"self",
".",
"rooms",
"[",
"rs",
".",
"room_jid",
".",
"bare",
"(",
")",
".",
"as_unicode",
"(",
")",
"]",
"except",
"KeyError",
":",
"pass"
] | Remove a room from the list of managed rooms.
:Parameters:
- `rs`: the state object of the room.
:Types:
- `rs`: `MucRoomState` | [
"Remove",
"a",
"room",
"from",
"the",
"list",
"of",
"managed",
"rooms",
"."
] | python | valid |