repo (string, lengths 7-54) | path (string, lengths 4-192) | url (string, lengths 87-284) | code (string, lengths 78-104k) | code_tokens (sequence) | docstring (string, lengths 1-46.9k) | docstring_tokens (sequence) | language (string, 1 class) | partition (string, 3 classes) |
---|---|---|---|---|---|---|---|---|
Yelp/detect-secrets | detect_secrets/plugins/common/initialize.py | https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/plugins/common/initialize.py#L195-L205 | def _get_mapping_from_secret_type_to_class_name():
"""Returns secret_type => plugin classname"""
mapping = {}
for key, value in globals().items():
try:
if issubclass(value, BasePlugin) and value != BasePlugin:
mapping[value.secret_type] = key
except TypeError:
pass
return mapping | [
"def",
"_get_mapping_from_secret_type_to_class_name",
"(",
")",
":",
"mapping",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"globals",
"(",
")",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"issubclass",
"(",
"value",
",",
"BasePlugin",
")",
"and",
"value",
"!=",
"BasePlugin",
":",
"mapping",
"[",
"value",
".",
"secret_type",
"]",
"=",
"key",
"except",
"TypeError",
":",
"pass",
"return",
"mapping"
] | Returns secret_type => plugin classname | [
"Returns",
"secret_type",
"=",
">",
"plugin",
"classname"
] | python | train |
phoebe-project/phoebe2 | phoebe/backend/mesh_wd.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/mesh_wd.py#L65-L92 | def project_onto_potential(r, pot_name, *args):
"""
TODO: add documentation
"""
pot = globals()[pot_name]
dpdx = globals()['d%sdx'%(pot_name)]
dpdy = globals()['d%sdy'%(pot_name)]
dpdz = globals()['d%sdz'%(pot_name)]
dpdr = globals()['d%sdr'%(pot_name)]
n_iter = 0
rmag, rmag0 = np.sqrt((r*r).sum()), 0
lam, nu = r[0]/rmag, r[2]/rmag
dc = np.array((lam, np.sqrt(1-lam*lam-nu*nu), nu)) # direction cosines -- must not change during reprojection
D, q, F, p0 = args
while np.abs(rmag-rmag0) > 1e-12 and n_iter < 100:
rmag0 = rmag
rmag = rmag0 - pot(rmag0*dc, *args)/dpdr(rmag0*dc, *args[:-1])
n_iter += 1
if n_iter == 100:
logger.warning('projection did not converge')
r = rmag*dc
return MeshVertex(r, dpdx, dpdy, dpdz, *args[:-1]) | [
"def",
"project_onto_potential",
"(",
"r",
",",
"pot_name",
",",
"*",
"args",
")",
":",
"pot",
"=",
"globals",
"(",
")",
"[",
"pot_name",
"]",
"dpdx",
"=",
"globals",
"(",
")",
"[",
"'d%sdx'",
"%",
"(",
"pot_name",
")",
"]",
"dpdy",
"=",
"globals",
"(",
")",
"[",
"'d%sdy'",
"%",
"(",
"pot_name",
")",
"]",
"dpdz",
"=",
"globals",
"(",
")",
"[",
"'d%sdz'",
"%",
"(",
"pot_name",
")",
"]",
"dpdr",
"=",
"globals",
"(",
")",
"[",
"'d%sdr'",
"%",
"(",
"pot_name",
")",
"]",
"n_iter",
"=",
"0",
"rmag",
",",
"rmag0",
"=",
"np",
".",
"sqrt",
"(",
"(",
"r",
"*",
"r",
")",
".",
"sum",
"(",
")",
")",
",",
"0",
"lam",
",",
"nu",
"=",
"r",
"[",
"0",
"]",
"/",
"rmag",
",",
"r",
"[",
"2",
"]",
"/",
"rmag",
"dc",
"=",
"np",
".",
"array",
"(",
"(",
"lam",
",",
"np",
".",
"sqrt",
"(",
"1",
"-",
"lam",
"*",
"lam",
"-",
"nu",
"*",
"nu",
")",
",",
"nu",
")",
")",
"# direction cosines -- must not change during reprojection",
"D",
",",
"q",
",",
"F",
",",
"p0",
"=",
"args",
"while",
"np",
".",
"abs",
"(",
"rmag",
"-",
"rmag0",
")",
">",
"1e-12",
"and",
"n_iter",
"<",
"100",
":",
"rmag0",
"=",
"rmag",
"rmag",
"=",
"rmag0",
"-",
"pot",
"(",
"rmag0",
"*",
"dc",
",",
"*",
"args",
")",
"/",
"dpdr",
"(",
"rmag0",
"*",
"dc",
",",
"*",
"args",
"[",
":",
"-",
"1",
"]",
")",
"n_iter",
"+=",
"1",
"if",
"n_iter",
"==",
"100",
":",
"logger",
".",
"warning",
"(",
"'projection did not converge'",
")",
"r",
"=",
"rmag",
"*",
"dc",
"return",
"MeshVertex",
"(",
"r",
",",
"dpdx",
",",
"dpdy",
",",
"dpdz",
",",
"*",
"args",
"[",
":",
"-",
"1",
"]",
")"
] | TODO: add documentation | [
"TODO",
":",
"add",
"documentation"
] | python | train |
GoogleCloudPlatform/datastore-ndb-python | ndb/query.py | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1453-L1468 | def analyze(self):
"""Return a list giving the parameters required by a query."""
class MockBindings(dict):
def __contains__(self, key):
self[key] = None
return True
bindings = MockBindings()
used = {}
ancestor = self.ancestor
if isinstance(ancestor, ParameterizedThing):
ancestor = ancestor.resolve(bindings, used)
filters = self.filters
if filters is not None:
filters = filters.resolve(bindings, used)
return sorted(used) | [
"def",
"analyze",
"(",
"self",
")",
":",
"class",
"MockBindings",
"(",
"dict",
")",
":",
"def",
"__contains__",
"(",
"self",
",",
"key",
")",
":",
"self",
"[",
"key",
"]",
"=",
"None",
"return",
"True",
"bindings",
"=",
"MockBindings",
"(",
")",
"used",
"=",
"{",
"}",
"ancestor",
"=",
"self",
".",
"ancestor",
"if",
"isinstance",
"(",
"ancestor",
",",
"ParameterizedThing",
")",
":",
"ancestor",
"=",
"ancestor",
".",
"resolve",
"(",
"bindings",
",",
"used",
")",
"filters",
"=",
"self",
".",
"filters",
"if",
"filters",
"is",
"not",
"None",
":",
"filters",
"=",
"filters",
".",
"resolve",
"(",
"bindings",
",",
"used",
")",
"return",
"sorted",
"(",
"used",
")"
] | Return a list giving the parameters required by a query. | [
"Return",
"a",
"list",
"giving",
"the",
"parameters",
"required",
"by",
"a",
"query",
"."
] | python | train |
obriencj/python-javatools | javatools/manifest.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/manifest.py#L860-L874 | def multi_path_generator(pathnames):
"""
yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can called and
iterated over to obtain the contents of the file in multiple
reads.
"""
for pathname in pathnames:
if isdir(pathname):
for entry in directory_generator(pathname):
yield entry
else:
yield pathname, file_chunk(pathname) | [
"def",
"multi_path_generator",
"(",
"pathnames",
")",
":",
"for",
"pathname",
"in",
"pathnames",
":",
"if",
"isdir",
"(",
"pathname",
")",
":",
"for",
"entry",
"in",
"directory_generator",
"(",
"pathname",
")",
":",
"yield",
"entry",
"else",
":",
"yield",
"pathname",
",",
"file_chunk",
"(",
"pathname",
")"
] | yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can called and
iterated over to obtain the contents of the file in multiple
reads. | [
"yields",
"(",
"name",
"chunkgen",
")",
"for",
"all",
"of",
"the",
"files",
"found",
"under",
"the",
"list",
"of",
"pathnames",
"given",
".",
"This",
"is",
"recursive",
"so",
"directories",
"will",
"have",
"their",
"contents",
"emitted",
".",
"chunkgen",
"is",
"a",
"function",
"that",
"can",
"called",
"and",
"iterated",
"over",
"to",
"obtain",
"the",
"contents",
"of",
"the",
"file",
"in",
"multiple",
"reads",
"."
] | python | train |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/module.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L1930-L1965 | def __add_loaded_module(self, event):
"""
Private method to automatically add new module objects from debug events.
@type event: L{Event}
@param event: Event object.
"""
lpBaseOfDll = event.get_module_base()
hFile = event.get_file_handle()
## if not self.has_module(lpBaseOfDll): # XXX this would trigger a scan
if lpBaseOfDll not in self.__moduleDict:
fileName = event.get_filename()
if not fileName:
fileName = None
if hasattr(event, 'get_start_address'):
EntryPoint = event.get_start_address()
else:
EntryPoint = None
aModule = Module(lpBaseOfDll, hFile, fileName = fileName,
EntryPoint = EntryPoint,
process = self)
self._add_module(aModule)
else:
aModule = self.get_module(lpBaseOfDll)
if not aModule.hFile and hFile not in (None, 0,
win32.INVALID_HANDLE_VALUE):
aModule.hFile = hFile
if not aModule.process:
aModule.process = self
if aModule.EntryPoint is None and \
hasattr(event, 'get_start_address'):
aModule.EntryPoint = event.get_start_address()
if not aModule.fileName:
fileName = event.get_filename()
if fileName:
aModule.fileName = fileName | [
"def",
"__add_loaded_module",
"(",
"self",
",",
"event",
")",
":",
"lpBaseOfDll",
"=",
"event",
".",
"get_module_base",
"(",
")",
"hFile",
"=",
"event",
".",
"get_file_handle",
"(",
")",
"## if not self.has_module(lpBaseOfDll): # XXX this would trigger a scan",
"if",
"lpBaseOfDll",
"not",
"in",
"self",
".",
"__moduleDict",
":",
"fileName",
"=",
"event",
".",
"get_filename",
"(",
")",
"if",
"not",
"fileName",
":",
"fileName",
"=",
"None",
"if",
"hasattr",
"(",
"event",
",",
"'get_start_address'",
")",
":",
"EntryPoint",
"=",
"event",
".",
"get_start_address",
"(",
")",
"else",
":",
"EntryPoint",
"=",
"None",
"aModule",
"=",
"Module",
"(",
"lpBaseOfDll",
",",
"hFile",
",",
"fileName",
"=",
"fileName",
",",
"EntryPoint",
"=",
"EntryPoint",
",",
"process",
"=",
"self",
")",
"self",
".",
"_add_module",
"(",
"aModule",
")",
"else",
":",
"aModule",
"=",
"self",
".",
"get_module",
"(",
"lpBaseOfDll",
")",
"if",
"not",
"aModule",
".",
"hFile",
"and",
"hFile",
"not",
"in",
"(",
"None",
",",
"0",
",",
"win32",
".",
"INVALID_HANDLE_VALUE",
")",
":",
"aModule",
".",
"hFile",
"=",
"hFile",
"if",
"not",
"aModule",
".",
"process",
":",
"aModule",
".",
"process",
"=",
"self",
"if",
"aModule",
".",
"EntryPoint",
"is",
"None",
"and",
"hasattr",
"(",
"event",
",",
"'get_start_address'",
")",
":",
"aModule",
".",
"EntryPoint",
"=",
"event",
".",
"get_start_address",
"(",
")",
"if",
"not",
"aModule",
".",
"fileName",
":",
"fileName",
"=",
"event",
".",
"get_filename",
"(",
")",
"if",
"fileName",
":",
"aModule",
".",
"fileName",
"=",
"fileName"
] | Private method to automatically add new module objects from debug events.
@type event: L{Event}
@param event: Event object. | [
"Private",
"method",
"to",
"automatically",
"add",
"new",
"module",
"objects",
"from",
"debug",
"events",
"."
] | python | train |
linnarsson-lab/loompy | loompy/loompy.py | https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L784-L806 | def permute(self, ordering: np.ndarray, axis: int) -> None:
"""
Permute the dataset along the indicated axis.
Args:
ordering (list of int): The desired order along the axis
axis (int): The axis along which to permute
Returns:
Nothing.
"""
if self._file.__contains__("tiles"):
del self._file['tiles']
ordering = list(np.array(ordering).flatten()) # Flatten the ordering, in case we got a column vector
self.layers._permute(ordering, axis=axis)
if axis == 0:
self.row_attrs._permute(ordering)
self.row_graphs._permute(ordering)
if axis == 1:
self.col_attrs._permute(ordering)
self.col_graphs._permute(ordering) | [
"def",
"permute",
"(",
"self",
",",
"ordering",
":",
"np",
".",
"ndarray",
",",
"axis",
":",
"int",
")",
"->",
"None",
":",
"if",
"self",
".",
"_file",
".",
"__contains__",
"(",
"\"tiles\"",
")",
":",
"del",
"self",
".",
"_file",
"[",
"'tiles'",
"]",
"ordering",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"ordering",
")",
".",
"flatten",
"(",
")",
")",
"# Flatten the ordering, in case we got a column vector",
"self",
".",
"layers",
".",
"_permute",
"(",
"ordering",
",",
"axis",
"=",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"self",
".",
"row_attrs",
".",
"_permute",
"(",
"ordering",
")",
"self",
".",
"row_graphs",
".",
"_permute",
"(",
"ordering",
")",
"if",
"axis",
"==",
"1",
":",
"self",
".",
"col_attrs",
".",
"_permute",
"(",
"ordering",
")",
"self",
".",
"col_graphs",
".",
"_permute",
"(",
"ordering",
")"
] | Permute the dataset along the indicated axis.
Args:
ordering (list of int): The desired order along the axis
axis (int): The axis along which to permute
Returns:
Nothing. | [
"Permute",
"the",
"dataset",
"along",
"the",
"indicated",
"axis",
"."
] | python | train |
instaloader/instaloader | instaloader/instaloader.py | https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L694-L715 | def download_saved_posts(self, max_count: int = None, fast_update: bool = False,
                             post_filter: Optional[Callable[[Post], bool]] = None) -> None:
    """Download user's saved pictures.
    :param max_count: Maximum count of pictures to download
    :param fast_update: If true, abort when first already-downloaded picture is encountered
    :param post_filter: function(post), which returns True if given picture should be downloaded
    """
    self.context.log("Retrieving saved posts...")
    count = 1
    for post in Profile.from_username(self.context, self.context.username).get_saved_posts():
        if max_count is not None and count > max_count:
            break
        if post_filter is not None and not post_filter(post):
            self.context.log("<{} skipped>".format(post), flush=True)
            continue
        self.context.log("[{:>3}] ".format(count), end=str(), flush=True)
        count += 1
        with self.context.error_catcher('Download saved posts'):
            downloaded = self.download_post(post, target=':saved')
            if fast_update and not downloaded:
                break | [
"def",
"download_saved_posts",
"(",
"self",
",",
"max_count",
":",
"int",
"=",
"None",
",",
"fast_update",
":",
"bool",
"=",
"False",
",",
"post_filter",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"Post",
"]",
",",
"bool",
"]",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"context",
".",
"log",
"(",
"\"Retrieving saved posts...\"",
")",
"count",
"=",
"1",
"for",
"post",
"in",
"Profile",
".",
"from_username",
"(",
"self",
".",
"context",
",",
"self",
".",
"context",
".",
"username",
")",
".",
"get_saved_posts",
"(",
")",
":",
"if",
"max_count",
"is",
"not",
"None",
"and",
"count",
">",
"max_count",
":",
"break",
"if",
"post_filter",
"is",
"not",
"None",
"and",
"not",
"post_filter",
"(",
"post",
")",
":",
"self",
".",
"context",
".",
"log",
"(",
"\"<{} skipped>\"",
".",
"format",
"(",
"post",
")",
",",
"flush",
"=",
"True",
")",
"continue",
"self",
".",
"context",
".",
"log",
"(",
"\"[{:>3}] \"",
".",
"format",
"(",
"count",
")",
",",
"end",
"=",
"str",
"(",
")",
",",
"flush",
"=",
"True",
")",
"count",
"+=",
"1",
"with",
"self",
".",
"context",
".",
"error_catcher",
"(",
"'Download saved posts'",
")",
":",
"downloaded",
"=",
"self",
".",
"download_post",
"(",
"post",
",",
"target",
"=",
"':saved'",
")",
"if",
"fast_update",
"and",
"not",
"downloaded",
":",
"break"
] | Download user's saved pictures.
:param max_count: Maximum count of pictures to download
:param fast_update: If true, abort when first already-downloaded picture is encountered
:param post_filter: function(post), which returns True if given picture should be downloaded | [
"Download",
"user",
"s",
"saved",
"pictures",
"."
] | python | train |
proycon/pynlpl | pynlpl/statistics.py | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/statistics.py#L91-L99 | def count(self, type, amount = 1):
"""Count a certain type. The counter will increase by the amount specified (defaults to one)"""
if self.dovalidation: type = self._validate(type)
if self._ranked: self._ranked = None
if type in self._count:
self._count[type] += amount
else:
self._count[type] = amount
self.total += amount | [
"def",
"count",
"(",
"self",
",",
"type",
",",
"amount",
"=",
"1",
")",
":",
"if",
"self",
".",
"dovalidation",
":",
"type",
"=",
"self",
".",
"_validate",
"(",
"type",
")",
"if",
"self",
".",
"_ranked",
":",
"self",
".",
"_ranked",
"=",
"None",
"if",
"type",
"in",
"self",
".",
"_count",
":",
"self",
".",
"_count",
"[",
"type",
"]",
"+=",
"amount",
"else",
":",
"self",
".",
"_count",
"[",
"type",
"]",
"=",
"amount",
"self",
".",
"total",
"+=",
"amount"
] | Count a certain type. The counter will increase by the amount specified (defaults to one) | [
"Count",
"a",
"certain",
"type",
".",
"The",
"counter",
"will",
"increase",
"by",
"the",
"amount",
"specified",
"(",
"defaults",
"to",
"one",
")"
] | python | train |
ray-project/ray | python/ray/tune/trial_runner.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial_runner.py#L322-L334 | def add_trial(self, trial):
"""Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial (Trial): Trial to queue.
"""
trial.set_verbose(self._verbose)
self._trials.append(trial)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(self, trial)
self.trial_executor.try_checkpoint_metadata(trial) | [
"def",
"add_trial",
"(",
"self",
",",
"trial",
")",
":",
"trial",
".",
"set_verbose",
"(",
"self",
".",
"_verbose",
")",
"self",
".",
"_trials",
".",
"append",
"(",
"trial",
")",
"with",
"warn_if_slow",
"(",
"\"scheduler.on_trial_add\"",
")",
":",
"self",
".",
"_scheduler_alg",
".",
"on_trial_add",
"(",
"self",
",",
"trial",
")",
"self",
".",
"trial_executor",
".",
"try_checkpoint_metadata",
"(",
"trial",
")"
] | Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial (Trial): Trial to queue. | [
"Adds",
"a",
"new",
"trial",
"to",
"this",
"TrialRunner",
"."
] | python | train |
pysal/giddy | giddy/ergodic.py | https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/ergodic.py#L62-L118 | def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
"""
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M) | [
"def",
"fmpt",
"(",
"P",
")",
":",
"P",
"=",
"np",
".",
"matrix",
"(",
"P",
")",
"k",
"=",
"P",
".",
"shape",
"[",
"0",
"]",
"A",
"=",
"np",
".",
"zeros_like",
"(",
"P",
")",
"ss",
"=",
"steady_state",
"(",
"P",
")",
".",
"reshape",
"(",
"k",
",",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"A",
"[",
":",
",",
"i",
"]",
"=",
"ss",
"A",
"=",
"A",
".",
"transpose",
"(",
")",
"I",
"=",
"np",
".",
"identity",
"(",
"k",
")",
"Z",
"=",
"la",
".",
"inv",
"(",
"I",
"-",
"P",
"+",
"A",
")",
"E",
"=",
"np",
".",
"ones_like",
"(",
"Z",
")",
"A_diag",
"=",
"np",
".",
"diag",
"(",
"A",
")",
"A_diag",
"=",
"A_diag",
"+",
"(",
"A_diag",
"==",
"0",
")",
"D",
"=",
"np",
".",
"diag",
"(",
"1.",
"/",
"A_diag",
")",
"Zdg",
"=",
"np",
".",
"diag",
"(",
"np",
".",
"diag",
"(",
"Z",
")",
")",
"M",
"=",
"(",
"I",
"-",
"Z",
"+",
"E",
"*",
"Zdg",
")",
"*",
"D",
"return",
"np",
".",
"array",
"(",
"M",
")"
] | Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`. | [
"Calculates",
"the",
"matrix",
"of",
"first",
"mean",
"passage",
"times",
"for",
"an",
"ergodic",
"transition",
"probability",
"matrix",
"."
] | python | train |
HazyResearch/pdftotree | pdftotree/utils/pdf/node.py | https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L187-L206 | def _get_cols(row_content):
"""
Counting the number columns based on the content of this row
"""
cols = []
subcell_col = []
prev_bar = None
for _coord, item in row_content:
if isinstance(item, LTTextLine):
subcell_col.append(item)
else: # bar, add column content
# When there is no content, we count a None column
if prev_bar:
bar_ranges = (prev_bar, item)
col_items = subcell_col if subcell_col else [None]
cols.extend([bar_ranges, col_items])
prev_bar = item
subcell_col = []
# Remove extra column before first bar
return cols | [
"def",
"_get_cols",
"(",
"row_content",
")",
":",
"cols",
"=",
"[",
"]",
"subcell_col",
"=",
"[",
"]",
"prev_bar",
"=",
"None",
"for",
"_coord",
",",
"item",
"in",
"row_content",
":",
"if",
"isinstance",
"(",
"item",
",",
"LTTextLine",
")",
":",
"subcell_col",
".",
"append",
"(",
"item",
")",
"else",
":",
"# bar, add column content",
"# When there is no content, we count a None column",
"if",
"prev_bar",
":",
"bar_ranges",
"=",
"(",
"prev_bar",
",",
"item",
")",
"col_items",
"=",
"subcell_col",
"if",
"subcell_col",
"else",
"[",
"None",
"]",
"cols",
".",
"extend",
"(",
"[",
"bar_ranges",
",",
"col_items",
"]",
")",
"prev_bar",
"=",
"item",
"subcell_col",
"=",
"[",
"]",
"# Remove extra column before first bar",
"return",
"cols"
] | Counting the number columns based on the content of this row | [
"Counting",
"the",
"number",
"columns",
"based",
"on",
"the",
"content",
"of",
"this",
"row"
] | python | train |
thoughtworksarts/EmoPy | EmoPy/src/neuralnets.py | https://github.com/thoughtworksarts/EmoPy/blob/a0ab97b3719ebe0a9de9bfc5adae5e46c9b77fd7/EmoPy/src/neuralnets.py#L94-L109 | def _get_base_model(self):
"""
:return: base model from Keras based on user-supplied model name
"""
if self.model_name == 'inception_v3':
return InceptionV3(weights='imagenet', include_top=False)
elif self.model_name == 'xception':
return Xception(weights='imagenet', include_top=False)
elif self.model_name == 'vgg16':
return VGG16(weights='imagenet', include_top=False)
elif self.model_name == 'vgg19':
return VGG19(weights='imagenet', include_top=False)
elif self.model_name == 'resnet50':
return ResNet50(weights='imagenet', include_top=False)
else:
raise ValueError('Cannot find base model %s' % self.model_name) | [
"def",
"_get_base_model",
"(",
"self",
")",
":",
"if",
"self",
".",
"model_name",
"==",
"'inception_v3'",
":",
"return",
"InceptionV3",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'xception'",
":",
"return",
"Xception",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'vgg16'",
":",
"return",
"VGG16",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'vgg19'",
":",
"return",
"VGG19",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'resnet50'",
":",
"return",
"ResNet50",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot find base model %s'",
"%",
"self",
".",
"model_name",
")"
] | :return: base model from Keras based on user-supplied model name | [
":",
"return",
":",
"base",
"model",
"from",
"Keras",
"based",
"on",
"user",
"-",
"supplied",
"model",
"name"
] | python | train |
apache/spark | python/pyspark/sql/streaming.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L829-L878 | def trigger(self, processingTime=None, once=None, continuous=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a query periodically based on the processing
time. Only one trigger can be set.
:param once: if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just once batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(continuous='5 seconds')
"""
params = [processingTime, once, continuous]
if params.count(None) == 3:
raise ValueError('No trigger provided')
elif params.count(None) < 2:
raise ValueError('Multiple triggers not allowed.')
jTrigger = None
if processingTime is not None:
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
if type(continuous) != str or len(continuous.strip()) == 0:
raise ValueError('Value for continuous must be a non empty string. Got: %s' %
continuous)
interval = continuous.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous(
interval)
self._jwrite = self._jwrite.trigger(jTrigger)
return self | [
"def",
"trigger",
"(",
"self",
",",
"processingTime",
"=",
"None",
",",
"once",
"=",
"None",
",",
"continuous",
"=",
"None",
")",
":",
"params",
"=",
"[",
"processingTime",
",",
"once",
",",
"continuous",
"]",
"if",
"params",
".",
"count",
"(",
"None",
")",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"'No trigger provided'",
")",
"elif",
"params",
".",
"count",
"(",
"None",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'Multiple triggers not allowed.'",
")",
"jTrigger",
"=",
"None",
"if",
"processingTime",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"processingTime",
")",
"!=",
"str",
"or",
"len",
"(",
"processingTime",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Value for processingTime must be a non empty string. Got: %s'",
"%",
"processingTime",
")",
"interval",
"=",
"processingTime",
".",
"strip",
"(",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"ProcessingTime",
"(",
"interval",
")",
"elif",
"once",
"is",
"not",
"None",
":",
"if",
"once",
"is",
"not",
"True",
":",
"raise",
"ValueError",
"(",
"'Value for once must be True. Got: %s'",
"%",
"once",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"Once",
"(",
")",
"else",
":",
"if",
"type",
"(",
"continuous",
")",
"!=",
"str",
"or",
"len",
"(",
"continuous",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Value for continuous must be a non empty string. Got: %s'",
"%",
"continuous",
")",
"interval",
"=",
"continuous",
".",
"strip",
"(",
")",
"jTrigger",
"=",
"self",
".",
"_spark",
".",
"_sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"sql",
".",
"streaming",
".",
"Trigger",
".",
"Continuous",
"(",
"interval",
")",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"trigger",
"(",
"jTrigger",
")",
"return",
"self"
] | Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a query periodically based on the processing
time. Only one trigger can be set.
:param once: if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just once batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(continuous='5 seconds') | [
"Set",
"the",
"trigger",
"for",
"the",
"stream",
"query",
".",
"If",
"this",
"is",
"not",
"set",
"it",
"will",
"run",
"the",
"query",
"as",
"fast",
"as",
"possible",
"which",
"is",
"equivalent",
"to",
"setting",
"the",
"trigger",
"to",
"processingTime",
"=",
"0",
"seconds",
"."
] | python | train |
Stewori/pytypes | pytypes/type_util.py | https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L109-L122 | def get_generator_type(genr):
"""Obtains PEP 484 style type of a generator object, i.e. returns a
typing.Generator object.
"""
if genr in _checked_generator_types:
return _checked_generator_types[genr]
if not genr.gi_frame is None and 'gen_type' in genr.gi_frame.f_locals:
return genr.gi_frame.f_locals['gen_type']
else:
cllble, nesting, slf = util.get_callable_fq_for_code(genr.gi_code)
if cllble is None:
return Generator
return _funcsigtypes(cllble, slf, nesting[-1] if slf else None,
genr.gi_frame.f_globals if not genr.gi_frame is None else None)[1] | [
"def",
"get_generator_type",
"(",
"genr",
")",
":",
"if",
"genr",
"in",
"_checked_generator_types",
":",
"return",
"_checked_generator_types",
"[",
"genr",
"]",
"if",
"not",
"genr",
".",
"gi_frame",
"is",
"None",
"and",
"'gen_type'",
"in",
"genr",
".",
"gi_frame",
".",
"f_locals",
":",
"return",
"genr",
".",
"gi_frame",
".",
"f_locals",
"[",
"'gen_type'",
"]",
"else",
":",
"cllble",
",",
"nesting",
",",
"slf",
"=",
"util",
".",
"get_callable_fq_for_code",
"(",
"genr",
".",
"gi_code",
")",
"if",
"cllble",
"is",
"None",
":",
"return",
"Generator",
"return",
"_funcsigtypes",
"(",
"cllble",
",",
"slf",
",",
"nesting",
"[",
"-",
"1",
"]",
"if",
"slf",
"else",
"None",
",",
"genr",
".",
"gi_frame",
".",
"f_globals",
"if",
"not",
"genr",
".",
"gi_frame",
"is",
"None",
"else",
"None",
")",
"[",
"1",
"]"
] | Obtains PEP 484 style type of a generator object, i.e. returns a
typing.Generator object. | [
"Obtains",
"PEP",
"484",
"style",
"type",
"of",
"a",
"generator",
"object",
"i",
".",
"e",
".",
"returns",
"a",
"typing",
".",
"Generator",
"object",
"."
] | python | train |
neovim/pynvim | pynvim/msgpack_rpc/session.py | https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/msgpack_rpc/session.py#L65-L103 | def request(self, method, *args, **kwargs):
"""Send a msgpack-rpc request and block until as response is received.
If the event loop is running, this method must have been called by a
request or notification handler running on a greenlet. In that case,
send the quest and yield to the parent greenlet until a response is
available.
When the event loop is not running, it will perform a blocking request
like this:
- Send the request
- Run the loop until the response is available
- Put requests/notifications received while waiting into a queue
If the `async_` flag is present and True, a asynchronous notification
is sent instead. This will never block, and the return value or error
is ignored.
"""
async_ = check_async(kwargs.pop('async_', None), kwargs, False)
if async_:
self._async_session.notify(method, args)
return
if kwargs:
raise ValueError("request got unsupported keyword argument(s): {}"
.format(', '.join(kwargs.keys())))
if self._is_running:
v = self._yielding_request(method, args)
else:
v = self._blocking_request(method, args)
if not v:
# EOF
raise IOError('EOF')
err, rv = v
if err:
info("'Received error: %s", err)
raise self.error_wrapper(err)
return rv | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"async_",
"=",
"check_async",
"(",
"kwargs",
".",
"pop",
"(",
"'async_'",
",",
"None",
")",
",",
"kwargs",
",",
"False",
")",
"if",
"async_",
":",
"self",
".",
"_async_session",
".",
"notify",
"(",
"method",
",",
"args",
")",
"return",
"if",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"request got unsupported keyword argument(s): {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"self",
".",
"_is_running",
":",
"v",
"=",
"self",
".",
"_yielding_request",
"(",
"method",
",",
"args",
")",
"else",
":",
"v",
"=",
"self",
".",
"_blocking_request",
"(",
"method",
",",
"args",
")",
"if",
"not",
"v",
":",
"# EOF",
"raise",
"IOError",
"(",
"'EOF'",
")",
"err",
",",
"rv",
"=",
"v",
"if",
"err",
":",
"info",
"(",
"\"'Received error: %s\"",
",",
"err",
")",
"raise",
"self",
".",
"error_wrapper",
"(",
"err",
")",
"return",
"rv"
] | Send a msgpack-rpc request and block until as response is received.
If the event loop is running, this method must have been called by a
request or notification handler running on a greenlet. In that case,
send the quest and yield to the parent greenlet until a response is
available.
When the event loop is not running, it will perform a blocking request
like this:
- Send the request
- Run the loop until the response is available
- Put requests/notifications received while waiting into a queue
If the `async_` flag is present and True, a asynchronous notification
is sent instead. This will never block, and the return value or error
is ignored. | [
"Send",
"a",
"msgpack",
"-",
"rpc",
"request",
"and",
"block",
"until",
"as",
"response",
"is",
"received",
"."
] | python | train |
balloob/pychromecast | pychromecast/socket_client.py | https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L646-L659 | def _read_message(self):
""" Reads a message from the socket and converts it to a message. """
# first 4 bytes is Big-Endian payload length
payload_info = self._read_bytes_from_socket(4)
read_len = unpack(">I", payload_info)[0]
# now read the payload
payload = self._read_bytes_from_socket(read_len)
# pylint: disable=no-member
message = cast_channel_pb2.CastMessage()
message.ParseFromString(payload)
return message | [
"def",
"_read_message",
"(",
"self",
")",
":",
"# first 4 bytes is Big-Endian payload length",
"payload_info",
"=",
"self",
".",
"_read_bytes_from_socket",
"(",
"4",
")",
"read_len",
"=",
"unpack",
"(",
"\">I\"",
",",
"payload_info",
")",
"[",
"0",
"]",
"# now read the payload",
"payload",
"=",
"self",
".",
"_read_bytes_from_socket",
"(",
"read_len",
")",
"# pylint: disable=no-member",
"message",
"=",
"cast_channel_pb2",
".",
"CastMessage",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"payload",
")",
"return",
"message"
] | Reads a message from the socket and converts it to a message. | [
"Reads",
"a",
"message",
"from",
"the",
"socket",
"and",
"converts",
"it",
"to",
"a",
"message",
"."
] | python | train |
saltstack/salt | salt/states/module.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/module.py#L439-L496 | def _call_function(name, returner=None, **kwargs):
    '''
    Calls a function from the specified module.
    :param name:
    :param kwargs:
    :return:
    '''
    argspec = salt.utils.args.get_function_argspec(__salt__[name])
    # func_kw is initialized to a dictionary of keyword arguments the function to be run accepts
    func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code
                       argspec.defaults or []))
    # func_args is initialized to a list of positional arguments that the function to be run accepts
    func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
    arg_type, kw_to_arg_type, na_type, kw_type = [], {}, {}, False
    for funcset in reversed(kwargs.get('func_args') or []):
        if not isinstance(funcset, dict):
            # We are just receiving a list of args to the function to be run, so just append
            # those to the arg list that we will pass to the func.
            arg_type.append(funcset)
        else:
            for kwarg_key in six.iterkeys(funcset):
                # We are going to pass in a keyword argument. The trick here is to make certain
                # that if we find that in the *args* list that we pass it there and not as a kwarg
                if kwarg_key in func_args:
                    kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
                    continue
                else:
                    # Otherwise, we're good and just go ahead and pass the keyword/value pair into
                    # the kwargs list to be run.
                    func_kw.update(funcset)
    arg_type.reverse()
    for arg in func_args:
        if arg in kw_to_arg_type:
            arg_type.append(kw_to_arg_type[arg])
    _exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
    _passed_prm = len(arg_type)
    missing = []
    if na_type and _exp_prm > _passed_prm:
        for arg in argspec.args:
            if arg not in func_kw:
                missing.append(arg)
    if missing:
        raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing)))
    elif _exp_prm > _passed_prm:
        raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format(
            _exp_prm, _passed_prm))
    mret = __salt__[name](*arg_type, **func_kw)
    if returner is not None:
        returners = salt.loader.returners(__opts__, __salt__)
        if returner in returners:
            returners[returner]({'id': __opts__['id'], 'ret': mret,
                                 'fun': name, 'jid': salt.utils.jid.gen_jid(__opts__)})
    return mret | [
"def",
"_call_function",
"(",
"name",
",",
"returner",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"argspec",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"get_function_argspec",
"(",
"__salt__",
"[",
"name",
"]",
")",
"# func_kw is initialized to a dictionary of keyword arguments the function to be run accepts",
"func_kw",
"=",
"dict",
"(",
"zip",
"(",
"argspec",
".",
"args",
"[",
"-",
"len",
"(",
"argspec",
".",
"defaults",
"or",
"[",
"]",
")",
":",
"]",
",",
"# pylint: disable=incompatible-py3-code",
"argspec",
".",
"defaults",
"or",
"[",
"]",
")",
")",
"# func_args is initialized to a list of positional arguments that the function to be run accepts",
"func_args",
"=",
"argspec",
".",
"args",
"[",
":",
"len",
"(",
"argspec",
".",
"args",
"or",
"[",
"]",
")",
"-",
"len",
"(",
"argspec",
".",
"defaults",
"or",
"[",
"]",
")",
"]",
"arg_type",
",",
"kw_to_arg_type",
",",
"na_type",
",",
"kw_type",
"=",
"[",
"]",
",",
"{",
"}",
",",
"{",
"}",
",",
"False",
"for",
"funcset",
"in",
"reversed",
"(",
"kwargs",
".",
"get",
"(",
"'func_args'",
")",
"or",
"[",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"funcset",
",",
"dict",
")",
":",
"# We are just receiving a list of args to the function to be run, so just append",
"# those to the arg list that we will pass to the func.",
"arg_type",
".",
"append",
"(",
"funcset",
")",
"else",
":",
"for",
"kwarg_key",
"in",
"six",
".",
"iterkeys",
"(",
"funcset",
")",
":",
"# We are going to pass in a keyword argument. The trick here is to make certain",
"# that if we find that in the *args* list that we pass it there and not as a kwarg",
"if",
"kwarg_key",
"in",
"func_args",
":",
"kw_to_arg_type",
"[",
"kwarg_key",
"]",
"=",
"funcset",
"[",
"kwarg_key",
"]",
"continue",
"else",
":",
"# Otherwise, we're good and just go ahead and pass the keyword/value pair into",
"# the kwargs list to be run.",
"func_kw",
".",
"update",
"(",
"funcset",
")",
"arg_type",
".",
"reverse",
"(",
")",
"for",
"arg",
"in",
"func_args",
":",
"if",
"arg",
"in",
"kw_to_arg_type",
":",
"arg_type",
".",
"append",
"(",
"kw_to_arg_type",
"[",
"arg",
"]",
")",
"_exp_prm",
"=",
"len",
"(",
"argspec",
".",
"args",
"or",
"[",
"]",
")",
"-",
"len",
"(",
"argspec",
".",
"defaults",
"or",
"[",
"]",
")",
"_passed_prm",
"=",
"len",
"(",
"arg_type",
")",
"missing",
"=",
"[",
"]",
"if",
"na_type",
"and",
"_exp_prm",
">",
"_passed_prm",
":",
"for",
"arg",
"in",
"argspec",
".",
"args",
":",
"if",
"arg",
"not",
"in",
"func_kw",
":",
"missing",
".",
"append",
"(",
"arg",
")",
"if",
"missing",
":",
"raise",
"SaltInvocationError",
"(",
"'Missing arguments: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"missing",
")",
")",
")",
"elif",
"_exp_prm",
">",
"_passed_prm",
":",
"raise",
"SaltInvocationError",
"(",
"'Function expects {0} parameters, got only {1}'",
".",
"format",
"(",
"_exp_prm",
",",
"_passed_prm",
")",
")",
"mret",
"=",
"__salt__",
"[",
"name",
"]",
"(",
"*",
"arg_type",
",",
"*",
"*",
"func_kw",
")",
"if",
"returner",
"is",
"not",
"None",
":",
"returners",
"=",
"salt",
".",
"loader",
".",
"returners",
"(",
"__opts__",
",",
"__salt__",
")",
"if",
"returner",
"in",
"returners",
":",
"returners",
"[",
"returner",
"]",
"(",
"{",
"'id'",
":",
"__opts__",
"[",
"'id'",
"]",
",",
"'ret'",
":",
"mret",
",",
"'fun'",
":",
"name",
",",
"'jid'",
":",
"salt",
".",
"utils",
".",
"jid",
".",
"gen_jid",
"(",
"__opts__",
")",
"}",
")",
"return",
"mret"
] | Calls a function from the specified module.
:param name:
:param kwargs:
:return: | [
"Calls",
"a",
"function",
"from",
"the",
"specified",
"module",
"."
] | python | train |
zhelev/python-afsapi | afsapi/__init__.py | https://github.com/zhelev/python-afsapi/blob/bb1990cf1460ae42f2dde75f2291625ddac2c0e4/afsapi/__init__.py#L141-L147 | def handle_set(self, item, value):
"""Helper method for setting a value by using the fsapi API."""
doc = yield from self.call('SET/{}'.format(item), dict(value=value))
if doc is None:
return None
return doc.status == 'FS_OK' | [
"def",
"handle_set",
"(",
"self",
",",
"item",
",",
"value",
")",
":",
"doc",
"=",
"yield",
"from",
"self",
".",
"call",
"(",
"'SET/{}'",
".",
"format",
"(",
"item",
")",
",",
"dict",
"(",
"value",
"=",
"value",
")",
")",
"if",
"doc",
"is",
"None",
":",
"return",
"None",
"return",
"doc",
".",
"status",
"==",
"'FS_OK'"
] | Helper method for setting a value by using the fsapi API. | [
"Helper",
"method",
"for",
"setting",
"a",
"value",
"by",
"using",
"the",
"fsapi",
"API",
"."
] | python | valid |
xflr6/gsheets | gsheets/api.py | https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/api.py#L92-L109 | def get(self, id_or_url, default=None):
"""Fetch and return the spreadsheet with the given id or url.
Args:
id_or_url (str): unique alphanumeric id or URL of the spreadsheet
Returns:
New SpreadSheet instance or given default if none is found
Raises:
ValueError: if an URL is given from which no id could be extracted
"""
if '/' in id_or_url:
id = urls.SheetUrl.from_string(id_or_url).id
else:
id = id_or_url
try:
return self[id]
except KeyError:
return default | [
"def",
"get",
"(",
"self",
",",
"id_or_url",
",",
"default",
"=",
"None",
")",
":",
"if",
"'/'",
"in",
"id_or_url",
":",
"id",
"=",
"urls",
".",
"SheetUrl",
".",
"from_string",
"(",
"id_or_url",
")",
".",
"id",
"else",
":",
"id",
"=",
"id_or_url",
"try",
":",
"return",
"self",
"[",
"id",
"]",
"except",
"KeyError",
":",
"return",
"default"
] | Fetch and return the spreadsheet with the given id or url.
Args:
id_or_url (str): unique alphanumeric id or URL of the spreadsheet
Returns:
New SpreadSheet instance or given default if none is found
Raises:
ValueError: if an URL is given from which no id could be extracted | [
"Fetch",
"and",
"return",
"the",
"spreadsheet",
"with",
"the",
"given",
"id",
"or",
"url",
"."
] | python | train |
projecthamster/hamster | src/hamster/lib/layout.py | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/layout.py#L474-L496 | def resize_children(self):
"""default container alignment is to pile stuff just up, respecting only
padding, margin and element's alignment properties"""
width = self.width - self.horizontal_padding
height = self.height - self.vertical_padding
for sprite, props in (get_props(sprite) for sprite in self.sprites if sprite.visible):
sprite.alloc_w = width
sprite.alloc_h = height
w, h = getattr(sprite, "width", 0), getattr(sprite, "height", 0)
if hasattr(sprite, "get_height_for_width_size"):
w2, h2 = sprite.get_height_for_width_size()
w, h = max(w, w2), max(h, h2)
w = w * sprite.scale_x + props["margin_left"] + props["margin_right"]
h = h * sprite.scale_y + props["margin_top"] + props["margin_bottom"]
sprite.x = self.padding_left + props["margin_left"] + (max(sprite.alloc_w * sprite.scale_x, w) - w) * getattr(sprite, "x_align", 0)
sprite.y = self.padding_top + props["margin_top"] + (max(sprite.alloc_h * sprite.scale_y, h) - h) * getattr(sprite, "y_align", 0)
self.__dict__['_children_resize_queued'] = False | [
"def",
"resize_children",
"(",
"self",
")",
":",
"width",
"=",
"self",
".",
"width",
"-",
"self",
".",
"horizontal_padding",
"height",
"=",
"self",
".",
"height",
"-",
"self",
".",
"vertical_padding",
"for",
"sprite",
",",
"props",
"in",
"(",
"get_props",
"(",
"sprite",
")",
"for",
"sprite",
"in",
"self",
".",
"sprites",
"if",
"sprite",
".",
"visible",
")",
":",
"sprite",
".",
"alloc_w",
"=",
"width",
"sprite",
".",
"alloc_h",
"=",
"height",
"w",
",",
"h",
"=",
"getattr",
"(",
"sprite",
",",
"\"width\"",
",",
"0",
")",
",",
"getattr",
"(",
"sprite",
",",
"\"height\"",
",",
"0",
")",
"if",
"hasattr",
"(",
"sprite",
",",
"\"get_height_for_width_size\"",
")",
":",
"w2",
",",
"h2",
"=",
"sprite",
".",
"get_height_for_width_size",
"(",
")",
"w",
",",
"h",
"=",
"max",
"(",
"w",
",",
"w2",
")",
",",
"max",
"(",
"h",
",",
"h2",
")",
"w",
"=",
"w",
"*",
"sprite",
".",
"scale_x",
"+",
"props",
"[",
"\"margin_left\"",
"]",
"+",
"props",
"[",
"\"margin_right\"",
"]",
"h",
"=",
"h",
"*",
"sprite",
".",
"scale_y",
"+",
"props",
"[",
"\"margin_top\"",
"]",
"+",
"props",
"[",
"\"margin_bottom\"",
"]",
"sprite",
".",
"x",
"=",
"self",
".",
"padding_left",
"+",
"props",
"[",
"\"margin_left\"",
"]",
"+",
"(",
"max",
"(",
"sprite",
".",
"alloc_w",
"*",
"sprite",
".",
"scale_x",
",",
"w",
")",
"-",
"w",
")",
"*",
"getattr",
"(",
"sprite",
",",
"\"x_align\"",
",",
"0",
")",
"sprite",
".",
"y",
"=",
"self",
".",
"padding_top",
"+",
"props",
"[",
"\"margin_top\"",
"]",
"+",
"(",
"max",
"(",
"sprite",
".",
"alloc_h",
"*",
"sprite",
".",
"scale_y",
",",
"h",
")",
"-",
"h",
")",
"*",
"getattr",
"(",
"sprite",
",",
"\"y_align\"",
",",
"0",
")",
"self",
".",
"__dict__",
"[",
"'_children_resize_queued'",
"]",
"=",
"False"
] | default container alignment is to pile stuff just up, respecting only
padding, margin and element's alignment properties | [
"default",
"container",
"alignment",
"is",
"to",
"pile",
"stuff",
"just",
"up",
"respecting",
"only",
"padding",
"margin",
"and",
"element",
"s",
"alignment",
"properties"
] | python | train |
Erotemic/utool | utool/util_gridsearch.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L2033-L2117 | def interact_gridsearch_result_images(show_result_func, cfgdict_list,
                                      cfglbl_list, cfgresult_list,
                                      score_list=None, fnum=None, figtitle='',
                                      unpack=False, max_plots=25, verbose=True,
                                      precision=3, scorelbl='score',
                                      onclick_func=None):
    """ helper function for visualizing results of gridsearch """
    assert callable(show_result_func), 'NEED FUNCTION GOT: %r' % (show_result_func,)
    import utool as ut
    import plottool as pt
    from plottool import plot_helpers as ph
    from plottool import interact_helpers as ih
    if verbose:
        print('Plotting gridsearch results figtitle=%r' % (figtitle,))
    if score_list is None:
        score_list = [None] * len(cfgdict_list)
    else:
        # sort by score if available
        sortx_list = ut.list_argsort(score_list, reverse=True)
        score_list = ut.take(score_list, sortx_list)
        cfgdict_list = ut.take(cfgdict_list, sortx_list)
        cfglbl_list = ut.take(cfglbl_list, sortx_list)
        cfgresult_list = ut.take(cfgresult_list, sortx_list)
    # Dont show too many results only the top few
    score_list = ut.listclip(score_list, max_plots)
    # Show the config results
    fig = pt.figure(fnum=fnum)
    # Get plots for each of the resutls
    nRows, nCols = pt.get_square_row_cols(len(score_list), fix=True)
    next_pnum = pt.make_pnum_nextgen(nRows, nCols)
    for cfgdict, cfglbl, cfgresult, score in zip(cfgdict_list, cfglbl_list,
                                                 cfgresult_list,
                                                 score_list):
        if score is not None:
            cfglbl += '\n' + scorelbl + '=' + ut.repr2(score, precision=precision)
        pnum = next_pnum()
        try:
            if unpack:
                show_result_func(*cfgresult, fnum=fnum, pnum=pnum)
            else:
                show_result_func(cfgresult, fnum=fnum, pnum=pnum)
        except Exception as ex:
            if isinstance(cfgresult, tuple):
                #print(ut.repr4(cfgresult))
                print(ut.depth_profile(cfgresult))
                print(ut.list_type_profile(cfgresult))
            ut.printex(ex, 'error showing', keys=['cfgresult', 'fnum', 'pnum'])
            raise
        #pt.imshow(255 * cfgresult, fnum=fnum, pnum=next_pnum(), title=cfglbl)
        ax = pt.gca()
        pt.set_title(cfglbl, ax=ax) # , size)
        ph.set_plotdat(ax, 'cfgdict', cfgdict)
        ph.set_plotdat(ax, 'cfglbl', cfglbl)
        ph.set_plotdat(ax, 'cfgresult', cfgresult)
    # Define clicked callback
    def on_clicked(event):
        print('\n[pt] clicked gridsearch axes')
        if event is None or event.xdata is None or event.inaxes is None:
            print('out of axes')
            pass
        else:
            ax = event.inaxes
            plotdat_dict = ph.get_plotdat_dict(ax)
            print(ut.repr4(plotdat_dict))
            cfglbl = ph.get_plotdat(ax, 'cfglbl', None)
            cfgdict = ph.get_plotdat(ax, 'cfgdict', {})
            cfgresult = ph.get_plotdat(ax, 'cfgresult', {})
            infostr_list = [
                ('cfglbl = %s' % (cfglbl,)),
                '',
                ('cfgdict = ' + ut.repr4(cfgdict, sorted_=True)),
            ]
            # Call a user defined function if given
            if onclick_func is not None:
                if unpack:
                    onclick_func(*cfgresult)
                else:
                    onclick_func(cfgresult)
            infostr = ut.msgblock('CLICKED', '\n'.join(infostr_list))
            print(infostr)
    # Connect callbacks
    ih.connect_callback(fig, 'button_press_event', on_clicked)
    pt.set_figtitle(figtitle) | [
"def",
"interact_gridsearch_result_images",
"(",
"show_result_func",
",",
"cfgdict_list",
",",
"cfglbl_list",
",",
"cfgresult_list",
",",
"score_list",
"=",
"None",
",",
"fnum",
"=",
"None",
",",
"figtitle",
"=",
"''",
",",
"unpack",
"=",
"False",
",",
"max_plots",
"=",
"25",
",",
"verbose",
"=",
"True",
",",
"precision",
"=",
"3",
",",
"scorelbl",
"=",
"'score'",
",",
"onclick_func",
"=",
"None",
")",
":",
"assert",
"callable",
"(",
"show_result_func",
")",
",",
"'NEED FUNCTION GOT: %r'",
"%",
"(",
"show_result_func",
",",
")",
"import",
"utool",
"as",
"ut",
"import",
"plottool",
"as",
"pt",
"from",
"plottool",
"import",
"plot_helpers",
"as",
"ph",
"from",
"plottool",
"import",
"interact_helpers",
"as",
"ih",
"if",
"verbose",
":",
"print",
"(",
"'Plotting gridsearch results figtitle=%r'",
"%",
"(",
"figtitle",
",",
")",
")",
"if",
"score_list",
"is",
"None",
":",
"score_list",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"cfgdict_list",
")",
"else",
":",
"# sort by score if available",
"sortx_list",
"=",
"ut",
".",
"list_argsort",
"(",
"score_list",
",",
"reverse",
"=",
"True",
")",
"score_list",
"=",
"ut",
".",
"take",
"(",
"score_list",
",",
"sortx_list",
")",
"cfgdict_list",
"=",
"ut",
".",
"take",
"(",
"cfgdict_list",
",",
"sortx_list",
")",
"cfglbl_list",
"=",
"ut",
".",
"take",
"(",
"cfglbl_list",
",",
"sortx_list",
")",
"cfgresult_list",
"=",
"ut",
".",
"take",
"(",
"cfgresult_list",
",",
"sortx_list",
")",
"# Dont show too many results only the top few",
"score_list",
"=",
"ut",
".",
"listclip",
"(",
"score_list",
",",
"max_plots",
")",
"# Show the config results",
"fig",
"=",
"pt",
".",
"figure",
"(",
"fnum",
"=",
"fnum",
")",
"# Get plots for each of the resutls",
"nRows",
",",
"nCols",
"=",
"pt",
".",
"get_square_row_cols",
"(",
"len",
"(",
"score_list",
")",
",",
"fix",
"=",
"True",
")",
"next_pnum",
"=",
"pt",
".",
"make_pnum_nextgen",
"(",
"nRows",
",",
"nCols",
")",
"for",
"cfgdict",
",",
"cfglbl",
",",
"cfgresult",
",",
"score",
"in",
"zip",
"(",
"cfgdict_list",
",",
"cfglbl_list",
",",
"cfgresult_list",
",",
"score_list",
")",
":",
"if",
"score",
"is",
"not",
"None",
":",
"cfglbl",
"+=",
"'\\n'",
"+",
"scorelbl",
"+",
"'='",
"+",
"ut",
".",
"repr2",
"(",
"score",
",",
"precision",
"=",
"precision",
")",
"pnum",
"=",
"next_pnum",
"(",
")",
"try",
":",
"if",
"unpack",
":",
"show_result_func",
"(",
"*",
"cfgresult",
",",
"fnum",
"=",
"fnum",
",",
"pnum",
"=",
"pnum",
")",
"else",
":",
"show_result_func",
"(",
"cfgresult",
",",
"fnum",
"=",
"fnum",
",",
"pnum",
"=",
"pnum",
")",
"except",
"Exception",
"as",
"ex",
":",
"if",
"isinstance",
"(",
"cfgresult",
",",
"tuple",
")",
":",
"#print(ut.repr4(cfgresult))",
"print",
"(",
"ut",
".",
"depth_profile",
"(",
"cfgresult",
")",
")",
"print",
"(",
"ut",
".",
"list_type_profile",
"(",
"cfgresult",
")",
")",
"ut",
".",
"printex",
"(",
"ex",
",",
"'error showing'",
",",
"keys",
"=",
"[",
"'cfgresult'",
",",
"'fnum'",
",",
"'pnum'",
"]",
")",
"raise",
"#pt.imshow(255 * cfgresult, fnum=fnum, pnum=next_pnum(), title=cfglbl)",
"ax",
"=",
"pt",
".",
"gca",
"(",
")",
"pt",
".",
"set_title",
"(",
"cfglbl",
",",
"ax",
"=",
"ax",
")",
"# , size)",
"ph",
".",
"set_plotdat",
"(",
"ax",
",",
"'cfgdict'",
",",
"cfgdict",
")",
"ph",
".",
"set_plotdat",
"(",
"ax",
",",
"'cfglbl'",
",",
"cfglbl",
")",
"ph",
".",
"set_plotdat",
"(",
"ax",
",",
"'cfgresult'",
",",
"cfgresult",
")",
"# Define clicked callback",
"def",
"on_clicked",
"(",
"event",
")",
":",
"print",
"(",
"'\\n[pt] clicked gridsearch axes'",
")",
"if",
"event",
"is",
"None",
"or",
"event",
".",
"xdata",
"is",
"None",
"or",
"event",
".",
"inaxes",
"is",
"None",
":",
"print",
"(",
"'out of axes'",
")",
"pass",
"else",
":",
"ax",
"=",
"event",
".",
"inaxes",
"plotdat_dict",
"=",
"ph",
".",
"get_plotdat_dict",
"(",
"ax",
")",
"print",
"(",
"ut",
".",
"repr4",
"(",
"plotdat_dict",
")",
")",
"cfglbl",
"=",
"ph",
".",
"get_plotdat",
"(",
"ax",
",",
"'cfglbl'",
",",
"None",
")",
"cfgdict",
"=",
"ph",
".",
"get_plotdat",
"(",
"ax",
",",
"'cfgdict'",
",",
"{",
"}",
")",
"cfgresult",
"=",
"ph",
".",
"get_plotdat",
"(",
"ax",
",",
"'cfgresult'",
",",
"{",
"}",
")",
"infostr_list",
"=",
"[",
"(",
"'cfglbl = %s'",
"%",
"(",
"cfglbl",
",",
")",
")",
",",
"''",
",",
"(",
"'cfgdict = '",
"+",
"ut",
".",
"repr4",
"(",
"cfgdict",
",",
"sorted_",
"=",
"True",
")",
")",
",",
"]",
"# Call a user defined function if given",
"if",
"onclick_func",
"is",
"not",
"None",
":",
"if",
"unpack",
":",
"onclick_func",
"(",
"*",
"cfgresult",
")",
"else",
":",
"onclick_func",
"(",
"cfgresult",
")",
"infostr",
"=",
"ut",
".",
"msgblock",
"(",
"'CLICKED'",
",",
"'\\n'",
".",
"join",
"(",
"infostr_list",
")",
")",
"print",
"(",
"infostr",
")",
"# Connect callbacks",
"ih",
".",
"connect_callback",
"(",
"fig",
",",
"'button_press_event'",
",",
"on_clicked",
")",
"pt",
".",
"set_figtitle",
"(",
"figtitle",
")"
] | helper function for visualizing results of gridsearch | [
"helper",
"function",
"for",
"visualizing",
"results",
"of",
"gridsearch"
] | python | train |
bwohlberg/sporco | sporco/admm/pdcsc.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/pdcsc.py#L422-L432 | def block_sep1(self, Y):
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
"""
# This method is overridden because we have to change the
# mechanism for combining the Y0 and Y1 blocks into a single
# array (see comment in the __init__ method).
shp = Y.shape[0:self.cri.axisC] + self.y1shp[self.cri.axisC:]
return Y[(slice(None),)*self.cri.axisC +
(slice(self.y0I, None),)].reshape(shp) | [
"def",
"block_sep1",
"(",
"self",
",",
"Y",
")",
":",
"# This method is overridden because we have to change the",
"# mechanism for combining the Y0 and Y1 blocks into a single",
"# array (see comment in the __init__ method).",
"shp",
"=",
"Y",
".",
"shape",
"[",
"0",
":",
"self",
".",
"cri",
".",
"axisC",
"]",
"+",
"self",
".",
"y1shp",
"[",
"self",
".",
"cri",
".",
"axisC",
":",
"]",
"return",
"Y",
"[",
"(",
"slice",
"(",
"None",
")",
",",
")",
"*",
"self",
".",
"cri",
".",
"axisC",
"+",
"(",
"slice",
"(",
"self",
".",
"y0I",
",",
"None",
")",
",",
")",
"]",
".",
"reshape",
"(",
"shp",
")"
] | r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`. | [
"r",
"Separate",
"variable",
"into",
"component",
"corresponding",
"to",
":",
"math",
":",
"\\",
"mathbf",
"{",
"y",
"}",
"_1",
"in",
":",
"math",
":",
"\\",
"mathbf",
"{",
"y",
"}",
"\\",
";",
"\\",
";",
"."
] | python | train |
twisted/epsilon | epsilon/hotfixes/timeoutmixin_calllater.py | https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/hotfixes/timeoutmixin_calllater.py#L22-L41 | def setTimeout(self, period):
"""Change the timeout period
@type period: C{int} or C{NoneType}
@param period: The period, in seconds, to change the timeout to, or
C{None} to disable the timeout.
"""
prev = self.timeOut
self.timeOut = period
if self.__timeoutCall is not None:
if period is None:
self.__timeoutCall.cancel()
self.__timeoutCall = None
else:
self.__timeoutCall.reset(period)
elif period is not None:
self.__timeoutCall = self.callLater(period, self.__timedOut)
return prev | [
"def",
"setTimeout",
"(",
"self",
",",
"period",
")",
":",
"prev",
"=",
"self",
".",
"timeOut",
"self",
".",
"timeOut",
"=",
"period",
"if",
"self",
".",
"__timeoutCall",
"is",
"not",
"None",
":",
"if",
"period",
"is",
"None",
":",
"self",
".",
"__timeoutCall",
".",
"cancel",
"(",
")",
"self",
".",
"__timeoutCall",
"=",
"None",
"else",
":",
"self",
".",
"__timeoutCall",
".",
"reset",
"(",
"period",
")",
"elif",
"period",
"is",
"not",
"None",
":",
"self",
".",
"__timeoutCall",
"=",
"self",
".",
"callLater",
"(",
"period",
",",
"self",
".",
"__timedOut",
")",
"return",
"prev"
] | Change the timeout period
@type period: C{int} or C{NoneType}
@param period: The period, in seconds, to change the timeout to, or
C{None} to disable the timeout. | [
"Change",
"the",
"timeout",
"period"
] | python | train |
manns/pyspread | pyspread/src/lib/vlc.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5214-L5230 | def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque):
'''Set callbacks and private data for decoded audio.
Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
to configure the decoded audio format.
@param mp: the media player.
@param play: callback to play audio samples (must not be NULL).
@param pause: callback to pause playback (or NULL to ignore).
@param resume: callback to resume playback (or NULL to ignore).
@param flush: callback to flush audio buffers (or NULL to ignore).
@param drain: callback to drain audio buffers (or NULL to ignore).
@param opaque: private pointer for the audio callbacks (as first parameter).
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_audio_set_callbacks', None) or \
_Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p)
return f(mp, play, pause, resume, flush, drain, opaque) | [
"def",
"libvlc_audio_set_callbacks",
"(",
"mp",
",",
"play",
",",
"pause",
",",
"resume",
",",
"flush",
",",
"drain",
",",
"opaque",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_audio_set_callbacks'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_audio_set_callbacks'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
",",
"AudioPlayCb",
",",
"AudioPauseCb",
",",
"AudioResumeCb",
",",
"AudioFlushCb",
",",
"AudioDrainCb",
",",
"ctypes",
".",
"c_void_p",
")",
"return",
"f",
"(",
"mp",
",",
"play",
",",
"pause",
",",
"resume",
",",
"flush",
",",
"drain",
",",
"opaque",
")"
] | Set callbacks and private data for decoded audio.
Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
to configure the decoded audio format.
@param mp: the media player.
@param play: callback to play audio samples (must not be NULL).
@param pause: callback to pause playback (or NULL to ignore).
@param resume: callback to resume playback (or NULL to ignore).
@param flush: callback to flush audio buffers (or NULL to ignore).
@param drain: callback to drain audio buffers (or NULL to ignore).
@param opaque: private pointer for the audio callbacks (as first parameter).
@version: LibVLC 2.0.0 or later. | [
"Set",
"callbacks",
"and",
"private",
"data",
"for",
"decoded",
"audio",
".",
"Use",
"L",
"{",
"libvlc_audio_set_format",
"}",
"()",
"or",
"L",
"{",
"libvlc_audio_set_format_callbacks",
"}",
"()",
"to",
"configure",
"the",
"decoded",
"audio",
"format",
"."
] | python | train |
absperf/python-req | req.py | https://github.com/absperf/python-req/blob/de878f08f4fb28fa140c80d5cbdb04518ef5e968/req.py#L83-L150 | def load(fp, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):
'''Load an object from the file pointer.
:param fp: A readable filehandle.
:param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.
:param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.
:param cls: A callable that returns a Mapping that is filled with pairs. The most common alternate option would be OrderedDict.
:param list_cls: A callable that takes an iterable and returns a sequence.
'''
converter = None
output = cls()
arraykeys = set()
for line in fp:
if converter is None:
if isinstance(line, six.text_type):
converter = six.u
else:
converter = six.b
default_separator = converter('|')
default_index_separator = converter('_')
newline = converter('\n')
if separator is DEFAULT:
separator = default_separator
if index_separator is DEFAULT:
index_separator = default_index_separator
key, value = line.strip().split(separator, 1)
keyparts = key.split(index_separator)
try:
index = int(keyparts[-1])
endwithint = True
except ValueError:
endwithint = False
# We do everything in-place to ensure that we maintain order when using
# an OrderedDict.
if len(keyparts) > 1 and endwithint:
# If this is an array key
basekey = key.rsplit(index_separator, 1)[0]
if basekey not in arraykeys:
arraykeys.add(basekey)
if basekey in output:
# If key already exists as non-array, fix it
if not isinstance(output[basekey], dict):
output[basekey] = {-1: output[basekey]}
else:
output[basekey] = {}
output[basekey][index] = value
else:
if key in output and isinstance(output[key], dict):
output[key][-1] = value
else:
output[key] = value
# Convert array keys
for key in arraykeys:
output[key] = list_cls(pair[1] for pair in sorted(six.iteritems(output[key])))
return output | [
"def",
"load",
"(",
"fp",
",",
"separator",
"=",
"DEFAULT",
",",
"index_separator",
"=",
"DEFAULT",
",",
"cls",
"=",
"dict",
",",
"list_cls",
"=",
"list",
")",
":",
"converter",
"=",
"None",
"output",
"=",
"cls",
"(",
")",
"arraykeys",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"fp",
":",
"if",
"converter",
"is",
"None",
":",
"if",
"isinstance",
"(",
"line",
",",
"six",
".",
"text_type",
")",
":",
"converter",
"=",
"six",
".",
"u",
"else",
":",
"converter",
"=",
"six",
".",
"b",
"default_separator",
"=",
"converter",
"(",
"'|'",
")",
"default_index_separator",
"=",
"converter",
"(",
"'_'",
")",
"newline",
"=",
"converter",
"(",
"'\\n'",
")",
"if",
"separator",
"is",
"DEFAULT",
":",
"separator",
"=",
"default_separator",
"if",
"index_separator",
"is",
"DEFAULT",
":",
"index_separator",
"=",
"default_index_separator",
"key",
",",
"value",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"separator",
",",
"1",
")",
"keyparts",
"=",
"key",
".",
"split",
"(",
"index_separator",
")",
"try",
":",
"index",
"=",
"int",
"(",
"keyparts",
"[",
"-",
"1",
"]",
")",
"endwithint",
"=",
"True",
"except",
"ValueError",
":",
"endwithint",
"=",
"False",
"# We do everything in-place to ensure that we maintain order when using",
"# an OrderedDict.",
"if",
"len",
"(",
"keyparts",
")",
">",
"1",
"and",
"endwithint",
":",
"# If this is an array key",
"basekey",
"=",
"key",
".",
"rsplit",
"(",
"index_separator",
",",
"1",
")",
"[",
"0",
"]",
"if",
"basekey",
"not",
"in",
"arraykeys",
":",
"arraykeys",
".",
"add",
"(",
"basekey",
")",
"if",
"basekey",
"in",
"output",
":",
"# If key already exists as non-array, fix it",
"if",
"not",
"isinstance",
"(",
"output",
"[",
"basekey",
"]",
",",
"dict",
")",
":",
"output",
"[",
"basekey",
"]",
"=",
"{",
"-",
"1",
":",
"output",
"[",
"basekey",
"]",
"}",
"else",
":",
"output",
"[",
"basekey",
"]",
"=",
"{",
"}",
"output",
"[",
"basekey",
"]",
"[",
"index",
"]",
"=",
"value",
"else",
":",
"if",
"key",
"in",
"output",
"and",
"isinstance",
"(",
"output",
"[",
"key",
"]",
",",
"dict",
")",
":",
"output",
"[",
"key",
"]",
"[",
"-",
"1",
"]",
"=",
"value",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"value",
"# Convert array keys",
"for",
"key",
"in",
"arraykeys",
":",
"output",
"[",
"key",
"]",
"=",
"list_cls",
"(",
"pair",
"[",
"1",
"]",
"for",
"pair",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"output",
"[",
"key",
"]",
")",
")",
")",
"return",
"output"
] | Load an object from the file pointer.
:param fp: A readable filehandle.
:param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.
:param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.
:param cls: A callable that returns a Mapping that is filled with pairs. The most common alternate option would be OrderedDict.
:param list_cls: A callable that takes an iterable and returns a sequence. | [
"Load",
"an",
"object",
"from",
"the",
"file",
"pointer",
"."
] | python | valid |
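A minimal usage sketch for the `load` parser documented above; it assumes the `req.py` module from this record is importable as `req`, and the sample keys and values are invented for illustration.
import io
from req import load  # the module shown above (req.py)
# Keys that end in _<integer> are gathered into a list under the base key.
sample = u"name|Ada\ncolor_0|red\ncolor_1|blue\n"
result = load(io.StringIO(sample))
print(result)  # -> {'name': 'Ada', 'color': ['red', 'blue']}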
mitsei/dlkit | dlkit/records/osid/base_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L2383-L2389 | def clear_color_coordinate(self):
"""stub"""
if (self.get_color_coordinate_metadata().is_read_only() or
self.get_color_coordinate_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['colorCoordinate'] = \
dict(self.get_color_coordinate_metadata().get_default_coordinate_values()[0]) | [
"def",
"clear_color_coordinate",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"get_color_coordinate_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
"or",
"self",
".",
"get_color_coordinate_metadata",
"(",
")",
".",
"is_required",
"(",
")",
")",
":",
"raise",
"NoAccess",
"(",
")",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'colorCoordinate'",
"]",
"=",
"dict",
"(",
"self",
".",
"get_color_coordinate_metadata",
"(",
")",
".",
"get_default_coordinate_values",
"(",
")",
"[",
"0",
"]",
")"
] | stub | [
"stub"
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py#L604-L609 | def is_beating(self):
"""Is the heartbeat running and responsive (and not paused)."""
if self.is_alive() and not self._pause and self._beating:
return True
else:
return False | [
"def",
"is_beating",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_alive",
"(",
")",
"and",
"not",
"self",
".",
"_pause",
"and",
"self",
".",
"_beating",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Is the heartbeat running and responsive (and not paused). | [
"Is",
"the",
"heartbeat",
"running",
"and",
"responsive",
"(",
"and",
"not",
"paused",
")",
"."
] | python | test |
CalebBell/ht | ht/conv_free_immersed.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_free_immersed.py#L521-L579 | def Nu_vertical_cylinder_Kreith_Eckert(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_, [4]_, and [5]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{5} < Ra < 10^{9}
Nu_H = 0.021 Ra_H^{0.4},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given.
Examples
--------
>>> Nu_vertical_cylinder_Kreith_Eckert(.7, 2E10)
240.25393473033196
References
----------
.. [1] Eckert, E. R. G., Thomas W. Jackson, and United States. Analysis of
Turbulent Free-Convection Boundary Layer on Flat Plate. National
Advisory Committee for Aeronautics, no. 2207. Washington, D.C.: National
Advisory Committee for Aeronautics, 1950.
.. [2] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [3] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [4] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [5] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.021*Ra**0.4
else:
return 0.555*Ra**0.25 | [
"def",
"Nu_vertical_cylinder_Kreith_Eckert",
"(",
"Pr",
",",
"Gr",
",",
"turbulent",
"=",
"None",
")",
":",
"Ra",
"=",
"Pr",
"*",
"Gr",
"if",
"turbulent",
"or",
"(",
"Ra",
">",
"1E9",
"and",
"turbulent",
"is",
"None",
")",
":",
"return",
"0.021",
"*",
"Ra",
"**",
"0.4",
"else",
":",
"return",
"0.555",
"*",
"Ra",
"**",
"0.25"
] | r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_, [4]_, and [5]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{5} < Ra < 10^{9}
Nu_H = 0.021 Ra_H^{0.4},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given.
Examples
--------
>>> Nu_vertical_cylinder_Kreith_Eckert(.7, 2E10)
240.25393473033196
References
----------
.. [1] Eckert, E. R. G., Thomas W. Jackson, and United States. Analysis of
Turbulent Free-Convection Boundary Layer on Flat Plate. National
Advisory Committee for Aeronautics, no. 2207. Washington, D.C.: National
Advisory Committee for Aeronautics, 1950.
.. [2] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [3] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [4] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [5] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014. | [
"r",
"Calculates",
"Nusselt",
"number",
"for",
"natural",
"convection",
"around",
"a",
"vertical",
"isothermal",
"cylinder",
"according",
"to",
"the",
"results",
"of",
"[",
"1",
"]",
"_",
"correlated",
"by",
"[",
"2",
"]",
"_",
"also",
"as",
"presented",
"in",
"[",
"3",
"]",
"_",
"[",
"4",
"]",
"_",
"and",
"[",
"5",
"]",
"_",
"."
] | python | train |
xu2243051/easyui-menu | easyui/mixins/view_mixins.py | https://github.com/xu2243051/easyui-menu/blob/4da0b50cf2d3ddb0f1ec7a4da65fd3c4339f8dfb/easyui/mixins/view_mixins.py#L65-L71 | def get_template_names(self):
"""
Default template for the datagrid
"""
names = super(EasyUIDeleteView, self).get_template_names()
names.append('easyui/confirm_delete.html')
return names | [
"def",
"get_template_names",
"(",
"self",
")",
":",
"names",
"=",
"super",
"(",
"EasyUIDeleteView",
",",
"self",
")",
".",
"get_template_names",
"(",
")",
"names",
".",
"append",
"(",
"'easyui/confirm_delete.html'",
")",
"return",
"names"
] | Default template for the datagrid | [
"Default",
"template",
"for",
"the",
"datagrid"
] | python | valid |
log2timeline/dfvfs | dfvfs/vfs/encoded_stream_file_system.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/encoded_stream_file_system.py#L72-L81 | def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
EncodedStreamFileEntry: a file entry or None if not available.
"""
path_spec = encoded_stream_path_spec.EncodedStreamPathSpec(
encoding_method=self._encoding_method,
parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | [
"def",
"GetRootFileEntry",
"(",
"self",
")",
":",
"path_spec",
"=",
"encoded_stream_path_spec",
".",
"EncodedStreamPathSpec",
"(",
"encoding_method",
"=",
"self",
".",
"_encoding_method",
",",
"parent",
"=",
"self",
".",
"_path_spec",
".",
"parent",
")",
"return",
"self",
".",
"GetFileEntryByPathSpec",
"(",
"path_spec",
")"
] | Retrieves the root file entry.
Returns:
EncodedStreamFileEntry: a file entry or None if not available. | [
"Retrieves",
"the",
"root",
"file",
"entry",
"."
] | python | train |
eumis/pyviews | pyviews/core/common.py | https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/common.py#L38-L40 | def add_cause(self, error: Exception):
'''Adds cause error to error message'''
self.add_info('Cause error', '{0} - {1}'.format(type(error).__name__, error)) | [
"def",
"add_cause",
"(",
"self",
",",
"error",
":",
"Exception",
")",
":",
"self",
".",
"add_info",
"(",
"'Cause error'",
",",
"'{0} - {1}'",
".",
"format",
"(",
"type",
"(",
"error",
")",
".",
"__name__",
",",
"error",
")",
")"
] | Adds cause error to error message | [
"Adds",
"cause",
"error",
"to",
"error",
"message"
] | python | train |
fhcrc/seqmagick | seqmagick/transform.py | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L256-L267 | def multi_mask_sequences(records, slices):
"""
Replace characters sliced by slices with gap characters.
"""
for record in records:
record_indices = list(range(len(record)))
keep_indices = reduce(lambda i, s: i - frozenset(record_indices[s]),
slices, frozenset(record_indices))
seq = ''.join(b if i in keep_indices else '-'
for i, b in enumerate(str(record.seq)))
record.seq = Seq(seq)
yield record | [
"def",
"multi_mask_sequences",
"(",
"records",
",",
"slices",
")",
":",
"for",
"record",
"in",
"records",
":",
"record_indices",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"record",
")",
")",
")",
"keep_indices",
"=",
"reduce",
"(",
"lambda",
"i",
",",
"s",
":",
"i",
"-",
"frozenset",
"(",
"record_indices",
"[",
"s",
"]",
")",
",",
"slices",
",",
"frozenset",
"(",
"record_indices",
")",
")",
"seq",
"=",
"''",
".",
"join",
"(",
"b",
"if",
"i",
"in",
"keep_indices",
"else",
"'-'",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"str",
"(",
"record",
".",
"seq",
")",
")",
")",
"record",
".",
"seq",
"=",
"Seq",
"(",
"seq",
")",
"yield",
"record"
] | Replace characters sliced by slices with gap characters. | [
"Replace",
"characters",
"sliced",
"by",
"slices",
"with",
"gap",
"characters",
"."
] | python | train |
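A small sketch of how the masking generator above can be driven; it assumes Biopython `SeqRecord` objects, which is the record type the surrounding seqmagick code works with, and the sequence and slices are made up.
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from seqmagick.transform import multi_mask_sequences
records = [SeqRecord(Seq("ACGTACGTAC"), id="seq1")]
# Mask positions 2-3 and 7-8 (0-based, end-exclusive) with gap characters.
masked = list(multi_mask_sequences(records, [slice(2, 4), slice(7, 9)]))
print(str(masked[0].seq))  # AC--ACG--C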
apache/incubator-mxnet | example/ctc/lstm_ocr_train.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/lstm_ocr_train.py#L58-L121 | def main():
"""Program entry point"""
args = parse_args()
if not any(args.loss == s for s in ['ctc', 'warpctc']):
raise ValueError("Invalid loss '{}' (must be 'ctc' or 'warpctc')".format(args.loss))
hp = Hyperparams()
# Start a multiprocessor captcha image generator
mp_captcha = MPDigitCaptcha(
font_paths=get_fonts(args.font_path), h=hp.seq_length, w=30,
num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
try:
# Must call start() before any call to mxnet module (https://github.com/apache/incubator-mxnet/issues/9213)
mp_captcha.start()
if args.gpu:
contexts = [mx.context.gpu(i) for i in range(args.gpu)]
else:
contexts = [mx.context.cpu(i) for i in range(args.cpu)]
init_states = lstm.init_states(hp.batch_size, hp.num_lstm_layer, hp.num_hidden)
data_train = OCRIter(
hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='train')
data_val = OCRIter(
hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='val')
symbol = lstm.lstm_unroll(
num_lstm_layer=hp.num_lstm_layer,
seq_len=hp.seq_length,
num_hidden=hp.num_hidden,
num_label=hp.num_label,
loss_type=args.loss)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
module = mx.mod.Module(
symbol,
data_names=['data', 'l0_init_c', 'l0_init_h', 'l1_init_c', 'l1_init_h'],
label_names=['label'],
context=contexts)
metrics = CtcMetrics(hp.seq_length)
module.fit(train_data=data_train,
eval_data=data_val,
# use metrics.accuracy or metrics.accuracy_lcs
eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
optimizer='sgd',
optimizer_params={'learning_rate': hp.learning_rate,
'momentum': hp.momentum,
'wd': 0.00001,
},
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
num_epoch=hp.num_epoch,
batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
)
except KeyboardInterrupt:
print("W: interrupt received, stopping...")
finally:
# Reset multiprocessing captcha generator to stop processes
mp_captcha.reset() | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"not",
"any",
"(",
"args",
".",
"loss",
"==",
"s",
"for",
"s",
"in",
"[",
"'ctc'",
",",
"'warpctc'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid loss '{}' (must be 'ctc' or 'warpctc')\"",
".",
"format",
"(",
"args",
".",
"loss",
")",
")",
"hp",
"=",
"Hyperparams",
"(",
")",
"# Start a multiprocessor captcha image generator",
"mp_captcha",
"=",
"MPDigitCaptcha",
"(",
"font_paths",
"=",
"get_fonts",
"(",
"args",
".",
"font_path",
")",
",",
"h",
"=",
"hp",
".",
"seq_length",
",",
"w",
"=",
"30",
",",
"num_digit_min",
"=",
"3",
",",
"num_digit_max",
"=",
"4",
",",
"num_processes",
"=",
"args",
".",
"num_proc",
",",
"max_queue_size",
"=",
"hp",
".",
"batch_size",
"*",
"2",
")",
"try",
":",
"# Must call start() before any call to mxnet module (https://github.com/apache/incubator-mxnet/issues/9213)",
"mp_captcha",
".",
"start",
"(",
")",
"if",
"args",
".",
"gpu",
":",
"contexts",
"=",
"[",
"mx",
".",
"context",
".",
"gpu",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"args",
".",
"gpu",
")",
"]",
"else",
":",
"contexts",
"=",
"[",
"mx",
".",
"context",
".",
"cpu",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"args",
".",
"cpu",
")",
"]",
"init_states",
"=",
"lstm",
".",
"init_states",
"(",
"hp",
".",
"batch_size",
",",
"hp",
".",
"num_lstm_layer",
",",
"hp",
".",
"num_hidden",
")",
"data_train",
"=",
"OCRIter",
"(",
"hp",
".",
"train_epoch_size",
"//",
"hp",
".",
"batch_size",
",",
"hp",
".",
"batch_size",
",",
"init_states",
",",
"captcha",
"=",
"mp_captcha",
",",
"name",
"=",
"'train'",
")",
"data_val",
"=",
"OCRIter",
"(",
"hp",
".",
"eval_epoch_size",
"//",
"hp",
".",
"batch_size",
",",
"hp",
".",
"batch_size",
",",
"init_states",
",",
"captcha",
"=",
"mp_captcha",
",",
"name",
"=",
"'val'",
")",
"symbol",
"=",
"lstm",
".",
"lstm_unroll",
"(",
"num_lstm_layer",
"=",
"hp",
".",
"num_lstm_layer",
",",
"seq_len",
"=",
"hp",
".",
"seq_length",
",",
"num_hidden",
"=",
"hp",
".",
"num_hidden",
",",
"num_label",
"=",
"hp",
".",
"num_label",
",",
"loss_type",
"=",
"args",
".",
"loss",
")",
"head",
"=",
"'%(asctime)-15s %(message)s'",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"head",
")",
"module",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
",",
"data_names",
"=",
"[",
"'data'",
",",
"'l0_init_c'",
",",
"'l0_init_h'",
",",
"'l1_init_c'",
",",
"'l1_init_h'",
"]",
",",
"label_names",
"=",
"[",
"'label'",
"]",
",",
"context",
"=",
"contexts",
")",
"metrics",
"=",
"CtcMetrics",
"(",
"hp",
".",
"seq_length",
")",
"module",
".",
"fit",
"(",
"train_data",
"=",
"data_train",
",",
"eval_data",
"=",
"data_val",
",",
"# use metrics.accuracy or metrics.accuracy_lcs",
"eval_metric",
"=",
"mx",
".",
"metric",
".",
"np",
"(",
"metrics",
".",
"accuracy",
",",
"allow_extra_outputs",
"=",
"True",
")",
",",
"optimizer",
"=",
"'sgd'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"hp",
".",
"learning_rate",
",",
"'momentum'",
":",
"hp",
".",
"momentum",
",",
"'wd'",
":",
"0.00001",
",",
"}",
",",
"initializer",
"=",
"mx",
".",
"init",
".",
"Xavier",
"(",
"factor_type",
"=",
"\"in\"",
",",
"magnitude",
"=",
"2.34",
")",
",",
"num_epoch",
"=",
"hp",
".",
"num_epoch",
",",
"batch_end_callback",
"=",
"mx",
".",
"callback",
".",
"Speedometer",
"(",
"hp",
".",
"batch_size",
",",
"50",
")",
",",
"epoch_end_callback",
"=",
"mx",
".",
"callback",
".",
"do_checkpoint",
"(",
"args",
".",
"prefix",
")",
",",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"W: interrupt received, stopping...\"",
")",
"finally",
":",
"# Reset multiprocessing captcha generator to stop processes",
"mp_captcha",
".",
"reset",
"(",
")"
] | Program entry point | [
"Program",
"entry",
"point"
] | python | train |
lsst-sqre/lsst-projectmeta-kit | lsstprojectmeta/git/timestamp.py | https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/git/timestamp.py#L88-L165 | def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
"""Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparision is case sensitive.** add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
# Cache the repo object for each query
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
# Iterate over all files with all file extensions, looking for the
# newest commit datetime.
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Found content path %r', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %r is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
'Could not get commit for %r, skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
# Seed initial newest_datetime
# or set a newer newest_datetime
newest_datetime = commit_datetime
logger.debug('Newest commit timestamp is %s', newest_datetime)
logger.debug('Final commit timestamp is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime | [
"def",
"get_content_commit_date",
"(",
"extensions",
",",
"acceptance_callback",
"=",
"None",
",",
"root_dir",
"=",
"'.'",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"def",
"_null_callback",
"(",
"_",
")",
":",
"return",
"True",
"if",
"acceptance_callback",
"is",
"None",
":",
"acceptance_callback",
"=",
"_null_callback",
"# Cache the repo object for each query",
"root_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"root_dir",
")",
"repo",
"=",
"git",
".",
"repo",
".",
"base",
".",
"Repo",
"(",
"path",
"=",
"root_dir",
",",
"search_parent_directories",
"=",
"True",
")",
"# Iterate over all files with all file extensions, looking for the",
"# newest commit datetime.",
"newest_datetime",
"=",
"None",
"iters",
"=",
"[",
"_iter_filepaths_with_extension",
"(",
"ext",
",",
"root_dir",
"=",
"root_dir",
")",
"for",
"ext",
"in",
"extensions",
"]",
"for",
"content_path",
"in",
"itertools",
".",
"chain",
"(",
"*",
"iters",
")",
":",
"content_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"content_path",
")",
")",
"if",
"acceptance_callback",
"(",
"content_path",
")",
":",
"logger",
".",
"debug",
"(",
"'Found content path %r'",
",",
"content_path",
")",
"try",
":",
"commit_datetime",
"=",
"read_git_commit_timestamp_for_file",
"(",
"content_path",
",",
"repo",
"=",
"repo",
")",
"logger",
".",
"debug",
"(",
"'Commit timestamp of %r is %s'",
",",
"content_path",
",",
"commit_datetime",
")",
"except",
"IOError",
":",
"logger",
".",
"warning",
"(",
"'Count not get commit for %r, skipping'",
",",
"content_path",
")",
"continue",
"if",
"not",
"newest_datetime",
"or",
"commit_datetime",
">",
"newest_datetime",
":",
"# Seed initial newest_datetime",
"# or set a newer newest_datetime",
"newest_datetime",
"=",
"commit_datetime",
"logger",
".",
"debug",
"(",
"'Newest commit timestamp is %s'",
",",
"newest_datetime",
")",
"logger",
".",
"debug",
"(",
"'Final commit timestamp is %s'",
",",
"newest_datetime",
")",
"if",
"newest_datetime",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'No content files found in {}'",
".",
"format",
"(",
"root_dir",
")",
")",
"return",
"newest_datetime"
] | Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparison is case sensitive.** Add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found. | [
"Get",
"the",
"datetime",
"for",
"the",
"most",
"recent",
"commit",
"to",
"a",
"project",
"that",
"affected",
"certain",
"types",
"of",
"content",
"."
] | python | valid |
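A hedged usage sketch of the function above; it only succeeds inside a Git checkout containing files with the given extensions, and both the extension tuple and the `_build` exclusion are illustrative choices rather than library defaults.
from lsstprojectmeta.git.timestamp import get_content_commit_date
def skip_build_dir(path):
    # Example acceptance callback: ignore files under a Sphinx _build directory.
    return '_build' not in path
newest = get_content_commit_date(
    ('rst', 'svg', 'png'),
    acceptance_callback=skip_build_dir,
    root_dir='.')
print(newest.isoformat())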
CalebBell/thermo | thermo/volume.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/volume.py#L937-L1023 | def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
methods_P = []
Tmins, Tmaxs = [], []
if has_CoolProp and self.CASRN in coolprop_dict:
methods.append(COOLPROP); methods_P.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)
if self.CASRN in CRC_inorg_l_data.index:
methods.append(CRC_INORG_L)
_, self.CRC_INORG_L_MW, self.CRC_INORG_L_rho, self.CRC_INORG_L_k, self.CRC_INORG_L_Tm, self.CRC_INORG_L_Tmax = _CRC_inorg_l_data_values[CRC_inorg_l_data.index.get_loc(self.CASRN)].tolist()
Tmins.append(self.CRC_INORG_L_Tm); Tmaxs.append(self.CRC_INORG_L_Tmax)
if self.CASRN in Perry_l_data.index:
methods.append(PERRYDIPPR)
_, C1, C2, C3, C4, self.DIPPR_Tmin, self.DIPPR_Tmax = _Perry_l_data_values[Perry_l_data.index.get_loc(self.CASRN)].tolist()
self.DIPPR_coeffs = [C1, C2, C3, C4]
Tmins.append(self.DIPPR_Tmin); Tmaxs.append(self.DIPPR_Tmax)
if self.CASRN in VDI_PPDS_2.index:
methods.append(VDI_PPDS)
_, MW, Tc, rhoc, A, B, C, D = _VDI_PPDS_2_values[VDI_PPDS_2.index.get_loc(self.CASRN)].tolist()
self.VDI_PPDS_coeffs = [A, B, C, D]
self.VDI_PPDS_MW = MW
self.VDI_PPDS_Tc = Tc
self.VDI_PPDS_rhoc = rhoc
Tmaxs.append(self.VDI_PPDS_Tc)
if self.CASRN in _VDISaturationDict:
methods.append(VDI_TABULAR)
Ts, props = VDI_tabular_data(self.CASRN, 'Volume (l)')
self.VDI_Tmin = Ts[0]
self.VDI_Tmax = Ts[-1]
self.tabular_data[VDI_TABULAR] = (Ts, props)
Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
if self.Tc and self.CASRN in COSTALD_data.index:
methods.append(HTCOSTALDFIT)
self.COSTALD_Vchar = float(COSTALD_data.at[self.CASRN, 'Vchar'])
self.COSTALD_omega_SRK = float(COSTALD_data.at[self.CASRN, 'omega_SRK'])
Tmins.append(0); Tmaxs.append(self.Tc)
if self.Tc and self.Pc and self.CASRN in COSTALD_data.index and not np.isnan(COSTALD_data.at[self.CASRN, 'Z_RA']):
methods.append(RACKETTFIT)
self.RACKETT_Z_RA = float(COSTALD_data.at[self.CASRN, 'Z_RA'])
Tmins.append(0); Tmaxs.append(self.Tc)
if self.CASRN in CRC_inorg_l_const_data.index:
methods.append(CRC_INORG_L_CONST)
self.CRC_INORG_L_CONST_Vm = float(CRC_inorg_l_const_data.at[self.CASRN, 'Vm'])
# Roughly data at STP; not guaranteed however; not used for Trange
if all((self.Tc, self.Vc, self.Zc)):
methods.append(YEN_WOODS_SAT)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.Zc)):
methods.append(RACKETT)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.omega)):
methods.append(YAMADA_GUNN)
methods.append(BHIRUD_NORMAL)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Vc, self.omega)):
methods.append(TOWNSEND_HALES)
methods.append(HTCOSTALD)
methods.append(MMSNM0)
if self.CASRN in SNM0_data.index:
methods.append(MMSNM0FIT)
self.SNM0_delta_SRK = float(SNM0_data.at[self.CASRN, 'delta_SRK'])
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Vc, self.omega, self.Tb, self.MW)):
methods.append(CAMPBELL_THODOS)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.omega)):
methods_P.append(COSTALD_COMPRESSED)
if self.eos:
methods_P.append(EOS)
if Tmins and Tmaxs:
self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
self.all_methods = set(methods)
self.all_methods_P = set(methods_P) | [
"def",
"load_all_methods",
"(",
"self",
")",
":",
"methods",
"=",
"[",
"]",
"methods_P",
"=",
"[",
"]",
"Tmins",
",",
"Tmaxs",
"=",
"[",
"]",
",",
"[",
"]",
"if",
"has_CoolProp",
"and",
"self",
".",
"CASRN",
"in",
"coolprop_dict",
":",
"methods",
".",
"append",
"(",
"COOLPROP",
")",
"methods_P",
".",
"append",
"(",
"COOLPROP",
")",
"self",
".",
"CP_f",
"=",
"coolprop_fluids",
"[",
"self",
".",
"CASRN",
"]",
"Tmins",
".",
"append",
"(",
"self",
".",
"CP_f",
".",
"Tt",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"CP_f",
".",
"Tc",
")",
"if",
"self",
".",
"CASRN",
"in",
"CRC_inorg_l_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"CRC_INORG_L",
")",
"_",
",",
"self",
".",
"CRC_INORG_L_MW",
",",
"self",
".",
"CRC_INORG_L_rho",
",",
"self",
".",
"CRC_INORG_L_k",
",",
"self",
".",
"CRC_INORG_L_Tm",
",",
"self",
".",
"CRC_INORG_L_Tmax",
"=",
"_CRC_inorg_l_data_values",
"[",
"CRC_inorg_l_data",
".",
"index",
".",
"get_loc",
"(",
"self",
".",
"CASRN",
")",
"]",
".",
"tolist",
"(",
")",
"Tmins",
".",
"append",
"(",
"self",
".",
"CRC_INORG_L_Tm",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"CRC_INORG_L_Tmax",
")",
"if",
"self",
".",
"CASRN",
"in",
"Perry_l_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"PERRYDIPPR",
")",
"_",
",",
"C1",
",",
"C2",
",",
"C3",
",",
"C4",
",",
"self",
".",
"DIPPR_Tmin",
",",
"self",
".",
"DIPPR_Tmax",
"=",
"_Perry_l_data_values",
"[",
"Perry_l_data",
".",
"index",
".",
"get_loc",
"(",
"self",
".",
"CASRN",
")",
"]",
".",
"tolist",
"(",
")",
"self",
".",
"DIPPR_coeffs",
"=",
"[",
"C1",
",",
"C2",
",",
"C3",
",",
"C4",
"]",
"Tmins",
".",
"append",
"(",
"self",
".",
"DIPPR_Tmin",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"DIPPR_Tmax",
")",
"if",
"self",
".",
"CASRN",
"in",
"VDI_PPDS_2",
".",
"index",
":",
"methods",
".",
"append",
"(",
"VDI_PPDS",
")",
"_",
",",
"MW",
",",
"Tc",
",",
"rhoc",
",",
"A",
",",
"B",
",",
"C",
",",
"D",
"=",
"_VDI_PPDS_2_values",
"[",
"VDI_PPDS_2",
".",
"index",
".",
"get_loc",
"(",
"self",
".",
"CASRN",
")",
"]",
".",
"tolist",
"(",
")",
"self",
".",
"VDI_PPDS_coeffs",
"=",
"[",
"A",
",",
"B",
",",
"C",
",",
"D",
"]",
"self",
".",
"VDI_PPDS_MW",
"=",
"MW",
"self",
".",
"VDI_PPDS_Tc",
"=",
"Tc",
"self",
".",
"VDI_PPDS_rhoc",
"=",
"rhoc",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"VDI_PPDS_Tc",
")",
"if",
"self",
".",
"CASRN",
"in",
"_VDISaturationDict",
":",
"methods",
".",
"append",
"(",
"VDI_TABULAR",
")",
"Ts",
",",
"props",
"=",
"VDI_tabular_data",
"(",
"self",
".",
"CASRN",
",",
"'Volume (l)'",
")",
"self",
".",
"VDI_Tmin",
"=",
"Ts",
"[",
"0",
"]",
"self",
".",
"VDI_Tmax",
"=",
"Ts",
"[",
"-",
"1",
"]",
"self",
".",
"tabular_data",
"[",
"VDI_TABULAR",
"]",
"=",
"(",
"Ts",
",",
"props",
")",
"Tmins",
".",
"append",
"(",
"self",
".",
"VDI_Tmin",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"VDI_Tmax",
")",
"if",
"self",
".",
"Tc",
"and",
"self",
".",
"CASRN",
"in",
"COSTALD_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"HTCOSTALDFIT",
")",
"self",
".",
"COSTALD_Vchar",
"=",
"float",
"(",
"COSTALD_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Vchar'",
"]",
")",
"self",
".",
"COSTALD_omega_SRK",
"=",
"float",
"(",
"COSTALD_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'omega_SRK'",
"]",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"self",
".",
"Tc",
"and",
"self",
".",
"Pc",
"and",
"self",
".",
"CASRN",
"in",
"COSTALD_data",
".",
"index",
"and",
"not",
"np",
".",
"isnan",
"(",
"COSTALD_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Z_RA'",
"]",
")",
":",
"methods",
".",
"append",
"(",
"RACKETTFIT",
")",
"self",
".",
"RACKETT_Z_RA",
"=",
"float",
"(",
"COSTALD_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Z_RA'",
"]",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"self",
".",
"CASRN",
"in",
"CRC_inorg_l_const_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"CRC_INORG_L_CONST",
")",
"self",
".",
"CRC_INORG_L_CONST_Vm",
"=",
"float",
"(",
"CRC_inorg_l_const_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Vm'",
"]",
")",
"# Roughly data at STP; not guaranteed however; not used for Trange",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Vc",
",",
"self",
".",
"Zc",
")",
")",
":",
"methods",
".",
"append",
"(",
"YEN_WOODS_SAT",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"Zc",
")",
")",
":",
"methods",
".",
"append",
"(",
"RACKETT",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"omega",
")",
")",
":",
"methods",
".",
"append",
"(",
"YAMADA_GUNN",
")",
"methods",
".",
"append",
"(",
"BHIRUD_NORMAL",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Vc",
",",
"self",
".",
"omega",
")",
")",
":",
"methods",
".",
"append",
"(",
"TOWNSEND_HALES",
")",
"methods",
".",
"append",
"(",
"HTCOSTALD",
")",
"methods",
".",
"append",
"(",
"MMSNM0",
")",
"if",
"self",
".",
"CASRN",
"in",
"SNM0_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"MMSNM0FIT",
")",
"self",
".",
"SNM0_delta_SRK",
"=",
"float",
"(",
"SNM0_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'delta_SRK'",
"]",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Vc",
",",
"self",
".",
"omega",
",",
"self",
".",
"Tb",
",",
"self",
".",
"MW",
")",
")",
":",
"methods",
".",
"append",
"(",
"CAMPBELL_THODOS",
")",
"Tmins",
".",
"append",
"(",
"0",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"Tc",
")",
"if",
"all",
"(",
"(",
"self",
".",
"Tc",
",",
"self",
".",
"Pc",
",",
"self",
".",
"omega",
")",
")",
":",
"methods_P",
".",
"append",
"(",
"COSTALD_COMPRESSED",
")",
"if",
"self",
".",
"eos",
":",
"methods_P",
".",
"append",
"(",
"EOS",
")",
"if",
"Tmins",
"and",
"Tmaxs",
":",
"self",
".",
"Tmin",
",",
"self",
".",
"Tmax",
"=",
"min",
"(",
"Tmins",
")",
",",
"max",
"(",
"Tmaxs",
")",
"self",
".",
"all_methods",
"=",
"set",
"(",
"methods",
")",
"self",
".",
"all_methods_P",
"=",
"set",
"(",
"methods_P",
")"
] | r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters. | [
"r",
"Method",
"which",
"picks",
"out",
"coefficients",
"for",
"the",
"specified",
"chemical",
"from",
"the",
"various",
"dictionaries",
"and",
"DataFrames",
"storing",
"it",
".",
"All",
"data",
"is",
"stored",
"as",
"attributes",
".",
"This",
"method",
"also",
"sets",
":",
"obj",
":",
"Tmin",
":",
"obj",
":",
"Tmax",
":",
"obj",
":",
"all_methods",
"and",
"obj",
":",
"all_methods_P",
"as",
"a",
"set",
"of",
"methods",
"for",
"which",
"the",
"data",
"exists",
"for",
"."
] | python | valid |
wesm/feather | cpp/build-support/cpplint.py | https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L3776-L3809 | def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
typenames = GetTemplateArgs(clean_lines, linenum)
and_pos = len(match.group(1))
if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum, typenames):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&') | [
"def",
"CheckRValueReference",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Find lines missing spaces around &&.",
"# TODO(unknown): currently we don't check for rvalue references",
"# with spaces surrounding the && to avoid false positives with",
"# boolean expressions.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"match",
"=",
"Match",
"(",
"r'^(.*\\S)&&'",
",",
"line",
")",
"if",
"not",
"match",
":",
"match",
"=",
"Match",
"(",
"r'(.*)&&\\S'",
",",
"line",
")",
"if",
"(",
"not",
"match",
")",
"or",
"'(&&)'",
"in",
"line",
"or",
"Search",
"(",
"r'\\boperator\\s*$'",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
":",
"return",
"# Either poorly formed && or an rvalue reference, check the context",
"# to get a more accurate error message. Mostly we want to determine",
"# if what's to the left of \"&&\" is a type or not.",
"typenames",
"=",
"GetTemplateArgs",
"(",
"clean_lines",
",",
"linenum",
")",
"and_pos",
"=",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"if",
"IsRValueType",
"(",
"typenames",
",",
"clean_lines",
",",
"nesting_state",
",",
"linenum",
",",
"and_pos",
")",
":",
"if",
"not",
"IsRValueAllowed",
"(",
"clean_lines",
",",
"linenum",
",",
"typenames",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/c++11'",
",",
"3",
",",
"'RValue references are an unapproved C++ feature.'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around &&'",
")"
] | Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | [
"Check",
"for",
"rvalue",
"references",
"."
] | python | train |
wind-python/windpowerlib | windpowerlib/wake_losses.py | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wake_losses.py#L59-L150 | def get_wind_efficiency_curve(curve_name='all'):
r"""
Reads wind efficiency curve(s) specified in `curve_name`.
Parameters
----------
curve_name : str or list
Specifies the curve. Use 'all' to get all curves in a MultiIndex
DataFrame or one of the curve names to retrieve a single curve.
Default: 'all'.
Returns
-------
efficiency_curve : pd.DataFrame
Wind efficiency curve. Contains 'wind_speed' and 'efficiency' columns
with wind speed in m/s and wind efficiency (dimensionless).
If `curve_name` is 'all' or a list of strings a MultiIndex DataFrame is
returned with curve names in the first level of the columns.
Notes
-----
The wind efficiency curves were generated in the "Dena Netzstudie" [1]_ and
in the work of Kaspar Knorr [2]_. The mean wind efficiency curve is an
average curve from 12 wind farm distributed over Germany ([1]_) or
respectively an average from over 2000 wind farms in Germany ([2]_). Curves
with the appendix 'extreme' are wind efficiency curves of single wind farms
that are extremely deviating from the respective mean wind efficiency
curve. For more information see [1]_ and [2]_.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
Examples
--------
.. parsed-literal::
# Example to plot all curves
fig, ax = plt.subplots()
df = get_wind_efficiency_curve(curve_name='all')
for t in df.columns.get_level_values(0).unique():
p = df[t].set_index('wind_speed')['efficiency']
p.name = t
ax = p.plot(ax=ax, legend=True)
plt.show()
"""
possible_curve_names = ['dena_mean', 'knorr_mean', 'dena_extreme1',
'dena_extreme2', 'knorr_extreme1',
'knorr_extreme2', 'knorr_extreme3']
if curve_name == 'all':
curve_names = possible_curve_names
elif isinstance(curve_name, str):
curve_names = [curve_name]
else:
curve_names = curve_name
efficiency_curve = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []],
codes=[[], []]))
for curve_name in curve_names:
if curve_name.split('_')[0] not in ['dena', 'knorr']:
raise ValueError("`curve_name` must be one of the following: " +
"{} but is {}".format(possible_curve_names,
curve_name))
path = os.path.join(os.path.dirname(__file__), 'data',
'wind_efficiency_curves_{}.csv'.format(
curve_name.split('_')[0]))
# Read wind efficiency curves from file
wind_efficiency_curves = pd.read_csv(path)
# Raise error if wind efficiency curve specified in 'curve_name' does
# not exist
if curve_name not in list(wind_efficiency_curves):
msg = ("Efficiency curve <{0}> does not exist. Must be one of the"
"following: {1}.")
raise ValueError(msg.format(curve_name, *possible_curve_names))
# Get wind efficiency curve and rename column containing efficiency
wec = wind_efficiency_curves[['wind_speed', curve_name]]
efficiency_curve[curve_name, 'wind_speed'] = wec['wind_speed']
efficiency_curve[curve_name, 'efficiency'] = wec[curve_name]
if len(curve_names) == 1:
return efficiency_curve[curve_names[0]]
else:
return efficiency_curve | [
"def",
"get_wind_efficiency_curve",
"(",
"curve_name",
"=",
"'all'",
")",
":",
"possible_curve_names",
"=",
"[",
"'dena_mean'",
",",
"'knorr_mean'",
",",
"'dena_extreme1'",
",",
"'dena_extreme2'",
",",
"'knorr_extreme1'",
",",
"'knorr_extreme2'",
",",
"'knorr_extreme3'",
"]",
"if",
"curve_name",
"==",
"'all'",
":",
"curve_names",
"=",
"possible_curve_names",
"elif",
"isinstance",
"(",
"curve_name",
",",
"str",
")",
":",
"curve_names",
"=",
"[",
"curve_name",
"]",
"else",
":",
"curve_names",
"=",
"curve_name",
"efficiency_curve",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"pd",
".",
"MultiIndex",
"(",
"levels",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
",",
"codes",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
")",
")",
"for",
"curve_name",
"in",
"curve_names",
":",
"if",
"curve_name",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"not",
"in",
"[",
"'dena'",
",",
"'knorr'",
"]",
":",
"raise",
"ValueError",
"(",
"\"`curve_name` must be one of the following: \"",
"+",
"\"{} but is {}\"",
".",
"format",
"(",
"possible_curve_names",
",",
"curve_name",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'data'",
",",
"'wind_efficiency_curves_{}.csv'",
".",
"format",
"(",
"curve_name",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
")",
"# Read wind efficiency curves from file",
"wind_efficiency_curves",
"=",
"pd",
".",
"read_csv",
"(",
"path",
")",
"# Raise error if wind efficiency curve specified in 'curve_name' does",
"# not exist",
"if",
"curve_name",
"not",
"in",
"list",
"(",
"wind_efficiency_curves",
")",
":",
"msg",
"=",
"(",
"\"Efficiency curve <{0}> does not exist. Must be one of the\"",
"\"following: {1}.\"",
")",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"curve_name",
",",
"*",
"possible_curve_names",
")",
")",
"# Get wind efficiency curve and rename column containing efficiency",
"wec",
"=",
"wind_efficiency_curves",
"[",
"[",
"'wind_speed'",
",",
"curve_name",
"]",
"]",
"efficiency_curve",
"[",
"curve_name",
",",
"'wind_speed'",
"]",
"=",
"wec",
"[",
"'wind_speed'",
"]",
"efficiency_curve",
"[",
"curve_name",
",",
"'efficiency'",
"]",
"=",
"wec",
"[",
"curve_name",
"]",
"if",
"len",
"(",
"curve_names",
")",
"==",
"1",
":",
"return",
"efficiency_curve",
"[",
"curve_names",
"[",
"0",
"]",
"]",
"else",
":",
"return",
"efficiency_curve"
] | r"""
Reads wind efficiency curve(s) specified in `curve_name`.
Parameters
----------
curve_name : str or list
Specifies the curve. Use 'all' to get all curves in a MultiIndex
DataFrame or one of the curve names to retrieve a single curve.
Default: 'all'.
Returns
-------
efficiency_curve : pd.DataFrame
Wind efficiency curve. Contains 'wind_speed' and 'efficiency' columns
with wind speed in m/s and wind efficiency (dimensionless).
If `curve_name` is 'all' or a list of strings a MultiIndex DataFrame is
returned with curve names in the first level of the columns.
Notes
-----
The wind efficiency curves were generated in the "Dena Netzstudie" [1]_ and
in the work of Kaspar Knorr [2]_. The mean wind efficiency curve is an
average curve from 12 wind farm distributed over Germany ([1]_) or
respectively an average from over 2000 wind farms in Germany ([2]_). Curves
with the appendix 'extreme' are wind efficiency curves of single wind farms
that are extremely deviating from the respective mean wind efficiency
curve. For more information see [1]_ and [2]_.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
Examples
--------
.. parsed-literal::
# Example to plot all curves
fig, ax = plt.subplots()
df = get_wind_efficiency_curve(curve_name='all')
for t in df.columns.get_level_values(0).unique():
p = df[t].set_index('wind_speed')['efficiency']
p.name = t
ax = p.plot(ax=ax, legend=True)
plt.show() | [
"r",
"Reads",
"wind",
"efficiency",
"curve",
"(",
"s",
")",
"specified",
"in",
"curve_name",
"."
] | python | train |
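The docstring example above plots every curve; retrieving a single curve is shorter, sketched here on the assumption that windpowerlib and its bundled CSV data are installed.
from windpowerlib.wake_losses import get_wind_efficiency_curve
dena_mean = get_wind_efficiency_curve(curve_name='dena_mean')
# Two columns: wind speed in m/s and the dimensionless wind efficiency.
print(dena_mean[['wind_speed', 'efficiency']].head())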
getpelican/pelican-plugins | events/events.py | https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/events/events.py#L159-L168 | def generate_events_list(generator):
"""Populate the event_list variable to be used in jinja templates"""
if not localized_events:
generator.context['events_list'] = sorted(events, reverse = True,
key=lambda ev: (ev.dtstart, ev.dtend))
else:
generator.context['events_list'] = {k: sorted(v, reverse = True,
key=lambda ev: (ev.dtstart, ev.dtend))
for k, v in localized_events.items()} | [
"def",
"generate_events_list",
"(",
"generator",
")",
":",
"if",
"not",
"localized_events",
":",
"generator",
".",
"context",
"[",
"'events_list'",
"]",
"=",
"sorted",
"(",
"events",
",",
"reverse",
"=",
"True",
",",
"key",
"=",
"lambda",
"ev",
":",
"(",
"ev",
".",
"dtstart",
",",
"ev",
".",
"dtend",
")",
")",
"else",
":",
"generator",
".",
"context",
"[",
"'events_list'",
"]",
"=",
"{",
"k",
":",
"sorted",
"(",
"v",
",",
"reverse",
"=",
"True",
",",
"key",
"=",
"lambda",
"ev",
":",
"(",
"ev",
".",
"dtstart",
",",
"ev",
".",
"dtend",
")",
")",
"for",
"k",
",",
"v",
"in",
"localized_events",
".",
"items",
"(",
")",
"}"
] | Populate the events_list variable to be used in jinja templates | [
"Populate",
"the",
"event_list",
"variable",
"to",
"be",
"used",
"in",
"jinja",
"templates"
] | python | train |
fastai/fastai | fastai/train.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/train.py#L34-L43 | def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,
flat_master:bool=False, max_scale:float=2**24)->Learner:
"Put `learn` in FP16 precision mode."
learn.to_fp32()
learn.model = model2half(learn.model)
learn.data.add_tfm(batch_to_half)
learn.mp_cb = MixedPrecision(learn, loss_scale=loss_scale, max_noskip=max_noskip, dynamic=dynamic, clip=clip,
flat_master=flat_master, max_scale=max_scale)
learn.callbacks.append(learn.mp_cb)
return learn | [
"def",
"to_fp16",
"(",
"learn",
":",
"Learner",
",",
"loss_scale",
":",
"float",
"=",
"None",
",",
"max_noskip",
":",
"int",
"=",
"1000",
",",
"dynamic",
":",
"bool",
"=",
"True",
",",
"clip",
":",
"float",
"=",
"None",
",",
"flat_master",
":",
"bool",
"=",
"False",
",",
"max_scale",
":",
"float",
"=",
"2",
"**",
"24",
")",
"->",
"Learner",
":",
"learn",
".",
"to_fp32",
"(",
")",
"learn",
".",
"model",
"=",
"model2half",
"(",
"learn",
".",
"model",
")",
"learn",
".",
"data",
".",
"add_tfm",
"(",
"batch_to_half",
")",
"learn",
".",
"mp_cb",
"=",
"MixedPrecision",
"(",
"learn",
",",
"loss_scale",
"=",
"loss_scale",
",",
"max_noskip",
"=",
"max_noskip",
",",
"dynamic",
"=",
"dynamic",
",",
"clip",
"=",
"clip",
",",
"flat_master",
"=",
"flat_master",
",",
"max_scale",
"=",
"max_scale",
")",
"learn",
".",
"callbacks",
".",
"append",
"(",
"learn",
".",
"mp_cb",
")",
"return",
"learn"
] | Put `learn` in FP16 precision mode. | [
"Put",
"learn",
"in",
"FP16",
"precision",
"mode",
"."
] | python | train |
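A minimal usage sketch for the to_fp16 helper above (hedged: the dataset path, cnn_learner, and ImageDataBunch setup are illustrative assumptions, not taken from the entry; a CUDA-capable GPU is assumed for mixed precision to pay off):

    from fastai.vision import ImageDataBunch, cnn_learner, models
    from fastai.train import to_fp16

    data = ImageDataBunch.from_folder('path/to/images')   # placeholder dataset layout
    learn = cnn_learner(data, models.resnet18)             # placeholder model choice
    learn = to_fp16(learn, dynamic=True)                   # dynamic loss scaling, per the signature above
    learn.fit_one_cycle(1)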
alephdata/memorious | memorious/helpers/__init__.py | https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L33-L39 | def search_results_total(html, xpath, check, delimiter):
""" Get the total number of results from the DOM of a search index. """
for container in html.findall(xpath):
if check in container.findtext('.'):
text = container.findtext('.').split(delimiter)
total = int(text[-1].strip())
return total | [
"def",
"search_results_total",
"(",
"html",
",",
"xpath",
",",
"check",
",",
"delimiter",
")",
":",
"for",
"container",
"in",
"html",
".",
"findall",
"(",
"xpath",
")",
":",
"if",
"check",
"in",
"container",
".",
"findtext",
"(",
"'.'",
")",
":",
"text",
"=",
"container",
".",
"findtext",
"(",
"'.'",
")",
".",
"split",
"(",
"delimiter",
")",
"total",
"=",
"int",
"(",
"text",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
")",
"return",
"total"
] | Get the total number of results from the DOM of a search index. | [
"Get",
"the",
"total",
"number",
"of",
"results",
"from",
"the",
"DOM",
"of",
"a",
"search",
"index",
"."
] | python | train |
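A hedged usage sketch for the helper above; the HTML fragment, XPath, and delimiter are invented for illustration and assume an lxml-parsed document.

    from lxml import html as lxml_html
    from memorious.helpers import search_results_total  # assumes the package is installed

    page = lxml_html.fromstring(
        "<div><p class='count'>Results found: 1240</p></div>"
    )
    total = search_results_total(
        page, xpath=".//p[@class='count']", check="Results found", delimiter=":"
    )
    print(total)  # -> 1240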
pgxcentre/geneparse | geneparse/readers/impute2.py | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/impute2.py#L222-L239 | def _get_biallelic_variant(self, variant, info, _check_alleles=True):
"""Creates a bi-allelic variant."""
info = info.iloc[0, :]
assert not info.multiallelic
# Seeking and parsing the file
self._impute2_file.seek(info.seek)
genotypes = self._parse_impute2_line(self._impute2_file.readline())
variant_alleles = variant._encode_alleles([
genotypes.reference, genotypes.coded,
])
if (_check_alleles and variant_alleles != variant.alleles):
# Variant with requested alleles is unavailable.
logging.variant_not_found(variant)
return []
return [genotypes] | [
"def",
"_get_biallelic_variant",
"(",
"self",
",",
"variant",
",",
"info",
",",
"_check_alleles",
"=",
"True",
")",
":",
"info",
"=",
"info",
".",
"iloc",
"[",
"0",
",",
":",
"]",
"assert",
"not",
"info",
".",
"multiallelic",
"# Seeking and parsing the file",
"self",
".",
"_impute2_file",
".",
"seek",
"(",
"info",
".",
"seek",
")",
"genotypes",
"=",
"self",
".",
"_parse_impute2_line",
"(",
"self",
".",
"_impute2_file",
".",
"readline",
"(",
")",
")",
"variant_alleles",
"=",
"variant",
".",
"_encode_alleles",
"(",
"[",
"genotypes",
".",
"reference",
",",
"genotypes",
".",
"coded",
",",
"]",
")",
"if",
"(",
"_check_alleles",
"and",
"variant_alleles",
"!=",
"variant",
".",
"alleles",
")",
":",
"# Variant with requested alleles is unavailable.",
"logging",
".",
"variant_not_found",
"(",
"variant",
")",
"return",
"[",
"]",
"return",
"[",
"genotypes",
"]"
] | Creates a bi-allelic variant. | [
"Creates",
"a",
"bi",
"-",
"allelic",
"variant",
"."
] | python | train |
GluuFederation/oxd-python | oxdpython/client.py | https://github.com/GluuFederation/oxd-python/blob/a0448cda03b4384bc50a8c20bd65eacd983bceb8/oxdpython/client.py#L115-L153 | def get_authorization_url(self, acr_values=None, prompt=None, scope=None,
custom_params=None):
"""Function to get the authorization url that can be opened in the
browser for the user to provide authorization and authentication
Parameters:
* **acr_values (list, optional):** acr values in the order of priority
* **prompt (string, optional):** prompt=login is required if you want to force altering the current user session (e.g. the user is already logged in from site1, and site2 constructs an authorization request and wants to force altering the current user session)
* **scope (list, optional):** scopes required, takes the one provided during site registrations by default
* **custom_params (dict, optional):** Any custom arguments that the client wishes to pass on to the OP can be passed on as extra parameters to the function
Returns:
**string:** The authorization url that the user must access for authentication and authorization
Raises:
**OxdServerError:** If the oxd throws an error for any reason.
"""
params = {"oxd_id": self.oxd_id}
if scope and isinstance(scope, list):
params["scope"] = scope
if acr_values and isinstance(acr_values, list):
params["acr_values"] = acr_values
if prompt and isinstance(prompt, str):
params["prompt"] = prompt
if custom_params:
params["custom_parameters"] = custom_params
logger.debug("Sending command `get_authorization_url` with params %s",
params)
response = self.msgr.request("get_authorization_url", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['authorization_url'] | [
"def",
"get_authorization_url",
"(",
"self",
",",
"acr_values",
"=",
"None",
",",
"prompt",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"custom_params",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"oxd_id\"",
":",
"self",
".",
"oxd_id",
"}",
"if",
"scope",
"and",
"isinstance",
"(",
"scope",
",",
"list",
")",
":",
"params",
"[",
"\"scope\"",
"]",
"=",
"scope",
"if",
"acr_values",
"and",
"isinstance",
"(",
"acr_values",
",",
"list",
")",
":",
"params",
"[",
"\"acr_values\"",
"]",
"=",
"acr_values",
"if",
"prompt",
"and",
"isinstance",
"(",
"prompt",
",",
"str",
")",
":",
"params",
"[",
"\"prompt\"",
"]",
"=",
"prompt",
"if",
"custom_params",
":",
"params",
"[",
"\"custom_parameters\"",
"]",
"=",
"custom_params",
"logger",
".",
"debug",
"(",
"\"Sending command `get_authorization_url` with params %s\"",
",",
"params",
")",
"response",
"=",
"self",
".",
"msgr",
".",
"request",
"(",
"\"get_authorization_url\"",
",",
"*",
"*",
"params",
")",
"logger",
".",
"debug",
"(",
"\"Received response: %s\"",
",",
"response",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'error'",
":",
"raise",
"OxdServerError",
"(",
"response",
"[",
"'data'",
"]",
")",
"return",
"response",
"[",
"'data'",
"]",
"[",
"'authorization_url'",
"]"
] | Function to get the authorization url that can be opened in the
browser for the user to provide authorization and authentication
Parameters:
* **acr_values (list, optional):** acr values in the order of priority
* **prompt (string, optional):** prompt=login is required if you want to force altering the current user session (e.g. the user is already logged in from site1, and site2 constructs an authorization request and wants to force altering the current user session)
* **scope (list, optional):** scopes required, takes the one provided during site registrations by default
* **custom_params (dict, optional):** Any custom arguments that the client wishes to pass on to the OP can be passed on as extra parameters to the function
Returns:
**string:** The authorization url that the user must access for authentication and authorization
Raises:
**OxdServerError:** If the oxd throws an error for any reason. | [
"Function",
"to",
"get",
"the",
"authorization",
"url",
"that",
"can",
"be",
"opened",
"in",
"the",
"browser",
"for",
"the",
"user",
"to",
"provide",
"authorization",
"and",
"authentication"
] | python | train |
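A hedged usage sketch for get_authorization_url; the config path, scopes, and acr values are placeholders, and a running oxd server plus a prior site registration are assumed.

    from oxdpython import Client  # assumes oxd-python is installed

    client = Client('/path/to/oxd-config.cfg')   # placeholder configuration file
    auth_url = client.get_authorization_url(
        scope=['openid', 'profile'],
        acr_values=['basic'],
        prompt='login',
    )
    print(auth_url)  # redirect the user's browser to this URL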
modin-project/modin | modin/backends/pandas/query_compiler.py | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1564-L1601 | def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return pandas.DataFrame(result)
func = self._prepare_method(mode_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
new_dtypes = self._dtype_cache
if new_dtypes is not None:
new_dtypes.index = new_columns
return self.__constructor__(
new_data, new_index, new_columns, new_dtypes
).dropna(axis=axis, how="all") | [
"def",
"mode",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"\"axis\"",
",",
"0",
")",
"def",
"mode_builder",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"df",
".",
"mode",
"(",
"*",
"*",
"kwargs",
")",
"# We return a dataframe with the same shape as the input to ensure",
"# that all the partitions will be the same shape",
"if",
"not",
"axis",
"and",
"len",
"(",
"df",
")",
"!=",
"len",
"(",
"result",
")",
":",
"# Pad columns",
"append_values",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"result",
".",
"columns",
",",
"index",
"=",
"range",
"(",
"len",
"(",
"result",
")",
",",
"len",
"(",
"df",
")",
")",
")",
"result",
"=",
"pandas",
".",
"concat",
"(",
"[",
"result",
",",
"append_values",
"]",
",",
"ignore_index",
"=",
"True",
")",
"elif",
"axis",
"and",
"len",
"(",
"df",
".",
"columns",
")",
"!=",
"len",
"(",
"result",
".",
"columns",
")",
":",
"# Pad rows",
"append_vals",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"range",
"(",
"len",
"(",
"result",
".",
"columns",
")",
",",
"len",
"(",
"df",
".",
"columns",
")",
")",
",",
"index",
"=",
"result",
".",
"index",
",",
")",
"result",
"=",
"pandas",
".",
"concat",
"(",
"[",
"result",
",",
"append_vals",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"pandas",
".",
"DataFrame",
"(",
"result",
")",
"func",
"=",
"self",
".",
"_prepare_method",
"(",
"mode_builder",
",",
"*",
"*",
"kwargs",
")",
"new_data",
"=",
"self",
".",
"_map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"new_index",
"=",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"self",
".",
"index",
")",
")",
"if",
"not",
"axis",
"else",
"self",
".",
"index",
"new_columns",
"=",
"self",
".",
"columns",
"if",
"not",
"axis",
"else",
"pandas",
".",
"RangeIndex",
"(",
"len",
"(",
"self",
".",
"columns",
")",
")",
"new_dtypes",
"=",
"self",
".",
"_dtype_cache",
"if",
"new_dtypes",
"is",
"not",
"None",
":",
"new_dtypes",
".",
"index",
"=",
"new_columns",
"return",
"self",
".",
"__constructor__",
"(",
"new_data",
",",
"new_index",
",",
"new_columns",
",",
"new_dtypes",
")",
".",
"dropna",
"(",
"axis",
"=",
"axis",
",",
"how",
"=",
"\"all\"",
")"
] | Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated. | [
"Returns",
"a",
"new",
"QueryCompiler",
"with",
"modes",
"calculated",
"for",
"each",
"label",
"along",
"given",
"axis",
"."
] | python | train |
briancappello/flask-unchained | flask_unchained/bundles/security/extensions/security.py | https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/extensions/security.py#L215-L226 | def _get_pwd_context(self, app: FlaskUnchained) -> CryptContext:
"""
Get the password hashing context.
"""
pw_hash = app.config.SECURITY_PASSWORD_HASH
schemes = app.config.SECURITY_PASSWORD_SCHEMES
if pw_hash not in schemes:
allowed = (', '.join(schemes[:-1]) + ' and ' + schemes[-1])
raise ValueError(f'Invalid password hashing scheme {pw_hash}. '
f'Allowed values are {allowed}.')
return CryptContext(schemes=schemes, default=pw_hash,
deprecated=app.config.SECURITY_DEPRECATED_PASSWORD_SCHEMES) | [
"def",
"_get_pwd_context",
"(",
"self",
",",
"app",
":",
"FlaskUnchained",
")",
"->",
"CryptContext",
":",
"pw_hash",
"=",
"app",
".",
"config",
".",
"SECURITY_PASSWORD_HASH",
"schemes",
"=",
"app",
".",
"config",
".",
"SECURITY_PASSWORD_SCHEMES",
"if",
"pw_hash",
"not",
"in",
"schemes",
":",
"allowed",
"=",
"(",
"', '",
".",
"join",
"(",
"schemes",
"[",
":",
"-",
"1",
"]",
")",
"+",
"' and '",
"+",
"schemes",
"[",
"-",
"1",
"]",
")",
"raise",
"ValueError",
"(",
"f'Invalid password hashing scheme {pw_hash}. '",
"f'Allowed values are {allowed}.'",
")",
"return",
"CryptContext",
"(",
"schemes",
"=",
"schemes",
",",
"default",
"=",
"pw_hash",
",",
"deprecated",
"=",
"app",
".",
"config",
".",
"SECURITY_DEPRECATED_PASSWORD_SCHEMES",
")"
] | Get the password hashing context. | [
"Get",
"the",
"password",
"hashing",
"context",
"."
] | python | train |
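The method above only assembles a passlib CryptContext from config values; a hedged, standalone sketch of what that context is then used for (scheme names here are illustrative, not Flask Unchained defaults):

    from passlib.context import CryptContext

    pwd_context = CryptContext(schemes=['pbkdf2_sha256', 'plaintext'],
                               default='pbkdf2_sha256',
                               deprecated=['plaintext'])
    hashed = pwd_context.hash('s3cret')          # hash with the default scheme
    assert pwd_context.verify('s3cret', hashed)  # verify against any allowed scheme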
rsgalloway/grit | grit/server/handler.py | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/handler.py#L79-L84 | def handle_versions(repo, **kwargs):
""":return: repo.versions()"""
log.info('versions: %s %s' %(repo, kwargs))
if not hasattr(repo, 'versions'):
return []
return [v.serialize() for v in repo.versions(**kwargs)] | [
"def",
"handle_versions",
"(",
"repo",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"info",
"(",
"'versions: %s %s'",
"%",
"(",
"repo",
",",
"kwargs",
")",
")",
"if",
"not",
"hasattr",
"(",
"repo",
",",
"'versions'",
")",
":",
"return",
"[",
"]",
"return",
"[",
"v",
".",
"serialize",
"(",
")",
"for",
"v",
"in",
"repo",
".",
"versions",
"(",
"*",
"*",
"kwargs",
")",
"]"
] | :return: repo.versions() | [
":",
"return",
":",
"repo",
".",
"versions",
"()"
] | python | train |
googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L519-L545 | def _datasource_cell(args, cell_body):
"""Implements the BigQuery datasource cell magic for ipython notebooks.
The supported syntax is
%%bq datasource --name <var> --paths <url> [--format <CSV|JSON>]
<schema>
Args:
args: the optional arguments following '%%bq datasource'
cell_body: the datasource's schema in json/yaml
"""
name = args['name']
paths = args['paths']
data_format = (args['format'] or 'CSV').lower()
compressed = args['compressed'] or False
# Get the source schema from the cell body
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
# Finally build the datasource object
datasource = bigquery.ExternalDataSource(source=paths, source_format=data_format,
compressed=compressed, schema=schema)
google.datalab.utils.commands.notebook_environment()[name] = datasource | [
"def",
"_datasource_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"paths",
"=",
"args",
"[",
"'paths'",
"]",
"data_format",
"=",
"(",
"args",
"[",
"'format'",
"]",
"or",
"'CSV'",
")",
".",
"lower",
"(",
")",
"compressed",
"=",
"args",
"[",
"'compressed'",
"]",
"or",
"False",
"# Get the source schema from the cell body",
"record",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"jsonschema",
".",
"validate",
"(",
"record",
",",
"BigQuerySchema",
".",
"TABLE_SCHEMA_SCHEMA",
")",
"schema",
"=",
"bigquery",
".",
"Schema",
"(",
"record",
"[",
"'schema'",
"]",
")",
"# Finally build the datasource object",
"datasource",
"=",
"bigquery",
".",
"ExternalDataSource",
"(",
"source",
"=",
"paths",
",",
"source_format",
"=",
"data_format",
",",
"compressed",
"=",
"compressed",
",",
"schema",
"=",
"schema",
")",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"name",
"]",
"=",
"datasource"
] | Implements the BigQuery datasource cell magic for ipython notebooks.
The supported syntax is
%%bq datasource --name <var> --paths <url> [--format <CSV|JSON>]
<schema>
Args:
args: the optional arguments following '%%bq datasource'
cell_body: the datasource's schema in json/yaml | [
"Implements",
"the",
"BigQuery",
"datasource",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | python | train |
apache/spark | python/pyspark/sql/functions.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1283-L1316 | def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, timestamp in Spark represents the number of microseconds from the Unix epoch, which is not
timezone-agnostic. So in Spark this function just shifts the timestamp value from UTC timezone to
the given timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
"""
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz)) | [
"def",
"from_utc_timestamp",
"(",
"timestamp",
",",
"tz",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Deprecated in 3.0. See SPARK-25496\"",
",",
"DeprecationWarning",
")",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"isinstance",
"(",
"tz",
",",
"Column",
")",
":",
"tz",
"=",
"_to_java_column",
"(",
"tz",
")",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"from_utc_timestamp",
"(",
"_to_java_column",
"(",
"timestamp",
")",
",",
"tz",
")",
")"
] | This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, timestamp in Spark represents the number of microseconds from the Unix epoch, which is not
timezone-agnostic. So in Spark this function just shifts the timestamp value from UTC timezone to
the given timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496 | [
"This",
"is",
"a",
"common",
"function",
"for",
"databases",
"supporting",
"TIMESTAMP",
"WITHOUT",
"TIMEZONE",
".",
"This",
"function",
"takes",
"a",
"timestamp",
"which",
"is",
"timezone",
"-",
"agnostic",
"and",
"interprets",
"it",
"as",
"a",
"timestamp",
"in",
"UTC",
"and",
"renders",
"that",
"timestamp",
"as",
"a",
"timestamp",
"in",
"the",
"given",
"time",
"zone",
"."
] | python | train |
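A self-contained version of the doctest above (hedged: assumes a local PySpark installation; the rendered local times depend on the session time zone configuration):

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import from_utc_timestamp

    spark = SparkSession.builder.master('local[1]').appName('tz-demo').getOrCreate()
    df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    df.select(from_utc_timestamp(df.ts, 'PST').alias('local_time')).show()
    df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).show()
    spark.stop()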
ellmetha/django-machina | machina/apps/forum/abstract_models.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum/abstract_models.py#L155-L173 | def save(self, *args, **kwargs):
""" Saves the forum instance. """
# It is vital to track the changes of the parent associated with a forum in order to
# maintain counters up-to-date and to trigger other operations such as permissions updates.
old_instance = None
if self.pk:
old_instance = self.__class__._default_manager.get(pk=self.pk)
# Update the slug field
self.slug = slugify(force_text(self.name), allow_unicode=True)
# Do the save
super().save(*args, **kwargs)
# If any change has been made to the forum parent, trigger the update of the counters
if old_instance and old_instance.parent != self.parent:
self.update_trackers()
# Trigger the 'forum_moved' signal
signals.forum_moved.send(sender=self, previous_parent=old_instance.parent) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# It is vital to track the changes of the parent associated with a forum in order to",
"# maintain counters up-to-date and to trigger other operations such as permissions updates.",
"old_instance",
"=",
"None",
"if",
"self",
".",
"pk",
":",
"old_instance",
"=",
"self",
".",
"__class__",
".",
"_default_manager",
".",
"get",
"(",
"pk",
"=",
"self",
".",
"pk",
")",
"# Update the slug field",
"self",
".",
"slug",
"=",
"slugify",
"(",
"force_text",
"(",
"self",
".",
"name",
")",
",",
"allow_unicode",
"=",
"True",
")",
"# Do the save",
"super",
"(",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# If any change has been made to the forum parent, trigger the update of the counters",
"if",
"old_instance",
"and",
"old_instance",
".",
"parent",
"!=",
"self",
".",
"parent",
":",
"self",
".",
"update_trackers",
"(",
")",
"# Trigger the 'forum_moved' signal",
"signals",
".",
"forum_moved",
".",
"send",
"(",
"sender",
"=",
"self",
",",
"previous_parent",
"=",
"old_instance",
".",
"parent",
")"
] | Saves the forum instance. | [
"Saves",
"the",
"forum",
"instance",
"."
] | python | train |
howie6879/ruia | ruia/middleware.py | https://github.com/howie6879/ruia/blob/2dc5262fc9c3e902a8faa7d5fa2f046f9d9ee1fa/ruia/middleware.py#L19-L31 | def request(self, *args, **kwargs):
"""
Define a decorator to be called before a request.
eg: @middleware.request
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.request_middleware.append(middleware)
return middleware
return register_middleware() | [
"def",
"request",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"middleware",
"=",
"args",
"[",
"0",
"]",
"@",
"wraps",
"(",
"middleware",
")",
"def",
"register_middleware",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"request_middleware",
".",
"append",
"(",
"middleware",
")",
"return",
"middleware",
"return",
"register_middleware",
"(",
")"
] | Define a decorator to be called before a request.
eg: @middleware.request | [
"Define",
"a",
"Decorate",
"to",
"be",
"called",
"before",
"a",
"request",
".",
"eg",
":"
] | python | test |
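A hedged registration sketch for the decorator above; the callback body is invented, and the exact callback signature has varied between ruia releases (older versions pass only the request object).

    from ruia import Middleware  # assumes the ruia package is installed

    middleware = Middleware()

    @middleware.request
    async def tag_request(request):
        # runs before every request is sent; attach illustrative metadata
        request.metadata = {'tagged': True}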
bioidiap/bob.ip.facedetect | bob/ip/facedetect/detector/sampler.py | https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/detector/sampler.py#L156-L195 | def iterate_cascade(self, cascade, image, threshold = None):
"""iterate_cascade(self, cascade, image, [threshold]) -> prediction, bounding_box
Iterates over the given image and computes the cascade of classifiers.
This function will compute the cascaded classification result for the given ``image`` using the given ``cascade``.
It yields a tuple of prediction value and the according bounding box.
If a ``threshold`` is specified, only those ``prediction``\s are returned, which exceed the given ``threshold``.
.. note::
The ``threshold`` does not overwrite the cascade thresholds `:py:attr:`Cascade.thresholds`, but only threshold the final prediction.
Specifying the ``threshold`` here is just slightly faster than thresholding the yielded prediction.
**Parameters:**
``cascade`` : :py:class:`Cascade`
The cascade that performs the predictions
``image`` : array_like(2D)
The image for which the predictions should be computed
``threshold`` : float
The threshold, which limits the number of predictions
**Yields:**
``prediction`` : float
The prediction value for the current bounding box
``bounding_box`` : :py:class:`BoundingBox`
An iterator over all possible sampled bounding boxes (which exceed the prediction ``threshold``, if given)
"""
for scale, scaled_image_shape in self.scales(image):
# prepare the feature extractor to extract features from the given image
cascade.prepare(image, scale)
for bb in self.sample_scaled(scaled_image_shape):
# return the prediction and the bounding box, if the prediction is over threshold
prediction = cascade(bb)
if threshold is None or prediction > threshold:
yield prediction, bb.scale(1./scale) | [
"def",
"iterate_cascade",
"(",
"self",
",",
"cascade",
",",
"image",
",",
"threshold",
"=",
"None",
")",
":",
"for",
"scale",
",",
"scaled_image_shape",
"in",
"self",
".",
"scales",
"(",
"image",
")",
":",
"# prepare the feature extractor to extract features from the given image",
"cascade",
".",
"prepare",
"(",
"image",
",",
"scale",
")",
"for",
"bb",
"in",
"self",
".",
"sample_scaled",
"(",
"scaled_image_shape",
")",
":",
"# return the prediction and the bounding box, if the prediction is over threshold",
"prediction",
"=",
"cascade",
"(",
"bb",
")",
"if",
"threshold",
"is",
"None",
"or",
"prediction",
">",
"threshold",
":",
"yield",
"prediction",
",",
"bb",
".",
"scale",
"(",
"1.",
"/",
"scale",
")"
] | iterate_cascade(self, cascade, image, [threshold]) -> prediction, bounding_box
Iterates over the given image and computes the cascade of classifiers.
This function will compute the cascaded classification result for the given ``image`` using the given ``cascade``.
It yields a tuple of the prediction value and the corresponding bounding box.
If a ``threshold`` is specified, only those ``prediction``\s are returned, which exceed the given ``threshold``.
.. note::
The ``threshold`` does not overwrite the cascade thresholds `:py:attr:`Cascade.thresholds`, but only threshold the final prediction.
Specifying the ``threshold`` here is just slightly faster than thresholding the yielded prediction.
**Parameters:**
``cascade`` : :py:class:`Cascade`
The cascade that performs the predictions
``image`` : array_like(2D)
The image for which the predictions should be computed
``threshold`` : float
The threshold, which limits the number of predictions
**Yields:**
``prediction`` : float
The prediction value for the current bounding box
``bounding_box`` : :py:class:`BoundingBox`
An iterator over all possible sampled bounding boxes (which exceed the prediction ``threshold``, if given) | [
"iterate_cascade",
"(",
"self",
"cascade",
"image",
"[",
"threshold",
"]",
")",
"-",
">",
"prediction",
"bounding_box"
] | python | train |
vfilimonov/pydatastream | pydatastream/pydatastream.py | https://github.com/vfilimonov/pydatastream/blob/15d2adac1c83501715db1542373fa8428542816e/pydatastream/pydatastream.py#L598-L646 | def get_epit_vintage_matrix(self, mnemonic, date_from='1951-01-01', date_to=None):
""" Construct the vintage matrix for a given economic series.
Requires subscription to Thomson Reuters Economic Point-in-Time (EPiT).
Vintage matrix represents a DataFrame where columns correspond to a
particular period (quarter or month) for the reported statistic and
index represents timestamps at which these values were released by
the respective official agency. I.e. every line corresponds to all
available reported values by the given date.
For example:
>> DWE.get_epit_vintage_matrix('USGDP...D', date_from='2015-01-01')
2015-02-15 2015-05-15 2015-08-15 2015-11-15 \
2015-04-29 16304.80 NaN NaN NaN
2015-05-29 16264.10 NaN NaN NaN
2015-06-24 16287.70 NaN NaN NaN
2015-07-30 16177.30 16270.400 NaN NaN
2015-08-27 16177.30 16324.300 NaN NaN
2015-09-25 16177.30 16333.600 NaN NaN
2015-10-29 16177.30 16333.600 16394.200 NaN
2015-11-24 16177.30 16333.600 16417.800 NaN
From the matrix it is seen for example, that the advance GDP estimate
for 2015-Q1 (corresponding to 2015-02-15) was released on 2015-04-29
and was equal to 16304.80 (B USD). The first revision (16264.10) has
happened on 2015-05-29 and the second (16287.70) - on 2015-06-24.
On 2015-07-30 the advance GDP figure for 2015-Q2 was released
(16270.400) together with update on the 2015-Q1 value (16177.30)
and so on.
"""
# Get first available date from the REL1 series
rel1 = self.fetch(mnemonic, 'REL1', date_from=date_from, date_to=date_to)
date_0 = rel1.dropna().index[0]
# All release dates
reld123 = self.fetch(mnemonic, ['RELD1', 'RELD2', 'RELD3'],
date_from=date_0, date_to=date_to).dropna(how='all')
# Fetch all vintages
res = {}
for date in reld123.index:
try:
_tmp = self.fetch(mnemonic, 'RELV', date_from=date_0, date_to=date).dropna()
except DatastreamException:
continue
res[date] = _tmp
return pd.concat(res).RELV.unstack() | [
"def",
"get_epit_vintage_matrix",
"(",
"self",
",",
"mnemonic",
",",
"date_from",
"=",
"'1951-01-01'",
",",
"date_to",
"=",
"None",
")",
":",
"# Get first available date from the REL1 series",
"rel1",
"=",
"self",
".",
"fetch",
"(",
"mnemonic",
",",
"'REL1'",
",",
"date_from",
"=",
"date_from",
",",
"date_to",
"=",
"date_to",
")",
"date_0",
"=",
"rel1",
".",
"dropna",
"(",
")",
".",
"index",
"[",
"0",
"]",
"# All release dates",
"reld123",
"=",
"self",
".",
"fetch",
"(",
"mnemonic",
",",
"[",
"'RELD1'",
",",
"'RELD2'",
",",
"'RELD3'",
"]",
",",
"date_from",
"=",
"date_0",
",",
"date_to",
"=",
"date_to",
")",
".",
"dropna",
"(",
"how",
"=",
"'all'",
")",
"# Fetch all vintages",
"res",
"=",
"{",
"}",
"for",
"date",
"in",
"reld123",
".",
"index",
":",
"try",
":",
"_tmp",
"=",
"self",
".",
"fetch",
"(",
"mnemonic",
",",
"'RELV'",
",",
"date_from",
"=",
"date_0",
",",
"date_to",
"=",
"date",
")",
".",
"dropna",
"(",
")",
"except",
"DatastreamException",
":",
"continue",
"res",
"[",
"date",
"]",
"=",
"_tmp",
"return",
"pd",
".",
"concat",
"(",
"res",
")",
".",
"RELV",
".",
"unstack",
"(",
")"
] | Construct the vintage matrix for a given economic series.
Requires subscription to Thomson Reuters Economic Point-in-Time (EPiT).
Vintage matrix represents a DataFrame where columns correspond to a
particular period (quarter or month) for the reported statistic and
index represents timestamps at which these values were released by
the respective official agency. I.e. every line corresponds to all
available reported values by the given date.
For example:
>> DWE.get_epit_vintage_matrix('USGDP...D', date_from='2015-01-01')
2015-02-15 2015-05-15 2015-08-15 2015-11-15 \
2015-04-29 16304.80 NaN NaN NaN
2015-05-29 16264.10 NaN NaN NaN
2015-06-24 16287.70 NaN NaN NaN
2015-07-30 16177.30 16270.400 NaN NaN
2015-08-27 16177.30 16324.300 NaN NaN
2015-09-25 16177.30 16333.600 NaN NaN
2015-10-29 16177.30 16333.600 16394.200 NaN
2015-11-24 16177.30 16333.600 16417.800 NaN
From the matrix it is seen for example, that the advance GDP estimate
for 2015-Q1 (corresponding to 2015-02-15) was released on 2015-04-29
and was equal to 16304.80 (B USD). The first revision (16264.10) has
happened on 2015-05-29 and the second (16287.70) - on 2015-06-24.
On 2015-07-30 the advance GDP figure for 2015-Q2 was released
(16270.400) together with update on the 2015-Q1 value (16177.30)
and so on. | [
"Construct",
"the",
"vintage",
"matrix",
"for",
"a",
"given",
"economic",
"series",
".",
"Requires",
"subscription",
"to",
"Thomson",
"Reuters",
"Economic",
"Point",
"-",
"in",
"-",
"Time",
"(",
"EPiT",
")",
"."
] | python | train |
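A hedged usage sketch for the vintage-matrix helper above; the credentials are placeholders and a valid Thomson Reuters Datastream / EPiT subscription is assumed.

    from pydatastream import Datastream  # assumes pydatastream is installed

    DWE = Datastream(username='DS:XXXX000', password='secret')  # placeholder credentials
    vm = DWE.get_epit_vintage_matrix('USGDP...D', date_from='2015-01-01')
    print(vm.head())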
mk-fg/feedjack | feedjack/models.py | https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/models.py#L616-L622 | def update_handler(feeds):
'''Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving).'''
# Check if this call is a result of actions initiated from
# one of the hooks in a higher frame (resulting in recursion).
if Feed._filters_update_handler_lock: return
return Feed._filters_update_handler(Feed, feeds, force=True) | [
"def",
"update_handler",
"(",
"feeds",
")",
":",
"# Check if this call is a result of actions initiated from",
"# one of the hooks in a higher frame (resulting in recursion).",
"if",
"Feed",
".",
"_filters_update_handler_lock",
":",
"return",
"return",
"Feed",
".",
"_filters_update_handler",
"(",
"Feed",
",",
"feeds",
",",
"force",
"=",
"True",
")"
] | Update all cross-referencing filters results for feeds and others, related to them.
Intended to be called from non-Feed update hooks (like new Post saving). | [
"Update",
"all",
"cross",
"-",
"referencing",
"filters",
"results",
"for",
"feeds",
"and",
"others",
"related",
"to",
"them",
".",
"Intended",
"to",
"be",
"called",
"from",
"non",
"-",
"Feed",
"update",
"hooks",
"(",
"like",
"new",
"Post",
"saving",
")",
"."
] | python | train |
stephenmcd/gnotty | gnotty/migrations/0003_joins_leaves.py | https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/migrations/0003_joins_leaves.py#L9-L14 | def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
if not db.dry_run:
orm['gnotty.IRCMessage'].objects.filter(message="joins").update(join_or_leave=True)
orm['gnotty.IRCMessage'].objects.filter(message="leaves").update(join_or_leave=True) | [
"def",
"forwards",
"(",
"self",
",",
"orm",
")",
":",
"# Note: Remember to use orm['appname.ModelName'] rather than \"from appname.models...\"",
"if",
"not",
"db",
".",
"dry_run",
":",
"orm",
"[",
"'gnotty.IRCMessage'",
"]",
".",
"objects",
".",
"filter",
"(",
"message",
"=",
"\"joins\"",
")",
".",
"update",
"(",
"join_or_leave",
"=",
"True",
")",
"orm",
"[",
"'gnotty.IRCMessage'",
"]",
".",
"objects",
".",
"filter",
"(",
"message",
"=",
"\"leaves\"",
")",
".",
"update",
"(",
"join_or_leave",
"=",
"True",
")"
] | Write your forwards methods here. | [
"Write",
"your",
"forwards",
"methods",
"here",
"."
] | python | train |
dshean/pygeotools | pygeotools/lib/malib.py | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1474-L1488 | def nanfill(a, f_a, *args, **kwargs):
"""Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
"""
a = checkma(a)
ndv = a.fill_value
#Note: The following fails for arrays that are not float (np.nan is float)
b = f_a(a.filled(np.nan), *args, **kwargs)
#the fix_invalid fill_value parameter doesn't seem to work
out = np.ma.fix_invalid(b, copy=False)
out.set_fill_value(ndv)
return out | [
"def",
"nanfill",
"(",
"a",
",",
"f_a",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"a",
"=",
"checkma",
"(",
"a",
")",
"ndv",
"=",
"a",
".",
"fill_value",
"#Note: The following fails for arrays that are not float (np.nan is float)",
"b",
"=",
"f_a",
"(",
"a",
".",
"filled",
"(",
"np",
".",
"nan",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"#the fix_invalid fill_value parameter doesn't seem to work",
"out",
"=",
"np",
".",
"ma",
".",
"fix_invalid",
"(",
"b",
",",
"copy",
"=",
"False",
")",
"out",
".",
"set_fill_value",
"(",
"ndv",
")",
"return",
"out"
] | Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155 | [
"Fill",
"masked",
"areas",
"with",
"np",
".",
"nan"
] | python | train |
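A hedged sketch of the pattern nanfill enables — applying a scipy.ndimage filter to a masked array; the toy array and nodata value are invented.

    import numpy as np
    from scipy.ndimage import uniform_filter
    from pygeotools.lib import malib  # assumes pygeotools is installed

    a = np.ma.masked_values([[1.0, 2.0, 4.0],
                             [2.0, -9999.0, 4.0],
                             [3.0, 3.0, 5.0]], -9999.0)
    smoothed = malib.nanfill(a, uniform_filter, size=3)  # NaN spreads to the 3x3 footprint, as the docstring warns
    print(smoothed)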
rwl/pylon | pylon/io/rst.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/rst.py#L312-L355 | def write_how_many(self, file):
""" Writes component numbers to a table.
"""
report = CaseReport(self.case)
# Map component labels to attribute names
components = [("Bus", "n_buses"), ("Generator", "n_generators"),
("Committed Generator", "n_online_generators"),
("Load", "n_loads"), ("Fixed Load", "n_fixed_loads"),
("Despatchable Load", "n_online_vloads"), ("Shunt", "n_shunts"),
("Branch", "n_branches"), ("Transformer", "n_transformers"),
("Inter-tie", "n_interties"), ("Area", "n_areas")
]
# Column 1 width
longest = max([len(c[0]) for c in components])
col1_header = "Object"
col1_width = longest
col2_header = "Quantity"
col2_width = len(col2_header)
# Row separator
sep = "="*col1_width + " " + "="*col2_width + "\n"
# Row headers
file.write(sep)
file.write(col1_header.center(col1_width))
file.write(" ")
file.write("%s\n" % col2_header.center(col2_width))
file.write(sep)
# Rows
for label, attr in components:
col2_value = str(getattr(report, attr))
file.write("%s %s\n" %
(label.ljust(col1_width), col2_value.rjust(col2_width)))
else:
file.write(sep)
file.write("\n")
del report | [
"def",
"write_how_many",
"(",
"self",
",",
"file",
")",
":",
"report",
"=",
"CaseReport",
"(",
"self",
".",
"case",
")",
"# Map component labels to attribute names",
"components",
"=",
"[",
"(",
"\"Bus\"",
",",
"\"n_buses\"",
")",
",",
"(",
"\"Generator\"",
",",
"\"n_generators\"",
")",
",",
"(",
"\"Committed Generator\"",
",",
"\"n_online_generators\"",
")",
",",
"(",
"\"Load\"",
",",
"\"n_loads\"",
")",
",",
"(",
"\"Fixed Load\"",
",",
"\"n_fixed_loads\"",
")",
",",
"(",
"\"Despatchable Load\"",
",",
"\"n_online_vloads\"",
")",
",",
"(",
"\"Shunt\"",
",",
"\"n_shunts\"",
")",
",",
"(",
"\"Branch\"",
",",
"\"n_branches\"",
")",
",",
"(",
"\"Transformer\"",
",",
"\"n_transformers\"",
")",
",",
"(",
"\"Inter-tie\"",
",",
"\"n_interties\"",
")",
",",
"(",
"\"Area\"",
",",
"\"n_areas\"",
")",
"]",
"# Column 1 width",
"longest",
"=",
"max",
"(",
"[",
"len",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"components",
"]",
")",
"col1_header",
"=",
"\"Object\"",
"col1_width",
"=",
"longest",
"col2_header",
"=",
"\"Quantity\"",
"col2_width",
"=",
"len",
"(",
"col2_header",
")",
"# Row separator",
"sep",
"=",
"\"=\"",
"*",
"col1_width",
"+",
"\" \"",
"+",
"\"=\"",
"*",
"col2_width",
"+",
"\"\\n\"",
"# Row headers",
"file",
".",
"write",
"(",
"sep",
")",
"file",
".",
"write",
"(",
"col1_header",
".",
"center",
"(",
"col1_width",
")",
")",
"file",
".",
"write",
"(",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"col2_header",
".",
"center",
"(",
"col2_width",
")",
")",
"file",
".",
"write",
"(",
"sep",
")",
"# Rows",
"for",
"label",
",",
"attr",
"in",
"components",
":",
"col2_value",
"=",
"str",
"(",
"getattr",
"(",
"report",
",",
"attr",
")",
")",
"file",
".",
"write",
"(",
"\"%s %s\\n\"",
"%",
"(",
"label",
".",
"ljust",
"(",
"col1_width",
")",
",",
"col2_value",
".",
"rjust",
"(",
"col2_width",
")",
")",
")",
"else",
":",
"file",
".",
"write",
"(",
"sep",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"del",
"report"
] | Writes component numbers to a table. | [
"Writes",
"component",
"numbers",
"to",
"a",
"table",
"."
] | python | train |
aestrivex/bctpy | bct/algorithms/distance.py | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L540-L644 | def efficiency_wei(Gw, local=False):
'''
The global efficiency is the average of inverse shortest path length,
and is inversely related to the characteristic path length.
The local efficiency is the global efficiency computed on the
neighborhood of the node, and is related to the clustering coefficient.
Parameters
----------
W : NxN np.ndarray
undirected weighted connection matrix
(all weights in W must be between 0 and 1)
local : bool
If True, computes local efficiency instead of global efficiency.
Default value = False.
Returns
-------
Eglob : float
global efficiency, only if local=False
Eloc : Nx1 np.ndarray
local efficiency, only if local=True
Notes
-----
The efficiency is computed using an auxiliary connection-length
matrix L, defined as L_ij = 1/W_ij for all nonzero L_ij; This has an
intuitive interpretation, as higher connection weights intuitively
correspond to shorter lengths.
The weighted local efficiency broadly parallels the weighted
clustering coefficient of Onnela et al. (2005) and distinguishes the
influence of different paths based on connection weights of the
corresponding neighbors to the node in question. In other words, a path
between two neighbors with strong connections to the node in question
contributes more to the local efficiency than a path between two weakly
connected neighbors. Note that this weighted variant of the local
efficiency is hence not a strict generalization of the binary variant.
Algorithm: Dijkstra's algorithm
'''
def distance_inv_wei(G):
n = len(G)
D = np.zeros((n, n)) # distance matrix
D[np.logical_not(np.eye(n))] = np.inf
for u in range(n):
# distance permanence (true is temporary)
S = np.ones((n,), dtype=bool)
G1 = G.copy()
V = [u]
while True:
S[V] = 0 # distance u->V is now permanent
G1[:, V] = 0 # no in-edges as already shortest
for v in V:
W, = np.where(G1[v, :]) # neighbors of smallest nodes
td = np.array(
[D[u, W].flatten(), (D[u, v] + G1[v, W]).flatten()])
D[u, W] = np.min(td, axis=0)
if D[u, S].size == 0: # all nodes reached
break
minD = np.min(D[u, S])
if np.isinf(minD): # some nodes cannot be reached
break
V, = np.where(D[u, :] == minD)
np.fill_diagonal(D, 1)
D = 1 / D
np.fill_diagonal(D, 0)
return D
n = len(Gw)
Gl = invert(Gw, copy=True) # connection length matrix
A = np.array((Gw != 0), dtype=int)
if local:
E = np.zeros((n,)) # local efficiency
for u in range(n):
# V,=np.where(Gw[u,:]) #neighbors
# k=len(V) #degree
# if k>=2: #degree must be at least 2
# e=(distance_inv_wei(Gl[V].T[V])*np.outer(Gw[V,u],Gw[u,V]))**1/3
# E[u]=np.sum(e)/(k*k-k)
# find pairs of neighbors
V, = np.where(np.logical_or(Gw[u, :], Gw[:, u].T))
# symmetrized vector of weights
sw = cuberoot(Gw[u, V]) + cuberoot(Gw[V, u].T)
# inverse distance matrix
e = distance_inv_wei(Gl[np.ix_(V, V)])
# symmetrized inverse distance matrix
se = cuberoot(e) + cuberoot(e.T)
numer = np.sum(np.outer(sw.T, sw) * se) / 2
if numer != 0:
# symmetrized adjacency vector
sa = A[u, V] + A[V, u].T
denom = np.sum(sa)**2 - np.sum(sa * sa)
# print numer,denom
E[u] = numer / denom # local efficiency
else:
e = distance_inv_wei(Gl)
E = np.sum(e) / (n * n - n)
return E | [
"def",
"efficiency_wei",
"(",
"Gw",
",",
"local",
"=",
"False",
")",
":",
"def",
"distance_inv_wei",
"(",
"G",
")",
":",
"n",
"=",
"len",
"(",
"G",
")",
"D",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"# distance matrix",
"D",
"[",
"np",
".",
"logical_not",
"(",
"np",
".",
"eye",
"(",
"n",
")",
")",
"]",
"=",
"np",
".",
"inf",
"for",
"u",
"in",
"range",
"(",
"n",
")",
":",
"# distance permanence (true is temporary)",
"S",
"=",
"np",
".",
"ones",
"(",
"(",
"n",
",",
")",
",",
"dtype",
"=",
"bool",
")",
"G1",
"=",
"G",
".",
"copy",
"(",
")",
"V",
"=",
"[",
"u",
"]",
"while",
"True",
":",
"S",
"[",
"V",
"]",
"=",
"0",
"# distance u->V is now permanent",
"G1",
"[",
":",
",",
"V",
"]",
"=",
"0",
"# no in-edges as already shortest",
"for",
"v",
"in",
"V",
":",
"W",
",",
"=",
"np",
".",
"where",
"(",
"G1",
"[",
"v",
",",
":",
"]",
")",
"# neighbors of smallest nodes",
"td",
"=",
"np",
".",
"array",
"(",
"[",
"D",
"[",
"u",
",",
"W",
"]",
".",
"flatten",
"(",
")",
",",
"(",
"D",
"[",
"u",
",",
"v",
"]",
"+",
"G1",
"[",
"v",
",",
"W",
"]",
")",
".",
"flatten",
"(",
")",
"]",
")",
"D",
"[",
"u",
",",
"W",
"]",
"=",
"np",
".",
"min",
"(",
"td",
",",
"axis",
"=",
"0",
")",
"if",
"D",
"[",
"u",
",",
"S",
"]",
".",
"size",
"==",
"0",
":",
"# all nodes reached",
"break",
"minD",
"=",
"np",
".",
"min",
"(",
"D",
"[",
"u",
",",
"S",
"]",
")",
"if",
"np",
".",
"isinf",
"(",
"minD",
")",
":",
"# some nodes cannot be reached",
"break",
"V",
",",
"=",
"np",
".",
"where",
"(",
"D",
"[",
"u",
",",
":",
"]",
"==",
"minD",
")",
"np",
".",
"fill_diagonal",
"(",
"D",
",",
"1",
")",
"D",
"=",
"1",
"/",
"D",
"np",
".",
"fill_diagonal",
"(",
"D",
",",
"0",
")",
"return",
"D",
"n",
"=",
"len",
"(",
"Gw",
")",
"Gl",
"=",
"invert",
"(",
"Gw",
",",
"copy",
"=",
"True",
")",
"# connection length matrix",
"A",
"=",
"np",
".",
"array",
"(",
"(",
"Gw",
"!=",
"0",
")",
",",
"dtype",
"=",
"int",
")",
"if",
"local",
":",
"E",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
")",
")",
"# local efficiency",
"for",
"u",
"in",
"range",
"(",
"n",
")",
":",
"# V,=np.where(Gw[u,:])\t\t#neighbors",
"# k=len(V)\t\t\t\t\t#degree",
"# if k>=2:\t\t\t\t\t#degree must be at least 2",
"#\te=(distance_inv_wei(Gl[V].T[V])*np.outer(Gw[V,u],Gw[u,V]))**1/3",
"#\tE[u]=np.sum(e)/(k*k-k)",
"# find pairs of neighbors",
"V",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_or",
"(",
"Gw",
"[",
"u",
",",
":",
"]",
",",
"Gw",
"[",
":",
",",
"u",
"]",
".",
"T",
")",
")",
"# symmetrized vector of weights",
"sw",
"=",
"cuberoot",
"(",
"Gw",
"[",
"u",
",",
"V",
"]",
")",
"+",
"cuberoot",
"(",
"Gw",
"[",
"V",
",",
"u",
"]",
".",
"T",
")",
"# inverse distance matrix",
"e",
"=",
"distance_inv_wei",
"(",
"Gl",
"[",
"np",
".",
"ix_",
"(",
"V",
",",
"V",
")",
"]",
")",
"# symmetrized inverse distance matrix",
"se",
"=",
"cuberoot",
"(",
"e",
")",
"+",
"cuberoot",
"(",
"e",
".",
"T",
")",
"numer",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"outer",
"(",
"sw",
".",
"T",
",",
"sw",
")",
"*",
"se",
")",
"/",
"2",
"if",
"numer",
"!=",
"0",
":",
"# symmetrized adjacency vector",
"sa",
"=",
"A",
"[",
"u",
",",
"V",
"]",
"+",
"A",
"[",
"V",
",",
"u",
"]",
".",
"T",
"denom",
"=",
"np",
".",
"sum",
"(",
"sa",
")",
"**",
"2",
"-",
"np",
".",
"sum",
"(",
"sa",
"*",
"sa",
")",
"# print numer,denom",
"E",
"[",
"u",
"]",
"=",
"numer",
"/",
"denom",
"# local efficiency",
"else",
":",
"e",
"=",
"distance_inv_wei",
"(",
"Gl",
")",
"E",
"=",
"np",
".",
"sum",
"(",
"e",
")",
"/",
"(",
"n",
"*",
"n",
"-",
"n",
")",
"return",
"E"
] | The global efficiency is the average of inverse shortest path length,
and is inversely related to the characteristic path length.
The local efficiency is the global efficiency computed on the
neighborhood of the node, and is related to the clustering coefficient.
Parameters
----------
W : NxN np.ndarray
undirected weighted connection matrix
(all weights in W must be between 0 and 1)
local : bool
If True, computes local efficiency instead of global efficiency.
Default value = False.
Returns
-------
Eglob : float
global efficiency, only if local=False
Eloc : Nx1 np.ndarray
local efficiency, only if local=True
Notes
-----
The efficiency is computed using an auxiliary connection-length
matrix L, defined as L_ij = 1/W_ij for all nonzero L_ij; This has an
intuitive interpretation, as higher connection weights intuitively
correspond to shorter lengths.
The weighted local efficiency broadly parallels the weighted
clustering coefficient of Onnela et al. (2005) and distinguishes the
influence of different paths based on connection weights of the
corresponding neighbors to the node in question. In other words, a path
between two neighbors with strong connections to the node in question
contributes more to the local efficiency than a path between two weakly
connected neighbors. Note that this weighted variant of the local
efficiency is hence not a strict generalization of the binary variant.
Algorithm: Dijkstra's algorithm | [
"The",
"global",
"efficiency",
"is",
"the",
"average",
"of",
"inverse",
"shortest",
"path",
"length",
"and",
"is",
"inversely",
"related",
"to",
"the",
"characteristic",
"path",
"length",
"."
] | python | train |
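A hedged usage sketch for efficiency_wei; the toy connection matrix is invented and keeps weights in [0, 1] as the docstring requires.

    import numpy as np
    from bct import efficiency_wei  # assumes bctpy is installed

    W = np.array([[0.0, 0.8, 0.3],
                  [0.8, 0.0, 0.5],
                  [0.3, 0.5, 0.0]])

    print(efficiency_wei(W))              # global efficiency (float)
    print(efficiency_wei(W, local=True))  # per-node local efficiency (length-3 array)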
eXamadeus/godaddypy | godaddypy/client.py | https://github.com/eXamadeus/godaddypy/blob/67820604ffe233a67ef9f6b3a59ab85b02653e57/godaddypy/client.py#L187-L203 | def replace_records(self, domain, records, record_type=None, name=None):
"""This will replace all records at the domain. Record type and record name can be provided to filter
which records to replace.
:param domain: the domain to replace records at
:param records: the records you will be saving
:param record_type: the type of records you want to replace (eg. only replace 'A' records)
:param name: the name of records you want to replace (eg. only replace records with name 'test')
:return: True if no exceptions occurred
"""
url = self._build_record_url(domain, name=name, record_type=record_type)
self._put(url, json=records)
# If we didn't get any exceptions, return True to let the user know
return True | [
"def",
"replace_records",
"(",
"self",
",",
"domain",
",",
"records",
",",
"record_type",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_build_record_url",
"(",
"domain",
",",
"name",
"=",
"name",
",",
"record_type",
"=",
"record_type",
")",
"self",
".",
"_put",
"(",
"url",
",",
"json",
"=",
"records",
")",
"# If we didn't get any exceptions, return True to let the user know",
"return",
"True"
] | This will replace all records at the domain. Record type and record name can be provided to filter
which records to replace.
:param domain: the domain to replace records at
:param records: the records you will be saving
:param record_type: the type of records you want to replace (eg. only replace 'A' records)
:param name: the name of records you want to replace (eg. only replace records with name 'test')
:return: True if no exceptions occurred | [
"This",
"will",
"replace",
"all",
"records",
"at",
"the",
"domain",
".",
"Record",
"type",
"and",
"record",
"name",
"can",
"be",
"provided",
"to",
"filter",
"which",
"records",
"to",
"replace",
"."
] | python | train |
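A hedged usage sketch for replace_records; the API credentials, domain, and record payload are placeholders.

    from godaddypy import Account, Client  # assumes godaddypy is installed

    account = Account(api_key='YOUR_KEY', api_secret='YOUR_SECRET')  # placeholders
    client = Client(account)

    records = [{'type': 'A', 'name': 'test', 'data': '203.0.113.10', 'ttl': 3600}]
    # replace only the A records named 'test' on the domain
    client.replace_records('example.com', records, record_type='A', name='test')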
sassoo/goldman | goldman/serializers/jsonapi_error.py | https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi_error.py#L74-L85 | def get_headers(self):
""" Return a HTTPStatus compliant headers attribute
FIX: duplicate headers will collide terribly!
"""
headers = {'Content-Type': goldman.JSON_MIMETYPE}
for error in self.errors:
if 'headers' in error:
headers.update(error['headers'])
return headers | [
"def",
"get_headers",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"goldman",
".",
"JSON_MIMETYPE",
"}",
"for",
"error",
"in",
"self",
".",
"errors",
":",
"if",
"'headers'",
"in",
"error",
":",
"headers",
".",
"update",
"(",
"error",
"[",
"'headers'",
"]",
")",
"return",
"headers"
] | Return a HTTPStatus compliant headers attribute
FIX: duplicate headers will collide terribly! | [
"Return",
"a",
"HTTPStatus",
"compliant",
"headers",
"attribute"
] | python | train |
neherlab/treetime | treetime/gtr.py | https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/gtr.py#L229-L369 | def standard(model, **kwargs):
"""
Create standard model of molecular evolution.
Parameters
----------
model : str
Model to create. See list of available models below
**kwargs:
Key word arguments to be passed to the model
**Available models**
- JC69:
Jukes-Cantor 1969 model. This model assumes equal frequencies
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969).
Evolution of Protein Molecules. New York: Academic Press. pp. 21-132.
To create this model, use:
:code:`mygtr = GTR.standard(model='jc69', mu=<my_mu>, alphabet=<my_alph>)`
:code:`my_mu` - substitution rate (float)
:code:`my_alph` - alphabet (str: :code:`'nuc'` or :code:`'nuc_nogap'`)
- K80:
Kimura 1980 model. Assumes equal concentrations across nucleotides, but
allows different rates between transitions and transversions. The ratio
of the transversion/transition rates is given by kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111-120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
:code:`mygtr = GTR.standard(model='k80', mu=<my_mu>, kappa=<my_kappa>)`
:code:`mu` - overall substitution rate (float)
:code:`kappa` - ratio of transversion/transition rates (float)
- F81:
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368-376. doi:10.1007/BF01734359
for details.
:code:`mygtr = GTR.standard(model='F81', mu=<mu>, pi=<pi>, alphabet=<alph>)`
:code:`mu` - substitution rate (float)
:code:`pi` - : nucleotide concentrations (numpy.array)
:code:`alphabet' - alphabet to use. (:code:`'nuc'` or :code:`'nuc_nogap'`)
- HKY85:
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160-174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='HKY85', mu=<mu>, pi=<pi>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
:code:`pi` - : nucleotide concentrations (numpy.array)
:code:`kappa` - ratio of transversion/transition rates (float)
- T92:
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678-687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='T92', mu=<mu>, pi_GC=<pi_gc>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
:code:`pi_GC` - : relative GC content
:code:`kappa` - ratio of transversion/transition rates (float)
- TN93:
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link: Tamura, Nei (1993), MolBiol Evol. 10 (3): 512-526.
DOI:10.1093/oxfordjournals.molbev.a040023
:code:`mygtr = GTR.standard(model='TN93', mu=<mu>, kappa1=<k1>, kappa2=<k2>)`
:code:`mu` - substitution rate (float)
:code:`kappa1` - relative A<-->C, A<-->T, T<-->G and G<-->C rates (float)
:code:`kappa` - relative C<-->T rate (float)
.. Note::
Rate of A<-->G substitution is set to one. All other rates
(kappa1, kappa2) are specified relative to this rate
"""
from .nuc_models import JC69, K80, F81, HKY85, T92, TN93
from .aa_models import JTT92
if model.lower() in ['jc', 'jc69', 'jukes-cantor', 'jukes-cantor69', 'jukescantor', 'jukescantor69']:
return JC69(**kwargs)
elif model.lower() in ['k80', 'kimura80', 'kimura1980']:
return K80(**kwargs)
elif model.lower() in ['f81', 'felsenstein81', 'felsenstein1981']:
return F81(**kwargs)
elif model.lower() in ['hky', 'hky85', 'hky1985']:
return HKY85(**kwargs)
elif model.lower() in ['t92', 'tamura92', 'tamura1992']:
return T92(**kwargs)
elif model.lower() in ['tn93', 'tamura_nei_93', 'tamuranei93']:
return TN93(**kwargs)
elif model.lower() in ['jtt', 'jtt92']:
return JTT92(**kwargs)
else:
raise KeyError("The GTR model '{}' is not in the list of available models."
"".format(model)) | [
"def",
"standard",
"(",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"nuc_models",
"import",
"JC69",
",",
"K80",
",",
"F81",
",",
"HKY85",
",",
"T92",
",",
"TN93",
"from",
".",
"aa_models",
"import",
"JTT92",
"if",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'jc'",
",",
"'jc69'",
",",
"'jukes-cantor'",
",",
"'jukes-cantor69'",
",",
"'jukescantor'",
",",
"'jukescantor69'",
"]",
":",
"return",
"JC69",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'k80'",
",",
"'kimura80'",
",",
"'kimura1980'",
"]",
":",
"return",
"K80",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'f81'",
",",
"'felsenstein81'",
",",
"'felsenstein1981'",
"]",
":",
"return",
"F81",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'hky'",
",",
"'hky85'",
",",
"'hky1985'",
"]",
":",
"return",
"HKY85",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'t92'",
",",
"'tamura92'",
",",
"'tamura1992'",
"]",
":",
"return",
"T92",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'tn93'",
",",
"'tamura_nei_93'",
",",
"'tamuranei93'",
"]",
":",
"return",
"TN93",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"model",
".",
"lower",
"(",
")",
"in",
"[",
"'jtt'",
",",
"'jtt92'",
"]",
":",
"return",
"JTT92",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"The GTR model '{}' is not in the list of available models.\"",
"\"\"",
".",
"format",
"(",
"model",
")",
")"
] | Create standard model of molecular evolution.
Parameters
----------
model : str
Model to create. See list of available models below
**kwargs:
Key word arguments to be passed to the model
**Available models**
- JC69:
Jukes-Cantor 1969 model. This model assumes equal frequencies
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969).
Evolution of Protein Molecules. New York: Academic Press. pp. 21-132.
To create this model, use:
:code:`mygtr = GTR.standard(model='jc69', mu=<my_mu>, alphabet=<my_alph>)`
:code:`my_mu` - substitution rate (float)
:code:`my_alph` - alphabet (str: :code:`'nuc'` or :code:`'nuc_nogap'`)
- K80:
Kimura 1980 model. Assumes equal concentrations across nucleotides, but
allows different rates between transitions and transversions. The ratio
of the transversion/transition rates is given by kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111-120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
:code:`mygtr = GTR.standard(model='k80', mu=<my_mu>, kappa=<my_kappa>)`
:code:`mu` - overall substitution rate (float)
:code:`kappa` - ratio of transversion/transition rates (float)
- F81:
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368-376. doi:10.1007/BF01734359
for details.
:code:`mygtr = GTR.standard(model='F81', mu=<mu>, pi=<pi>, alphabet=<alph>)`
:code:`mu` - substitution rate (float)
:code:`pi` - : nucleotide concentrations (numpy.array)
:code:`alphabet` - alphabet to use. (:code:`'nuc'` or :code:`'nuc_nogap'`)
- HKY85:
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160-174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='HKY85', mu=<mu>, pi=<pi>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
:code:`pi` - : nucleotide concentrations (numpy.array)
:code:`kappa` - ratio of transversion/transition rates (float)
- T92:
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678-687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
:code:`mygtr = GTR.standard(model='T92', mu=<mu>, pi_GC=<pi_gc>, kappa=<kappa>)`
:code:`mu` - substitution rate (float)
:code:`pi_GC` - : relative GC content
:code:`kappa` - ratio of transversion/transition rates (float)
- TN93:
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link: Tamura, Nei (1993), MolBiol Evol. 10 (3): 512-526.
DOI:10.1093/oxfordjournals.molbev.a040023
:code:`mygtr = GTR.standard(model='TN93', mu=<mu>, kappa1=<k1>, kappa2=<k2>)`
:code:`mu` - substitution rate (float)
:code:`kappa1` - relative A<-->C, A<-->T, T<-->G and G<-->C rates (float)
:code:`kappa2` - relative C<-->T rate (float)
.. Note::
Rate of A<-->G substitution is set to one. All other rates
(kappa1, kappa2) are specified relative to this rate | [
"Create",
"standard",
"model",
"of",
"molecular",
"evolution",
"."
] | python | test |
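Illustrative usage sketch for the GTR.standard factory documented in the record above. The calls mirror the docstring's own examples; the GTR class is assumed to be in scope and every numeric value is a placeholder, not taken from the source repository.

import numpy as np

# Hypothetical calls following the docstring above (values are invented).
jc = GTR.standard(model='jc69', mu=1.0, alphabet='nuc')
k80 = GTR.standard(model='k80', mu=1.0, kappa=0.3)
hky = GTR.standard(model='HKY85', mu=1.0,
                   pi=np.array([0.2, 0.3, 0.3, 0.2]), kappa=0.3)
tn93 = GTR.standard(model='TN93', mu=1.0, kappa1=0.5, kappa2=2.0)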
Blueqat/Blueqat | blueqat/pauli.py | https://github.com/Blueqat/Blueqat/blob/2ac8592c79e7acf4f385d982af82fbd68dafa5cc/blueqat/pauli.py#L707-L709 | def max_n(self):
"""Returns the maximum index of Pauli matrices in the Term."""
return max(term.max_n() for term in self.terms if term.ops) | [
"def",
"max_n",
"(",
"self",
")",
":",
"return",
"max",
"(",
"term",
".",
"max_n",
"(",
")",
"for",
"term",
"in",
"self",
".",
"terms",
"if",
"term",
".",
"ops",
")"
] | Returns the maximum index of Pauli matrices in the Term. | [
"Returns",
"the",
"maximum",
"index",
"of",
"Pauli",
"matrices",
"in",
"the",
"Term",
"."
] | python | train |
devopshq/artifactory | artifactory.py | https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/artifactory.py#L662-L673 | def owner(self, pathobj):
"""
Returns file owner
This makes little sense for Artifactory, but to be consistent
with pathlib, we return modified_by instead, if available
"""
stat = self.stat(pathobj)
if not stat.is_dir:
return stat.modified_by
else:
return 'nobody' | [
"def",
"owner",
"(",
"self",
",",
"pathobj",
")",
":",
"stat",
"=",
"self",
".",
"stat",
"(",
"pathobj",
")",
"if",
"not",
"stat",
".",
"is_dir",
":",
"return",
"stat",
".",
"modified_by",
"else",
":",
"return",
"'nobody'"
] | Returns file owner
This makes little sense for Artifactory, but to be consistent
with pathlib, we return modified_by instead, if available | [
"Returns",
"file",
"owner",
"This",
"makes",
"little",
"sense",
"for",
"Artifactory",
"but",
"to",
"be",
"consistent",
"with",
"pathlib",
"we",
"return",
"modified_by",
"instead",
"if",
"available"
] | python | train |
jmcarp/robobrowser | robobrowser/browser.py | https://github.com/jmcarp/robobrowser/blob/4284c11d00ae1397983e269aa180e5cf7ee5f4cf/robobrowser/browser.py#L311-L323 | def follow_link(self, link, **kwargs):
"""Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send`
"""
try:
href = link['href']
except KeyError:
raise exceptions.RoboError('Link element must have "href" '
'attribute')
self.open(self._build_url(href), **kwargs) | [
"def",
"follow_link",
"(",
"self",
",",
"link",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"href",
"=",
"link",
"[",
"'href'",
"]",
"except",
"KeyError",
":",
"raise",
"exceptions",
".",
"RoboError",
"(",
"'Link element must have \"href\" '",
"'attribute'",
")",
"self",
".",
"open",
"(",
"self",
".",
"_build_url",
"(",
"href",
")",
",",
"*",
"*",
"kwargs",
")"
] | Click a link.
:param Tag link: Link to click
:param kwargs: Keyword arguments to `Session::send` | [
"Click",
"a",
"link",
"."
] | python | train |
pgjones/quart | quart/blueprints.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/blueprints.py#L694-L711 | def register(
self,
app: 'Quart',
first_registration: bool,
*,
url_prefix: Optional[str]=None,
) -> None:
"""Register this blueprint on the app given."""
state = self.make_setup_state(app, first_registration, url_prefix=url_prefix)
if self.has_static_folder:
state.add_url_rule(
self.static_url_path + '/<path:filename>',
view_func=self.send_static_file, endpoint='static',
)
for func in self.deferred_functions:
func(state) | [
"def",
"register",
"(",
"self",
",",
"app",
":",
"'Quart'",
",",
"first_registration",
":",
"bool",
",",
"*",
",",
"url_prefix",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"state",
"=",
"self",
".",
"make_setup_state",
"(",
"app",
",",
"first_registration",
",",
"url_prefix",
"=",
"url_prefix",
")",
"if",
"self",
".",
"has_static_folder",
":",
"state",
".",
"add_url_rule",
"(",
"self",
".",
"static_url_path",
"+",
"'/<path:filename>'",
",",
"view_func",
"=",
"self",
".",
"send_static_file",
",",
"endpoint",
"=",
"'static'",
",",
")",
"for",
"func",
"in",
"self",
".",
"deferred_functions",
":",
"func",
"(",
"state",
")"
] | Register this blueprint on the app given. | [
"Register",
"this",
"blueprint",
"on",
"the",
"app",
"given",
"."
] | python | train |
rsennrich/Bleualign | bleualign/gale_church.py | https://github.com/rsennrich/Bleualign/blob/1de181dcc3257d885a2b981f751c0220c0e8958f/bleualign/gale_church.py#L10-L27 | def erfcc(x):
"""Complementary error function."""
z = abs(x)
t = 1 / (1 + 0.5 * z)
r = t * math.exp(-z * z -
1.26551223 + t *
(1.00002368 + t *
(.37409196 + t *
(.09678418 + t *
(-.18628806 + t *
(.27886807 + t *
(-1.13520398 + t *
(1.48851587 + t *
(-.82215223 + t * .17087277)))))))))
if (x >= 0.):
return r
else:
return 2. - r | [
"def",
"erfcc",
"(",
"x",
")",
":",
"z",
"=",
"abs",
"(",
"x",
")",
"t",
"=",
"1",
"/",
"(",
"1",
"+",
"0.5",
"*",
"z",
")",
"r",
"=",
"t",
"*",
"math",
".",
"exp",
"(",
"-",
"z",
"*",
"z",
"-",
"1.26551223",
"+",
"t",
"*",
"(",
"1.00002368",
"+",
"t",
"*",
"(",
".37409196",
"+",
"t",
"*",
"(",
".09678418",
"+",
"t",
"*",
"(",
"-",
".18628806",
"+",
"t",
"*",
"(",
".27886807",
"+",
"t",
"*",
"(",
"-",
"1.13520398",
"+",
"t",
"*",
"(",
"1.48851587",
"+",
"t",
"*",
"(",
"-",
".82215223",
"+",
"t",
"*",
".17087277",
")",
")",
")",
")",
")",
")",
")",
")",
")",
"if",
"(",
"x",
">=",
"0.",
")",
":",
"return",
"r",
"else",
":",
"return",
"2.",
"-",
"r"
] | Complementary error function. | [
"Complementary",
"error",
"function",
"."
] | python | test |
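The erfcc record above is the classic Numerical-Recipes rational approximation of the complementary error function; a quick sanity-check sketch against the standard library, assuming erfcc from the record is in scope:

import math

# The rational approximation agrees with math.erfc to roughly 1e-7
# for ordinary arguments, so a 1e-6 tolerance is comfortable.
for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(erfcc(x) - math.erfc(x)) < 1e-6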
aio-libs/aioredis | aioredis/sentinel/commands.py | https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/sentinel/commands.py#L78-L81 | def master_address(self, name):
"""Returns a (host, port) pair for the given ``name``."""
fut = self.execute(b'get-master-addr-by-name', name, encoding='utf-8')
return wait_convert(fut, parse_address) | [
"def",
"master_address",
"(",
"self",
",",
"name",
")",
":",
"fut",
"=",
"self",
".",
"execute",
"(",
"b'get-master-addr-by-name'",
",",
"name",
",",
"encoding",
"=",
"'utf-8'",
")",
"return",
"wait_convert",
"(",
"fut",
",",
"parse_address",
")"
] | Returns a (host, port) pair for the given ``name``. | [
"Returns",
"a",
"(",
"host",
"port",
")",
"pair",
"for",
"the",
"given",
"name",
"."
] | python | train |
yjzhang/uncurl_python | uncurl/gap_score.py | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L7-L25 | def preproc_data(data, gene_subset=False, **kwargs):
"""
basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection...
"""
import uncurl
from uncurl.preprocessing import log1p, cell_normalize
from sklearn.decomposition import TruncatedSVD
data_subset = data
if gene_subset:
gene_subset = uncurl.max_variance_genes(data)
data_subset = data[gene_subset, :]
tsvd = TruncatedSVD(min(8, data_subset.shape[0] - 1))
data_tsvd = tsvd.fit_transform(log1p(cell_normalize(data_subset)).T)
return data_tsvd | [
"def",
"preproc_data",
"(",
"data",
",",
"gene_subset",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"uncurl",
"from",
"uncurl",
".",
"preprocessing",
"import",
"log1p",
",",
"cell_normalize",
"from",
"sklearn",
".",
"decomposition",
"import",
"TruncatedSVD",
"data_subset",
"=",
"data",
"if",
"gene_subset",
":",
"gene_subset",
"=",
"uncurl",
".",
"max_variance_genes",
"(",
"data",
")",
"data_subset",
"=",
"data",
"[",
"gene_subset",
",",
":",
"]",
"tsvd",
"=",
"TruncatedSVD",
"(",
"min",
"(",
"8",
",",
"data_subset",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
")",
"data_tsvd",
"=",
"tsvd",
".",
"fit_transform",
"(",
"log1p",
"(",
"cell_normalize",
"(",
"data_subset",
")",
")",
".",
"T",
")",
"return",
"data_tsvd"
] | basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection... | [
"basic",
"data",
"preprocessing",
"before",
"running",
"gap",
"score"
] | python | train |
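A minimal sketch of the preprocessing pattern described in the preproc_data record above (cell normalisation, log1p, truncated SVD to at most 8 components), written against plain NumPy/scikit-learn rather than the uncurl helpers; the toy count matrix is invented.

import numpy as np
from sklearn.decomposition import TruncatedSVD

data = np.random.poisson(1.0, size=(200, 50)).astype(float)   # (genes, cells)

totals = data.sum(axis=0)
normed = data / totals * np.median(totals)                     # per-cell normalisation
logged = np.log1p(normed)                                      # log1p transform
tsvd = TruncatedSVD(n_components=min(8, logged.shape[0] - 1))
cells_8d = tsvd.fit_transform(logged.T)                        # shape (cells, 8)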
xtuml/pyxtuml | xtuml/load.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/load.py#L615-L620 | def p_create_rop_statement(self, p):
'''create_rop_statement : CREATE ROP REF_ID RELID FROM association_end TO association_end'''
args = [p[4]]
args.extend(p[6])
args.extend(p[8])
p[0] = CreateAssociationStmt(*args) | [
"def",
"p_create_rop_statement",
"(",
"self",
",",
"p",
")",
":",
"args",
"=",
"[",
"p",
"[",
"4",
"]",
"]",
"args",
".",
"extend",
"(",
"p",
"[",
"6",
"]",
")",
"args",
".",
"extend",
"(",
"p",
"[",
"8",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"CreateAssociationStmt",
"(",
"*",
"args",
")"
] | create_rop_statement : CREATE ROP REF_ID RELID FROM association_end TO association_end | [
"create_rop_statement",
":",
"CREATE",
"ROP",
"REF_ID",
"RELID",
"FROM",
"association_end",
"TO",
"association_end"
] | python | test |
DmitryUlyanov/Multicore-TSNE | tsne-embedding.py | https://github.com/DmitryUlyanov/Multicore-TSNE/blob/62dedde52469f3a0aeb22fdd7bce2538f17f77ef/tsne-embedding.py#L9-L42 | def imscatter(images, positions):
'''
Creates a scatter plot, where each plot is shown by corresponding image
'''
positions = np.array(positions)
bottoms = positions[:, 1] - np.array([im.shape[1] / 2.0 for im in images])
tops = bottoms + np.array([im.shape[1] for im in images])
lefts = positions[:, 0] - np.array([im.shape[0] / 2.0 for im in images])
rigths = lefts + np.array([im.shape[0] for im in images])
most_bottom = int(np.floor(bottoms.min()))
most_top = int(np.ceil(tops.max()))
most_left = int(np.floor(lefts.min()))
most_right = int(np.ceil(rigths.max()))
scatter_image = np.zeros(
        [most_right - most_left, most_top - most_bottom, 3], dtype=images[0].dtype)
# shift, now all from zero
positions -= [most_left, most_bottom]
for im, pos in zip(images, positions):
xl = int(pos[0] - im.shape[0] / 2)
xr = xl + im.shape[0]
yb = int(pos[1] - im.shape[1] / 2)
yt = yb + im.shape[1]
scatter_image[xl:xr, yb:yt, :] = im
return scatter_image | [
"def",
"imscatter",
"(",
"images",
",",
"positions",
")",
":",
"positions",
"=",
"np",
".",
"array",
"(",
"positions",
")",
"bottoms",
"=",
"positions",
"[",
":",
",",
"1",
"]",
"-",
"np",
".",
"array",
"(",
"[",
"im",
".",
"shape",
"[",
"1",
"]",
"/",
"2.0",
"for",
"im",
"in",
"images",
"]",
")",
"tops",
"=",
"bottoms",
"+",
"np",
".",
"array",
"(",
"[",
"im",
".",
"shape",
"[",
"1",
"]",
"for",
"im",
"in",
"images",
"]",
")",
"lefts",
"=",
"positions",
"[",
":",
",",
"0",
"]",
"-",
"np",
".",
"array",
"(",
"[",
"im",
".",
"shape",
"[",
"0",
"]",
"/",
"2.0",
"for",
"im",
"in",
"images",
"]",
")",
"rigths",
"=",
"lefts",
"+",
"np",
".",
"array",
"(",
"[",
"im",
".",
"shape",
"[",
"0",
"]",
"for",
"im",
"in",
"images",
"]",
")",
"most_bottom",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"bottoms",
".",
"min",
"(",
")",
")",
")",
"most_top",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"tops",
".",
"max",
"(",
")",
")",
")",
"most_left",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"lefts",
".",
"min",
"(",
")",
")",
")",
"most_right",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"rigths",
".",
"max",
"(",
")",
")",
")",
"scatter_image",
"=",
"np",
".",
"zeros",
"(",
"[",
"most_right",
"-",
"most_left",
",",
"most_top",
"-",
"most_bottom",
",",
"3",
"]",
",",
"dtype",
"=",
"imgs",
"[",
"0",
"]",
".",
"dtype",
")",
"# shift, now all from zero",
"positions",
"-=",
"[",
"most_left",
",",
"most_bottom",
"]",
"for",
"im",
",",
"pos",
"in",
"zip",
"(",
"images",
",",
"positions",
")",
":",
"xl",
"=",
"int",
"(",
"pos",
"[",
"0",
"]",
"-",
"im",
".",
"shape",
"[",
"0",
"]",
"/",
"2",
")",
"xr",
"=",
"xl",
"+",
"im",
".",
"shape",
"[",
"0",
"]",
"yb",
"=",
"int",
"(",
"pos",
"[",
"1",
"]",
"-",
"im",
".",
"shape",
"[",
"1",
"]",
"/",
"2",
")",
"yt",
"=",
"yb",
"+",
"im",
".",
"shape",
"[",
"1",
"]",
"scatter_image",
"[",
"xl",
":",
"xr",
",",
"yb",
":",
"yt",
",",
":",
"]",
"=",
"im",
"return",
"scatter_image"
] | Creates a scatter plot, where each plot is shown by corresponding image | [
"Creates",
"a",
"scatter",
"plot",
"where",
"each",
"plot",
"is",
"shown",
"by",
"corresponding",
"image"
] | python | train |
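Usage sketch for the imscatter record above; the thumbnail images and positions are invented. The function pastes each image centred on its position and returns the composed canvas.

import numpy as np

thumbs = [np.random.rand(16, 16, 3) for _ in range(3)]   # three small RGB thumbnails
positions = [(20, 20), (60, 40), (40, 80)]                # where to centre each one
canvas = imscatter(thumbs, positions)
print(canvas.shape)   # spans the bounding box of all pasted thumbnails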
materialsproject/pymatgen | pymatgen/analysis/find_dimension.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/find_dimension.py#L72-L121 | def find_clusters(struct, connected_list):
"""
Finds bonded clusters of atoms in the structure with periodic boundary conditions.
If there are atoms that are not bonded to anything, returns [0,1,0].(For faster computation time in FindDimension())
Args:
struct (Structure): Input structure
connected_list: Must be made from the same structure with FindConnected() function.
            An array of shape (number of bonded pairs, 2); each row is of the form [atomi, atomj].
Returns:
max_cluster: the size of the largest cluster in the crystal structure
min_cluster: the size of the smallest cluster in the crystal structure
clusters: list of bonded clusters found here, clusters are formatted as sets of indices of atoms
"""
n_atoms = len(struct.species)
if len(np.unique(connected_list)) != n_atoms:
return [0, 1, 0]
if n_atoms == 0:
return [0, 0, 0]
cluster_sizes = []
clusters = []
for atom in range(n_atoms):
connected_inds = np.where(connected_list == atom)[0]
atom_cluster = np.unique(connected_list[connected_inds])
atom_cluster = set(atom_cluster)
if len(clusters) == 0:
new_clusters = [atom_cluster]
new_cluster_sizes = [len(atom_cluster)]
else:
clusters_w_atom = [atom_cluster]
clusters_noatom = []
clusters_noatom_sizes = []
for cluster in clusters:
if len(cluster.intersection(atom_cluster)) > 0:
clusters_w_atom.append(cluster)
else:
clusters_noatom.append(cluster)
clusters_noatom_sizes.append(len(cluster))
if len(clusters_w_atom) > 1:
clusters_w_atom = [set.union(*clusters_w_atom)]
new_clusters = clusters_noatom + clusters_w_atom
new_cluster_sizes = clusters_noatom_sizes + [len(clusters_w_atom[0])]
clusters = list(new_clusters)
cluster_sizes = list(new_cluster_sizes)
if n_atoms in cluster_sizes:
break
max_cluster = max(cluster_sizes)
min_cluster = min(cluster_sizes)
return [max_cluster, min_cluster, clusters] | [
"def",
"find_clusters",
"(",
"struct",
",",
"connected_list",
")",
":",
"n_atoms",
"=",
"len",
"(",
"struct",
".",
"species",
")",
"if",
"len",
"(",
"np",
".",
"unique",
"(",
"connected_list",
")",
")",
"!=",
"n_atoms",
":",
"return",
"[",
"0",
",",
"1",
",",
"0",
"]",
"if",
"n_atoms",
"==",
"0",
":",
"return",
"[",
"0",
",",
"0",
",",
"0",
"]",
"cluster_sizes",
"=",
"[",
"]",
"clusters",
"=",
"[",
"]",
"for",
"atom",
"in",
"range",
"(",
"n_atoms",
")",
":",
"connected_inds",
"=",
"np",
".",
"where",
"(",
"connected_list",
"==",
"atom",
")",
"[",
"0",
"]",
"atom_cluster",
"=",
"np",
".",
"unique",
"(",
"connected_list",
"[",
"connected_inds",
"]",
")",
"atom_cluster",
"=",
"set",
"(",
"atom_cluster",
")",
"if",
"len",
"(",
"clusters",
")",
"==",
"0",
":",
"new_clusters",
"=",
"[",
"atom_cluster",
"]",
"new_cluster_sizes",
"=",
"[",
"len",
"(",
"atom_cluster",
")",
"]",
"else",
":",
"clusters_w_atom",
"=",
"[",
"atom_cluster",
"]",
"clusters_noatom",
"=",
"[",
"]",
"clusters_noatom_sizes",
"=",
"[",
"]",
"for",
"cluster",
"in",
"clusters",
":",
"if",
"len",
"(",
"cluster",
".",
"intersection",
"(",
"atom_cluster",
")",
")",
">",
"0",
":",
"clusters_w_atom",
".",
"append",
"(",
"cluster",
")",
"else",
":",
"clusters_noatom",
".",
"append",
"(",
"cluster",
")",
"clusters_noatom_sizes",
".",
"append",
"(",
"len",
"(",
"cluster",
")",
")",
"if",
"len",
"(",
"clusters_w_atom",
")",
">",
"1",
":",
"clusters_w_atom",
"=",
"[",
"set",
".",
"union",
"(",
"*",
"clusters_w_atom",
")",
"]",
"new_clusters",
"=",
"clusters_noatom",
"+",
"clusters_w_atom",
"new_cluster_sizes",
"=",
"clusters_noatom_sizes",
"+",
"[",
"len",
"(",
"clusters_w_atom",
"[",
"0",
"]",
")",
"]",
"clusters",
"=",
"list",
"(",
"new_clusters",
")",
"cluster_sizes",
"=",
"list",
"(",
"new_cluster_sizes",
")",
"if",
"n_atoms",
"in",
"cluster_sizes",
":",
"break",
"max_cluster",
"=",
"max",
"(",
"cluster_sizes",
")",
"min_cluster",
"=",
"min",
"(",
"cluster_sizes",
")",
"return",
"[",
"max_cluster",
",",
"min_cluster",
",",
"clusters",
"]"
] | Finds bonded clusters of atoms in the structure with periodic boundary conditions.
If there are atoms that are not bonded to anything, returns [0,1,0].(For faster computation time in FindDimension())
Args:
struct (Structure): Input structure
connected_list: Must be made from the same structure with FindConnected() function.
        An array of shape (number of bonded pairs, 2); each row is of the form [atomi, atomj].
Returns:
max_cluster: the size of the largest cluster in the crystal structure
min_cluster: the size of the smallest cluster in the crystal structure
clusters: list of bonded clusters found here, clusters are formatted as sets of indices of atoms | [
"Finds",
"bonded",
"clusters",
"of",
"atoms",
"in",
"the",
"structure",
"with",
"periodic",
"boundary",
"conditions",
".",
"If",
"there",
"are",
"atoms",
"that",
"are",
"not",
"bonded",
"to",
"anything",
"returns",
"[",
"0",
"1",
"0",
"]",
".",
"(",
"For",
"faster",
"computation",
"time",
"in",
"FindDimension",
"()",
")"
] | python | train |
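The clustering loop in the find_clusters record above is essentially an incremental union of bonded pairs; the same idea in isolation, with a made-up bond list:

# Atoms 0-1-2 are bonded into one cluster, atoms 3-4 into another.
bonds = [(0, 1), (1, 2), (3, 4)]

clusters = []
for i, j in bonds:
    pair = {i, j}
    touching = [c for c in clusters if c & pair]          # existing clusters sharing an atom
    clusters = [c for c in clusters if not (c & pair)]
    clusters.append(set.union(pair, *touching))           # merge them with the new pair
print(clusters)                                           # [{0, 1, 2}, {3, 4}]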
pydata/xarray | xarray/core/formatting.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/formatting.py#L122-L136 | def format_timedelta(t, timedelta_format=None):
"""Cast given object to a Timestamp and return a nicely formatted string"""
timedelta_str = str(pd.Timedelta(t))
try:
days_str, time_str = timedelta_str.split(' days ')
except ValueError:
# catch NaT and others that don't split nicely
return timedelta_str
else:
if timedelta_format == 'date':
return days_str + ' days'
elif timedelta_format == 'time':
return time_str
else:
return timedelta_str | [
"def",
"format_timedelta",
"(",
"t",
",",
"timedelta_format",
"=",
"None",
")",
":",
"timedelta_str",
"=",
"str",
"(",
"pd",
".",
"Timedelta",
"(",
"t",
")",
")",
"try",
":",
"days_str",
",",
"time_str",
"=",
"timedelta_str",
".",
"split",
"(",
"' days '",
")",
"except",
"ValueError",
":",
"# catch NaT and others that don't split nicely",
"return",
"timedelta_str",
"else",
":",
"if",
"timedelta_format",
"==",
"'date'",
":",
"return",
"days_str",
"+",
"' days'",
"elif",
"timedelta_format",
"==",
"'time'",
":",
"return",
"time_str",
"else",
":",
"return",
"timedelta_str"
] | Cast given object to a Timedelta and return a nicely formatted string | [
"Cast",
"given",
"object",
"to",
"a",
"Timestamp",
"and",
"return",
"a",
"nicely",
"formatted",
"string"
] | python | train |
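Behaviour sketch for the format_timedelta record above (assumes pandas and the function itself are in scope):

import pandas as pd

td = pd.Timedelta(days=2, hours=3)
print(format_timedelta(td, 'date'))   # '2 days'
print(format_timedelta(td, 'time'))   # '03:00:00'
print(format_timedelta(pd.NaT))       # 'NaT' -- no ' days ' to split on, falls back to the full string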
profitbricks/profitbricks-sdk-python | profitbricks/client.py | https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1181-L1209 | def update_nic(self, datacenter_id, server_id,
nic_id, **kwargs):
"""
Updates a NIC with the parameters provided.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str``
"""
data = {}
for attr, value in kwargs.items():
data[self._underscore_to_camelcase(attr)] = value
response = self._perform_request(
url='/datacenters/%s/servers/%s/nics/%s' % (
datacenter_id,
server_id,
nic_id),
method='PATCH',
data=json.dumps(data))
return response | [
"def",
"update_nic",
"(",
"self",
",",
"datacenter_id",
",",
"server_id",
",",
"nic_id",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"}",
"for",
"attr",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"data",
"[",
"self",
".",
"_underscore_to_camelcase",
"(",
"attr",
")",
"]",
"=",
"value",
"response",
"=",
"self",
".",
"_perform_request",
"(",
"url",
"=",
"'/datacenters/%s/servers/%s/nics/%s'",
"%",
"(",
"datacenter_id",
",",
"server_id",
",",
"nic_id",
")",
",",
"method",
"=",
"'PATCH'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"return",
"response"
] | Updates a NIC with the parameters provided.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str`` | [
"Updates",
"a",
"NIC",
"with",
"the",
"parameters",
"provided",
"."
] | python | valid |
CiscoUcs/UcsPythonSDK | src/UcsSdk/UcsBase.py | https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L765-L819 | def GetSyncMoConfig(ConfigDoc):
""" Internal support method for SyncManagedObject. """
moConfigMap = {}
configList = ConfigDoc.getElementsByTagName("mo")
for moConfigNode in configList:
classId = None
noun = None
version = None
actionVersion = None
action = None
ignoreReason = None
status = None
excludeList = None
if moConfigNode.hasAttribute("classid"):
classId = moConfigNode.getAttribute("classid")
if moConfigNode.hasAttribute("noun"):
noun = moConfigNode.getAttribute("noun")
if moConfigNode.hasAttribute("version"):
version = moConfigNode.getAttribute("version")
if moConfigNode.hasAttribute("actionVersion"):
actionVersion = moConfigNode.getAttribute("actionVersion")
if moConfigNode.hasAttribute("action"):
action = moConfigNode.getAttribute("action")
if moConfigNode.hasAttribute("ignoreReason"):
ignoreReason = moConfigNode.getAttribute("ignoreReason")
if moConfigNode.hasAttribute("status"):
status = moConfigNode.getAttribute("status")
if moConfigNode.hasAttribute("excludeList"):
excludeList = moConfigNode.getAttribute("excludeList")
# SyncMoConfig Object
moConfig = None
if classId:
moConfig = SyncMoConfig(classId, noun, version, actionVersion, action, ignoreReason, status,
excludeList)
if moConfig:
if classId in moConfigMap:
moConfigMap[classId] = moConfig
else:
moConfigList = []
moConfigList.append(moConfig)
moConfigMap[classId] = moConfigList
return moConfigMap | [
"def",
"GetSyncMoConfig",
"(",
"ConfigDoc",
")",
":",
"moConfigMap",
"=",
"{",
"}",
"configList",
"=",
"ConfigDoc",
".",
"getElementsByTagName",
"(",
"\"mo\"",
")",
"for",
"moConfigNode",
"in",
"configList",
":",
"classId",
"=",
"None",
"noun",
"=",
"None",
"version",
"=",
"None",
"actionVersion",
"=",
"None",
"action",
"=",
"None",
"ignoreReason",
"=",
"None",
"status",
"=",
"None",
"excludeList",
"=",
"None",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"classid\"",
")",
":",
"classId",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"classid\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"noun\"",
")",
":",
"noun",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"noun\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"version\"",
")",
":",
"version",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"version\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"actionVersion\"",
")",
":",
"actionVersion",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"actionVersion\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"action\"",
")",
":",
"action",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"action\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"ignoreReason\"",
")",
":",
"ignoreReason",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"ignoreReason\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"status\"",
")",
":",
"status",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"status\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"excludeList\"",
")",
":",
"excludeList",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"excludeList\"",
")",
"# SyncMoConfig Object",
"moConfig",
"=",
"None",
"if",
"classId",
":",
"moConfig",
"=",
"SyncMoConfig",
"(",
"classId",
",",
"noun",
",",
"version",
",",
"actionVersion",
",",
"action",
",",
"ignoreReason",
",",
"status",
",",
"excludeList",
")",
"if",
"moConfig",
":",
"if",
"classId",
"in",
"moConfigMap",
":",
"moConfigMap",
"[",
"classId",
"]",
"=",
"moConfig",
"else",
":",
"moConfigList",
"=",
"[",
"]",
"moConfigList",
".",
"append",
"(",
"moConfig",
")",
"moConfigMap",
"[",
"classId",
"]",
"=",
"moConfigList",
"return",
"moConfigMap"
] | Internal support method for SyncManagedObject. | [
"Internal",
"support",
"method",
"for",
"SyncManagedObject",
"."
] | python | train |
saltstack/salt | salt/utils/iam.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/iam.py#L56-L66 | def _convert_key_to_str(key):
'''
Stolen completely from boto.providers
'''
# IMPORTANT: on PY2, the secret key must be str and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
#
# pylint: disable=incompatible-py3-code,undefined-variable
return salt.utils.data.encode(key) \
if six.PY2 and isinstance(key, unicode) \
else key | [
"def",
"_convert_key_to_str",
"(",
"key",
")",
":",
"# IMPORTANT: on PY2, the secret key must be str and not unicode to work",
"# properly with hmac.new (see http://bugs.python.org/issue5285)",
"#",
"# pylint: disable=incompatible-py3-code,undefined-variable",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"encode",
"(",
"key",
")",
"if",
"six",
".",
"PY2",
"and",
"isinstance",
"(",
"key",
",",
"unicode",
")",
"else",
"key"
] | Stolen completely from boto.providers | [
"Stolen",
"completely",
"from",
"boto",
".",
"providers"
] | python | train |
F5Networks/f5-common-python | f5/bigip/resource.py | https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/resource.py#L137-L149 | def _missing_required_parameters(rqset, **kwargs):
"""Helper function to do operation on sets.
Checks for any missing required parameters.
Returns non-empty or empty list. With empty
list being False.
::returns list
"""
key_set = set(list(iterkeys(kwargs)))
required_minus_received = rqset - key_set
if required_minus_received != set():
return list(required_minus_received) | [
"def",
"_missing_required_parameters",
"(",
"rqset",
",",
"*",
"*",
"kwargs",
")",
":",
"key_set",
"=",
"set",
"(",
"list",
"(",
"iterkeys",
"(",
"kwargs",
")",
")",
")",
"required_minus_received",
"=",
"rqset",
"-",
"key_set",
"if",
"required_minus_received",
"!=",
"set",
"(",
")",
":",
"return",
"list",
"(",
"required_minus_received",
")"
] | Helper function to do operation on sets.
Checks for any missing required parameters.
Returns non-empty or empty list. With empty
list being False.
::returns list | [
"Helper",
"function",
"to",
"do",
"operation",
"on",
"sets",
"."
] | python | train |
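A quick illustration of the set arithmetic in the _missing_required_parameters record above; the required-key set and keyword names are invented, and the helper is assumed importable along with its six.iterkeys dependency.

required = {'name', 'partition'}

print(_missing_required_parameters(required, name='x', partition='Common'))  # None (nothing missing)
print(_missing_required_parameters(required, name='x'))                      # ['partition']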
uber/tchannel-python | tchannel/tornado/response.py | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/response.py#L109-L119 | def get_header(self):
"""Get the header value from the response.
        :return: a future that contains the deserialized value of the header
"""
raw_header = yield get_arg(self, 1)
if not self.serializer:
raise tornado.gen.Return(raw_header)
else:
header = self.serializer.deserialize_header(raw_header)
raise tornado.gen.Return(header) | [
"def",
"get_header",
"(",
"self",
")",
":",
"raw_header",
"=",
"yield",
"get_arg",
"(",
"self",
",",
"1",
")",
"if",
"not",
"self",
".",
"serializer",
":",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"raw_header",
")",
"else",
":",
"header",
"=",
"self",
".",
"serializer",
".",
"deserialize_header",
"(",
"raw_header",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"header",
")"
] | Get the header value from the response.
    :return: a future that contains the deserialized value of the header | [
"Get",
"the",
"header",
"value",
"from",
"the",
"response",
"."
] | python | train |
mdsol/rwslib | rwslib/rws_requests/odm_adapter.py | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/rws_requests/odm_adapter.py#L121-L127 | def _querystring(self):
"""Additional keyword arguments"""
kw = {"studyoid": self.studyoid}
if self.location_oid is not None:
kw["locationoid"] = self.location_oid
return kw | [
"def",
"_querystring",
"(",
"self",
")",
":",
"kw",
"=",
"{",
"\"studyoid\"",
":",
"self",
".",
"studyoid",
"}",
"if",
"self",
".",
"location_oid",
"is",
"not",
"None",
":",
"kw",
"[",
"\"locationoid\"",
"]",
"=",
"self",
".",
"location_oid",
"return",
"kw"
] | Additional keyword arguments | [
"Additional",
"keyword",
"arguments"
] | python | train |
aio-libs/aiohttp-debugtoolbar | aiohttp_debugtoolbar/tbtools/tbtools.py | https://github.com/aio-libs/aiohttp-debugtoolbar/blob/a1c3fb2b487bcaaf23eb71ee4c9c3cfc9cb94322/aiohttp_debugtoolbar/tbtools/tbtools.py#L198-L227 | def render_full(self, request, lodgeit_url=None):
"""Render the Full HTML page with the traceback info."""
static_path = request.app.router[STATIC_ROUTE_NAME].url_for(
filename='')
root_path = request.app.router[ROOT_ROUTE_NAME].url_for()
exc = escape(self.exception)
summary = self.render_summary(request.app, include_title=False)
token = request.app[APP_KEY]['pdtb_token']
qs = {'token': token, 'tb': str(self.id)}
url = request.app.router[EXC_ROUTE_NAME].url_for().with_query(qs)
evalex = request.app[APP_KEY]['exc_history'].eval_exc
vars = {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'lodgeit_url': escape(lodgeit_url),
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': summary,
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'static_path': static_path,
'token': token,
'root_path': root_path,
'url': url,
}
return render('exception.jinja2', request.app, vars, request=request) | [
"def",
"render_full",
"(",
"self",
",",
"request",
",",
"lodgeit_url",
"=",
"None",
")",
":",
"static_path",
"=",
"request",
".",
"app",
".",
"router",
"[",
"STATIC_ROUTE_NAME",
"]",
".",
"url_for",
"(",
"filename",
"=",
"''",
")",
"root_path",
"=",
"request",
".",
"app",
".",
"router",
"[",
"ROOT_ROUTE_NAME",
"]",
".",
"url_for",
"(",
")",
"exc",
"=",
"escape",
"(",
"self",
".",
"exception",
")",
"summary",
"=",
"self",
".",
"render_summary",
"(",
"request",
".",
"app",
",",
"include_title",
"=",
"False",
")",
"token",
"=",
"request",
".",
"app",
"[",
"APP_KEY",
"]",
"[",
"'pdtb_token'",
"]",
"qs",
"=",
"{",
"'token'",
":",
"token",
",",
"'tb'",
":",
"str",
"(",
"self",
".",
"id",
")",
"}",
"url",
"=",
"request",
".",
"app",
".",
"router",
"[",
"EXC_ROUTE_NAME",
"]",
".",
"url_for",
"(",
")",
".",
"with_query",
"(",
"qs",
")",
"evalex",
"=",
"request",
".",
"app",
"[",
"APP_KEY",
"]",
"[",
"'exc_history'",
"]",
".",
"eval_exc",
"vars",
"=",
"{",
"'evalex'",
":",
"evalex",
"and",
"'true'",
"or",
"'false'",
",",
"'console'",
":",
"'false'",
",",
"'lodgeit_url'",
":",
"escape",
"(",
"lodgeit_url",
")",
",",
"'title'",
":",
"exc",
",",
"'exception'",
":",
"exc",
",",
"'exception_type'",
":",
"escape",
"(",
"self",
".",
"exception_type",
")",
",",
"'summary'",
":",
"summary",
",",
"'plaintext'",
":",
"self",
".",
"plaintext",
",",
"'plaintext_cs'",
":",
"re",
".",
"sub",
"(",
"'-{2,}'",
",",
"'-'",
",",
"self",
".",
"plaintext",
")",
",",
"'traceback_id'",
":",
"self",
".",
"id",
",",
"'static_path'",
":",
"static_path",
",",
"'token'",
":",
"token",
",",
"'root_path'",
":",
"root_path",
",",
"'url'",
":",
"url",
",",
"}",
"return",
"render",
"(",
"'exception.jinja2'",
",",
"request",
".",
"app",
",",
"vars",
",",
"request",
"=",
"request",
")"
] | Render the Full HTML page with the traceback info. | [
"Render",
"the",
"Full",
"HTML",
"page",
"with",
"the",
"traceback",
"info",
"."
] | python | train |
Hypex/hyppy | hyppy/hapi.py | https://github.com/Hypex/hyppy/blob/a425619c2a102b0e598fd6cac8aa0f6b766f542d/hyppy/hapi.py#L17-L22 | def requires_api_auth(fn):
"""Decorator for HAPI methods that requires the instance to be authenticated with a HAPI token"""
def wrapper(self, *args, **kwargs):
self.auth_context = HAPI.auth_context_hapi
return fn(self, *args, **kwargs)
return wrapper | [
"def",
"requires_api_auth",
"(",
"fn",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"auth_context",
"=",
"HAPI",
".",
"auth_context_hapi",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | Decorator for HAPI methods that requires the instance to be authenticated with a HAPI token | [
"Decorator",
"for",
"HAPI",
"methods",
"that",
"requires",
"the",
"instance",
"to",
"be",
"authenticated",
"with",
"a",
"HAPI",
"token"
] | python | train |
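Toy sketch of how the requires_api_auth decorator in the record above is presumably applied. The HAPI class below is a stand-in defined only for illustration (the real one lives in hyppy.hapi), and the decorator is assumed to resolve the HAPI name in the same module.

class HAPI(object):
    auth_context_hapi = 'hapi_token'          # invented sentinel value

    def __init__(self):
        self.auth_context = None

    @requires_api_auth
    def whoami(self):
        return self.auth_context              # set by the decorator before the call runs

print(HAPI().whoami())                        # 'hapi_token'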
mozilla/python-zeppelin | zeppelin/converters/markdown.py | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L129-L135 | def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out])) | [
"def",
"build_output",
"(",
"self",
",",
"fout",
")",
":",
"fout",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"out",
"]",
")",
")"
] | Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file. | [
"Squash",
"self",
".",
"out",
"into",
"string",
"."
] | python | train |
guaix-ucm/pyemir | emirdrp/processing/bardetect.py | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bardetect.py#L186-L211 | def _locate_bar_gen(icut, epos, transform1, transform2):
"""Generic function for the fine position of the CSU"""
epos_pix = coor_to_pix_1d(epos)
# transform ->
epos_pix_s = transform1(epos_pix)
icut2 = transform2(icut)
#
try:
res = position_half_h(icut2, epos_pix_s)
xint_s, next_peak_s, wpos1_s, wpos2_s, background_level, half_height = res
#
xint = transform1(xint_s)
#
epos_f = xint
error = 0
except ValueError:
error = 2
epos_f = epos
return epos_pix, epos_f, error | [
"def",
"_locate_bar_gen",
"(",
"icut",
",",
"epos",
",",
"transform1",
",",
"transform2",
")",
":",
"epos_pix",
"=",
"coor_to_pix_1d",
"(",
"epos",
")",
"# transform ->",
"epos_pix_s",
"=",
"transform1",
"(",
"epos_pix",
")",
"icut2",
"=",
"transform2",
"(",
"icut",
")",
"#",
"try",
":",
"res",
"=",
"position_half_h",
"(",
"icut2",
",",
"epos_pix_s",
")",
"xint_s",
",",
"next_peak_s",
",",
"wpos1_s",
",",
"wpos2_s",
",",
"background_level",
",",
"half_height",
"=",
"res",
"#",
"xint",
"=",
"transform1",
"(",
"xint_s",
")",
"#",
"epos_f",
"=",
"xint",
"error",
"=",
"0",
"except",
"ValueError",
":",
"error",
"=",
"2",
"epos_f",
"=",
"epos",
"return",
"epos_pix",
",",
"epos_f",
",",
"error"
] | Generic function for the fine position of the CSU | [
"Generic",
"function",
"for",
"the",
"fine",
"position",
"of",
"the",
"CSU"
] | python | train |
agoragames/haigha | haigha/connections/rabbit_connection.py | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L240-L266 | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None, cancel_cb=None):
'''Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
'''
# Register the consumer's broker-cancel callback entry
if cancel_cb is not None:
if not callable(cancel_cb):
raise ValueError('cancel_cb is not callable: %r' % (cancel_cb,))
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
self._broker_cancel_cb_map[consumer_tag] = cancel_cb
# Start consumer
super(RabbitBasicClass, self).consume(queue, consumer, consumer_tag,
no_local, no_ack, exclusive,
nowait, ticket, cb) | [
"def",
"consume",
"(",
"self",
",",
"queue",
",",
"consumer",
",",
"consumer_tag",
"=",
"''",
",",
"no_local",
"=",
"False",
",",
"no_ack",
"=",
"True",
",",
"exclusive",
"=",
"False",
",",
"nowait",
"=",
"True",
",",
"ticket",
"=",
"None",
",",
"cb",
"=",
"None",
",",
"cancel_cb",
"=",
"None",
")",
":",
"# Register the consumer's broker-cancel callback entry",
"if",
"cancel_cb",
"is",
"not",
"None",
":",
"if",
"not",
"callable",
"(",
"cancel_cb",
")",
":",
"raise",
"ValueError",
"(",
"'cancel_cb is not callable: %r'",
"%",
"(",
"cancel_cb",
",",
")",
")",
"if",
"not",
"consumer_tag",
":",
"consumer_tag",
"=",
"self",
".",
"_generate_consumer_tag",
"(",
")",
"self",
".",
"_broker_cancel_cb_map",
"[",
"consumer_tag",
"]",
"=",
"cancel_cb",
"# Start consumer",
"super",
"(",
"RabbitBasicClass",
",",
"self",
")",
".",
"consume",
"(",
"queue",
",",
"consumer",
",",
"consumer_tag",
",",
"no_local",
",",
"no_ack",
",",
"exclusive",
",",
"nowait",
",",
"ticket",
",",
"cb",
")"
] | Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag) | [
"Start",
"a",
"queue",
"consumer",
"."
] | python | train |
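Usage sketch for the broker-cancel hook added by the consume record above; the channel object, queue name and callbacks are invented, and channel is assumed to be a haigha channel backed by a RabbitConnection.

def on_message(msg):
    print('got message:', msg.body)

def on_broker_cancel(consumer_tag):
    # Invoked if RabbitMQ cancels the consumer, e.g. because its queue was deleted.
    print('consumer cancelled by broker:', consumer_tag)

channel.basic.consume('work-queue', on_message,
                      no_ack=False, cancel_cb=on_broker_cancel)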
deepmind/sonnet | sonnet/python/modules/gated_rnn.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/gated_rnn.py#L178-L207 | def get_possible_initializer_keys(cls, use_peepholes=False,
use_projection=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
Args:
cls:The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_projection: Boolean that indicates whether a recurrent projection
layer is used.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_projection:
possible_keys.difference_update({cls.W_H_PROJECTION})
return possible_keys | [
"def",
"get_possible_initializer_keys",
"(",
"cls",
",",
"use_peepholes",
"=",
"False",
",",
"use_projection",
"=",
"False",
")",
":",
"possible_keys",
"=",
"cls",
".",
"POSSIBLE_INITIALIZER_KEYS",
".",
"copy",
"(",
")",
"if",
"not",
"use_peepholes",
":",
"possible_keys",
".",
"difference_update",
"(",
"{",
"cls",
".",
"W_F_DIAG",
",",
"cls",
".",
"W_I_DIAG",
",",
"cls",
".",
"W_O_DIAG",
"}",
")",
"if",
"not",
"use_projection",
":",
"possible_keys",
".",
"difference_update",
"(",
"{",
"cls",
".",
"W_H_PROJECTION",
"}",
")",
"return",
"possible_keys"
] | Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
Args:
    cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_projection: Boolean that indicates whether a recurrent projection
layer is used.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor. | [
"Returns",
"the",
"keys",
"the",
"dictionary",
"of",
"variable",
"initializers",
"may",
"contain",
"."
] | python | train |
Tathorack/searchcolor | searchcolor/average.py | https://github.com/Tathorack/searchcolor/blob/f50b0a40e9da59fd994440f20e106730b9deb6bf/searchcolor/average.py#L123-L146 | def google_average(search_term, num_results, api_key, cse_id, **kwargs):
"""Does a Google image search to get the average color of the
top x results.
Arguments
search_term: str
        term to search for
num_results: int
number of results to average
api_key: str
Google API key
cse_id: str
Google CSE ID
max_threads: int
max number of processes to spawn
return {'name':search_term, 'red':r_avg, 'green':g_avg, 'blue':b_avg}
or None
"""
url_list = []
result = {'name': search_term}
GIS = GoogleImageSearch(api_key, cse_id)
url_list = GIS.search(search_term, num_results)
result.update(_image_search_average(url_list, **kwargs))
return(result) | [
"def",
"google_average",
"(",
"search_term",
",",
"num_results",
",",
"api_key",
",",
"cse_id",
",",
"*",
"*",
"kwargs",
")",
":",
"url_list",
"=",
"[",
"]",
"result",
"=",
"{",
"'name'",
":",
"search_term",
"}",
"GIS",
"=",
"GoogleImageSearch",
"(",
"api_key",
",",
"cse_id",
")",
"url_list",
"=",
"GIS",
".",
"search",
"(",
"search_term",
",",
"num_results",
")",
"result",
".",
"update",
"(",
"_image_search_average",
"(",
"url_list",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"(",
"result",
")"
] | Does a Google image search to get the average color of the
top x results.
Arguments
search_term: str
    term to search for
num_results: int
number of results to average
api_key: str
Google API key
cse_id: str
Google CSE ID
max_threads: int
max number of processes to spawn
return {'name':search_term, 'red':r_avg, 'green':g_avg, 'blue':b_avg}
or None | [
"Does",
"a",
"Google",
"image",
"search",
"to",
"get",
"the",
"average",
"color",
"of",
"the",
"top",
"x",
"results",
".",
"Arguments",
"search_term",
":",
"str",
"tearm",
"to",
"search",
"for",
"num_results",
":",
"int",
"number",
"of",
"results",
"to",
"average",
"api_key",
":",
"str",
"Google",
"API",
"key",
"cse_id",
":",
"str",
"Google",
"CSE",
"ID",
"max_threads",
":",
"int",
"max",
"number",
"of",
"processes",
"to",
"spawn"
] | python | train |
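Calling the google_average record above looks roughly like this; the credentials, search term and thread count are placeholders.

API_KEY = 'your-google-api-key'           # from the Google developer console
CSE_ID = 'your-custom-search-engine-id'   # custom search engine configured for image search

result = google_average('sunset', 10, API_KEY, CSE_ID, max_threads=4)
print(result)   # e.g. {'name': 'sunset', 'red': ..., 'green': ..., 'blue': ...}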
timothydmorton/isochrones | isochrones/starmodel_old.py | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L767-L841 | def triangle(self, params=None, query=None, extent=0.999,
**kwargs):
"""
Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
            Figure object containing corner plot.
"""
if triangle is None:
raise ImportError('please run "pip install triangle_plot".')
if params is None:
if self.fit_for_distance:
params = ['mass', 'age', 'feh', 'distance', 'AV']
else:
params = ['mass', 'age', 'feh']
df = self.samples
if query is not None:
df = df.query(query)
#convert extent to ranges, but making sure
# that truths are in range.
extents = []
remove = []
for i,par in enumerate(params):
m = re.search('delta_(\w+)$',par)
if m:
if type(self) == BinaryStarModel:
b = m.group(1)
values = (df['{}_mag_B'.format(b)] -
df['{}_mag_A'.format(b)])
df[par] = values
else:
remove.append(i)
continue
else:
values = df[par]
qs = np.array([0.5 - 0.5*extent, 0.5 + 0.5*extent])
minval, maxval = values.quantile(qs)
if 'truths' in kwargs:
datarange = maxval - minval
if kwargs['truths'][i] < minval:
minval = kwargs['truths'][i] - 0.05*datarange
if kwargs['truths'][i] > maxval:
maxval = kwargs['truths'][i] + 0.05*datarange
extents.append((minval,maxval))
[params.pop(i) for i in remove]
fig = triangle.corner(df[params], labels=params,
extents=extents, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig | [
"def",
"triangle",
"(",
"self",
",",
"params",
"=",
"None",
",",
"query",
"=",
"None",
",",
"extent",
"=",
"0.999",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"triangle",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"'please run \"pip install triangle_plot\".'",
")",
"if",
"params",
"is",
"None",
":",
"if",
"self",
".",
"fit_for_distance",
":",
"params",
"=",
"[",
"'mass'",
",",
"'age'",
",",
"'feh'",
",",
"'distance'",
",",
"'AV'",
"]",
"else",
":",
"params",
"=",
"[",
"'mass'",
",",
"'age'",
",",
"'feh'",
"]",
"df",
"=",
"self",
".",
"samples",
"if",
"query",
"is",
"not",
"None",
":",
"df",
"=",
"df",
".",
"query",
"(",
"query",
")",
"#convert extent to ranges, but making sure",
"# that truths are in range.",
"extents",
"=",
"[",
"]",
"remove",
"=",
"[",
"]",
"for",
"i",
",",
"par",
"in",
"enumerate",
"(",
"params",
")",
":",
"m",
"=",
"re",
".",
"search",
"(",
"'delta_(\\w+)$'",
",",
"par",
")",
"if",
"m",
":",
"if",
"type",
"(",
"self",
")",
"==",
"BinaryStarModel",
":",
"b",
"=",
"m",
".",
"group",
"(",
"1",
")",
"values",
"=",
"(",
"df",
"[",
"'{}_mag_B'",
".",
"format",
"(",
"b",
")",
"]",
"-",
"df",
"[",
"'{}_mag_A'",
".",
"format",
"(",
"b",
")",
"]",
")",
"df",
"[",
"par",
"]",
"=",
"values",
"else",
":",
"remove",
".",
"append",
"(",
"i",
")",
"continue",
"else",
":",
"values",
"=",
"df",
"[",
"par",
"]",
"qs",
"=",
"np",
".",
"array",
"(",
"[",
"0.5",
"-",
"0.5",
"*",
"extent",
",",
"0.5",
"+",
"0.5",
"*",
"extent",
"]",
")",
"minval",
",",
"maxval",
"=",
"values",
".",
"quantile",
"(",
"qs",
")",
"if",
"'truths'",
"in",
"kwargs",
":",
"datarange",
"=",
"maxval",
"-",
"minval",
"if",
"kwargs",
"[",
"'truths'",
"]",
"[",
"i",
"]",
"<",
"minval",
":",
"minval",
"=",
"kwargs",
"[",
"'truths'",
"]",
"[",
"i",
"]",
"-",
"0.05",
"*",
"datarange",
"if",
"kwargs",
"[",
"'truths'",
"]",
"[",
"i",
"]",
">",
"maxval",
":",
"maxval",
"=",
"kwargs",
"[",
"'truths'",
"]",
"[",
"i",
"]",
"+",
"0.05",
"*",
"datarange",
"extents",
".",
"append",
"(",
"(",
"minval",
",",
"maxval",
")",
")",
"[",
"params",
".",
"pop",
"(",
"i",
")",
"for",
"i",
"in",
"remove",
"]",
"fig",
"=",
"triangle",
".",
"corner",
"(",
"df",
"[",
"params",
"]",
",",
"labels",
"=",
"params",
",",
"extents",
"=",
"extents",
",",
"*",
"*",
"kwargs",
")",
"fig",
".",
"suptitle",
"(",
"self",
".",
"name",
",",
"fontsize",
"=",
"22",
")",
"return",
"fig"
] | Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
    Figure object containing corner plot. | [
"Makes",
"a",
"nifty",
"corner",
"plot",
"."
] | python | train |
acutesoftware/AIKIF | aikif/index.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/index.py#L89-L97 | def format_op_row(ipFile, totLines, totWords, uniqueWords):
"""
Format the output row with stats
"""
txt = os.path.basename(ipFile).ljust(36) + ' '
txt += str(totLines).rjust(7) + ' '
txt += str(totWords).rjust(7) + ' '
txt += str(len(uniqueWords)).rjust(7) + ' '
return txt | [
"def",
"format_op_row",
"(",
"ipFile",
",",
"totLines",
",",
"totWords",
",",
"uniqueWords",
")",
":",
"txt",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"ipFile",
")",
".",
"ljust",
"(",
"36",
")",
"+",
"' '",
"txt",
"+=",
"str",
"(",
"totLines",
")",
".",
"rjust",
"(",
"7",
")",
"+",
"' '",
"txt",
"+=",
"str",
"(",
"totWords",
")",
".",
"rjust",
"(",
"7",
")",
"+",
"' '",
"txt",
"+=",
"str",
"(",
"len",
"(",
"uniqueWords",
")",
")",
".",
"rjust",
"(",
"7",
")",
"+",
"' '",
"return",
"txt"
] | Format the output row with stats | [
"Format",
"the",
"output",
"row",
"with",
"stats"
] | python | train |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/gallery/gallery_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/gallery/gallery_client.py#L961-L981 | def share_extension_with_host(self, publisher_name, extension_name, host_type, host_name):
"""ShareExtensionWithHost.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str host_type:
:param str host_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if host_type is not None:
route_values['hostType'] = self._serialize.url('host_type', host_type, 'str')
if host_name is not None:
route_values['hostName'] = self._serialize.url('host_name', host_name, 'str')
self._send(http_method='POST',
location_id='328a3af8-d124-46e9-9483-01690cd415b9',
version='5.0-preview.1',
route_values=route_values) | [
"def",
"share_extension_with_host",
"(",
"self",
",",
"publisher_name",
",",
"extension_name",
",",
"host_type",
",",
"host_name",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"publisher_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'publisherName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'publisher_name'",
",",
"publisher_name",
",",
"'str'",
")",
"if",
"extension_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'extensionName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'extension_name'",
",",
"extension_name",
",",
"'str'",
")",
"if",
"host_type",
"is",
"not",
"None",
":",
"route_values",
"[",
"'hostType'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'host_type'",
",",
"host_type",
",",
"'str'",
")",
"if",
"host_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'hostName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'host_name'",
",",
"host_name",
",",
"'str'",
")",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'POST'",
",",
"location_id",
"=",
"'328a3af8-d124-46e9-9483-01690cd415b9'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
")"
] | ShareExtensionWithHost.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str host_type:
:param str host_name: | [
"ShareExtensionWithHost",
".",
"[",
"Preview",
"API",
"]",
":",
"param",
"str",
"publisher_name",
":",
":",
"param",
"str",
"extension_name",
":",
":",
"param",
"str",
"host_type",
":",
":",
"param",
"str",
"host_name",
":"
] | python | train |
idlesign/django-admirarchy | admirarchy/utils.py | https://github.com/idlesign/django-admirarchy/blob/723e4fd212fdebcc156492cb16b9d65356f5ca73/admirarchy/utils.py#L131-L139 | def get_results(self, request):
"""Gets query set results.
:param request:
:return:
"""
super(HierarchicalChangeList, self).get_results(request)
self._hierarchy.hook_get_results(self) | [
"def",
"get_results",
"(",
"self",
",",
"request",
")",
":",
"super",
"(",
"HierarchicalChangeList",
",",
"self",
")",
".",
"get_results",
"(",
"request",
")",
"self",
".",
"_hierarchy",
".",
"hook_get_results",
"(",
"self",
")"
] | Gets query set results.
:param request:
:return: | [
"Gets",
"query",
"set",
"results",
"."
] | python | train |
acorg/dark-matter | dark/fastq.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/fastq.py#L26-L38 | def iter(self):
"""
Iterate over the sequences in the files in self.files_, yielding each
as an instance of the desired read class.
"""
for _file in self._files:
with asHandle(_file) as fp:
# Use FastqGeneralIterator because it provides access to
# the unconverted quality string (i.e., it doesn't try to
# figure out the numeric quality values, which we don't
# care about at this point).
for sequenceId, sequence, quality in FastqGeneralIterator(fp):
yield self.readClass(sequenceId, sequence, quality) | [
"def",
"iter",
"(",
"self",
")",
":",
"for",
"_file",
"in",
"self",
".",
"_files",
":",
"with",
"asHandle",
"(",
"_file",
")",
"as",
"fp",
":",
"# Use FastqGeneralIterator because it provides access to",
"# the unconverted quality string (i.e., it doesn't try to",
"# figure out the numeric quality values, which we don't",
"# care about at this point).",
"for",
"sequenceId",
",",
"sequence",
",",
"quality",
"in",
"FastqGeneralIterator",
"(",
"fp",
")",
":",
"yield",
"self",
".",
"readClass",
"(",
"sequenceId",
",",
"sequence",
",",
"quality",
")"
] | Iterate over the sequences in the files in self.files_, yielding each
as an instance of the desired read class. | [
"Iterate",
"over",
"the",
"sequences",
"in",
"the",
"files",
"in",
"self",
".",
"files_",
"yielding",
"each",
"as",
"an",
"instance",
"of",
"the",
"desired",
"read",
"class",
"."
] | python | train |
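The iter record above leans on Biopython's FastqGeneralIterator, which yields (title, sequence, quality) string triples without decoding the quality scores; a standalone sketch on an in-memory FASTQ record:

from io import StringIO
from Bio.SeqIO.QualityIO import FastqGeneralIterator

fastq = StringIO('@read1\nACGT\n+\nIIII\n')
for title, seq, qual in FastqGeneralIterator(fastq):
    print(title, seq, qual)    # read1 ACGT IIII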
hellosign/hellosign-python-sdk | hellosign_sdk/utils/hsaccesstokenauth.py | https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/utils/hsaccesstokenauth.py#L53-L69 | def from_response(self, response_data):
''' Builds a new HSAccessTokenAuth straight from response data
Args:
response_data (dict): Response data to use
Returns:
A HSAccessTokenAuth object
'''
return HSAccessTokenAuth(
response_data['access_token'],
response_data['token_type'],
response_data['refresh_token'],
response_data['expires_in'],
response_data.get('state') # Not always here
) | [
"def",
"from_response",
"(",
"self",
",",
"response_data",
")",
":",
"return",
"HSAccessTokenAuth",
"(",
"response_data",
"[",
"'access_token'",
"]",
",",
"response_data",
"[",
"'token_type'",
"]",
",",
"response_data",
"[",
"'refresh_token'",
"]",
",",
"response_data",
"[",
"'expires_in'",
"]",
",",
"response_data",
".",
"get",
"(",
"'state'",
")",
"# Not always here",
")"
] | Builds a new HSAccessTokenAuth straight from response data
Args:
response_data (dict): Response data to use
Returns:
A HSAccessTokenAuth object | [
"Builds",
"a",
"new",
"HSAccessTokenAuth",
"straight",
"from",
"response",
"data"
] | python | train |
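Since from_response simply unpacks the token payload, the equivalent direct construction can be sketched as below; the dict values are placeholders and the import path is inferred from this record's module path.

# Sketch only: the dict keys mirror exactly what from_response reads,
# and the values are placeholders.
from hellosign_sdk.utils.hsaccesstokenauth import HSAccessTokenAuth

response_data = {
    'access_token': 'abc123',
    'token_type': 'Bearer',
    'refresh_token': 'def456',
    'expires_in': 3600,
    # 'state' is optional; from_response falls back to .get('state')
}

token = HSAccessTokenAuth(
    response_data['access_token'],
    response_data['token_type'],
    response_data['refresh_token'],
    response_data['expires_in'],
    response_data.get('state'),
)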
jstitch/MambuPy | MambuPy/rest/mambuactivity.py | https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuactivity.py#L67-L90 | def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Activity object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuActivity just
created.
"""
for n,a in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
activity = self.mambuactivityclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambuactivityclass = MambuActivity
activity = self.mambuactivityclass(urlfunc=None, entid=None, *args, **kwargs)
activity.init(a, *args, **kwargs)
self.attrs[n] = activity | [
"def",
"convertDict2Attrs",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"n",
",",
"a",
"in",
"enumerate",
"(",
"self",
".",
"attrs",
")",
":",
"# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!",
"try",
":",
"params",
"=",
"self",
".",
"params",
"except",
"AttributeError",
"as",
"aerr",
":",
"params",
"=",
"{",
"}",
"kwargs",
".",
"update",
"(",
"params",
")",
"try",
":",
"activity",
"=",
"self",
".",
"mambuactivityclass",
"(",
"urlfunc",
"=",
"None",
",",
"entid",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"AttributeError",
"as",
"ae",
":",
"self",
".",
"mambuactivityclass",
"=",
"MambuActivity",
"activity",
"=",
"self",
".",
"mambuactivityclass",
"(",
"urlfunc",
"=",
"None",
",",
"entid",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"activity",
".",
"init",
"(",
"a",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"attrs",
"[",
"n",
"]",
"=",
"activity"
] | The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Activity object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuActivity just
created. | [
"The",
"trick",
"for",
"iterable",
"Mambu",
"Objects",
"comes",
"here",
":"
] | python | train |
has2k1/plotnine | plotnine/facets/facet.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L654-L697 | def eval_facet_vars(data, vars, env):
"""
Evaluate facet variables
Parameters
----------
data : DataFrame
Facet dataframe
vars : list
Facet variables
env : environment
Plot environment
Returns
-------
facet_vals : DataFrame
Facet values that correspond to the specified
variables.
"""
# To allow expressions in facet formula
def I(value):
return value
env = env.with_outer_namespace({'I': I})
facet_vals = pd.DataFrame(index=data.index)
for name in vars:
if name in data:
# This is a limited solution. If a keyword is
# part of an expression it will fail in the
# else statement below
res = data[name]
elif str.isidentifier(name):
# All other non-statements
continue
else:
# Statements
try:
res = env.eval(name, inner_namespace=data)
except NameError:
continue
facet_vals[name] = res
return facet_vals | [
"def",
"eval_facet_vars",
"(",
"data",
",",
"vars",
",",
"env",
")",
":",
"# To allow expressions in facet formula",
"def",
"I",
"(",
"value",
")",
":",
"return",
"value",
"env",
"=",
"env",
".",
"with_outer_namespace",
"(",
"{",
"'I'",
":",
"I",
"}",
")",
"facet_vals",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"data",
".",
"index",
")",
"for",
"name",
"in",
"vars",
":",
"if",
"name",
"in",
"data",
":",
"# This is a limited solution. If a keyword is",
"# part of an expression it will fail in the",
"# else statement below",
"res",
"=",
"data",
"[",
"name",
"]",
"elif",
"str",
".",
"isidentifier",
"(",
"name",
")",
":",
"# All other non-statements",
"continue",
"else",
":",
"# Statements",
"try",
":",
"res",
"=",
"env",
".",
"eval",
"(",
"name",
",",
"inner_namespace",
"=",
"data",
")",
"except",
"NameError",
":",
"continue",
"facet_vals",
"[",
"name",
"]",
"=",
"res",
"return",
"facet_vals"
] | Evaluate facet variables
Parameters
----------
data : DataFrame
Factet dataframe
vars : list
Facet variables
env : environment
Plot environment
Returns
-------
facet_vals : DataFrame
Facet values that correspond to the specified
variables. | [
"Evaluate",
"facet",
"variables"
] | python | train |
scopus-api/scopus | scopus/utils/get_encoded_text.py | https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/utils/get_encoded_text.py#L15-L33 | def get_encoded_text(container, xpath):
"""Return text for element at xpath in the container xml if it is there.
Parameters
----------
container : xml.etree.ElementTree.Element
The element to be searched in.
xpath : str
The path to be looked for.
Returns
-------
result : str
"""
try:
return "".join(container.find(xpath, ns).itertext())
except AttributeError:
return None | [
"def",
"get_encoded_text",
"(",
"container",
",",
"xpath",
")",
":",
"try",
":",
"return",
"\"\"",
".",
"join",
"(",
"container",
".",
"find",
"(",
"xpath",
",",
"ns",
")",
".",
"itertext",
"(",
")",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | Return text for element at xpath in the container xml if it is there.
Parameters
----------
container : xml.etree.ElementTree.Element
The element to be searched in.
xpath : str
The path to be looked for.
Returns
-------
result : str | [
"Return",
"text",
"for",
"element",
"at",
"xpath",
"in",
"the",
"container",
"xml",
"if",
"it",
"is",
"there",
"."
] | python | train |
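The module-level ns namespace map is not part of this record, so the self-contained sketch below supplies its own namespace dict to illustrate the same find/itertext/AttributeError pattern.

# Self-contained illustration; the 'dc' namespace stands in for the
# module-level ns mapping that this record does not include.
import xml.etree.ElementTree as ET

ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
doc = ET.fromstring(
    '<root xmlns:dc="http://purl.org/dc/elements/1.1/">'
    '<dc:title>An example title</dc:title></root>'
)

def get_encoded_text(container, xpath):
    # join all text fragments, or return None when the node is missing
    try:
        return "".join(container.find(xpath, ns).itertext())
    except AttributeError:
        return None

print(get_encoded_text(doc, 'dc:title'))    # 'An example title'
print(get_encoded_text(doc, 'dc:creator'))  # None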
jay-johnson/network-pipeline | network_pipeline/create_layer_2_socket.py | https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/create_layer_2_socket.py#L9-L22 | def create_layer_2_socket():
"""create_layer_2_socket"""
# create a socket for recording layer 2, 3 and 4 frames
s = None
try:
log.info("Creating l234 socket")
s = socket.socket(socket.AF_PACKET,
socket.SOCK_RAW,
socket.ntohs(0x0003))
except socket.error as msg:
log.error(("Socket could not be created ex={}")
.format(msg))
return s | [
"def",
"create_layer_2_socket",
"(",
")",
":",
"# create a socket for recording layer 2, 3 and 4 frames",
"s",
"=",
"None",
"try",
":",
"log",
".",
"info",
"(",
"\"Creating l234 socket\"",
")",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_PACKET",
",",
"socket",
".",
"SOCK_RAW",
",",
"socket",
".",
"ntohs",
"(",
"0x0003",
")",
")",
"except",
"socket",
".",
"error",
"as",
"msg",
":",
"log",
".",
"error",
"(",
"(",
"\"Socket could not be created ex={}\"",
")",
".",
"format",
"(",
"msg",
")",
")",
"return",
"s"
] | create_layer_2_socket | [
"create_layer_2_socket"
] | python | train |
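A hedged usage sketch: AF_PACKET raw sockets are Linux-only and normally require root privileges, and the import path is inferred from this record's module path.

# Usage sketch (Linux only; raw sockets normally require root).
from network_pipeline.create_layer_2_socket import create_layer_2_socket

s = create_layer_2_socket()
if s is None:
    print("socket could not be created - check privileges")
else:
    frame, address = s.recvfrom(65535)  # one layer-2 frame
    print("captured %d bytes on %s" % (len(frame), address[0]))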
peeringdb/django-peeringdb | django_peeringdb/client_adaptor/backend.py | https://github.com/peeringdb/django-peeringdb/blob/2a32aae8a7e1c11ab6e5a873bb19619c641098c8/django_peeringdb/client_adaptor/backend.py#L157-L171 | def detect_missing_relations(self, obj, exc):
"""
Parse error messages and collect the missing-relationship errors
as a dict of Resource -> {id set}
"""
missing = defaultdict(set)
for name, err in exc.error_dict.items():
# check if it was a relationship that doesn't exist locally
pattern = r".+ with id (\d+) does not exist.+"
m = re.match(pattern, str(err))
if m:
field = obj._meta.get_field(name)
res = self.get_resource(field.related_model)
missing[res].add(int(m.group(1)))
return missing | [
"def",
"detect_missing_relations",
"(",
"self",
",",
"obj",
",",
"exc",
")",
":",
"missing",
"=",
"defaultdict",
"(",
"set",
")",
"for",
"name",
",",
"err",
"in",
"exc",
".",
"error_dict",
".",
"items",
"(",
")",
":",
"# check if it was a relationship that doesnt exist locally",
"pattern",
"=",
"r\".+ with id (\\d+) does not exist.+\"",
"m",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"str",
"(",
"err",
")",
")",
"if",
"m",
":",
"field",
"=",
"obj",
".",
"_meta",
".",
"get_field",
"(",
"name",
")",
"res",
"=",
"self",
".",
"get_resource",
"(",
"field",
".",
"related_model",
")",
"missing",
"[",
"res",
"]",
".",
"add",
"(",
"int",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
")",
"return",
"missing"
] | Parse error messages and collect the missing-relationship errors
as a dict of Resource -> {id set} | [
"Parse",
"error",
"messages",
"and",
"collect",
"the",
"missing",
"-",
"relationship",
"errors",
"as",
"a",
"dict",
"of",
"Resource",
"-",
">",
"{",
"id",
"set",
"}"
] | python | train |
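The regex is the heart of the helper above; the standalone check below runs it against an illustrative validation message (not captured from a real Django run).

# Standalone check of the pattern used above; the message is illustrative.
import re

pattern = r".+ with id (\d+) does not exist.+"
message = "net object with id 42 does not exist locally"

m = re.match(pattern, message)
if m:
    print(int(m.group(1)))  # 42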
Alignak-monitoring/alignak | alignak/util.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/util.py#L1145-L1162 | def filter_service_by_servicegroup_name(group):
"""Filter for service
Filter on group
:param group: group to filter
:type group: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if group in service.servicegroups"""
service = items["service"]
if service is None:
return False
return group in [items["servicegroups"][g].servicegroup_name for g in service.servicegroups]
return inner_filter | [
"def",
"filter_service_by_servicegroup_name",
"(",
"group",
")",
":",
"def",
"inner_filter",
"(",
"items",
")",
":",
"\"\"\"Inner filter for service. Accept if group in service.servicegroups\"\"\"",
"service",
"=",
"items",
"[",
"\"service\"",
"]",
"if",
"service",
"is",
"None",
":",
"return",
"False",
"return",
"group",
"in",
"[",
"items",
"[",
"\"servicegroups\"",
"]",
"[",
"g",
"]",
".",
"servicegroup_name",
"for",
"g",
"in",
"service",
".",
"servicegroups",
"]",
"return",
"inner_filter"
] | Filter for service
Filter on group
:param group: group to filter
:type group: str
:return: Filter
:rtype: bool | [
"Filter",
"for",
"service",
"Filter",
"on",
"group"
] | python | train |
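A sketch of applying the returned closure; the shapes of the items dict and the stub objects below are assumptions inferred from the keys the inner filter reads.

# Sketch; the items/servicegroups shapes are assumptions based on the
# keys read by inner_filter above.
from types import SimpleNamespace
from alignak.util import filter_service_by_servicegroup_name

web_group = SimpleNamespace(servicegroup_name='web')
service = SimpleNamespace(servicegroups=['sg1'])

items = {
    'service': service,
    'servicegroups': {'sg1': web_group},
}

accepts_web = filter_service_by_servicegroup_name('web')
print(accepts_web(items))  # True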
xolox/python-qpass | qpass/__init__.py | https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L219-L224 | def entries(self):
"""A list of :class:`PasswordEntry` objects."""
passwords = []
for store in self.stores:
passwords.extend(store.entries)
return natsort(passwords, key=lambda e: e.name) | [
"def",
"entries",
"(",
"self",
")",
":",
"passwords",
"=",
"[",
"]",
"for",
"store",
"in",
"self",
".",
"stores",
":",
"passwords",
".",
"extend",
"(",
"store",
".",
"entries",
")",
"return",
"natsort",
"(",
"passwords",
",",
"key",
"=",
"lambda",
"e",
":",
"e",
".",
"name",
")"
] | A list of :class:`PasswordEntry` objects. | [
"A",
"list",
"of",
":",
"class",
":",
"PasswordEntry",
"objects",
"."
] | python | train |
nitely/kua | kua/routes.py | https://github.com/nitely/kua/blob/6ffc9d0426e87a34cf8c3f8e7aedac6d35e59cb6/kua/routes.py#L213-L231 | def _deconstruct_url(self, url: str) -> List[str]:
"""
Split a regular URL into parts
:param url: A normalized URL
:return: Parts of the URL
:raises kua.routes.RouteError: \
If the depth of the URL exceeds\
the max depth of the deepest\
registered pattern
:private:
"""
parts = url.split('/', self._max_depth + 1)
if depth_of(parts) > self._max_depth:
raise RouteError('No match')
return parts | [
"def",
"_deconstruct_url",
"(",
"self",
",",
"url",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"parts",
"=",
"url",
".",
"split",
"(",
"'/'",
",",
"self",
".",
"_max_depth",
"+",
"1",
")",
"if",
"depth_of",
"(",
"parts",
")",
">",
"self",
".",
"_max_depth",
":",
"raise",
"RouteError",
"(",
"'No match'",
")",
"return",
"parts"
] | Split a regular URL into parts
:param url: A normalized URL
:return: Parts of the URL
:raises kua.routes.RouteError: \
If the depth of the URL exceeds\
the max depth of the deepest\
registered pattern
:private: | [
"Split",
"a",
"regular",
"URL",
"into",
"parts"
] | python | train |
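The important detail is the maxsplit argument: anything deeper than the registered maximum stays glued to the last element and then fails the depth check. A standalone illustration (with an arbitrary max depth) follows.

# Standalone illustration of split-with-maxsplit; max_depth=3 is arbitrary.
max_depth = 3

print('api/users/42'.split('/', max_depth + 1))
# ['api', 'users', '42']

print('api/users/42/posts/7/comments'.split('/', max_depth + 1))
# ['api', 'users', '42', 'posts', '7/comments']  -> overflow stays joined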
milesrichardson/ParsePy | parse_rest/datatypes.py | https://github.com/milesrichardson/ParsePy/blob/7c52d8a5dc63bb7c3b0b8c0c09d032b4bc7299ea/parse_rest/datatypes.py#L598-L610 | def increment(self, key, amount=1):
"""
Increment one value in the object. Note that this happens immediately:
it does not wait for save() to be called
"""
payload = {
key: {
'__op': 'Increment',
'amount': amount
}
}
self.__class__.PUT(self._absolute_url, **payload)
self.__dict__[key] += amount | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"amount",
"=",
"1",
")",
":",
"payload",
"=",
"{",
"key",
":",
"{",
"'__op'",
":",
"'Increment'",
",",
"'amount'",
":",
"amount",
"}",
"}",
"self",
".",
"__class__",
".",
"PUT",
"(",
"self",
".",
"_absolute_url",
",",
"*",
"*",
"payload",
")",
"self",
".",
"__dict__",
"[",
"key",
"]",
"+=",
"amount"
] | Increment one value in the object. Note that this happens immediately:
it does not wait for save() to be called | [
"Increment",
"one",
"value",
"in",
"the",
"object",
".",
"Note",
"that",
"this",
"happens",
"immediately",
":",
"it",
"does",
"not",
"wait",
"for",
"save",
"()",
"to",
"be",
"called"
] | python | train |
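A hedged usage sketch: GameScore and the score field are illustrative, and parse_rest credentials are assumed to have been registered beforehand.

# Usage sketch; GameScore/'score' are illustrative and a prior
# parse_rest.connection.register(...) call is assumed.
from parse_rest.datatypes import Object

class GameScore(Object):
    pass

score = GameScore(score=0)
score.save()
score.increment('score', 5)  # issues the PUT immediately, per the docstring
print(score.score)           # 5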
tonybaloney/wily | wily/__main__.py | https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/__main__.py#L299-L313 | def clean(ctx, yes):
"""Clear the .wily/ folder."""
config = ctx.obj["CONFIG"]
if not exists(config):
handle_no_cache(ctx)
if not yes:
p = input("Are you sure you want to delete wily cache? [y/N]")
if p.lower() != "y":
exit(0)
from wily.cache import clean
clean(config) | [
"def",
"clean",
"(",
"ctx",
",",
"yes",
")",
":",
"config",
"=",
"ctx",
".",
"obj",
"[",
"\"CONFIG\"",
"]",
"if",
"not",
"exists",
"(",
"config",
")",
":",
"handle_no_cache",
"(",
"ctx",
")",
"if",
"not",
"yes",
":",
"p",
"=",
"input",
"(",
"\"Are you sure you want to delete wily cache? [y/N]\"",
")",
"if",
"p",
".",
"lower",
"(",
")",
"!=",
"\"y\"",
":",
"exit",
"(",
"0",
")",
"from",
"wily",
".",
"cache",
"import",
"clean",
"clean",
"(",
"config",
")"
] | Clear the .wily/ folder. | [
"Clear",
"the",
".",
"wily",
"/",
"folder",
"."
] | python | train |
davisd50/sparc.db | sparc/db/splunk/search.py | https://github.com/davisd50/sparc.db/blob/12dfcb51f7bdc4fbe9c8ec3b5af65059c1f66392/sparc/db/splunk/search.py#L25-L37 | def saved_searches_factory_helper(splunk_connection_info):
"""Return a valid splunklib.client.SavedSearches object
kwargs:
- see splunklib.client.connect()
"""
if not ISplunkConnectionInfo.providedBy(splunk_connection_info):
DoesNotImplement('argument did not provide expected interface')
service = connect(**splunk_connection_info)
saved_searches = service.saved_searches
for s in saved_searches:
logger.debug("Found Splunk saved search with name %s" % s.name)
return saved_searches | [
"def",
"saved_searches_factory_helper",
"(",
"splunk_connection_info",
")",
":",
"if",
"not",
"ISplunkConnectionInfo",
".",
"providedBy",
"(",
"splunk_connection_info",
")",
":",
"DoesNotImplement",
"(",
"'argument did not provide expected interface'",
")",
"service",
"=",
"connect",
"(",
"*",
"*",
"splunk_connection_info",
")",
"saved_searches",
"=",
"service",
".",
"saved_searches",
"for",
"s",
"in",
"saved_searches",
":",
"logger",
".",
"debug",
"(",
"\"Found Splunk saved search with name %s\"",
"%",
"s",
".",
"name",
")",
"return",
"saved_searches"
] | Return a valid splunklib.client.SavedSearches object
kwargs:
- see splunklib.client.connect() | [
"Return",
"a",
"valid",
"splunklib",
".",
"client",
".",
"SavedSearches",
"object",
"kwargs",
":",
"-",
"see",
"splunklib",
".",
"client",
".",
"connect",
"()"
] | python | train |