repo | path | url | code | docstring | language | partition
---|---|---|---|---|---|---
mbakker7/timml | timml/equation.py | https://github.com/mbakker7/timml/blob/91e99ad573cb8a9ad8ac1fa041c3ca44520c2390/timml/equation.py#L250-L273 | def equation(self):
"""Mix-in class that returns matrix rows for difference in head between inside and
outside equals zero
Returns matrix part (nunknowns,neq)
Returns rhs part nunknowns
"""
mat = np.empty((self.nunknowns, self.model.neq))
rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero
for icp in range(self.ncp):
istart = icp * self.nlayers
ieq = 0
for e in self.model.elementlist:
if e.nunknowns > 0:
qxin, qyin = e.disvecinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)
qxout, qyout = e.disvecinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)
mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \
(qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * self.sinnorm[icp]
ieq += e.nunknowns
else:
qxin, qyin = e.disveclayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin)
qxout, qyout = e.disveclayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout)
rhs[istart:istart + self.nlayers] -= (qxin - qxout) * self.cosnorm[icp] + (qyin - qyout) * \
self.sinnorm[icp]
return mat, rhs | Mix-in class that returns matrix rows for difference in head between inside and
outside equals zero
Returns matrix part (nunknowns,neq)
Returns rhs part nunknowns | python | train |
spdx/tools-python | spdx/parsers/tagvalue.py | https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvalue.py#L245-L250 | def p_extr_lic_name_value_1(self, p):
"""extr_lic_name_value : LINE"""
if six.PY2:
p[0] = p[1].decode(encoding='utf-8')
else:
p[0] = p[1] | extr_lic_name_value : LINE | python | valid |
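Note: in PLY, the grammar production lives in the handler's docstring and `p` is the parse stack. A minimal, runnable analogue of the rule above (the token and grammar names are illustrative; assumes `ply` is installed):

```python
import ply.lex as lex
import ply.yacc as yacc

tokens = ('LINE',)
t_LINE = r'[^\n]+'            # one whole line of text

def t_error(t):
    t.lexer.skip(1)

def p_extr_lic_name_value(p):
    """extr_lic_name_value : LINE"""
    p[0] = p[1]               # the production's value is the matched text

def p_error(p):
    pass

parser = yacc.yacc()
print(parser.parse('MIT License', lexer=lex.lex()))   # -> 'MIT License'
```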
pauleveritt/kaybee | kaybee/plugins/articles/image_type.py | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/articles/image_type.py#L37-L61 | def env_updated(self,
kb_app,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
resource
):
""" Make images and enter them in Sphinx's output writer """
docname = resource.docname
srcdir = sphinx_app.env.srcdir
source_imgpath = self.source_filename(docname, srcdir)
# Copy the image to the Sphinx build directory
build_dir = sphinx_app.outdir
docpath = Path(docname)
parent = docpath.parent
target_imgpath = str(Path(build_dir, parent, self.filename))
# Does the target dir exist yet in the build dir? Probably not. If
# not, make it
target_dir = Path(build_dir, parent)
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(source_imgpath, target_imgpath) | Make images and enter them in Sphinx's output writer | python | train |
jobovy/galpy | galpy/actionAngle/actionAngleAxi.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleAxi.py#L108-L142 | def TR(self,**kwargs): #pragma: no cover
"""
NAME:
TR
PURPOSE:
Calculate the radial period for a power-law rotation curve
INPUT:
scipy.integrate.quadrature keywords
OUTPUT:
T_R(R,vT,vT)*vc/ro + estimate of the error
HISTORY:
2010-12-01 - Written - Bovy (NYU)
"""
if hasattr(self,'_TR'):
return self._TR
(rperi,rap)= self.calcRapRperi(**kwargs)
if nu.fabs(rap-rperi)/rap < 10.**-4.: #Rough limit
self._TR= 2.*m.pi/epifreq(self._pot,self._R,use_physical=False)
return self._TR
Rmean= m.exp((m.log(rperi)+m.log(rap))/2.)
EL= self.calcEL(**kwargs)
E, L= EL
TR= 0.
if Rmean > rperi:
TR+= integrate.quadrature(_TRAxiIntegrandSmall,
0.,m.sqrt(Rmean-rperi),
args=(E,L,self._pot,rperi),
**kwargs)[0]
if Rmean < rap:
TR+= integrate.quadrature(_TRAxiIntegrandLarge,
0.,m.sqrt(rap-Rmean),
args=(E,L,self._pot,rap),
**kwargs)[0]
self._TR= 2.*TR
return self._TR | NAME:
TR
PURPOSE:
Calculate the radial period for a power-law rotation curve
INPUT:
scipy.integrate.quadrature keywords
OUTPUT:
T_R(R,vT,vT)*vc/ro + estimate of the error
HISTORY:
2010-12-01 - Written - Bovy (NYU) | python | train |
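Note: the split integrals above use a square-root substitution so the quadrature never evaluates the inverse-square-root singularity at the turning points. A self-contained analogue of that trick (a harmonic-oscillator toy integral with a known exact value, not galpy's actual integrand), using the same legacy scipy routine as the source:

```python
import numpy as np
from scipy import integrate

A = 1.0                                     # turning point of the toy problem
def integrand(t):                           # substitution t = sqrt(A - x)
    return 2.0 / np.sqrt(2.0 * A - t * t)   # equals dx / sqrt(A**2 - x**2)

val, err = integrate.quadrature(integrand, 0.0, np.sqrt(A))
print(val, np.pi / 2)                       # smooth integrand -> matches pi/2
```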
mitsei/dlkit | dlkit/json_/grading/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L5012-L5032 | def get_parent_gradebooks(self, gradebook_id):
"""Gets the parents of the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
return: (osid.grading.GradebookList) - the parents of the
gradebook
raise: NotFound - ``gradebook_id`` is not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=gradebook_id)
return GradebookLookupSession(
self._proxy,
self._runtime).get_gradebooks_by_ids(
list(self.get_parent_gradebook_ids(gradebook_id))) | Gets the parents of the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
return: (osid.grading.GradebookList) - the parents of the
gradebook
raise: NotFound - ``gradebook_id`` is not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | python | train |
nicferrier/md | src/mdlib/cmdln.py | https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/cmdln.py#L1123-L1215 | def _dispatch_cmd(self, handler, argv):
"""Introspect sub-command handler signature to determine how to
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args.
"""
co_argcount = handler.__func__.__code__.co_argcount
if co_argcount == 2: # handler ::= do_foo(self, argv)
return handler(argv)
elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
try:
optparser = handler.optparser
except AttributeError:
optparser = handler.__func__.optparser = SubCmdOptionParser()
assert isinstance(optparser, SubCmdOptionParser)
# apply subcommand options' defaults from config files, if any.
subcmd = handler.__name__.split('do_', 1)[1]
optparser.set_defaults(**self.get_option_defaults(subcmd))
optparser.set_cmdln_info(self, argv[0])
try:
opts, args = optparser.parse_args(argv[1:])
except StopOptionProcessing:
#TODO: this doesn't really fly for a replacement of
# optparse.py behaviour, does it?
return 0 # Normal command termination
try:
return handler(argv[0], opts, *args)
except TypeError:
_, ex, _ = sys.exc_info()
# Some TypeError's are user errors:
# do_foo() takes at least 4 arguments (3 given)
# do_foo() takes at most 5 arguments (6 given)
# do_foo() takes exactly 5 arguments (6 given)
# do_foo() takes exactly 5 positional arguments (6 given)
# Raise CmdlnUserError for these with a suitably
# massaged error message.
tb = sys.exc_info()[2] # the traceback object
if tb.tb_next is not None:
# If the traceback is more than one level deep, then the
# TypeError do *not* happen on the "handler(...)" call
# above. In that we don't want to handle it specially
# here: it would falsely mask deeper code errors.
raise
msg = ex.args[0]
match = _INCORRECT_NUM_ARGS_RE.search(msg)
if match:
msg = list(match.groups())
msg[1] = int(msg[1]) - 3
if msg[1] == 1:
msg[2] = msg[2].replace("arguments", "argument")
msg[3] = int(msg[3]) - 3
msg = ''.join(map(str, msg))
raise CmdlnUserError(msg)
else:
raise
else:
raise CmdlnError("incorrect argcount for %s(): takes %d, must "
"take 2 for 'argv' signature or 3+ for 'opts' "
"signature" % (handler.__name__, co_argcount)) | Introspect sub-command handler signature to determine how to
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args. | python | train |
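Note: the dispatch above keys off the handler's positional-argument count. A tiny self-contained illustration of that introspection:

```python
def do_foo(self, argv):                 # raw 'argv' signature
    pass

def do_bar(self, subcmd, opts, *args):  # optparse 'opts' signature
    pass

print(do_foo.__code__.co_argcount)      # 2 -> dispatched with argv
print(do_bar.__code__.co_argcount)      # 3 -> options parsed first
```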
fastai/fastai | fastai/callbacks/tensorboard.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L294-L297 | def _add_gradient_scalar(self, name:str, scalar_value)->None:
"Writes a single scalar value for a gradient statistic to Tensorboard."
tag = self.name + '/gradients/' + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration) | Writes a single scalar value for a gradient statistic to Tensorboard. | python | train |
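Note: a hedged sketch of the underlying writer call (assumes torch's `SummaryWriter`, which shares the `add_scalar` signature used above; the tag and directory are hypothetical):

```python
from torch.utils.tensorboard import SummaryWriter

tbwriter = SummaryWriter(log_dir='runs/demo')
tbwriter.add_scalar(tag='model/gradients/mean_norm',
                    scalar_value=0.013, global_step=1)
tbwriter.close()
```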
GoogleCloudPlatform/datastore-ndb-python | ndb/eventloop.py | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L182-L194 | def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
"""
self.idlers.append((callback, args, kwds)) | Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed. | python | train |
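Note: the docstring above defines a three-way return contract for idle callbacks. A minimal, self-contained toy loop honoring that contract (a standalone sketch, not ndb's actual scheduler):

```python
idlers = []

def add_idle(callback, *args, **kwds):
    idlers.append((callback, args, kwds))

def run_idle():
    """Run one idle callback; True means it did some work."""
    callback, args, kwds = idlers.pop(0)
    result = callback(*args, **kwds)
    if result is not None:                      # None -> drop the callback
        idlers.append((callback, args, kwds))   # False/True -> reschedule
    return bool(result)

state = {'work': 2}
def drain(state):
    if state['work'] == 0:
        return None                             # done: remove me
    state['work'] -= 1
    return True                                 # worked: reschedule soon

add_idle(drain, state)
while idlers and run_idle():
    pass
```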
XRDX/pyleap | pyleap/collision.py | https://github.com/XRDX/pyleap/blob/234c722cfbe66814254ab0d8f67d16b0b774f4d5/pyleap/collision.py#L66-L88 | def line_cross(x1, y1, x2, y2, x3, y3, x4, y4):
""" Determine whether two line segments intersect """
# out of the rect
if min(x1, x2) > max(x3, x4) or max(x1, x2) < min(x3, x4) or \
min(y1, y2) > max(y3, y4) or max(y1, y2) < min(y3, y4):
return False
# same slope rate
if ((y1 - y2) * (x3 - x4) == (x1 - x2) * (y3 - y4)):
return False
if cross_product(x3, y3, x2, y2, x4, y4) * cross_product(x3, y3, x4, y4, x1, y1) < 0 or \
cross_product(x1, y1, x4, y4, x2, y2) * cross_product(x1, y1, x2, y2, x3, y3) < 0:
return False
# get collide point
b1 = (y2 - y1) * x1 + (x1 - x2) * y1
b2 = (y4 - y3) * x3 + (x3 - x4) * y3
D = (x2 - x1) * (y4 - y3) - (x4 - x3) * (y2 - y1)
D1 = b2 * (x2 - x1) - b1 * (x4 - x3)
D2 = b2 * (y2 - y1) - b1 * (y4 - y3)
return P(D1 / D, D2 / D) | Determine whether two line segments intersect | python | train |
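Note: the collision point above comes from Cramer's rule on the two implicit line equations. A quick self-contained check of that math (inlined, without pyleap's `cross_product`/`P` helpers): segments (0,0)-(2,2) and (0,2)-(2,0) meet at (1,1).

```python
x1, y1, x2, y2 = 0.0, 0.0, 2.0, 2.0
x3, y3, x4, y4 = 0.0, 2.0, 2.0, 0.0
b1 = (y2 - y1) * x1 + (x1 - x2) * y1   # line 1: (y2-y1)x + (x1-x2)y = b1
b2 = (y4 - y3) * x3 + (x3 - x4) * y3   # line 2 in the same implicit form
D  = (x2 - x1) * (y4 - y3) - (x4 - x3) * (y2 - y1)
D1 = b2 * (x2 - x1) - b1 * (x4 - x3)
D2 = b2 * (y2 - y1) - b1 * (y4 - y3)
assert (D1 / D, D2 / D) == (1.0, 1.0)  # intersection at (1, 1)
```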
MoseleyBioinformaticsLab/ctfile | ctfile/ctfile.py | https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L829-L841 | def add_sdfile(self, sdfile):
"""Add new ``SDfile`` to current ``SDfile``.
:param sdfile: ``SDfile`` instance.
:return: None.
:rtype: :py:obj:`None`.
"""
if not isinstance(sdfile, SDfile):
raise ValueError('Not a SDfile type: "{}"'.format(type(sdfile)))
for entry_id in sdfile:
self.add_molfile(molfile=sdfile[entry_id]['molfile'],
data=sdfile[entry_id]['data']) | Add new ``SDfile`` to current ``SDfile``.
:param sdfile: ``SDfile`` instance.
:return: None.
:rtype: :py:obj:`None`. | python | train |
cathalgarvey/deadlock | deadlock/crypto.py | https://github.com/cathalgarvey/deadlock/blob/30099b476ff767611ce617150a0c574fc03fdf79/deadlock/crypto.py#L139-L167 | def fancy(cls, contains, max_tries, inner=False, keepcase=False):
"""
Try to create a key with a chosen prefix, by starting with a 26-bit
urandom number and appending with 8-byte integers until prefix matches.
This function is naive, but has a max_tries argument which will abort when
reached with a ValueError.
TODO: make this smarter, in general. Variable byte length according to
expected attempts, warnings of expected duration of iteration, etc.
TODO: Implement multiprocessing to use poly-core machines fully:
- Shared list, each process checks if empty every cycle, aborts if
contains a value.
- Successful values are pushed to list, cancelling all processes?
- Server waits on all child processes then expects a list?
- Ensure child processes start with different random base numbers,
to avoid duplication?
- Investigate server/manager aspect of multiprocessing; mini-clustering?
"""
contains = contains if keepcase else contains.lower()
if not set(contains).issubset(base58.alphabet):
raise ValueError("Cannot find contained phrase '{}' as it contains non-b58 characters".format(contains))
basenum = os.urandom(26)
for i in range(max_tries):
k = nacl.public.PrivateKey(basenum + i.to_bytes(6, 'big'))
ukey = cls(k.public_key, k)
test_uid = ukey.userID if keepcase else ukey.userID.lower()
if test_uid.startswith(contains) or test_uid.endswith(contains) or (inner and contains in test_uid):
return ukey
else:
raise ValueError("Could not create key with desired prefix '{}' in {} attempts.".format(contains, max_tries)) | Try to create a key with a chosen prefix, by starting with a 26-bit
urandom number and appending with 8-byte integers until prefix matches.
This function is naive, but has a max_tries argument which will abort when
reached with a ValueError.
TODO: make this smarter, in general. Variable byte length according to
expected attempts, warnings of expected duration of iteration, etc.
TODO: Implement multiprocessing to use poly-core machines fully:
- Shared list, each process checks if empty every cycle, aborts if
contains a value.
- Successful values are pushed to list, cancelling all processes?
- Server waits on all child processes then expects a list?
- Ensure child processes start with different random base numbers,
to avoid duplication?
- Investigate server/manager aspect of multiprocessing; mini-clustering? | python | train |
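Note: a toy sketch of the brute-force vanity search above, substituting a hash for the real PyNaCl keypair so it runs standalone (illustrative only; `toy_vanity` and its base32 user ID are hypothetical stand-ins):

```python
import os, hashlib, base64

def toy_vanity(prefix, max_tries):
    base = os.urandom(26)                       # same 26-byte random base
    for i in range(max_tries):
        seed = base + i.to_bytes(6, 'big')      # append a counter, as above
        uid = base64.b32encode(hashlib.sha256(seed).digest()).decode().lower()
        if uid.startswith(prefix.lower()):
            return seed, uid
    raise ValueError('no match in {} attempts'.format(max_tries))

seed, uid = toy_vanity('a', 100000)   # 1-char prefix: ~32 tries expected
```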
annoviko/pyclustering | pyclustering/cluster/syncnet.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/syncnet.py#L342-L399 | def show_network(self):
"""!
@brief Shows connections in the network. It supports only 2-d and 3-d representation.
"""
if ( (self._ccore_network_pointer is not None) and (self._osc_conn is None) ):
self._osc_conn = sync_connectivity_matrix(self._ccore_network_pointer);
dimension = len(self._osc_loc[0]);
if ( (dimension != 3) and (dimension != 2) ):
raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented');
from matplotlib.font_manager import FontProperties;
from matplotlib import rcParams;
rcParams['font.sans-serif'] = ['Arial'];
rcParams['font.size'] = 12;
fig = plt.figure();
axes = None;
if (dimension == 2):
axes = fig.add_subplot(111);
elif (dimension == 3):
axes = fig.gca(projection='3d');
surface_font = FontProperties();
surface_font.set_name('Arial');
surface_font.set_size('12');
for i in range(0, self._num_osc, 1):
if (dimension == 2):
axes.plot(self._osc_loc[i][0], self._osc_loc[i][1], 'bo');
if (self._conn_represent == conn_represent.MATRIX):
for j in range(i, self._num_osc, 1): # draw connection between two points only one time
if (self.has_connection(i, j) == True):
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
else:
for j in self.get_neighbors(i):
if ( (self.has_connection(i, j) == True) and (i > j) ): # draw connection between two points only one time
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
elif (dimension == 3):
axes.scatter(self._osc_loc[i][0], self._osc_loc[i][1], self._osc_loc[i][2], c = 'b', marker = 'o');
if (self._conn_represent == conn_represent.MATRIX):
for j in range(i, self._num_osc, 1): # draw connection between two points only one time
if (self.has_connection(i, j) == True):
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
else:
for j in self.get_neighbors(i):
if ( (self.has_connection(i, j) == True) and (i > j) ): # draw connection between two points only one time
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
plt.grid();
plt.show(); | !
@brief Shows connections in the network. It supports only 2-d and 3-d representation. | python | valid |
respeaker/respeaker_python_library | respeaker/usb_hid/pywinusb_backend.py | https://github.com/respeaker/respeaker_python_library/blob/905a5334ccdc2d474ad973caf6a23d05c65bbb25/respeaker/usb_hid/pywinusb_backend.py#L63-L96 | def getAllConnectedInterface():
"""
returns all the connected CMSIS-DAP devices
"""
all_devices = hid.find_all_hid_devices()
# find devices with good vid/pid
all_mbed_devices = []
for d in all_devices:
if (d.product_name.find("MicArray") >= 0):
all_mbed_devices.append(d)
boards = []
for dev in all_mbed_devices:
try:
dev.open(shared=False)
report = dev.find_output_reports()
if (len(report) == 1):
new_board = PyWinUSB()
new_board.report = report[0]
new_board.vendor_name = dev.vendor_name
new_board.product_name = dev.product_name
new_board.serial_number = dev.serial_number
new_board.vid = dev.vendor_id
new_board.pid = dev.product_id
new_board.device = dev
new_board.device.set_raw_data_handler(new_board.rx_handler)
boards.append(new_board)
except Exception as e:
logging.error("Receiving Exception: %s", e)
dev.close()
return boards | returns all the connected CMSIS-DAP devices | python | train |
angr/claripy | claripy/vsa/valueset.py | https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/valueset.py#L243-L257 | def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs | Make a copy of self and return.
] | Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet | python | train |
Azure/azure-uamqp-python | uamqp/authentication/cbs_auth.py | https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/authentication/cbs_auth.py#L80-L89 | def close_authenticator(self):
"""Close the CBS auth channel and session."""
_logger.info("Shutting down CBS session on connection: %r.", self._connection.container_id)
try:
_logger.debug("Unlocked CBS to close on connection: %r.", self._connection.container_id)
self._cbs_auth.destroy()
_logger.info("Auth closed, destroying session on connection: %r.", self._connection.container_id)
self._session.destroy()
finally:
_logger.info("Finished shutting down CBS session on connection: %r.", self._connection.container_id) | Close the CBS auth channel and session. | python | train |
bihealth/vcfpy | vcfpy/header.py | https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/header.py#L353-L358 | def get_lines(self, key):
"""Return header lines having the given ``key`` as their type"""
if key in self._indices:
return self._indices[key].values()
else:
return [] | Return header lines having the given ``key`` as their type | python | train |
huffpostdata/python-pollster | pollster/api.py | https://github.com/huffpostdata/python-pollster/blob/276de8d66a92577b1143fd92a70cff9c35a1dfcf/pollster/api.py#L23-L50 | def charts_get(self, **kwargs):
"""
Charts
Returns a list of Charts, ordered by creation date (newest first). A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.charts_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Special string to index into the Array
:param str tags: Comma-separated list of tag slugs. Only Charts with one or more of these tags and Charts based on Questions with one or more of these tags will be returned.
:param date election_date: Date of an election, in YYYY-MM-DD format. Only Charts based on Questions pertaining to an election on this date will be returned.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.charts_get_with_http_info(**kwargs)
else:
(data) = self.charts_get_with_http_info(**kwargs)
return data | Charts
Returns a list of Charts, ordered by creation date (newest first). A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.charts_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Special string to index into the Array
:param str tags: Comma-separated list of tag slugs. Only Charts with one or more of these tags and Charts based on Questions with one or more of these tags will be returned.
:param date election_date: Date of an election, in YYYY-MM-DD format. Only Charts based on Questions pertaining to an election on this date will be returned.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread. | python | train |
seb-m/tss | tss.py | https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L212-L231 | def share_secret(threshold, nshares, secret, identifier, hash_id=Hash.SHA256):
"""
Create nshares of the secret. threshold specifies the number of shares
needed for reconstructing the secret value. A 0-16 bytes identifier must
be provided. Optionally the secret is hashed with the algorithm specified
by hash_id, a class attribute of Hash.
This function must return a list of formatted shares or raises a TSSError
exception if anything went wrong.
"""
if identifier is None:
raise TSSError('an identifier must be provided')
if not Hash.is_valid(hash_id):
raise TSSError('invalid hash algorithm %s' % hash_id)
secret = encode(secret)
identifier = encode(identifier)
if hash_id != Hash.NONE:
secret += Hash.to_func(hash_id)(secret).digest()
shares = generate_shares(threshold, nshares, secret)
header = format_header(identifier, hash_id, threshold, len(secret) + 1)
return [format_share(header, share) for share in shares] | Create nshares of the secret. threshold specifies the number of shares
] | Create nshares of the secret. threshold specifies the number of shares
needed for reconstructing the secret value. A 0-16 bytes identifier must
be provided. Optionally the secret is hashed with the algorithm specified
by hash_id, a class attribute of Hash.
This function must return a list of formatted shares or raises a TSSError
exception if anything went wrong. | python | train |
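Note: a hedged usage sketch for the function above (assumes tss.py's matching `reconstruct_secret`; 2-of-5 threshold):

```python
from tss import share_secret, reconstruct_secret, Hash

shares = share_secret(2, 5, b'my secret', 'id-1', Hash.SHA256)
assert reconstruct_secret(shares[:2]) == b'my secret'   # any 2 shares suffice
```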
tcalmant/ipopo | pelix/framework.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L1639-L1656 | def get_service_references(self, clazz, ldap_filter=None):
# type: (Optional[str], Optional[str]) -> Optional[List[ServiceReference]]
"""
Returns the service references for services that were registered under
the specified class by this bundle and matching the given filter
:param clazz: The class name with which the service was registered.
:param ldap_filter: A filter on service properties
:return: The list of references to the services registered by the
calling bundle and matching the filters.
"""
refs = self.__framework.find_service_references(clazz, ldap_filter)
if refs:
for ref in refs:
if ref.get_bundle() is not self.__bundle:
refs.remove(ref)
return refs | Returns the service references for services that were registered under
] | Returns the service references for services that were registered under
the specified class by this bundle and matching the given filter
:param clazz: The class name with which the service was registered.
:param ldap_filter: A filter on service properties
:return: The list of references to the services registered by the
calling bundle and matching the filters. | python | train |
sqlboy/fileseq | src/fileseq/frameset.py | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L880-L892 | def difference(self, *other):
"""
Returns a new :class:`FrameSet` with elements in `self` but not in
`other`.
Args:
other (:class:`FrameSet`): or objects that can cast to :class:`FrameSet`
Returns:
:class:`FrameSet`:
"""
from_frozenset = self.items.difference(*map(set, other))
return self.from_iterable(from_frozenset, sort=True) | Returns a new :class:`FrameSet` with elements in `self` but not in
`other`.
Args:
other (:class:`FrameSet`): or objects that can cast to :class:`FrameSet`
Returns:
:class:`FrameSet`: | python | train |
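Note: a hedged usage sketch (assumes fileseq's frame-range string constructor):

```python
from fileseq import FrameSet

a = FrameSet("1-10")
b = FrameSet("4-6")
print(a.difference(b))   # expected: frames 1-3 and 7-10
```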
quantumlib/Cirq | cirq/circuits/circuit.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L1544-L1560 | def save_qasm(self,
file_path: Union[str, bytes, int],
header: Optional[str] = None,
precision: int = 10,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
) -> None:
"""Save a QASM file equivalent to the circuit.
Args:
file_path: The location of the file where the qasm will be written.
header: A multi-line string that is placed in a comment at the top
of the QASM. Defaults to a cirq version specifier.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the QASM
register.
"""
self._to_qasm_output(header, precision, qubit_order).save(file_path) | Save a QASM file equivalent to the circuit.
Args:
file_path: The location of the file where the qasm will be written.
header: A multi-line string that is placed in a comment at the top
of the QASM. Defaults to a cirq version specifier.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the QASM
register. | python | train |
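Note: a hedged usage sketch against the cirq version pinned above (constructor and method names may have moved in later releases):

```python
import cirq

q = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(q[0]), cirq.CNOT(q[0], q[1])])
circuit.save_qasm('bell.qasm')   # writes OpenQASM with the default cirq header
```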
digidotcom/python-devicecloud | devicecloud/__init__.py | https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/__init__.py#L290-L311 | def put(self, path, data, **kwargs):
"""Perform an HTTP PUT request of the specified path in Device Cloud
Make an HTTP PUT request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to PUT
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:param data: The data to be posted in the body of the POST request (see docs for
``requests.post``
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object
"""
url = self._make_url(path)
return self._make_request("PUT", url, data=data, **kwargs) | Perform an HTTP PUT request of the specified path in Device Cloud
Make an HTTP PUT request against Device Cloud with this account's
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to PUT
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:param data: The data to be posted in the body of the POST request (see docs for
``requests.post``)
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object | [
"Perform",
"an",
"HTTP",
"PUT",
"request",
"of",
"the",
"specified",
"path",
"in",
"Device",
"Cloud"
] | python | train |
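A hedged usage sketch for the `put` entry above. `DeviceCloud`, `get_connection()` and the `/ws/DataStream/example` path are assumptions for illustration, not verified API surface of this library version.
import json
from devicecloud import DeviceCloud  # assumed top-level client class
dc = DeviceCloud('user', 'password')
conn = dc.get_connection()           # assumed accessor for the object defining put()
resp = conn.put('/ws/DataStream/example', json.dumps({'description': 'demo'}))
print(resp.status_code)              # requests.Response is returned on success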
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6128-L6134 | def getOriginTrackedDeviceInfo(self, origin, unOriginInfoSize):
"""Retrieves useful information for the origin of this action"""
fn = self.function_table.getOriginTrackedDeviceInfo
pOriginInfo = InputOriginInfo_t()
result = fn(origin, byref(pOriginInfo), unOriginInfoSize)
return result, pOriginInfo | [
"def",
"getOriginTrackedDeviceInfo",
"(",
"self",
",",
"origin",
",",
"unOriginInfoSize",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getOriginTrackedDeviceInfo",
"pOriginInfo",
"=",
"InputOriginInfo_t",
"(",
")",
"result",
"=",
"fn",
"(",
"origin",
",",
"byref",
"(",
"pOriginInfo",
")",
",",
"unOriginInfoSize",
")",
"return",
"result",
",",
"pOriginInfo"
] | Retrieves useful information for the origin of this action | [
"Retrieves",
"useful",
"information",
"for",
"the",
"origin",
"of",
"this",
"action"
] | python | train |
Ouranosinc/xclim | xclim/indices.py | https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/indices.py#L1712-L1740 | def tn_mean(tasmin, freq='YS'):
r"""Mean minimum temperature.
Mean of daily minimum temperature.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Mean of daily minimum temperature.
Notes
-----
Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then mean
values in period :math:`j` are given by:
.. math::
\overline{TN_j} = \frac{ \sum_{i=1}^{I} TN_{ij} }{I}
"""
arr = tasmin.resample(time=freq) if freq else tasmin
return arr.mean(dim='time', keep_attrs=True) | [
"def",
"tn_mean",
"(",
"tasmin",
",",
"freq",
"=",
"'YS'",
")",
":",
"arr",
"=",
"tasmin",
".",
"resample",
"(",
"time",
"=",
"freq",
")",
"if",
"freq",
"else",
"tasmin",
"return",
"arr",
".",
"mean",
"(",
"dim",
"=",
"'time'",
",",
"keep_attrs",
"=",
"True",
")"
] | r"""Mean minimum temperature.
Mean of daily minimum temperature.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Mean of daily minimum temperature.
Notes
-----
Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then mean
values in period :math:`j` are given by:
.. math::
\overline{TN_j} = \frac{ \sum_{i=1}^{I} TN_{ij} }{I}
"r",
"Mean",
"minimum",
"temperature",
"."
] | python | train |
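A small runnable sketch of the `tn_mean` entry above, assuming the function is importable from `xclim.indices` as suggested by the path column.
import numpy as np
import pandas as pd
import xarray as xr
from xclim.indices import tn_mean  # import path assumed from the entry metadata
times = pd.date_range('2000-01-01', periods=365, freq='D')
tasmin = xr.DataArray(265.0 + 10.0 * np.random.rand(365),
                      dims='time', coords={'time': times},
                      attrs={'units': 'K'})
print(tn_mean(tasmin, freq='YS'))  # one mean per year, resampled at year start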
CellProfiler/centrosome | centrosome/haralick.py | https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/haralick.py#L205-L209 | def H9(self):
"Entropy."
if not hasattr(self, '_H9'):
self._H9 = -(self.P * np.log(self.P + self.eps)).sum(2).sum(1)
return self._H9 | [
"def",
"H9",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_H9'",
")",
":",
"self",
".",
"_H9",
"=",
"-",
"(",
"self",
".",
"P",
"*",
"np",
".",
"log",
"(",
"self",
".",
"P",
"+",
"self",
".",
"eps",
")",
")",
".",
"sum",
"(",
"2",
")",
".",
"sum",
"(",
"1",
")",
"return",
"self",
".",
"_H9"
] | Entropy. | [
"Entropy",
"."
] | python | train |
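The entropy computed by `H9` above can be reproduced standalone; this sketch assumes `P` holds normalized co-occurrence matrices with shape (n_objects, levels, levels), mirroring the class attribute.
import numpy as np
P = np.random.rand(4, 8, 8)
P /= P.sum(axis=(1, 2), keepdims=True)     # each matrix sums to 1
eps = np.finfo(float).eps
H9 = -(P * np.log(P + eps)).sum(2).sum(1)  # one entropy value per object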
zhmcclient/python-zhmcclient | zhmcclient/_session.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_session.py#L1313-L1405 | def _result_object(result):
"""
Return the JSON payload in the HTTP response as a Python dict.
Parameters:
result (requests.Response): HTTP response object.
Raises:
zhmcclient.ParseError: Error parsing the returned JSON.
"""
content_type = result.headers.get('content-type', None)
if content_type is None or content_type.startswith('application/json'):
# This function is only called when there is content expected.
# Therefore, a response without content will result in a ParseError.
try:
return result.json(object_pairs_hook=OrderedDict)
except ValueError as exc:
raise ParseError(
"JSON parse error in HTTP response: {}. "
"HTTP request: {} {}. "
"Response status {}. "
"Response content-type: {!r}. "
"Content (max.1000, decoded using {}): {}".
format(exc.args[0],
result.request.method, result.request.url,
result.status_code, content_type, result.encoding,
_text_repr(result.text, 1000)))
elif content_type.startswith('text/html'):
# We are in some error situation. The HMC returns HTML content
# for some 5xx status codes. We try to deal with it somehow,
# but we are not going as far as real HTML parsing.
m = re.search(r'charset=([^;,]+)', content_type)
if m:
encoding = m.group(1) # e.g. RFC "ISO-8859-1"
else:
encoding = 'utf-8'
try:
html_uni = result.content.decode(encoding)
except LookupError:
html_uni = result.content.decode()
# We convert to one line to be regexp-friendly.
html_oneline = html_uni.replace('\r\n', '\\n').replace('\r', '\\n').\
replace('\n', '\\n')
# Check for some well-known errors:
if re.search(r'javax\.servlet\.ServletException: '
r'Web Services are not enabled\.', html_oneline):
html_title = "Console Configuration Error"
html_details = "Web Services API is not enabled on the HMC."
html_reason = HTML_REASON_WEB_SERVICES_DISABLED
else:
m = re.search(
r'<title>([^<]*)</title>.*'
r'<h2>Details:</h2>(.*)(<hr size="1" noshade>)?</body>',
html_oneline)
if m:
html_title = m.group(1)
# Spend a reasonable effort to make the HTML readable:
html_details = m.group(2).replace('<p>', '\\n').\
replace('<br>', '\\n').replace('\\n\\n', '\\n').strip()
else:
html_title = "Console Internal Error"
html_details = "Response body: {!r}".format(html_uni)
html_reason = HTML_REASON_OTHER
message = "{}: {}".format(html_title, html_details)
# We create a minimal JSON error object (to the extent we use it
# when processing it):
result_obj = {
'http-status': result.status_code,
'reason': html_reason,
'message': message,
'request-uri': result.request.url,
'request-method': result.request.method,
}
return result_obj
elif content_type.startswith('application/vnd.ibm-z-zmanager-metrics'):
content_bytes = result.content
assert isinstance(content_bytes, six.binary_type)
return content_bytes.decode('utf-8') # as a unicode object
else:
raise ParseError(
"Unknown content type in HTTP response: {}. "
"HTTP request: {} {}. "
"Response status {}. "
"Response content-type: {!r}. "
"Content (max.1000, decoded using {}): {}".
format(content_type,
result.request.method, result.request.url,
result.status_code, content_type, result.encoding,
_text_repr(result.text, 1000))) | [
"def",
"_result_object",
"(",
"result",
")",
":",
"content_type",
"=",
"result",
".",
"headers",
".",
"get",
"(",
"'content-type'",
",",
"None",
")",
"if",
"content_type",
"is",
"None",
"or",
"content_type",
".",
"startswith",
"(",
"'application/json'",
")",
":",
"# This function is only called when there is content expected.",
"# Therefore, a response without content will result in a ParseError.",
"try",
":",
"return",
"result",
".",
"json",
"(",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"ParseError",
"(",
"\"JSON parse error in HTTP response: {}. \"",
"\"HTTP request: {} {}. \"",
"\"Response status {}. \"",
"\"Response content-type: {!r}. \"",
"\"Content (max.1000, decoded using {}): {}\"",
".",
"format",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
",",
"result",
".",
"request",
".",
"method",
",",
"result",
".",
"request",
".",
"url",
",",
"result",
".",
"status_code",
",",
"content_type",
",",
"result",
".",
"encoding",
",",
"_text_repr",
"(",
"result",
".",
"text",
",",
"1000",
")",
")",
")",
"elif",
"content_type",
".",
"startswith",
"(",
"'text/html'",
")",
":",
"# We are in some error situation. The HMC returns HTML content",
"# for some 5xx status codes. We try to deal with it somehow,",
"# but we are not going as far as real HTML parsing.",
"m",
"=",
"re",
".",
"search",
"(",
"r'charset=([^;,]+)'",
",",
"content_type",
")",
"if",
"m",
":",
"encoding",
"=",
"m",
".",
"group",
"(",
"1",
")",
"# e.g. RFC \"ISO-8859-1\"",
"else",
":",
"encoding",
"=",
"'utf-8'",
"try",
":",
"html_uni",
"=",
"result",
".",
"content",
".",
"decode",
"(",
"encoding",
")",
"except",
"LookupError",
":",
"html_uni",
"=",
"result",
".",
"content",
".",
"decode",
"(",
")",
"# We convert to one line to be regexp-friendly.",
"html_oneline",
"=",
"html_uni",
".",
"replace",
"(",
"'\\r\\n'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
"# Check for some well-known errors:",
"if",
"re",
".",
"search",
"(",
"r'javax\\.servlet\\.ServletException: '",
"r'Web Services are not enabled\\.'",
",",
"html_oneline",
")",
":",
"html_title",
"=",
"\"Console Configuration Error\"",
"html_details",
"=",
"\"Web Services API is not enabled on the HMC.\"",
"html_reason",
"=",
"HTML_REASON_WEB_SERVICES_DISABLED",
"else",
":",
"m",
"=",
"re",
".",
"search",
"(",
"r'<title>([^<]*)</title>.*'",
"r'<h2>Details:</h2>(.*)(<hr size=\"1\" noshade>)?</body>'",
",",
"html_oneline",
")",
"if",
"m",
":",
"html_title",
"=",
"m",
".",
"group",
"(",
"1",
")",
"# Spend a reasonable effort to make the HTML readable:",
"html_details",
"=",
"m",
".",
"group",
"(",
"2",
")",
".",
"replace",
"(",
"'<p>'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'<br>'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'\\\\n\\\\n'",
",",
"'\\\\n'",
")",
".",
"strip",
"(",
")",
"else",
":",
"html_title",
"=",
"\"Console Internal Error\"",
"html_details",
"=",
"\"Response body: {!r}\"",
".",
"format",
"(",
"html_uni",
")",
"html_reason",
"=",
"HTML_REASON_OTHER",
"message",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"html_title",
",",
"html_details",
")",
"# We create a minimal JSON error object (to the extent we use it",
"# when processing it):",
"result_obj",
"=",
"{",
"'http-status'",
":",
"result",
".",
"status_code",
",",
"'reason'",
":",
"html_reason",
",",
"'message'",
":",
"message",
",",
"'request-uri'",
":",
"result",
".",
"request",
".",
"url",
",",
"'request-method'",
":",
"result",
".",
"request",
".",
"method",
",",
"}",
"return",
"result_obj",
"elif",
"content_type",
".",
"startswith",
"(",
"'application/vnd.ibm-z-zmanager-metrics'",
")",
":",
"content_bytes",
"=",
"result",
".",
"content",
"assert",
"isinstance",
"(",
"content_bytes",
",",
"six",
".",
"binary_type",
")",
"return",
"content_bytes",
".",
"decode",
"(",
"'utf-8'",
")",
"# as a unicode object",
"else",
":",
"raise",
"ParseError",
"(",
"\"Unknown content type in HTTP response: {}. \"",
"\"HTTP request: {} {}. \"",
"\"Response status {}. \"",
"\"Response content-type: {!r}. \"",
"\"Content (max.1000, decoded using {}): {}\"",
".",
"format",
"(",
"content_type",
",",
"result",
".",
"request",
".",
"method",
",",
"result",
".",
"request",
".",
"url",
",",
"result",
".",
"status_code",
",",
"content_type",
",",
"result",
".",
"encoding",
",",
"_text_repr",
"(",
"result",
".",
"text",
",",
"1000",
")",
")",
")"
] | Return the JSON payload in the HTTP response as a Python dict.
Parameters:
result (requests.Response): HTTP response object.
Raises:
zhmcclient.ParseError: Error parsing the returned JSON. | [
"Return",
"the",
"JSON",
"payload",
"in",
"the",
"HTTP",
"response",
"as",
"a",
"Python",
"dict",
"."
] | python | train |
ValvePython/steam | steam/client/__init__.py | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/__init__.py#L556-L570 | def logout(self):
"""
Logout from steam. Does nothing if not logged on.
.. note::
The server will drop the connection immediatelly upon logout.
"""
if self.logged_on:
self.logged_on = False
self.send(MsgProto(EMsg.ClientLogOff))
try:
self.wait_event(self.EVENT_DISCONNECTED, timeout=5, raises=True)
except:
self.disconnect()
self.idle() | [
"def",
"logout",
"(",
"self",
")",
":",
"if",
"self",
".",
"logged_on",
":",
"self",
".",
"logged_on",
"=",
"False",
"self",
".",
"send",
"(",
"MsgProto",
"(",
"EMsg",
".",
"ClientLogOff",
")",
")",
"try",
":",
"self",
".",
"wait_event",
"(",
"self",
".",
"EVENT_DISCONNECTED",
",",
"timeout",
"=",
"5",
",",
"raises",
"=",
"True",
")",
"except",
":",
"self",
".",
"disconnect",
"(",
")",
"self",
".",
"idle",
"(",
")"
] | Logout from steam. Does nothing if not logged on.
.. note::
The server will drop the connection immediately upon logout. | [
"Logout",
"from",
"steam",
".",
"Doesn",
"t",
"nothing",
"if",
"not",
"logged",
"on",
"."
] | python | train |
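A hedged sketch of how the `logout` entry above is typically reached; `SteamClient` and `cli_login` are assumed from this library's public client module.
from steam.client import SteamClient  # assumed client class location
client = SteamClient()
client.cli_login()        # assumed interactive login helper
if client.logged_on:
    client.logout()       # blocks until EVENT_DISCONNECTED or the 5 s timeout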
CalebBell/ht | ht/conv_internal.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_internal.py#L336-L383 | def turbulent_Sieder_Tate(Re, Pr, mu=None, mu_w=None):
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
'''
Nu = 0.027*Re**0.8*Pr**(1/3.)
if mu_w and mu:
Nu *= (mu/mu_w)**0.14
return Nu | [
"def",
"turbulent_Sieder_Tate",
"(",
"Re",
",",
"Pr",
",",
"mu",
"=",
"None",
",",
"mu_w",
"=",
"None",
")",
":",
"Nu",
"=",
"0.027",
"*",
"Re",
"**",
"0.8",
"*",
"Pr",
"**",
"(",
"1",
"/",
"3.",
")",
"if",
"mu_w",
"and",
"mu",
":",
"Nu",
"*=",
"(",
"mu",
"/",
"mu_w",
")",
"**",
"0.14",
"return",
"Nu"
] | r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027. | [
"r",
"Calculates",
"internal",
"convection",
"Nusselt",
"number",
"for",
"turbulent",
"flows",
"in",
"pipe",
"according",
"to",
"[",
"1",
"]",
"_",
"and",
"supposedly",
"[",
"2",
"]",
"_",
"."
] | python | train |
pymupdf/PyMuPDF | fitz/fitz.py | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L3586-L3593 | def rect(self):
"""Rectangle containing the annot"""
CheckParent(self)
val = _fitz.Annot_rect(self)
val = Rect(val)
return val | [
"def",
"rect",
"(",
"self",
")",
":",
"CheckParent",
"(",
"self",
")",
"val",
"=",
"_fitz",
".",
"Annot_rect",
"(",
"self",
")",
"val",
"=",
"Rect",
"(",
"val",
")",
"return",
"val"
] | Rectangle containing the annot | [
"Rectangle",
"containing",
"the",
"annot"
] | python | train |
openstack/networking-arista | networking_arista/ml2/mechanism_arista.py | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L253-L287 | def update_port_postcommit(self, context):
"""Send port updates to CVX
This method is also responsible for the initial creation of ports
as we wait until after a port is bound to send the port data to CVX
"""
port = context.current
orig_port = context.original
network = context.network.current
log_context("update_port_postcommit: port", port)
log_context("update_port_postcommit: orig", orig_port)
tenant_id = port['project_id']
# Device id can change without a port going DOWN, but the new device
# id may not be supported
if orig_port and port['device_id'] != orig_port['device_id']:
self._delete_port_resources(orig_port, context.original_host)
if context.status == n_const.PORT_STATUS_DOWN:
if (context.original_host and
context.status != context.original_status):
self._delete_port_resources(orig_port, context.original_host)
self._try_to_release_dynamic_segment(context, migration=True)
else:
self.create_tenant(tenant_id)
self.create_network(network)
if context.binding_levels:
segments = [
level['bound_segment'] for level in context.binding_levels]
self.create_segments(segments)
self.create_instance(port)
self.create_port(port)
self.create_port_binding(port, context.host) | [
"def",
"update_port_postcommit",
"(",
"self",
",",
"context",
")",
":",
"port",
"=",
"context",
".",
"current",
"orig_port",
"=",
"context",
".",
"original",
"network",
"=",
"context",
".",
"network",
".",
"current",
"log_context",
"(",
"\"update_port_postcommit: port\"",
",",
"port",
")",
"log_context",
"(",
"\"update_port_postcommit: orig\"",
",",
"orig_port",
")",
"tenant_id",
"=",
"port",
"[",
"'project_id'",
"]",
"# Device id can change without a port going DOWN, but the new device",
"# id may not be supported",
"if",
"orig_port",
"and",
"port",
"[",
"'device_id'",
"]",
"!=",
"orig_port",
"[",
"'device_id'",
"]",
":",
"self",
".",
"_delete_port_resources",
"(",
"orig_port",
",",
"context",
".",
"original_host",
")",
"if",
"context",
".",
"status",
"==",
"n_const",
".",
"PORT_STATUS_DOWN",
":",
"if",
"(",
"context",
".",
"original_host",
"and",
"context",
".",
"status",
"!=",
"context",
".",
"original_status",
")",
":",
"self",
".",
"_delete_port_resources",
"(",
"orig_port",
",",
"context",
".",
"original_host",
")",
"self",
".",
"_try_to_release_dynamic_segment",
"(",
"context",
",",
"migration",
"=",
"True",
")",
"else",
":",
"self",
".",
"create_tenant",
"(",
"tenant_id",
")",
"self",
".",
"create_network",
"(",
"network",
")",
"if",
"context",
".",
"binding_levels",
":",
"segments",
"=",
"[",
"level",
"[",
"'bound_segment'",
"]",
"for",
"level",
"in",
"context",
".",
"binding_levels",
"]",
"self",
".",
"create_segments",
"(",
"segments",
")",
"self",
".",
"create_instance",
"(",
"port",
")",
"self",
".",
"create_port",
"(",
"port",
")",
"self",
".",
"create_port_binding",
"(",
"port",
",",
"context",
".",
"host",
")"
] | Send port updates to CVX
This method is also responsible for the initial creation of ports
as we wait until after a port is bound to send the port data to CVX | [
"Send",
"port",
"updates",
"to",
"CVX"
] | python | train |
twisted/mantissa | xmantissa/publicweb.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/publicweb.py#L741-L753 | def child_(self, ctx):
"""
If the root resource is requested, return the primary
application's front page, if a primary application has been
chosen. Otherwise return 'self', since this page can render a
simple index.
"""
if self.frontPageItem.defaultApplication is None:
return self.webViewer.wrapModel(
_OfferingsFragment(self.frontPageItem))
else:
return SharingIndex(self.frontPageItem.defaultApplication.open(),
self.webViewer).locateChild(ctx, [''])[0] | [
"def",
"child_",
"(",
"self",
",",
"ctx",
")",
":",
"if",
"self",
".",
"frontPageItem",
".",
"defaultApplication",
"is",
"None",
":",
"return",
"self",
".",
"webViewer",
".",
"wrapModel",
"(",
"_OfferingsFragment",
"(",
"self",
".",
"frontPageItem",
")",
")",
"else",
":",
"return",
"SharingIndex",
"(",
"self",
".",
"frontPageItem",
".",
"defaultApplication",
".",
"open",
"(",
")",
",",
"self",
".",
"webViewer",
")",
".",
"locateChild",
"(",
"ctx",
",",
"[",
"''",
"]",
")",
"[",
"0",
"]"
] | If the root resource is requested, return the primary
application's front page, if a primary application has been
chosen. Otherwise return 'self', since this page can render a
simple index. | [
"If",
"the",
"root",
"resource",
"is",
"requested",
"return",
"the",
"primary",
"application",
"s",
"front",
"page",
"if",
"a",
"primary",
"application",
"has",
"been",
"chosen",
".",
"Otherwise",
"return",
"self",
"since",
"this",
"page",
"can",
"render",
"a",
"simple",
"index",
"."
] | python | train |
RJT1990/pyflux | pyflux/families/poisson.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L243-L265 | def setup():
""" Returns the attributes of this family
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Poisson"
link = np.exp
scale = False
shape = False
skewness = False
mean_transform = np.log
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized | [
"def",
"setup",
"(",
")",
":",
"name",
"=",
"\"Poisson\"",
"link",
"=",
"np",
".",
"exp",
"scale",
"=",
"False",
"shape",
"=",
"False",
"skewness",
"=",
"False",
"mean_transform",
"=",
"np",
".",
"log",
"cythonized",
"=",
"True",
"return",
"name",
",",
"link",
",",
"scale",
",",
"shape",
",",
"skewness",
",",
"mean_transform",
",",
"cythonized"
] | Returns the attributes of this family
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized | [
"Returns",
"the",
"attributes",
"of",
"this",
"family"
] | python | train |
pypa/pipenv | pipenv/vendor/pexpect/FSM.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/FSM.py#L228-L243 | def process (self, input_symbol):
'''This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). '''
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
if self.action is not None:
self.action (self)
self.current_state = self.next_state
self.next_state = None | [
"def",
"process",
"(",
"self",
",",
"input_symbol",
")",
":",
"self",
".",
"input_symbol",
"=",
"input_symbol",
"(",
"self",
".",
"action",
",",
"self",
".",
"next_state",
")",
"=",
"self",
".",
"get_transition",
"(",
"self",
".",
"input_symbol",
",",
"self",
".",
"current_state",
")",
"if",
"self",
".",
"action",
"is",
"not",
"None",
":",
"self",
".",
"action",
"(",
"self",
")",
"self",
".",
"current_state",
"=",
"self",
".",
"next_state",
"self",
".",
"next_state",
"=",
"None"
] | This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). | [
"This",
"is",
"the",
"main",
"method",
"that",
"you",
"call",
"to",
"process",
"input",
".",
"This",
"may",
"cause",
"the",
"FSM",
"to",
"change",
"state",
"and",
"call",
"an",
"action",
".",
"This",
"method",
"calls",
"get_transition",
"()",
"to",
"find",
"the",
"action",
"and",
"next_state",
"associated",
"with",
"the",
"input_symbol",
"and",
"current_state",
".",
"If",
"the",
"action",
"is",
"None",
"then",
"the",
"action",
"is",
"not",
"called",
"and",
"only",
"the",
"current",
"state",
"is",
"changed",
".",
"This",
"method",
"processes",
"one",
"complete",
"input",
"symbol",
".",
"You",
"can",
"process",
"a",
"list",
"of",
"symbols",
"(",
"or",
"a",
"string",
")",
"by",
"calling",
"process_list",
"()",
"."
] | python | train |
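A small sketch of driving the `process` entry above; the constructor and `add_transition` signatures match the rest of this FSM module, while the module path in the import is an assumption for this vendored copy.
from pexpect.FSM import FSM  # import path assumed
def turn_on(fsm):
    print('light is on')
fsm = FSM(initial_state='off', memory=None)
fsm.add_transition('flip', 'off', action=turn_on, next_state='on')
fsm.add_transition('flip', 'on', action=None, next_state='off')
fsm.process('flip')  # runs turn_on, current_state becomes 'on'
fsm.process('flip')  # no action, current_state back to 'off'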
pygobject/pgi | pgi/debug.py | https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/debug.py#L11-L31 | def pprint(obj, file_=None):
"""Prints debug information for various public objects like methods,
functions, constructors etc.
"""
if file_ is None:
file_ = sys.stdout
# functions, methods
if callable(obj) and hasattr(obj, "_code"):
obj._code.pprint(file_)
return
# classes
if isinstance(obj, type) and hasattr(obj, "_constructors"):
constructors = obj._constructors
for names, func in sorted(constructors.items()):
func._code.pprint(file_)
return
raise TypeError("unkown type") | [
"def",
"pprint",
"(",
"obj",
",",
"file_",
"=",
"None",
")",
":",
"if",
"file_",
"is",
"None",
":",
"file_",
"=",
"sys",
".",
"stdout",
"# functions, methods",
"if",
"callable",
"(",
"obj",
")",
"and",
"hasattr",
"(",
"obj",
",",
"\"_code\"",
")",
":",
"obj",
".",
"_code",
".",
"pprint",
"(",
"file_",
")",
"return",
"# classes",
"if",
"isinstance",
"(",
"obj",
",",
"type",
")",
"and",
"hasattr",
"(",
"obj",
",",
"\"_constructors\"",
")",
":",
"constructors",
"=",
"obj",
".",
"_constructors",
"for",
"names",
",",
"func",
"in",
"sorted",
"(",
"constructors",
".",
"items",
"(",
")",
")",
":",
"func",
".",
"_code",
".",
"pprint",
"(",
"file_",
")",
"return",
"raise",
"TypeError",
"(",
"\"unkown type\"",
")"
] | Prints debug information for various public objects like methods,
functions, constructors etc. | [
"Prints",
"debug",
"information",
"for",
"various",
"public",
"objects",
"like",
"methods",
"functions",
"constructors",
"etc",
"."
] | python | train |
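A hedged usage sketch for the `pprint` entry above; the `pgi.debug` module path and the GTK method chosen are assumptions for illustration.
import sys
from pgi.debug import pprint      # module path assumed
from pgi.repository import Gtk    # any pgi-generated callable will do
pprint(Gtk.Window.set_title, sys.stdout)  # dumps the generated wrapper code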
singularityhub/singularity-python | singularity/analysis/utils.py | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/utils.py#L50-L60 | def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
it will add as a new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict | [
"def",
"update_dict",
"(",
"input_dict",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"input_dict",
":",
"input_dict",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"input_dict",
"[",
"key",
"]",
"=",
"[",
"value",
"]",
"return",
"input_dict"
] | update_dict will update lists in a dictionary. If the key is not included,
it will add as a new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with | [
"update_dict",
"will",
"update",
"lists",
"in",
"a",
"dictionary",
".",
"If",
"the",
"key",
"is",
"not",
"included",
"if",
"will",
"add",
"as",
"new",
"list",
".",
"If",
"it",
"is",
"it",
"will",
"append",
".",
":",
"param",
"input_dict",
":",
"the",
"dict",
"to",
"update",
":",
"param",
"value",
":",
"the",
"value",
"to",
"update",
"with"
] | python | train |
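A minimal usage sketch for the `update_dict` entry above; no assumptions beyond the function itself.
d = {}
d = update_dict(d, 'tags', 'alpha')
d = update_dict(d, 'tags', 'beta')
d = update_dict(d, 'size', 42)
# d == {'tags': ['alpha', 'beta'], 'size': [42]}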
LonamiWebs/Telethon | telethon/client/dialogs.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/dialogs.py#L100-L142 | def iter_dialogs(
self, limit=None, *, offset_date=None, offset_id=0,
offset_peer=types.InputPeerEmpty(), ignore_migrated=False
):
"""
Returns an iterator over the dialogs, yielding 'limit' at most.
Dialogs are the open "chats" or conversations with other people,
groups you have joined, or channels you are subscribed to.
Args:
limit (`int` | `None`):
How many dialogs to be retrieved as maximum. Can be set to
``None`` to retrieve all dialogs. Note that this may take
whole minutes if you have hundreds of dialogs, as Telegram
will tell the library to slow down through a
``FloodWaitError``.
offset_date (`datetime`, optional):
The offset date to be used.
offset_id (`int`, optional):
The message ID to be used as an offset.
offset_peer (:tl:`InputPeer`, optional):
The peer to be used as an offset.
ignore_migrated (`bool`, optional):
Whether :tl:`Chat` that have ``migrated_to`` a :tl:`Channel`
should be included or not. By default all the chats in your
dialogs are returned, but setting this to ``True`` will hide
them in the same way official applications do.
Yields:
Instances of `telethon.tl.custom.dialog.Dialog`.
"""
return _DialogsIter(
self,
limit,
offset_date=offset_date,
offset_id=offset_id,
offset_peer=offset_peer,
ignore_migrated=ignore_migrated
) | [
"def",
"iter_dialogs",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"*",
",",
"offset_date",
"=",
"None",
",",
"offset_id",
"=",
"0",
",",
"offset_peer",
"=",
"types",
".",
"InputPeerEmpty",
"(",
")",
",",
"ignore_migrated",
"=",
"False",
")",
":",
"return",
"_DialogsIter",
"(",
"self",
",",
"limit",
",",
"offset_date",
"=",
"offset_date",
",",
"offset_id",
"=",
"offset_id",
",",
"offset_peer",
"=",
"offset_peer",
",",
"ignore_migrated",
"=",
"ignore_migrated",
")"
] | Returns an iterator over the dialogs, yielding 'limit' at most.
Dialogs are the open "chats" or conversations with other people,
groups you have joined, or channels you are subscribed to.
Args:
limit (`int` | `None`):
How many dialogs to be retrieved as maximum. Can be set to
``None`` to retrieve all dialogs. Note that this may take
whole minutes if you have hundreds of dialogs, as Telegram
will tell the library to slow down through a
``FloodWaitError``.
offset_date (`datetime`, optional):
The offset date to be used.
offset_id (`int`, optional):
The message ID to be used as an offset.
offset_peer (:tl:`InputPeer`, optional):
The peer to be used as an offset.
ignore_migrated (`bool`, optional):
Whether :tl:`Chat` that have ``migrated_to`` a :tl:`Channel`
should be included or not. By default all the chats in your
dialogs are returned, but setting this to ``True`` will hide
them in the same way official applications do.
Yields:
Instances of `telethon.tl.custom.dialog.Dialog`. | [
"Returns",
"an",
"iterator",
"over",
"the",
"dialogs",
"yielding",
"limit",
"at",
"most",
".",
"Dialogs",
"are",
"the",
"open",
"chats",
"or",
"conversations",
"with",
"other",
"people",
"groups",
"you",
"have",
"joined",
"or",
"channels",
"you",
"are",
"subscribed",
"to",
"."
] | python | train |
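A short sketch of the `iter_dialogs` entry above, assuming `client` is an already-connected and authorized `TelegramClient`; `dialog.id` and `dialog.name` are standard attributes of the yielded `Dialog` objects.
for dialog in client.iter_dialogs(limit=10, ignore_migrated=True):
    print(dialog.id, dialog.name)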
dw/mitogen | mitogen/core.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L1954-L1959 | def readers(self):
"""
Return a list of `(fd, data)` tuples for every FD registered for
receive readiness.
"""
return list((fd, data) for fd, (data, gen) in self._rfds.items()) | [
"def",
"readers",
"(",
"self",
")",
":",
"return",
"list",
"(",
"(",
"fd",
",",
"data",
")",
"for",
"fd",
",",
"(",
"data",
",",
"gen",
")",
"in",
"self",
".",
"_rfds",
".",
"items",
"(",
")",
")"
] | Return a list of `(fd, data)` tuples for every FD registered for
receive readiness. | [
"Return",
"a",
"list",
"of",
"(",
"fd",
"data",
")",
"tuples",
"for",
"every",
"FD",
"registered",
"for",
"receive",
"readiness",
"."
] | python | train |
agoragames/haigha | haigha/transports/event_transport.py | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/event_transport.py#L70-L99 | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally.
'''
# NOTE: copying over this comment from Connection, because there is
# knowledge captured here, even if the details are stale
# Because of the timer callback to dataRead when we re-buffered,
# there's a chance that in between we've lost the socket. If that's
# the case, just silently return as some code elsewhere would have
# already notified us. That bug could be fixed by improving the
# message reading so that we consume all possible messages and ensure
# that only a partial message was rebuffered, so that we can rely on
# the next read event to read the subsequent message.
if not hasattr(self, '_sock'):
return None
# This is sort of a hack because we're faking that data is ready, but
# it works for purposes of supporting timeouts
if timeout:
if self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = \
event.timeout(timeout, self._sock_read_cb, self._sock)
elif self._heartbeat_timeout:
self._heartbeat_timeout.delete()
self._heartbeat_timeout = None
return self._sock.read() | [
"def",
"read",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# NOTE: copying over this comment from Connection, because there is",
"# knowledge captured here, even if the details are stale",
"# Because of the timer callback to dataRead when we re-buffered,",
"# there's a chance that in between we've lost the socket. If that's",
"# the case, just silently return as some code elsewhere would have",
"# already notified us. That bug could be fixed by improving the",
"# message reading so that we consume all possible messages and ensure",
"# that only a partial message was rebuffered, so that we can rely on",
"# the next read event to read the subsequent message.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_sock'",
")",
":",
"return",
"None",
"# This is sort of a hack because we're faking that data is ready, but",
"# it works for purposes of supporting timeouts",
"if",
"timeout",
":",
"if",
"self",
".",
"_heartbeat_timeout",
":",
"self",
".",
"_heartbeat_timeout",
".",
"delete",
"(",
")",
"self",
".",
"_heartbeat_timeout",
"=",
"event",
".",
"timeout",
"(",
"timeout",
",",
"self",
".",
"_sock_read_cb",
",",
"self",
".",
"_sock",
")",
"elif",
"self",
".",
"_heartbeat_timeout",
":",
"self",
".",
"_heartbeat_timeout",
".",
"delete",
"(",
")",
"self",
".",
"_heartbeat_timeout",
"=",
"None",
"return",
"self",
".",
"_sock",
".",
"read",
"(",
")"
] | Read from the transport. If no data is available, should return None.
The timeout is ignored as this returns only data that has already
been buffered locally. | [
"Read",
"from",
"the",
"transport",
".",
"If",
"no",
"data",
"is",
"available",
"should",
"return",
"None",
".",
"The",
"timeout",
"is",
"ignored",
"as",
"this",
"returns",
"only",
"data",
"that",
"has",
"already",
"been",
"buffered",
"locally",
"."
] | python | train |
tadashi-aikawa/owlmixin | owlmixin/owlcollections.py | https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/owlcollections.py#L417-L428 | def map(self, func):
"""
:param func:
:type func: (K, T) -> U
:rtype: TList[U]
Usage:
>>> sorted(TDict(k1=1, k2=2, k3=3).map(lambda k, v: v*2))
[2, 4, 6]
"""
return TList([func(k, v) for k, v in self.items()]) | [
"def",
"map",
"(",
"self",
",",
"func",
")",
":",
"return",
"TList",
"(",
"[",
"func",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"]",
")"
] | :param func:
:type func: (K, T) -> U
:rtype: TList[U]
Usage:
>>> sorted(TDict(k1=1, k2=2, k3=3).map(lambda k, v: v*2))
[2, 4, 6] | [
":",
"param",
"func",
":",
":",
"type",
"func",
":",
"(",
"K",
"T",
")",
"-",
">",
"U",
":",
"rtype",
":",
"TList",
"[",
"U",
"]"
] | python | train |
fracpete/python-weka-wrapper | python/weka/core/tokenizers.py | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/tokenizers.py#L42-L52 | def next(self):
"""
Reads the next token.
:return: the next token
:rtype: str
"""
if not self.__has_more():
raise StopIteration()
else:
return javabridge.get_env().get_string(self.__next()) | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__has_more",
"(",
")",
":",
"raise",
"StopIteration",
"(",
")",
"else",
":",
"return",
"javabridge",
".",
"get_env",
"(",
")",
".",
"get_string",
"(",
"self",
".",
"__next",
"(",
")",
")"
] | Reads the next token.
:return: the next token
:rtype: str | [
"Reads",
"the",
"next",
"dataset",
"row",
"."
] | python | train |
dailymuse/oz | oz/bandit/actions.py | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L16-L19 | def archive_experiment(experiment):
"""Archives an experiment"""
redis = oz.redis.create_connection()
oz.bandit.Experiment(redis, experiment).archive() | [
"def",
"archive_experiment",
"(",
"experiment",
")",
":",
"redis",
"=",
"oz",
".",
"redis",
".",
"create_connection",
"(",
")",
"oz",
".",
"bandit",
".",
"Experiment",
"(",
"redis",
",",
"experiment",
")",
".",
"archive",
"(",
")"
] | Archives an experiment | [
"Archives",
"an",
"experiment"
] | python | train |
casacore/python-casacore | casacore/fitting/fitting.py | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L76-L100 | def init(self, n=0, ftype="real", colfac=1.0e-8, lmfac=1.0e-3, fid=0):
"""Set selected properties of the fitserver instance.
Like in the constructor, the number of unknowns to be solved for;
the number of simultaneous solutions; the ftype and the collinearity
and Levenberg-Marquardt factor can be specified. Individual values can
be overwritten with the :meth:`set` function.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter
"""
ftype = self._gettype(ftype)
self._fitids[fid]["stat"] = False
self._fitids[fid]["solved"] = False
self._fitids[fid]["haserr"] = False
self._fitids[fid]["fit"] = False
self._fitids[fid]["looped"] = False
if self._fitproxy.init(fid, n, ftype, colfac, lmfac):
self._fitids[fid]["stat"] = self._getstate(fid)
else:
return False | [
"def",
"init",
"(",
"self",
",",
"n",
"=",
"0",
",",
"ftype",
"=",
"\"real\"",
",",
"colfac",
"=",
"1.0e-8",
",",
"lmfac",
"=",
"1.0e-3",
",",
"fid",
"=",
"0",
")",
":",
"ftype",
"=",
"self",
".",
"_gettype",
"(",
"ftype",
")",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"stat\"",
"]",
"=",
"False",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"solved\"",
"]",
"=",
"False",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"haserr\"",
"]",
"=",
"False",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"fit\"",
"]",
"=",
"False",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"looped\"",
"]",
"=",
"False",
"if",
"self",
".",
"_fitproxy",
".",
"init",
"(",
"fid",
",",
"n",
",",
"ftype",
",",
"colfac",
",",
"lmfac",
")",
":",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"stat\"",
"]",
"=",
"self",
".",
"_getstate",
"(",
"fid",
")",
"else",
":",
"return",
"False"
] | Set selected properties of the fitserver instance.
Like in the constructor, the number of unknowns to be solved for;
the number of simultaneous solutions; the ftype and the collinearity
and Levenberg-Marquardt factor can be specified. Individual values can
be overwritten with the :meth:`set` function.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter | [
"Set",
"selected",
"properties",
"of",
"the",
"fitserver",
"instance",
"."
] | python | train |
galaxyproject/pulsar | pulsar/client/staging/up.py | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/staging/up.py#L360-L376 | def find_referenced_subfiles(self, directory):
"""
Return list of files below specified `directory` in job inputs. Could
use more sophisticated logic (match quotes to handle spaces, handle
subdirectories, etc...).
**Parameters**
directory : str
Full path to directory to search.
"""
if directory is None:
return []
pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
return self.find_pattern_references(pattern) | [
"def",
"find_referenced_subfiles",
"(",
"self",
",",
"directory",
")",
":",
"if",
"directory",
"is",
"None",
":",
"return",
"[",
"]",
"pattern",
"=",
"r'''[\\'\\\"]?(%s%s[^\\s\\'\\\"]+)[\\'\\\"]?'''",
"%",
"(",
"escape",
"(",
"directory",
")",
",",
"escape",
"(",
"sep",
")",
")",
"return",
"self",
".",
"find_pattern_references",
"(",
"pattern",
")"
] | Return list of files below specified `directory` in job inputs. Could
use more sophisticated logic (match quotes to handle spaces, handle
subdirectories, etc...).
**Parameters**
directory : str
Full path to directory to search. | [
"Return",
"list",
"of",
"files",
"below",
"specified",
"directory",
"in",
"job",
"inputs",
".",
"Could",
"use",
"more",
"sophisticated",
"logic",
"(",
"match",
"quotes",
"to",
"handle",
"spaces",
"handle",
"subdirectories",
"etc",
"...",
")",
"."
] | python | train |
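The regex built by `find_referenced_subfiles` above can be exercised standalone; the directory and command line below are hypothetical, and the output assumes a POSIX path separator.
import re
from os import sep
from re import escape
directory = '/data/inputs'  # hypothetical job input directory
pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
print(re.findall(pattern, "cat '/data/inputs/reads.fastq' > out.txt"))
# ['/data/inputs/reads.fastq']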
tensorflow/probability | tensorflow_probability/python/mcmc/transformed_kernel.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/transformed_kernel.py#L67-L74 | def inverse_transform_fn(bijector):
"""Makes a function which applies a list of Bijectors' `inverse`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(state_parts):
return [b.inverse(sp)
for b, sp in zip(bijector, state_parts)]
return fn | [
"def",
"inverse_transform_fn",
"(",
"bijector",
")",
":",
"if",
"not",
"mcmc_util",
".",
"is_list_like",
"(",
"bijector",
")",
":",
"bijector",
"=",
"[",
"bijector",
"]",
"def",
"fn",
"(",
"state_parts",
")",
":",
"return",
"[",
"b",
".",
"inverse",
"(",
"sp",
")",
"for",
"b",
",",
"sp",
"in",
"zip",
"(",
"bijector",
",",
"state_parts",
")",
"]",
"return",
"fn"
] | Makes a function which applies a list of Bijectors' `inverse`s. | [
"Makes",
"a",
"function",
"which",
"applies",
"a",
"list",
"of",
"Bijectors",
"inverse",
"s",
"."
] | python | test |
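A minimal sketch of the `inverse_transform_fn` entry above using two standard bijectors; eager-mode TensorFlow is assumed so the results are concrete tensors.
import tensorflow as tf
import tensorflow_probability as tfp
fn = inverse_transform_fn([tfp.bijectors.Exp(), tfp.bijectors.Identity()])
parts = fn([tf.constant(2.0), tf.constant(0.5)])
# parts[0] == log(2.0), parts[1] == 0.5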
jurismarches/chopper | chopper/extractor.py | https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/extractor.py#L58-L110 | def extract(self, html_contents, css_contents=None, base_url=None):
"""
Extracts the cleaned html tree as a string and only
css rules matching the cleaned html tree
:param html_contents: The HTML contents to parse
:type html_contents: str
:param css_contents: The CSS contents to parse
:type css_contents: str
:param base_url: The base page URL to use for relative to absolute links
:type base_url: str
:returns: cleaned HTML contents, cleaned CSS contents
:rtype: str or tuple
"""
# Clean HTML
html_extractor = self.html_extractor(
html_contents, self._xpaths_to_keep, self._xpaths_to_discard)
has_matches = html_extractor.parse()
if has_matches:
# Relative to absolute URLs
if base_url is not None:
html_extractor.rel_to_abs(base_url)
# Convert ElementTree to string
cleaned_html = html_extractor.to_string()
else:
cleaned_html = None
# Clean CSS
if css_contents is not None:
if cleaned_html is not None:
css_extractor = self.css_extractor(css_contents, cleaned_html)
css_extractor.parse()
# Relative to absolute URLs
if base_url is not None:
css_extractor.rel_to_abs(base_url)
cleaned_css = css_extractor.to_string()
else:
cleaned_css = None
else:
return cleaned_html
return (cleaned_html, cleaned_css) | [
"def",
"extract",
"(",
"self",
",",
"html_contents",
",",
"css_contents",
"=",
"None",
",",
"base_url",
"=",
"None",
")",
":",
"# Clean HTML",
"html_extractor",
"=",
"self",
".",
"html_extractor",
"(",
"html_contents",
",",
"self",
".",
"_xpaths_to_keep",
",",
"self",
".",
"_xpaths_to_discard",
")",
"has_matches",
"=",
"html_extractor",
".",
"parse",
"(",
")",
"if",
"has_matches",
":",
"# Relative to absolute URLs",
"if",
"base_url",
"is",
"not",
"None",
":",
"html_extractor",
".",
"rel_to_abs",
"(",
"base_url",
")",
"# Convert ElementTree to string",
"cleaned_html",
"=",
"html_extractor",
".",
"to_string",
"(",
")",
"else",
":",
"cleaned_html",
"=",
"None",
"# Clean CSS",
"if",
"css_contents",
"is",
"not",
"None",
":",
"if",
"cleaned_html",
"is",
"not",
"None",
":",
"css_extractor",
"=",
"self",
".",
"css_extractor",
"(",
"css_contents",
",",
"cleaned_html",
")",
"css_extractor",
".",
"parse",
"(",
")",
"# Relative to absolute URLs",
"if",
"base_url",
"is",
"not",
"None",
":",
"css_extractor",
".",
"rel_to_abs",
"(",
"base_url",
")",
"cleaned_css",
"=",
"css_extractor",
".",
"to_string",
"(",
")",
"else",
":",
"cleaned_css",
"=",
"None",
"else",
":",
"return",
"cleaned_html",
"return",
"(",
"cleaned_html",
",",
"cleaned_css",
")"
] | Extracts the cleaned html tree as a string and only
css rules matching the cleaned html tree
:param html_contents: The HTML contents to parse
:type html_contents: str
:param css_contents: The CSS contents to parse
:type css_contents: str
:param base_url: The base page URL to use for relative to absolute links
:type base_url: str
:returns: cleaned HTML contents, cleaned CSS contents
:rtype: str or tuple | [
"Extracts",
"the",
"cleaned",
"html",
"tree",
"as",
"a",
"string",
"and",
"only",
"css",
"rules",
"matching",
"the",
"cleaned",
"html",
"tree"
] | python | train |
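A hedged sketch of the `extract` entry above; the builder-style `keep`/`discard` calls and the XPath expressions are assumptions about this library's chaining API, and `html_contents`/`css_contents` are placeholder strings.
extractor = Extractor().keep('//div[@id="main"]').discard('//div[@id="main"]//nav')
html_only = extractor.extract(html_contents)  # css_contents omitted: HTML string only
html, css = extractor.extract(html_contents, css_contents,
                              base_url='http://example.com/page/')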
a1ezzz/wasp-general | wasp_general/uri.py | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/uri.py#L576-L594 | def validate(self, uri):
""" Check that an query part of an URI is compatible with this descriptor. Return True if the URI is
compatible.
:param uri: an URI to check
:return: bool
"""
if WURIComponentVerifier.validate(self, uri) is False:
return False
try:
WStrictURIQuery(
WURIQuery.parse(uri.component(self.component())),
*self.__specs,
extra_parameters=self.__extra_parameters
)
except ValueError:
return False
return True | [
"def",
"validate",
"(",
"self",
",",
"uri",
")",
":",
"if",
"WURIComponentVerifier",
".",
"validate",
"(",
"self",
",",
"uri",
")",
"is",
"False",
":",
"return",
"False",
"try",
":",
"WStrictURIQuery",
"(",
"WURIQuery",
".",
"parse",
"(",
"uri",
".",
"component",
"(",
"self",
".",
"component",
"(",
")",
")",
")",
",",
"*",
"self",
".",
"__specs",
",",
"extra_parameters",
"=",
"self",
".",
"__extra_parameters",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] | Check that the query part of an URI is compatible with this descriptor. Return True if the URI is
compatible.
:param uri: an URI to check
:return: bool | [
"Check",
"that",
"an",
"query",
"part",
"of",
"an",
"URI",
"is",
"compatible",
"with",
"this",
"descriptor",
".",
"Return",
"True",
"if",
"the",
"URI",
"is",
"compatible",
"."
] | python | train |
klmitch/bark | bark/handlers.py | https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/handlers.py#L93-L107 | def boolean(text):
"""
An alternative to the "bool" argument type which interprets string
values.
"""
tmp = text.lower()
if tmp.isdigit():
return bool(int(tmp))
elif tmp in ('t', 'true', 'on', 'yes'):
return True
elif tmp in ('f', 'false', 'off', 'no'):
return False
raise ValueError("invalid Boolean value %r" % text) | [
"def",
"boolean",
"(",
"text",
")",
":",
"tmp",
"=",
"text",
".",
"lower",
"(",
")",
"if",
"tmp",
".",
"isdigit",
"(",
")",
":",
"return",
"bool",
"(",
"int",
"(",
"tmp",
")",
")",
"elif",
"tmp",
"in",
"(",
"'t'",
",",
"'true'",
",",
"'on'",
",",
"'yes'",
")",
":",
"return",
"True",
"elif",
"tmp",
"in",
"(",
"'f'",
",",
"'false'",
",",
"'off'",
",",
"'no'",
")",
":",
"return",
"False",
"raise",
"ValueError",
"(",
"\"invalid Boolean value %r\"",
"%",
"text",
")"
] | An alternative to the "bool" argument type which interprets string
values. | [
"An",
"alternative",
"to",
"the",
"bool",
"argument",
"type",
"which",
"interprets",
"string",
"values",
"."
] | python | train |
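The `boolean` entry above accepts several spellings; a few illustrative calls, with no assumptions needed:
boolean('Yes')    # True
boolean('0')      # False
boolean('TRUE')   # True, comparison is case-insensitive
boolean('maybe')  # raises ValueError: invalid Boolean value 'maybe'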
kgori/treeCl | treeCl/parutils.py | https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/parutils.py#L84-L115 | def parallel_map(client, task, args, message, batchsize=1, background=False, nargs=None):
"""
Helper to map a function over a sequence of inputs, in parallel, with progress meter.
:param client: IPython.parallel.Client instance
:param task: Function
:param args: Must be a list of tuples of arguments that the task function will be mapped onto.
If the function takes a single argument, it still must be a 1-tuple.
:param message: String for progress bar
:param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic,
but longer execution time per job.
:return: IPython.parallel.AsyncMapResult
"""
show_progress = bool(message)
njobs = get_njobs(nargs, args)
nproc = len(client)
logger.debug('parallel_map: len(client) = {}'.format(len(client)))
view = client.load_balanced_view()
if show_progress:
message += ' (IP:{}w:{}b)'.format(nproc, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
if not background:
pbar.start()
map_result = view.map(task, *list(zip(*args)), chunksize=batchsize)
if background:
return map_result, client
while not map_result.ready():
map_result.wait(1)
if show_progress:
pbar.update(min(njobs, map_result.progress * batchsize))
if show_progress:
pbar.finish()
return map_result | [
"def",
"parallel_map",
"(",
"client",
",",
"task",
",",
"args",
",",
"message",
",",
"batchsize",
"=",
"1",
",",
"background",
"=",
"False",
",",
"nargs",
"=",
"None",
")",
":",
"show_progress",
"=",
"bool",
"(",
"message",
")",
"njobs",
"=",
"get_njobs",
"(",
"nargs",
",",
"args",
")",
"nproc",
"=",
"len",
"(",
"client",
")",
"logger",
".",
"debug",
"(",
"'parallel_map: len(client) = {}'",
".",
"format",
"(",
"len",
"(",
"client",
")",
")",
")",
"view",
"=",
"client",
".",
"load_balanced_view",
"(",
")",
"if",
"show_progress",
":",
"message",
"+=",
"' (IP:{}w:{}b)'",
".",
"format",
"(",
"nproc",
",",
"batchsize",
")",
"pbar",
"=",
"setup_progressbar",
"(",
"message",
",",
"njobs",
",",
"simple_progress",
"=",
"True",
")",
"if",
"not",
"background",
":",
"pbar",
".",
"start",
"(",
")",
"map_result",
"=",
"view",
".",
"map",
"(",
"task",
",",
"*",
"list",
"(",
"zip",
"(",
"*",
"args",
")",
")",
",",
"chunksize",
"=",
"batchsize",
")",
"if",
"background",
":",
"return",
"map_result",
",",
"client",
"while",
"not",
"map_result",
".",
"ready",
"(",
")",
":",
"map_result",
".",
"wait",
"(",
"1",
")",
"if",
"show_progress",
":",
"pbar",
".",
"update",
"(",
"min",
"(",
"njobs",
",",
"map_result",
".",
"progress",
"*",
"batchsize",
")",
")",
"if",
"show_progress",
":",
"pbar",
".",
"finish",
"(",
")",
"return",
"map_result"
] | Helper to map a function over a sequence of inputs, in parallel, with progress meter.
:param client: IPython.parallel.Client instance
:param task: Function
:param args: Must be a list of tuples of arguments that the task function will be mapped onto.
If the function takes a single argument, it still must be a 1-tuple.
:param message: String for progress bar
:param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic,
but longer execution time per job.
:return: IPython.parallel.AsyncMapResult | [
"Helper",
"to",
"map",
"a",
"function",
"over",
"a",
"sequence",
"of",
"inputs",
"in",
"parallel",
"with",
"progress",
"meter",
".",
":",
"param",
"client",
":",
"IPython",
".",
"parallel",
".",
"Client",
"instance",
":",
"param",
"task",
":",
"Function",
":",
"param",
"args",
":",
"Must",
"be",
"a",
"list",
"of",
"tuples",
"of",
"arguments",
"that",
"the",
"task",
"function",
"will",
"be",
"mapped",
"onto",
".",
"If",
"the",
"function",
"takes",
"a",
"single",
"argument",
"it",
"still",
"must",
"be",
"a",
"1",
"-",
"tuple",
".",
":",
"param",
"message",
":",
"String",
"for",
"progress",
"bar",
":",
"param",
"batchsize",
":",
"Jobs",
"are",
"shipped",
"in",
"batches",
"of",
"this",
"size",
".",
"Higher",
"numbers",
"mean",
"less",
"network",
"traffic",
"but",
"longer",
"execution",
"time",
"per",
"job",
".",
":",
"return",
":",
"IPython",
".",
"parallel",
".",
"AsyncMapResult"
] | python | train |
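A hedged sketch of the `parallel_map` entry above; it assumes an ipcluster is already running and that the modern `ipyparallel` import works in place of the older `IPython.parallel`.
from operator import add
from ipyparallel import Client  # older installs: from IPython.parallel import Client
client = Client()
result = parallel_map(client, add, [(1, 2), (3, 4), (5, 6)],
                      message='adding', batchsize=1)
print(list(result))  # [3, 7, 11]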
joshspeagle/dynesty | dynesty/utils.py | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/utils.py#L150-L194 | def quantile(x, q, weights=None):
"""
Compute (weighted) quantiles from an input set of samples.
Parameters
----------
x : `~numpy.ndarray` with shape (nsamps,)
Input samples.
q : `~numpy.ndarray` with shape (nquantiles,)
The list of quantiles to compute from `[0., 1.]`.
weights : `~numpy.ndarray` with shape (nsamps,), optional
The associated weight from each sample.
Returns
-------
quantiles : `~numpy.ndarray` with shape (nquantiles,)
The weighted sample quantiles computed at `q`.
"""
# Initial check.
x = np.atleast_1d(x)
q = np.atleast_1d(q)
# Quantile check.
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0. and 1.")
if weights is None:
# If no weights provided, this simply calls `np.percentile`.
return np.percentile(x, list(100.0 * q))
else:
# If weights are provided, compute the weighted quantiles.
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x).")
idx = np.argsort(x) # sort samples
sw = weights[idx] # sort weights
cdf = np.cumsum(sw)[:-1] # compute CDF
cdf /= cdf[-1] # normalize CDF
cdf = np.append(0, cdf) # ensure proper span
quantiles = np.interp(q, cdf, x[idx]).tolist()
return quantiles | [
"def",
"quantile",
"(",
"x",
",",
"q",
",",
"weights",
"=",
"None",
")",
":",
"# Initial check.",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"q",
"=",
"np",
".",
"atleast_1d",
"(",
"q",
")",
"# Quantile check.",
"if",
"np",
".",
"any",
"(",
"q",
"<",
"0.0",
")",
"or",
"np",
".",
"any",
"(",
"q",
">",
"1.0",
")",
":",
"raise",
"ValueError",
"(",
"\"Quantiles must be between 0. and 1.\"",
")",
"if",
"weights",
"is",
"None",
":",
"# If no weights provided, this simply calls `np.percentile`.",
"return",
"np",
".",
"percentile",
"(",
"x",
",",
"list",
"(",
"100.0",
"*",
"q",
")",
")",
"else",
":",
"# If weights are provided, compute the weighted quantiles.",
"weights",
"=",
"np",
".",
"atleast_1d",
"(",
"weights",
")",
"if",
"len",
"(",
"x",
")",
"!=",
"len",
"(",
"weights",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimension mismatch: len(weights) != len(x).\"",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
"x",
")",
"# sort samples",
"sw",
"=",
"weights",
"[",
"idx",
"]",
"# sort weights",
"cdf",
"=",
"np",
".",
"cumsum",
"(",
"sw",
")",
"[",
":",
"-",
"1",
"]",
"# compute CDF",
"cdf",
"/=",
"cdf",
"[",
"-",
"1",
"]",
"# normalize CDF",
"cdf",
"=",
"np",
".",
"append",
"(",
"0",
",",
"cdf",
")",
"# ensure proper span",
"quantiles",
"=",
"np",
".",
"interp",
"(",
"q",
",",
"cdf",
",",
"x",
"[",
"idx",
"]",
")",
".",
"tolist",
"(",
")",
"return",
"quantiles"
] | Compute (weighted) quantiles from an input set of samples.
Parameters
----------
x : `~numpy.ndarray` with shape (nsamps,)
Input samples.
q : `~numpy.ndarray` with shape (nquantiles,)
The list of quantiles to compute from `[0., 1.]`.
weights : `~numpy.ndarray` with shape (nsamps,), optional
The associated weight from each sample.
Returns
-------
quantiles : `~numpy.ndarray` with shape (nquantiles,)
The weighted sample quantiles computed at `q`. | [
"Compute",
"(",
"weighted",
")",
"quantiles",
"from",
"an",
"input",
"set",
"of",
"samples",
"."
] | python | train |
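For intuition, a minimal self-contained sketch of the same sort/CDF/interpolate steps used by `quantile` above; the helper name is illustrative, and with uniform weights it agrees with `np.percentile`:

```python
import numpy as np

def weighted_quantile(x, q, weights):
    # Mirror of the weighted branch above: sort samples, build a
    # normalized CDF from the weights, then interpolate the quantiles.
    x, q, weights = np.atleast_1d(x), np.atleast_1d(q), np.atleast_1d(weights)
    idx = np.argsort(x)
    cdf = np.cumsum(weights[idx])[:-1]
    cdf = np.append(0, cdf / cdf[-1])
    return np.interp(q, cdf, x[idx])

samples = np.array([1.0, 2.0, 3.0, 4.0])
print(weighted_quantile(samples, [0.5], np.ones(4)))  # [2.5]
print(np.percentile(samples, 50.0))                   # 2.5, matches
```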
threeML/astromodels | astromodels/functions/template_model.py | https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/template_model.py#L304-L407 | def _custom_init_(self, model_name, other_name=None,log_interp = True):
"""
Custom initialization for this model
:param model_name: the name of the model, corresponding to the root of the .h5 file in the data directory
:param other_name: (optional) the name to be used as name of the model when used in astromodels. If None
(default), use the same name as model_name
:return: none
"""
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
if other_name is None:
super(TemplateModel, self).__init__(name, function_definition, parameters)
else:
super(TemplateModel, self).__init__(other_name, function_definition, parameters)
# Finally prepare the interpolators
self._prepare_interpolators(log_interp) | [
"def",
"_custom_init_",
"(",
"self",
",",
"model_name",
",",
"other_name",
"=",
"None",
",",
"log_interp",
"=",
"True",
")",
":",
"# Get the data directory",
"data_dir_path",
"=",
"get_user_data_path",
"(",
")",
"# Sanitize the data file",
"filename_sanitized",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir_path",
",",
"'%s.h5'",
"%",
"model_name",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename_sanitized",
")",
":",
"raise",
"MissingDataFile",
"(",
"\"The data file %s does not exists. Did you use the \"",
"\"TemplateFactory?\"",
"%",
"(",
"filename_sanitized",
")",
")",
"# Open the template definition and read from it",
"self",
".",
"_data_file",
"=",
"filename_sanitized",
"with",
"HDFStore",
"(",
"filename_sanitized",
")",
"as",
"store",
":",
"self",
".",
"_data_frame",
"=",
"store",
"[",
"'data_frame'",
"]",
"self",
".",
"_parameters_grids",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"processed_parameters",
"=",
"0",
"for",
"key",
"in",
"store",
".",
"keys",
"(",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"'p_([0-9]+)_(.+)'",
",",
"key",
")",
"if",
"match",
"is",
"None",
":",
"continue",
"else",
":",
"tokens",
"=",
"match",
".",
"groups",
"(",
")",
"this_parameter_number",
"=",
"int",
"(",
"tokens",
"[",
"0",
"]",
")",
"this_parameter_name",
"=",
"str",
"(",
"tokens",
"[",
"1",
"]",
")",
"assert",
"this_parameter_number",
"==",
"processed_parameters",
",",
"\"Parameters out of order!\"",
"self",
".",
"_parameters_grids",
"[",
"this_parameter_name",
"]",
"=",
"store",
"[",
"key",
"]",
"processed_parameters",
"+=",
"1",
"self",
".",
"_energies",
"=",
"store",
"[",
"'energies'",
"]",
"# Now get the metadata",
"metadata",
"=",
"store",
".",
"get_storer",
"(",
"'data_frame'",
")",
".",
"attrs",
".",
"metadata",
"description",
"=",
"metadata",
"[",
"'description'",
"]",
"name",
"=",
"metadata",
"[",
"'name'",
"]",
"self",
".",
"_interpolation_degree",
"=",
"metadata",
"[",
"'interpolation_degree'",
"]",
"self",
".",
"_spline_smoothing_factor",
"=",
"metadata",
"[",
"'spline_smoothing_factor'",
"]",
"# Make the dictionary of parameters",
"function_definition",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"function_definition",
"[",
"'description'",
"]",
"=",
"description",
"function_definition",
"[",
"'latex'",
"]",
"=",
"'n.a.'",
"# Now build the parameters according to the content of the parameter grid",
"parameters",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"parameters",
"[",
"'K'",
"]",
"=",
"Parameter",
"(",
"'K'",
",",
"1.0",
")",
"parameters",
"[",
"'scale'",
"]",
"=",
"Parameter",
"(",
"'scale'",
",",
"1.0",
")",
"for",
"parameter_name",
"in",
"self",
".",
"_parameters_grids",
".",
"keys",
"(",
")",
":",
"grid",
"=",
"self",
".",
"_parameters_grids",
"[",
"parameter_name",
"]",
"parameters",
"[",
"parameter_name",
"]",
"=",
"Parameter",
"(",
"parameter_name",
",",
"grid",
".",
"median",
"(",
")",
",",
"min_value",
"=",
"grid",
".",
"min",
"(",
")",
",",
"max_value",
"=",
"grid",
".",
"max",
"(",
")",
")",
"if",
"other_name",
"is",
"None",
":",
"super",
"(",
"TemplateModel",
",",
"self",
")",
".",
"__init__",
"(",
"name",
",",
"function_definition",
",",
"parameters",
")",
"else",
":",
"super",
"(",
"TemplateModel",
",",
"self",
")",
".",
"__init__",
"(",
"other_name",
",",
"function_definition",
",",
"parameters",
")",
"# Finally prepare the interpolators",
"self",
".",
"_prepare_interpolators",
"(",
"log_interp",
")"
] | Custom initialization for this model
:param model_name: the name of the model, corresponding to the root of the .h5 file in the data directory
:param other_name: (optional) the name to be used as name of the model when used in astromodels. If None
(default), use the same name as model_name
:return: none | [
"Custom",
"initialization",
"for",
"this",
"model",
":",
"param",
"model_name",
":",
"the",
"name",
"of",
"the",
"model",
"corresponding",
"to",
"the",
"root",
"of",
"the",
".",
"h5",
"file",
"in",
"the",
"data",
"directory",
":",
"param",
"other_name",
":",
"(",
"optional",
")",
"the",
"name",
"to",
"be",
"used",
"as",
"name",
"of",
"the",
"model",
"when",
"used",
"in",
"astromodels",
".",
"If",
"None",
"(",
"default",
")",
"use",
"the",
"same",
"name",
"as",
"model_name",
":",
"return",
":",
"none"
] | python | train |
johnnoone/json-spec | src/jsonspec/validators/formats.py | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/formats.py#L112-L143 | def register(func=None, name=None):
"""
Expose compiler to factory.
:param func: the callable to expose
:type func: callable
:param name: name of format
:type name: str
It can be used as a decorator::
@register(name='my:validator')
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
or as a function::
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
@register(name='my:validator')
"""
if not name:
raise CompilationError('Name is required')
if not func:
return partial(register, name=name)
return FormatRegistry.register(name, func) | [
"def",
"register",
"(",
"func",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"raise",
"CompilationError",
"(",
"'Name is required'",
")",
"if",
"not",
"func",
":",
"return",
"partial",
"(",
"register",
",",
"name",
"=",
"name",
")",
"return",
"FormatRegistry",
".",
"register",
"(",
"name",
",",
"func",
")"
] | Expose compiler to factory.
:param func: the callable to expose
:type func: callable
:param name: name of format
:type name: str
It can be used as a decorator::
@register(name='my:validator')
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
or as a function::
def my_validator(obj):
if obj is True:
return obj
raise ValidationError('obj is not true')
@register(name='my:validator') | [
"Expose",
"compiler",
"to",
"factory",
"."
] | python | train |
mryellow/maze_explorer | mazeexp/engine/player.py | https://github.com/mryellow/maze_explorer/blob/ab8a25ccd05105d2fe57e0213d690cfc07e45827/mazeexp/engine/player.py#L82-L95 | def update_rotation(self, dt, buttons):
"""
Updates rotation and impulse direction
"""
assert isinstance(buttons, dict)
ma = buttons['right'] - buttons['left']
if ma != 0:
self.stats['battery'] -= self.battery_use['angular']
self.rotation += ma * dt * self.angular_velocity
# Redirect velocity in new direction
a = math.radians(self.rotation)
self.impulse_dir = eu.Vector2(math.sin(a), math.cos(a)) | [
"def",
"update_rotation",
"(",
"self",
",",
"dt",
",",
"buttons",
")",
":",
"assert",
"isinstance",
"(",
"buttons",
",",
"dict",
")",
"ma",
"=",
"buttons",
"[",
"'right'",
"]",
"-",
"buttons",
"[",
"'left'",
"]",
"if",
"ma",
"!=",
"0",
":",
"self",
".",
"stats",
"[",
"'battery'",
"]",
"-=",
"self",
".",
"battery_use",
"[",
"'angular'",
"]",
"self",
".",
"rotation",
"+=",
"ma",
"*",
"dt",
"*",
"self",
".",
"angular_velocity",
"# Redirect velocity in new direction",
"a",
"=",
"math",
".",
"radians",
"(",
"self",
".",
"rotation",
")",
"self",
".",
"impulse_dir",
"=",
"eu",
".",
"Vector2",
"(",
"math",
".",
"sin",
"(",
"a",
")",
",",
"math",
".",
"cos",
"(",
"a",
")",
")"
] | Updates rotation and impulse direction | [
"Updates",
"rotation",
"and",
"impulse",
"direction"
] | python | train |
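The angle-to-vector step above is just a sin/cos mapping; a small standalone sketch of the same convention (0 degrees points along +y, 90 degrees along +x), with an illustrative helper name:

```python
import math

def heading_to_direction(rotation_deg):
    # 0 deg -> (0, 1), 90 deg -> (1, 0), 180 deg -> (0, -1)
    a = math.radians(rotation_deg)
    return (math.sin(a), math.cos(a))

for deg in (0, 90, 180):
    dx, dy = heading_to_direction(deg)
    print(deg, round(dx, 3), round(dy, 3))
```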
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/suite.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/suite.py#L428-L453 | def ancestry(self, context):
"""Return the ancestry of the context (that is, all of the
packages and modules containing the context), in order of
descent with the outermost ancestor last.
This method is a generator.
"""
log.debug("get ancestry %s", context)
if context is None:
return
# Methods include reference to module they are defined in, we
# don't want that, instead want the module the class is in now
# (classes are re-ancestored elsewhere).
if hasattr(context, 'im_class'):
context = context.im_class
elif hasattr(context, '__self__'):
context = context.__self__.__class__
if hasattr(context, '__module__'):
ancestors = context.__module__.split('.')
elif hasattr(context, '__name__'):
ancestors = context.__name__.split('.')[:-1]
else:
raise TypeError("%s has no ancestors?" % context)
while ancestors:
log.debug(" %s ancestors %s", context, ancestors)
yield resolve_name('.'.join(ancestors))
ancestors.pop() | [
"def",
"ancestry",
"(",
"self",
",",
"context",
")",
":",
"log",
".",
"debug",
"(",
"\"get ancestry %s\"",
",",
"context",
")",
"if",
"context",
"is",
"None",
":",
"return",
"# Methods include reference to module they are defined in, we",
"# don't want that, instead want the module the class is in now",
"# (classes are re-ancestored elsewhere).",
"if",
"hasattr",
"(",
"context",
",",
"'im_class'",
")",
":",
"context",
"=",
"context",
".",
"im_class",
"elif",
"hasattr",
"(",
"context",
",",
"'__self__'",
")",
":",
"context",
"=",
"context",
".",
"__self__",
".",
"__class__",
"if",
"hasattr",
"(",
"context",
",",
"'__module__'",
")",
":",
"ancestors",
"=",
"context",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"elif",
"hasattr",
"(",
"context",
",",
"'__name__'",
")",
":",
"ancestors",
"=",
"context",
".",
"__name__",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"%s has no ancestors?\"",
"%",
"context",
")",
"while",
"ancestors",
":",
"log",
".",
"debug",
"(",
"\" %s ancestors %s\"",
",",
"context",
",",
"ancestors",
")",
"yield",
"resolve_name",
"(",
"'.'",
".",
"join",
"(",
"ancestors",
")",
")",
"ancestors",
".",
"pop",
"(",
")"
] | Return the ancestry of the context (that is, all of the
packages and modules containing the context), in order of
descent with the outermost ancestor last.
This method is a generator. | [
"Return",
"the",
"ancestry",
"of",
"the",
"context",
"(",
"that",
"is",
"all",
"of",
"the",
"packages",
"and",
"modules",
"containing",
"the",
"context",
")",
"in",
"order",
"of",
"descent",
"with",
"the",
"outermost",
"ancestor",
"last",
".",
"This",
"method",
"is",
"a",
"generator",
"."
] | python | test |
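A hedged standalone sketch of the same idea, using `importlib.import_module` in place of nose's `resolve_name`; the helper name is illustrative:

```python
import importlib

def ancestry_modules(context):
    # Walk the dotted module path of `context`, yielding each ancestor
    # module with the outermost ancestor last, as described above.
    if hasattr(context, '__module__'):
        ancestors = context.__module__.split('.')
    elif hasattr(context, '__name__'):
        ancestors = context.__name__.split('.')[:-1]
    else:
        raise TypeError("%s has no ancestors?" % context)
    while ancestors:
        yield importlib.import_module('.'.join(ancestors))
        ancestors.pop()

import logging.handlers
print([m.__name__ for m in ancestry_modules(logging.handlers.RotatingFileHandler)])
# ['logging.handlers', 'logging']
```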
cltl/KafNafParserPy | KafNafParserPy/coreference_data.py | https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/coreference_data.py#L194-L201 | def to_kaf(self):
"""
Converts the coreference layer to KAF
"""
if self.type == 'NAF':
for node_coref in self.__get_corefs_nodes():
node_coref.set('coid',node_coref.get('id'))
del node_coref.attrib['id'] | [
"def",
"to_kaf",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"==",
"'NAF'",
":",
"for",
"node_coref",
"in",
"self",
".",
"__get_corefs_nodes",
"(",
")",
":",
"node_coref",
".",
"set",
"(",
"'coid'",
",",
"node_coref",
".",
"get",
"(",
"'id'",
")",
")",
"del",
"node_coref",
".",
"attrib",
"[",
"'id'",
"]"
] | Converts the coreference layer to KAF | [
"Converts",
"the",
"coreference",
"layer",
"to",
"KAF"
] | python | train |
tanghaibao/jcvi | jcvi/apps/grid.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/grid.py#L398-L477 | def run(args):
"""
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
"""
p = OptionParser(run.__doc__)
p.set_grid_opts()
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
sep = ":::"
if sep in args:
sepidx = args.index(sep)
filenames = args[sepidx + 1:]
args = args[:sepidx]
if not filenames:
filenames = [""]
else:
filenames = sys.stdin if not sys.stdin.isatty() else [""]
cmd = " ".join(args)
cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
filename = filename.strip()
noextname = filename.rsplit(".", 1)[0]
prefix, basename = op.split(filename)
basenoextname = basename.rsplit(".", 1)[0]
basefirstname = basename.split(".")[0]
firstname = op.join(prefix, basefirstname)
ncmd = cmd
if "{" in ncmd:
ncmd = ncmd.replace("{}", filename)
else:
ncmd += " " + filename
ncmd = ncmd.replace("{.}", noextname)
ncmd = ncmd.replace("{_}", firstname)
ncmd = ncmd.replace("{/}", basename)
ncmd = ncmd.replace("{/.}", basenoextname)
ncmd = ncmd.replace("{/_}", basefirstname)
ncmd = ncmd.replace("{#}", str(i))
outfile = None
if ">" in ncmd:
ncmd, outfile = ncmd.split(">", 1)
ncmd, outfile = ncmd.strip(), outfile.strip()
ncmd = ncmd.strip()
cmds.append((ncmd, outfile))
for ncmd, outfile in cmds:
p = GridProcess(ncmd, outfile=outfile, extra_opts=opts.extra, grid_opts=opts)
p.start() | [
"def",
"run",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"run",
".",
"__doc__",
")",
"p",
".",
"set_grid_opts",
"(",
")",
"p",
".",
"set_params",
"(",
"prog",
"=",
"\"grid\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"sep",
"=",
"\":::\"",
"if",
"sep",
"in",
"args",
":",
"sepidx",
"=",
"args",
".",
"index",
"(",
"sep",
")",
"filenames",
"=",
"args",
"[",
"sepidx",
"+",
"1",
":",
"]",
"args",
"=",
"args",
"[",
":",
"sepidx",
"]",
"if",
"not",
"filenames",
":",
"filenames",
"=",
"[",
"\"\"",
"]",
"else",
":",
"filenames",
"=",
"sys",
".",
"stdin",
"if",
"not",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")",
"else",
"[",
"\"\"",
"]",
"cmd",
"=",
"\" \"",
".",
"join",
"(",
"args",
")",
"cmds",
"=",
"[",
"]",
"if",
"filenames",
"else",
"[",
"(",
"cmd",
",",
"None",
")",
"]",
"for",
"i",
",",
"filename",
"in",
"enumerate",
"(",
"filenames",
")",
":",
"filename",
"=",
"filename",
".",
"strip",
"(",
")",
"noextname",
"=",
"filename",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"prefix",
",",
"basename",
"=",
"op",
".",
"split",
"(",
"filename",
")",
"basenoextname",
"=",
"basename",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"basefirstname",
"=",
"basename",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"firstname",
"=",
"op",
".",
"join",
"(",
"prefix",
",",
"basefirstname",
")",
"ncmd",
"=",
"cmd",
"if",
"\"{\"",
"in",
"ncmd",
":",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{}\"",
",",
"filename",
")",
"else",
":",
"ncmd",
"+=",
"\" \"",
"+",
"filename",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{.}\"",
",",
"noextname",
")",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{_}\"",
",",
"firstname",
")",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{/}\"",
",",
"basename",
")",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{/.}\"",
",",
"basenoextname",
")",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{/_}\"",
",",
"basefirstname",
")",
"ncmd",
"=",
"ncmd",
".",
"replace",
"(",
"\"{#}\"",
",",
"str",
"(",
"i",
")",
")",
"outfile",
"=",
"None",
"if",
"\">\"",
"in",
"ncmd",
":",
"ncmd",
",",
"outfile",
"=",
"ncmd",
".",
"split",
"(",
"\">\"",
",",
"1",
")",
"ncmd",
",",
"outfile",
"=",
"ncmd",
".",
"strip",
"(",
")",
",",
"outfile",
".",
"strip",
"(",
")",
"ncmd",
"=",
"ncmd",
".",
"strip",
"(",
")",
"cmds",
".",
"append",
"(",
"(",
"ncmd",
",",
"outfile",
")",
")",
"for",
"ncmd",
",",
"outfile",
"in",
"cmds",
":",
"p",
"=",
"GridProcess",
"(",
"ncmd",
",",
"outfile",
"=",
"outfile",
",",
"extra_opts",
"=",
"opts",
".",
"extra",
",",
"grid_opts",
"=",
"opts",
")",
"p",
".",
"start",
"(",
")"
] | %prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands | [
"%prog",
"run",
"command",
":::",
"file1",
"file2"
] | python | train |
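The placeholder rules documented above reduce to plain string substitution; a self-contained sketch (the `expand` helper is illustrative, not part of jcvi):

```python
import os.path as op

def expand(cmd, filename, seqno=0):
    # Apply the {} / {.} / {_} / {/} / {/.} / {/_} / {#} rules from the
    # docstring above to a single input file name.
    noext = filename.rsplit(".", 1)[0]
    prefix, base = op.split(filename)
    basenoext = base.rsplit(".", 1)[0]
    basefirst = base.split(".")[0]
    first = op.join(prefix, basefirst)
    for key, val in (("{.}", noext), ("{_}", first), ("{/.}", basenoext),
                     ("{/_}", basefirst), ("{/}", base),
                     ("{}", filename), ("{#}", str(seqno))):
        cmd = cmd.replace(key, val)
    return cmd

print(expand("process {} > {/.}.log", "t/example.tar.gz"))
# process t/example.tar.gz > example.tar.log
```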
wind-python/windpowerlib | windpowerlib/modelchain.py | https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/modelchain.py#L247-L317 | def wind_speed_hub(self, weather_df):
r"""
Calculates the wind speed at hub height.
The method specified by the parameter `wind_speed_model` is used.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s and
roughness length `roughness_length` in m.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
wind_speed_hub : pandas.Series or numpy.array
Wind speed in m/s at hub height.
Notes
-----
If `weather_df` contains wind speeds at different heights the given
wind speed(s) closest to the hub height are used.
"""
if self.power_plant.hub_height in weather_df['wind_speed']:
wind_speed_hub = weather_df['wind_speed'][
self.power_plant.hub_height]
elif self.wind_speed_model == 'logarithmic':
logging.debug('Calculating wind speed using logarithmic wind '
'profile.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.logarithmic_profile(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.obstacle_height)
elif self.wind_speed_model == 'hellman':
logging.debug('Calculating wind speed using hellman equation.')
closest_height = weather_df['wind_speed'].columns[
min(range(len(weather_df['wind_speed'].columns)),
key=lambda i: abs(weather_df['wind_speed'].columns[i] -
self.power_plant.hub_height))]
wind_speed_hub = wind_speed.hellman(
weather_df['wind_speed'][closest_height], closest_height,
self.power_plant.hub_height,
weather_df['roughness_length'].iloc[:, 0],
self.hellman_exp)
elif self.wind_speed_model == 'interpolation_extrapolation':
logging.debug('Calculating wind speed using linear inter- or '
'extrapolation.')
wind_speed_hub = tools.linear_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
elif self.wind_speed_model == 'log_interpolation_extrapolation':
logging.debug('Calculating wind speed using logarithmic inter- or '
'extrapolation.')
wind_speed_hub = tools.logarithmic_interpolation_extrapolation(
weather_df['wind_speed'], self.power_plant.hub_height)
else:
raise ValueError("'{0}' is an invalid value. ".format(
self.wind_speed_model) + "`wind_speed_model` must be "
"'logarithmic', 'hellman', 'interpolation_extrapolation' " +
"or 'log_interpolation_extrapolation'.")
return wind_speed_hub | [
"def",
"wind_speed_hub",
"(",
"self",
",",
"weather_df",
")",
":",
"if",
"self",
".",
"power_plant",
".",
"hub_height",
"in",
"weather_df",
"[",
"'wind_speed'",
"]",
":",
"wind_speed_hub",
"=",
"weather_df",
"[",
"'wind_speed'",
"]",
"[",
"self",
".",
"power_plant",
".",
"hub_height",
"]",
"elif",
"self",
".",
"wind_speed_model",
"==",
"'logarithmic'",
":",
"logging",
".",
"debug",
"(",
"'Calculating wind speed using logarithmic wind '",
"'profile.'",
")",
"closest_height",
"=",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
"[",
"min",
"(",
"range",
"(",
"len",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
")",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"abs",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
"[",
"i",
"]",
"-",
"self",
".",
"power_plant",
".",
"hub_height",
")",
")",
"]",
"wind_speed_hub",
"=",
"wind_speed",
".",
"logarithmic_profile",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
"[",
"closest_height",
"]",
",",
"closest_height",
",",
"self",
".",
"power_plant",
".",
"hub_height",
",",
"weather_df",
"[",
"'roughness_length'",
"]",
".",
"iloc",
"[",
":",
",",
"0",
"]",
",",
"self",
".",
"obstacle_height",
")",
"elif",
"self",
".",
"wind_speed_model",
"==",
"'hellman'",
":",
"logging",
".",
"debug",
"(",
"'Calculating wind speed using hellman equation.'",
")",
"closest_height",
"=",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
"[",
"min",
"(",
"range",
"(",
"len",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
")",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"abs",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
".",
"columns",
"[",
"i",
"]",
"-",
"self",
".",
"power_plant",
".",
"hub_height",
")",
")",
"]",
"wind_speed_hub",
"=",
"wind_speed",
".",
"hellman",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
"[",
"closest_height",
"]",
",",
"closest_height",
",",
"self",
".",
"power_plant",
".",
"hub_height",
",",
"weather_df",
"[",
"'roughness_length'",
"]",
".",
"iloc",
"[",
":",
",",
"0",
"]",
",",
"self",
".",
"hellman_exp",
")",
"elif",
"self",
".",
"wind_speed_model",
"==",
"'interpolation_extrapolation'",
":",
"logging",
".",
"debug",
"(",
"'Calculating wind speed using linear inter- or '",
"'extrapolation.'",
")",
"wind_speed_hub",
"=",
"tools",
".",
"linear_interpolation_extrapolation",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
",",
"self",
".",
"power_plant",
".",
"hub_height",
")",
"elif",
"self",
".",
"wind_speed_model",
"==",
"'log_interpolation_extrapolation'",
":",
"logging",
".",
"debug",
"(",
"'Calculating wind speed using logarithmic inter- or '",
"'extrapolation.'",
")",
"wind_speed_hub",
"=",
"tools",
".",
"logarithmic_interpolation_extrapolation",
"(",
"weather_df",
"[",
"'wind_speed'",
"]",
",",
"self",
".",
"power_plant",
".",
"hub_height",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"'{0}' is an invalid value. \"",
".",
"format",
"(",
"self",
".",
"wind_speed_model",
")",
"+",
"\"`wind_speed_model` must be \"",
"\"'logarithmic', 'hellman', 'interpolation_extrapolation' \"",
"+",
"\"or 'log_interpolation_extrapolation'.\"",
")",
"return",
"wind_speed_hub"
] | r"""
Calculates the wind speed at hub height.
The method specified by the parameter `wind_speed_model` is used.
Parameters
----------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s and
roughness length `roughness_length` in m.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name (e.g. wind_speed) and the second level
contains the height at which it applies (e.g. 10, if it was
measured at a height of 10 m). See documentation of
:func:`ModelChain.run_model` for an example on how to create the
weather_df DataFrame.
Returns
-------
wind_speed_hub : pandas.Series or numpy.array
Wind speed in m/s at hub height.
Notes
-----
If `weather_df` contains wind speeds at different heights the given
wind speed(s) closest to the hub height are used. | [
"r",
"Calculates",
"the",
"wind",
"speed",
"at",
"hub",
"height",
"."
] | python | train |
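For orientation, minimal numeric sketches of the two profile laws selected above (textbook forms, with obstacle height taken as zero; not windpowerlib's exact implementations):

```python
import math

def logarithmic_profile(v_ref, h_ref, h_hub, z0):
    # v(h) = v_ref * ln(h / z0) / ln(h_ref / z0)
    return v_ref * math.log(h_hub / z0) / math.log(h_ref / z0)

def hellman(v_ref, h_ref, h_hub, alpha=1.0 / 7):
    # Power law with the common 1/7 exponent as a default.
    return v_ref * (h_hub / h_ref) ** alpha

v10 = 5.0  # m/s measured at 10 m
print(round(logarithmic_profile(v10, 10, 135, z0=0.15), 2))  # ~8.1 m/s at hub
print(round(hellman(v10, 10, 135), 2))                       # ~7.25 m/s at hub
```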
splunk/splunk-sdk-python | examples/analytics/bottle.py | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L1630-L1641 | def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator | [
"def",
"auth_basic",
"(",
"check",
",",
"realm",
"=",
"\"private\"",
",",
"text",
"=",
"\"Access denied\"",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"a",
",",
"*",
"*",
"ka",
")",
":",
"user",
",",
"password",
"=",
"request",
".",
"auth",
"or",
"(",
"None",
",",
"None",
")",
"if",
"user",
"is",
"None",
"or",
"not",
"check",
"(",
"user",
",",
"password",
")",
":",
"response",
".",
"headers",
"[",
"'WWW-Authenticate'",
"]",
"=",
"'Basic realm=\"%s\"'",
"%",
"realm",
"return",
"HTTPError",
"(",
"401",
",",
"text",
")",
"return",
"func",
"(",
"*",
"a",
",",
"*",
"*",
"ka",
")",
"return",
"wrapper",
"return",
"decorator"
] | Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. | [
"Callback",
"decorator",
"to",
"require",
"HTTP",
"auth",
"(",
"basic",
")",
".",
"TODO",
":",
"Add",
"route",
"(",
"check_auth",
"=",
"...",
")",
"parameter",
"."
] | python | train |
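A hedged usage sketch; the credential check and route are hypothetical, and the decorator is assumed importable from this bottle module:

```python
from bottle import route, run, auth_basic

def check(user, password):
    # Hypothetical credential check; any callable returning True/False works.
    return user == "admin" and password == "secret"

@route('/private')
@auth_basic(check, realm="staff", text="Members only")
def private_page():
    return "welcome"

# run(host='localhost', port=8080)  # then: curl -u admin:secret .../private
```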
ONSdigital/sdc-rabbit | sdc/rabbit/consumers.py | https://github.com/ONSdigital/sdc-rabbit/blob/985adfdb09cf1b263a1f311438baeb42cbcb503a/sdc/rabbit/consumers.py#L275-L282 | def reject_message(self, delivery_tag, requeue=False, **kwargs):
"""Reject the message delivery from RabbitMQ by sending a
Basic.Reject RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
logger.info('Rejecting message', delivery_tag=delivery_tag, **kwargs)
self._channel.basic_reject(delivery_tag, requeue=requeue) | [
"def",
"reject_message",
"(",
"self",
",",
"delivery_tag",
",",
"requeue",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"info",
"(",
"'Rejecting message'",
",",
"delivery_tag",
"=",
"delivery_tag",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_channel",
".",
"basic_reject",
"(",
"delivery_tag",
",",
"requeue",
"=",
"requeue",
")"
] | Reject the message delivery from RabbitMQ by sending a
Basic.Reject RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame | [
"Reject",
"the",
"message",
"delivery",
"from",
"RabbitMQ",
"by",
"sending",
"a",
"Basic",
".",
"Reject",
"RPC",
"method",
"for",
"the",
"delivery",
"tag",
".",
":",
"param",
"int",
"delivery_tag",
":",
"The",
"delivery",
"tag",
"from",
"the",
"Basic",
".",
"Deliver",
"frame"
] | python | train |
PythonCharmers/python-future | src/future/backports/email/_header_value_parser.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2259-L2287 | def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value | [
"def",
"get_group",
"(",
"value",
")",
":",
"group",
"=",
"Group",
"(",
")",
"token",
",",
"value",
"=",
"get_display_name",
"(",
"value",
")",
"if",
"not",
"value",
"or",
"value",
"[",
"0",
"]",
"!=",
"':'",
":",
"raise",
"errors",
".",
"HeaderParseError",
"(",
"\"expected ':' at end of group \"",
"\"display name but found '{}'\"",
".",
"format",
"(",
"value",
")",
")",
"group",
".",
"append",
"(",
"token",
")",
"group",
".",
"append",
"(",
"ValueTerminal",
"(",
"':'",
",",
"'group-display-name-terminator'",
")",
")",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"==",
"';'",
":",
"group",
".",
"append",
"(",
"ValueTerminal",
"(",
"';'",
",",
"'group-terminator'",
")",
")",
"return",
"group",
",",
"value",
"[",
"1",
":",
"]",
"token",
",",
"value",
"=",
"get_group_list",
"(",
"value",
")",
"group",
".",
"append",
"(",
"token",
")",
"if",
"not",
"value",
":",
"group",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"InvalidHeaderDefect",
"(",
"\"end of header in group\"",
")",
")",
"if",
"value",
"[",
"0",
"]",
"!=",
"';'",
":",
"raise",
"errors",
".",
"HeaderParseError",
"(",
"\"expected ';' at end of group but found {}\"",
".",
"format",
"(",
"value",
")",
")",
"group",
".",
"append",
"(",
"ValueTerminal",
"(",
"';'",
",",
"'group-terminator'",
")",
")",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"in",
"CFWS_LEADER",
":",
"token",
",",
"value",
"=",
"get_cfws",
"(",
"value",
")",
"group",
".",
"append",
"(",
"token",
")",
"return",
"group",
",",
"value"
] | group = display-name ":" [group-list] ";" [CFWS] | [
"group",
"=",
"display",
"-",
"name",
":",
"[",
"group",
"-",
"list",
"]",
";",
"[",
"CFWS",
"]"
] | python | train |
tjomasc/snekbol | snekbol/document.py | https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L270-L277 | def _get_triplet_value_list(self, graph, identity, rdf_type):
"""
Get a list of values from RDF triples when more than one may be present
"""
values = []
for elem in graph.objects(identity, rdf_type):
values.append(elem.toPython())
return values | [
"def",
"_get_triplet_value_list",
"(",
"self",
",",
"graph",
",",
"identity",
",",
"rdf_type",
")",
":",
"values",
"=",
"[",
"]",
"for",
"elem",
"in",
"graph",
".",
"objects",
"(",
"identity",
",",
"rdf_type",
")",
":",
"values",
".",
"append",
"(",
"elem",
".",
"toPython",
"(",
")",
")",
"return",
"values"
] | Get a list of values from RDF triples when more than one may be present | [
"Get",
"a",
"list",
"of",
"values",
"from",
"RDF",
"triples",
"when",
"more",
"than",
"one",
"may",
"be",
"present"
] | python | train |
raiden-network/raiden | raiden/utils/signing.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/utils/signing.py#L9-L23 | def pack_data(abi_types, values) -> bytes:
"""Normalize data and pack them into a byte array"""
if len(abi_types) != len(values):
raise ValueError(
"Length mismatch between provided abi types and values. Got "
"{0} types and {1} values.".format(len(abi_types), len(values)),
)
normalized_values = map_abi_data([abi_address_to_hex], abi_types, values)
return decode_hex(''.join(
remove_0x_prefix(hex_encode_abi_type(abi_type, value))
for abi_type, value
in zip(abi_types, normalized_values)
)) | [
"def",
"pack_data",
"(",
"abi_types",
",",
"values",
")",
"->",
"bytes",
":",
"if",
"len",
"(",
"abi_types",
")",
"!=",
"len",
"(",
"values",
")",
":",
"raise",
"ValueError",
"(",
"\"Length mismatch between provided abi types and values. Got \"",
"\"{0} types and {1} values.\"",
".",
"format",
"(",
"len",
"(",
"abi_types",
")",
",",
"len",
"(",
"values",
")",
")",
",",
")",
"normalized_values",
"=",
"map_abi_data",
"(",
"[",
"abi_address_to_hex",
"]",
",",
"abi_types",
",",
"values",
")",
"return",
"decode_hex",
"(",
"''",
".",
"join",
"(",
"remove_0x_prefix",
"(",
"hex_encode_abi_type",
"(",
"abi_type",
",",
"value",
")",
")",
"for",
"abi_type",
",",
"value",
"in",
"zip",
"(",
"abi_types",
",",
"normalized_values",
")",
")",
")"
] | Normalize data and pack them into a byte array | [
"Normalize",
"data",
"and",
"pack",
"them",
"into",
"a",
"byte",
"array"
] | python | train |
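A hedged usage sketch, assuming the web3/eth-utils helpers used above (`map_abi_data`, `hex_encode_abi_type`, `decode_hex`, ...) are importable in the environment; the address and value are purely illustrative:

```python
# Tight (non-padded) packing: address (20 bytes) + uint256 (32 bytes).
packed = pack_data(
    ['address', 'uint256'],
    ['0x' + '11' * 20, 42],
)
print(len(packed))    # expected 52 under these assumptions
print(packed.hex())
```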
qubell/contrib-python-qubell-client | qubell/api/private/organization.py | https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/organization.py#L309-L329 | def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None):
""" Smart method. It does everything, to return Instance with given parameters within the application.
If instance found running and given parameters are actual: return it.
If instance found, but parameters differs - reconfigure instance with new parameters.
If instance not found: launch instance with given parameters.
Return: Instance object.
"""
instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval)
reconfigure = False
# if found:
# if revision and revision is not found.revision:
# reconfigure = True
# if parameters and parameters is not found.parameters:
# reconfigure = True
# We need to reconfigure instance
if reconfigure:
instance.reconfigure(revision=revision, parameters=parameters)
return instance | [
"def",
"instance",
"(",
"self",
",",
"id",
"=",
"None",
",",
"application",
"=",
"None",
",",
"name",
"=",
"None",
",",
"revision",
"=",
"None",
",",
"environment",
"=",
"None",
",",
"parameters",
"=",
"None",
",",
"submodules",
"=",
"None",
",",
"destroyInterval",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"get_or_create_instance",
"(",
"id",
",",
"application",
",",
"revision",
",",
"environment",
",",
"name",
",",
"parameters",
",",
"submodules",
",",
"destroyInterval",
")",
"reconfigure",
"=",
"False",
"# if found:",
"# if revision and revision is not found.revision:",
"# reconfigure = True",
"# if parameters and parameters is not found.parameters:",
"# reconfigure = True",
"# We need to reconfigure instance",
"if",
"reconfigure",
":",
"instance",
".",
"reconfigure",
"(",
"revision",
"=",
"revision",
",",
"parameters",
"=",
"parameters",
")",
"return",
"instance"
] | Smart method. It does everything to return Instance with given parameters within the application.
If instance found running and given parameters are current: return it.
If instance found, but parameters differ - reconfigure instance with new parameters.
If instance not found: launch instance with given parameters.
Return: Instance object. | [
"Smart",
"method",
".",
"It",
"does",
"everything",
"to",
"return",
"Instance",
"with",
"given",
"parameters",
"within",
"the",
"application",
".",
"If",
"instance",
"found",
"running",
"and",
"given",
"parameters",
"are",
"actual",
":",
"return",
"it",
".",
"If",
"instance",
"found",
"but",
"parameters",
"differs",
"-",
"reconfigure",
"instance",
"with",
"new",
"parameters",
".",
"If",
"instance",
"not",
"found",
":",
"launch",
"instance",
"with",
"given",
"parameters",
".",
"Return",
":",
"Instance",
"object",
"."
] | python | train |
zhanglab/psamm | psamm/massconsistency.py | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/massconsistency.py#L44-L71 | def is_consistent(database, solver, exchange=set(), zeromass=set()):
"""Try to assign a positive mass to each compound
Return True if successful. The masses are simply constrained by m_i >= 1 and
finding a solution under these conditions proves that the database is mass
consistent.
"""
prob = solver.create_problem()
compound_set = _non_localized_compounds(database)
mass_compounds = compound_set.difference(zeromass)
# Define mass variables
m = prob.namespace(mass_compounds, lower=1)
prob.set_objective(m.sum(mass_compounds))
# Define constraints
massbalance_lhs = {reaction: 0 for reaction in database.reactions}
for (compound, reaction), value in iteritems(database.matrix):
if compound not in zeromass:
mass = m(compound.in_compartment(None))
massbalance_lhs[reaction] += mass * value
for reaction, lhs in iteritems(massbalance_lhs):
if reaction not in exchange:
prob.add_linear_constraints(lhs == 0)
result = prob.solve_unchecked(lp.ObjectiveSense.Minimize)
return result.success | [
"def",
"is_consistent",
"(",
"database",
",",
"solver",
",",
"exchange",
"=",
"set",
"(",
")",
",",
"zeromass",
"=",
"set",
"(",
")",
")",
":",
"prob",
"=",
"solver",
".",
"create_problem",
"(",
")",
"compound_set",
"=",
"_non_localized_compounds",
"(",
"database",
")",
"mass_compounds",
"=",
"compound_set",
".",
"difference",
"(",
"zeromass",
")",
"# Define mass variables",
"m",
"=",
"prob",
".",
"namespace",
"(",
"mass_compounds",
",",
"lower",
"=",
"1",
")",
"prob",
".",
"set_objective",
"(",
"m",
".",
"sum",
"(",
"mass_compounds",
")",
")",
"# Define constraints",
"massbalance_lhs",
"=",
"{",
"reaction",
":",
"0",
"for",
"reaction",
"in",
"database",
".",
"reactions",
"}",
"for",
"(",
"compound",
",",
"reaction",
")",
",",
"value",
"in",
"iteritems",
"(",
"database",
".",
"matrix",
")",
":",
"if",
"compound",
"not",
"in",
"zeromass",
":",
"mass",
"=",
"m",
"(",
"compound",
".",
"in_compartment",
"(",
"None",
")",
")",
"massbalance_lhs",
"[",
"reaction",
"]",
"+=",
"mass",
"*",
"value",
"for",
"reaction",
",",
"lhs",
"in",
"iteritems",
"(",
"massbalance_lhs",
")",
":",
"if",
"reaction",
"not",
"in",
"exchange",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
"==",
"0",
")",
"result",
"=",
"prob",
".",
"solve_unchecked",
"(",
"lp",
".",
"ObjectiveSense",
".",
"Minimize",
")",
"return",
"result",
".",
"success"
] | Try to assign a positive mass to each compound
Return True if successful. The masses are simply constrained by m_i >= 1 and
finding a solution under these conditions proves that the database is mass
consistent. | [
"Try",
"to",
"assign",
"a",
"positive",
"mass",
"to",
"each",
"compound"
] | python | train |
InformaticsMatters/pipelines-utils | src/python/pipelines_utils/utils.py | https://github.com/InformaticsMatters/pipelines-utils/blob/058aa6eceeff28c4ae402f6f58c58720bff0298e/src/python/pipelines_utils/utils.py#L30-L32 | def round_sig(x, sig):
"""Round the number to the specified number of significant figures"""
return round(x, sig - int(floor(log10(abs(x)))) - 1) | [
"def",
"round_sig",
"(",
"x",
",",
"sig",
")",
":",
"return",
"round",
"(",
"x",
",",
"sig",
"-",
"int",
"(",
"floor",
"(",
"log10",
"(",
"abs",
"(",
"x",
")",
")",
")",
")",
"-",
"1",
")"
] | Round the number to the specified number of significant figures | [
"Round",
"the",
"number",
"to",
"the",
"specified",
"number",
"of",
"significant",
"figures"
] | python | train |
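Two quick checks of the significant-figure rounding above (note it raises for x == 0, since log10(0) is undefined):

```python
from math import floor, log10

def round_sig(x, sig):
    return round(x, sig - int(floor(log10(abs(x)))) - 1)

print(round_sig(0.00123456, 3))  # 0.00123
print(round_sig(987654, 2))      # 990000
```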
elliterate/capybara.py | capybara/selector/selector.py | https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/selector/selector.py#L141-L157 | def node_filter(self, name, **kwargs):
"""
Returns a decorator function for adding a node filter.
Args:
name (str): The name of the filter.
**kwargs: Variable keyword arguments for the filter.
Returns:
Callable[[Callable[[Element, Any], bool]]]: A decorator function for adding a node
filter.
"""
def decorator(func):
self.filters[name] = NodeFilter(name, func, **kwargs)
return decorator | [
"def",
"node_filter",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"self",
".",
"filters",
"[",
"name",
"]",
"=",
"NodeFilter",
"(",
"name",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] | Returns a decorator function for adding a node filter.
Args:
name (str): The name of the filter.
**kwargs: Variable keyword arguments for the filter.
Returns:
Callable[[Callable[[Element, Any], bool]]]: A decorator function for adding a node
filter. | [
"Returns",
"a",
"decorator",
"function",
"for",
"adding",
"a",
"node",
"filter",
"."
] | python | test |
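A sketch of how a custom selector might register a node filter with this decorator; the selector name, filter name, and the `add_selector` context-manager usage follow capybara.py's conventions but are assumptions here:

```python
from capybara.selector import add_selector

with add_selector('data_role') as s:
    s.xpath = lambda role: './/*[@data-role="%s"]' % role

    @s.node_filter('enabled', default=True)
    def enabled(node, value):
        # Keep only nodes whose disabled state matches the filter value.
        return node.disabled != value
```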
Arubacloud/pyArubaCloud | ArubaCloud/PyArubaAPI.py | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L309-L323 | def get_package_id(self, name):
"""
Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId | [
"def",
"get_package_id",
"(",
"self",
",",
"name",
")",
":",
"json_scheme",
"=",
"self",
".",
"gen_def_json_scheme",
"(",
"'GetPreConfiguredPackages'",
",",
"dict",
"(",
"HypervisorType",
"=",
"4",
")",
")",
"json_obj",
"=",
"self",
".",
"call_method_post",
"(",
"method",
"=",
"'GetPreConfiguredPackages '",
",",
"json_scheme",
"=",
"json_scheme",
")",
"for",
"package",
"in",
"json_obj",
"[",
"'Value'",
"]",
":",
"packageId",
"=",
"package",
"[",
"'PackageID'",
"]",
"for",
"description",
"in",
"package",
"[",
"'Descriptions'",
"]",
":",
"languageID",
"=",
"description",
"[",
"'LanguageID'",
"]",
"packageName",
"=",
"description",
"[",
"'Text'",
"]",
"if",
"languageID",
"==",
"2",
"and",
"packageName",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
":",
"return",
"packageId"
] | Retrieve the smart package id given its English name
@param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
@return: The package id that depends on the Data center and the size chosen. | [
"Retrieve",
"the",
"smart",
"package",
"id",
"given",
"is",
"English",
"name"
] | python | train |
DataBiosphere/toil | src/toil/common.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1337-L1349 | def getFileSystemSize(dirPath):
"""
Return the free space and total size of the file system hosting `dirPath`.
:param str dirPath: A valid path to a directory.
:return: free space and total size of file system
:rtype: tuple
"""
assert os.path.exists(dirPath)
diskStats = os.statvfs(dirPath)
freeSpace = diskStats.f_frsize * diskStats.f_bavail
diskSize = diskStats.f_frsize * diskStats.f_blocks
return freeSpace, diskSize | [
"def",
"getFileSystemSize",
"(",
"dirPath",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"dirPath",
")",
"diskStats",
"=",
"os",
".",
"statvfs",
"(",
"dirPath",
")",
"freeSpace",
"=",
"diskStats",
".",
"f_frsize",
"*",
"diskStats",
".",
"f_bavail",
"diskSize",
"=",
"diskStats",
".",
"f_frsize",
"*",
"diskStats",
".",
"f_blocks",
"return",
"freeSpace",
",",
"diskSize"
] | Return the free space and total size of the file system hosting `dirPath`.
:param str dirPath: A valid path to a directory.
:return: free space and total size of file system
:rtype: tuple | [
"Return",
"the",
"free",
"space",
"and",
"total",
"size",
"of",
"the",
"file",
"system",
"hosting",
"dirPath",
"."
] | python | train |
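The same statvfs arithmetic as a standalone sketch (POSIX-only, since os.statvfs is unavailable on Windows); the helper name is illustrative:

```python
import os

def file_system_size(dir_path):
    stats = os.statvfs(dir_path)
    free = stats.f_frsize * stats.f_bavail   # bytes available to non-root users
    total = stats.f_frsize * stats.f_blocks  # total bytes on the file system
    return free, total

free, total = file_system_size('/tmp')
print("%.1f GiB free of %.1f GiB" % (free / 2.0**30, total / 2.0**30))
```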
morse-talk/morse-talk | morse_talk/plot.py | https://github.com/morse-talk/morse-talk/blob/71e09ace0aa554d28cada5ee658e43758305b8fa/morse_talk/plot.py#L56-L71 | def plot(message, duration=1, ax=None):
"""
Plot a message
Returns: ax, a Matplotlib Axes
"""
lst_bin = _encode_binary(message)
x, y = _create_x_y(lst_bin, duration)
ax = _create_ax(ax)
ax.plot(x, y, linewidth=2.0)
delta_y = 0.1
ax.set_ylim(-delta_y, 1 + delta_y)
ax.set_yticks([0, 1])
delta_x = 0.5 * duration
ax.set_xlim(-delta_x, len(lst_bin) * duration + delta_x)
return ax | [
"def",
"plot",
"(",
"message",
",",
"duration",
"=",
"1",
",",
"ax",
"=",
"None",
")",
":",
"lst_bin",
"=",
"_encode_binary",
"(",
"message",
")",
"x",
",",
"y",
"=",
"_create_x_y",
"(",
"lst_bin",
",",
"duration",
")",
"ax",
"=",
"_create_ax",
"(",
"ax",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"linewidth",
"=",
"2.0",
")",
"delta_y",
"=",
"0.1",
"ax",
".",
"set_ylim",
"(",
"-",
"delta_y",
",",
"1",
"+",
"delta_y",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"0",
",",
"1",
"]",
")",
"delta_x",
"=",
"0.5",
"*",
"duration",
"ax",
".",
"set_xlim",
"(",
"-",
"delta_x",
",",
"len",
"(",
"lst_bin",
")",
"*",
"duration",
"+",
"delta_x",
")",
"return",
"ax"
] | Plot a message
Returns: ax, a Matplotlib Axes | [
"Plot",
"a",
"message"
] | python | train |
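A hedged usage sketch, assuming morse-talk and matplotlib are installed and `plot` is importable from this module's path:

```python
import matplotlib.pyplot as plt
from morse_talk.plot import plot  # import path assumed from this file's location

ax = plot("SOS", duration=1)  # draws the 0/1 on-off keying of the message
ax.set_title("SOS")
plt.show()
```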
MacHu-GWU/crawlib-project | crawlib/spider.py | https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/spider.py#L24-L114 | def execute_one_to_many_job(parent_class=None,
get_unfinished_kwargs=None,
get_unfinished_limit=None,
parser_func=None,
parser_func_kwargs=None,
build_url_func_kwargs=None,
downloader_func=None,
downloader_func_kwargs=None,
post_process_response_func=None,
post_process_response_func_kwargs=None,
process_item_func_kwargs=None,
logger=None,
sleep_time=None):
"""
A standard one-to-many crawling workflow.
:param parent_class:
:param get_unfinished_kwargs:
:param get_unfinished_limit:
:param parser_func: html parser function.
:param parser_func_kwargs: other keyword arguments for ``parser_func``
:param build_url_func_kwargs: other keyword arguments for
``parent_class().build_url(**build_url_func_kwargs)``
:param downloader_func: a function that takes ``url`` as its first arg, makes an
http request and returns a response/html.
:param downloader_func_kwargs: other keyword arguments for ``downloader_func``
:param post_process_response_func: a callback function taking response/html
as first argument. You can put any logic in it. For example, you can
make it sleep if you detect that you got banned.
:param post_process_response_func_kwargs: other keyword arguments for
``post_process_response_func``
:param process_item_func_kwargs: other keyword arguments for
``ParseResult().process_item(**process_item_func_kwargs)``
:param logger:
:param sleep_time: default 0, wait time before making each request.
"""
# prepare arguments
get_unfinished_kwargs = prepare_kwargs(get_unfinished_kwargs)
parser_func_kwargs = prepare_kwargs(parser_func_kwargs)
build_url_func_kwargs = prepare_kwargs(build_url_func_kwargs)
downloader_func_kwargs = prepare_kwargs(downloader_func_kwargs)
post_process_response_func_kwargs = prepare_kwargs(
post_process_response_func_kwargs)
process_item_func_kwargs = prepare_kwargs(process_item_func_kwargs)
if post_process_response_func is None:
def post_process_response_func(response, **kwargs):
pass
if not isinstance(logger, SpiderLogger):
raise TypeError
if sleep_time is None:
sleep_time = 0
# do the real job
query_set = parent_class.get_all_unfinished(**get_unfinished_kwargs)
if get_unfinished_limit is not None:
query_set = query_set.limit(get_unfinished_limit)
todo = list(query_set)
logger.log_todo_volumn(todo)
for parent_instance in todo:
url = parent_instance.build_url(**build_url_func_kwargs)
logger.log_to_crawl_url(url)
logger.log_sleeper(sleep_time)
time.sleep(sleep_time)
try:
response_or_html = downloader_func(url, **downloader_func_kwargs)
if isinstance(response_or_html, string_types):
parser_func_kwargs["html"] = response_or_html
else:
parser_func_kwargs["response"] = response_or_html
post_process_response_func(
response_or_html, **post_process_response_func_kwargs)
except Exception as e:
logger.log_error(e)
continue
try:
parse_result = parser_func(
parent=parent_instance,
**parser_func_kwargs
)
parse_result.process_item(**process_item_func_kwargs)
logger.log_status(parse_result)
except Exception as e:
logger.log_error(e)
continue | [
"def",
"execute_one_to_many_job",
"(",
"parent_class",
"=",
"None",
",",
"get_unfinished_kwargs",
"=",
"None",
",",
"get_unfinished_limit",
"=",
"None",
",",
"parser_func",
"=",
"None",
",",
"parser_func_kwargs",
"=",
"None",
",",
"build_url_func_kwargs",
"=",
"None",
",",
"downloader_func",
"=",
"None",
",",
"downloader_func_kwargs",
"=",
"None",
",",
"post_process_response_func",
"=",
"None",
",",
"post_process_response_func_kwargs",
"=",
"None",
",",
"process_item_func_kwargs",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"sleep_time",
"=",
"None",
")",
":",
"# prepare arguments",
"get_unfinished_kwargs",
"=",
"prepare_kwargs",
"(",
"get_unfinished_kwargs",
")",
"parser_func_kwargs",
"=",
"prepare_kwargs",
"(",
"parser_func_kwargs",
")",
"build_url_func_kwargs",
"=",
"prepare_kwargs",
"(",
"build_url_func_kwargs",
")",
"downloader_func_kwargs",
"=",
"prepare_kwargs",
"(",
"downloader_func_kwargs",
")",
"post_process_response_func_kwargs",
"=",
"prepare_kwargs",
"(",
"post_process_response_func_kwargs",
")",
"process_item_func_kwargs",
"=",
"prepare_kwargs",
"(",
"process_item_func_kwargs",
")",
"if",
"post_process_response_func",
"is",
"None",
":",
"def",
"post_process_response_func",
"(",
"response",
",",
"*",
"*",
"kwargs",
")",
":",
"pass",
"if",
"not",
"isinstance",
"(",
"logger",
",",
"SpiderLogger",
")",
":",
"raise",
"TypeError",
"if",
"sleep_time",
"is",
"None",
":",
"sleep_time",
"=",
"0",
"# do the real job",
"query_set",
"=",
"parent_class",
".",
"get_all_unfinished",
"(",
"*",
"*",
"get_unfinished_kwargs",
")",
"if",
"get_unfinished_limit",
"is",
"not",
"None",
":",
"query_set",
"=",
"query_set",
".",
"limit",
"(",
"get_unfinished_limit",
")",
"todo",
"=",
"list",
"(",
"query_set",
")",
"logger",
".",
"log_todo_volumn",
"(",
"todo",
")",
"for",
"parent_instance",
"in",
"todo",
":",
"url",
"=",
"parent_instance",
".",
"build_url",
"(",
"*",
"*",
"build_url_func_kwargs",
")",
"logger",
".",
"log_to_crawl_url",
"(",
"url",
")",
"logger",
".",
"log_sleeper",
"(",
"sleep_time",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"try",
":",
"response_or_html",
"=",
"downloader_func",
"(",
"url",
",",
"*",
"*",
"downloader_func_kwargs",
")",
"if",
"isinstance",
"(",
"response_or_html",
",",
"string_types",
")",
":",
"parser_func_kwargs",
"[",
"\"html\"",
"]",
"=",
"response_or_html",
"else",
":",
"parser_func_kwargs",
"[",
"\"response\"",
"]",
"=",
"response_or_html",
"post_process_response_func",
"(",
"response_or_html",
",",
"*",
"*",
"post_process_response_func_kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"log_error",
"(",
"e",
")",
"continue",
"try",
":",
"parse_result",
"=",
"parser_func",
"(",
"parent",
"=",
"parent_instance",
",",
"*",
"*",
"parser_func_kwargs",
")",
"parse_result",
".",
"process_item",
"(",
"*",
"*",
"process_item_func_kwargs",
")",
"logger",
".",
"log_status",
"(",
"parse_result",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"log_error",
"(",
"e",
")",
"continue"
] | A standard one-to-many crawling workflow.
:param parent_class:
:param get_unfinished_kwargs:
:param get_unfinished_limit:
:param parser_func: html parser function.
:param parser_func_kwargs: other keyword arguments for ``parser_func``
:param build_url_func_kwargs: other keyword arguments for
``parent_class().build_url(**build_url_func_kwargs)``
:param downloader_func: a function that takes ``url`` as its first arg, makes an
http request and returns a response/html.
:param downloader_func_kwargs: other keyword arguments for ``downloader_func``
:param post_process_response_func: a callback function taking response/html
as first argument. You can put any logic in it. For example, you can
make it sleep if you detect that you got banned.
:param post_process_response_func_kwargs: other keyword arguments for
``post_process_response_func``
:param process_item_func_kwargs: other keyword arguments for
``ParseResult().process_item(**process_item_func_kwargs)``
:param logger:
:param sleep_time: default 0, wait time before making each request. | [
"A",
"standard",
"one",
"-",
"to",
"-",
"many",
"crawling",
"workflow",
"."
] | python | train |
heikomuller/sco-datastore | scodata/image.py | https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/image.py#L711-L738 | def to_dict(self, img_coll):
"""Create a Json-like dictionary for image group. Extends the basic
object with an array of image identifiers.
Parameters
----------
img_coll : ImageGroupHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary.
"""
# Get the basic Json object from the super class
json_obj = super(DefaultImageGroupManager, self).to_dict(img_coll)
# Add list of images as Json array
images = []
for img_group in img_coll.images:
images.append({
'identifier' : img_group.identifier,
'folder' : img_group.folder,
'name' : img_group.name
})
json_obj['images'] = images
# Transform dictionary of options into list of elements, one per typed
# attribute in the options set.
json_obj['options'] = attribute.attributes_to_dict(img_coll.options)
return json_obj | [
"def",
"to_dict",
"(",
"self",
",",
"img_coll",
")",
":",
"# Get the basic Json object from the super class",
"json_obj",
"=",
"super",
"(",
"DefaultImageGroupManager",
",",
"self",
")",
".",
"to_dict",
"(",
"img_coll",
")",
"# Add list of images as Json array",
"images",
"=",
"[",
"]",
"for",
"img_group",
"in",
"img_coll",
".",
"images",
":",
"images",
".",
"append",
"(",
"{",
"'identifier'",
":",
"img_group",
".",
"identifier",
",",
"'folder'",
":",
"img_group",
".",
"folder",
",",
"'name'",
":",
"img_group",
".",
"name",
"}",
")",
"json_obj",
"[",
"'images'",
"]",
"=",
"images",
"# Transform dictionary of options into list of elements, one per typed",
"# attribute in the options set.",
"json_obj",
"[",
"'options'",
"]",
"=",
"attribute",
".",
"attributes_to_dict",
"(",
"img_coll",
".",
"options",
")",
"return",
"json_obj"
] | Create a Json-like dictionary for image group. Extends the basic
object with an array of image identifiers.
Parameters
----------
img_coll : ImageGroupHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary. | [
"Create",
"a",
"Json",
"-",
"like",
"dictionary",
"for",
"image",
"group",
".",
"Extends",
"the",
"basic",
"object",
"with",
"an",
"array",
"of",
"image",
"identifiers",
"."
] | python | train |
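For illustration, the dictionary produced by ``to_dict`` above would look roughly like the following; all identifiers, folder names, and option attributes are made up:

```python
json_obj = {
    "identifier": "grp-001",   # contributed by the superclass to_dict()
    "images": [
        {"identifier": "img-01", "folder": "set1", "name": "scan_a.png"},
        {"identifier": "img-02", "folder": "set1", "name": "scan_b.png"},
    ],
    # one element per typed attribute in the image group's options set
    "options": [{"name": "pixels_per_dicom", "value": 42}],
}
```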
mrstephenneal/dirutility | dirutility/system.py | https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/system.py#L51-L62 | def execute(self):
"""Execute a system command."""
if self._decode_output:
# Capture and decode system output
with Popen(self.command, shell=True, stdout=PIPE) as process:
self._output = [i.decode("utf-8").strip() for i in process.stdout]
self._success = True
else:
# Execute without capturing output
os.system(self.command)
self._success = True
return self | [
"def",
"execute",
"(",
"self",
")",
":",
"if",
"self",
".",
"_decode_output",
":",
"# Capture and decode system output",
"with",
"Popen",
"(",
"self",
".",
"command",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"PIPE",
")",
"as",
"process",
":",
"self",
".",
"_output",
"=",
"[",
"i",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"process",
".",
"stdout",
"]",
"self",
".",
"_success",
"=",
"True",
"else",
":",
"# Execute without capturing output",
"os",
".",
"system",
"(",
"self",
".",
"command",
")",
"self",
".",
"_success",
"=",
"True",
"return",
"self"
] | Execute a system command. | [
"Execute",
"a",
"system",
"command",
"."
] | python | train |
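The decoded-output branch above boils down to a standard-library pattern that works on its own; a sketch without the dirutility class wrapper:

```python
import os
from subprocess import PIPE, Popen

def run_and_capture(command, decode_output=True):
    """Run a shell command, optionally capturing stdout line by line."""
    if decode_output:
        with Popen(command, shell=True, stdout=PIPE) as process:
            # decode each stdout line and strip the trailing newline
            return [line.decode("utf-8").strip() for line in process.stdout]
    os.system(command)  # fire-and-forget; output goes to the terminal
    return None

print(run_and_capture("echo hello"))  # -> ['hello']
```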
klahnakoski/pyLibrary | jx_base/language.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/language.py#L125-L134 | def is_op(call, op):
"""
:param call: The specific operator instance (a method call)
:param op: The operator we are testing against
:return: isinstance(call, op), but faster
"""
try:
return call.id == op.id
except Exception as e:
return False | [
"def",
"is_op",
"(",
"call",
",",
"op",
")",
":",
"try",
":",
"return",
"call",
".",
"id",
"==",
"op",
".",
"id",
"except",
"Exception",
"as",
"e",
":",
"return",
"False"
] | :param call: The specific operator instance (a method call)
:param op: The operator we are testing against
:return: isinstance(call, op), but faster | [
":",
"param",
"call",
":",
"The",
"specific",
"operator",
"instance",
"(",
"a",
"method",
"call",
")",
":",
"param",
"op",
":",
"The",
"the",
"operator",
"we",
"are",
"testing",
"against",
":",
"return",
":",
"isinstance",
"(",
"call",
"op",
")",
"but",
"faster"
] | python | train |
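The speed-up over ``isinstance`` comes from a single attribute lookup plus an equality check instead of an MRO walk; a self-contained sketch (the class-level ``id`` assignment is an assumption about how the language registry tags its operators):

```python
class AndOp:
    id = 1  # assumed: each operator class carries a unique id

class OrOp:
    id = 2

def is_op(call, op):
    try:
        return call.id == op.id   # one lookup + one comparison
    except Exception:
        return False              # objects without .id simply do not match

print(is_op(AndOp(), AndOp))       # True
print(is_op(AndOp(), OrOp))        # False
print(is_op("not an op", AndOp))   # False, via the except branch
```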
radical-cybertools/radical.entk | src/radical/entk/execman/rp/task_processor.py | https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/rp/task_processor.py#L10-L91 | def resolve_placeholders(path, placeholder_dict):
"""
**Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.
:arguments:
:path: string describing the staging paths, possibly containing a placeholder
:placeholder_dict: dictionary holding the values for placeholders
"""
try:
if isinstance(path, unicode):
path = str(path)
if not isinstance(path, str):
raise TypeError(expected_type=str, actual_type=type(path))
if '$' not in path:
return path
# Extract placeholder from path
if len(path.split('>')) == 1:
placeholder = path.split('/')[0]
else:
if path.split('>')[0].strip().startswith('$'):
placeholder = path.split('>')[0].strip().split('/')[0]
else:
placeholder = path.split('>')[1].strip().split('/')[0]
# SHARED
if placeholder == "$SHARED":
return path.replace(placeholder, 'pilot://')
# Expected placeholder format:
# $Pipeline_{pipeline.uid}_Stage_{stage.uid}_Task_{task.uid}
broken_placeholder = placeholder.split('/')[0].split('_')
if not len(broken_placeholder) == 6:
raise ValueError(
obj='placeholder',
attribute='task',
expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
actual_value=broken_placeholder)
pipeline_name = broken_placeholder[1]
stage_name = broken_placeholder[3]
task_name = broken_placeholder[5]
resolved_placeholder = None
if pipeline_name in placeholder_dict.keys():
if stage_name in placeholder_dict[pipeline_name].keys():
if task_name in placeholder_dict[pipeline_name][stage_name].keys():
resolved_placeholder = path.replace(placeholder, placeholder_dict[
pipeline_name][stage_name][task_name]['path'])
else:
logger.warning('%s not assigned to any task in Stage %s Pipeline %s' %
(task_name, stage_name, pipeline_name))
else:
logger.warning('%s not assigned to any Stage in Pipeline %s' % (
stage_name, pipeline_name))
else:
logger.warning('%s not assigned to any Pipeline' % (pipeline_name))
if not resolved_placeholder:
logger.warning('No placeholder could be found for task name %s \
stage name %s and pipeline name %s. Please be sure to \
use object names and not uids in your references,i.e, \
$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name)')
raise ValueError(
obj='placeholder',
attribute='task',
expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
actual_value=broken_placeholder)
return resolved_placeholder
except Exception, ex:
logger.exception('Failed to resolve placeholder %s, error: %s' %(path, ex))
raise | [
"def",
"resolve_placeholders",
"(",
"path",
",",
"placeholder_dict",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"path",
",",
"unicode",
")",
":",
"path",
"=",
"str",
"(",
"path",
")",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"expected_type",
"=",
"str",
",",
"actual_type",
"=",
"type",
"(",
"path",
")",
")",
"if",
"'$'",
"not",
"in",
"path",
":",
"return",
"path",
"# Extract placeholder from path",
"if",
"len",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
")",
"==",
"1",
":",
"placeholder",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"else",
":",
"if",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'$'",
")",
":",
"placeholder",
"=",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"else",
":",
"placeholder",
"=",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"# SHARED",
"if",
"placeholder",
"==",
"\"$SHARED\"",
":",
"return",
"path",
".",
"replace",
"(",
"placeholder",
",",
"'pilot://'",
")",
"# Expected placeholder format:",
"# $Pipeline_{pipeline.uid}_Stage_{stage.uid}_Task_{task.uid}",
"broken_placeholder",
"=",
"placeholder",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'_'",
")",
"if",
"not",
"len",
"(",
"broken_placeholder",
")",
"==",
"6",
":",
"raise",
"ValueError",
"(",
"obj",
"=",
"'placeholder'",
",",
"attribute",
"=",
"'task'",
",",
"expected_value",
"=",
"'$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED'",
",",
"actual_value",
"=",
"broken_placeholder",
")",
"pipeline_name",
"=",
"broken_placeholder",
"[",
"1",
"]",
"stage_name",
"=",
"broken_placeholder",
"[",
"3",
"]",
"task_name",
"=",
"broken_placeholder",
"[",
"5",
"]",
"resolved_placeholder",
"=",
"None",
"if",
"pipeline_name",
"in",
"placeholder_dict",
".",
"keys",
"(",
")",
":",
"if",
"stage_name",
"in",
"placeholder_dict",
"[",
"pipeline_name",
"]",
".",
"keys",
"(",
")",
":",
"if",
"task_name",
"in",
"placeholder_dict",
"[",
"pipeline_name",
"]",
"[",
"stage_name",
"]",
".",
"keys",
"(",
")",
":",
"resolved_placeholder",
"=",
"path",
".",
"replace",
"(",
"placeholder",
",",
"placeholder_dict",
"[",
"pipeline_name",
"]",
"[",
"stage_name",
"]",
"[",
"task_name",
"]",
"[",
"'path'",
"]",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'%s not assigned to any task in Stage %s Pipeline %s'",
"%",
"(",
"task_name",
",",
"stage_name",
",",
"pipeline_name",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'%s not assigned to any Stage in Pipeline %s'",
"%",
"(",
"stage_name",
",",
"pipeline_name",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'%s not assigned to any Pipeline'",
"%",
"(",
"pipeline_name",
")",
")",
"if",
"not",
"resolved_placeholder",
":",
"logger",
".",
"warning",
"(",
"'No placeholder could be found for task name %s \\\n stage name %s and pipeline name %s. Please be sure to \\\n use object names and not uids in your references,i.e, \\\n $Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name)'",
")",
"raise",
"ValueError",
"(",
"obj",
"=",
"'placeholder'",
",",
"attribute",
"=",
"'task'",
",",
"expected_value",
"=",
"'$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED'",
",",
"actual_value",
"=",
"broken_placeholder",
")",
"return",
"resolved_placeholder",
"except",
"Exception",
",",
"ex",
":",
"logger",
".",
"exception",
"(",
"'Failed to resolve placeholder %s, error: %s'",
"%",
"(",
"path",
",",
"ex",
")",
")",
"raise"
] | **Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.
:arguments:
:path: string describing the staging paths, possibly containing a placeholder
:placeholder_dict: dictionary holding the values for placeholders | [
"**",
"Purpose",
"**",
":",
"Substitute",
"placeholders",
"in",
"staging",
"attributes",
"of",
"a",
"Task",
"with",
"actual",
"paths",
"to",
"the",
"corresponding",
"tasks",
"."
] | python | train |
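A hedged usage sketch of the resolver above. The import path is inferred from the record's file path, the pipeline/stage/task names and filesystem path are invented, and since the module is written for Python 2 (``except Exception, ex``) it would only run under a Python 2 interpreter:

```python
from radical.entk.execman.rp.task_processor import resolve_placeholders  # assumed import

placeholder_dict = {
    "p1": {"s1": {"t1": {"path": "/lustre/run42/p1/s1/t1"}}},
}

print(resolve_placeholders("$Pipeline_p1_Stage_s1_Task_t1/out.txt", placeholder_dict))
# -> /lustre/run42/p1/s1/t1/out.txt
print(resolve_placeholders("$SHARED/input.dat", placeholder_dict))
# -> pilot://input.dat
```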
youversion/crony | crony/crony.py | https://github.com/youversion/crony/blob/c93d14b809a2e878f1b9d6d53d5a04947896583b/crony/crony.py#L232-L290 | def main():
"""Entry point for running crony.
1. If a --cronitor/-c is specified, a "run" ping is sent to cronitor.
2. The argument string passed to crony is run.
3. Next steps depend on the exit code of the command run.
* If the exit status is 0 and a --cronitor/-c is specified, a "complete" ping is sent
to cronitor.
* If the exit status is greater than 0, a message is sent to Sentry with the output
captured from the script's exit.
* If the exit status is greater than 0 and --cronitor/-c is specified, a "fail" ping
is sent to cronitor.
"""
parser = argparse.ArgumentParser(
description='Monitor your crons with cronitor.io & sentry.io',
epilog='https://github.com/youversion/crony',
prog='crony'
)
parser.add_argument('-c', '--cronitor', action='store',
help='Cronitor link identifier. This can be found in your Cronitor unique'
' ping URL right after https://cronitor.link/')
parser.add_argument('-e', '--venv', action='store',
help='Path to virtualenv to source before running script. May be passed'
' as an argument or loaded from an environment variable or config file.')
parser.add_argument('-d', '--cd', action='store',
help='If the script needs to be run in a specific directory, that can be passed'
' or cd can be run prior to running crony.')
parser.add_argument('-l', '--log', action='store',
help='Log file to direct stdout of script run to. Can be passed or '
'defined in config file with "log_file"')
parser.add_argument('-o', '--config', action='store',
help='Path to a crony config file to use.')
parser.add_argument('-p', '--path', action='store',
help='Paths to append to the PATH environment variable before running. '
' Can be passed as an argument or loaded from config file.')
parser.add_argument('-s', '--dsn', action='store',
help='Sentry DSN. May be passed or loaded from an environment variable '
'or a config file.')
parser.add_argument('-t', '--timeout', action='store', default=10, help='Timeout to use when'
' sending requests to Cronitor', type=int)
parser.add_argument('-v', '--verbose', action='store_true', help='Increase level of verbosity'
' output by crony')
parser.add_argument('--version', action='store_true', help='Output crony version # and exit')
parser.add_argument('cmd', nargs=argparse.REMAINDER, help='Command to run and monitor')
cc = CommandCenter(parser.parse_args())
sys.exit(cc.log(*cc.func())) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Monitor your crons with cronitor.io & sentry.io'",
",",
"epilog",
"=",
"'https://github.com/youversion/crony'",
",",
"prog",
"=",
"'crony'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--cronitor'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Cronitor link identifier. This can be found in your Cronitor unique'",
"' ping URL right after https://cronitor.link/'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--venv'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Path to virtualenv to source before running script. May be passed'",
"' as an argument or loaded from an environment variable or config file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--cd'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'If the script needs ran in a specific directory, than can be passed'",
"' or cd can be ran prior to running crony.'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--log'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Log file to direct stdout of script run to. Can be passed or '",
"'defined in config file with \"log_file\"'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--config'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Path to a crony config file to use.'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--path'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Paths to append to the PATH environment variable before running. '",
"' Can be passed as an argument or loaded from config file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--dsn'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'Sentry DSN. May be passed or loaded from an environment variable '",
"'or a config file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--timeout'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"10",
",",
"help",
"=",
"'Timeout to use when'",
"' sending requests to Cronitor'",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Increase level of verbosity'",
"' output by crony'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Output crony version # and exit'",
")",
"parser",
".",
"add_argument",
"(",
"'cmd'",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"'Command to run and monitor'",
")",
"cc",
"=",
"CommandCenter",
"(",
"parser",
".",
"parse_args",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"cc",
".",
"log",
"(",
"*",
"cc",
".",
"func",
"(",
")",
")",
")"
] | Entry point for running crony.
1. If a --cronitor/-c is specified, a "run" ping is sent to cronitor.
2. The argument string passed to crony is run.
3. Next steps depend on the exit code of the command run.
* If the exit status is 0 and a --cronitor/-c is specified, a "complete" ping is sent
to cronitor.
* If the exit status is greater than 0, a message is sent to Sentry with the output
captured from the script's exit.
* If the exit status is greater than 0 and --cronitor/-c is specified, a "fail" ping
is sent to cronitor. | [
"Entry",
"point",
"for",
"running",
"crony",
"."
] | python | train |
elifesciences/elife-tools | elifetools/parseJATS.py | https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1699-L1713 | def correspondence(soup):
"""
Find the corresp tags included in author-notes
for primary correspondence
"""
correspondence = []
author_notes_nodes = raw_parser.author_notes(soup)
if author_notes_nodes:
corresp_nodes = raw_parser.corresp(author_notes_nodes)
for tag in corresp_nodes:
correspondence.append(tag.text)
return correspondence | [
"def",
"correspondence",
"(",
"soup",
")",
":",
"correspondence",
"=",
"[",
"]",
"author_notes_nodes",
"=",
"raw_parser",
".",
"author_notes",
"(",
"soup",
")",
"if",
"author_notes_nodes",
":",
"corresp_nodes",
"=",
"raw_parser",
".",
"corresp",
"(",
"author_notes_nodes",
")",
"for",
"tag",
"in",
"corresp_nodes",
":",
"correspondence",
".",
"append",
"(",
"tag",
".",
"text",
")",
"return",
"correspondence"
] | Find the corresp tags included in author-notes
for primary correspondence | [
"Find",
"the",
"corresp",
"tags",
"included",
"in",
"author",
"-",
"notes",
"for",
"primary",
"correspondence"
] | python | train |
awslabs/aws-shell | awsshell/shellcomplete.py | https://github.com/awslabs/aws-shell/blob/8950f03d9d720879890af6c11537b8f9789ce5a9/awsshell/shellcomplete.py#L53-L56 | def change_profile(self, profile_name):
"""Change the profile used for server side completions."""
self._server_side_completer = self._create_server_side_completer(
session=botocore.session.Session(profile=profile_name)) | [
"def",
"change_profile",
"(",
"self",
",",
"profile_name",
")",
":",
"self",
".",
"_server_side_completer",
"=",
"self",
".",
"_create_server_side_completer",
"(",
"session",
"=",
"botocore",
".",
"session",
".",
"Session",
"(",
"profile",
"=",
"profile_name",
")",
")"
] | Change the profile used for server side completions. | [
"Change",
"the",
"profile",
"used",
"for",
"server",
"side",
"completions",
"."
] | python | train |
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L975-L989 | def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title) | [
"def",
"_make_module_refnode",
"(",
"self",
",",
"builder",
",",
"fromdocname",
",",
"name",
",",
"contnode",
")",
":",
"# Get additional info for modules.",
"docname",
",",
"synopsis",
",",
"platform",
",",
"deprecated",
"=",
"self",
".",
"data",
"[",
"'modules'",
"]",
"[",
"name",
"]",
"title",
"=",
"name",
"if",
"synopsis",
":",
"title",
"+=",
"': '",
"+",
"synopsis",
"if",
"deprecated",
":",
"title",
"+=",
"_",
"(",
"' (deprecated)'",
")",
"if",
"platform",
":",
"title",
"+=",
"' ('",
"+",
"platform",
"+",
"')'",
"return",
"make_refnode",
"(",
"builder",
",",
"fromdocname",
",",
"docname",
",",
"'module-'",
"+",
"name",
",",
"contnode",
",",
"title",
")"
] | Helper function to generate new xref node based on
current environment. | [
"Helper",
"function",
"to",
"generate",
"new",
"xref",
"node",
"based",
"on",
"current",
"environment",
"."
] | python | train |
dtmilano/AndroidViewClient | src/com/dtmilano/android/viewclient.py | https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L1235-L1249 | def openQuickSettings(self):
'''
Opens the Quick Settings shade.
'''
# the tablet has a different Notification/Quick Settings bar depending on x
w23 = 2 * self.device.display['width'] / 3
s = (w23, 0)
e = (w23, self.device.display['height']/2)
self.device.drag(s, e, 500, 20, -1)
self.vc.sleep(1)
if self.vc.getSdkVersion() >= 20:
self.device.drag(s, e, 500, 20, -1)
self.vc.sleep(1)
self.vc.dump(-1) | [
"def",
"openQuickSettings",
"(",
"self",
")",
":",
"# the tablet has a different Notification/Quick Settings bar depending on x",
"w23",
"=",
"2",
"*",
"self",
".",
"device",
".",
"display",
"[",
"'width'",
"]",
"/",
"3",
"s",
"=",
"(",
"w23",
",",
"0",
")",
"e",
"=",
"(",
"w23",
",",
"self",
".",
"device",
".",
"display",
"[",
"'height'",
"]",
"/",
"2",
")",
"self",
".",
"device",
".",
"drag",
"(",
"s",
",",
"e",
",",
"500",
",",
"20",
",",
"-",
"1",
")",
"self",
".",
"vc",
".",
"sleep",
"(",
"1",
")",
"if",
"self",
".",
"vc",
".",
"getSdkVersion",
"(",
")",
">=",
"20",
":",
"self",
".",
"device",
".",
"drag",
"(",
"s",
",",
"e",
",",
"500",
",",
"20",
",",
"-",
"1",
")",
"self",
".",
"vc",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"vc",
".",
"dump",
"(",
"-",
"1",
")"
] | Opens the Quick Settings shade. | [
"Opens",
"the",
"Quick",
"Settings",
"shade",
"."
] | python | train |
kashifrazzaqui/again | again/statemachine.py | https://github.com/kashifrazzaqui/again/blob/09cfbda7650d44447dbb0b27780835e9236741ea/again/statemachine.py#L122-L126 | def can(self, event):
"""
returns a list of states that can result from processing this event
"""
return [t.new_state for t in self._transitions if t.event.equals(event)] | [
"def",
"can",
"(",
"self",
",",
"event",
")",
":",
"return",
"[",
"t",
".",
"new_state",
"for",
"t",
"in",
"self",
".",
"_transitions",
"if",
"t",
".",
"event",
".",
"equals",
"(",
"event",
")",
"]"
] | returns a list of states that can result from processing this event | [
"returns",
"a",
"list",
"of",
"states",
"that",
"can",
"result",
"from",
"processing",
"this",
"event"
] | python | train |
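A self-contained sketch showing what ``can`` returns; ``Event``, ``Transition``, and ``State`` here are minimal stand-ins for the again.statemachine classes:

```python
class Event:
    def __init__(self, name):
        self.name = name

    def equals(self, other):
        return self.name == other.name

class Transition:
    def __init__(self, event, new_state):
        self.event, self.new_state = event, new_state

class State:
    def __init__(self, transitions):
        self._transitions = transitions

    def can(self, event):
        # all states reachable by processing this event
        return [t.new_state for t in self._transitions if t.event.equals(event)]

idle = State([Transition(Event("start"), "running"),
              Transition(Event("start"), "queued")])
print(idle.can(Event("start")))  # ['running', 'queued']
print(idle.can(Event("stop")))   # []
```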
koenedaele/skosprovider | skosprovider/skos.py | https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/skos.py#L249-L262 | def _sortkey(self, key='uri', language='any'):
'''
Provide a single sortkey for this conceptscheme.
:param string key: Either `uri`, `label` or `sortlabel`.
:param string language: The preferred language to receive the label in
if key is `label` or `sortlabel`. This should be a valid IANA language tag.
:rtype: :class:`str`
'''
if key == 'uri':
return self.uri
else:
l = label(self.labels, language, key == 'sortlabel')
return l.label.lower() if l else '' | [
"def",
"_sortkey",
"(",
"self",
",",
"key",
"=",
"'uri'",
",",
"language",
"=",
"'any'",
")",
":",
"if",
"key",
"==",
"'uri'",
":",
"return",
"self",
".",
"uri",
"else",
":",
"l",
"=",
"label",
"(",
"self",
".",
"labels",
",",
"language",
",",
"key",
"==",
"'sortlabel'",
")",
"return",
"l",
".",
"label",
".",
"lower",
"(",
")",
"if",
"l",
"else",
"''"
] | Provide a single sortkey for this conceptscheme.
:param string key: Either `uri`, `label` or `sortlabel`.
:param string language: The preferred language to receive the label in
if key is `label` or `sortlabel`. This should be a valid IANA language tag.
:rtype: :class:`str` | [
"Provide",
"a",
"single",
"sortkey",
"for",
"this",
"conceptscheme",
"."
] | python | valid |
jasonrbriggs/stomp.py | stomp/adapter/multicast.py | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/adapter/multicast.py#L135-L142 | def disconnect(self, receipt=None, headers=None, **keyword_headers):
"""
:param str receipt:
:param dict headers:
:param keyword_headers:
"""
Protocol12.disconnect(self, receipt, headers, **keyword_headers)
self.transport.stop() | [
"def",
"disconnect",
"(",
"self",
",",
"receipt",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"keyword_headers",
")",
":",
"Protocol12",
".",
"disconnect",
"(",
"self",
",",
"receipt",
",",
"headers",
",",
"*",
"*",
"keyword_headers",
")",
"self",
".",
"transport",
".",
"stop",
"(",
")"
] | :param str receipt:
:param dict headers:
:param keyword_headers: | [
":",
"param",
"str",
"receipt",
":",
":",
"param",
"dict",
"headers",
":",
":",
"param",
"keyword_headers",
":"
] | python | train |
vingd/encrypted-pickle-python | encryptedpickle/encryptedpickle.py | https://github.com/vingd/encrypted-pickle-python/blob/7656233598e02e65971f69e11849a0f288b2b2a5/encryptedpickle/encryptedpickle.py#L636-L642 | def _read_version(self, data):
'''Read header version from data'''
version = ord(data[0])
if version not in self.VERSIONS:
raise Exception('Version not defined: %d' % version)
return version | [
"def",
"_read_version",
"(",
"self",
",",
"data",
")",
":",
"version",
"=",
"ord",
"(",
"data",
"[",
"0",
"]",
")",
"if",
"version",
"not",
"in",
"self",
".",
"VERSIONS",
":",
"raise",
"Exception",
"(",
"'Version not defined: %d'",
"%",
"version",
")",
"return",
"version"
] | Read header version from data | [
"Read",
"header",
"version",
"from",
"data"
] | python | valid |
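``ord(data[0])`` is the Python 2 idiom for reading one header byte; on Python 3, indexing ``bytes`` already yields an ``int``. A sketch of the same check written for Python 3, with a made-up ``VERSIONS`` set:

```python
VERSIONS = {1, 2}  # hypothetical set of supported header versions

def read_version(data: bytes) -> int:
    version = data[0]            # Python 3: bytes[0] is already an int
    if version not in VERSIONS:
        raise ValueError("Version not defined: %d" % version)
    return version

print(read_version(b"\x02rest-of-header"))  # -> 2
```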
carta/ldap_tools | src/ldap_tools/key.py | https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L214-L222 | def list(config): # pragma: no cover
"""List SSH public key(s) from LDAP."""
client = Client()
client.prepare_connection()
key_api = API(client)
for key, values in key_api.get_keys_from_ldap().items():
print("{}: ".format(key))
for value in [v.decode() for v in values]:
print("\t - {}".format(value)) | [
"def",
"list",
"(",
"config",
")",
":",
"# pragma: no cover",
"client",
"=",
"Client",
"(",
")",
"client",
".",
"prepare_connection",
"(",
")",
"key_api",
"=",
"API",
"(",
"client",
")",
"for",
"key",
",",
"values",
"in",
"key_api",
".",
"get_keys_from_ldap",
"(",
")",
".",
"items",
"(",
")",
":",
"print",
"(",
"\"{}: \"",
".",
"format",
"(",
"key",
")",
")",
"for",
"value",
"in",
"[",
"v",
".",
"decode",
"(",
")",
"for",
"v",
"in",
"values",
"]",
":",
"print",
"(",
"\"\\t - {}\"",
".",
"format",
"(",
"value",
")",
")"
] | List SSH public key(s) from LDAP. | [
"List",
"SSH",
"public",
"key",
"(",
"s",
")",
"from",
"LDAP",
"."
] | python | train |
Raynes/quarantine | quarantine/cdc.py | https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L38-L41 | def create_env(self):
"""Create a virtual environment."""
virtualenv(self.env, _err=sys.stderr)
os.mkdir(self.env_bin) | [
"def",
"create_env",
"(",
"self",
")",
":",
"virtualenv",
"(",
"self",
".",
"env",
",",
"_err",
"=",
"sys",
".",
"stderr",
")",
"os",
".",
"mkdir",
"(",
"self",
".",
"env_bin",
")"
] | Create a virtual environment. | [
"Create",
"a",
"virtual",
"environment",
"."
] | python | train |
zeromake/aiko | aiko/response.py | https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/response.py#L197-L202 | def status(self, status: int) -> None:
"""
Set the response status
"""
self._status = status
self._message = STATUS_CODES[status] | [
"def",
"status",
"(",
"self",
",",
"status",
":",
"int",
")",
"->",
"None",
":",
"self",
".",
"_status",
"=",
"status",
"self",
".",
"_message",
"=",
"STATUS_CODES",
"[",
"status",
"]"
] | Set the response status | [
"设置响应状态"
] | python | train |
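The record is presumably the setter half of a ``status`` property; a self-contained sketch of the code/message pairing, with a two-entry stand-in for the full ``STATUS_CODES`` table:

```python
STATUS_CODES = {200: "OK", 404: "Not Found"}  # stand-in for the full table

class Response:
    def __init__(self):
        self._status, self._message = 200, STATUS_CODES[200]

    @property
    def status(self) -> int:
        return self._status

    @status.setter
    def status(self, status: int) -> None:
        # keep the reason phrase in sync with the numeric code
        self._status = status
        self._message = STATUS_CODES[status]

r = Response()
r.status = 404
print(r.status, r._message)  # 404 Not Found
```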
mardiros/pyshop | pyshop/models.py | https://github.com/mardiros/pyshop/blob/b42510b9c3fa16e0e5710457401ac38fea5bf7a0/pyshop/models.py#L866-L885 | def by_filename(cls, session, release, filename):
"""
Get a release file for a given release and a given filename.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param release: release
:type release: :class:`pyshop.models.Release`
:param filename: filename of the release file
:type filename: unicode
:return: release file
:rtype: :class:`pyshop.models.ReleaseFile`
"""
return cls.first(session,
where=(ReleaseFile.release_id == release.id,
ReleaseFile.filename == filename,
)) | [
"def",
"by_filename",
"(",
"cls",
",",
"session",
",",
"release",
",",
"filename",
")",
":",
"return",
"cls",
".",
"first",
"(",
"session",
",",
"where",
"=",
"(",
"ReleaseFile",
".",
"release_id",
"==",
"release",
".",
"id",
",",
"ReleaseFile",
".",
"filename",
"==",
"filename",
",",
")",
")"
] | Get a release file for a given release and a given filename.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param release: release
:type release: :class:`pyshop.models.Release`
:param filename: filename of the release file
:type filename: unicode
:return: release file
:rtype: :class:`pyshop.models.ReleaseFile` | [
"Get",
"a",
"release",
"file",
"for",
"a",
"given",
"release",
"and",
"a",
"given",
"filename",
"."
] | python | train |
django-haystack/pysolr | pysolr.py | https://github.com/django-haystack/pysolr/blob/ee28b39324fa21a99842d297e313c1759d8adbd2/pysolr.py#L995-L1016 | def optimize(self, commit=True, waitFlush=None, waitSearcher=None, maxSegments=None, handler='update'):
"""
Tells Solr to streamline the number of segments used, essentially a
defragmentation operation.
Optionally accepts ``maxSegments``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.optimize()
"""
if maxSegments:
msg = '<optimize maxSegments="%d" />' % maxSegments
else:
msg = '<optimize />'
return self._update(msg, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher, handler=handler) | [
"def",
"optimize",
"(",
"self",
",",
"commit",
"=",
"True",
",",
"waitFlush",
"=",
"None",
",",
"waitSearcher",
"=",
"None",
",",
"maxSegments",
"=",
"None",
",",
"handler",
"=",
"'update'",
")",
":",
"if",
"maxSegments",
":",
"msg",
"=",
"'<optimize maxSegments=\"%d\" />'",
"%",
"maxSegments",
"else",
":",
"msg",
"=",
"'<optimize />'",
"return",
"self",
".",
"_update",
"(",
"msg",
",",
"commit",
"=",
"commit",
",",
"waitFlush",
"=",
"waitFlush",
",",
"waitSearcher",
"=",
"waitSearcher",
",",
"handler",
"=",
"handler",
")"
] | Tells Solr to streamline the number of segments used, essentially a
defragmentation operation.
Optionally accepts ``maxSegments``. Default is ``None``.
Optionally accepts ``waitFlush``. Default is ``None``.
Optionally accepts ``waitSearcher``. Default is ``None``.
Usage::
solr.optimize() | [
"Tells",
"Solr",
"to",
"streamline",
"the",
"number",
"of",
"segments",
"used",
"essentially",
"a",
"defragmentation",
"operation",
"."
] | python | train |
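A hedged usage sketch, assuming a Solr core reachable at a local URL:

```python
import pysolr

solr = pysolr.Solr("http://localhost:8983/solr/my_core", timeout=10)
solr.optimize(maxSegments=1)  # sends '<optimize maxSegments="1" />'
solr.optimize()               # sends plain '<optimize />'
```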
molmod/molmod | molmod/io/number_state.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/number_state.py#L260-L325 | def load(self, filename, subset=None):
"""Load data into the registered fields
Argument:
| ``filename`` -- the filename to read from
Optional argument:
| ``subset`` -- a list of field names that are read from the file.
If not given, all data is read from the file.
"""
with open(filename, "r") as f:
name = None
num_names = 0
while True:
# read a header line
line = f.readline()
if len(line) == 0:
break
# process the header line
words = line.split()
name = words[0]
attr = self._fields.get(name)
if attr is None:
raise FileFormatError("Wrong header: unknown field %s" % name)
if not words[1].startswith("kind="):
raise FileFormatError("Malformatted array header line. (kind)")
kind = words[1][5:]
expected_kind = attr.get_kind(attr.get())
if kind != expected_kind:
raise FileFormatError("Wrong header: kind of field %s does not match. Got %s, expected %s" % (name, kind, expected_kind))
skip = ((subset is not None) and (name not in subset))
print(words)
if (words[2].startswith("shape=(") and words[2].endswith(")")):
if not isinstance(attr, ArrayAttr):
raise FileFormatError("field '%s' is not an array." % name)
shape = words[2][7:-1]
if shape[-1] == ',':
shape = shape[:-1]
try:
shape = tuple(int(word) for word in shape.split(","))
except ValueError:
raise FileFormatError("Malformatted array header. (shape)")
expected_shape = attr.get().shape
if shape != expected_shape:
raise FileFormatError("Wrong header: shape of field %s does not match. Got %s, expected %s" % (name, shape, expected_shape))
attr.load(f, skip)
elif words[2].startswith("value="):
if not isinstance(attr, ScalarAttr):
raise FileFormatError("field '%s' is not a single value." % name)
if not skip:
if kind == 'i':
attr.set(int(words[2][6:]))
else:
attr.set(float(words[2][6:]))
else:
raise FileFormatError("Malformatted array header line. (shape/value)")
num_names += 1
if num_names != len(self._fields) and subset is None:
raise FileFormatError("Some fields are missing in the file.") | [
"def",
"load",
"(",
"self",
",",
"filename",
",",
"subset",
"=",
"None",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"f",
":",
"name",
"=",
"None",
"num_names",
"=",
"0",
"while",
"True",
":",
"# read a header line",
"line",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"break",
"# process the header line",
"words",
"=",
"line",
".",
"split",
"(",
")",
"name",
"=",
"words",
"[",
"0",
"]",
"attr",
"=",
"self",
".",
"_fields",
".",
"get",
"(",
"name",
")",
"if",
"attr",
"is",
"None",
":",
"raise",
"FileFormatError",
"(",
"\"Wrong header: unknown field %s\"",
"%",
"name",
")",
"if",
"not",
"words",
"[",
"1",
"]",
".",
"startswith",
"(",
"\"kind=\"",
")",
":",
"raise",
"FileFormatError",
"(",
"\"Malformatted array header line. (kind)\"",
")",
"kind",
"=",
"words",
"[",
"1",
"]",
"[",
"5",
":",
"]",
"expected_kind",
"=",
"attr",
".",
"get_kind",
"(",
"attr",
".",
"get",
"(",
")",
")",
"if",
"kind",
"!=",
"expected_kind",
":",
"raise",
"FileFormatError",
"(",
"\"Wrong header: kind of field %s does not match. Got %s, expected %s\"",
"%",
"(",
"name",
",",
"kind",
",",
"expected_kind",
")",
")",
"skip",
"=",
"(",
"(",
"subset",
"is",
"not",
"None",
")",
"and",
"(",
"name",
"not",
"in",
"subset",
")",
")",
"print",
"(",
"words",
")",
"if",
"(",
"words",
"[",
"2",
"]",
".",
"startswith",
"(",
"\"shape=(\"",
")",
"and",
"words",
"[",
"2",
"]",
".",
"endswith",
"(",
"\")\"",
")",
")",
":",
"if",
"not",
"isinstance",
"(",
"attr",
",",
"ArrayAttr",
")",
":",
"raise",
"FileFormatError",
"(",
"\"field '%s' is not an array.\"",
"%",
"name",
")",
"shape",
"=",
"words",
"[",
"2",
"]",
"[",
"7",
":",
"-",
"1",
"]",
"if",
"shape",
"[",
"-",
"1",
"]",
"==",
"', '",
":",
"shape",
"=",
"shape",
"[",
":",
"-",
"1",
"]",
"try",
":",
"shape",
"=",
"tuple",
"(",
"int",
"(",
"word",
")",
"for",
"word",
"in",
"shape",
".",
"split",
"(",
"\",\"",
")",
")",
"except",
"ValueError",
":",
"raise",
"FileFormatError",
"(",
"\"Malformatted array header. (shape)\"",
")",
"expected_shape",
"=",
"attr",
".",
"get",
"(",
")",
".",
"shape",
"if",
"shape",
"!=",
"expected_shape",
":",
"raise",
"FileFormatError",
"(",
"\"Wrong header: shape of field %s does not match. Got %s, expected %s\"",
"%",
"(",
"name",
",",
"shape",
",",
"expected_shape",
")",
")",
"attr",
".",
"load",
"(",
"f",
",",
"skip",
")",
"elif",
"words",
"[",
"2",
"]",
".",
"startswith",
"(",
"\"value=\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"attr",
",",
"ScalarAttr",
")",
":",
"raise",
"FileFormatError",
"(",
"\"field '%s' is not a single value.\"",
"%",
"name",
")",
"if",
"not",
"skip",
":",
"if",
"kind",
"==",
"'i'",
":",
"attr",
".",
"set",
"(",
"int",
"(",
"words",
"[",
"2",
"]",
"[",
"6",
":",
"]",
")",
")",
"else",
":",
"attr",
".",
"set",
"(",
"float",
"(",
"words",
"[",
"2",
"]",
"[",
"6",
":",
"]",
")",
")",
"else",
":",
"raise",
"FileFormatError",
"(",
"\"Malformatted array header line. (shape/value)\"",
")",
"num_names",
"+=",
"1",
"if",
"num_names",
"!=",
"len",
"(",
"self",
".",
"_fields",
")",
"and",
"subset",
"is",
"None",
":",
"raise",
"FileFormatError",
"(",
"\"Some fields are missing in the file.\"",
")"
] | Load data into the registered fields
Argument:
| ``filename`` -- the filename to read from
Optional argument:
| ``subset`` -- a list of field names that are read from the file.
If not given, all data is read from the file. | [
"Load",
"data",
"into",
"the",
"registered",
"fields"
] | python | train |
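For orientation, the loader above expects one header line per registered field: an array header carrying ``kind=`` and ``shape=(...)`` (the body lines are consumed by ``attr.load``), or a scalar header carrying ``kind=`` and ``value=``. A hypothetical file in that format; the array body layout and the ``f`` kind letter for floats are assumptions read off the int/float branch:

```python
# Hypothetical example of the on-disk format parsed by load().
state_text = (
    "coordinates kind=f shape=(3,2)\n"
    " 0.0 1.0\n 2.0 3.0\n 4.0 5.0\n"   # array body, read by attr.load(f)
    "charge kind=i value=-1\n"          # scalar, parsed from the header itself
)
with open("example.state", "w") as f:
    f.write(state_text)
```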
spyder-ide/spyder | spyder/plugins/projects/widgets/explorer.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/widgets/explorer.py#L154-L167 | def delete(self, fnames=None):
"""Delete files"""
if fnames is None:
fnames = self.get_selected_filenames()
multiple = len(fnames) > 1
yes_to_all = None
for fname in fnames:
if fname == self.proxymodel.path_list[0]:
self.sig_delete_project.emit()
else:
yes_to_all = self.delete_file(fname, multiple, yes_to_all)
if yes_to_all is not None and not yes_to_all:
# Canceled
break | [
"def",
"delete",
"(",
"self",
",",
"fnames",
"=",
"None",
")",
":",
"if",
"fnames",
"is",
"None",
":",
"fnames",
"=",
"self",
".",
"get_selected_filenames",
"(",
")",
"multiple",
"=",
"len",
"(",
"fnames",
")",
">",
"1",
"yes_to_all",
"=",
"None",
"for",
"fname",
"in",
"fnames",
":",
"if",
"fname",
"==",
"self",
".",
"proxymodel",
".",
"path_list",
"[",
"0",
"]",
":",
"self",
".",
"sig_delete_project",
".",
"emit",
"(",
")",
"else",
":",
"yes_to_all",
"=",
"self",
".",
"delete_file",
"(",
"fname",
",",
"multiple",
",",
"yes_to_all",
")",
"if",
"yes_to_all",
"is",
"not",
"None",
"and",
"not",
"yes_to_all",
":",
"# Canceled\r",
"break"
] | Delete files | [
"Delete",
"files"
] | python | train |
bokeh/bokeh | bokeh/embed/bundle.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/bundle.py#L156-L167 | def _use_gl(objs):
''' Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool
'''
from ..models.plots import Plot
return _any(objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == "webgl") | [
"def",
"_use_gl",
"(",
"objs",
")",
":",
"from",
".",
".",
"models",
".",
"plots",
"import",
"Plot",
"return",
"_any",
"(",
"objs",
",",
"lambda",
"obj",
":",
"isinstance",
"(",
"obj",
",",
"Plot",
")",
"and",
"obj",
".",
"output_backend",
"==",
"\"webgl\"",
")"
] | Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool | [
"Whether",
"a",
"collection",
"of",
"Bokeh",
"objects",
"contains",
"a",
"plot",
"requesting",
"WebGL"
] | python | train |
YosaiProject/yosai | yosai/core/subject/subject.py | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L348-L365 | def has_role_collective(self, role_s, logical_operator=all):
"""
:param role_s: 1..N role identifier
:type role_s: a Set of Strings
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:returns: a Boolean
"""
if self.authorized:
return self.security_manager.has_role_collective(self.identifiers,
role_s,
logical_operator)
else:
msg = 'Cannot check permission when identifiers aren\'t set!'
raise ValueError(msg) | [
"def",
"has_role_collective",
"(",
"self",
",",
"role_s",
",",
"logical_operator",
"=",
"all",
")",
":",
"if",
"self",
".",
"authorized",
":",
"return",
"self",
".",
"security_manager",
".",
"has_role_collective",
"(",
"self",
".",
"identifiers",
",",
"role_s",
",",
"logical_operator",
")",
"else",
":",
"msg",
"=",
"'Cannot check permission when identifiers aren\\'t set!'",
"raise",
"ValueError",
"(",
"msg",
")"
] | :param role_s: 1..N role identifier
:type role_s: a Set of Strings
:param logical_operator: indicates whether all or at least one
permission check is true (any)
:type: any OR all (from python standard library)
:returns: a Boolean | [
":",
"param",
"role_s",
":",
"1",
"..",
"N",
"role",
"identifier",
":",
"type",
"role_s",
":",
"a",
"Set",
"of",
"Strings"
] | python | train |
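The ``logical_operator`` parameter is just ``any`` or ``all`` from the standard library applied to one membership check per requested role; a stub sketch of those semantics (``StubSubject`` is hypothetical):

```python
class StubSubject:
    def __init__(self, roles):
        self._roles = set(roles)

    def has_role_collective(self, role_s, logical_operator=all):
        # any/all over one boolean per requested role
        return logical_operator(r in self._roles for r in role_s)

s = StubSubject({"editor"})
print(s.has_role_collective({"admin", "editor"}, logical_operator=all))  # False
print(s.has_role_collective({"admin", "editor"}, logical_operator=any))  # True
```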
ttsteiger/cryptocompy | cryptocompy/price.py | https://github.com/ttsteiger/cryptocompy/blob/b0514079202587a5bfb3a4f2c871196315b9302e/cryptocompy/price.py#L7-L83 | def get_current_price(fsyms, tsyms, e='all', try_conversion=True, full=False,
format='raw'):
"""Get latest trading price or full trading information in display or raw
format for the specified FROM/TO currency pairs.
Args:
fsyms: Single string or list of FROM symbols.
tsyms: Single string or list of TO symbols.
e: Default returns average price across all exchanges. Can be set to the
name of a single exchange.
try_conversion: If the crypto does not trade directly into the toSymbol
requested, BTC will be used for conversion. If set to false, it will
try to get values without using any conversion at all.
full: Default of False returns only the latest price. True returns the
following dictionary structure containing the full trading info:
format: Default returns the 'RAW' format. Can be set to 'DISPLAY'
format.
Returns:
Returns a dictionary containing the latest price pairs if full is set to
false:
{fsym1: {tsym1: ..., tsym2:..., ...},
fsym2: {...},
...}
or full trading info dictionaries for all the price pairs in the other
case:
{fsym1: {tsym1: {'CHANGE24HOUR': ...,
'CHANGEPCT24HOUR': ...,
'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET' ...,
'MKTCAP': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'SUPPLY': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...},
tsym2: ..., ...},
fsym2: {...},
...}
"""
# select API function based on 'full' parameter value
if not full:
func = 'pricemulti'
else:
func = 'pricemultifull'
# convert single fsym and tsym input to single element lists
if not isinstance(fsyms, list):
fsyms = [fsyms]
if not isinstance(tsyms, list):
tsyms = [tsyms]
# load data
url = build_url(func, fsyms=fsyms, tsyms=tsyms, e=e,
try_conversion=try_conversion)
data = load_data(url)
# select right format to return for full requests
if full and format == 'raw':
data = data['RAW']
elif full and format == 'display':
data = data['DISPLAY']
return data | [
"def",
"get_current_price",
"(",
"fsyms",
",",
"tsyms",
",",
"e",
"=",
"'all'",
",",
"try_conversion",
"=",
"True",
",",
"full",
"=",
"False",
",",
"format",
"=",
"'raw'",
")",
":",
"# select API function based on 'full' parameter value\r",
"if",
"not",
"full",
":",
"func",
"=",
"'pricemulti'",
"else",
":",
"func",
"=",
"'pricemultifull'",
"# convert single fsym and tsym input to single element lists\r",
"if",
"not",
"isinstance",
"(",
"fsyms",
",",
"list",
")",
":",
"fsyms",
"=",
"[",
"fsyms",
"]",
"if",
"not",
"isinstance",
"(",
"tsyms",
",",
"list",
")",
":",
"tsyms",
"=",
"[",
"tsyms",
"]",
"# load data\r",
"url",
"=",
"build_url",
"(",
"func",
",",
"fsyms",
"=",
"fsyms",
",",
"tsyms",
"=",
"tsyms",
",",
"e",
"=",
"e",
",",
"try_conversion",
"=",
"try_conversion",
")",
"data",
"=",
"load_data",
"(",
"url",
")",
"# select right format to return for full requests\r",
"if",
"full",
"and",
"format",
"==",
"'raw'",
":",
"data",
"=",
"data",
"[",
"'RAW'",
"]",
"elif",
"full",
"and",
"format",
"==",
"'display'",
":",
"data",
"=",
"data",
"[",
"'DISPLAY'",
"]",
"return",
"data"
] | Get latest trading price or full trading information in display or raw
format for the specified FROM/TO currency pairs.
Args:
fsyms: Single string or list of FROM symbols.
tsyms: Single string or list of TO symbols.
e: Default returns average price across all exchanges. Can be set to the
name of a single exchange.
try_conversion: If the crypto does not trade directly into the toSymbol
requested, BTC will be used for conversion. If set to false, it will
try to get values without using any conversion at all.
full: Default of False returns only the latest price. True returns the
following dictionary structure containing the full trading info:
format: Default returns the 'RAW' format. Can be set to 'DISPLAY'
format.
Returns:
Returns a dictionary containing the latest price pairs if full is set to
false:
{fsym1: {tsym1: ..., tsym2:..., ...},
fsym2: {...},
...}
or full trading info dictionaries for all the price pairs in the other
case:
{fsym1: {tsym1: {'CHANGE24HOUR': ...,
'CHANGEPCT24HOUR': ...,
'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET' ...,
'MKTCAP': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'SUPPLY': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...},
tsym2: ..., ...},
fsym2: {...},
...} | [
"Get",
"latest",
"trading",
"price",
"or",
"full",
"trading",
"information",
"in",
"display",
"or",
"raw",
"format",
"for",
"the",
"specified",
"FROM",
"/",
"TO",
"currency",
"pairs",
".",
"Args",
":",
"fsyms",
":",
"Single",
"string",
"or",
"list",
"of",
"FROM",
"symbols",
".",
"tsyms",
":",
"Single",
"string",
"or",
"list",
"of",
"TO",
"symbols",
".",
"e",
":",
"Default",
"returns",
"average",
"price",
"across",
"all",
"exchanges",
".",
"Can",
"be",
"set",
"to",
"the",
"name",
"of",
"a",
"single",
"exchange",
".",
"try_conversion",
":",
"If",
"the",
"crypto",
"does",
"not",
"trade",
"directly",
"into",
"the",
"toSymbol",
"requested",
"BTC",
"will",
"be",
"used",
"for",
"conversion",
".",
"If",
"set",
"to",
"false",
"it",
"will",
"try",
"to",
"get",
"values",
"without",
"using",
"any",
"conversion",
"at",
"all",
".",
"full",
":",
"Default",
"of",
"False",
"returns",
"only",
"the",
"latest",
"price",
".",
"True",
"returns",
"the",
"following",
"dictionary",
"structure",
"containing",
"the",
"full",
"trading",
"info",
":",
"format",
":",
"Default",
"returns",
"the",
"RAW",
"format",
".",
"Can",
"be",
"set",
"to",
"DISPLAY",
"format",
".",
"Returns",
":",
"Returns",
"a",
"dictionary",
"containing",
"the",
"latest",
"price",
"pairs",
"if",
"full",
"is",
"set",
"to",
"false",
":",
"{",
"fsym1",
":",
"{",
"tsym1",
":",
"...",
"tsym2",
":",
"...",
"...",
"}",
"fsym2",
":",
"{",
"...",
"}",
"...",
"}",
"or",
"full",
"trading",
"info",
"dictionaries",
"for",
"all",
"the",
"price",
"pairs",
"in",
"the",
"other",
"case",
":",
"{",
"fsym1",
":",
"{",
"tsym1",
":",
"{",
"CHANGE24HOUR",
":",
"...",
"CHANGEPCT24HOUR",
":",
"...",
"FLAGS",
":",
"...",
"FROMSYMBOL",
":",
"...",
"HIGH24HOUR",
":",
"...",
"LASTMARKET",
":",
"...",
"LASTTRADEID",
":",
"...",
"LASTUPDATE",
":",
"...",
"LASTVOLUME",
":",
"...",
"LASTVOLUMETO",
":",
"...",
"LOW24HOUR",
":",
"...",
"MARKET",
"...",
"MKTCAP",
":",
"...",
"OPEN24HOUR",
":",
"...",
"PRICE",
":",
"...",
"SUPPLY",
":",
"...",
"TOSYMBOL",
":",
"...",
"TYPE",
":",
"...",
"VOLUME24HOUR",
":",
"...",
"VOLUME24HOURTO",
":",
"...",
"}",
"tsym2",
":",
"...",
"...",
"}",
"fsym2",
":",
"{",
"...",
"}",
"...",
"}"
] | python | train |
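A hedged usage sketch; the import path follows the record's file path, and the prices shown in comments are invented:

```python
from cryptocompy import price  # module path assumed from cryptocompy/price.py

# Single pair, latest price only.
data = price.get_current_price("BTC", "USD")
# -> {'BTC': {'USD': 12345.67}}  (illustrative value)

# Several pairs with the full raw trading info.
full = price.get_current_price(["BTC", "ETH"], ["USD", "EUR"], full=True)
print(full["ETH"]["EUR"]["PRICE"])
```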
KelSolaar/Umbra | umbra/components/factory/script_editor/script_editor.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L2939-L2951 | def register_file(self, file):
"""
Registers given file in the **file_system_events_manager**.
:param file: File.
:type file: unicode
:return: Method success.
:rtype: bool
"""
not self.__engine.file_system_events_manager.is_path_registered(file) and \
self.__engine.file_system_events_manager.register_path(file)
return True | [
"def",
"register_file",
"(",
"self",
",",
"file",
")",
":",
"not",
"self",
".",
"__engine",
".",
"file_system_events_manager",
".",
"is_path_registered",
"(",
"file",
")",
"and",
"self",
".",
"__engine",
".",
"file_system_events_manager",
".",
"register_path",
"(",
"file",
")",
"return",
"True"
] | Registers given file in the **file_system_events_manager**.
:param file: File.
:type file: unicode
:return: Method success.
:rtype: bool | [
"Registers",
"given",
"file",
"in",
"the",
"**",
"file_system_events_manager",
"**",
"."
] | python | train |
arkottke/pysra | pysra/motion.py | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/motion.py#L188-L208 | def _calc_sdof_tf(self, osc_freq, damping=0.05):
"""Compute the transfer function for a single-degree-of-freedom
oscillator.
The transfer function computes the pseudo-spectral acceleration.
Parameters
----------
osc_freq : float
natural frequency of the oscillator [Hz]
damping : float, optional
damping ratio of the oscillator in decimal. Default value is
0.05, or 5%.
Returns
-------
tf : :class:`numpy.ndarray`
Complex-valued transfer function with length equal to `self.freqs`.
"""
return (-osc_freq ** 2. / (np.square(self.freqs) - np.square(osc_freq)
- 2.j * damping * osc_freq * self.freqs)) | [
"def",
"_calc_sdof_tf",
"(",
"self",
",",
"osc_freq",
",",
"damping",
"=",
"0.05",
")",
":",
"return",
"(",
"-",
"osc_freq",
"**",
"2.",
"/",
"(",
"np",
".",
"square",
"(",
"self",
".",
"freqs",
")",
"-",
"np",
".",
"square",
"(",
"osc_freq",
")",
"-",
"2.j",
"*",
"damping",
"*",
"osc_freq",
"*",
"self",
".",
"freqs",
")",
")"
] | Compute the transfer function for a single-degree-of-freedom
oscillator.
The transfer function computes the pseudo-spectral acceleration.
Parameters
----------
osc_freq : float
natural frequency of the oscillator [Hz]
damping : float, optional
damping ratio of the oscillator in decimal. Default value is
0.05, or 5%.
Returns
-------
tf : :class:`numpy.ndarray`
Complex-valued transfer function with length equal to `self.freqs`. | [
"Compute",
"the",
"transfer",
"function",
"for",
"a",
"single",
"-",
"degree",
"-",
"of",
"-",
"freedom",
"oscillator",
"."
] | python | train |
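A quick numeric check of the transfer function above, rewritten as a free function: the magnitude should peak just below the natural frequency and reach about 1/(2*damping) there:

```python
import numpy as np

def sdof_tf(freqs, osc_freq, damping=0.05):
    # same expression as in the record, without the class wrapper
    return -osc_freq ** 2 / (
        freqs ** 2 - osc_freq ** 2 - 2j * damping * osc_freq * freqs)

freqs = np.linspace(0.1, 20.0, 2000)
tf = sdof_tf(freqs, osc_freq=5.0)
print(round(freqs[np.argmax(np.abs(tf))], 2))  # ~4.99 Hz, just below 5 Hz
print(round(np.abs(tf).max(), 1))              # ~10.0 = 1 / (2 * 0.05)
```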
bcbio/bcbio-nextgen | bcbio/structural/cnvkit.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L31-L39 | def use_general_sv_bins(data):
"""Check if we should use a general binning approach for a sample.
Checks if CNVkit is enabled and we haven't already run CNVkit.
"""
if any([c in dd.get_svcaller(data) for c in ["cnvkit", "titancna", "purecn", "gatk-cnv"]]):
if not _get_original_coverage(data):
return True
return False | [
"def",
"use_general_sv_bins",
"(",
"data",
")",
":",
"if",
"any",
"(",
"[",
"c",
"in",
"dd",
".",
"get_svcaller",
"(",
"data",
")",
"for",
"c",
"in",
"[",
"\"cnvkit\"",
",",
"\"titancna\"",
",",
"\"purecn\"",
",",
"\"gatk-cnv\"",
"]",
"]",
")",
":",
"if",
"not",
"_get_original_coverage",
"(",
"data",
")",
":",
"return",
"True",
"return",
"False"
] | Check if we should use a general binning approach for a sample.
Checks if CNVkit is enabled and we haven't already run CNVkit. | [
"Check",
"if",
"we",
"should",
"use",
"a",
"general",
"binning",
"approach",
"for",
"a",
"sample",
"."
] | python | train |
subdownloader/subdownloader | subdownloader/provider/SDService.py | https://github.com/subdownloader/subdownloader/blob/bbccedd11b18d925ad4c062b5eb65981e24d0433/subdownloader/provider/SDService.py#L248-L277 | def _login(self, username="", password=""):
"""Login to the Server using username/password,
empty parameters mean an anonymous login
Returns True if login successful, and False if not.
"""
self.log.debug("----------------")
self.log.debug("Logging in (username: %s)..." % username)
def run_query():
return self._xmlrpc_server.LogIn(
username, password, self.language, self.user_agent)
info = self._safe_exec(run_query, None)
if info is None:
self._token = None
return False
self.log.debug("Login ended in %s with status: %s" %
(info['seconds'], info['status']))
if info['status'] == "200 OK":
self.log.debug("Session ID: %s" % info['token'])
self.log.debug("----------------")
self._token = info['token']
return True
else:
# force token reset
self.log.debug("----------------")
self._token = None
return False | [
"def",
"_login",
"(",
"self",
",",
"username",
"=",
"\"\"",
",",
"password",
"=",
"\"\"",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"----------------\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Logging in (username: %s)...\"",
"%",
"username",
")",
"def",
"run_query",
"(",
")",
":",
"return",
"self",
".",
"_xmlrpc_server",
".",
"LogIn",
"(",
"username",
",",
"password",
",",
"self",
".",
"language",
",",
"self",
".",
"user_agent",
")",
"info",
"=",
"self",
".",
"_safe_exec",
"(",
"run_query",
",",
"None",
")",
"if",
"info",
"is",
"None",
":",
"self",
".",
"_token",
"=",
"None",
"return",
"False",
"self",
".",
"log",
".",
"debug",
"(",
"\"Login ended in %s with status: %s\"",
"%",
"(",
"info",
"[",
"'seconds'",
"]",
",",
"info",
"[",
"'status'",
"]",
")",
")",
"if",
"info",
"[",
"'status'",
"]",
"==",
"\"200 OK\"",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Session ID: %s\"",
"%",
"info",
"[",
"'token'",
"]",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"----------------\"",
")",
"self",
".",
"_token",
"=",
"info",
"[",
"'token'",
"]",
"return",
"True",
"else",
":",
"# force token reset",
"self",
".",
"log",
".",
"debug",
"(",
"\"----------------\"",
")",
"self",
".",
"_token",
"=",
"None",
"return",
"False"
] | Login to the Server using username/password,
empty parameters mean an anonymous login
Returns True if login successful, and False if not. | [
"Login",
"to",
"the",
"Server",
"using",
"username",
"/",
"password",
"empty",
"parameters",
"means",
"an",
"anonymously",
"login",
"Returns",
"True",
"if",
"login",
"sucessful",
"and",
"False",
"if",
"not",
"."
] | python | train |
geophysics-ubonn/reda | lib/reda/utils/geometric_factors.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geometric_factors.py#L11-L26 | def apply_K(df, k):
"""Apply the geometric factors to the dataset and compute (apparent)
resistivities/conductivities
"""
if 'k' not in df.columns:
df['k'] = k
if 'rho_a' not in df.columns:
df['rho_a'] = df['r'] * df['k']
if 'sigma_a' not in df.columns:
df['sigma_a'] = 1.0 / df['rho_a']
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k']
return df | [
"def",
"apply_K",
"(",
"df",
",",
"k",
")",
":",
"if",
"'k'",
"not",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"'k'",
"]",
"=",
"k",
"if",
"'rho_a'",
"not",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"'rho_a'",
"]",
"=",
"df",
"[",
"'r'",
"]",
"*",
"df",
"[",
"'k'",
"]",
"if",
"'sigma_a'",
"not",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"'sigma_a'",
"]",
"=",
"1.0",
"/",
"df",
"[",
"'rho_a'",
"]",
"if",
"'Zt'",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"'rho_a_complex'",
"]",
"=",
"df",
"[",
"'Zt'",
"]",
"*",
"df",
"[",
"'k'",
"]",
"return",
"df"
] | Apply the geometric factors to the dataset and compute (apparent)
resistivities/conductivities | [
"Apply",
"the",
"geometric",
"factors",
"to",
"the",
"dataset",
"and",
"compute",
"(",
"apparent",
")",
"resistivities",
"/",
"conductivities"
] | python | train |
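A small worked example with a toy dataset; the import path follows the record's file path:

```python
import pandas as pd
from reda.utils.geometric_factors import apply_K  # path assumed from the record

df = pd.DataFrame({"r": [0.5, 1.2]})   # measured transfer resistances [Ohm]
k = pd.Series([100.0, 80.0])           # geometric factors [m]

df = apply_K(df, k)
print(df[["k", "rho_a", "sigma_a"]])
# rho_a = r * k -> [50.0, 96.0]; sigma_a = 1 / rho_a
```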
xolox/python-update-dotdee | update_dotdee/__init__.py | https://github.com/xolox/python-update-dotdee/blob/04d5836f0d217e32778745b533beeb8159d80c32/update_dotdee/__init__.py#L214-L226 | def write_file(self, filename, contents):
"""
Write a text file and provide feedback to the user.
:param filename: The pathname of the file to write (a string).
:param contents: The new contents of the file (a string).
"""
logger.info("Writing file: %s", format_path(filename))
contents = contents.rstrip() + b"\n"
self.context.write_file(filename, contents)
logger.debug("Wrote %s to %s.",
pluralize(len(contents.splitlines()), "line"),
format_path(filename)) | [
"def",
"write_file",
"(",
"self",
",",
"filename",
",",
"contents",
")",
":",
"logger",
".",
"info",
"(",
"\"Writing file: %s\"",
",",
"format_path",
"(",
"filename",
")",
")",
"contents",
"=",
"contents",
".",
"rstrip",
"(",
")",
"+",
"b\"\\n\"",
"self",
".",
"context",
".",
"write_file",
"(",
"filename",
",",
"contents",
")",
"logger",
".",
"debug",
"(",
"\"Wrote %s to %s.\"",
",",
"pluralize",
"(",
"len",
"(",
"contents",
".",
"splitlines",
"(",
")",
")",
",",
"\"line\"",
")",
",",
"format_path",
"(",
"filename",
")",
")"
] | Write a text file and provide feedback to the user.
:param filename: The pathname of the file to write (a string).
:param contents: The new contents of the file (a string). | [
"Write",
"a",
"text",
"file",
"and",
"provide",
"feedback",
"to",
"the",
"user",
"."
] | python | train |
wal-e/wal-e | wal_e/blobstore/s3/calling_format.py | https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/s3/calling_format.py#L43-L62 | def _is_ipv4_like(s):
"""Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected.
"""
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True | [
"def",
"_is_ipv4_like",
"(",
"s",
")",
":",
"parts",
"=",
"s",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
"!=",
"4",
":",
"return",
"False",
"for",
"part",
"in",
"parts",
":",
"try",
":",
"int",
"(",
"part",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] | Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected. | [
"Find",
"if",
"a",
"string",
"superficially",
"looks",
"like",
"an",
"IPv4",
"address",
"."
] | python | train |
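Assuming ``_is_ipv4_like`` from the record is in scope, a few examples of its deliberately loose behavior, plus the strict standard-library check for contrast:

```python
print(_is_ipv4_like("192.168.0.1"))  # True
print(_is_ipv4_like("999.1.2.3"))    # True: octet ranges are not checked
print(_is_ipv4_like("bucket-name"))  # False: not four dot-separated parts

import ipaddress

def is_valid_ipv4(s):
    """Strict validation via the standard library, for comparison."""
    try:
        ipaddress.IPv4Address(s)
        return True
    except ValueError:
        return False

print(is_valid_ipv4("999.1.2.3"))    # False
```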
pycontribs/pyrax | pyrax/clouddns.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L589-L606 | def findall(self, **kwargs):
"""
Finds all items with attributes matching ``**kwargs``.
Normally this isn't very efficient, since the default action is to
load the entire list and then filter on the Python side, but the DNS
API provides a more efficient search option when filtering on name.
So if the filter is on name, use that; otherwise, use the default.
"""
if (len(kwargs) == 1) and ("name" in kwargs):
# Filtering on name; use the more efficient method.
nm = kwargs["name"].lower()
uri = "/%s?name=%s" % (self.uri_base, nm)
matches = self._list(uri, list_all=True)
return [match for match in matches
if match.name.lower() == nm]
else:
return super(CloudDNSManager, self).findall(**kwargs) | [
"def",
"findall",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"len",
"(",
"kwargs",
")",
"==",
"1",
")",
"and",
"(",
"\"name\"",
"in",
"kwargs",
")",
":",
"# Filtering on name; use the more efficient method.",
"nm",
"=",
"kwargs",
"[",
"\"name\"",
"]",
".",
"lower",
"(",
")",
"uri",
"=",
"\"/%s?name=%s\"",
"%",
"(",
"self",
".",
"uri_base",
",",
"nm",
")",
"matches",
"=",
"self",
".",
"_list",
"(",
"uri",
",",
"list_all",
"=",
"True",
")",
"return",
"[",
"match",
"for",
"match",
"in",
"matches",
"if",
"match",
".",
"name",
".",
"lower",
"(",
")",
"==",
"nm",
"]",
"else",
":",
"return",
"super",
"(",
"CloudDNSManager",
",",
"self",
")",
".",
"findall",
"(",
"*",
"*",
"kwargs",
")"
] | Finds all items with attributes matching ``**kwargs``.
Normally this isn't very efficient, since the default action is to
load the entire list and then filter on the Python side, but the DNS
API provides a more efficient search option when filtering on name.
So if the filter is on name, use that; otherwise, use the default. | [
"Finds",
"all",
"items",
"with",
"attributes",
"matching",
"**",
"kwargs",
"."
] | python | train |