repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---|
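Each row below is one record of the corpus: a function's source (`code`), its tokenization (`code_tokens`), the associated docstring (`docstring`) and its tokenization (`docstring_tokens`), plus repository metadata and the train/valid/test split (`partition`). As a minimal sketch of how such records might be consumed, the snippet below reads a JSON-lines export with the column names from the header above; the file name and the retrieval-style pairing are illustrative assumptions, not part of the dataset itself.

```python
import json

def iter_records(path="corpus.jsonl"):
    """Yield (code_tokens, docstring_tokens, partition) per record.

    Assumes a JSON-lines file whose objects carry the columns listed in the
    header above: repo, path, url, code, code_tokens, docstring,
    docstring_tokens, language, partition. The file name is hypothetical.
    """
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            rec = json.loads(line)
            yield rec["code_tokens"], rec["docstring_tokens"], rec["partition"]

# Usage sketch: keep only training examples for a code-to-docstring task.
# train_pairs = [(c, d) for c, d, split in iter_records() if split == "train"]
```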
DataONEorg/d1_python
|
client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L430-L449
|
def setFileTime(
self, fileName, creationTime, lastAccessTime, lastWriteTime, dokanFileInfo
):
"""Set time values for a file.
:param fileName: name of file to set time values for
:type fileName: ctypes.c_wchar_p
:param creationTime: creation time of file
:type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param lastAccessTime: last access time of file
:type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param lastWriteTime: last write time of file
:type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('setFileTime', fileName)
|
[
"def",
"setFileTime",
"(",
"self",
",",
"fileName",
",",
"creationTime",
",",
"lastAccessTime",
",",
"lastWriteTime",
",",
"dokanFileInfo",
")",
":",
"return",
"self",
".",
"operations",
"(",
"'setFileTime'",
",",
"fileName",
")"
] |
Set time values for a file.
:param fileName: name of file to set time values for
:type fileName: ctypes.c_wchar_p
:param creationTime: creation time of file
:type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param lastAccessTime: last access time of file
:type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param lastWriteTime: last write time of file
:type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
|
[
"Set",
"time",
"values",
"for",
"a",
"file",
"."
] |
python
|
train
|
doconix/django-mako-plus
|
django_mako_plus/tags.py
|
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/tags.py#L14-L33
|
def django_include(context, template_name, **kwargs):
'''
Mako tag to include a Django template withing the current DMP (Mako) template.
Since this is a Django template, it is search for using the Django search
algorithm (instead of the DMP app-based concept).
See https://docs.djangoproject.com/en/2.1/topics/templates/.
The current context is sent to the included template, which makes all context
variables available to the Django template. Any additional kwargs are added
to the context.
'''
try:
djengine = engines['django']
except KeyError as e:
raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e
djtemplate = djengine.get_template(template_name)
djcontext = {}
djcontext.update(context)
djcontext.update(kwargs)
return djtemplate.render(djcontext, context['request'])
|
[
"def",
"django_include",
"(",
"context",
",",
"template_name",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"djengine",
"=",
"engines",
"[",
"'django'",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"TemplateDoesNotExist",
"(",
"\"Django template engine not configured in settings, so template cannot be found: {}\"",
".",
"format",
"(",
"template_name",
")",
")",
"from",
"e",
"djtemplate",
"=",
"djengine",
".",
"get_template",
"(",
"template_name",
")",
"djcontext",
"=",
"{",
"}",
"djcontext",
".",
"update",
"(",
"context",
")",
"djcontext",
".",
"update",
"(",
"kwargs",
")",
"return",
"djtemplate",
".",
"render",
"(",
"djcontext",
",",
"context",
"[",
"'request'",
"]",
")"
] |
Mako tag to include a Django template withing the current DMP (Mako) template.
Since this is a Django template, it is search for using the Django search
algorithm (instead of the DMP app-based concept).
See https://docs.djangoproject.com/en/2.1/topics/templates/.
The current context is sent to the included template, which makes all context
variables available to the Django template. Any additional kwargs are added
to the context.
|
[
"Mako",
"tag",
"to",
"include",
"a",
"Django",
"template",
"withing",
"the",
"current",
"DMP",
"(",
"Mako",
")",
"template",
".",
"Since",
"this",
"is",
"a",
"Django",
"template",
"it",
"is",
"search",
"for",
"using",
"the",
"Django",
"search",
"algorithm",
"(",
"instead",
"of",
"the",
"DMP",
"app",
"-",
"based",
"concept",
")",
".",
"See",
"https",
":",
"//",
"docs",
".",
"djangoproject",
".",
"com",
"/",
"en",
"/",
"2",
".",
"1",
"/",
"topics",
"/",
"templates",
"/",
"."
] |
python
|
train
|
erikrose/nose-progressive
|
noseprogressive/runner.py
|
https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/runner.py#L16-L27
|
def _makeResult(self):
"""Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
"""
return ProgressiveResult(self._cwd,
self._totalTests,
self.stream,
config=self.config)
|
[
"def",
"_makeResult",
"(",
"self",
")",
":",
"return",
"ProgressiveResult",
"(",
"self",
".",
"_cwd",
",",
"self",
".",
"_totalTests",
",",
"self",
".",
"stream",
",",
"config",
"=",
"self",
".",
"config",
")"
] |
Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
|
[
"Return",
"a",
"Result",
"that",
"doesn",
"t",
"print",
"dots",
"."
] |
python
|
train
|
xhtml2pdf/xhtml2pdf
|
xhtml2pdf/w3c/css.py
|
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/w3c/css.py#L159-L169
|
def findStylesForEach(self, element, attrNames, default=NotImplemented):
"""Attempts to find the style setting for attrName in the CSSRulesets.
Note: This method does not attempt to resolve rules that return
"inherited", "default", or values that have units (including "%").
This is left up to the client app to re-query the CSS in order to
implement these semantics.
"""
rules = self.findCSSRulesForEach(element, attrNames)
return [(attrName, self._extractStyleForRule(rule, attrName, default))
for attrName, rule in six.iteritems(rules)]
|
[
"def",
"findStylesForEach",
"(",
"self",
",",
"element",
",",
"attrNames",
",",
"default",
"=",
"NotImplemented",
")",
":",
"rules",
"=",
"self",
".",
"findCSSRulesForEach",
"(",
"element",
",",
"attrNames",
")",
"return",
"[",
"(",
"attrName",
",",
"self",
".",
"_extractStyleForRule",
"(",
"rule",
",",
"attrName",
",",
"default",
")",
")",
"for",
"attrName",
",",
"rule",
"in",
"six",
".",
"iteritems",
"(",
"rules",
")",
"]"
] |
Attempts to find the style setting for attrName in the CSSRulesets.
Note: This method does not attempt to resolve rules that return
"inherited", "default", or values that have units (including "%").
This is left up to the client app to re-query the CSS in order to
implement these semantics.
|
[
"Attempts",
"to",
"find",
"the",
"style",
"setting",
"for",
"attrName",
"in",
"the",
"CSSRulesets",
"."
] |
python
|
train
|
mwouts/jupytext
|
jupytext/magics.py
|
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/magics.py#L46-L55
|
def comment_magic(source, language='python', global_escape_flag=True):
"""Escape Jupyter magics with '# '"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (next_is_magic or is_magic(line, language, global_escape_flag)):
source[pos] = _COMMENT[language] + ' ' + line
next_is_magic = language == 'python' and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source
|
[
"def",
"comment_magic",
"(",
"source",
",",
"language",
"=",
"'python'",
",",
"global_escape_flag",
"=",
"True",
")",
":",
"parser",
"=",
"StringParser",
"(",
"language",
")",
"next_is_magic",
"=",
"False",
"for",
"pos",
",",
"line",
"in",
"enumerate",
"(",
"source",
")",
":",
"if",
"not",
"parser",
".",
"is_quoted",
"(",
")",
"and",
"(",
"next_is_magic",
"or",
"is_magic",
"(",
"line",
",",
"language",
",",
"global_escape_flag",
")",
")",
":",
"source",
"[",
"pos",
"]",
"=",
"_COMMENT",
"[",
"language",
"]",
"+",
"' '",
"+",
"line",
"next_is_magic",
"=",
"language",
"==",
"'python'",
"and",
"_LINE_CONTINUATION_RE",
".",
"match",
"(",
"line",
")",
"parser",
".",
"read_line",
"(",
"line",
")",
"return",
"source"
] |
Escape Jupyter magics with '#
|
[
"Escape",
"Jupyter",
"magics",
"with",
"#"
] |
python
|
train
|
Cognexa/cxflow
|
cxflow/main_loop.py
|
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L182-L188
|
def train_by_stream(self, stream: StreamWrapper) -> None:
"""
Train the model with the given stream.
:param stream: stream to train with
"""
self._run_epoch(stream=stream, train=True)
|
[
"def",
"train_by_stream",
"(",
"self",
",",
"stream",
":",
"StreamWrapper",
")",
"->",
"None",
":",
"self",
".",
"_run_epoch",
"(",
"stream",
"=",
"stream",
",",
"train",
"=",
"True",
")"
] |
Train the model with the given stream.
:param stream: stream to train with
|
[
"Train",
"the",
"model",
"with",
"the",
"given",
"stream",
"."
] |
python
|
train
|
econ-ark/HARK
|
HARK/ConsumptionSaving/ConsIndShockModel.py
|
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1183-L1244
|
def solveConsIndShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc.
'''
# Use the basic solver if user doesn't want cubic splines or the value function
if (not CubicBool) and (not vFuncBool):
solver = ConsIndShockSolverBasic(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,
Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,
CubicBool)
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now
|
[
"def",
"solveConsIndShock",
"(",
"solution_next",
",",
"IncomeDstn",
",",
"LivPrb",
",",
"DiscFac",
",",
"CRRA",
",",
"Rfree",
",",
"PermGroFac",
",",
"BoroCnstArt",
",",
"aXtraGrid",
",",
"vFuncBool",
",",
"CubicBool",
")",
":",
"# Use the basic solver if user doesn't want cubic splines or the value function",
"if",
"(",
"not",
"CubicBool",
")",
"and",
"(",
"not",
"vFuncBool",
")",
":",
"solver",
"=",
"ConsIndShockSolverBasic",
"(",
"solution_next",
",",
"IncomeDstn",
",",
"LivPrb",
",",
"DiscFac",
",",
"CRRA",
",",
"Rfree",
",",
"PermGroFac",
",",
"BoroCnstArt",
",",
"aXtraGrid",
",",
"vFuncBool",
",",
"CubicBool",
")",
"else",
":",
"# Use the \"advanced\" solver if either is requested",
"solver",
"=",
"ConsIndShockSolver",
"(",
"solution_next",
",",
"IncomeDstn",
",",
"LivPrb",
",",
"DiscFac",
",",
"CRRA",
",",
"Rfree",
",",
"PermGroFac",
",",
"BoroCnstArt",
",",
"aXtraGrid",
",",
"vFuncBool",
",",
"CubicBool",
")",
"solver",
".",
"prepareToSolve",
"(",
")",
"# Do some preparatory work",
"solution_now",
"=",
"solver",
".",
"solve",
"(",
")",
"# Solve the period",
"return",
"solution_now"
] |
Solves a single period consumption-saving problem with CRRA utility and risky
income (subject to permanent and transitory shocks). Can generate a value
function if requested; consumption function can be linear or cubic splines.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
Indicator for whether the solver should use cubic or linear interpolation.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marginal
value function vPfunc, a minimum acceptable level of normalized market
resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc and marginal mar-
ginal value function vPPfunc.
|
[
"Solves",
"a",
"single",
"period",
"consumption",
"-",
"saving",
"problem",
"with",
"CRRA",
"utility",
"and",
"risky",
"income",
"(",
"subject",
"to",
"permanent",
"and",
"transitory",
"shocks",
")",
".",
"Can",
"generate",
"a",
"value",
"function",
"if",
"requested",
";",
"consumption",
"function",
"can",
"be",
"linear",
"or",
"cubic",
"splines",
"."
] |
python
|
train
|
spyder-ide/spyder
|
spyder/plugins/editor/extensions/docstring.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/extensions/docstring.py#L479-L524
|
def parse_return_elements(return_vals_group, return_element_name,
return_element_type, placeholder):
"""Return the appropriate text for a group of return elements."""
all_eq = (return_vals_group.count(return_vals_group[0])
== len(return_vals_group))
if all([{'[list]', '(tuple)', '{dict}', '{set}'}.issuperset(
return_vals_group)]) and all_eq:
return return_element_type.format(
return_type=return_vals_group[0][1:-1])
# Output placeholder if special Python chars present in name
py_chars = {' ', '+', '-', '*', '/', '%', '@', '<', '>', '&', '|', '^',
'~', '=', ',', ':', ';', '#', '(', '[', '{', '}', ']',
')', }
if any([any([py_char in return_val for py_char in py_chars])
for return_val in return_vals_group]):
return placeholder
# Output str type and no name if only string literals
if all(['"' in return_val or '\'' in return_val
for return_val in return_vals_group]):
return return_element_type.format(return_type='str')
# Output bool type and no name if only bool literals
if {'True', 'False'}.issuperset(return_vals_group):
return return_element_type.format(return_type='bool')
# Output numeric types and no name if only numeric literals
try:
[float(return_val) for return_val in return_vals_group]
num_not_int = 0
for return_val in return_vals_group:
try:
int(return_val)
except ValueError: # If not an integer (EAFP)
num_not_int = num_not_int + 1
if num_not_int == 0:
return return_element_type.format(return_type='int')
elif num_not_int == len(return_vals_group):
return return_element_type.format(return_type='float')
else:
return return_element_type.format(return_type='numeric')
except ValueError: # Not a numeric if float conversion didn't work
pass
# If names are not equal, don't contain "." or are a builtin
if ({'self', 'cls', 'None'}.isdisjoint(return_vals_group) and all_eq
and all(['.' not in return_val
for return_val in return_vals_group])):
return return_element_name.format(return_name=return_vals_group[0])
return placeholder
|
[
"def",
"parse_return_elements",
"(",
"return_vals_group",
",",
"return_element_name",
",",
"return_element_type",
",",
"placeholder",
")",
":",
"all_eq",
"=",
"(",
"return_vals_group",
".",
"count",
"(",
"return_vals_group",
"[",
"0",
"]",
")",
"==",
"len",
"(",
"return_vals_group",
")",
")",
"if",
"all",
"(",
"[",
"{",
"'[list]'",
",",
"'(tuple)'",
",",
"'{dict}'",
",",
"'{set}'",
"}",
".",
"issuperset",
"(",
"return_vals_group",
")",
"]",
")",
"and",
"all_eq",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"return_vals_group",
"[",
"0",
"]",
"[",
"1",
":",
"-",
"1",
"]",
")",
"# Output placeholder if special Python chars present in name\r",
"py_chars",
"=",
"{",
"' '",
",",
"'+'",
",",
"'-'",
",",
"'*'",
",",
"'/'",
",",
"'%'",
",",
"'@'",
",",
"'<'",
",",
"'>'",
",",
"'&'",
",",
"'|'",
",",
"'^'",
",",
"'~'",
",",
"'='",
",",
"','",
",",
"':'",
",",
"';'",
",",
"'#'",
",",
"'('",
",",
"'['",
",",
"'{'",
",",
"'}'",
",",
"']'",
",",
"')'",
",",
"}",
"if",
"any",
"(",
"[",
"any",
"(",
"[",
"py_char",
"in",
"return_val",
"for",
"py_char",
"in",
"py_chars",
"]",
")",
"for",
"return_val",
"in",
"return_vals_group",
"]",
")",
":",
"return",
"placeholder",
"# Output str type and no name if only string literals\r",
"if",
"all",
"(",
"[",
"'\"'",
"in",
"return_val",
"or",
"'\\''",
"in",
"return_val",
"for",
"return_val",
"in",
"return_vals_group",
"]",
")",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"'str'",
")",
"# Output bool type and no name if only bool literals\r",
"if",
"{",
"'True'",
",",
"'False'",
"}",
".",
"issuperset",
"(",
"return_vals_group",
")",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"'bool'",
")",
"# Output numeric types and no name if only numeric literals\r",
"try",
":",
"[",
"float",
"(",
"return_val",
")",
"for",
"return_val",
"in",
"return_vals_group",
"]",
"num_not_int",
"=",
"0",
"for",
"return_val",
"in",
"return_vals_group",
":",
"try",
":",
"int",
"(",
"return_val",
")",
"except",
"ValueError",
":",
"# If not an integer (EAFP)\r",
"num_not_int",
"=",
"num_not_int",
"+",
"1",
"if",
"num_not_int",
"==",
"0",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"'int'",
")",
"elif",
"num_not_int",
"==",
"len",
"(",
"return_vals_group",
")",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"'float'",
")",
"else",
":",
"return",
"return_element_type",
".",
"format",
"(",
"return_type",
"=",
"'numeric'",
")",
"except",
"ValueError",
":",
"# Not a numeric if float conversion didn't work\r",
"pass",
"# If names are not equal, don't contain \".\" or are a builtin\r",
"if",
"(",
"{",
"'self'",
",",
"'cls'",
",",
"'None'",
"}",
".",
"isdisjoint",
"(",
"return_vals_group",
")",
"and",
"all_eq",
"and",
"all",
"(",
"[",
"'.'",
"not",
"in",
"return_val",
"for",
"return_val",
"in",
"return_vals_group",
"]",
")",
")",
":",
"return",
"return_element_name",
".",
"format",
"(",
"return_name",
"=",
"return_vals_group",
"[",
"0",
"]",
")",
"return",
"placeholder"
] |
Return the appropriate text for a group of return elements.
|
[
"Return",
"the",
"appropriate",
"text",
"for",
"a",
"group",
"of",
"return",
"elements",
"."
] |
python
|
train
|
ssalentin/plip
|
plip/modules/report.py
|
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/report.py#L37-L73
|
def construct_xml_tree(self):
"""Construct the basic XML tree"""
report = et.Element('report')
plipversion = et.SubElement(report, 'plipversion')
plipversion.text = __version__
date_of_creation = et.SubElement(report, 'date_of_creation')
date_of_creation.text = time.strftime("%Y/%m/%d")
citation_information = et.SubElement(report, 'citation_information')
citation_information.text = "Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler. " \
"Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315"
mode = et.SubElement(report, 'mode')
if config.DNARECEPTOR:
mode.text = 'dna_receptor'
else:
mode.text = 'default'
pdbid = et.SubElement(report, 'pdbid')
pdbid.text = self.mol.pymol_name.upper()
filetype = et.SubElement(report, 'filetype')
filetype.text = self.mol.filetype.upper()
pdbfile = et.SubElement(report, 'pdbfile')
pdbfile.text = self.mol.sourcefiles['pdbcomplex']
pdbfixes = et.SubElement(report, 'pdbfixes')
pdbfixes.text = str(self.mol.information['pdbfixes'])
filename = et.SubElement(report, 'filename')
filename.text = str(self.mol.sourcefiles.get('filename') or None)
exligs = et.SubElement(report, 'excluded_ligands')
for i, exlig in enumerate(self.excluded):
e = et.SubElement(exligs, 'excluded_ligand', id=str(i + 1))
e.text = exlig
covalent = et.SubElement(report, 'covlinkages')
for i, covlinkage in enumerate(self.mol.covalent):
e = et.SubElement(covalent, 'covlinkage', id=str(i + 1))
f1 = et.SubElement(e, 'res1')
f2 = et.SubElement(e, 'res2')
f1.text = ":".join([covlinkage.id1, covlinkage.chain1, str(covlinkage.pos1)])
f2.text = ":".join([covlinkage.id2, covlinkage.chain2, str(covlinkage.pos2)])
return report
|
[
"def",
"construct_xml_tree",
"(",
"self",
")",
":",
"report",
"=",
"et",
".",
"Element",
"(",
"'report'",
")",
"plipversion",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'plipversion'",
")",
"plipversion",
".",
"text",
"=",
"__version__",
"date_of_creation",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'date_of_creation'",
")",
"date_of_creation",
".",
"text",
"=",
"time",
".",
"strftime",
"(",
"\"%Y/%m/%d\"",
")",
"citation_information",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'citation_information'",
")",
"citation_information",
".",
"text",
"=",
"\"Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler. \"",
"\"Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\"",
"mode",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'mode'",
")",
"if",
"config",
".",
"DNARECEPTOR",
":",
"mode",
".",
"text",
"=",
"'dna_receptor'",
"else",
":",
"mode",
".",
"text",
"=",
"'default'",
"pdbid",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'pdbid'",
")",
"pdbid",
".",
"text",
"=",
"self",
".",
"mol",
".",
"pymol_name",
".",
"upper",
"(",
")",
"filetype",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'filetype'",
")",
"filetype",
".",
"text",
"=",
"self",
".",
"mol",
".",
"filetype",
".",
"upper",
"(",
")",
"pdbfile",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'pdbfile'",
")",
"pdbfile",
".",
"text",
"=",
"self",
".",
"mol",
".",
"sourcefiles",
"[",
"'pdbcomplex'",
"]",
"pdbfixes",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'pdbfixes'",
")",
"pdbfixes",
".",
"text",
"=",
"str",
"(",
"self",
".",
"mol",
".",
"information",
"[",
"'pdbfixes'",
"]",
")",
"filename",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'filename'",
")",
"filename",
".",
"text",
"=",
"str",
"(",
"self",
".",
"mol",
".",
"sourcefiles",
".",
"get",
"(",
"'filename'",
")",
"or",
"None",
")",
"exligs",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'excluded_ligands'",
")",
"for",
"i",
",",
"exlig",
"in",
"enumerate",
"(",
"self",
".",
"excluded",
")",
":",
"e",
"=",
"et",
".",
"SubElement",
"(",
"exligs",
",",
"'excluded_ligand'",
",",
"id",
"=",
"str",
"(",
"i",
"+",
"1",
")",
")",
"e",
".",
"text",
"=",
"exlig",
"covalent",
"=",
"et",
".",
"SubElement",
"(",
"report",
",",
"'covlinkages'",
")",
"for",
"i",
",",
"covlinkage",
"in",
"enumerate",
"(",
"self",
".",
"mol",
".",
"covalent",
")",
":",
"e",
"=",
"et",
".",
"SubElement",
"(",
"covalent",
",",
"'covlinkage'",
",",
"id",
"=",
"str",
"(",
"i",
"+",
"1",
")",
")",
"f1",
"=",
"et",
".",
"SubElement",
"(",
"e",
",",
"'res1'",
")",
"f2",
"=",
"et",
".",
"SubElement",
"(",
"e",
",",
"'res2'",
")",
"f1",
".",
"text",
"=",
"\":\"",
".",
"join",
"(",
"[",
"covlinkage",
".",
"id1",
",",
"covlinkage",
".",
"chain1",
",",
"str",
"(",
"covlinkage",
".",
"pos1",
")",
"]",
")",
"f2",
".",
"text",
"=",
"\":\"",
".",
"join",
"(",
"[",
"covlinkage",
".",
"id2",
",",
"covlinkage",
".",
"chain2",
",",
"str",
"(",
"covlinkage",
".",
"pos2",
")",
"]",
")",
"return",
"report"
] |
Construct the basic XML tree
|
[
"Construct",
"the",
"basic",
"XML",
"tree"
] |
python
|
train
|
dvdotsenko/jsonrpc.py
|
jsonrpcparts/serializers.py
|
https://github.com/dvdotsenko/jsonrpc.py/blob/19673edd77a9518ac5655bd407f6b93ffbb2cafc/jsonrpcparts/serializers.py#L136-L153
|
def assemble_notification_request(method, params=tuple()):
"""serialize a JSON-RPC-Notification
:Parameters: see dumps_request
:Returns: | {"method": "...", "params": ..., "id": null}
| "method", "params" and "id" are always in this order.
:Raises: see dumps_request
"""
if not isinstance(method, (str, unicode)):
raise TypeError('"method" must be a string (or unicode string).')
if not isinstance(params, (tuple, list)):
raise TypeError("params must be a tuple/list.")
return {
"method": method,
"params": params,
"id": None
}
|
[
"def",
"assemble_notification_request",
"(",
"method",
",",
"params",
"=",
"tuple",
"(",
")",
")",
":",
"if",
"not",
"isinstance",
"(",
"method",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"raise",
"TypeError",
"(",
"'\"method\" must be a string (or unicode string).'",
")",
"if",
"not",
"isinstance",
"(",
"params",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"params must be a tuple/list.\"",
")",
"return",
"{",
"\"method\"",
":",
"method",
",",
"\"params\"",
":",
"params",
",",
"\"id\"",
":",
"None",
"}"
] |
serialize a JSON-RPC-Notification
:Parameters: see dumps_request
:Returns: | {"method": "...", "params": ..., "id": null}
| "method", "params" and "id" are always in this order.
:Raises: see dumps_request
|
[
"serialize",
"a",
"JSON",
"-",
"RPC",
"-",
"Notification"
] |
python
|
train
|
secdev/scapy
|
scapy/layers/sixlowpan.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/sixlowpan.py#L707-L716
|
def _getTrafficClassAndFlowLabel(self):
"""Page 6, draft feb 2011 """
if self.tf == 0x0:
return (self.tc_ecn << 6) + self.tc_dscp, self.flowlabel
elif self.tf == 0x1:
return (self.tc_ecn << 6), self.flowlabel
elif self.tf == 0x2:
return (self.tc_ecn << 6) + self.tc_dscp, 0
else:
return 0, 0
|
[
"def",
"_getTrafficClassAndFlowLabel",
"(",
"self",
")",
":",
"if",
"self",
".",
"tf",
"==",
"0x0",
":",
"return",
"(",
"self",
".",
"tc_ecn",
"<<",
"6",
")",
"+",
"self",
".",
"tc_dscp",
",",
"self",
".",
"flowlabel",
"elif",
"self",
".",
"tf",
"==",
"0x1",
":",
"return",
"(",
"self",
".",
"tc_ecn",
"<<",
"6",
")",
",",
"self",
".",
"flowlabel",
"elif",
"self",
".",
"tf",
"==",
"0x2",
":",
"return",
"(",
"self",
".",
"tc_ecn",
"<<",
"6",
")",
"+",
"self",
".",
"tc_dscp",
",",
"0",
"else",
":",
"return",
"0",
",",
"0"
] |
Page 6, draft feb 2011
|
[
"Page",
"6",
"draft",
"feb",
"2011"
] |
python
|
train
|
django-cumulus/django-cumulus
|
cumulus/management/commands/syncfiles.py
|
https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/management/commands/syncfiles.py#L154-L175
|
def match_local(self, prefix, includes, excludes):
"""
Filters os.walk() with include and exclude patterns.
See: http://stackoverflow.com/a/5141829/93559
"""
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
matches = []
for root, dirs, files in os.walk(prefix, topdown=True):
# exclude dirs
dirs[:] = [os.path.join(root, d) for d in dirs]
dirs[:] = [d for d in dirs if not re.match(excludes_pattern,
d.split(root)[1])]
# exclude/include files
files = [os.path.join(root, f) for f in files]
files = [os.path.join(root, f) for f in files
if not re.match(excludes_pattern, f)]
files = [os.path.join(root, f) for f in files
if re.match(includes_pattern, f.split(prefix)[1])]
for fname in files:
matches.append(fname)
return matches
|
[
"def",
"match_local",
"(",
"self",
",",
"prefix",
",",
"includes",
",",
"excludes",
")",
":",
"includes_pattern",
"=",
"r\"|\"",
".",
"join",
"(",
"[",
"fnmatch",
".",
"translate",
"(",
"x",
")",
"for",
"x",
"in",
"includes",
"]",
")",
"excludes_pattern",
"=",
"r\"|\"",
".",
"join",
"(",
"[",
"fnmatch",
".",
"translate",
"(",
"x",
")",
"for",
"x",
"in",
"excludes",
"]",
")",
"or",
"r\"$.\"",
"matches",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"prefix",
",",
"topdown",
"=",
"True",
")",
":",
"# exclude dirs",
"dirs",
"[",
":",
"]",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"dirs",
"]",
"dirs",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirs",
"if",
"not",
"re",
".",
"match",
"(",
"excludes_pattern",
",",
"d",
".",
"split",
"(",
"root",
")",
"[",
"1",
"]",
")",
"]",
"# exclude/include files",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"for",
"f",
"in",
"files",
"if",
"not",
"re",
".",
"match",
"(",
"excludes_pattern",
",",
"f",
")",
"]",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"for",
"f",
"in",
"files",
"if",
"re",
".",
"match",
"(",
"includes_pattern",
",",
"f",
".",
"split",
"(",
"prefix",
")",
"[",
"1",
"]",
")",
"]",
"for",
"fname",
"in",
"files",
":",
"matches",
".",
"append",
"(",
"fname",
")",
"return",
"matches"
] |
Filters os.walk() with include and exclude patterns.
See: http://stackoverflow.com/a/5141829/93559
|
[
"Filters",
"os",
".",
"walk",
"()",
"with",
"include",
"and",
"exclude",
"patterns",
".",
"See",
":",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"5141829",
"/",
"93559"
] |
python
|
train
|
pinax/pinax-blog
|
pinax/blog/parsers/creole_parser.py
|
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/parsers/creole_parser.py#L142-L145
|
def emit_node(self, node):
"""Emit a single node."""
emit = getattr(self, "%s_emit" % node.kind, self.default_emit)
return emit(node)
|
[
"def",
"emit_node",
"(",
"self",
",",
"node",
")",
":",
"emit",
"=",
"getattr",
"(",
"self",
",",
"\"%s_emit\"",
"%",
"node",
".",
"kind",
",",
"self",
".",
"default_emit",
")",
"return",
"emit",
"(",
"node",
")"
] |
Emit a single node.
|
[
"Emit",
"a",
"single",
"node",
"."
] |
python
|
train
|
contains-io/rcli
|
rcli/usage.py
|
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/usage.py#L310-L333
|
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r'\s\s+', line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len
|
[
"def",
"_get_definitions",
"(",
"source",
")",
":",
"# type: (str) -> Tuple[Dict[str, str], int]",
"max_len",
"=",
"0",
"descs",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"# type: Dict[str, str]",
"lines",
"=",
"(",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"source",
".",
"splitlines",
"(",
")",
")",
"non_empty_lines",
"=",
"(",
"s",
"for",
"s",
"in",
"lines",
"if",
"s",
")",
"for",
"line",
"in",
"non_empty_lines",
":",
"if",
"line",
":",
"arg",
",",
"desc",
"=",
"re",
".",
"split",
"(",
"r'\\s\\s+'",
",",
"line",
".",
"strip",
"(",
")",
")",
"arg_len",
"=",
"len",
"(",
"arg",
")",
"if",
"arg_len",
">",
"max_len",
":",
"max_len",
"=",
"arg_len",
"descs",
"[",
"arg",
"]",
"=",
"desc",
"return",
"descs",
",",
"max_len"
] |
Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
|
[
"Extract",
"a",
"dictionary",
"of",
"arguments",
"and",
"definitions",
"."
] |
python
|
train
|
pantsbuild/pants
|
src/python/pants/auth/cookies.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/auth/cookies.py#L39-L51
|
def get_cookie_jar(self):
"""Returns our cookie jar."""
cookie_file = self._get_cookie_file()
cookie_jar = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
cookie_jar.load()
else:
safe_mkdir_for(cookie_file)
# Save an empty cookie jar so we can change the file perms on it before writing data to it.
with self._lock:
cookie_jar.save()
os.chmod(cookie_file, 0o600)
return cookie_jar
|
[
"def",
"get_cookie_jar",
"(",
"self",
")",
":",
"cookie_file",
"=",
"self",
".",
"_get_cookie_file",
"(",
")",
"cookie_jar",
"=",
"LWPCookieJar",
"(",
"cookie_file",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cookie_file",
")",
":",
"cookie_jar",
".",
"load",
"(",
")",
"else",
":",
"safe_mkdir_for",
"(",
"cookie_file",
")",
"# Save an empty cookie jar so we can change the file perms on it before writing data to it.",
"with",
"self",
".",
"_lock",
":",
"cookie_jar",
".",
"save",
"(",
")",
"os",
".",
"chmod",
"(",
"cookie_file",
",",
"0o600",
")",
"return",
"cookie_jar"
] |
Returns our cookie jar.
|
[
"Returns",
"our",
"cookie",
"jar",
"."
] |
python
|
train
|
sprockets/sprockets.mixins.metrics
|
sprockets/mixins/metrics/statsd.py
|
https://github.com/sprockets/sprockets.mixins.metrics/blob/0b17d5f0c09a2be9db779e17e6789d3d5ff9a0d0/sprockets/mixins/metrics/statsd.py#L19-L32
|
def record_timing(self, duration, *path):
"""Record a timing.
This method records a timing to the application's namespace
followed by a calculated path. Each element of `path` is
converted to a string and normalized before joining the
elements by periods. The normalization process is little
more than replacing periods with dashes.
:param float duration: timing to record in seconds
:param path: elements of the metric path to record
"""
self.application.statsd.send(path, duration * 1000.0, 'ms')
|
[
"def",
"record_timing",
"(",
"self",
",",
"duration",
",",
"*",
"path",
")",
":",
"self",
".",
"application",
".",
"statsd",
".",
"send",
"(",
"path",
",",
"duration",
"*",
"1000.0",
",",
"'ms'",
")"
] |
Record a timing.
This method records a timing to the application's namespace
followed by a calculated path. Each element of `path` is
converted to a string and normalized before joining the
elements by periods. The normalization process is little
more than replacing periods with dashes.
:param float duration: timing to record in seconds
:param path: elements of the metric path to record
|
[
"Record",
"a",
"timing",
"."
] |
python
|
train
|
petebachant/PXL
|
pxl/io.py
|
https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/io.py#L48-L55
|
def savecsv(filename, datadict, mode="w"):
"""Save a dictionary of data to CSV."""
if mode == "a" :
header = False
else:
header = True
with open(filename, mode) as f:
_pd.DataFrame(datadict).to_csv(f, index=False, header=header)
|
[
"def",
"savecsv",
"(",
"filename",
",",
"datadict",
",",
"mode",
"=",
"\"w\"",
")",
":",
"if",
"mode",
"==",
"\"a\"",
":",
"header",
"=",
"False",
"else",
":",
"header",
"=",
"True",
"with",
"open",
"(",
"filename",
",",
"mode",
")",
"as",
"f",
":",
"_pd",
".",
"DataFrame",
"(",
"datadict",
")",
".",
"to_csv",
"(",
"f",
",",
"index",
"=",
"False",
",",
"header",
"=",
"header",
")"
] |
Save a dictionary of data to CSV.
|
[
"Save",
"a",
"dictionary",
"of",
"data",
"to",
"CSV",
"."
] |
python
|
train
|
dmwm/DBS
|
Server/Python/src/dbs/dao/Oracle/FileBuffer/DeleteFiles.py
|
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/FileBuffer/DeleteFiles.py#L20-L27
|
def execute(self, conn, logical_file_name={}, transaction=False):
"""
simple execute
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/DeleteFiles. Expects db connection from upper layer.")
self.dbi.processData(self.sql, logical_file_name, conn, transaction, returnCursor=True)
|
[
"def",
"execute",
"(",
"self",
",",
"conn",
",",
"logical_file_name",
"=",
"{",
"}",
",",
"transaction",
"=",
"False",
")",
":",
"if",
"not",
"conn",
":",
"dbsExceptionHandler",
"(",
"\"dbsException-db-conn-failed\"",
",",
"\"Oracle/FileBuffer/DeleteFiles. Expects db connection from upper layer.\"",
")",
"self",
".",
"dbi",
".",
"processData",
"(",
"self",
".",
"sql",
",",
"logical_file_name",
",",
"conn",
",",
"transaction",
",",
"returnCursor",
"=",
"True",
")"
] |
simple execute
|
[
"simple",
"execute"
] |
python
|
train
|
quantopian/zipline
|
zipline/data/benchmarks.py
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/benchmarks.py#L19-L42
|
def get_benchmark_returns(symbol):
"""
Get a Series of benchmark returns from IEX associated with `symbol`.
Default is `SPY`.
Parameters
----------
symbol : str
Benchmark symbol for which we're getting the returns.
The data is provided by IEX (https://iextrading.com/), and we can
get up to 5 years worth of data.
"""
r = requests.get(
'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol)
)
data = r.json()
df = pd.DataFrame(data)
df.index = pd.DatetimeIndex(df['date'])
df = df['close']
return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
|
[
"def",
"get_benchmark_returns",
"(",
"symbol",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"'https://api.iextrading.com/1.0/stock/{}/chart/5y'",
".",
"format",
"(",
"symbol",
")",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"df",
".",
"index",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"df",
"[",
"'date'",
"]",
")",
"df",
"=",
"df",
"[",
"'close'",
"]",
"return",
"df",
".",
"sort_index",
"(",
")",
".",
"tz_localize",
"(",
"'UTC'",
")",
".",
"pct_change",
"(",
"1",
")",
".",
"iloc",
"[",
"1",
":",
"]"
] |
Get a Series of benchmark returns from IEX associated with `symbol`.
Default is `SPY`.
Parameters
----------
symbol : str
Benchmark symbol for which we're getting the returns.
The data is provided by IEX (https://iextrading.com/), and we can
get up to 5 years worth of data.
|
[
"Get",
"a",
"Series",
"of",
"benchmark",
"returns",
"from",
"IEX",
"associated",
"with",
"symbol",
".",
"Default",
"is",
"SPY",
"."
] |
python
|
train
|
wheeler-microfluidics/dmf-control-board-firmware
|
dmf_control_board_firmware/__init__.py
|
https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L1183-L1219
|
def series_capacitance(self, channel, resistor_index=None):
'''
Parameters
----------
channel : int
Analog channel index.
resistor_index : int, optional
Series resistor channel index.
If :data:`resistor_index` is not specified, the resistor-index from
the current context _(i.e., the result of
:attr:`series_resistor_index`)_ is used.
Otherwise, the series-resistor is temporarily set to the value of
:data:`resistor_index` to read the capacitance before restoring
back to the original value.
See definition of :meth:`safe_series_resistor_index_read`
decorator.
Returns
-------
float
Return the current series capacitance value for the specified
channel.
'''
if resistor_index is None:
resistor_index = self.series_resistor_index(channel)
value = self._series_capacitance(channel)
try:
if channel == 0:
self.calibration.C_hv[resistor_index] = value
else:
self.calibration.C_fb[resistor_index] = value
except:
pass
return value
|
[
"def",
"series_capacitance",
"(",
"self",
",",
"channel",
",",
"resistor_index",
"=",
"None",
")",
":",
"if",
"resistor_index",
"is",
"None",
":",
"resistor_index",
"=",
"self",
".",
"series_resistor_index",
"(",
"channel",
")",
"value",
"=",
"self",
".",
"_series_capacitance",
"(",
"channel",
")",
"try",
":",
"if",
"channel",
"==",
"0",
":",
"self",
".",
"calibration",
".",
"C_hv",
"[",
"resistor_index",
"]",
"=",
"value",
"else",
":",
"self",
".",
"calibration",
".",
"C_fb",
"[",
"resistor_index",
"]",
"=",
"value",
"except",
":",
"pass",
"return",
"value"
] |
Parameters
----------
channel : int
Analog channel index.
resistor_index : int, optional
Series resistor channel index.
If :data:`resistor_index` is not specified, the resistor-index from
the current context _(i.e., the result of
:attr:`series_resistor_index`)_ is used.
Otherwise, the series-resistor is temporarily set to the value of
:data:`resistor_index` to read the capacitance before restoring
back to the original value.
See definition of :meth:`safe_series_resistor_index_read`
decorator.
Returns
-------
float
Return the current series capacitance value for the specified
channel.
|
[
"Parameters",
"----------",
"channel",
":",
"int",
"Analog",
"channel",
"index",
".",
"resistor_index",
":",
"int",
"optional",
"Series",
"resistor",
"channel",
"index",
"."
] |
python
|
train
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/signandverify.py
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/signandverify.py#L122-L182
|
def checksigformat(a,invalidatehighS=False):
'''
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT valid the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
'''
try:
a = hexstrlify(unhexlify(a))
except:
return False
try:
rlen = 2*int(a[6:8],16)
slen = 2*int(a[(10+rlen):(12+rlen)],16)
r = a[8:8+(rlen)]
s1 = a[(12+rlen):]
s2 = a[(12+rlen):(12+rlen+slen)]
assert s1 == s2
s1 = int(s1,16)
assert s1 < N
assert a[:2] == '30'
assert len(a) == ((2*int(a[2:4],16)) + 4)
assert a[4:6] == '02'
assert a[(8+rlen):(10+rlen)] == '02'
if int(dechex(int(r,16))[:2],16) > 127:
assert r[:2] == '00'
assert r[2:4] != '00'
else:
assert r[:2] != '00'
if int(dechex(s1)[:2],16) > 127:
assert s2[:2] == '00'
assert s2[2:4] != '00'
else:
assert s2[:2] != '00'
assert len(r) < 67
assert len(s2) < 67
except AssertionError:
return False
except Exception as e:
raise Exception(str(e))
if invalidatehighS:
if s1 > (N / 2):
return False
return True
|
[
"def",
"checksigformat",
"(",
"a",
",",
"invalidatehighS",
"=",
"False",
")",
":",
"try",
":",
"a",
"=",
"hexstrlify",
"(",
"unhexlify",
"(",
"a",
")",
")",
"except",
":",
"return",
"False",
"try",
":",
"rlen",
"=",
"2",
"*",
"int",
"(",
"a",
"[",
"6",
":",
"8",
"]",
",",
"16",
")",
"slen",
"=",
"2",
"*",
"int",
"(",
"a",
"[",
"(",
"10",
"+",
"rlen",
")",
":",
"(",
"12",
"+",
"rlen",
")",
"]",
",",
"16",
")",
"r",
"=",
"a",
"[",
"8",
":",
"8",
"+",
"(",
"rlen",
")",
"]",
"s1",
"=",
"a",
"[",
"(",
"12",
"+",
"rlen",
")",
":",
"]",
"s2",
"=",
"a",
"[",
"(",
"12",
"+",
"rlen",
")",
":",
"(",
"12",
"+",
"rlen",
"+",
"slen",
")",
"]",
"assert",
"s1",
"==",
"s2",
"s1",
"=",
"int",
"(",
"s1",
",",
"16",
")",
"assert",
"s1",
"<",
"N",
"assert",
"a",
"[",
":",
"2",
"]",
"==",
"'30'",
"assert",
"len",
"(",
"a",
")",
"==",
"(",
"(",
"2",
"*",
"int",
"(",
"a",
"[",
"2",
":",
"4",
"]",
",",
"16",
")",
")",
"+",
"4",
")",
"assert",
"a",
"[",
"4",
":",
"6",
"]",
"==",
"'02'",
"assert",
"a",
"[",
"(",
"8",
"+",
"rlen",
")",
":",
"(",
"10",
"+",
"rlen",
")",
"]",
"==",
"'02'",
"if",
"int",
"(",
"dechex",
"(",
"int",
"(",
"r",
",",
"16",
")",
")",
"[",
":",
"2",
"]",
",",
"16",
")",
">",
"127",
":",
"assert",
"r",
"[",
":",
"2",
"]",
"==",
"'00'",
"assert",
"r",
"[",
"2",
":",
"4",
"]",
"!=",
"'00'",
"else",
":",
"assert",
"r",
"[",
":",
"2",
"]",
"!=",
"'00'",
"if",
"int",
"(",
"dechex",
"(",
"s1",
")",
"[",
":",
"2",
"]",
",",
"16",
")",
">",
"127",
":",
"assert",
"s2",
"[",
":",
"2",
"]",
"==",
"'00'",
"assert",
"s2",
"[",
"2",
":",
"4",
"]",
"!=",
"'00'",
"else",
":",
"assert",
"s2",
"[",
":",
"2",
"]",
"!=",
"'00'",
"assert",
"len",
"(",
"r",
")",
"<",
"67",
"assert",
"len",
"(",
"s2",
")",
"<",
"67",
"except",
"AssertionError",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"str",
"(",
"e",
")",
")",
"if",
"invalidatehighS",
":",
"if",
"s1",
">",
"(",
"N",
"/",
"2",
")",
":",
"return",
"False",
"return",
"True"
] |
Checks input to see if it's a correctly formatted DER Bitcoin
signature in hex string format.
Returns True/False. If it excepts, there's a different problem
unrelated to the signature...
This does NOT valid the signature in any way, it ONLY checks that
it is formatted properly.
If invalidatehighS is True, this function will return False on an
otherwise valid signature format if it has a high S value.
|
[
"Checks",
"input",
"to",
"see",
"if",
"it",
"s",
"a",
"correctly",
"formatted",
"DER",
"Bitcoin",
"signature",
"in",
"hex",
"string",
"format",
"."
] |
python
|
train
|
fabioz/PyDev.Debugger
|
third_party/pep8/lib2to3/lib2to3/pytree.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L400-L404
|
def clone(self):
"""Return a cloned (deep) copy of self."""
return Leaf(self.type, self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied)
|
[
"def",
"clone",
"(",
"self",
")",
":",
"return",
"Leaf",
"(",
"self",
".",
"type",
",",
"self",
".",
"value",
",",
"(",
"self",
".",
"prefix",
",",
"(",
"self",
".",
"lineno",
",",
"self",
".",
"column",
")",
")",
",",
"fixers_applied",
"=",
"self",
".",
"fixers_applied",
")"
] |
Return a cloned (deep) copy of self.
|
[
"Return",
"a",
"cloned",
"(",
"deep",
")",
"copy",
"of",
"self",
"."
] |
python
|
train
|
swharden/SWHLab
|
swhlab/indexing/indexing.py
|
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/indexing/indexing.py#L133-L146
|
def analyzeAll(self):
"""analyze every unanalyzed ABF in the folder."""
searchableData=str(self.files2)
self.log.debug("considering analysis for %d ABFs",len(self.IDs))
for ID in self.IDs:
if not ID+"_" in searchableData:
self.log.debug("%s needs analysis",ID)
try:
self.analyzeABF(ID)
except:
print("EXCEPTION! "*100)
else:
self.log.debug("%s has existing analysis, not overwriting",ID)
self.log.debug("verified analysis of %d ABFs",len(self.IDs))
|
[
"def",
"analyzeAll",
"(",
"self",
")",
":",
"searchableData",
"=",
"str",
"(",
"self",
".",
"files2",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"considering analysis for %d ABFs\"",
",",
"len",
"(",
"self",
".",
"IDs",
")",
")",
"for",
"ID",
"in",
"self",
".",
"IDs",
":",
"if",
"not",
"ID",
"+",
"\"_\"",
"in",
"searchableData",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"%s needs analysis\"",
",",
"ID",
")",
"try",
":",
"self",
".",
"analyzeABF",
"(",
"ID",
")",
"except",
":",
"print",
"(",
"\"EXCEPTION! \"",
"*",
"100",
")",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"%s has existing analysis, not overwriting\"",
",",
"ID",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"verified analysis of %d ABFs\"",
",",
"len",
"(",
"self",
".",
"IDs",
")",
")"
] |
analyze every unanalyzed ABF in the folder.
|
[
"analyze",
"every",
"unanalyzed",
"ABF",
"in",
"the",
"folder",
"."
] |
python
|
valid
|
gwpy/gwpy
|
gwpy/signal/filter_design.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L632-L669
|
def concatenate_zpks(*zpks):
"""Concatenate a list of zero-pole-gain (ZPK) filters
Parameters
----------
*zpks
one or more zero-pole-gain format, each one should be a 3-`tuple`
containing an array of zeros, an array of poles, and a gain `float`
Returns
-------
zeros : `numpy.ndarray`
the concatenated array of zeros
poles : `numpy.ndarray`
the concatenated array of poles
gain : `float`
the overall gain
Examples
--------
Create a lowpass and a highpass filter, and combine them:
>>> from gwpy.signal.filter_design import (
... highpass, lowpass, concatenate_zpks)
>>> hp = highpass(100, 4096)
>>> lp = lowpass(1000, 4096)
>>> zpk = concatenate_zpks(hp, lp)
Plot the filter:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(zpk, sample_rate=4096)
>>> plot.show()
"""
zeros, poles, gains = zip(*zpks)
return (numpy.concatenate(zeros),
numpy.concatenate(poles),
reduce(operator.mul, gains, 1))
|
[
"def",
"concatenate_zpks",
"(",
"*",
"zpks",
")",
":",
"zeros",
",",
"poles",
",",
"gains",
"=",
"zip",
"(",
"*",
"zpks",
")",
"return",
"(",
"numpy",
".",
"concatenate",
"(",
"zeros",
")",
",",
"numpy",
".",
"concatenate",
"(",
"poles",
")",
",",
"reduce",
"(",
"operator",
".",
"mul",
",",
"gains",
",",
"1",
")",
")"
] |
Concatenate a list of zero-pole-gain (ZPK) filters
Parameters
----------
*zpks
one or more zero-pole-gain format, each one should be a 3-`tuple`
containing an array of zeros, an array of poles, and a gain `float`
Returns
-------
zeros : `numpy.ndarray`
the concatenated array of zeros
poles : `numpy.ndarray`
the concatenated array of poles
gain : `float`
the overall gain
Examples
--------
Create a lowpass and a highpass filter, and combine them:
>>> from gwpy.signal.filter_design import (
... highpass, lowpass, concatenate_zpks)
>>> hp = highpass(100, 4096)
>>> lp = lowpass(1000, 4096)
>>> zpk = concatenate_zpks(hp, lp)
Plot the filter:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(zpk, sample_rate=4096)
>>> plot.show()
|
[
"Concatenate",
"a",
"list",
"of",
"zero",
"-",
"pole",
"-",
"gain",
"(",
"ZPK",
")",
"filters"
] |
python
|
train
|
PeerAssets/pypeerassets
|
pypeerassets/protocol.py
|
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L309-L328
|
def metainfo_to_protobuf(self) -> bytes:
'''encode card_transfer info to protobuf'''
card = cardtransferproto()
card.version = self.version
card.amount.extend(self.amount)
card.number_of_decimals = self.number_of_decimals
if self.asset_specific_data:
if not isinstance(self.asset_specific_data, bytes):
card.asset_specific_data = self.asset_specific_data.encode()
else:
card.asset_specific_data = self.asset_specific_data
if card.ByteSize() > net_query(self.network).op_return_max_bytes:
raise OverSizeOPReturn('''
Metainfo size exceeds maximum of {max} bytes supported by this network.'''
.format(max=net_query(self.network)
.op_return_max_bytes))
return card.SerializeToString()
|
[
"def",
"metainfo_to_protobuf",
"(",
"self",
")",
"->",
"bytes",
":",
"card",
"=",
"cardtransferproto",
"(",
")",
"card",
".",
"version",
"=",
"self",
".",
"version",
"card",
".",
"amount",
".",
"extend",
"(",
"self",
".",
"amount",
")",
"card",
".",
"number_of_decimals",
"=",
"self",
".",
"number_of_decimals",
"if",
"self",
".",
"asset_specific_data",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"asset_specific_data",
",",
"bytes",
")",
":",
"card",
".",
"asset_specific_data",
"=",
"self",
".",
"asset_specific_data",
".",
"encode",
"(",
")",
"else",
":",
"card",
".",
"asset_specific_data",
"=",
"self",
".",
"asset_specific_data",
"if",
"card",
".",
"ByteSize",
"(",
")",
">",
"net_query",
"(",
"self",
".",
"network",
")",
".",
"op_return_max_bytes",
":",
"raise",
"OverSizeOPReturn",
"(",
"'''\n Metainfo size exceeds maximum of {max} bytes supported by this network.'''",
".",
"format",
"(",
"max",
"=",
"net_query",
"(",
"self",
".",
"network",
")",
".",
"op_return_max_bytes",
")",
")",
"return",
"card",
".",
"SerializeToString",
"(",
")"
] |
encode card_transfer info to protobuf
|
[
"encode",
"card_transfer",
"info",
"to",
"protobuf"
] |
python
|
train
|
tcalmant/ipopo
|
pelix/misc/mqtt_client.py
|
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/mqtt_client.py#L399-L413
|
def __on_message(self, client, userdata, msg):
# pylint: disable=W0613
"""
A message has been received from a server
:param client: Client that received the message
:param userdata: User data (unused)
:param msg: A MQTTMessage bean
"""
# Notify the caller, if any
if self.on_message is not None:
try:
self.on_message(self, msg)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
|
[
"def",
"__on_message",
"(",
"self",
",",
"client",
",",
"userdata",
",",
"msg",
")",
":",
"# pylint: disable=W0613",
"# Notify the caller, if any",
"if",
"self",
".",
"on_message",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"on_message",
"(",
"self",
",",
"msg",
")",
"except",
"Exception",
"as",
"ex",
":",
"_logger",
".",
"exception",
"(",
"\"Error notifying MQTT listener: %s\"",
",",
"ex",
")"
] |
A message has been received from a server
:param client: Client that received the message
:param userdata: User data (unused)
:param msg: A MQTTMessage bean
|
[
"A",
"message",
"has",
"been",
"received",
"from",
"a",
"server"
] |
python
|
train
|
xmikos/reparser
|
reparser.py
|
https://github.com/xmikos/reparser/blob/0668112a15b9e8e9355a1261040c36b4a6034020/reparser.py#L84-L91
|
def build_regex(self, tokens):
"""Build compound regex from list of tokens"""
patterns = []
for token in tokens:
patterns.append(token.pattern_start)
if token.pattern_end:
patterns.append(token.pattern_end)
return re.compile('|'.join(patterns), re.DOTALL)
|
[
"def",
"build_regex",
"(",
"self",
",",
"tokens",
")",
":",
"patterns",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"patterns",
".",
"append",
"(",
"token",
".",
"pattern_start",
")",
"if",
"token",
".",
"pattern_end",
":",
"patterns",
".",
"append",
"(",
"token",
".",
"pattern_end",
")",
"return",
"re",
".",
"compile",
"(",
"'|'",
".",
"join",
"(",
"patterns",
")",
",",
"re",
".",
"DOTALL",
")"
] |
Build compound regex from list of tokens
|
[
"Build",
"compound",
"regex",
"from",
"list",
"of",
"tokens"
] |
python
|
train
|
GNS3/gns3-server
|
gns3server/utils/asyncio/embed_shell.py
|
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/embed_shell.py#L300-L335
|
def create_stdin_shell(shell, loop=None):
"""
Run a shell application with a stdin frontend
:param application: An EmbedShell instance
:param loop: The event loop
:returns: Telnet server
"""
@asyncio.coroutine
def feed_stdin(loop, reader, shell):
history = InMemoryHistory()
completer = WordCompleter([name for name, _ in shell.get_commands()], ignore_case=True)
while True:
line = yield from prompt(
">", patch_stdout=True, return_asyncio_coroutine=True, history=history, completer=completer)
line += '\n'
reader.feed_data(line.encode())
@asyncio.coroutine
def read_stdout(writer):
while True:
c = yield from writer.read(1)
print(c.decode(), end='')
sys.stdout.flush()
reader = asyncio.StreamReader()
writer = asyncio.StreamReader()
shell.reader = reader
shell.writer = writer
if loop is None:
loop = asyncio.get_event_loop()
reader_task = loop.create_task(feed_stdin(loop, reader, shell))
writer_task = loop.create_task(read_stdout(writer))
shell_task = loop.create_task(shell.run())
return asyncio.gather(shell_task, writer_task, reader_task)
|
[
"def",
"create_stdin_shell",
"(",
"shell",
",",
"loop",
"=",
"None",
")",
":",
"@",
"asyncio",
".",
"coroutine",
"def",
"feed_stdin",
"(",
"loop",
",",
"reader",
",",
"shell",
")",
":",
"history",
"=",
"InMemoryHistory",
"(",
")",
"completer",
"=",
"WordCompleter",
"(",
"[",
"name",
"for",
"name",
",",
"_",
"in",
"shell",
".",
"get_commands",
"(",
")",
"]",
",",
"ignore_case",
"=",
"True",
")",
"while",
"True",
":",
"line",
"=",
"yield",
"from",
"prompt",
"(",
"\">\"",
",",
"patch_stdout",
"=",
"True",
",",
"return_asyncio_coroutine",
"=",
"True",
",",
"history",
"=",
"history",
",",
"completer",
"=",
"completer",
")",
"line",
"+=",
"'\\n'",
"reader",
".",
"feed_data",
"(",
"line",
".",
"encode",
"(",
")",
")",
"@",
"asyncio",
".",
"coroutine",
"def",
"read_stdout",
"(",
"writer",
")",
":",
"while",
"True",
":",
"c",
"=",
"yield",
"from",
"writer",
".",
"read",
"(",
"1",
")",
"print",
"(",
"c",
".",
"decode",
"(",
")",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"reader",
"=",
"asyncio",
".",
"StreamReader",
"(",
")",
"writer",
"=",
"asyncio",
".",
"StreamReader",
"(",
")",
"shell",
".",
"reader",
"=",
"reader",
"shell",
".",
"writer",
"=",
"writer",
"if",
"loop",
"is",
"None",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"reader_task",
"=",
"loop",
".",
"create_task",
"(",
"feed_stdin",
"(",
"loop",
",",
"reader",
",",
"shell",
")",
")",
"writer_task",
"=",
"loop",
".",
"create_task",
"(",
"read_stdout",
"(",
"writer",
")",
")",
"shell_task",
"=",
"loop",
".",
"create_task",
"(",
"shell",
".",
"run",
"(",
")",
")",
"return",
"asyncio",
".",
"gather",
"(",
"shell_task",
",",
"writer_task",
",",
"reader_task",
")"
] |
Run a shell application with a stdin frontend
:param application: An EmbedShell instance
:param loop: The event loop
:returns: Telnet server
|
[
"Run",
"a",
"shell",
"application",
"with",
"a",
"stdin",
"frontend"
] |
python
|
train
|
awslabs/sockeye
|
sockeye/utils.py
|
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L342-L363
|
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors: str = 'replace'):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
If ftype is "auto" and read mode requested, uses gzip iff is_gzip_file(filename).
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open.
:param errors: Encoding error handling during reading. Defaults to 'replace'.
:return: File descriptor.
"""
if ftype in ('gzip', 'gz') \
or (ftype == 'auto' and filename.endswith(".gz")) \
or (ftype == 'auto' and 'r' in mode and is_gzip_file(filename)):
return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors)
else:
return open(filename, mode=mode, encoding='utf-8', errors=errors)
|
[
"def",
"smart_open",
"(",
"filename",
":",
"str",
",",
"mode",
":",
"str",
"=",
"\"rt\"",
",",
"ftype",
":",
"str",
"=",
"\"auto\"",
",",
"errors",
":",
"str",
"=",
"'replace'",
")",
":",
"if",
"ftype",
"in",
"(",
"'gzip'",
",",
"'gz'",
")",
"or",
"(",
"ftype",
"==",
"'auto'",
"and",
"filename",
".",
"endswith",
"(",
"\".gz\"",
")",
")",
"or",
"(",
"ftype",
"==",
"'auto'",
"and",
"'r'",
"in",
"mode",
"and",
"is_gzip_file",
"(",
"filename",
")",
")",
":",
"return",
"gzip",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"mode",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"errors",
")",
"else",
":",
"return",
"open",
"(",
"filename",
",",
"mode",
"=",
"mode",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"errors",
")"
] |
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
If ftype is "auto" and read mode requested, uses gzip iff is_gzip_file(filename).
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open.
:param errors: Encoding error handling during reading. Defaults to 'replace'.
:return: File descriptor.
|
[
"Returns",
"a",
"file",
"descriptor",
"for",
"filename",
"with",
"UTF",
"-",
"8",
"encoding",
".",
"If",
"mode",
"is",
"rt",
"file",
"is",
"opened",
"read",
"-",
"only",
".",
"If",
"ftype",
"is",
"auto",
"uses",
"gzip",
"iff",
"filename",
"endswith",
".",
"gz",
".",
"If",
"ftype",
"is",
"{",
"gzip",
"gz",
"}",
"uses",
"gzip",
".",
"If",
"ftype",
"is",
"auto",
"and",
"read",
"mode",
"requested",
"uses",
"gzip",
"iff",
"is_gzip_file",
"(",
"filename",
")",
"."
] |
python
|
train
|
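A minimal usage sketch for the smart_open record above; the file names are hypothetical and only illustrate the suffix-based gzip detection.

from sockeye.utils import smart_open

with smart_open('corpus.en.gz') as fin:              # ftype='auto' picks gzip from the .gz suffix
    first = fin.readline()

with smart_open('corpus.copy.en', mode='wt') as fout:  # no .gz suffix and no 'r' in mode -> plain open
    fout.write(first)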
OSSOS/MOP
|
src/ossos/core/ossos/util.py
|
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/util.py#L112-L125
|
def close(self):
"""
Closes the stream.
"""
self.flush()
try:
if self.stream is not None:
self.stream.flush()
_name = self.stream.name
self.stream.close()
self.client.copy(_name, self.filename)
except Exception as ex:
print str(ex)
pass
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"flush",
"(",
")",
"try",
":",
"if",
"self",
".",
"stream",
"is",
"not",
"None",
":",
"self",
".",
"stream",
".",
"flush",
"(",
")",
"_name",
"=",
"self",
".",
"stream",
".",
"name",
"self",
".",
"stream",
".",
"close",
"(",
")",
"self",
".",
"client",
".",
"copy",
"(",
"_name",
",",
"self",
".",
"filename",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"str",
"(",
"ex",
")",
"pass"
] |
Closes the stream.
|
[
"Closes",
"the",
"stream",
"."
] |
python
|
train
|
jslang/responsys
|
responsys/client.py
|
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L369-L382
|
def merge_table_records(self, table, record_data, match_column_names):
""" Responsys.mergeTableRecords call
Accepts:
InteractObject table
RecordData record_data
list match_column_names
Returns a MergeResult
"""
table = table.get_soap_object(self.client)
record_data = record_data.get_soap_object(self.client)
return MergeResult(self.call(
'mergeTableRecords', table, record_data, match_column_names))
|
[
"def",
"merge_table_records",
"(",
"self",
",",
"table",
",",
"record_data",
",",
"match_column_names",
")",
":",
"table",
"=",
"table",
".",
"get_soap_object",
"(",
"self",
".",
"client",
")",
"record_data",
"=",
"record_data",
".",
"get_soap_object",
"(",
"self",
".",
"client",
")",
"return",
"MergeResult",
"(",
"self",
".",
"call",
"(",
"'mergeTableRecords'",
",",
"table",
",",
"record_data",
",",
"match_column_names",
")",
")"
] |
Responsys.mergeTableRecords call
Accepts:
InteractObject table
RecordData record_data
list match_column_names
Returns a MergeResult
|
[
"Responsys",
".",
"mergeTableRecords",
"call"
] |
python
|
train
|
jobovy/galpy
|
galpy/orbit/FullOrbit.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/FullOrbit.py#L381-L447
|
def fit(self,vxvv,vxvv_err=None,pot=None,radec=False,lb=False,
customsky=False,lb_to_customsky=None,pmllpmbb_to_customsky=None,
tintJ=10,ntintJ=1000,integrate_method='dopr54_c',
disp=False,
**kwargs):
"""
NAME:
fit
PURPOSE:
fit an Orbit to data using the current orbit as the initial
condition
INPUT:
vxvv - [:,6] array of positions and velocities along the orbit [cannot be Quantities]
vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01) [cannot be Quantities]
pot= Potential to fit the orbit in
Keywords related to the input data:
radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra, mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all ICRS; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates; Note that for speed reasons, galpy's internal transformation between (l,b) and (ra,dec) is used, rather than astropy's
lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll, mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll, mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinats; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
Cannot be an Orbit instance with the orbit of the reference point, as w/ the ra etc. functions
ro= distance in kpc corresponding to R=1. (default: taken from object)
vo= velocity in km/s corresponding to v=1. (default: taken from object)
lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True
pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True
Keywords related to the orbit integrations:
tintJ= (default: 10) time to integrate orbits for fitting the orbit
ntintJ= (default: 1000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
disp= (False) display the optimizer's convergence message
OUTPUT:
max of log likelihood
HISTORY:
2014-06-17 - Written - Bovy (IAS)
TEST:
from galpy.potential import LogarithmicHaloPotential; lp= LogarithmicHaloPotential(normalize=1.); from galpy.orbit import Orbit; o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.]); ts= numpy.linspace(0,10,1000); o.integrate(ts,lp); outts= [0.,0.1,0.2,0.3,0.4]; vxvv= numpy.array([o.R(outts),o.vR(outts),o.vT(outts),o.z(outts),o.vz(outts),o.phi(outts)]).T; of= Orbit(vxvv=[1.02,0.101,1.101,0.101,0.0201,0.001]); of._orb.fit(vxvv,pot=lp,radec=False,tintJ=10,ntintJ=1000)
"""
if pot is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
if radec or lb or customsky:
obs, ro, vo= self._parse_radec_kwargs(kwargs,vel=True,dontpop=True)
else:
obs, ro, vo= None, None, None
if customsky \
and (lb_to_customsky is None or pmllpmbb_to_customsky is None):
raise IOError('if customsky=True, the functions lb_to_customsky and pmllpmbb_to_customsky need to be given')
new_vxvv, maxLogL= _fit_orbit(self,vxvv,vxvv_err,pot,radec=radec,lb=lb,
customsky=customsky,
lb_to_customsky=lb_to_customsky,
pmllpmbb_to_customsky=pmllpmbb_to_customsky,
tintJ=tintJ,ntintJ=ntintJ,
integrate_method=integrate_method,
ro=ro,vo=vo,obs=obs,disp=disp)
#Setup with these new initial conditions
self.vxvv= new_vxvv
return maxLogL
|
[
"def",
"fit",
"(",
"self",
",",
"vxvv",
",",
"vxvv_err",
"=",
"None",
",",
"pot",
"=",
"None",
",",
"radec",
"=",
"False",
",",
"lb",
"=",
"False",
",",
"customsky",
"=",
"False",
",",
"lb_to_customsky",
"=",
"None",
",",
"pmllpmbb_to_customsky",
"=",
"None",
",",
"tintJ",
"=",
"10",
",",
"ntintJ",
"=",
"1000",
",",
"integrate_method",
"=",
"'dopr54_c'",
",",
"disp",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pot",
"is",
"None",
":",
"try",
":",
"pot",
"=",
"self",
".",
"_pot",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"Integrate orbit first or specify pot=\"",
")",
"if",
"radec",
"or",
"lb",
"or",
"customsky",
":",
"obs",
",",
"ro",
",",
"vo",
"=",
"self",
".",
"_parse_radec_kwargs",
"(",
"kwargs",
",",
"vel",
"=",
"True",
",",
"dontpop",
"=",
"True",
")",
"else",
":",
"obs",
",",
"ro",
",",
"vo",
"=",
"None",
",",
"None",
",",
"None",
"if",
"customsky",
"and",
"(",
"lb_to_customsky",
"is",
"None",
"or",
"pmllpmbb_to_customsky",
"is",
"None",
")",
":",
"raise",
"IOError",
"(",
"'if customsky=True, the functions lb_to_customsky and pmllpmbb_to_customsky need to be given'",
")",
"new_vxvv",
",",
"maxLogL",
"=",
"_fit_orbit",
"(",
"self",
",",
"vxvv",
",",
"vxvv_err",
",",
"pot",
",",
"radec",
"=",
"radec",
",",
"lb",
"=",
"lb",
",",
"customsky",
"=",
"customsky",
",",
"lb_to_customsky",
"=",
"lb_to_customsky",
",",
"pmllpmbb_to_customsky",
"=",
"pmllpmbb_to_customsky",
",",
"tintJ",
"=",
"tintJ",
",",
"ntintJ",
"=",
"ntintJ",
",",
"integrate_method",
"=",
"integrate_method",
",",
"ro",
"=",
"ro",
",",
"vo",
"=",
"vo",
",",
"obs",
"=",
"obs",
",",
"disp",
"=",
"disp",
")",
"#Setup with these new initial conditions",
"self",
".",
"vxvv",
"=",
"new_vxvv",
"return",
"maxLogL"
] |
NAME:
fit
PURPOSE:
fit an Orbit to data using the current orbit as the initial
condition
INPUT:
vxvv - [:,6] array of positions and velocities along the orbit [cannot be Quantities]
vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01) [cannot be Quantities]
pot= Potential to fit the orbit in
Keywords related to the input data:
radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra, mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all ICRS; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates; Note that for speed reasons, galpy's internal transformation between (l,b) and (ra,dec) is used, rather than astropy's
lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll, mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll, mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinats; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
Cannot be an Orbit instance with the orbit of the reference point, as w/ the ra etc. functions
ro= distance in kpc corresponding to R=1. (default: taken from object)
vo= velocity in km/s corresponding to v=1. (default: taken from object)
lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True
pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True
Keywords related to the orbit integrations:
tintJ= (default: 10) time to integrate orbits for fitting the orbit
ntintJ= (default: 1000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
disp= (False) display the optimizer's convergence message
OUTPUT:
max of log likelihood
HISTORY:
2014-06-17 - Written - Bovy (IAS)
TEST:
from galpy.potential import LogarithmicHaloPotential; lp= LogarithmicHaloPotential(normalize=1.); from galpy.orbit import Orbit; o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.]); ts= numpy.linspace(0,10,1000); o.integrate(ts,lp); outts= [0.,0.1,0.2,0.3,0.4]; vxvv= numpy.array([o.R(outts),o.vR(outts),o.vT(outts),o.z(outts),o.vz(outts),o.phi(outts)]).T; of= Orbit(vxvv=[1.02,0.101,1.101,0.101,0.0201,0.001]); of._orb.fit(vxvv,pot=lp,radec=False,tintJ=10,ntintJ=1000)
|
[
"NAME",
":",
"fit",
"PURPOSE",
":",
"fit",
"an",
"Orbit",
"to",
"data",
"using",
"the",
"current",
"orbit",
"as",
"the",
"initial",
"condition",
"INPUT",
":",
"vxvv",
"-",
"[",
":",
"6",
"]",
"array",
"of",
"positions",
"and",
"velocities",
"along",
"the",
"orbit",
"[",
"cannot",
"be",
"Quantities",
"]",
"vxvv_err",
"=",
"[",
":",
"6",
"]",
"array",
"of",
"errors",
"on",
"positions",
"and",
"velocities",
"along",
"the",
"orbit",
"(",
"if",
"None",
"these",
"are",
"set",
"to",
"0",
".",
"01",
")",
"[",
"cannot",
"be",
"Quantities",
"]",
"pot",
"=",
"Potential",
"to",
"fit",
"the",
"orbit",
"in"
] |
python
|
train
|
BreakingBytes/simkit
|
simkit/core/simulations.py
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L329-L338
|
def index_iterator(self):
"""
Generator that resumes from same index, or restarts from sent index.
"""
idx = 0 # index
while idx < self.number_intervals:
new_idx = yield idx
idx += 1
if new_idx:
idx = new_idx - 1
|
[
"def",
"index_iterator",
"(",
"self",
")",
":",
"idx",
"=",
"0",
"# index",
"while",
"idx",
"<",
"self",
".",
"number_intervals",
":",
"new_idx",
"=",
"yield",
"idx",
"idx",
"+=",
"1",
"if",
"new_idx",
":",
"idx",
"=",
"new_idx",
"-",
"1"
] |
Generator that resumes from same index, or restarts from sent index.
|
[
"Generator",
"that",
"resumes",
"from",
"same",
"index",
"or",
"restarts",
"from",
"sent",
"index",
"."
] |
python
|
train
|
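A small driver for the generator in the record above, adapted so the interval count is passed in directly instead of read from self.number_intervals; it shows both plain iteration and restarting via send().

def index_iterator(number_intervals):
    # Same body as the method above, with the count passed as an argument (an
    # adaptation for this sketch).
    idx = 0  # index
    while idx < number_intervals:
        new_idx = yield idx
        idx += 1
        if new_idx:
            idx = new_idx - 1

it = index_iterator(5)
print(next(it))    # 0
print(next(it))    # 1
print(it.send(4))  # 3 -- after send(n), the next yielded index is n - 1
print(next(it))    # 4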
eventbrite/pysoa
|
pysoa/client/client.py
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/client.py#L701-L800
|
def call_jobs_parallel_future(
self,
jobs,
expansions=None,
raise_job_errors=True,
raise_action_errors=True,
catch_transport_errors=False,
timeout=None,
**kwargs
):
"""
This method is identical in signature and behavior to `call_jobs_parallel`, except that it sends the requests
and then immediately returns a `FutureResponse` instead of blocking waiting on all responses and returning
a `list` of `JobResponses`. Just call `result(timeout=None)` on the future response to block for an available
response. Some of the possible exceptions may be raised when this method is called; others may be raised when
the future is used.
:return: A future from which the list of job responses can later be retrieved
:rtype: Client.FutureResponse
"""
kwargs.pop('suppress_response', None) # If this kwarg is used, this method would always result in a timeout
if timeout:
kwargs['message_expiry_in_seconds'] = timeout
error_key = 0
transport_errors = {}
response_reassembly_keys = []
service_request_ids = {}
for job in jobs:
try:
sent_request_id = self.send_request(job['service_name'], job['actions'], **kwargs)
service_request_ids.setdefault(job['service_name'], set()).add(sent_request_id)
except (ConnectionError, InvalidMessageError, MessageSendError, MessageSendTimeout, MessageTooLarge) as e:
if not catch_transport_errors:
raise
sent_request_id = error_key = error_key - 1
transport_errors[(job['service_name'], sent_request_id)] = e
response_reassembly_keys.append((job['service_name'], sent_request_id))
def get_response(_timeout):
service_responses = {}
for service_name, request_ids in six.iteritems(service_request_ids):
try:
for request_id, response in self.get_all_responses(
service_name,
receive_timeout_in_seconds=_timeout or timeout,
):
if request_id not in request_ids:
raise Exception(
'Got response ID {}, not in set of expected IDs {}'.format(request_id, request_ids)
)
service_responses[(service_name, request_id)] = response
if catch_transport_errors:
# We don't need the set to be reduced unless we're catching errors
request_ids.remove(request_id)
except (ConnectionError, InvalidMessageError, MessageReceiveError, MessageReceiveTimeout) as e:
if not catch_transport_errors:
raise
for request_id in request_ids:
transport_errors[(service_name, request_id)] = e
responses = []
actions_to_expand = []
for service_name, request_id in response_reassembly_keys:
if request_id < 0:
# A transport error occurred during send, and we are catching errors, so add it to the list
responses.append(transport_errors[(service_name, request_id)])
continue
if (service_name, request_id) not in service_responses:
if (service_name, request_id) in transport_errors:
# A transport error occurred during receive, and we are catching errors, so add it to the list
responses.append(transport_errors[(service_name, request_id)])
continue
# It shouldn't be possible for this to happen unless the code has a bug, but let's raise a
# meaningful exception just in case a bug exists, because KeyError will not be helpful.
raise Exception('There was no response for service {}, request {}'.format(service_name, request_id))
response = service_responses[(service_name, request_id)]
if raise_job_errors and response.errors:
raise self.JobError(response.errors)
if raise_action_errors:
error_actions = [action for action in response.actions if action.errors]
if error_actions:
raise self.CallActionError(error_actions)
if expansions:
actions_to_expand.extend(response.actions)
responses.append(response)
if expansions:
kwargs.pop('continue_on_error', None)
self._perform_expansion(actions_to_expand, expansions, **kwargs)
return responses
return self.FutureResponse(get_response)
|
[
"def",
"call_jobs_parallel_future",
"(",
"self",
",",
"jobs",
",",
"expansions",
"=",
"None",
",",
"raise_job_errors",
"=",
"True",
",",
"raise_action_errors",
"=",
"True",
",",
"catch_transport_errors",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"pop",
"(",
"'suppress_response'",
",",
"None",
")",
"# If this kwarg is used, this method would always result in a timeout",
"if",
"timeout",
":",
"kwargs",
"[",
"'message_expiry_in_seconds'",
"]",
"=",
"timeout",
"error_key",
"=",
"0",
"transport_errors",
"=",
"{",
"}",
"response_reassembly_keys",
"=",
"[",
"]",
"service_request_ids",
"=",
"{",
"}",
"for",
"job",
"in",
"jobs",
":",
"try",
":",
"sent_request_id",
"=",
"self",
".",
"send_request",
"(",
"job",
"[",
"'service_name'",
"]",
",",
"job",
"[",
"'actions'",
"]",
",",
"*",
"*",
"kwargs",
")",
"service_request_ids",
".",
"setdefault",
"(",
"job",
"[",
"'service_name'",
"]",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"sent_request_id",
")",
"except",
"(",
"ConnectionError",
",",
"InvalidMessageError",
",",
"MessageSendError",
",",
"MessageSendTimeout",
",",
"MessageTooLarge",
")",
"as",
"e",
":",
"if",
"not",
"catch_transport_errors",
":",
"raise",
"sent_request_id",
"=",
"error_key",
"=",
"error_key",
"-",
"1",
"transport_errors",
"[",
"(",
"job",
"[",
"'service_name'",
"]",
",",
"sent_request_id",
")",
"]",
"=",
"e",
"response_reassembly_keys",
".",
"append",
"(",
"(",
"job",
"[",
"'service_name'",
"]",
",",
"sent_request_id",
")",
")",
"def",
"get_response",
"(",
"_timeout",
")",
":",
"service_responses",
"=",
"{",
"}",
"for",
"service_name",
",",
"request_ids",
"in",
"six",
".",
"iteritems",
"(",
"service_request_ids",
")",
":",
"try",
":",
"for",
"request_id",
",",
"response",
"in",
"self",
".",
"get_all_responses",
"(",
"service_name",
",",
"receive_timeout_in_seconds",
"=",
"_timeout",
"or",
"timeout",
",",
")",
":",
"if",
"request_id",
"not",
"in",
"request_ids",
":",
"raise",
"Exception",
"(",
"'Got response ID {}, not in set of expected IDs {}'",
".",
"format",
"(",
"request_id",
",",
"request_ids",
")",
")",
"service_responses",
"[",
"(",
"service_name",
",",
"request_id",
")",
"]",
"=",
"response",
"if",
"catch_transport_errors",
":",
"# We don't need the set to be reduced unless we're catching errors",
"request_ids",
".",
"remove",
"(",
"request_id",
")",
"except",
"(",
"ConnectionError",
",",
"InvalidMessageError",
",",
"MessageReceiveError",
",",
"MessageReceiveTimeout",
")",
"as",
"e",
":",
"if",
"not",
"catch_transport_errors",
":",
"raise",
"for",
"request_id",
"in",
"request_ids",
":",
"transport_errors",
"[",
"(",
"service_name",
",",
"request_id",
")",
"]",
"=",
"e",
"responses",
"=",
"[",
"]",
"actions_to_expand",
"=",
"[",
"]",
"for",
"service_name",
",",
"request_id",
"in",
"response_reassembly_keys",
":",
"if",
"request_id",
"<",
"0",
":",
"# A transport error occurred during send, and we are catching errors, so add it to the list",
"responses",
".",
"append",
"(",
"transport_errors",
"[",
"(",
"service_name",
",",
"request_id",
")",
"]",
")",
"continue",
"if",
"(",
"service_name",
",",
"request_id",
")",
"not",
"in",
"service_responses",
":",
"if",
"(",
"service_name",
",",
"request_id",
")",
"in",
"transport_errors",
":",
"# A transport error occurred during receive, and we are catching errors, so add it to the list",
"responses",
".",
"append",
"(",
"transport_errors",
"[",
"(",
"service_name",
",",
"request_id",
")",
"]",
")",
"continue",
"# It shouldn't be possible for this to happen unless the code has a bug, but let's raise a",
"# meaningful exception just in case a bug exists, because KeyError will not be helpful.",
"raise",
"Exception",
"(",
"'There was no response for service {}, request {}'",
".",
"format",
"(",
"service_name",
",",
"request_id",
")",
")",
"response",
"=",
"service_responses",
"[",
"(",
"service_name",
",",
"request_id",
")",
"]",
"if",
"raise_job_errors",
"and",
"response",
".",
"errors",
":",
"raise",
"self",
".",
"JobError",
"(",
"response",
".",
"errors",
")",
"if",
"raise_action_errors",
":",
"error_actions",
"=",
"[",
"action",
"for",
"action",
"in",
"response",
".",
"actions",
"if",
"action",
".",
"errors",
"]",
"if",
"error_actions",
":",
"raise",
"self",
".",
"CallActionError",
"(",
"error_actions",
")",
"if",
"expansions",
":",
"actions_to_expand",
".",
"extend",
"(",
"response",
".",
"actions",
")",
"responses",
".",
"append",
"(",
"response",
")",
"if",
"expansions",
":",
"kwargs",
".",
"pop",
"(",
"'continue_on_error'",
",",
"None",
")",
"self",
".",
"_perform_expansion",
"(",
"actions_to_expand",
",",
"expansions",
",",
"*",
"*",
"kwargs",
")",
"return",
"responses",
"return",
"self",
".",
"FutureResponse",
"(",
"get_response",
")"
] |
This method is identical in signature and behavior to `call_jobs_parallel`, except that it sends the requests
and then immediately returns a `FutureResponse` instead of blocking waiting on all responses and returning
a `list` of `JobResponses`. Just call `result(timeout=None)` on the future response to block for an available
response. Some of the possible exceptions may be raised when this method is called; others may be raised when
the future is used.
:return: A future from which the list of job responses can later be retrieved
:rtype: Client.FutureResponse
|
[
"This",
"method",
"is",
"identical",
"in",
"signature",
"and",
"behavior",
"to",
"call_jobs_parallel",
"except",
"that",
"it",
"sends",
"the",
"requests",
"and",
"then",
"immediately",
"returns",
"a",
"FutureResponse",
"instead",
"of",
"blocking",
"waiting",
"on",
"all",
"responses",
"and",
"returning",
"a",
"list",
"of",
"JobResponses",
".",
"Just",
"call",
"result",
"(",
"timeout",
"=",
"None",
")",
"on",
"the",
"future",
"response",
"to",
"block",
"for",
"an",
"available",
"response",
".",
"Some",
"of",
"the",
"possible",
"exceptions",
"may",
"be",
"raised",
"when",
"this",
"method",
"is",
"called",
";",
"others",
"may",
"be",
"raised",
"when",
"the",
"future",
"is",
"used",
"."
] |
python
|
train
|
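A hedged usage sketch for call_jobs_parallel_future from the record above: 'client' stands for an already-configured pysoa Client (construction omitted), and the service names, action names and bodies are made up for illustration; treat the exact action dict shape as an assumption rather than a real schema.

future = client.call_jobs_parallel_future(
    jobs=[
        {'service_name': 'user', 'actions': [{'action': 'get_user', 'body': {'id': 1}}]},
        {'service_name': 'billing', 'actions': [{'action': 'get_invoices', 'body': {'user_id': 1}}]},
    ],
    timeout=10,
)
# ... other work can happen here while both requests are in flight ...
job_responses = future.result()   # blocks until both JobResponses are available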
moonso/loqusdb
|
loqusdb/plugins/mongo/variant.py
|
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L159-L196
|
def delete_variant(self, variant):
"""Delete observation in database
        This means that we decrease the 'observations' counter by one.
        If 'observations' == 1 we remove the variant. If the variant was homozygous
        we decrease 'homozygote' by one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
"""
mongo_variant = self.get_variant(variant)
if mongo_variant:
if mongo_variant['observations'] == 1:
LOG.debug("Removing variant {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.delete_one({'_id': variant['_id']})
else:
LOG.debug("Decreasing observations for {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.update_one({
'_id': mongo_variant['_id']
},{
'$inc': {
'observations': -1,
'homozygote': - (variant.get('homozygote', 0)),
'hemizygote': - (variant.get('hemizygote', 0)),
},
'$pull': {
'families': variant.get('case_id')
}
}, upsert=False)
return
|
[
"def",
"delete_variant",
"(",
"self",
",",
"variant",
")",
":",
"mongo_variant",
"=",
"self",
".",
"get_variant",
"(",
"variant",
")",
"if",
"mongo_variant",
":",
"if",
"mongo_variant",
"[",
"'observations'",
"]",
"==",
"1",
":",
"LOG",
".",
"debug",
"(",
"\"Removing variant {0}\"",
".",
"format",
"(",
"mongo_variant",
".",
"get",
"(",
"'_id'",
")",
")",
")",
"message",
"=",
"self",
".",
"db",
".",
"variant",
".",
"delete_one",
"(",
"{",
"'_id'",
":",
"variant",
"[",
"'_id'",
"]",
"}",
")",
"else",
":",
"LOG",
".",
"debug",
"(",
"\"Decreasing observations for {0}\"",
".",
"format",
"(",
"mongo_variant",
".",
"get",
"(",
"'_id'",
")",
")",
")",
"message",
"=",
"self",
".",
"db",
".",
"variant",
".",
"update_one",
"(",
"{",
"'_id'",
":",
"mongo_variant",
"[",
"'_id'",
"]",
"}",
",",
"{",
"'$inc'",
":",
"{",
"'observations'",
":",
"-",
"1",
",",
"'homozygote'",
":",
"-",
"(",
"variant",
".",
"get",
"(",
"'homozygote'",
",",
"0",
")",
")",
",",
"'hemizygote'",
":",
"-",
"(",
"variant",
".",
"get",
"(",
"'hemizygote'",
",",
"0",
")",
")",
",",
"}",
",",
"'$pull'",
":",
"{",
"'families'",
":",
"variant",
".",
"get",
"(",
"'case_id'",
")",
"}",
"}",
",",
"upsert",
"=",
"False",
")",
"return"
] |
Delete observation in database
This means that we decrease the 'observations' counter by one.
If 'observations' == 1 we remove the variant. If the variant was homozygous
we decrease 'homozygote' by one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
|
[
"Delete",
"observation",
"in",
"database"
] |
python
|
train
|
senaite/senaite.core
|
bika/lims/content/bikasetup.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/bikasetup.py#L937-L945
|
def getRejectionReasonsItems(self):
"""Return the list of predefined rejection reasons
"""
reasons = self.getRejectionReasons()
if not reasons:
return []
reasons = reasons[0]
keys = filter(lambda key: key != "checkbox", reasons.keys())
return map(lambda key: reasons[key], sorted(keys)) or []
|
[
"def",
"getRejectionReasonsItems",
"(",
"self",
")",
":",
"reasons",
"=",
"self",
".",
"getRejectionReasons",
"(",
")",
"if",
"not",
"reasons",
":",
"return",
"[",
"]",
"reasons",
"=",
"reasons",
"[",
"0",
"]",
"keys",
"=",
"filter",
"(",
"lambda",
"key",
":",
"key",
"!=",
"\"checkbox\"",
",",
"reasons",
".",
"keys",
"(",
")",
")",
"return",
"map",
"(",
"lambda",
"key",
":",
"reasons",
"[",
"key",
"]",
",",
"sorted",
"(",
"keys",
")",
")",
"or",
"[",
"]"
] |
Return the list of predefined rejection reasons
|
[
"Return",
"the",
"list",
"of",
"predefined",
"rejection",
"reasons"
] |
python
|
train
|
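For reference, a standalone run of the same filtering logic on a made-up rejection-reasons record. Under Python 2 (as in this codebase at that time) filter() and map() return plain lists; under Python 3 they return iterators, so the result would need list() around it.

reasons = {"checkbox": "1",
           "selection_1": "Sample damaged",
           "selection_2": "Wrong container"}
keys = filter(lambda key: key != "checkbox", reasons.keys())
print(map(lambda key: reasons[key], sorted(keys)))
# ['Sample damaged', 'Wrong container']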
dw/mitogen
|
mitogen/parent.py
|
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L193-L203
|
def get_default_remote_name():
"""
Return the default name appearing in argv[0] of remote machines.
"""
s = u'%s@%s:%d'
s %= (getpass.getuser(), socket.gethostname(), os.getpid())
# In mixed UNIX/Windows environments, the username may contain slashes.
return s.translate({
ord(u'\\'): ord(u'_'),
ord(u'/'): ord(u'_')
})
|
[
"def",
"get_default_remote_name",
"(",
")",
":",
"s",
"=",
"u'%s@%s:%d'",
"s",
"%=",
"(",
"getpass",
".",
"getuser",
"(",
")",
",",
"socket",
".",
"gethostname",
"(",
")",
",",
"os",
".",
"getpid",
"(",
")",
")",
"# In mixed UNIX/Windows environments, the username may contain slashes.",
"return",
"s",
".",
"translate",
"(",
"{",
"ord",
"(",
"u'\\\\'",
")",
":",
"ord",
"(",
"u'_'",
")",
",",
"ord",
"(",
"u'/'",
")",
":",
"ord",
"(",
"u'_'",
")",
"}",
")"
] |
Return the default name appearing in argv[0] of remote machines.
|
[
"Return",
"the",
"default",
"name",
"appearing",
"in",
"argv",
"[",
"0",
"]",
"of",
"remote",
"machines",
"."
] |
python
|
train
|
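The translate() call in the record above maps path separators to underscores; a tiny standalone check with a made-up username containing a Windows domain separator:

s = u'DOMAIN\\alice@build-host:4242'
print(s.translate({ord(u'\\'): ord(u'_'), ord(u'/'): ord(u'_')}))
# DOMAIN_alice@build-host:4242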
eqcorrscan/EQcorrscan
|
eqcorrscan/utils/plotting.py
|
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L2264-L2290
|
def _plotting_decimation(trace, max_len=10e5, decimation_step=4):
"""
Decimate data until required length reached.
:type trace: obspy.core.stream.Trace
:param trace: Trace to decimate
    :type max_len: int
:param max_len: Maximum length in samples
:type decimation_step: int
:param decimation_step: Decimation factor to use for each step.
:return: obspy.core.stream.Trace
.. rubric: Example
>>> from obspy import Trace
>>> import numpy as np
>>> trace = Trace(np.random.randn(1000))
>>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2)
>>> print(trace.stats.npts)
63
"""
trace_len = trace.stats.npts
while trace_len > max_len:
trace.decimate(decimation_step)
trace_len = trace.stats.npts
return trace
|
[
"def",
"_plotting_decimation",
"(",
"trace",
",",
"max_len",
"=",
"10e5",
",",
"decimation_step",
"=",
"4",
")",
":",
"trace_len",
"=",
"trace",
".",
"stats",
".",
"npts",
"while",
"trace_len",
">",
"max_len",
":",
"trace",
".",
"decimate",
"(",
"decimation_step",
")",
"trace_len",
"=",
"trace",
".",
"stats",
".",
"npts",
"return",
"trace"
] |
Decimate data until required length reached.
:type trace: obspy.core.stream.Trace
:param trace: Trace to decimate
:type max_len: int
:param max_len: Maximum length in samples
:type decimation_step: int
:param decimation_step: Decimation factor to use for each step.
:return: obspy.core.stream.Trace
.. rubric: Example
>>> from obspy import Trace
>>> import numpy as np
>>> trace = Trace(np.random.randn(1000))
>>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2)
>>> print(trace.stats.npts)
63
|
[
"Decimate",
"data",
"until",
"required",
"length",
"reached",
"."
] |
python
|
train
|
log2timeline/plaso
|
plaso/output/mediator.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/mediator.py#L92-L108
|
def GetFormattedSources(self, event):
"""Retrieves the formatted sources related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full source string or None if no event formatter was found.
str: short source string or None if no event formatter was found.
"""
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None, None
return event_formatter.GetSources(event)
|
[
"def",
"GetFormattedSources",
"(",
"self",
",",
"event",
")",
":",
"event_formatter",
"=",
"self",
".",
"GetEventFormatter",
"(",
"event",
")",
"if",
"not",
"event_formatter",
":",
"return",
"None",
",",
"None",
"return",
"event_formatter",
".",
"GetSources",
"(",
"event",
")"
] |
Retrieves the formatted sources related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full source string or None if no event formatter was found.
str: short source string or None if no event formatter was found.
|
[
"Retrieves",
"the",
"formatted",
"sources",
"related",
"to",
"the",
"event",
"."
] |
python
|
train
|
numenta/nupic
|
src/nupic/data/generators/anomalyzer.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/anomalyzer.py#L66-L80
|
def add(reader, writer, column, start, stop, value):
"""Adds a value over a range of rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
column: The column of data to modify.
start: The first row in the range to modify.
    stop: The last row in the range to modify.
value: The value to add.
"""
for i, row in enumerate(reader):
if i >= start and i <= stop:
row[column] = type(value)(row[column]) + value
writer.appendRecord(row)
|
[
"def",
"add",
"(",
"reader",
",",
"writer",
",",
"column",
",",
"start",
",",
"stop",
",",
"value",
")",
":",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"reader",
")",
":",
"if",
"i",
">=",
"start",
"and",
"i",
"<=",
"stop",
":",
"row",
"[",
"column",
"]",
"=",
"type",
"(",
"value",
")",
"(",
"row",
"[",
"column",
"]",
")",
"+",
"value",
"writer",
".",
"appendRecord",
"(",
"row",
")"
] |
Adds a value over a range of rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
column: The column of data to modify.
start: The first row in the range to modify.
    stop: The last row in the range to modify.
value: The value to add.
|
[
"Adds",
"a",
"value",
"over",
"a",
"range",
"of",
"rows",
"."
] |
python
|
valid
|
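A usage sketch for the add function in the record above. The reader only needs to be iterable and the writer only needs appendRecord(), so a list-backed stand-in (an assumption, not the real FileRecordStream) is enough to see the effect; the import path is taken from the record's file path.

from nupic.data.generators.anomalyzer import add  # assumed importable

class ListWriter(object):
    """List-backed stand-in for FileRecordStream; only appendRecord() is needed."""
    def __init__(self):
        self.rows = []
    def appendRecord(self, row):
        self.rows.append(row)

reader = [["2020-01-01", 10.0], ["2020-01-02", 11.0], ["2020-01-03", 12.0]]
writer = ListWriter()
add(reader, writer, column=1, start=1, stop=2, value=5.0)
print(writer.rows)
# [['2020-01-01', 10.0], ['2020-01-02', 16.0], ['2020-01-03', 17.0]]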
shaiguitar/snowclient.py
|
snowclient/api.py
|
https://github.com/shaiguitar/snowclient.py/blob/6bb513576d3b37612a7a4da225140d134f3e1c82/snowclient/api.py#L159-L166
|
def resolve_links(self, snow_record, **kparams):
"""
Get the infos from the links and return SnowRecords[].
"""
records = []
for attr, link in snow_record.links().items():
records.append(self.resolve_link(snow_record, attr, **kparams))
return records
|
[
"def",
"resolve_links",
"(",
"self",
",",
"snow_record",
",",
"*",
"*",
"kparams",
")",
":",
"records",
"=",
"[",
"]",
"for",
"attr",
",",
"link",
"in",
"snow_record",
".",
"links",
"(",
")",
".",
"items",
"(",
")",
":",
"records",
".",
"append",
"(",
"self",
".",
"resolve_link",
"(",
"snow_record",
",",
"attr",
",",
"*",
"*",
"kparams",
")",
")",
"return",
"records"
] |
Get the infos from the links and return SnowRecords[].
|
[
"Get",
"the",
"infos",
"from",
"the",
"links",
"and",
"return",
"SnowRecords",
"[]",
"."
] |
python
|
train
|
klen/muffin
|
muffin/manage.py
|
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/manage.py#L250-L278
|
def run():
"""CLI endpoint."""
sys.path.insert(0, os.getcwd())
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])
parser = argparse.ArgumentParser(description="Manage Application", add_help=False)
parser.add_argument('app', metavar='app',
type=str, help='Application module path')
parser.add_argument('--config', type=str, help='Path to configuration.')
parser.add_argument('--version', action="version", version=__version__)
args_, subargs_ = parser.parse_known_args(sys.argv[1:])
if args_.config:
os.environ[CONFIGURATION_ENVIRON_VARIABLE] = args_.config
from gunicorn.util import import_app
app_uri = args_.app
if ':' not in app_uri:
app_uri += ':app'
try:
app = import_app(app_uri)
app.uri = app_uri
app.logger.info('Application is loaded: %s' % app.name)
except Exception as exc:
logging.exception(exc)
raise sys.exit(1)
app.manage(*subargs_, prog='muffin %s' % args_.app)
|
[
"def",
"run",
"(",
")",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"os",
".",
"getcwd",
"(",
")",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"handlers",
"=",
"[",
"logging",
".",
"StreamHandler",
"(",
")",
"]",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Manage Application\"",
",",
"add_help",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'app'",
",",
"metavar",
"=",
"'app'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Application module path'",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Path to configuration.'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"__version__",
")",
"args_",
",",
"subargs_",
"=",
"parser",
".",
"parse_known_args",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"args_",
".",
"config",
":",
"os",
".",
"environ",
"[",
"CONFIGURATION_ENVIRON_VARIABLE",
"]",
"=",
"args_",
".",
"config",
"from",
"gunicorn",
".",
"util",
"import",
"import_app",
"app_uri",
"=",
"args_",
".",
"app",
"if",
"':'",
"not",
"in",
"app_uri",
":",
"app_uri",
"+=",
"':app'",
"try",
":",
"app",
"=",
"import_app",
"(",
"app_uri",
")",
"app",
".",
"uri",
"=",
"app_uri",
"app",
".",
"logger",
".",
"info",
"(",
"'Application is loaded: %s'",
"%",
"app",
".",
"name",
")",
"except",
"Exception",
"as",
"exc",
":",
"logging",
".",
"exception",
"(",
"exc",
")",
"raise",
"sys",
".",
"exit",
"(",
"1",
")",
"app",
".",
"manage",
"(",
"*",
"subargs_",
",",
"prog",
"=",
"'muffin %s'",
"%",
"args_",
".",
"app",
")"
] |
CLI endpoint.
|
[
"CLI",
"endpoint",
"."
] |
python
|
train
|
ptcryan/hydrawiser
|
hydrawiser/core.py
|
https://github.com/ptcryan/hydrawiser/blob/53acafb08b5cee0f6628414044b9b9f9a0b15e50/hydrawiser/core.py#L104-L130
|
def relay_info(self, relay, attribute=None):
"""
Return information about a relay.
:param relay: The relay being queried.
:type relay: int
:param attribute: The attribute being queried, or all attributes for
that relay if None is specified.
:type attribute: string or None
:returns: The attribute being queried or None if not found.
:rtype: string or int
"""
# Check if the relay number is valid.
if (relay < 0) or (relay > (self.num_relays - 1)):
# Invalid relay index specified.
return None
else:
if attribute is None:
# Return all the relay attributes.
return self.relays[relay]
else:
try:
return self.relays[relay][attribute]
except KeyError:
# Invalid key specified.
return None
|
[
"def",
"relay_info",
"(",
"self",
",",
"relay",
",",
"attribute",
"=",
"None",
")",
":",
"# Check if the relay number is valid.",
"if",
"(",
"relay",
"<",
"0",
")",
"or",
"(",
"relay",
">",
"(",
"self",
".",
"num_relays",
"-",
"1",
")",
")",
":",
"# Invalid relay index specified.",
"return",
"None",
"else",
":",
"if",
"attribute",
"is",
"None",
":",
"# Return all the relay attributes.",
"return",
"self",
".",
"relays",
"[",
"relay",
"]",
"else",
":",
"try",
":",
"return",
"self",
".",
"relays",
"[",
"relay",
"]",
"[",
"attribute",
"]",
"except",
"KeyError",
":",
"# Invalid key specified.",
"return",
"None"
] |
Return information about a relay.
:param relay: The relay being queried.
:type relay: int
:param attribute: The attribute being queried, or all attributes for
that relay if None is specified.
:type attribute: string or None
:returns: The attribute being queried or None if not found.
:rtype: string or int
|
[
"Return",
"information",
"about",
"a",
"relay",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/win_network.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_network.py#L142-L193
|
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
CLI Example:
.. code-block:: bash
salt '*' network.traceroute archlinux.org
'''
ret = []
cmd = ['tracert', salt.utils.network.sanitize_host(host)]
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
if ' ' not in line:
continue
if line.startswith('Trac'):
continue
if line.startswith('over'):
continue
comps = line.split()
complength = len(comps)
# This method still needs to better catch rows of other lengths
# For example if some of the ms returns are '*'
if complength == 9:
result = {
'count': comps[0],
'hostname': comps[7],
'ip': comps[8],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
elif complength == 8:
result = {
'count': comps[0],
'hostname': None,
'ip': comps[7],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
else:
result = {
'count': comps[0],
'hostname': None,
'ip': None,
'ms1': None,
'ms2': None,
'ms3': None}
ret.append(result)
return ret
|
[
"def",
"traceroute",
"(",
"host",
")",
":",
"ret",
"=",
"[",
"]",
"cmd",
"=",
"[",
"'tracert'",
",",
"salt",
".",
"utils",
".",
"network",
".",
"sanitize_host",
"(",
"host",
")",
"]",
"lines",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
".",
"splitlines",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"' '",
"not",
"in",
"line",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'Trac'",
")",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'over'",
")",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"complength",
"=",
"len",
"(",
"comps",
")",
"# This method still needs to better catch rows of other lengths",
"# For example if some of the ms returns are '*'",
"if",
"complength",
"==",
"9",
":",
"result",
"=",
"{",
"'count'",
":",
"comps",
"[",
"0",
"]",
",",
"'hostname'",
":",
"comps",
"[",
"7",
"]",
",",
"'ip'",
":",
"comps",
"[",
"8",
"]",
",",
"'ms1'",
":",
"comps",
"[",
"1",
"]",
",",
"'ms2'",
":",
"comps",
"[",
"3",
"]",
",",
"'ms3'",
":",
"comps",
"[",
"5",
"]",
"}",
"ret",
".",
"append",
"(",
"result",
")",
"elif",
"complength",
"==",
"8",
":",
"result",
"=",
"{",
"'count'",
":",
"comps",
"[",
"0",
"]",
",",
"'hostname'",
":",
"None",
",",
"'ip'",
":",
"comps",
"[",
"7",
"]",
",",
"'ms1'",
":",
"comps",
"[",
"1",
"]",
",",
"'ms2'",
":",
"comps",
"[",
"3",
"]",
",",
"'ms3'",
":",
"comps",
"[",
"5",
"]",
"}",
"ret",
".",
"append",
"(",
"result",
")",
"else",
":",
"result",
"=",
"{",
"'count'",
":",
"comps",
"[",
"0",
"]",
",",
"'hostname'",
":",
"None",
",",
"'ip'",
":",
"None",
",",
"'ms1'",
":",
"None",
",",
"'ms2'",
":",
"None",
",",
"'ms3'",
":",
"None",
"}",
"ret",
".",
"append",
"(",
"result",
")",
"return",
"ret"
] |
Performs a traceroute to a 3rd party host
CLI Example:
.. code-block:: bash
salt '*' network.traceroute archlinux.org
|
[
"Performs",
"a",
"traceroute",
"to",
"a",
"3rd",
"party",
"host"
] |
python
|
train
|
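To make the 9-field branch of the parser above concrete, here is a fabricated tracert-style line split the same way the loop splits it; the host name and addresses are invented.

line = '  2     1 ms     2 ms     1 ms  host.example.com [10.0.0.1]'
comps = line.split()
print(len(comps))  # 9
print({'count': comps[0], 'hostname': comps[7], 'ip': comps[8],
       'ms1': comps[1], 'ms2': comps[3], 'ms3': comps[5]})
# {'count': '2', 'hostname': 'host.example.com', 'ip': '[10.0.0.1]', 'ms1': '1', 'ms2': '2', 'ms3': '1'}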
LionelR/pyair
|
pyair/date.py
|
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/date.py#L50-L67
|
def profil_hebdo(df, func='mean'):
"""
    Computes the daily profile
    Parameters:
    df: DataFrame of data whose index is a time series
    (see the xair module, for example)
    func: function used for the computation. Either the name of a numpy function ('mean', 'max', ...)
    or the function itself (np.mean, np.max, ...)
    Returns:
    A DataFrame of means per day over the week
"""
func = _get_funky(func)
res = df.groupby(lambda x: x.weekday).aggregate(func)
    # Replace the numbers in the index with day names
res.index = [cal.day_name[i] for i in range(0,7)]
return res
|
[
"def",
"profil_hebdo",
"(",
"df",
",",
"func",
"=",
"'mean'",
")",
":",
"func",
"=",
"_get_funky",
"(",
"func",
")",
"res",
"=",
"df",
".",
"groupby",
"(",
"lambda",
"x",
":",
"x",
".",
"weekday",
")",
".",
"aggregate",
"(",
"func",
")",
"# On met des noms de jour à la place des numéros dans l'index",
"res",
".",
"index",
"=",
"[",
"cal",
".",
"day_name",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"7",
")",
"]",
"return",
"res"
] |
Computes the daily profile
Parameters:
df: DataFrame of data whose index is a time series
(see the xair module, for example)
func: function used for the computation. Either the name of a numpy function ('mean', 'max', ...)
or the function itself (np.mean, np.max, ...)
Returns:
A DataFrame of means per day over the week
|
[
"Calcul",
"du",
"profil",
"journalier"
] |
python
|
valid
|
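A self-contained sketch of the groupby-by-weekday idea used in profil_hebdo above, built on a synthetic hourly series so the xair data source is not required; it uses df.index.weekday rather than the lambda in the original, which is only a simplification for this example.

import calendar as cal
import numpy as np
import pandas as pd

idx = pd.date_range('2021-01-04', periods=14 * 24, freq='H')        # two full weeks, hourly
df = pd.DataFrame({'no2': np.random.rand(len(idx))}, index=idx)

res = df.groupby(df.index.weekday).mean()                            # one row per weekday (0-6)
res.index = [cal.day_name[i] for i in range(0, 7)]                   # Monday..Sunday labels
print(res)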
gwastro/pycbc-glue
|
pycbc_glue/ligolw/utils/segments.py
|
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/segments.py#L401-L481
|
def finalize(self, process_row = None):
"""
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
"""
if process_row is not None:
process_id = process_row.process_id
elif self.process is not None:
process_id = self.process.process_id
else:
raise ValueError("must supply a process row to .__init__()")
#
# ensure ID generators are synchronized with table contents
#
self.segment_def_table.sync_next_id()
self.segment_table.sync_next_id()
self.segment_sum_table.sync_next_id()
#
# put all segment lists in time order
#
self.sort()
#
# generator function to convert segments into row objects,
# each paired with the table to which the row is to be
# appended
#
def row_generator(segs, target_table, process_id, segment_def_id):
id_column = target_table.next_id.column_name
for seg in segs:
row = target_table.RowType()
row.segment = seg
row.process_id = process_id
row.segment_def_id = segment_def_id
setattr(row, id_column, target_table.get_next_id())
if 'comment' in target_table.validcolumns:
row.comment = None
yield row, target_table
#
# populate the segment_definer table from the list of
# LigolwSegmentList objects and construct a matching list
# of table row generators. empty ourselves to prevent this
# process from being repeated
#
row_generators = []
while self:
ligolw_segment_list = self.pop()
segment_def_row = self.segment_def_table.RowType()
segment_def_row.process_id = process_id
segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
segment_def_row.instruments = ligolw_segment_list.instruments
segment_def_row.name = ligolw_segment_list.name
segment_def_row.version = ligolw_segment_list.version
segment_def_row.comment = ligolw_segment_list.comment
self.segment_def_table.append(segment_def_row)
row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
#
# populate segment and segment_summary tables by pulling
# rows from the generators in time order
#
for row, target_table in iterutils.inorder(*row_generators):
target_table.append(row)
|
[
"def",
"finalize",
"(",
"self",
",",
"process_row",
"=",
"None",
")",
":",
"if",
"process_row",
"is",
"not",
"None",
":",
"process_id",
"=",
"process_row",
".",
"process_id",
"elif",
"self",
".",
"process",
"is",
"not",
"None",
":",
"process_id",
"=",
"self",
".",
"process",
".",
"process_id",
"else",
":",
"raise",
"ValueError",
"(",
"\"must supply a process row to .__init__()\"",
")",
"#",
"# ensure ID generators are synchronized with table contents",
"#",
"self",
".",
"segment_def_table",
".",
"sync_next_id",
"(",
")",
"self",
".",
"segment_table",
".",
"sync_next_id",
"(",
")",
"self",
".",
"segment_sum_table",
".",
"sync_next_id",
"(",
")",
"#",
"# put all segment lists in time order",
"#",
"self",
".",
"sort",
"(",
")",
"#",
"# generator function to convert segments into row objects,",
"# each paired with the table to which the row is to be",
"# appended",
"#",
"def",
"row_generator",
"(",
"segs",
",",
"target_table",
",",
"process_id",
",",
"segment_def_id",
")",
":",
"id_column",
"=",
"target_table",
".",
"next_id",
".",
"column_name",
"for",
"seg",
"in",
"segs",
":",
"row",
"=",
"target_table",
".",
"RowType",
"(",
")",
"row",
".",
"segment",
"=",
"seg",
"row",
".",
"process_id",
"=",
"process_id",
"row",
".",
"segment_def_id",
"=",
"segment_def_id",
"setattr",
"(",
"row",
",",
"id_column",
",",
"target_table",
".",
"get_next_id",
"(",
")",
")",
"if",
"'comment'",
"in",
"target_table",
".",
"validcolumns",
":",
"row",
".",
"comment",
"=",
"None",
"yield",
"row",
",",
"target_table",
"#",
"# populate the segment_definer table from the list of",
"# LigolwSegmentList objects and construct a matching list",
"# of table row generators. empty ourselves to prevent this",
"# process from being repeated",
"#",
"row_generators",
"=",
"[",
"]",
"while",
"self",
":",
"ligolw_segment_list",
"=",
"self",
".",
"pop",
"(",
")",
"segment_def_row",
"=",
"self",
".",
"segment_def_table",
".",
"RowType",
"(",
")",
"segment_def_row",
".",
"process_id",
"=",
"process_id",
"segment_def_row",
".",
"segment_def_id",
"=",
"self",
".",
"segment_def_table",
".",
"get_next_id",
"(",
")",
"segment_def_row",
".",
"instruments",
"=",
"ligolw_segment_list",
".",
"instruments",
"segment_def_row",
".",
"name",
"=",
"ligolw_segment_list",
".",
"name",
"segment_def_row",
".",
"version",
"=",
"ligolw_segment_list",
".",
"version",
"segment_def_row",
".",
"comment",
"=",
"ligolw_segment_list",
".",
"comment",
"self",
".",
"segment_def_table",
".",
"append",
"(",
"segment_def_row",
")",
"row_generators",
".",
"append",
"(",
"row_generator",
"(",
"ligolw_segment_list",
".",
"valid",
",",
"self",
".",
"segment_sum_table",
",",
"process_id",
",",
"segment_def_row",
".",
"segment_def_id",
")",
")",
"row_generators",
".",
"append",
"(",
"row_generator",
"(",
"ligolw_segment_list",
".",
"active",
",",
"self",
".",
"segment_table",
",",
"process_id",
",",
"segment_def_row",
".",
"segment_def_id",
")",
")",
"#",
"# populate segment and segment_summary tables by pulling",
"# rows from the generators in time order",
"#",
"for",
"row",
",",
"target_table",
"in",
"iterutils",
".",
"inorder",
"(",
"*",
"row_generators",
")",
":",
"target_table",
".",
"append",
"(",
"row",
")"
] |
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
|
[
"Restore",
"the",
"LigolwSegmentList",
"objects",
"to",
"the",
"XML",
"tables",
"in",
"preparation",
"for",
"output",
".",
"All",
"segments",
"from",
"all",
"segment",
"lists",
"are",
"inserted",
"into",
"the",
"tables",
"in",
"time",
"order",
"but",
"this",
"is",
"NOT",
"behaviour",
"external",
"applications",
"should",
"rely",
"on",
".",
"This",
"is",
"done",
"simply",
"in",
"the",
"belief",
"that",
"it",
"might",
"assist",
"in",
"constructing",
"well",
"balanced",
"indexed",
"databases",
"from",
"the",
"resulting",
"files",
".",
"If",
"that",
"proves",
"not",
"to",
"be",
"the",
"case",
"or",
"for",
"some",
"reason",
"this",
"behaviour",
"proves",
"inconvenient",
"to",
"preserve",
"then",
"it",
"might",
"be",
"discontinued",
"without",
"notice",
".",
"You",
"ve",
"been",
"warned",
"."
] |
python
|
train
|
rhayes777/PyAutoFit
|
autofit/tools/pipeline.py
|
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L143-L167
|
def run_function(self, func, data_name=None, assert_optimizer_pickle_matches=True):
"""
Run the function for each phase in the pipeline.
Parameters
----------
assert_optimizer_pickle_matches
data_name
func
A function that takes a phase and prior results, returning results for that phase
Returns
-------
results: ResultsCollection
A collection of results
"""
results = ResultsCollection()
for i, phase in enumerate(self.phases):
logger.info("Running Phase {} (Number {})".format(phase.optimizer.phase_name, i))
if assert_optimizer_pickle_matches:
assert_optimizer_pickle_matches_for_phase(phase)
save_optimizer_for_phase(phase)
self.save_metadata(phase, data_name)
results.add(phase.phase_name, func(phase, results))
return results
|
[
"def",
"run_function",
"(",
"self",
",",
"func",
",",
"data_name",
"=",
"None",
",",
"assert_optimizer_pickle_matches",
"=",
"True",
")",
":",
"results",
"=",
"ResultsCollection",
"(",
")",
"for",
"i",
",",
"phase",
"in",
"enumerate",
"(",
"self",
".",
"phases",
")",
":",
"logger",
".",
"info",
"(",
"\"Running Phase {} (Number {})\"",
".",
"format",
"(",
"phase",
".",
"optimizer",
".",
"phase_name",
",",
"i",
")",
")",
"if",
"assert_optimizer_pickle_matches",
":",
"assert_optimizer_pickle_matches_for_phase",
"(",
"phase",
")",
"save_optimizer_for_phase",
"(",
"phase",
")",
"self",
".",
"save_metadata",
"(",
"phase",
",",
"data_name",
")",
"results",
".",
"add",
"(",
"phase",
".",
"phase_name",
",",
"func",
"(",
"phase",
",",
"results",
")",
")",
"return",
"results"
] |
Run the function for each phase in the pipeline.
Parameters
----------
assert_optimizer_pickle_matches
data_name
func
A function that takes a phase and prior results, returning results for that phase
Returns
-------
results: ResultsCollection
A collection of results
|
[
"Run",
"the",
"function",
"for",
"each",
"phase",
"in",
"the",
"pipeline",
"."
] |
python
|
train
|
apache/incubator-heron
|
heron/tools/admin/src/python/standalone.py
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L573-L592
|
def wait_for_job_to_start(single_master, job):
'''
Wait for a Nomad job to start
'''
i = 0
while True:
try:
r = requests.get("http://%s:4646/v1/job/%s" % (single_master, job))
if r.status_code == 200 and r.json()["Status"] == "running":
break
else:
raise RuntimeError()
except:
Log.debug(sys.exc_info()[0])
Log.info("Waiting for %s to come up... %s" % (job, i))
time.sleep(1)
if i > 20:
Log.error("Failed to start Nomad Cluster!")
sys.exit(-1)
i = i + 1
|
[
"def",
"wait_for_job_to_start",
"(",
"single_master",
",",
"job",
")",
":",
"i",
"=",
"0",
"while",
"True",
":",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"\"http://%s:4646/v1/job/%s\"",
"%",
"(",
"single_master",
",",
"job",
")",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
"and",
"r",
".",
"json",
"(",
")",
"[",
"\"Status\"",
"]",
"==",
"\"running\"",
":",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
")",
"except",
":",
"Log",
".",
"debug",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")",
"Log",
".",
"info",
"(",
"\"Waiting for %s to come up... %s\"",
"%",
"(",
"job",
",",
"i",
")",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"if",
"i",
">",
"20",
":",
"Log",
".",
"error",
"(",
"\"Failed to start Nomad Cluster!\"",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"i",
"=",
"i",
"+",
"1"
] |
Wait for a Nomad job to start
|
[
"Wait",
"for",
"a",
"Nomad",
"job",
"to",
"start"
] |
python
|
valid
|
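wait_for_job_to_start above is essentially a bounded polling loop against an HTTP status endpoint. A generic, self-contained version of that pattern is sketched below; the URL and the "running" check are placeholders, not the actual Nomad API contract.

import time
import requests

def wait_until_running(url, attempts=20, delay=1.0):
    """Poll `url` until its JSON body reports Status == "running"."""
    for attempt in range(attempts):
        try:
            r = requests.get(url, timeout=5)
            if r.status_code == 200 and r.json().get("Status") == "running":
                return True
        except requests.RequestException:
            pass  # endpoint not up yet; fall through and retry
        time.sleep(delay)
    return False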
knowmalware/camcrypt
|
camcrypt/__init__.py
|
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L73-L95
|
def encrypt(self, plainText):
"""Encrypt an arbitrary-length block of data.
NOTE: This function formerly worked only on 16-byte blocks of `plainText`.
code that assumed this should still work fine, but can optionally be
modified to call `encrypt_block` instead.
Args:
plainText (str): data to encrypt. If the data is not a multiple of 16
bytes long, it will be padded with null (0x00) bytes until it is.
Returns:
encrypted data. Note that this will always be a multiple of 16 bytes
long.
"""
encryptedResult = ''
for index in range(0, len(plainText), BLOCK_SIZE):
block = plainText[index:index + BLOCK_SIZE]
# Pad to required length if needed
if len(block) < BLOCK_SIZE:
block = zero_pad(block, BLOCK_SIZE)
encryptedResult += self.encrypt_block(block)
return encryptedResult
|
[
"def",
"encrypt",
"(",
"self",
",",
"plainText",
")",
":",
"encryptedResult",
"=",
"''",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"plainText",
")",
",",
"BLOCK_SIZE",
")",
":",
"block",
"=",
"plainText",
"[",
"index",
":",
"index",
"+",
"BLOCK_SIZE",
"]",
"# Pad to required length if needed",
"if",
"len",
"(",
"block",
")",
"<",
"BLOCK_SIZE",
":",
"block",
"=",
"zero_pad",
"(",
"block",
",",
"BLOCK_SIZE",
")",
"encryptedResult",
"+=",
"self",
".",
"encrypt_block",
"(",
"block",
")",
"return",
"encryptedResult"
] |
Encrypt an arbitrary-length block of data.
NOTE: This function formerly worked only on 16-byte blocks of `plainText`.
code that assumed this should still work fine, but can optionally be
modified to call `encrypt_block` instead.
Args:
plainText (str): data to encrypt. If the data is not a multiple of 16
bytes long, it will be padded with null (0x00) bytes until it is.
Returns:
encrypted data. Note that this will always be a multiple of 16 bytes
long.
|
[
"Encrypt",
"an",
"arbitrary",
"-",
"length",
"block",
"of",
"data",
"."
] |
python
|
train
|
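The encrypt method above walks the plaintext in BLOCK_SIZE chunks and zero-pads the final chunk. A standalone sketch of that chunk-and-pad loop follows, with a local zero_pad helper standing in for camcrypt's and a dummy per-block "cipher" so the example runs on its own.

BLOCK_SIZE = 16

def zero_pad(block, size):
    return block + b"\x00" * (size - len(block))

def encrypt_blocks(plain_text, encrypt_block):
    out = b""
    for index in range(0, len(plain_text), BLOCK_SIZE):
        block = plain_text[index:index + BLOCK_SIZE]
        if len(block) < BLOCK_SIZE:
            block = zero_pad(block, BLOCK_SIZE)
        out += encrypt_block(block)
    return out

# Stand-in block cipher: identity. The output length is always a multiple of 16.
print(len(encrypt_blocks(b"hello world", lambda b: b)))   # 16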
MIT-LCP/wfdb-python
|
wfdb/io/record.py
|
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L1054-L1320
|
def rdrecord(record_name, sampfrom=0, sampto=None, channels=None,
physical=True, pb_dir=None, m2s=True, smooth_frames=True,
ignore_skew=False, return_res=64, force_channels=True,
channel_names=None, warn_empty=False):
"""
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
    record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, or 'end', optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
"""
dir_name, base_record_name = os.path.split(record_name)
dir_name = os.path.abspath(dir_name)
# Read the header fields
record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)
# Set defaults for sampto and channels input variables
if sampto is None:
# If the header does not contain the signal length, figure it
# out from the first dat file. This is only possible for single
# segment records. If there are no signals, sig_len is 0.
if record.sig_len is None:
if record.n_sig == 0:
record.sig_len = 0
else:
record.sig_len = _signal._infer_sig_len(
file_name=record.file_name[0], fmt=record.fmt[0],
n_sig=record.file_name.count(record.file_name[0]),
dir_name=dir_name, pb_dir=pb_dir)
sampto = record.sig_len
# channel_names takes precedence over channels
if channel_names is not None:
# Figure out the channel indices matching the record, if any.
if isinstance(record, Record):
reference_record = record
else:
if record.layout == 'fixed':
# Find the first non-empty segment to get the signal
# names
first_seg_name = [n for n in record.seg_name if n != '~'][0]
reference_record = rdheader(os.path.join(dir_name,
record.seg_name[0]),
pb_dir=pb_dir)
else:
# Use the layout specification header to get the signal
# names
reference_record = rdheader(os.path.join(dir_name,
record.seg_name[0]),
pb_dir=pb_dir)
channels = _get_wanted_channels(wanted_sig_names=channel_names,
record_sig_names=reference_record.sig_name)
elif channels is None:
channels = list(range(record.n_sig))
# Ensure that input fields are valid for the record
record.check_read_inputs(sampfrom, sampto, channels, physical,
smooth_frames, return_res)
# If the signal doesn't have the specified channels, there will be
# no signal. Recall that `rdsamp` is not called on segments of multi
# segment records if the channels are not present, so this won't
# break anything.
if not len(channels):
old_record = record
record = Record()
for attr in _header.RECORD_SPECS.index:
if attr == 'n_seg':
continue
elif attr in ['n_sig', 'sig_len']:
setattr(record, attr, 0)
else:
setattr(record, attr, getattr(old_record, attr))
if warn_empty:
print('None of the specified signals were contained in the record')
# A single segment record
elif isinstance(record, Record):
# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
if smooth_frames or max([record.samps_per_frame[c] for c in channels]) == 1:
# Read signals from the associated dat files that contain
# wanted channels
record.d_signal = _signal._rd_segment(record.file_name, dir_name,
pb_dir, record.fmt,
record.n_sig, record.sig_len,
record.byte_offset,
record.samps_per_frame,
record.skew, sampfrom, sampto,
channels, smooth_frames,
ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom,
expanded=False)
if physical:
# Perform inplace dac to get physical signal
record.dac(expanded=False, return_res=return_res, inplace=True)
# Return each sample of the signals with multiple samples per frame
else:
record.e_d_signal = _signal._rd_segment(record.file_name, dir_name,
pb_dir, record.fmt,
record.n_sig,
record.sig_len,
record.byte_offset,
record.samps_per_frame,
record.skew, sampfrom,
sampto, channels,
smooth_frames, ignore_skew)
# Arrange/edit the object fields to reflect user channel
# and/or signal range input
record._arrange_fields(channels=channels, sampfrom=sampfrom,
expanded=True)
if physical:
# Perform dac to get physical signal
record.dac(expanded=True, return_res=return_res, inplace=True)
# A multi segment record
else:
# Strategy:
# 1. Read the required segments and store them in
# Record objects.
# 2. Update the parameters of the objects to reflect
# the state of the sections read.
# 3. Update the parameters of the overall MultiRecord
# object to reflect the state of the individual segments.
# 4. If specified, convert the MultiRecord object
# into a single Record object.
# Segments field is a list of Record objects
# Empty segments store None.
record.segments = [None] * record.n_seg
# Variable layout, read the layout specification header
if record.layout == 'variable':
record.segments[0] = rdheader(os.path.join(dir_name,
record.seg_name[0]),
pb_dir=pb_dir)
# The segment numbers and samples within each segment to read.
seg_numbers, seg_ranges = record._required_segments(sampfrom, sampto)
# The channels within each segment to read
seg_channels = record._required_channels(seg_numbers, channels,
dir_name, pb_dir)
# Read the desired samples in the relevant segments
for i in range(len(seg_numbers)):
seg_num = seg_numbers[i]
# Empty segment or segment with no relevant channels
if record.seg_name[seg_num] == '~' or len(seg_channels[i]) == 0:
record.segments[seg_num] = None
else:
record.segments[seg_num] = rdrecord(
os.path.join(dir_name, record.seg_name[seg_num]),
sampfrom=seg_ranges[i][0], sampto=seg_ranges[i][1],
channels=seg_channels[i], physical=physical, pb_dir=pb_dir)
# Arrange the fields of the layout specification segment, and
# the overall object, to reflect user input.
record._arrange_fields(seg_numbers=seg_numbers, seg_ranges=seg_ranges,
channels=channels, sampfrom=sampfrom,
force_channels=force_channels)
# Convert object into a single segment Record object
if m2s:
record = record.multi_to_single(physical=physical,
return_res=return_res)
# Perform dtype conversion if necessary
if isinstance(record, Record) and record.n_sig > 0:
record.convert_dtype(physical, return_res, smooth_frames)
return record
|
[
"def",
"rdrecord",
"(",
"record_name",
",",
"sampfrom",
"=",
"0",
",",
"sampto",
"=",
"None",
",",
"channels",
"=",
"None",
",",
"physical",
"=",
"True",
",",
"pb_dir",
"=",
"None",
",",
"m2s",
"=",
"True",
",",
"smooth_frames",
"=",
"True",
",",
"ignore_skew",
"=",
"False",
",",
"return_res",
"=",
"64",
",",
"force_channels",
"=",
"True",
",",
"channel_names",
"=",
"None",
",",
"warn_empty",
"=",
"False",
")",
":",
"dir_name",
",",
"base_record_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"record_name",
")",
"dir_name",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"dir_name",
")",
"# Read the header fields",
"record",
"=",
"rdheader",
"(",
"record_name",
",",
"pb_dir",
"=",
"pb_dir",
",",
"rd_segments",
"=",
"False",
")",
"# Set defaults for sampto and channels input variables",
"if",
"sampto",
"is",
"None",
":",
"# If the header does not contain the signal length, figure it",
"# out from the first dat file. This is only possible for single",
"# segment records. If there are no signals, sig_len is 0.",
"if",
"record",
".",
"sig_len",
"is",
"None",
":",
"if",
"record",
".",
"n_sig",
"==",
"0",
":",
"record",
".",
"sig_len",
"=",
"0",
"else",
":",
"record",
".",
"sig_len",
"=",
"_signal",
".",
"_infer_sig_len",
"(",
"file_name",
"=",
"record",
".",
"file_name",
"[",
"0",
"]",
",",
"fmt",
"=",
"record",
".",
"fmt",
"[",
"0",
"]",
",",
"n_sig",
"=",
"record",
".",
"file_name",
".",
"count",
"(",
"record",
".",
"file_name",
"[",
"0",
"]",
")",
",",
"dir_name",
"=",
"dir_name",
",",
"pb_dir",
"=",
"pb_dir",
")",
"sampto",
"=",
"record",
".",
"sig_len",
"# channel_names takes precedence over channels",
"if",
"channel_names",
"is",
"not",
"None",
":",
"# Figure out the channel indices matching the record, if any.",
"if",
"isinstance",
"(",
"record",
",",
"Record",
")",
":",
"reference_record",
"=",
"record",
"else",
":",
"if",
"record",
".",
"layout",
"==",
"'fixed'",
":",
"# Find the first non-empty segment to get the signal",
"# names",
"first_seg_name",
"=",
"[",
"n",
"for",
"n",
"in",
"record",
".",
"seg_name",
"if",
"n",
"!=",
"'~'",
"]",
"[",
"0",
"]",
"reference_record",
"=",
"rdheader",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"record",
".",
"seg_name",
"[",
"0",
"]",
")",
",",
"pb_dir",
"=",
"pb_dir",
")",
"else",
":",
"# Use the layout specification header to get the signal",
"# names",
"reference_record",
"=",
"rdheader",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"record",
".",
"seg_name",
"[",
"0",
"]",
")",
",",
"pb_dir",
"=",
"pb_dir",
")",
"channels",
"=",
"_get_wanted_channels",
"(",
"wanted_sig_names",
"=",
"channel_names",
",",
"record_sig_names",
"=",
"reference_record",
".",
"sig_name",
")",
"elif",
"channels",
"is",
"None",
":",
"channels",
"=",
"list",
"(",
"range",
"(",
"record",
".",
"n_sig",
")",
")",
"# Ensure that input fields are valid for the record",
"record",
".",
"check_read_inputs",
"(",
"sampfrom",
",",
"sampto",
",",
"channels",
",",
"physical",
",",
"smooth_frames",
",",
"return_res",
")",
"# If the signal doesn't have the specified channels, there will be",
"# no signal. Recall that `rdsamp` is not called on segments of multi",
"# segment records if the channels are not present, so this won't",
"# break anything.",
"if",
"not",
"len",
"(",
"channels",
")",
":",
"old_record",
"=",
"record",
"record",
"=",
"Record",
"(",
")",
"for",
"attr",
"in",
"_header",
".",
"RECORD_SPECS",
".",
"index",
":",
"if",
"attr",
"==",
"'n_seg'",
":",
"continue",
"elif",
"attr",
"in",
"[",
"'n_sig'",
",",
"'sig_len'",
"]",
":",
"setattr",
"(",
"record",
",",
"attr",
",",
"0",
")",
"else",
":",
"setattr",
"(",
"record",
",",
"attr",
",",
"getattr",
"(",
"old_record",
",",
"attr",
")",
")",
"if",
"warn_empty",
":",
"print",
"(",
"'None of the specified signals were contained in the record'",
")",
"# A single segment record",
"elif",
"isinstance",
"(",
"record",
",",
"Record",
")",
":",
"# Only 1 sample/frame, or frames are smoothed. Return uniform numpy array",
"if",
"smooth_frames",
"or",
"max",
"(",
"[",
"record",
".",
"samps_per_frame",
"[",
"c",
"]",
"for",
"c",
"in",
"channels",
"]",
")",
"==",
"1",
":",
"# Read signals from the associated dat files that contain",
"# wanted channels",
"record",
".",
"d_signal",
"=",
"_signal",
".",
"_rd_segment",
"(",
"record",
".",
"file_name",
",",
"dir_name",
",",
"pb_dir",
",",
"record",
".",
"fmt",
",",
"record",
".",
"n_sig",
",",
"record",
".",
"sig_len",
",",
"record",
".",
"byte_offset",
",",
"record",
".",
"samps_per_frame",
",",
"record",
".",
"skew",
",",
"sampfrom",
",",
"sampto",
",",
"channels",
",",
"smooth_frames",
",",
"ignore_skew",
")",
"# Arrange/edit the object fields to reflect user channel",
"# and/or signal range input",
"record",
".",
"_arrange_fields",
"(",
"channels",
"=",
"channels",
",",
"sampfrom",
"=",
"sampfrom",
",",
"expanded",
"=",
"False",
")",
"if",
"physical",
":",
"# Perform inplace dac to get physical signal",
"record",
".",
"dac",
"(",
"expanded",
"=",
"False",
",",
"return_res",
"=",
"return_res",
",",
"inplace",
"=",
"True",
")",
"# Return each sample of the signals with multiple samples per frame",
"else",
":",
"record",
".",
"e_d_signal",
"=",
"_signal",
".",
"_rd_segment",
"(",
"record",
".",
"file_name",
",",
"dir_name",
",",
"pb_dir",
",",
"record",
".",
"fmt",
",",
"record",
".",
"n_sig",
",",
"record",
".",
"sig_len",
",",
"record",
".",
"byte_offset",
",",
"record",
".",
"samps_per_frame",
",",
"record",
".",
"skew",
",",
"sampfrom",
",",
"sampto",
",",
"channels",
",",
"smooth_frames",
",",
"ignore_skew",
")",
"# Arrange/edit the object fields to reflect user channel",
"# and/or signal range input",
"record",
".",
"_arrange_fields",
"(",
"channels",
"=",
"channels",
",",
"sampfrom",
"=",
"sampfrom",
",",
"expanded",
"=",
"True",
")",
"if",
"physical",
":",
"# Perform dac to get physical signal",
"record",
".",
"dac",
"(",
"expanded",
"=",
"True",
",",
"return_res",
"=",
"return_res",
",",
"inplace",
"=",
"True",
")",
"# A multi segment record",
"else",
":",
"# Strategy:",
"# 1. Read the required segments and store them in",
"# Record objects.",
"# 2. Update the parameters of the objects to reflect",
"# the state of the sections read.",
"# 3. Update the parameters of the overall MultiRecord",
"# object to reflect the state of the individual segments.",
"# 4. If specified, convert the MultiRecord object",
"# into a single Record object.",
"# Segments field is a list of Record objects",
"# Empty segments store None.",
"record",
".",
"segments",
"=",
"[",
"None",
"]",
"*",
"record",
".",
"n_seg",
"# Variable layout, read the layout specification header",
"if",
"record",
".",
"layout",
"==",
"'variable'",
":",
"record",
".",
"segments",
"[",
"0",
"]",
"=",
"rdheader",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"record",
".",
"seg_name",
"[",
"0",
"]",
")",
",",
"pb_dir",
"=",
"pb_dir",
")",
"# The segment numbers and samples within each segment to read.",
"seg_numbers",
",",
"seg_ranges",
"=",
"record",
".",
"_required_segments",
"(",
"sampfrom",
",",
"sampto",
")",
"# The channels within each segment to read",
"seg_channels",
"=",
"record",
".",
"_required_channels",
"(",
"seg_numbers",
",",
"channels",
",",
"dir_name",
",",
"pb_dir",
")",
"# Read the desired samples in the relevant segments",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"seg_numbers",
")",
")",
":",
"seg_num",
"=",
"seg_numbers",
"[",
"i",
"]",
"# Empty segment or segment with no relevant channels",
"if",
"record",
".",
"seg_name",
"[",
"seg_num",
"]",
"==",
"'~'",
"or",
"len",
"(",
"seg_channels",
"[",
"i",
"]",
")",
"==",
"0",
":",
"record",
".",
"segments",
"[",
"seg_num",
"]",
"=",
"None",
"else",
":",
"record",
".",
"segments",
"[",
"seg_num",
"]",
"=",
"rdrecord",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"record",
".",
"seg_name",
"[",
"seg_num",
"]",
")",
",",
"sampfrom",
"=",
"seg_ranges",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"sampto",
"=",
"seg_ranges",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"channels",
"=",
"seg_channels",
"[",
"i",
"]",
",",
"physical",
"=",
"physical",
",",
"pb_dir",
"=",
"pb_dir",
")",
"# Arrange the fields of the layout specification segment, and",
"# the overall object, to reflect user input.",
"record",
".",
"_arrange_fields",
"(",
"seg_numbers",
"=",
"seg_numbers",
",",
"seg_ranges",
"=",
"seg_ranges",
",",
"channels",
"=",
"channels",
",",
"sampfrom",
"=",
"sampfrom",
",",
"force_channels",
"=",
"force_channels",
")",
"# Convert object into a single segment Record object",
"if",
"m2s",
":",
"record",
"=",
"record",
".",
"multi_to_single",
"(",
"physical",
"=",
"physical",
",",
"return_res",
"=",
"return_res",
")",
"# Perform dtype conversion if necessary",
"if",
"isinstance",
"(",
"record",
",",
"Record",
")",
"and",
"record",
".",
"n_sig",
">",
"0",
":",
"record",
".",
"convert_dtype",
"(",
"physical",
",",
"return_res",
",",
"smooth_frames",
")",
"return",
"record"
] |
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
    record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, or 'end', optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
|
[
"Read",
"a",
"WFDB",
"record",
"and",
"return",
"the",
"signal",
"and",
"record",
"descriptors",
"as",
"attributes",
"in",
"a",
"Record",
"or",
"MultiRecord",
"object",
"."
] |
python
|
train
|
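A short usage sketch for rdrecord, based on the parameters and example given in the docstring above; the record path is the sample file referenced there, and attribute names such as p_signal, fs and sig_name are the usual wfdb Record fields.

import wfdb

# Read two channels of a local sample record, starting at sample 800.
record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800, channels=[1, 3])

print(record.n_sig)            # 2 -- only the selected channels are reported
print(record.fs)               # sampling frequency
print(record.sig_name)         # names of the two returned channels
print(record.p_signal.shape)   # physical signal, one column per channel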
VIVelev/PyDojoML
|
dojo/tree/utils/functions.py
|
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/tree/utils/functions.py#L193-L207
|
def print_tree(root, space=' '):
"""Prints the Decision Tree in a pretty way.
"""
if isinstance(root, Leaf):
print(space + "Prediction: " + str(root.most_frequent))
return
print(space + str(root.question))
print(space + "--> True:")
print_tree(root.true_branch, space+' ')
print(space + "--> False:")
print_tree(root.false_branch, space+' ')
|
[
"def",
"print_tree",
"(",
"root",
",",
"space",
"=",
"' '",
")",
":",
"if",
"isinstance",
"(",
"root",
",",
"Leaf",
")",
":",
"print",
"(",
"space",
"+",
"\"Prediction: \"",
"+",
"str",
"(",
"root",
".",
"most_frequent",
")",
")",
"return",
"print",
"(",
"space",
"+",
"str",
"(",
"root",
".",
"question",
")",
")",
"print",
"(",
"space",
"+",
"\"--> True:\"",
")",
"print_tree",
"(",
"root",
".",
"true_branch",
",",
"space",
"+",
"' '",
")",
"print",
"(",
"space",
"+",
"\"--> False:\"",
")",
"print_tree",
"(",
"root",
".",
"false_branch",
",",
"space",
"+",
"' '",
")"
] |
Prints the Decision Tree in a pretty way.
|
[
"Prints",
"the",
"Decision",
"Tree",
"in",
"a",
"pretty",
"way",
"."
] |
python
|
train
|
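print_tree above is a plain recursive traversal: leaves print a prediction, internal nodes print their question and recurse with extra indentation. The same pattern as a self-contained sketch with stand-in Leaf/Node classes (not the dojo classes themselves):

class Leaf:
    def __init__(self, most_frequent):
        self.most_frequent = most_frequent

class Node:
    def __init__(self, question, true_branch, false_branch):
        self.question = question
        self.true_branch = true_branch
        self.false_branch = false_branch

def print_tree(root, space=' '):
    if isinstance(root, Leaf):
        print(space + "Prediction: " + str(root.most_frequent))
        return
    print(space + str(root.question))
    print(space + "--> True:")
    print_tree(root.true_branch, space + '  ')
    print(space + "--> False:")
    print_tree(root.false_branch, space + '  ')

print_tree(Node("x > 3?", Leaf("yes"), Leaf("no")))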
nccgroup/Scout2
|
AWSScout2/services/vpc.py
|
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/vpc.py#L210-L229
|
def propagate_vpc_names(aws_config, current_config, path, current_path, resource_id, callback_args):
"""
Propagate VPC names in VPC-related services (info only fetched during EC2 calls)
:param aws_config:
:param current_config:
:param path:
:param current_path:
:param resource_id:
:param callback_args:
:return:
"""
if resource_id == ec2_classic:
current_config['name'] = ec2_classic
else:
target_path = copy.deepcopy(current_path)
target_path[1] = 'ec2'
target_path.append(resource_id)
target_path.append('Name')
target_path = '.'.join(target_path)
current_config['name'] = get_value_at(aws_config, target_path, target_path)
|
[
"def",
"propagate_vpc_names",
"(",
"aws_config",
",",
"current_config",
",",
"path",
",",
"current_path",
",",
"resource_id",
",",
"callback_args",
")",
":",
"if",
"resource_id",
"==",
"ec2_classic",
":",
"current_config",
"[",
"'name'",
"]",
"=",
"ec2_classic",
"else",
":",
"target_path",
"=",
"copy",
".",
"deepcopy",
"(",
"current_path",
")",
"target_path",
"[",
"1",
"]",
"=",
"'ec2'",
"target_path",
".",
"append",
"(",
"resource_id",
")",
"target_path",
".",
"append",
"(",
"'Name'",
")",
"target_path",
"=",
"'.'",
".",
"join",
"(",
"target_path",
")",
"current_config",
"[",
"'name'",
"]",
"=",
"get_value_at",
"(",
"aws_config",
",",
"target_path",
",",
"target_path",
")"
] |
Propagate VPC names in VPC-related services (info only fetched during EC2 calls)
:param aws_config:
:param current_config:
:param path:
:param current_path:
:param resource_id:
:param callback_args:
:return:
|
[
"Propagate",
"VPC",
"names",
"in",
"VPC",
"-",
"related",
"services",
"(",
"info",
"only",
"fetched",
"during",
"EC2",
"calls",
")",
":",
"param",
"aws_config",
":",
":",
"param",
"current_config",
":",
":",
"param",
"path",
":",
":",
"param",
"current_path",
":",
":",
"param",
"resource_id",
":",
":",
"param",
"callback_args",
":",
":",
"return",
":"
] |
python
|
train
|
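The callback above rewrites the current path so it points at the EC2 view of the same resource, then reads the value at that dotted path. A minimal stand-in for that dotted-path lookup; get_value_at here is a toy helper, not the Scout2 implementation.

import copy

def get_value_at(config, dotted_path, default=None):
    node = config
    for part in dotted_path.split('.'):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node

aws_config = {'services': {'ec2': {'vpcs': {'vpc-123': {'Name': 'prod-vpc'}}}}}
current_path = ['services', 'vpc', 'vpcs']

target_path = copy.deepcopy(current_path)
target_path[1] = 'ec2'
target_path += ['vpc-123', 'Name']
print(get_value_at(aws_config, '.'.join(target_path)))   # prod-vpc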
spotify/snakebite
|
snakebite/client.py
|
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/client.py#L342-L375
|
def du(self, paths, include_toplevel=False, include_children=True):
'''Returns size information for paths
:param paths: Paths to du
:type paths: list
:param include_toplevel: Include the given path in the result. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the result.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Children:
>>> list(client.du(['/']))
[{'path': '/Makefile', 'length': 6783L}, {'path': '/build', 'length': 244778L}, {'path': '/index.asciidoc', 'length': 100L}, {'path': '/source', 'length': 8524L}]
Directory only:
>>> list(client.du(['/'], include_toplevel=True, include_children=False))
[{'path': '/', 'length': 260185L}]
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("du: no path given")
processor = lambda path, node: self._handle_du(path, node)
for item in self._find_items(paths, processor, include_toplevel=include_toplevel,
include_children=include_children, recurse=False):
if item:
yield item
|
[
"def",
"du",
"(",
"self",
",",
"paths",
",",
"include_toplevel",
"=",
"False",
",",
"include_children",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"paths",
",",
"list",
")",
":",
"raise",
"InvalidInputException",
"(",
"\"Paths should be a list\"",
")",
"if",
"not",
"paths",
":",
"raise",
"InvalidInputException",
"(",
"\"du: no path given\"",
")",
"processor",
"=",
"lambda",
"path",
",",
"node",
":",
"self",
".",
"_handle_du",
"(",
"path",
",",
"node",
")",
"for",
"item",
"in",
"self",
".",
"_find_items",
"(",
"paths",
",",
"processor",
",",
"include_toplevel",
"=",
"include_toplevel",
",",
"include_children",
"=",
"include_children",
",",
"recurse",
"=",
"False",
")",
":",
"if",
"item",
":",
"yield",
"item"
] |
Returns size information for paths
:param paths: Paths to du
:type paths: list
:param include_toplevel: Include the given path in the result. If the path is a file, include_toplevel is always True.
:type include_toplevel: boolean
:param include_children: Include child nodes in the result.
:type include_children: boolean
:returns: a generator that yields dictionaries
**Examples:**
Children:
>>> list(client.du(['/']))
[{'path': '/Makefile', 'length': 6783L}, {'path': '/build', 'length': 244778L}, {'path': '/index.asciidoc', 'length': 100L}, {'path': '/source', 'length': 8524L}]
Directory only:
>>> list(client.du(['/'], include_toplevel=True, include_children=False))
[{'path': '/', 'length': 260185L}]
|
[
"Returns",
"size",
"information",
"for",
"paths"
] |
python
|
train
|
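A usage sketch for du, following the examples embedded in the docstring above; the namenode host and port are placeholders, and Client(host, port) is the usual snakebite constructor.

from snakebite.client import Client

client = Client('namenode.example.com', 8020)   # placeholder HDFS namenode

# Per-child sizes under /
for entry in client.du(['/']):
    print(entry['path'], entry['length'])

# Total for the directory itself only
print(list(client.du(['/'], include_toplevel=True, include_children=False)))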
todddeluca/python-vagrant
|
vagrant/__init__.py
|
https://github.com/todddeluca/python-vagrant/blob/83b26f9337b1f2cb6314210923bbd189e7c9199e/vagrant/__init__.py#L496-L513
|
def _parse_status(self, output):
'''
Unit testing is so much easier when Vagrant is removed from the
equation.
'''
parsed = self._parse_machine_readable_output(output)
statuses = []
# group tuples by target name
# assuming tuples are sorted by target name, this should group all
# the tuples with info for each target.
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
# transform tuples into a dict mapping "type" to "data"
info = {kind: data for timestamp, _, kind, data in tuples}
status = Status(name=target, state=info.get('state'),
provider=info.get('provider-name'))
statuses.append(status)
return statuses
|
[
"def",
"_parse_status",
"(",
"self",
",",
"output",
")",
":",
"parsed",
"=",
"self",
".",
"_parse_machine_readable_output",
"(",
"output",
")",
"statuses",
"=",
"[",
"]",
"# group tuples by target name",
"# assuming tuples are sorted by target name, this should group all",
"# the tuples with info for each target.",
"for",
"target",
",",
"tuples",
"in",
"itertools",
".",
"groupby",
"(",
"parsed",
",",
"lambda",
"tup",
":",
"tup",
"[",
"1",
"]",
")",
":",
"# transform tuples into a dict mapping \"type\" to \"data\"",
"info",
"=",
"{",
"kind",
":",
"data",
"for",
"timestamp",
",",
"_",
",",
"kind",
",",
"data",
"in",
"tuples",
"}",
"status",
"=",
"Status",
"(",
"name",
"=",
"target",
",",
"state",
"=",
"info",
".",
"get",
"(",
"'state'",
")",
",",
"provider",
"=",
"info",
".",
"get",
"(",
"'provider-name'",
")",
")",
"statuses",
".",
"append",
"(",
"status",
")",
"return",
"statuses"
] |
Unit testing is so much easier when Vagrant is removed from the
equation.
|
[
"Unit",
"testing",
"is",
"so",
"much",
"easier",
"when",
"Vagrant",
"is",
"removed",
"from",
"the",
"equation",
"."
] |
python
|
train
|
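_parse_status above groups machine-readable output tuples by target name and folds each group into a Status. The grouping step in isolation, with made-up parsed tuples (timestamp, target, kind, data) rather than real vagrant output:

import itertools
from collections import namedtuple

Status = namedtuple('Status', ['name', 'state', 'provider'])

parsed = [
    ('1588000000', 'default', 'provider-name', 'virtualbox'),
    ('1588000001', 'default', 'state', 'running'),
    ('1588000002', 'web', 'provider-name', 'virtualbox'),
    ('1588000003', 'web', 'state', 'poweroff'),
]

statuses = []
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
    info = {kind: data for timestamp, _, kind, data in tuples}
    statuses.append(Status(name=target, state=info.get('state'),
                           provider=info.get('provider-name')))
print(statuses)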
praekeltfoundation/seaworthy
|
seaworthy/client.py
|
https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/client.py#L190-L202
|
def delete(self, path=None, url_kwargs=None, **kwargs):
"""
        Sends a DELETE request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
"""
return self._session.delete(self._url(path, url_kwargs), **kwargs)
|
[
"def",
"delete",
"(",
"self",
",",
"path",
"=",
"None",
",",
"url_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_session",
".",
"delete",
"(",
"self",
".",
"_url",
"(",
"path",
",",
"url_kwargs",
")",
",",
"*",
"*",
"kwargs",
")"
] |
Sends a DELETE request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
|
[
"Sends",
"a",
"PUT",
"request",
"."
] |
python
|
train
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L32-L37
|
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
else:
return numpy.mean(c0, 0)
|
[
"def",
"calc_mean",
"(",
"c0",
",",
"c1",
"=",
"[",
"]",
")",
":",
"if",
"c1",
"!=",
"[",
"]",
":",
"return",
"(",
"numpy",
".",
"mean",
"(",
"c0",
",",
"0",
")",
"+",
"numpy",
".",
"mean",
"(",
"c1",
",",
"0",
")",
")",
"/",
"2.",
"else",
":",
"return",
"numpy",
".",
"mean",
"(",
"c0",
",",
"0",
")"
] |
Calculates the mean of the data.
|
[
"Calculates",
"the",
"mean",
"of",
"the",
"data",
"."
] |
python
|
train
|
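calc_mean above averages column-wise over one class, or averages the two per-class means when a second class is given. A quick numeric check of that behaviour:

import numpy

c0 = numpy.array([[0.0, 2.0], [2.0, 4.0]])
c1 = numpy.array([[10.0, 10.0], [14.0, 18.0]])

print(numpy.mean(c0, 0))                               # [1. 3.]
print((numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.)    # [6.5 8.5]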
abseil/abseil-py
|
absl/flags/_flagvalues.py
|
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_flagvalues.py#L859-L879
|
def _get_help_for_modules(self, modules, prefix, include_special_flags):
"""Returns the help string for a list of modules.
Private to absl.flags package.
Args:
modules: List[str], a list of modules to get the help string for.
prefix: str, a string that is prepended to each generated help line.
include_special_flags: bool, whether to include description of
SPECIAL_FLAGS, i.e. --flagfile and --undefok.
"""
output_lines = []
for module in modules:
self._render_our_module_flags(module, output_lines, prefix)
if include_special_flags:
self._render_module_flags(
'absl.flags',
six.itervalues(_helpers.SPECIAL_FLAGS._flags()), # pylint: disable=protected-access
output_lines,
prefix)
return '\n'.join(output_lines)
|
[
"def",
"_get_help_for_modules",
"(",
"self",
",",
"modules",
",",
"prefix",
",",
"include_special_flags",
")",
":",
"output_lines",
"=",
"[",
"]",
"for",
"module",
"in",
"modules",
":",
"self",
".",
"_render_our_module_flags",
"(",
"module",
",",
"output_lines",
",",
"prefix",
")",
"if",
"include_special_flags",
":",
"self",
".",
"_render_module_flags",
"(",
"'absl.flags'",
",",
"six",
".",
"itervalues",
"(",
"_helpers",
".",
"SPECIAL_FLAGS",
".",
"_flags",
"(",
")",
")",
",",
"# pylint: disable=protected-access",
"output_lines",
",",
"prefix",
")",
"return",
"'\\n'",
".",
"join",
"(",
"output_lines",
")"
] |
Returns the help string for a list of modules.
Private to absl.flags package.
Args:
modules: List[str], a list of modules to get the help string for.
prefix: str, a string that is prepended to each generated help line.
include_special_flags: bool, whether to include description of
SPECIAL_FLAGS, i.e. --flagfile and --undefok.
|
[
"Returns",
"the",
"help",
"string",
"for",
"a",
"list",
"of",
"modules",
"."
] |
python
|
train
|
python-security/pyt
|
pyt/core/ast_helper.py
|
https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/core/ast_helper.py#L52-L63
|
def _get_call_names_helper(node):
"""Recursively finds all function names."""
if isinstance(node, ast.Name):
if node.id not in BLACK_LISTED_CALL_NAMES:
yield node.id
elif isinstance(node, ast.Subscript):
yield from _get_call_names_helper(node.value)
elif isinstance(node, ast.Str):
yield node.s
elif isinstance(node, ast.Attribute):
yield node.attr
yield from _get_call_names_helper(node.value)
|
[
"def",
"_get_call_names_helper",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Name",
")",
":",
"if",
"node",
".",
"id",
"not",
"in",
"BLACK_LISTED_CALL_NAMES",
":",
"yield",
"node",
".",
"id",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Subscript",
")",
":",
"yield",
"from",
"_get_call_names_helper",
"(",
"node",
".",
"value",
")",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Str",
")",
":",
"yield",
"node",
".",
"s",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Attribute",
")",
":",
"yield",
"node",
".",
"attr",
"yield",
"from",
"_get_call_names_helper",
"(",
"node",
".",
"value",
")"
] |
Recursively finds all function names.
|
[
"Recursively",
"finds",
"all",
"function",
"names",
"."
] |
python
|
train
|
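_get_call_names_helper above recursively unwraps Name / Attribute / Subscript nodes to yield the identifiers that make up a call target. A standalone version of that recursion which can be run directly on a parsed expression (BLACK_LISTED_CALL_NAMES is replaced by an empty set, and the deprecated ast.Str branch is dropped):

import ast

BLACK_LISTED_CALL_NAMES = set()

def get_call_names(node):
    if isinstance(node, ast.Name):
        if node.id not in BLACK_LISTED_CALL_NAMES:
            yield node.id
    elif isinstance(node, ast.Subscript):
        yield from get_call_names(node.value)
    elif isinstance(node, ast.Attribute):
        yield node.attr
        yield from get_call_names(node.value)

call = ast.parse("flask.request.args.get('q')", mode="eval").body
print(list(get_call_names(call.func)))   # ['get', 'args', 'request', 'flask']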
deepmind/sonnet
|
sonnet/examples/learn_to_execute.py
|
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/examples/learn_to_execute.py#L932-L957
|
def make_batch(self):
"""Generator function for batchifying data for learning to execute.
Yields:
tuple:
1. one-hot input tensor, representing programmatic input
        2. one-hot target tensor, the evaluation result.
3. one-hot decoder target, start symbol added for sequence decoding.
4. batch size tensor containing integer input sequence lengths.
5. batch size tensor containing integer output sequence lengths.
"""
while True:
self.reset_data_source()
obs = np.reshape(self._data_source.flat_data,
[self.batch_size, -1])[:, :self._num_steps].T
target = np.reshape(
self._data_source.flat_targets,
[self.batch_size, -1])[:, :self._num_steps_out].T
start_tokens = np.ndarray([1, self.batch_size], dtype=np.int32)
start_tokens.fill(self._data_source.start_token[0])
target_in = np.concatenate((start_tokens, target[:-1, :]), axis=0)
yield (self._np_one_hot(obs, self._num_steps),
self._np_one_hot(target, self._num_steps_out),
self._np_one_hot(target_in, self._num_steps_out),
self.seq_sizes_in,
self.seq_sizes_out)
|
[
"def",
"make_batch",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"reset_data_source",
"(",
")",
"obs",
"=",
"np",
".",
"reshape",
"(",
"self",
".",
"_data_source",
".",
"flat_data",
",",
"[",
"self",
".",
"batch_size",
",",
"-",
"1",
"]",
")",
"[",
":",
",",
":",
"self",
".",
"_num_steps",
"]",
".",
"T",
"target",
"=",
"np",
".",
"reshape",
"(",
"self",
".",
"_data_source",
".",
"flat_targets",
",",
"[",
"self",
".",
"batch_size",
",",
"-",
"1",
"]",
")",
"[",
":",
",",
":",
"self",
".",
"_num_steps_out",
"]",
".",
"T",
"start_tokens",
"=",
"np",
".",
"ndarray",
"(",
"[",
"1",
",",
"self",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"start_tokens",
".",
"fill",
"(",
"self",
".",
"_data_source",
".",
"start_token",
"[",
"0",
"]",
")",
"target_in",
"=",
"np",
".",
"concatenate",
"(",
"(",
"start_tokens",
",",
"target",
"[",
":",
"-",
"1",
",",
":",
"]",
")",
",",
"axis",
"=",
"0",
")",
"yield",
"(",
"self",
".",
"_np_one_hot",
"(",
"obs",
",",
"self",
".",
"_num_steps",
")",
",",
"self",
".",
"_np_one_hot",
"(",
"target",
",",
"self",
".",
"_num_steps_out",
")",
",",
"self",
".",
"_np_one_hot",
"(",
"target_in",
",",
"self",
".",
"_num_steps_out",
")",
",",
"self",
".",
"seq_sizes_in",
",",
"self",
".",
"seq_sizes_out",
")"
] |
Generator function for batchifying data for learning to execute.
Yields:
tuple:
1. one-hot input tensor, representing programmatic input
2. one-hot target tensor, the evaluation result.
3. one-hot decoder target, start symbol added for sequence decoding.
4. batch size tensor containing integer input sequence lengths.
5. batch size tensor containing integer output sequence lengths.
|
[
"Generator",
"function",
"for",
"batchifying",
"data",
"for",
"learning",
"to",
"execute",
"."
] |
python
|
train
|
indico/indico-plugins
|
importer_invenio/indico_importer_invenio/connector.py
|
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/importer_invenio/indico_importer_invenio/connector.py#L193-L211
|
def _init_browser(self):
"""
        Override this method with the appropriate way to prepare a logged in
browser.
"""
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.browser.open(self.server_url + "/youraccount/login")
self.browser.select_form(nr=0)
try:
self.browser['nickname'] = self.user
self.browser['password'] = self.password
except:
self.browser['p_un'] = self.user
self.browser['p_pw'] = self.password
# Set login_method to be writable
self.browser.form.find_control('login_method').readonly = False
self.browser['login_method'] = self.login_method
self.browser.submit()
|
[
"def",
"_init_browser",
"(",
"self",
")",
":",
"self",
".",
"browser",
"=",
"mechanize",
".",
"Browser",
"(",
")",
"self",
".",
"browser",
".",
"set_handle_robots",
"(",
"False",
")",
"self",
".",
"browser",
".",
"open",
"(",
"self",
".",
"server_url",
"+",
"\"/youraccount/login\"",
")",
"self",
".",
"browser",
".",
"select_form",
"(",
"nr",
"=",
"0",
")",
"try",
":",
"self",
".",
"browser",
"[",
"'nickname'",
"]",
"=",
"self",
".",
"user",
"self",
".",
"browser",
"[",
"'password'",
"]",
"=",
"self",
".",
"password",
"except",
":",
"self",
".",
"browser",
"[",
"'p_un'",
"]",
"=",
"self",
".",
"user",
"self",
".",
"browser",
"[",
"'p_pw'",
"]",
"=",
"self",
".",
"password",
"# Set login_method to be writable",
"self",
".",
"browser",
".",
"form",
".",
"find_control",
"(",
"'login_method'",
")",
".",
"readonly",
"=",
"False",
"self",
".",
"browser",
"[",
"'login_method'",
"]",
"=",
"self",
".",
"login_method",
"self",
".",
"browser",
".",
"submit",
"(",
")"
] |
Override this method with the appropriate way to prepare a logged in
browser.
|
[
"Ovveride",
"this",
"method",
"with",
"the",
"appropriate",
"way",
"to",
"prepare",
"a",
"logged",
"in",
"browser",
"."
] |
python
|
train
|
fastai/fastai
|
fastai/callbacks/hooks.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/hooks.py#L110-L114
|
def model_sizes(m:nn.Module, size:tuple=(64,64))->Tuple[Sizes,Tensor,Hooks]:
"Pass a dummy input through the model `m` to get the various sizes of activations."
with hook_outputs(m) as hooks:
x = dummy_eval(m, size)
return [o.stored.shape for o in hooks]
|
[
"def",
"model_sizes",
"(",
"m",
":",
"nn",
".",
"Module",
",",
"size",
":",
"tuple",
"=",
"(",
"64",
",",
"64",
")",
")",
"->",
"Tuple",
"[",
"Sizes",
",",
"Tensor",
",",
"Hooks",
"]",
":",
"with",
"hook_outputs",
"(",
"m",
")",
"as",
"hooks",
":",
"x",
"=",
"dummy_eval",
"(",
"m",
",",
"size",
")",
"return",
"[",
"o",
".",
"stored",
".",
"shape",
"for",
"o",
"in",
"hooks",
"]"
] |
Pass a dummy input through the model `m` to get the various sizes of activations.
|
[
"Pass",
"a",
"dummy",
"input",
"through",
"the",
"model",
"m",
"to",
"get",
"the",
"various",
"sizes",
"of",
"activations",
"."
] |
python
|
train
|
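model_sizes above relies on forward hooks: push a dummy batch through the model and record each layer's output shape. The same idea in plain PyTorch, without the fastai hook helpers:

import torch
import torch.nn as nn

def layer_output_sizes(model, size=(64, 64)):
    shapes = []
    hooks = [m.register_forward_hook(lambda mod, inp, out: shapes.append(tuple(out.shape)))
             for m in model.children()]
    try:
        with torch.no_grad():
            model(torch.zeros(1, 3, *size))   # dummy 3-channel image batch
    finally:
        for h in hooks:
            h.remove()
    return shapes

m = nn.Sequential(nn.Conv2d(3, 16, 3, stride=2, padding=1),
                  nn.ReLU(),
                  nn.AdaptiveAvgPool2d(1))
print(layer_output_sizes(m))   # [(1, 16, 32, 32), (1, 16, 32, 32), (1, 16, 1, 1)]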
Yelp/kafka-utils
|
kafka_utils/util/zookeeper.py
|
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L426-L435
|
def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment
|
[
"def",
"get_cluster_assignment",
"(",
"self",
")",
":",
"plan",
"=",
"self",
".",
"get_cluster_plan",
"(",
")",
"assignment",
"=",
"{",
"}",
"for",
"elem",
"in",
"plan",
"[",
"'partitions'",
"]",
":",
"assignment",
"[",
"(",
"elem",
"[",
"'topic'",
"]",
",",
"elem",
"[",
"'partition'",
"]",
")",
"]",
"=",
"elem",
"[",
"'replicas'",
"]",
"return",
"assignment"
] |
Fetch the cluster layout in form of assignment from zookeeper
|
[
"Fetch",
"the",
"cluster",
"layout",
"in",
"form",
"of",
"assignment",
"from",
"zookeeper"
] |
python
|
train
|
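get_cluster_assignment above is a simple reshaping of the zookeeper cluster plan into a {(topic, partition): replicas} mapping. With a hand-written plan dict it reduces to:

plan = {'partitions': [
    {'topic': 'events', 'partition': 0, 'replicas': [1, 2]},
    {'topic': 'events', 'partition': 1, 'replicas': [2, 3]},
]}

assignment = {(e['topic'], e['partition']): e['replicas'] for e in plan['partitions']}
print(assignment)   # {('events', 0): [1, 2], ('events', 1): [2, 3]}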
terrycain/aioboto3
|
aioboto3/s3/cse.py
|
https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/cse.py#L330-L389
|
async def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
"""
S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object
"""
if self._s3_client is None:
await self.setup()
# Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries
# 9223372036854775806 is 8EiB so I have no issue with hardcoding it.
# We pass the actual start, desired start and desired end to the decrypt function so that it can
# generate the correct IV's for starting decryption at that block and then chop off the start and end of the
# AES block so it matches what the user is expecting.
_range = kwargs.get('Range')
actual_range_start = None
desired_range_start = None
desired_range_end = None
if _range:
range_match = RANGE_REGEX.match(_range)
if not range_match:
raise ValueError('Dont understand this range value {0}'.format(_range))
desired_range_start = int(range_match.group(1))
desired_range_end = range_match.group(2)
if desired_range_end is None:
desired_range_end = 9223372036854775806
else:
desired_range_end = int(desired_range_end)
actual_range_start, actual_range_end = _get_adjusted_crypto_range(desired_range_start, desired_range_end)
# Update range with actual start_end
kwargs['Range'] = 'bytes={0}-{1}'.format(actual_range_start, actual_range_end)
s3_response = await self._s3_client.get_object(Bucket=Bucket, Key=Key, **kwargs)
file_data = await s3_response['Body'].read()
metadata = s3_response['Metadata']
whole_file_length = int(s3_response['ResponseMetadata']['HTTPHeaders']['content-length'])
if 'x-amz-key' not in metadata and 'x-amz-key-v2' not in metadata:
# No crypto
return s3_response
if 'x-amz-key' in metadata:
# Crypto V1
body = await self._decrypt_v1(file_data, metadata, actual_range_start)
else:
# Crypto V2
body = await self._decrypt_v2(file_data, metadata, whole_file_length,
actual_range_start, desired_range_start,
desired_range_end)
s3_response['Body'] = DummyAIOFile(body)
return s3_response
|
[
"async",
"def",
"get_object",
"(",
"self",
",",
"Bucket",
":",
"str",
",",
"Key",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"if",
"self",
".",
"_s3_client",
"is",
"None",
":",
"await",
"self",
".",
"setup",
"(",
")",
"# Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries",
"# 9223372036854775806 is 8EiB so I have no issue with hardcoding it.",
"# We pass the actual start, desired start and desired end to the decrypt function so that it can",
"# generate the correct IV's for starting decryption at that block and then chop off the start and end of the",
"# AES block so it matches what the user is expecting.",
"_range",
"=",
"kwargs",
".",
"get",
"(",
"'Range'",
")",
"actual_range_start",
"=",
"None",
"desired_range_start",
"=",
"None",
"desired_range_end",
"=",
"None",
"if",
"_range",
":",
"range_match",
"=",
"RANGE_REGEX",
".",
"match",
"(",
"_range",
")",
"if",
"not",
"range_match",
":",
"raise",
"ValueError",
"(",
"'Dont understand this range value {0}'",
".",
"format",
"(",
"_range",
")",
")",
"desired_range_start",
"=",
"int",
"(",
"range_match",
".",
"group",
"(",
"1",
")",
")",
"desired_range_end",
"=",
"range_match",
".",
"group",
"(",
"2",
")",
"if",
"desired_range_end",
"is",
"None",
":",
"desired_range_end",
"=",
"9223372036854775806",
"else",
":",
"desired_range_end",
"=",
"int",
"(",
"desired_range_end",
")",
"actual_range_start",
",",
"actual_range_end",
"=",
"_get_adjusted_crypto_range",
"(",
"desired_range_start",
",",
"desired_range_end",
")",
"# Update range with actual start_end",
"kwargs",
"[",
"'Range'",
"]",
"=",
"'bytes={0}-{1}'",
".",
"format",
"(",
"actual_range_start",
",",
"actual_range_end",
")",
"s3_response",
"=",
"await",
"self",
".",
"_s3_client",
".",
"get_object",
"(",
"Bucket",
"=",
"Bucket",
",",
"Key",
"=",
"Key",
",",
"*",
"*",
"kwargs",
")",
"file_data",
"=",
"await",
"s3_response",
"[",
"'Body'",
"]",
".",
"read",
"(",
")",
"metadata",
"=",
"s3_response",
"[",
"'Metadata'",
"]",
"whole_file_length",
"=",
"int",
"(",
"s3_response",
"[",
"'ResponseMetadata'",
"]",
"[",
"'HTTPHeaders'",
"]",
"[",
"'content-length'",
"]",
")",
"if",
"'x-amz-key'",
"not",
"in",
"metadata",
"and",
"'x-amz-key-v2'",
"not",
"in",
"metadata",
":",
"# No crypto",
"return",
"s3_response",
"if",
"'x-amz-key'",
"in",
"metadata",
":",
"# Crypto V1",
"body",
"=",
"await",
"self",
".",
"_decrypt_v1",
"(",
"file_data",
",",
"metadata",
",",
"actual_range_start",
")",
"else",
":",
"# Crypto V2",
"body",
"=",
"await",
"self",
".",
"_decrypt_v2",
"(",
"file_data",
",",
"metadata",
",",
"whole_file_length",
",",
"actual_range_start",
",",
"desired_range_start",
",",
"desired_range_end",
")",
"s3_response",
"[",
"'Body'",
"]",
"=",
"DummyAIOFile",
"(",
"body",
")",
"return",
"s3_response"
] |
S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object
|
[
"S3",
"GetObject",
".",
"Takes",
"same",
"args",
"as",
"Boto3",
"documentation"
] |
python
|
train
|
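The range handling in get_object above has to widen the requested byte range so that decryption can start and stop on AES block boundaries. The alignment arithmetic on its own, for 16-byte blocks; this helper is an illustration, not the private _get_adjusted_crypto_range.

AES_BLOCK = 16

def align_range(start, end):
    actual_start = (start // AES_BLOCK) * AES_BLOCK          # round down to a block edge
    actual_end = ((end // AES_BLOCK) + 1) * AES_BLOCK - 1    # round up to the end of a block
    return actual_start, actual_end

print(align_range(5, 70))   # (0, 79) -- fetch whole blocks, trim back to 5..70 after decrypting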
Qiskit/qiskit-terra
|
qiskit/extensions/standard/ubase.py
|
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/extensions/standard/ubase.py#L33-L45
|
def to_matrix(self):
"""Return a Numpy.array for the U3 gate."""
theta, phi, lam = self.params
return numpy.array(
[[
numpy.cos(theta / 2),
-numpy.exp(1j * lam) * numpy.sin(theta / 2)
],
[
numpy.exp(1j * phi) * numpy.sin(theta / 2),
numpy.exp(1j * (phi + lam)) * numpy.cos(theta / 2)
]],
dtype=complex)
|
[
"def",
"to_matrix",
"(",
"self",
")",
":",
"theta",
",",
"phi",
",",
"lam",
"=",
"self",
".",
"params",
"return",
"numpy",
".",
"array",
"(",
"[",
"[",
"numpy",
".",
"cos",
"(",
"theta",
"/",
"2",
")",
",",
"-",
"numpy",
".",
"exp",
"(",
"1j",
"*",
"lam",
")",
"*",
"numpy",
".",
"sin",
"(",
"theta",
"/",
"2",
")",
"]",
",",
"[",
"numpy",
".",
"exp",
"(",
"1j",
"*",
"phi",
")",
"*",
"numpy",
".",
"sin",
"(",
"theta",
"/",
"2",
")",
",",
"numpy",
".",
"exp",
"(",
"1j",
"*",
"(",
"phi",
"+",
"lam",
")",
")",
"*",
"numpy",
".",
"cos",
"(",
"theta",
"/",
"2",
")",
"]",
"]",
",",
"dtype",
"=",
"complex",
")"
] |
Return a Numpy.array for the U3 gate.
|
[
"Return",
"a",
"Numpy",
".",
"array",
"for",
"the",
"U3",
"gate",
"."
] |
python
|
test
|
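The to_matrix method above builds the standard U3(theta, phi, lam) matrix. A quick standalone check that the same expression is unitary for a sample set of angles:

import numpy

theta, phi, lam = 0.3, 0.7, -0.2
u3 = numpy.array(
    [[numpy.cos(theta / 2), -numpy.exp(1j * lam) * numpy.sin(theta / 2)],
     [numpy.exp(1j * phi) * numpy.sin(theta / 2),
      numpy.exp(1j * (phi + lam)) * numpy.cos(theta / 2)]],
    dtype=complex)

print(numpy.allclose(u3 @ u3.conj().T, numpy.eye(2)))   # True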
Gandi/gandi.cli
|
gandi/cli/modules/iaas.py
|
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/iaas.py#L475-L493
|
def scp(cls, vm_id, login, identity, local_file, remote_file):
"""Copy file to remote VM."""
cmd = ['scp']
if identity:
cmd.extend(('-i', identity,))
version, ip_addr = cls.vm_ip(vm_id)
if version == 6:
ip_addr = '[%s]' % ip_addr
cmd.extend((local_file, '%s@%s:%s' %
(login, ip_addr, remote_file),))
cls.echo('Running %s' % ' '.join(cmd))
for _ in range(5):
ret = cls.execute(cmd, False)
if ret:
break
time.sleep(.5)
return ret
|
[
"def",
"scp",
"(",
"cls",
",",
"vm_id",
",",
"login",
",",
"identity",
",",
"local_file",
",",
"remote_file",
")",
":",
"cmd",
"=",
"[",
"'scp'",
"]",
"if",
"identity",
":",
"cmd",
".",
"extend",
"(",
"(",
"'-i'",
",",
"identity",
",",
")",
")",
"version",
",",
"ip_addr",
"=",
"cls",
".",
"vm_ip",
"(",
"vm_id",
")",
"if",
"version",
"==",
"6",
":",
"ip_addr",
"=",
"'[%s]'",
"%",
"ip_addr",
"cmd",
".",
"extend",
"(",
"(",
"local_file",
",",
"'%s@%s:%s'",
"%",
"(",
"login",
",",
"ip_addr",
",",
"remote_file",
")",
",",
")",
")",
"cls",
".",
"echo",
"(",
"'Running %s'",
"%",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"for",
"_",
"in",
"range",
"(",
"5",
")",
":",
"ret",
"=",
"cls",
".",
"execute",
"(",
"cmd",
",",
"False",
")",
"if",
"ret",
":",
"break",
"time",
".",
"sleep",
"(",
".5",
")",
"return",
"ret"
] |
Copy file to remote VM.
|
[
"Copy",
"file",
"to",
"remote",
"VM",
"."
] |
python
|
train
|
wonambi-python/wonambi
|
wonambi/ioeeg/micromed.py
|
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/micromed.py#L43-L65
|
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
"""
subj_id = self._header['name'] + ' ' + self._header['surname']
chan_name = [ch['chan_name'] for ch in self._header['chans']]
return subj_id, self._header['start_time'], self._header['s_freq'], chan_name, self._n_smp, self._header
|
[
"def",
"return_hdr",
"(",
"self",
")",
":",
"subj_id",
"=",
"self",
".",
"_header",
"[",
"'name'",
"]",
"+",
"' '",
"+",
"self",
".",
"_header",
"[",
"'surname'",
"]",
"chan_name",
"=",
"[",
"ch",
"[",
"'chan_name'",
"]",
"for",
"ch",
"in",
"self",
".",
"_header",
"[",
"'chans'",
"]",
"]",
"return",
"subj_id",
",",
"self",
".",
"_header",
"[",
"'start_time'",
"]",
",",
"self",
".",
"_header",
"[",
"'s_freq'",
"]",
",",
"chan_name",
",",
"self",
".",
"_n_smp",
",",
"self",
".",
"_header"
] |
Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
|
[
"Return",
"the",
"header",
"for",
"further",
"use",
"."
] |
python
|
train
|
playpauseandstop/bootstrapper
|
bootstrapper.py
|
https://github.com/playpauseandstop/bootstrapper/blob/b216a05f2acb0b9f4919c4e010ff7b0f63fc1393/bootstrapper.py#L265-L267
|
def iteritems(data, **kwargs):
"""Iterate over dict items."""
return iter(data.items(**kwargs)) if IS_PY3 else data.iteritems(**kwargs)
|
[
"def",
"iteritems",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"iter",
"(",
"data",
".",
"items",
"(",
"*",
"*",
"kwargs",
")",
")",
"if",
"IS_PY3",
"else",
"data",
".",
"iteritems",
"(",
"*",
"*",
"kwargs",
")"
] |
Iterate over dict items.
|
[
"Iterate",
"over",
"dict",
"items",
"."
] |
python
|
valid
|
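The `iteritems` shim above relies on a module-level `IS_PY3` flag that is not part of this record; a self-contained sketch of the same idea, with the flag computed inline:

import sys

IS_PY3 = sys.version_info[0] == 3   # stands in for bootstrapper's module-level flag

def iteritems(data, **kwargs):
    # dict.items() returns a view on Python 3; wrap it so both branches
    # hand back an iterator. Python 2 keeps its lazy iteritems().
    return iter(data.items(**kwargs)) if IS_PY3 else data.iteritems(**kwargs)

for key, value in iteritems({'a': 1, 'b': 2}):
    print(key, value)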
PmagPy/PmagPy
|
programs/pmag_gui.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/pmag_gui.py#L619-L639
|
def on_btn_orientation(self, event):
"""
Create and fill wxPython grid for entering
orientation data.
"""
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.SafeYield()
#dw, dh = wx.DisplaySize()
size = wx.DisplaySize()
size = (size[0]-0.1 * size[0], size[1]-0.1 * size[1])
if self.data_model_num == 3:
frame = pmag_gui_dialogs.OrientFrameGrid3(self, -1, 'demag_orient.txt',
self.WD, self.contribution,
size)
else:
frame = pmag_gui_dialogs.OrientFrameGrid(self, -1, 'demag_orient.txt',
self.WD, self.er_magic, size)
frame.Show(True)
frame.Centre()
self.Hide()
del wait
|
[
"def",
"on_btn_orientation",
"(",
"self",
",",
"event",
")",
":",
"wait",
"=",
"wx",
".",
"BusyInfo",
"(",
"'Compiling required data, please wait...'",
")",
"wx",
".",
"SafeYield",
"(",
")",
"#dw, dh = wx.DisplaySize()",
"size",
"=",
"wx",
".",
"DisplaySize",
"(",
")",
"size",
"=",
"(",
"size",
"[",
"0",
"]",
"-",
"0.1",
"*",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
"-",
"0.1",
"*",
"size",
"[",
"1",
"]",
")",
"if",
"self",
".",
"data_model_num",
"==",
"3",
":",
"frame",
"=",
"pmag_gui_dialogs",
".",
"OrientFrameGrid3",
"(",
"self",
",",
"-",
"1",
",",
"'demag_orient.txt'",
",",
"self",
".",
"WD",
",",
"self",
".",
"contribution",
",",
"size",
")",
"else",
":",
"frame",
"=",
"pmag_gui_dialogs",
".",
"OrientFrameGrid",
"(",
"self",
",",
"-",
"1",
",",
"'demag_orient.txt'",
",",
"self",
".",
"WD",
",",
"self",
".",
"er_magic",
",",
"size",
")",
"frame",
".",
"Show",
"(",
"True",
")",
"frame",
".",
"Centre",
"(",
")",
"self",
".",
"Hide",
"(",
")",
"del",
"wait"
] |
Create and fill wxPython grid for entering
orientation data.
|
[
"Create",
"and",
"fill",
"wxPython",
"grid",
"for",
"entering",
"orientation",
"data",
"."
] |
python
|
train
|
pymupdf/PyMuPDF
|
fitz/fitz.py
|
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L3030-L3037
|
def _getTransformation(self):
"""_getTransformation(self) -> PyObject *"""
CheckParent(self)
val = _fitz.Page__getTransformation(self)
val = Matrix(val)
return val
|
[
"def",
"_getTransformation",
"(",
"self",
")",
":",
"CheckParent",
"(",
"self",
")",
"val",
"=",
"_fitz",
".",
"Page__getTransformation",
"(",
"self",
")",
"val",
"=",
"Matrix",
"(",
"val",
")",
"return",
"val"
] |
_getTransformation(self) -> PyObject *
|
[
"_getTransformation",
"(",
"self",
")",
"-",
">",
"PyObject",
"*"
] |
python
|
train
|
PyCQA/astroid
|
astroid/rebuilder.py
|
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L775-L777
|
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
return nodes.Pass(node.lineno, node.col_offset, parent)
|
[
"def",
"visit_pass",
"(",
"self",
",",
"node",
",",
"parent",
")",
":",
"return",
"nodes",
".",
"Pass",
"(",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"parent",
")"
] |
visit a Pass node by returning a fresh instance of it
|
[
"visit",
"a",
"Pass",
"node",
"by",
"returning",
"a",
"fresh",
"instance",
"of",
"it"
] |
python
|
train
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Util.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Util.py#L1411-L1424
|
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
|
[
"def",
"make_path_relative",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"drive_s",
",",
"path",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"path",
")",
"import",
"re",
"if",
"not",
"drive_s",
":",
"path",
"=",
"re",
".",
"compile",
"(",
"\"/*(.*)\"",
")",
".",
"findall",
"(",
"path",
")",
"[",
"0",
"]",
"else",
":",
"path",
"=",
"path",
"[",
"1",
":",
"]",
"assert",
"(",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
")",
",",
"path",
"return",
"path"
] |
converts an absolute path name to a relative pathname.
|
[
"makes",
"an",
"absolute",
"path",
"name",
"to",
"a",
"relative",
"pathname",
"."
] |
python
|
train
|
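A simplified, hedged sketch of what `make_path_relative` does, with the regex replaced by `lstrip` (an approximation that behaves the same for POSIX paths):

import os.path

def make_path_relative(path):
    # Drop the drive letter (Windows) or the leading slashes (POSIX)
    # so the result is always relative.
    if os.path.isabs(path):
        drive, path = os.path.splitdrive(path)
        path = path.lstrip('/') if not drive else path[1:]
    assert not os.path.isabs(path), path
    return path

print(make_path_relative('/usr/local/bin'))    # -> 'usr/local/bin'
print(make_path_relative('already/relative'))  # unchanged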
osrg/ryu
|
ryu/services/protocols/bgp/operator/command.py
|
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/operator/command.py#L209-L217
|
def _quick_help(self, nested=False):
""":param nested: True if help is requested directly for this command
and False when help is requested for a list of possible
completions.
"""
if nested:
return self.command_path(), None, self.help_msg
else:
return self.command_path(), self.param_help_msg, self.help_msg
|
[
"def",
"_quick_help",
"(",
"self",
",",
"nested",
"=",
"False",
")",
":",
"if",
"nested",
":",
"return",
"self",
".",
"command_path",
"(",
")",
",",
"None",
",",
"self",
".",
"help_msg",
"else",
":",
"return",
"self",
".",
"command_path",
"(",
")",
",",
"self",
".",
"param_help_msg",
",",
"self",
".",
"help_msg"
] |
:param nested: True if help is requested directly for this command
and False when help is requested for a list of possible
completions.
|
[
":",
"param",
"nested",
":",
"True",
"if",
"help",
"is",
"requested",
"directly",
"for",
"this",
"command",
"and",
"False",
"when",
"help",
"is",
"requested",
"for",
"a",
"list",
"of",
"possible",
"completions",
"."
] |
python
|
train
|
chaddotson/noaa_radar
|
noaa_radar/radar.py
|
https://github.com/chaddotson/noaa_radar/blob/ebb1e8d87d4b35b8942867446deced74b22a47cc/noaa_radar/radar.py#L112-L141
|
def get_composite_reflectivity(self, tower_id, background='#000000', include_legend=True, include_counties=True,
include_warnings=True, include_highways=True, include_cities=True,
include_rivers=True, include_topography=True):
"""
Get the composite reflectivity for a noaa radar site.
:param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'.
:type tower_id: str
:param background: The hex background color.
:type background: str
:param include_legend: True - include legend.
:type include_legend: bool
:param include_counties: True - include county lines.
:type include_counties: bool
:param include_warnings: True - include warning lines.
:type include_warnings: bool
:param include_highways: True - include highways.
:type include_highways: bool
:param include_cities: True - include city labels.
:type include_cities: bool
:param include_rivers: True - include rivers
:type include_rivers: bool
:param include_topography: True - include topography
:type include_topography: bool
:rtype: PIL.Image
:return: A PIL.Image instance with the Radar composite reflectivity.
"""
return self._build_radar_image(tower_id, "NCR", background=background, include_legend=include_legend,
include_counties=include_counties, include_warnings=include_warnings,
include_highways=include_highways, include_cities=include_cities,
include_rivers=include_rivers, include_topography=include_topography)
|
[
"def",
"get_composite_reflectivity",
"(",
"self",
",",
"tower_id",
",",
"background",
"=",
"'#000000'",
",",
"include_legend",
"=",
"True",
",",
"include_counties",
"=",
"True",
",",
"include_warnings",
"=",
"True",
",",
"include_highways",
"=",
"True",
",",
"include_cities",
"=",
"True",
",",
"include_rivers",
"=",
"True",
",",
"include_topography",
"=",
"True",
")",
":",
"return",
"self",
".",
"_build_radar_image",
"(",
"tower_id",
",",
"\"NCR\"",
",",
"background",
"=",
"background",
",",
"include_legend",
"=",
"include_legend",
",",
"include_counties",
"=",
"include_counties",
",",
"include_warnings",
"=",
"include_warnings",
",",
"include_highways",
"=",
"include_highways",
",",
"include_cities",
"=",
"include_cities",
",",
"include_rivers",
"=",
"include_rivers",
",",
"include_topography",
"=",
"include_topography",
")"
] |
Get the composite reflectivity for a noaa radar site.
:param tower_id: The noaa tower id. Ex Huntsville, Al -> 'HTX'.
:type tower_id: str
:param background: The hex background color.
:type background: str
:param include_legend: True - include legend.
:type include_legend: bool
:param include_counties: True - include county lines.
:type include_counties: bool
:param include_warnings: True - include warning lines.
:type include_warnings: bool
:param include_highways: True - include highways.
:type include_highways: bool
:param include_cities: True - include city labels.
:type include_cities: bool
:param include_rivers: True - include rivers
:type include_rivers: bool
:param include_topography: True - include topography
:type include_topography: bool
:rtype: PIL.Image
:return: A PIL.Image instance with the Radar composite reflectivity.
|
[
"Get",
"the",
"composite",
"reflectivity",
"for",
"a",
"noaa",
"radar",
"site",
".",
":",
"param",
"tower_id",
":",
"The",
"noaa",
"tower",
"id",
".",
"Ex",
"Huntsville",
"Al",
"-",
">",
"HTX",
".",
":",
"type",
"tower_id",
":",
"str",
":",
"param",
"background",
":",
"The",
"hex",
"background",
"color",
".",
":",
"type",
"background",
":",
"str",
":",
"param",
"include_legend",
":",
"True",
"-",
"include",
"legend",
".",
":",
"type",
"include_legend",
":",
"bool",
":",
"param",
"include_counties",
":",
"True",
"-",
"include",
"county",
"lines",
".",
":",
"type",
"include_counties",
":",
"bool",
":",
"param",
"include_warnings",
":",
"True",
"-",
"include",
"warning",
"lines",
".",
":",
"type",
"include_warnings",
":",
"bool",
":",
"param",
"include_highways",
":",
"True",
"-",
"include",
"highways",
".",
":",
"type",
"include_highways",
":",
"bool",
":",
"param",
"include_cities",
":",
"True",
"-",
"include",
"city",
"labels",
".",
":",
"type",
"include_cities",
":",
"bool",
":",
"param",
"include_rivers",
":",
"True",
"-",
"include",
"rivers",
":",
"type",
"include_rivers",
":",
"bool",
":",
"param",
"include_topography",
":",
"True",
"-",
"include",
"topography",
":",
"type",
"include_topography",
":",
"bool",
":",
"rtype",
":",
"PIL",
".",
"Image",
":",
"return",
":",
"A",
"PIL",
".",
"Image",
"instance",
"with",
"the",
"Radar",
"composite",
"reflectivity",
"."
] |
python
|
train
|
jwhitlock/drf-cached-instances
|
drf_cached_instances/models.py
|
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L46-L55
|
def values_list(self, *args, **kwargs):
"""Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
"""
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
return self.pks
|
[
"def",
"values_list",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"flat",
"=",
"kwargs",
".",
"pop",
"(",
"'flat'",
",",
"False",
")",
"assert",
"flat",
"is",
"True",
"assert",
"len",
"(",
"args",
")",
"==",
"1",
"assert",
"args",
"[",
"0",
"]",
"==",
"self",
".",
"model",
".",
"_meta",
".",
"pk",
".",
"name",
"return",
"self",
".",
"pks"
] |
Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
|
[
"Return",
"the",
"primary",
"keys",
"as",
"a",
"list",
"."
] |
python
|
train
|
teitei-tk/Flask-REST-Controller
|
flask_rest_controller/routing.py
|
https://github.com/teitei-tk/Flask-REST-Controller/blob/b4386b523f3d2c6550051c95d5ba74e5ff459946/flask_rest_controller/routing.py#L16-L34
|
def set_routing(app, view_data):
"""
apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application
"""
routing_modules = convert_routing_module(view_data)
for module in routing_modules:
view = import_string(module.import_path)
app.add_url_rule(module.url, view_func=view.as_view(module.endpoint))
|
[
"def",
"set_routing",
"(",
"app",
",",
"view_data",
")",
":",
"routing_modules",
"=",
"convert_routing_module",
"(",
"view_data",
")",
"for",
"module",
"in",
"routing_modules",
":",
"view",
"=",
"import_string",
"(",
"module",
".",
"import_path",
")",
"app",
".",
"add_url_rule",
"(",
"module",
".",
"url",
",",
"view_func",
"=",
"view",
".",
"as_view",
"(",
"module",
".",
"endpoint",
")",
")"
] |
apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application
|
[
"apply",
"the",
"routing",
"configuration",
"you",
"ve",
"described"
] |
python
|
train
|
basho/riak-python-client
|
riak/transports/http/transport.py
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/transport.py#L530-L550
|
def delete_search_index(self, index):
"""
    Delete the specified Solr search index for Yokozuna.
:param index: a name of a yz index
:type index: string
:rtype boolean
"""
if not self.yz_wm_index:
raise NotImplementedError("Search 2.0 administration is not "
"supported for this version")
url = self.search_index_path(index)
# Run the request...
status, _, _ = self._request('DELETE', url)
if status != 204:
raise RiakError('Error setting Search 2.0 index.')
return True
|
[
"def",
"delete_search_index",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"self",
".",
"yz_wm_index",
":",
"raise",
"NotImplementedError",
"(",
"\"Search 2.0 administration is not \"",
"\"supported for this version\"",
")",
"url",
"=",
"self",
".",
"search_index_path",
"(",
"index",
")",
"# Run the request...",
"status",
",",
"_",
",",
"_",
"=",
"self",
".",
"_request",
"(",
"'DELETE'",
",",
"url",
")",
"if",
"status",
"!=",
"204",
":",
"raise",
"RiakError",
"(",
"'Error setting Search 2.0 index.'",
")",
"return",
"True"
] |
Delete the specified Solr search index for Yokozuna.
:param index: a name of a yz index
:type index: string
:rtype boolean
|
[
"Fetch",
"the",
"specified",
"Solr",
"search",
"index",
"for",
"Yokozuna",
"."
] |
python
|
train
|
oasis-open/cti-stix-validator
|
stix2validator/v20/musts.py
|
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v20/musts.py#L244-L265
|
def artifact_mime_type(instance):
"""Ensure the 'mime_type' property of artifact objects comes from the
Template column in the IANA media type registry.
"""
for key, obj in instance['objects'].items():
if ('type' in obj and obj['type'] == 'artifact' and 'mime_type' in obj):
if enums.media_types():
if obj['mime_type'] not in enums.media_types():
yield JSONError("The 'mime_type' property of object '%s' "
"('%s') must be an IANA registered MIME "
"Type of the form 'type/subtype'."
% (key, obj['mime_type']), instance['id'])
else:
info("Can't reach IANA website; using regex for mime types.")
mime_re = re.compile(r'^(application|audio|font|image|message|model'
'|multipart|text|video)/[a-zA-Z0-9.+_-]+')
if not mime_re.match(obj['mime_type']):
yield JSONError("The 'mime_type' property of object '%s' "
"('%s') should be an IANA MIME Type of the"
" form 'type/subtype'."
% (key, obj['mime_type']), instance['id'])
|
[
"def",
"artifact_mime_type",
"(",
"instance",
")",
":",
"for",
"key",
",",
"obj",
"in",
"instance",
"[",
"'objects'",
"]",
".",
"items",
"(",
")",
":",
"if",
"(",
"'type'",
"in",
"obj",
"and",
"obj",
"[",
"'type'",
"]",
"==",
"'artifact'",
"and",
"'mime_type'",
"in",
"obj",
")",
":",
"if",
"enums",
".",
"media_types",
"(",
")",
":",
"if",
"obj",
"[",
"'mime_type'",
"]",
"not",
"in",
"enums",
".",
"media_types",
"(",
")",
":",
"yield",
"JSONError",
"(",
"\"The 'mime_type' property of object '%s' \"",
"\"('%s') must be an IANA registered MIME \"",
"\"Type of the form 'type/subtype'.\"",
"%",
"(",
"key",
",",
"obj",
"[",
"'mime_type'",
"]",
")",
",",
"instance",
"[",
"'id'",
"]",
")",
"else",
":",
"info",
"(",
"\"Can't reach IANA website; using regex for mime types.\"",
")",
"mime_re",
"=",
"re",
".",
"compile",
"(",
"r'^(application|audio|font|image|message|model'",
"'|multipart|text|video)/[a-zA-Z0-9.+_-]+'",
")",
"if",
"not",
"mime_re",
".",
"match",
"(",
"obj",
"[",
"'mime_type'",
"]",
")",
":",
"yield",
"JSONError",
"(",
"\"The 'mime_type' property of object '%s' \"",
"\"('%s') should be an IANA MIME Type of the\"",
"\" form 'type/subtype'.\"",
"%",
"(",
"key",
",",
"obj",
"[",
"'mime_type'",
"]",
")",
",",
"instance",
"[",
"'id'",
"]",
")"
] |
Ensure the 'mime_type' property of artifact objects comes from the
Template column in the IANA media type registry.
|
[
"Ensure",
"the",
"mime_type",
"property",
"of",
"artifact",
"objects",
"comes",
"from",
"the",
"Template",
"column",
"in",
"the",
"IANA",
"media",
"type",
"registry",
"."
] |
python
|
train
|
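To make the fallback branch of `artifact_mime_type` concrete, the same regex can be exercised on its own; the candidate strings below are made up:

import re

# The same fallback pattern the check uses when the IANA registry is
# unreachable: a registered top-level type, a slash, then a subtype.
mime_re = re.compile(r'^(application|audio|font|image|message|model'
                     '|multipart|text|video)/[a-zA-Z0-9.+_-]+')

for candidate in ('application/pdf', 'image/png', 'not-a-mime-type'):
    print(candidate, bool(mime_re.match(candidate)))
# application/pdf True, image/png True, not-a-mime-type False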
QuantEcon/QuantEcon.py
|
quantecon/random/utilities.py
|
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/random/utilities.py#L173-L212
|
def draw(cdf, size=None):
"""
Generate a random sample according to the cumulative distribution
    given by `cdf`. Jit-compiled by Numba in nopython mode.
Parameters
----------
cdf : array_like(float, ndim=1)
Array containing the cumulative distribution.
size : scalar(int), optional(default=None)
Size of the sample. If an integer is supplied, an ndarray of
`size` independent draws is returned; otherwise, a single draw
is returned as a scalar.
Returns
-------
scalar(int) or ndarray(int, ndim=1)
Examples
--------
>>> cdf = np.cumsum([0.4, 0.6])
>>> qe.random.draw(cdf)
1
>>> qe.random.draw(cdf, 10)
array([1, 0, 1, 0, 1, 0, 0, 0, 1, 0])
"""
if isinstance(size, types.Integer):
def draw_impl(cdf, size):
rs = np.random.random_sample(size)
out = np.empty(size, dtype=np.int_)
for i in range(size):
out[i] = searchsorted(cdf, rs[i])
return out
else:
def draw_impl(cdf, size):
r = np.random.random_sample()
return searchsorted(cdf, r)
return draw_impl
|
[
"def",
"draw",
"(",
"cdf",
",",
"size",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"size",
",",
"types",
".",
"Integer",
")",
":",
"def",
"draw_impl",
"(",
"cdf",
",",
"size",
")",
":",
"rs",
"=",
"np",
".",
"random",
".",
"random_sample",
"(",
"size",
")",
"out",
"=",
"np",
".",
"empty",
"(",
"size",
",",
"dtype",
"=",
"np",
".",
"int_",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"out",
"[",
"i",
"]",
"=",
"searchsorted",
"(",
"cdf",
",",
"rs",
"[",
"i",
"]",
")",
"return",
"out",
"else",
":",
"def",
"draw_impl",
"(",
"cdf",
",",
"size",
")",
":",
"r",
"=",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
"return",
"searchsorted",
"(",
"cdf",
",",
"r",
")",
"return",
"draw_impl"
] |
Generate a random sample according to the cumulative distribution
given by `cdf`. Jit-compiled by Numba in nopython mode.
Parameters
----------
cdf : array_like(float, ndim=1)
Array containing the cumulative distribution.
size : scalar(int), optional(default=None)
Size of the sample. If an integer is supplied, an ndarray of
`size` independent draws is returned; otherwise, a single draw
is returned as a scalar.
Returns
-------
scalar(int) or ndarray(int, ndim=1)
Examples
--------
>>> cdf = np.cumsum([0.4, 0.6])
>>> qe.random.draw(cdf)
1
>>> qe.random.draw(cdf, 10)
array([1, 0, 1, 0, 1, 0, 0, 0, 1, 0])
|
[
"Generate",
"a",
"random",
"sample",
"according",
"to",
"the",
"cumulative",
"distribution",
"given",
"by",
"cdf",
".",
"Jit",
"-",
"complied",
"by",
"Numba",
"in",
"nopython",
"mode",
"."
] |
python
|
train
|
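Setting the Numba dispatch aside, the sampling in `draw` is an inverse-CDF lookup; a plain-NumPy sketch of the same idea (not the jitted implementation):

import numpy as np

cdf = np.cumsum([0.4, 0.6])        # -> array([0.4, 1.0])
r = np.random.random_sample(10)    # uniform draws on [0, 1)
sample = np.searchsorted(cdf, r)   # index of the first cdf entry >= each draw
print(sample)                      # e.g. array([1, 0, 1, ...]), with P(0) = 0.4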
numenta/nupic
|
src/nupic/encoders/coordinate.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/coordinate.py#L179-L189
|
def _bitForCoordinate(cls, coordinate, n):
"""
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
"""
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getUInt32(n)
|
[
"def",
"_bitForCoordinate",
"(",
"cls",
",",
"coordinate",
",",
"n",
")",
":",
"seed",
"=",
"cls",
".",
"_hashCoordinate",
"(",
"coordinate",
")",
"rng",
"=",
"Random",
"(",
"seed",
")",
"return",
"rng",
".",
"getUInt32",
"(",
"n",
")"
] |
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
|
[
"Maps",
"the",
"coordinate",
"to",
"a",
"bit",
"in",
"the",
"SDR",
"."
] |
python
|
valid
|
saltstack/salt
|
salt/modules/freebsdpkg.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/freebsdpkg.py#L138-L184
|
def _match(names):
'''
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
'''
pkgs = list_pkgs(versions_as_list=True)
errors = []
# Look for full matches
full_pkg_strings = []
out = __salt__['cmd.run_stdout'](['pkg_info'],
output_loglevel='trace',
python_shell=False)
for line in out.splitlines():
try:
full_pkg_strings.append(line.split()[0])
except IndexError:
continue
full_matches = [x for x in names if x in full_pkg_strings]
# Look for pkgname-only matches
matches = []
ambiguous = []
for name in set(names) - set(full_matches):
cver = pkgs.get(name)
if cver is not None:
if len(cver) == 1:
matches.append('{0}-{1}'.format(name, cver[0]))
else:
ambiguous.append(name)
errors.append(
'Ambiguous package \'{0}\'. Full name/version required. '
'Possible matches: {1}'.format(
name,
', '.join(['{0}-{1}'.format(name, x) for x in cver])
)
)
# Find packages that did not match anything
not_matched = \
set(names) - set(matches) - set(full_matches) - set(ambiguous)
for name in not_matched:
errors.append('Package \'{0}\' not found'.format(name))
return matches + full_matches, errors
|
[
"def",
"_match",
"(",
"names",
")",
":",
"pkgs",
"=",
"list_pkgs",
"(",
"versions_as_list",
"=",
"True",
")",
"errors",
"=",
"[",
"]",
"# Look for full matches",
"full_pkg_strings",
"=",
"[",
"]",
"out",
"=",
"__salt__",
"[",
"'cmd.run_stdout'",
"]",
"(",
"[",
"'pkg_info'",
"]",
",",
"output_loglevel",
"=",
"'trace'",
",",
"python_shell",
"=",
"False",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"try",
":",
"full_pkg_strings",
".",
"append",
"(",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"except",
"IndexError",
":",
"continue",
"full_matches",
"=",
"[",
"x",
"for",
"x",
"in",
"names",
"if",
"x",
"in",
"full_pkg_strings",
"]",
"# Look for pkgname-only matches",
"matches",
"=",
"[",
"]",
"ambiguous",
"=",
"[",
"]",
"for",
"name",
"in",
"set",
"(",
"names",
")",
"-",
"set",
"(",
"full_matches",
")",
":",
"cver",
"=",
"pkgs",
".",
"get",
"(",
"name",
")",
"if",
"cver",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"cver",
")",
"==",
"1",
":",
"matches",
".",
"append",
"(",
"'{0}-{1}'",
".",
"format",
"(",
"name",
",",
"cver",
"[",
"0",
"]",
")",
")",
"else",
":",
"ambiguous",
".",
"append",
"(",
"name",
")",
"errors",
".",
"append",
"(",
"'Ambiguous package \\'{0}\\'. Full name/version required. '",
"'Possible matches: {1}'",
".",
"format",
"(",
"name",
",",
"', '",
".",
"join",
"(",
"[",
"'{0}-{1}'",
".",
"format",
"(",
"name",
",",
"x",
")",
"for",
"x",
"in",
"cver",
"]",
")",
")",
")",
"# Find packages that did not match anything",
"not_matched",
"=",
"set",
"(",
"names",
")",
"-",
"set",
"(",
"matches",
")",
"-",
"set",
"(",
"full_matches",
")",
"-",
"set",
"(",
"ambiguous",
")",
"for",
"name",
"in",
"not_matched",
":",
"errors",
".",
"append",
"(",
"'Package \\'{0}\\' not found'",
".",
"format",
"(",
"name",
")",
")",
"return",
"matches",
"+",
"full_matches",
",",
"errors"
] |
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
|
[
"Since",
"pkg_delete",
"requires",
"the",
"full",
"pkgname",
"-",
"version",
"string",
"this",
"function",
"will",
"attempt",
"to",
"match",
"the",
"package",
"name",
"with",
"its",
"version",
".",
"Returns",
"a",
"list",
"of",
"partial",
"matches",
"and",
"package",
"names",
"that",
"match",
"the",
"pkgname",
"-",
"version",
"string",
"required",
"by",
"pkg_delete",
"and",
"a",
"list",
"of",
"errors",
"encountered",
"."
] |
python
|
train
|
exa-analytics/exa
|
exa/core/container.py
|
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/container.py#L433-L444
|
def _rel(self, copy=False):
"""
Get descriptive kwargs of the container (e.g. name, description, meta).
"""
rel = {}
for key, obj in vars(self).items():
if not isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)) and not key.startswith('_'):
if copy and 'id' not in key:
rel[key] = deepcopy(obj)
else:
rel[key] = obj
return rel
|
[
"def",
"_rel",
"(",
"self",
",",
"copy",
"=",
"False",
")",
":",
"rel",
"=",
"{",
"}",
"for",
"key",
",",
"obj",
"in",
"vars",
"(",
"self",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"(",
"pd",
".",
"Series",
",",
"pd",
".",
"DataFrame",
",",
"pd",
".",
"SparseSeries",
",",
"pd",
".",
"SparseDataFrame",
")",
")",
"and",
"not",
"key",
".",
"startswith",
"(",
"'_'",
")",
":",
"if",
"copy",
"and",
"'id'",
"not",
"in",
"key",
":",
"rel",
"[",
"key",
"]",
"=",
"deepcopy",
"(",
"obj",
")",
"else",
":",
"rel",
"[",
"key",
"]",
"=",
"obj",
"return",
"rel"
] |
Get descriptive kwargs of the container (e.g. name, description, meta).
|
[
"Get",
"descriptive",
"kwargs",
"of",
"the",
"container",
"(",
"e",
".",
"g",
".",
"name",
"description",
"meta",
")",
"."
] |
python
|
train
|
inasafe/inasafe
|
safe/utilities/gis.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/gis.py#L288-L302
|
def is_polygon_layer(layer):
"""Check if a QGIS layer is vector and its geometries are polygons.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains polygons, otherwise False.
:rtype: bool
"""
try:
return (layer.type() == QgsMapLayer.VectorLayer) and (
layer.geometryType() == QgsWkbTypes.PolygonGeometry)
except AttributeError:
return False
|
[
"def",
"is_polygon_layer",
"(",
"layer",
")",
":",
"try",
":",
"return",
"(",
"layer",
".",
"type",
"(",
")",
"==",
"QgsMapLayer",
".",
"VectorLayer",
")",
"and",
"(",
"layer",
".",
"geometryType",
"(",
")",
"==",
"QgsWkbTypes",
".",
"PolygonGeometry",
")",
"except",
"AttributeError",
":",
"return",
"False"
] |
Check if a QGIS layer is vector and its geometries are polygons.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains polygons, otherwise False.
:rtype: bool
|
[
"Check",
"if",
"a",
"QGIS",
"layer",
"is",
"vector",
"and",
"its",
"geometries",
"are",
"polygons",
"."
] |
python
|
train
|
pvlib/pvlib-python
|
pvlib/tools.py
|
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/tools.py#L262-L356
|
def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
converged=False):
"""
A vectorized version of Newton, Halley, and secant methods for arrays. Do
not use this method directly. This method is called from :func:`newton`
when ``np.isscalar(x0)`` is true. For docstring, see :func:`newton`.
"""
try:
p = np.asarray(x0, dtype=float)
except TypeError: # can't convert complex to float
p = np.asarray(x0)
failures = np.ones_like(p, dtype=bool) # at start, nothing converged
nz_der = np.copy(failures)
if fprime is not None:
# Newton-Raphson method
for iteration in range(maxiter):
# first evaluate fval
fval = np.asarray(func(p, *args))
# If all fval are 0, all roots have been found, then terminate
if not fval.any():
failures = fval.astype(bool)
break
fder = np.asarray(fprime(p, *args))
nz_der = (fder != 0)
# stop iterating if all derivatives are zero
if not nz_der.any():
break
# Newton step
dp = fval[nz_der] / fder[nz_der]
if fprime2 is not None:
fder2 = np.asarray(fprime2(p, *args))
dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
# only update nonzero derivatives
p[nz_der] -= dp
failures[nz_der] = np.abs(dp) >= tol # items not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
else:
# Secant method
dx = np.finfo(float).eps**0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
q0 = np.asarray(func(p, *args))
q1 = np.asarray(func(p1, *args))
active = np.ones_like(p, dtype=bool)
for iteration in range(maxiter):
nz_der = (q1 != q0)
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
failures[nz_der] = np.abs(dp) >= tol # not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
p1, p = p, p1
q0 = q1
q1 = np.asarray(func(p1, *args))
zero_der = ~nz_der & failures # don't include converged with zero-ders
if zero_der.any():
# secant warnings
if fprime is None:
nonzero_dp = (p1 != p)
# non-zero dp, but infinite newton step
zero_der_nz_dp = (zero_der & nonzero_dp)
if zero_der_nz_dp.any():
rms = np.sqrt(
sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
)
warnings.warn('RMS of {:g} reached'.format(rms),
RuntimeWarning)
# newton or halley warnings
else:
all_or_some = 'all' if zero_der.all() else 'some'
msg = '{:s} derivatives were zero'.format(all_or_some)
warnings.warn(msg, RuntimeWarning)
elif failures.any():
all_or_some = 'all' if failures.all() else 'some'
msg = '{0:s} failed to converge after {1:d} iterations'.format(
all_or_some, maxiter
)
if failures.all():
raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
if converged:
result = namedtuple('result', ('root', 'converged', 'zero_der'))
p = result(p, ~failures, zero_der)
return p
|
[
"def",
"_array_newton",
"(",
"func",
",",
"x0",
",",
"fprime",
",",
"args",
",",
"tol",
",",
"maxiter",
",",
"fprime2",
",",
"converged",
"=",
"False",
")",
":",
"try",
":",
"p",
"=",
"np",
".",
"asarray",
"(",
"x0",
",",
"dtype",
"=",
"float",
")",
"except",
"TypeError",
":",
"# can't convert complex to float",
"p",
"=",
"np",
".",
"asarray",
"(",
"x0",
")",
"failures",
"=",
"np",
".",
"ones_like",
"(",
"p",
",",
"dtype",
"=",
"bool",
")",
"# at start, nothing converged",
"nz_der",
"=",
"np",
".",
"copy",
"(",
"failures",
")",
"if",
"fprime",
"is",
"not",
"None",
":",
"# Newton-Raphson method",
"for",
"iteration",
"in",
"range",
"(",
"maxiter",
")",
":",
"# first evaluate fval",
"fval",
"=",
"np",
".",
"asarray",
"(",
"func",
"(",
"p",
",",
"*",
"args",
")",
")",
"# If all fval are 0, all roots have been found, then terminate",
"if",
"not",
"fval",
".",
"any",
"(",
")",
":",
"failures",
"=",
"fval",
".",
"astype",
"(",
"bool",
")",
"break",
"fder",
"=",
"np",
".",
"asarray",
"(",
"fprime",
"(",
"p",
",",
"*",
"args",
")",
")",
"nz_der",
"=",
"(",
"fder",
"!=",
"0",
")",
"# stop iterating if all derivatives are zero",
"if",
"not",
"nz_der",
".",
"any",
"(",
")",
":",
"break",
"# Newton step",
"dp",
"=",
"fval",
"[",
"nz_der",
"]",
"/",
"fder",
"[",
"nz_der",
"]",
"if",
"fprime2",
"is",
"not",
"None",
":",
"fder2",
"=",
"np",
".",
"asarray",
"(",
"fprime2",
"(",
"p",
",",
"*",
"args",
")",
")",
"dp",
"=",
"dp",
"/",
"(",
"1.0",
"-",
"0.5",
"*",
"dp",
"*",
"fder2",
"[",
"nz_der",
"]",
"/",
"fder",
"[",
"nz_der",
"]",
")",
"# only update nonzero derivatives",
"p",
"[",
"nz_der",
"]",
"-=",
"dp",
"failures",
"[",
"nz_der",
"]",
"=",
"np",
".",
"abs",
"(",
"dp",
")",
">=",
"tol",
"# items not yet converged",
"# stop iterating if there aren't any failures, not incl zero der",
"if",
"not",
"failures",
"[",
"nz_der",
"]",
".",
"any",
"(",
")",
":",
"break",
"else",
":",
"# Secant method",
"dx",
"=",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"**",
"0.33",
"p1",
"=",
"p",
"*",
"(",
"1",
"+",
"dx",
")",
"+",
"np",
".",
"where",
"(",
"p",
">=",
"0",
",",
"dx",
",",
"-",
"dx",
")",
"q0",
"=",
"np",
".",
"asarray",
"(",
"func",
"(",
"p",
",",
"*",
"args",
")",
")",
"q1",
"=",
"np",
".",
"asarray",
"(",
"func",
"(",
"p1",
",",
"*",
"args",
")",
")",
"active",
"=",
"np",
".",
"ones_like",
"(",
"p",
",",
"dtype",
"=",
"bool",
")",
"for",
"iteration",
"in",
"range",
"(",
"maxiter",
")",
":",
"nz_der",
"=",
"(",
"q1",
"!=",
"q0",
")",
"# stop iterating if all derivatives are zero",
"if",
"not",
"nz_der",
".",
"any",
"(",
")",
":",
"p",
"=",
"(",
"p1",
"+",
"p",
")",
"/",
"2.0",
"break",
"# Secant Step",
"dp",
"=",
"(",
"q1",
"*",
"(",
"p1",
"-",
"p",
")",
")",
"[",
"nz_der",
"]",
"/",
"(",
"q1",
"-",
"q0",
")",
"[",
"nz_der",
"]",
"# only update nonzero derivatives",
"p",
"[",
"nz_der",
"]",
"=",
"p1",
"[",
"nz_der",
"]",
"-",
"dp",
"active_zero_der",
"=",
"~",
"nz_der",
"&",
"active",
"p",
"[",
"active_zero_der",
"]",
"=",
"(",
"p1",
"+",
"p",
")",
"[",
"active_zero_der",
"]",
"/",
"2.0",
"active",
"&=",
"nz_der",
"# don't assign zero derivatives again",
"failures",
"[",
"nz_der",
"]",
"=",
"np",
".",
"abs",
"(",
"dp",
")",
">=",
"tol",
"# not yet converged",
"# stop iterating if there aren't any failures, not incl zero der",
"if",
"not",
"failures",
"[",
"nz_der",
"]",
".",
"any",
"(",
")",
":",
"break",
"p1",
",",
"p",
"=",
"p",
",",
"p1",
"q0",
"=",
"q1",
"q1",
"=",
"np",
".",
"asarray",
"(",
"func",
"(",
"p1",
",",
"*",
"args",
")",
")",
"zero_der",
"=",
"~",
"nz_der",
"&",
"failures",
"# don't include converged with zero-ders",
"if",
"zero_der",
".",
"any",
"(",
")",
":",
"# secant warnings",
"if",
"fprime",
"is",
"None",
":",
"nonzero_dp",
"=",
"(",
"p1",
"!=",
"p",
")",
"# non-zero dp, but infinite newton step",
"zero_der_nz_dp",
"=",
"(",
"zero_der",
"&",
"nonzero_dp",
")",
"if",
"zero_der_nz_dp",
".",
"any",
"(",
")",
":",
"rms",
"=",
"np",
".",
"sqrt",
"(",
"sum",
"(",
"(",
"p1",
"[",
"zero_der_nz_dp",
"]",
"-",
"p",
"[",
"zero_der_nz_dp",
"]",
")",
"**",
"2",
")",
")",
"warnings",
".",
"warn",
"(",
"'RMS of {:g} reached'",
".",
"format",
"(",
"rms",
")",
",",
"RuntimeWarning",
")",
"# newton or halley warnings",
"else",
":",
"all_or_some",
"=",
"'all'",
"if",
"zero_der",
".",
"all",
"(",
")",
"else",
"'some'",
"msg",
"=",
"'{:s} derivatives were zero'",
".",
"format",
"(",
"all_or_some",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"RuntimeWarning",
")",
"elif",
"failures",
".",
"any",
"(",
")",
":",
"all_or_some",
"=",
"'all'",
"if",
"failures",
".",
"all",
"(",
")",
"else",
"'some'",
"msg",
"=",
"'{0:s} failed to converge after {1:d} iterations'",
".",
"format",
"(",
"all_or_some",
",",
"maxiter",
")",
"if",
"failures",
".",
"all",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"msg",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"RuntimeWarning",
")",
"if",
"converged",
":",
"result",
"=",
"namedtuple",
"(",
"'result'",
",",
"(",
"'root'",
",",
"'converged'",
",",
"'zero_der'",
")",
")",
"p",
"=",
"result",
"(",
"p",
",",
"~",
"failures",
",",
"zero_der",
")",
"return",
"p"
] |
A vectorized version of Newton, Halley, and secant methods for arrays. Do
not use this method directly. This method is called from :func:`newton`
when ``np.isscalar(x0)`` is true. For docstring, see :func:`newton`.
|
[
"A",
"vectorized",
"version",
"of",
"Newton",
"Halley",
"and",
"secant",
"methods",
"for",
"arrays",
".",
"Do",
"not",
"use",
"this",
"method",
"directly",
".",
"This",
"method",
"is",
"called",
"from",
":",
"func",
":",
"newton",
"when",
"np",
".",
"isscalar",
"(",
"x0",
")",
"is",
"true",
".",
"For",
"docstring",
"see",
":",
"func",
":",
"newton",
"."
] |
python
|
train
|
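The heart of `_array_newton` is an elementwise Newton step applied to a whole array at once. A toy, fully converging example of that step, solving x**2 - c = 0 without the zero-derivative and failure bookkeeping:

import numpy as np

def f(x, c):
    return x ** 2 - c

def fprime(x, c):
    return 2 * x

c = np.array([2.0, 3.0, 10.0])
p = np.ones_like(c)                    # initial guesses
for _ in range(50):
    dp = f(p, c) / fprime(p, c)        # Newton step for every element at once
    p -= dp
    if np.all(np.abs(dp) < 1e-12):     # converged everywhere
        break
print(p)                               # ~ [1.41421356, 1.73205081, 3.16227766]
print(np.sqrt(c))                      # reference values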
chrisspen/dtree
|
dtree.py
|
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1331-L1342
|
def out_of_bag_mae(self):
"""
Returns the mean absolute error for predictions on the out-of-bag
samples.
"""
if not self._out_of_bag_mae_clean:
try:
self._out_of_bag_mae = self.test(self.out_of_bag_samples)
self._out_of_bag_mae_clean = True
except NodeNotReadyToPredict:
return
return self._out_of_bag_mae.copy()
|
[
"def",
"out_of_bag_mae",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_out_of_bag_mae_clean",
":",
"try",
":",
"self",
".",
"_out_of_bag_mae",
"=",
"self",
".",
"test",
"(",
"self",
".",
"out_of_bag_samples",
")",
"self",
".",
"_out_of_bag_mae_clean",
"=",
"True",
"except",
"NodeNotReadyToPredict",
":",
"return",
"return",
"self",
".",
"_out_of_bag_mae",
".",
"copy",
"(",
")"
] |
Returns the mean absolute error for predictions on the out-of-bag
samples.
|
[
"Returns",
"the",
"mean",
"absolute",
"error",
"for",
"predictions",
"on",
"the",
"out",
"-",
"of",
"-",
"bag",
"samples",
"."
] |
python
|
train
|
UncleRus/regnupg
|
regnupg.py
|
https://github.com/UncleRus/regnupg/blob/c1acb5d459107c70e45967ec554831a5f2cd1aaf/regnupg.py#L845-L860
|
def key_exists(self, key, secret=False):
'''
    Check if given key exists.
:param key: Key ID
:param secret: Check secret key
:rtype: bool
'''
if len(key) < 8:
return False
key = key.upper()
res = self.list_keys(secret)
for fingerprint in res.keys:
if fingerprint.endswith(key):
return True
return False
|
[
"def",
"key_exists",
"(",
"self",
",",
"key",
",",
"secret",
"=",
"False",
")",
":",
"if",
"len",
"(",
"key",
")",
"<",
"8",
":",
"return",
"False",
"key",
"=",
"key",
".",
"upper",
"(",
")",
"res",
"=",
"self",
".",
"list_keys",
"(",
"secret",
")",
"for",
"fingerprint",
"in",
"res",
".",
"keys",
":",
"if",
"fingerprint",
".",
"endswith",
"(",
"key",
")",
":",
"return",
"True",
"return",
"False"
] |
Check if given key exists.
:param key: Key ID
:param secret: Check secret key
:rtype: bool
|
[
"Check",
"is",
"given",
"key",
"exists",
"."
] |
python
|
train
|
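The lookup in `key_exists` is a case-insensitive suffix match against stored fingerprints; a compact sketch with made-up fingerprints:

fingerprints = ['A1B2C3D4E5F60718', '0123456789ABCDEF']   # made-up placeholders

def key_exists(key, fingerprints):
    if len(key) < 8:                   # too short to be a key ID
        return False
    key = key.upper()
    return any(fp.endswith(key) for fp in fingerprints)

print(key_exists('e5f60718', fingerprints))   # True, matches the first entry
print(key_exists('deadbeef', fingerprints))   # False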
litl/backoff
|
backoff/_wait_gen.py
|
https://github.com/litl/backoff/blob/229d30adce4128f093550a1761c49594c78df4b4/backoff/_wait_gen.py#L6-L23
|
def expo(base=2, factor=1, max_value=None):
"""Generator for exponential decay.
Args:
base: The mathematical base of the exponentiation operation
        factor: Factor to multiply the exponentiation by.
max_value: The maximum value to yield. Once the value in the
true exponential sequence exceeds this, the value
of max_value will forever after be yielded.
"""
n = 0
while True:
a = factor * base ** n
if max_value is None or a < max_value:
yield a
n += 1
else:
yield max_value
|
[
"def",
"expo",
"(",
"base",
"=",
"2",
",",
"factor",
"=",
"1",
",",
"max_value",
"=",
"None",
")",
":",
"n",
"=",
"0",
"while",
"True",
":",
"a",
"=",
"factor",
"*",
"base",
"**",
"n",
"if",
"max_value",
"is",
"None",
"or",
"a",
"<",
"max_value",
":",
"yield",
"a",
"n",
"+=",
"1",
"else",
":",
"yield",
"max_value"
] |
Generator for exponential decay.
Args:
base: The mathematical base of the exponentiation operation
    factor: Factor to multiply the exponentiation by.
max_value: The maximum value to yield. Once the value in the
true exponential sequence exceeds this, the value
of max_value will forever after be yielded.
|
[
"Generator",
"for",
"exponential",
"decay",
"."
] |
python
|
train
|
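A quick check of what the `expo` generator yields, reusing the function body above with its defaults and with a cap:

from itertools import islice

def expo(base=2, factor=1, max_value=None):
    n = 0
    while True:
        a = factor * base ** n
        if max_value is None or a < max_value:
            yield a
            n += 1
        else:
            yield max_value

print(list(islice(expo(), 6)))              # [1, 2, 4, 8, 16, 32]
print(list(islice(expo(max_value=10), 6)))  # [1, 2, 4, 8, 10, 10]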
Kozea/cairocffi
|
cairocffi/fonts.py
|
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L40-L58
|
def _from_pointer(pointer, incref):
"""Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer.
:type incref: bool
:param incref:
        Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`FontFace` or one of its sub-classes,
depending on the face’s type.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_font_face_reference(pointer)
self = object.__new__(FONT_TYPE_TO_CLASS.get(
cairo.cairo_font_face_get_type(pointer), FontFace))
FontFace.__init__(self, pointer) # Skip the subclass’s __init__
return self
|
[
"def",
"_from_pointer",
"(",
"pointer",
",",
"incref",
")",
":",
"if",
"pointer",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"ValueError",
"(",
"'Null pointer'",
")",
"if",
"incref",
":",
"cairo",
".",
"cairo_font_face_reference",
"(",
"pointer",
")",
"self",
"=",
"object",
".",
"__new__",
"(",
"FONT_TYPE_TO_CLASS",
".",
"get",
"(",
"cairo",
".",
"cairo_font_face_get_type",
"(",
"pointer",
")",
",",
"FontFace",
")",
")",
"FontFace",
".",
"__init__",
"(",
"self",
",",
"pointer",
")",
"# Skip the subclass’s __init__",
"return",
"self"
] |
Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer.
:type incref: bool
:param incref:
    Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`FontFace` or one of its sub-classes,
depending on the face’s type.
|
[
"Wrap",
"an",
"existing",
":",
"c",
":",
"type",
":",
"cairo_font_face_t",
"*",
"cdata",
"pointer",
"."
] |
python
|
train
|
nabetama/slacky
|
slacky/rest/rest.py
|
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L163-L171
|
def mark(self, channel_name, ts):
""" https://api.slack.com/methods/channels.mark
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/channels.mark', self._requests)(data=self.params).post()
|
[
"def",
"mark",
"(",
"self",
",",
"channel_name",
",",
"ts",
")",
":",
"channel_id",
"=",
"self",
".",
"get_channel_id",
"(",
"channel_name",
")",
"self",
".",
"params",
".",
"update",
"(",
"{",
"'channel'",
":",
"channel_id",
",",
"'ts'",
":",
"ts",
",",
"}",
")",
"return",
"FromUrl",
"(",
"'https://slack.com/api/channels.mark'",
",",
"self",
".",
"_requests",
")",
"(",
"data",
"=",
"self",
".",
"params",
")",
".",
"post",
"(",
")"
] |
https://api.slack.com/methods/channels.mark
|
[
"https",
":",
"//",
"api",
".",
"slack",
".",
"com",
"/",
"methods",
"/",
"channels",
".",
"mark"
] |
python
|
train
|
ninuxorg/nodeshot
|
nodeshot/interop/sync/models/node_external.py
|
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/sync/models/node_external.py#L48-L60
|
def delete_external_nodes(sender, **kwargs):
""" sync by deleting nodes from external layers when needed """
node = kwargs['instance']
if node.layer.is_external is False or not hasattr(node.layer, 'external') or node.layer.external.synchronizer_path is None:
return False
if hasattr(node, 'external') and node.external.external_id:
push_changes_to_external_layers.delay(
node=node.external.external_id,
external_layer=node.layer.external,
operation='delete'
)
|
[
"def",
"delete_external_nodes",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"node",
"=",
"kwargs",
"[",
"'instance'",
"]",
"if",
"node",
".",
"layer",
".",
"is_external",
"is",
"False",
"or",
"not",
"hasattr",
"(",
"node",
".",
"layer",
",",
"'external'",
")",
"or",
"node",
".",
"layer",
".",
"external",
".",
"synchronizer_path",
"is",
"None",
":",
"return",
"False",
"if",
"hasattr",
"(",
"node",
",",
"'external'",
")",
"and",
"node",
".",
"external",
".",
"external_id",
":",
"push_changes_to_external_layers",
".",
"delay",
"(",
"node",
"=",
"node",
".",
"external",
".",
"external_id",
",",
"external_layer",
"=",
"node",
".",
"layer",
".",
"external",
",",
"operation",
"=",
"'delete'",
")"
] |
sync by deleting nodes from external layers when needed
|
[
"sync",
"by",
"deleting",
"nodes",
"from",
"external",
"layers",
"when",
"needed"
] |
python
|
train
|
aliyun/aliyun-odps-python-sdk
|
odps/ml/metrics/classification.py
|
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/ml/metrics/classification.py#L533-L569
|
def lift_chart(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
r"""
    Compute lift value, true positive rate (TPR) and threshold from predicted DataFrame.
Note that this method will trigger the defined flow to execute.
:param df: predicted data frame
:type df: DataFrame
:param pos_label: positive label
:type pos_label: str
:param col_true: true column
:type col_true: str
:param col_pred: predicted column, 'prediction_result' if absent.
:type col_pred: str
:param col_scores: score column, 'prediction_score' if absent.
:type col_scores: str
:return: lift value, true positive rate and threshold, in numpy array format.
:Example:
>>> import matplotlib.pyplot as plt
>>> depth, lift, thresh = lift_chart(predicted)
>>> plt.plot(depth, lift)
"""
if not col_pred:
col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
if not col_scores:
col_scores = get_field_name_by_role(df, FieldRole.PREDICTED_SCORE)
thresh, tp, fn, tn, fp = _run_roc_node(df, pos_label, col_true, col_pred, col_scores)
depth = (tp + fp) * 1.0 / (tp + fp + tn + fn)
tpr = tp * 1.0 / (tp + fn)
lift = tpr / depth
lift_result = namedtuple('LiftResult', 'depth lift thresh')
return lift_result(depth=depth, lift=lift, thresh=thresh)
|
[
"def",
"lift_chart",
"(",
"df",
",",
"col_true",
"=",
"None",
",",
"col_pred",
"=",
"None",
",",
"col_scores",
"=",
"None",
",",
"pos_label",
"=",
"1",
")",
":",
"if",
"not",
"col_pred",
":",
"col_pred",
"=",
"get_field_name_by_role",
"(",
"df",
",",
"FieldRole",
".",
"PREDICTED_CLASS",
")",
"if",
"not",
"col_scores",
":",
"col_scores",
"=",
"get_field_name_by_role",
"(",
"df",
",",
"FieldRole",
".",
"PREDICTED_SCORE",
")",
"thresh",
",",
"tp",
",",
"fn",
",",
"tn",
",",
"fp",
"=",
"_run_roc_node",
"(",
"df",
",",
"pos_label",
",",
"col_true",
",",
"col_pred",
",",
"col_scores",
")",
"depth",
"=",
"(",
"tp",
"+",
"fp",
")",
"*",
"1.0",
"/",
"(",
"tp",
"+",
"fp",
"+",
"tn",
"+",
"fn",
")",
"tpr",
"=",
"tp",
"*",
"1.0",
"/",
"(",
"tp",
"+",
"fn",
")",
"lift",
"=",
"tpr",
"/",
"depth",
"lift_result",
"=",
"namedtuple",
"(",
"'LiftResult'",
",",
"'depth lift thresh'",
")",
"return",
"lift_result",
"(",
"depth",
"=",
"depth",
",",
"lift",
"=",
"lift",
",",
"thresh",
"=",
"thresh",
")"
] |
r"""
    Compute lift value, true positive rate (TPR) and threshold from predicted DataFrame.
Note that this method will trigger the defined flow to execute.
:param df: predicted data frame
:type df: DataFrame
:param pos_label: positive label
:type pos_label: str
:param col_true: true column
:type col_true: str
:param col_pred: predicted column, 'prediction_result' if absent.
:type col_pred: str
:param col_scores: score column, 'prediction_score' if absent.
:type col_scores: str
:return: lift value, true positive rate and threshold, in numpy array format.
:Example:
>>> import matplotlib.pyplot as plt
>>> depth, lift, thresh = lift_chart(predicted)
>>> plt.plot(depth, lift)
|
[
"r",
"Compute",
"life",
"value",
"true",
"positive",
"rate",
"(",
"TPR",
")",
"and",
"threshold",
"from",
"predicted",
"DataFrame",
"."
] |
python
|
train
|
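The lift arithmetic in `lift_chart` reduces to three ratios. A worked example with made-up confusion counts at a single threshold:

tp, fn, tn, fp = 80.0, 20.0, 850.0, 50.0   # made-up confusion counts

depth = (tp + fp) / (tp + fp + tn + fn)    # share of samples predicted positive
tpr = tp / (tp + fn)                       # recall at this threshold
lift = tpr / depth                         # gain over random targeting

print(depth, tpr, lift)                    # 0.13 0.8 ~6.15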
cltk/cltk
|
cltk/tokenize/sentence.py
|
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/tokenize/sentence.py#L96-L105
|
def tokenize(self, text: str, model: object = None):
"""
Method for tokenizing sentences with regular expressions.
:rtype: list
:param text: text to be tokenized into sentences
:type text: str
"""
sentences = re.split(self.pattern, text)
return sentences
|
[
"def",
"tokenize",
"(",
"self",
",",
"text",
":",
"str",
",",
"model",
":",
"object",
"=",
"None",
")",
":",
"sentences",
"=",
"re",
".",
"split",
"(",
"self",
".",
"pattern",
",",
"text",
")",
"return",
"sentences"
] |
Method for tokenizing sentences with regular expressions.
:rtype: list
:param text: text to be tokenized into sentences
:type text: str
|
[
"Method",
"for",
"tokenizing",
"sentences",
"with",
"regular",
"expressions",
"."
] |
python
|
train
|
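The tokenizer above is a single `re.split` on a language-specific pattern; the pattern in this sketch is a made-up stand-in, not CLTK's own:

import re

pattern = r'(?<=[.!?])\s+'   # made-up stand-in for the language-specific pattern
text = 'Gallia est omnis divisa in partes tres. Quarum unam incolunt Belgae.'
print(re.split(pattern, text))
# ['Gallia est omnis divisa in partes tres.', 'Quarum unam incolunt Belgae.']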
manns/pyspread
|
pyspread/src/lib/vlc.py
|
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L4702-L4712
|
def libvlc_media_list_item_at_index(p_ml, i_pos):
'''List media instance in media list at a position
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param i_pos: position in array where to insert.
@return: media instance at position i_pos, or NULL if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
'''
f = _Cfunctions.get('libvlc_media_list_item_at_index', None) or \
_Cfunction('libvlc_media_list_item_at_index', ((1,), (1,),), class_result(Media),
ctypes.c_void_p, MediaList, ctypes.c_int)
return f(p_ml, i_pos)
|
[
"def",
"libvlc_media_list_item_at_index",
"(",
"p_ml",
",",
"i_pos",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_list_item_at_index'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_list_item_at_index'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
")",
",",
"class_result",
"(",
"Media",
")",
",",
"ctypes",
".",
"c_void_p",
",",
"MediaList",
",",
"ctypes",
".",
"c_int",
")",
"return",
"f",
"(",
"p_ml",
",",
"i_pos",
")"
] |
List media instance in media list at a position
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param i_pos: position in array where to insert.
@return: media instance at position i_pos, or NULL if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
|
[
"List",
"media",
"instance",
"in",
"media",
"list",
"at",
"a",
"position",
"The",
"L",
"{",
"libvlc_media_list_lock",
"}",
"should",
"be",
"held",
"upon",
"entering",
"this",
"function",
"."
] |
python
|
train
|
phdata/sdc-api-tool
|
sdctool/api.py
|
https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L86-L103
|
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
|
[
"def",
"pipeline_status",
"(",
"url",
",",
"pipeline_id",
",",
"auth",
",",
"verify_ssl",
")",
":",
"status_result",
"=",
"requests",
".",
"get",
"(",
"url",
"+",
"'/'",
"+",
"pipeline_id",
"+",
"'/status'",
",",
"headers",
"=",
"X_REQ_BY",
",",
"auth",
"=",
"auth",
",",
"verify",
"=",
"verify_ssl",
")",
"status_result",
".",
"raise_for_status",
"(",
")",
"logging",
".",
"debug",
"(",
"'Status request: '",
"+",
"url",
"+",
"'/status'",
")",
"logging",
".",
"debug",
"(",
"status_result",
".",
"json",
"(",
")",
")",
"return",
"status_result",
".",
"json",
"(",
")"
] |
Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
    pipeline_id (str): the ID of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
|
[
"Retrieve",
"the",
"current",
"status",
"for",
"a",
"pipeline",
"."
] |
python
|
train
|
uber/rides-python-sdk
|
uber_rides/auth.py
|
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/auth.py#L194-L202
|
def _generate_state_token(self, length=32):
"""Generate CSRF State Token.
CSRF State Tokens are passed as a parameter in the authorization
URL and are checked when receiving responses from the Uber Auth
server to prevent request forgery.
"""
choices = ascii_letters + digits
return ''.join(SystemRandom().choice(choices) for _ in range(length))
|
[
"def",
"_generate_state_token",
"(",
"self",
",",
"length",
"=",
"32",
")",
":",
"choices",
"=",
"ascii_letters",
"+",
"digits",
"return",
"''",
".",
"join",
"(",
"SystemRandom",
"(",
")",
".",
"choice",
"(",
"choices",
")",
"for",
"_",
"in",
"range",
"(",
"length",
")",
")"
] |
Generate CSRF State Token.
CSRF State Tokens are passed as a parameter in the authorization
URL and are checked when receiving responses from the Uber Auth
server to prevent request forgery.
|
[
"Generate",
"CSRF",
"State",
"Token",
"."
] |
python
|
train
|
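The token construction in `_generate_state_token` draws from `SystemRandom`, a cryptographically strong source; a standalone sketch of the same construction:

from random import SystemRandom
from string import ascii_letters, digits

def generate_state_token(length=32):
    # Draw each character from a cryptographically strong source.
    choices = ascii_letters + digits
    return ''.join(SystemRandom().choice(choices) for _ in range(length))

token = generate_state_token()
print(len(token), token)   # 32 and a different value on every call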
cuihantao/andes
|
andes/routines/pflow.py
|
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/routines/pflow.py#L290-L323
|
def post(self):
"""
Post processing for solved systems.
Store load, generation data on buses.
Store reactive power generation on PVs and slack generators.
Calculate series flows and area flows.
Returns
-------
None
"""
if not self.solved:
return
system = self.system
exec(system.call.pfload)
system.Bus.Pl = system.dae.g[system.Bus.a]
system.Bus.Ql = system.dae.g[system.Bus.v]
exec(system.call.pfgen)
system.Bus.Pg = system.dae.g[system.Bus.a]
system.Bus.Qg = system.dae.g[system.Bus.v]
if system.PV.n:
system.PV.qg = system.dae.y[system.PV.q]
if system.SW.n:
system.SW.pg = system.dae.y[system.SW.p]
system.SW.qg = system.dae.y[system.SW.q]
exec(system.call.seriesflow)
system.Area.seriesflow(system.dae)
|
[
"def",
"post",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"solved",
":",
"return",
"system",
"=",
"self",
".",
"system",
"exec",
"(",
"system",
".",
"call",
".",
"pfload",
")",
"system",
".",
"Bus",
".",
"Pl",
"=",
"system",
".",
"dae",
".",
"g",
"[",
"system",
".",
"Bus",
".",
"a",
"]",
"system",
".",
"Bus",
".",
"Ql",
"=",
"system",
".",
"dae",
".",
"g",
"[",
"system",
".",
"Bus",
".",
"v",
"]",
"exec",
"(",
"system",
".",
"call",
".",
"pfgen",
")",
"system",
".",
"Bus",
".",
"Pg",
"=",
"system",
".",
"dae",
".",
"g",
"[",
"system",
".",
"Bus",
".",
"a",
"]",
"system",
".",
"Bus",
".",
"Qg",
"=",
"system",
".",
"dae",
".",
"g",
"[",
"system",
".",
"Bus",
".",
"v",
"]",
"if",
"system",
".",
"PV",
".",
"n",
":",
"system",
".",
"PV",
".",
"qg",
"=",
"system",
".",
"dae",
".",
"y",
"[",
"system",
".",
"PV",
".",
"q",
"]",
"if",
"system",
".",
"SW",
".",
"n",
":",
"system",
".",
"SW",
".",
"pg",
"=",
"system",
".",
"dae",
".",
"y",
"[",
"system",
".",
"SW",
".",
"p",
"]",
"system",
".",
"SW",
".",
"qg",
"=",
"system",
".",
"dae",
".",
"y",
"[",
"system",
".",
"SW",
".",
"q",
"]",
"exec",
"(",
"system",
".",
"call",
".",
"seriesflow",
")",
"system",
".",
"Area",
".",
"seriesflow",
"(",
"system",
".",
"dae",
")"
] |
Post processing for solved systems.
Store load, generation data on buses.
Store reactive power generation on PVs and slack generators.
Calculate series flows and area flows.
Returns
-------
None
|
[
"Post",
"processing",
"for",
"solved",
"systems",
"."
] |
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/wikisum.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L348-L379
|
def rank_reference_paragraphs(wiki_title, references_content, normalize=True):
"""Rank and return reference paragraphs by tf-idf score on title tokens."""
normalized_title = _normalize_text(wiki_title)
title_tokens = _tokens_to_score(
set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title))))
ref_paragraph_info = []
doc_counts = collections.defaultdict(int)
for ref in references_content:
for paragraph in ref.split("\n"):
normalized_paragraph = _normalize_text(paragraph)
if cc_utils.filter_paragraph(normalized_paragraph):
# Skip paragraph
continue
counts = _token_counts(normalized_paragraph, title_tokens)
for token in title_tokens:
if counts[token]:
doc_counts[token] += 1
content = normalized_paragraph if normalize else paragraph
info = {"content": content, "counts": counts}
ref_paragraph_info.append(info)
for info in ref_paragraph_info:
score = 0.
for token in title_tokens:
term_frequency = info["counts"][token]
inv_doc_frequency = (
float(len(ref_paragraph_info)) / max(doc_counts[token], 1))
score += term_frequency * math.log(inv_doc_frequency)
info["score"] = score
ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True)
return [info["content"] for info in ref_paragraph_info]
|
[
"def",
"rank_reference_paragraphs",
"(",
"wiki_title",
",",
"references_content",
",",
"normalize",
"=",
"True",
")",
":",
"normalized_title",
"=",
"_normalize_text",
"(",
"wiki_title",
")",
"title_tokens",
"=",
"_tokens_to_score",
"(",
"set",
"(",
"tokenizer",
".",
"encode",
"(",
"text_encoder",
".",
"native_to_unicode",
"(",
"normalized_title",
")",
")",
")",
")",
"ref_paragraph_info",
"=",
"[",
"]",
"doc_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"ref",
"in",
"references_content",
":",
"for",
"paragraph",
"in",
"ref",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"normalized_paragraph",
"=",
"_normalize_text",
"(",
"paragraph",
")",
"if",
"cc_utils",
".",
"filter_paragraph",
"(",
"normalized_paragraph",
")",
":",
"# Skip paragraph",
"continue",
"counts",
"=",
"_token_counts",
"(",
"normalized_paragraph",
",",
"title_tokens",
")",
"for",
"token",
"in",
"title_tokens",
":",
"if",
"counts",
"[",
"token",
"]",
":",
"doc_counts",
"[",
"token",
"]",
"+=",
"1",
"content",
"=",
"normalized_paragraph",
"if",
"normalize",
"else",
"paragraph",
"info",
"=",
"{",
"\"content\"",
":",
"content",
",",
"\"counts\"",
":",
"counts",
"}",
"ref_paragraph_info",
".",
"append",
"(",
"info",
")",
"for",
"info",
"in",
"ref_paragraph_info",
":",
"score",
"=",
"0.",
"for",
"token",
"in",
"title_tokens",
":",
"term_frequency",
"=",
"info",
"[",
"\"counts\"",
"]",
"[",
"token",
"]",
"inv_doc_frequency",
"=",
"(",
"float",
"(",
"len",
"(",
"ref_paragraph_info",
")",
")",
"/",
"max",
"(",
"doc_counts",
"[",
"token",
"]",
",",
"1",
")",
")",
"score",
"+=",
"term_frequency",
"*",
"math",
".",
"log",
"(",
"inv_doc_frequency",
")",
"info",
"[",
"\"score\"",
"]",
"=",
"score",
"ref_paragraph_info",
".",
"sort",
"(",
"key",
"=",
"lambda",
"el",
":",
"el",
"[",
"\"score\"",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"[",
"info",
"[",
"\"content\"",
"]",
"for",
"info",
"in",
"ref_paragraph_info",
"]"
] |
Rank and return reference paragraphs by tf-idf score on title tokens.
|
[
"Rank",
"and",
"return",
"reference",
"paragraphs",
"by",
"tf",
"-",
"idf",
"score",
"on",
"title",
"tokens",
"."
] |
python
|
train
|
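The `rank_reference_paragraphs` function above scores each reference paragraph by tf-idf restricted to the article title's tokens. A stand-alone sketch of the same scoring idea is shown below; it uses a plain `str.split` tokenizer in place of the tensor2tensor tokenizer and skips the paragraph filtering, so it only illustrates the ranking math, not the library's implementation.

import collections
import math

def rank_paragraphs(title, paragraphs):
    # Score each paragraph by tf-idf over the title's tokens.
    title_tokens = set(title.lower().split())
    doc_counts = collections.defaultdict(int)
    infos = []
    for paragraph in paragraphs:
        tokens = paragraph.lower().split()
        counts = {t: tokens.count(t) for t in title_tokens}
        for t in title_tokens:
            if counts[t]:
                doc_counts[t] += 1
        infos.append({"content": paragraph, "counts": counts})

    for info in infos:
        score = 0.0
        for t in title_tokens:
            tf = info["counts"][t]
            idf = math.log(float(len(infos)) / max(doc_counts[t], 1))
            score += tf * idf
        info["score"] = score

    infos.sort(key=lambda el: el["score"], reverse=True)
    return [info["content"] for info in infos]

print(rank_paragraphs("solar power",
                      ["Solar power uses sunlight.",
                       "Wind turbines spin.",
                       "Power grids move solar power to cities."]))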
pypyr/pypyr-cli
|
pypyr/pipelinerunner.py
|
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L17-L65
|
def get_parsed_context(pipeline, context_in_string):
    """Execute get_parsed_context handler if specified.

    Dynamically load the module specified by the context_parser key in pipeline
    dict and execute the get_parsed_context function on that module.

    Args:
        pipeline: dict. Pipeline object.
        context_in_string: string. Argument string used to initialize context.

    Returns:
        pypyr.context.Context() instance.

    Raises:
        AttributeError: parser specified on pipeline missing get_parsed_context
        function.
    """
    logger.debug("starting")
    if 'context_parser' in pipeline:
        parser_module_name = pipeline['context_parser']
        logger.debug(f"context parser found: {parser_module_name}")
        parser_module = pypyr.moduleloader.get_module(parser_module_name)

        try:
            logger.debug(f"running parser {parser_module_name}")
            result_context = parser_module.get_parsed_context(
                context_in_string)
            logger.debug(f"step {parser_module_name} done")
            # Downstream steps likely to expect context not to be None, hence
            # empty rather than None.
            if result_context is None:
                logger.debug(f"{parser_module_name} returned None. Using "
                             "empty context instead")
                return pypyr.context.Context()
            else:
                return pypyr.context.Context(result_context)
        except AttributeError:
            logger.error(f"The parser {parser_module_name} doesn't have a "
                         "get_parsed_context(context) function.")
            raise
    else:
        logger.debug("pipeline does not have custom context parser. Using "
                     "empty context.")

    logger.debug("done")
    # initialize to an empty dictionary because you want to be able to run
    # with no context.
    return pypyr.context.Context()
|
[
"def",
"get_parsed_context",
"(",
"pipeline",
",",
"context_in_string",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"if",
"'context_parser'",
"in",
"pipeline",
":",
"parser_module_name",
"=",
"pipeline",
"[",
"'context_parser'",
"]",
"logger",
".",
"debug",
"(",
"f\"context parser found: {parser_module_name}\"",
")",
"parser_module",
"=",
"pypyr",
".",
"moduleloader",
".",
"get_module",
"(",
"parser_module_name",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"f\"running parser {parser_module_name}\"",
")",
"result_context",
"=",
"parser_module",
".",
"get_parsed_context",
"(",
"context_in_string",
")",
"logger",
".",
"debug",
"(",
"f\"step {parser_module_name} done\"",
")",
"# Downstream steps likely to expect context not to be None, hence",
"# empty rather than None.",
"if",
"result_context",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"f\"{parser_module_name} returned None. Using \"",
"\"empty context instead\"",
")",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
")",
"else",
":",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
"result_context",
")",
"except",
"AttributeError",
":",
"logger",
".",
"error",
"(",
"f\"The parser {parser_module_name} doesn't have a \"",
"\"get_parsed_context(context) function.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"\"pipeline does not have custom context parser. Using \"",
"\"empty context.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"# initialize to an empty dictionary because you want to be able to run",
"# with no context.",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
")"
] |
Execute get_parsed_context handler if specified.
Dynamically load the module specified by the context_parser key in pipeline
dict and execute the get_parsed_context function on that module.
Args:
pipeline: dict. Pipeline object.
context_in_string: string. Argument string used to initialize context.
Returns:
pypyr.context.Context() instance.
Raises:
AttributeError: parser specified on pipeline missing get_parsed_context
function.
|
[
"Execute",
"get_parsed_context",
"handler",
"if",
"specified",
"."
] |
python
|
train
|
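The `get_parsed_context` helper above dynamically imports the module named by the pipeline's `context_parser` key and calls its `get_parsed_context` hook, substituting an empty context when the parser returns nothing. A rough stand-alone sketch of that pattern follows; it uses `importlib` and a plain `dict` in place of `pypyr.moduleloader` and `pypyr.context.Context`.

import importlib

def parse_context(parser_module_name, context_in_string):
    # Dynamically import the named parser module and run its
    # get_parsed_context() hook; fall back to an empty dict so
    # downstream steps never see None.
    module = importlib.import_module(parser_module_name)
    try:
        result = module.get_parsed_context(context_in_string)
    except AttributeError:
        raise AttributeError(
            f"{parser_module_name} has no get_parsed_context() function")
    return dict(result) if result is not None else {}

# Example: the stdlib json module has no get_parsed_context(), so this raises.
try:
    parse_context("json", "a=1,b=2")
except AttributeError as exc:
    print(exc)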
ihmeuw/vivarium
|
src/vivarium/config_tree.py
|
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/config_tree.py#L397-L414
|
def _load(self, f, layer=None, source=None):
    """Load data from a yaml formatted file.

    Parameters
    ----------
    f : str or file like object
        If f is a string then it is interpreted as a path to the file to load
        If it is a file like object then data is read directly from it.
    layer : str
        layer to load data into. If none is supplied the outermost one is used
    source : str
        Source to attribute the values to
    """
    if hasattr(f, 'read'):
        self._loads(f.read(), layer=layer, source=source)
    else:
        with open(f) as f:
            self._loads(f.read(), layer=layer, source=source)
|
[
"def",
"_load",
"(",
"self",
",",
"f",
",",
"layer",
"=",
"None",
",",
"source",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"f",
",",
"'read'",
")",
":",
"self",
".",
"_loads",
"(",
"f",
".",
"read",
"(",
")",
",",
"layer",
"=",
"layer",
",",
"source",
"=",
"source",
")",
"else",
":",
"with",
"open",
"(",
"f",
")",
"as",
"f",
":",
"self",
".",
"_loads",
"(",
"f",
".",
"read",
"(",
")",
",",
"layer",
"=",
"layer",
",",
"source",
"=",
"source",
")"
] |
Load data from a yaml formatted file.
Parameters
----------
f : str or file like object
If f is a string then it is interpreted as a path to the file to load
If it is a file like object then data is read directly from it.
layer : str
layer to load data into. If none is supplied the outermost one is used
source : str
Source to attribute the values to
|
[
"Load",
"data",
"from",
"a",
"yaml",
"formatted",
"file",
"."
] |
python
|
train
|
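The `_load` method above accepts either a path or an open file-like object and dispatches on `hasattr(f, 'read')`. A minimal sketch of that duck-typing dispatch, reduced to returning the raw text rather than parsing yaml, is shown below.

import io

def read_text(f):
    # Accept either a path or an open file-like object, mirroring the
    # hasattr(f, 'read') dispatch used in _load above.
    if hasattr(f, 'read'):
        return f.read()
    with open(f) as handle:
        return handle.read()

print(read_text(io.StringIO("key: value\n")))   # file-like object
# read_text("config.yaml") would read from disk instead (path assumed to exist).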
trailofbits/manticore
|
manticore/core/executor.py
|
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/executor.py#L347-L405
|
def fork(self, state, expression, policy='ALL', setstate=None):
    """
    Fork state on expression concretizations.
    Using policy build a list of solutions for expression.
    For the state on each solution setting the new state with setstate

    For example if expression is a Bool it may have 2 solutions. True or False.

                                 Parent
                            (expression = ??)

                   Child1                         Child2
            (expression = True)           (expression = False)
               setstate(True)                setstate(False)

    The optional setstate() function is supposed to set the concrete value
    in the child state.
    """
    assert isinstance(expression, Expression)

    if setstate is None:
        setstate = lambda x, y: None

    # Find a set of solutions for expression
    solutions = state.concretize(expression, policy)
    if not solutions:
        raise ExecutorError("Forking on unfeasible constraint set")

    if len(solutions) == 1:
        setstate(state, solutions[0])
        return state

    logger.info("Forking. Policy: %s. Values: %s",
                policy,
                ', '.join(f'0x{sol:x}' for sol in solutions))

    self._publish('will_fork_state', state, expression, solutions, policy)

    # Build and enqueue a state for each solution
    children = []
    for new_value in solutions:
        with state as new_state:
            new_state.constrain(expression == new_value)
            # and set the PC of the new state to the concrete pc-dest
            # (or other register or memory address to concrete)
            setstate(new_state, new_value)
            self._publish('did_fork_state', new_state, expression, new_value, policy)
            # enqueue new_state
            state_id = self.enqueue(new_state)
            # maintain a list of children for logging purpose
            children.append(state_id)

    logger.info("Forking current state into states %r", children)
    return None
|
[
"def",
"fork",
"(",
"self",
",",
"state",
",",
"expression",
",",
"policy",
"=",
"'ALL'",
",",
"setstate",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"expression",
",",
"Expression",
")",
"if",
"setstate",
"is",
"None",
":",
"setstate",
"=",
"lambda",
"x",
",",
"y",
":",
"None",
"# Find a set of solutions for expression",
"solutions",
"=",
"state",
".",
"concretize",
"(",
"expression",
",",
"policy",
")",
"if",
"not",
"solutions",
":",
"raise",
"ExecutorError",
"(",
"\"Forking on unfeasible constraint set\"",
")",
"if",
"len",
"(",
"solutions",
")",
"==",
"1",
":",
"setstate",
"(",
"state",
",",
"solutions",
"[",
"0",
"]",
")",
"return",
"state",
"logger",
".",
"info",
"(",
"\"Forking. Policy: %s. Values: %s\"",
",",
"policy",
",",
"', '",
".",
"join",
"(",
"f'0x{sol:x}'",
"for",
"sol",
"in",
"solutions",
")",
")",
"self",
".",
"_publish",
"(",
"'will_fork_state'",
",",
"state",
",",
"expression",
",",
"solutions",
",",
"policy",
")",
"# Build and enqueue a state for each solution",
"children",
"=",
"[",
"]",
"for",
"new_value",
"in",
"solutions",
":",
"with",
"state",
"as",
"new_state",
":",
"new_state",
".",
"constrain",
"(",
"expression",
"==",
"new_value",
")",
"# and set the PC of the new state to the concrete pc-dest",
"#(or other register or memory address to concrete)",
"setstate",
"(",
"new_state",
",",
"new_value",
")",
"self",
".",
"_publish",
"(",
"'did_fork_state'",
",",
"new_state",
",",
"expression",
",",
"new_value",
",",
"policy",
")",
"# enqueue new_state",
"state_id",
"=",
"self",
".",
"enqueue",
"(",
"new_state",
")",
"# maintain a list of children for logging purpose",
"children",
".",
"append",
"(",
"state_id",
")",
"logger",
".",
"info",
"(",
"\"Forking current state into states %r\"",
",",
"children",
")",
"return",
"None"
] |
Fork state on expression concretizations.
Using policy build a list of solutions for expression.
For the state on each solution setting the new state with setstate
For example if expression is a Bool it may have 2 solutions. True or False.
                             Parent
                        (expression = ??)

               Child1                         Child2
        (expression = True)           (expression = False)
           setstate(True)                setstate(False)
The optional setstate() function is supposed to set the concrete value
in the child state.
|
[
"Fork",
"state",
"on",
"expression",
"concretizations",
".",
"Using",
"policy",
"build",
"a",
"list",
"of",
"solutions",
"for",
"expression",
".",
"For",
"the",
"state",
"on",
"each",
"solution",
"setting",
"the",
"new",
"state",
"with",
"setstate"
] |
python
|
valid
|
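The `fork` method above builds one child state per concrete solution of a symbolic expression, constraining each child to its value before enqueueing it. The sketch below reproduces that fan-out with a toy `State` class and `copy.deepcopy`; it is a simplified illustration of the pattern, not Manticore's state machinery.

import copy

class State:
    # Toy stand-in for a symbolic-execution state: just a dict of
    # concrete assignments plus a list of recorded constraints.
    def __init__(self):
        self.values = {}
        self.constraints = []

    def constrain(self, text):
        self.constraints.append(text)

def fork(state, name, solutions, setstate):
    # One child per concrete solution, each constrained to that value.
    children = []
    for value in solutions:
        child = copy.deepcopy(state)
        child.constrain(f"{name} == {value}")
        setstate(child, value)
        children.append(child)
    return children

parent = State()
kids = fork(parent, "flag", [True, False],
            lambda st, v: st.values.update({"flag": v}))
print([k.constraints for k in kids])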
a1ezzz/wasp-general
|
wasp_general/config.py
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/config.py#L66-L83
|
def merge_section(self, config, section_to, section_from=None):
    """ Load configuration section from other configuration. If specified section doesn't exist in current
    configuration, then it will be added automatically.

    :param config: source configuration
    :param section_to: destination section name
    :param section_from: source section name (if it is None, then section_to is used as source section name)

    :return: None
    """
    section_from = section_from if section_from is not None else section_to
    if section_from not in config.sections():
        raise ValueError('There is no such section "%s" in config' % section_from)

    if section_to not in self.sections():
        self.add_section(section_to)

    for option in config[section_from].keys():
        self.set(section_to, option, config[section_from][option])
|
[
"def",
"merge_section",
"(",
"self",
",",
"config",
",",
"section_to",
",",
"section_from",
"=",
"None",
")",
":",
"section_from",
"=",
"section_from",
"if",
"section_from",
"is",
"not",
"None",
"else",
"section_to",
"if",
"section_from",
"not",
"in",
"config",
".",
"sections",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'There is no such section \"%s\" in config'",
"%",
"section_from",
")",
"if",
"section_to",
"not",
"in",
"self",
".",
"sections",
"(",
")",
":",
"self",
".",
"add_section",
"(",
"section_to",
")",
"for",
"option",
"in",
"config",
"[",
"section_from",
"]",
".",
"keys",
"(",
")",
":",
"self",
".",
"set",
"(",
"section_to",
",",
"option",
",",
"config",
"[",
"section_from",
"]",
"[",
"option",
"]",
")"
] |
Load configuration section from other configuration. If specified section doesn't exist in current
configuration, then it will be added automatically.
:param config: source configuration
:param section_to: destination section name
:param section_from: source section name (if it is None, then section_to is used as source section name)
:return: None
|
[
"Load",
"configuration",
"section",
"from",
"other",
"configuration",
".",
"If",
"specified",
"section",
"doesn",
"t",
"exist",
"in",
"current",
"configuration",
"then",
"it",
"will",
"be",
"added",
"automatically",
"."
] |
python
|
train
|
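The `merge_section` method above copies one section of a source parser into the current parser, creating the destination section on demand. A stand-alone sketch of the same logic over two plain `configparser.ConfigParser` objects:

import configparser

def merge_section(dst, src, section_to, section_from=None):
    # Copy every option of src[section_from] into dst[section_to],
    # creating the destination section on demand.
    section_from = section_from if section_from is not None else section_to
    if section_from not in src.sections():
        raise ValueError('There is no such section "%s" in config' % section_from)
    if section_to not in dst.sections():
        dst.add_section(section_to)
    for option in src[section_from]:
        dst.set(section_to, option, src[section_from][option])

src = configparser.ConfigParser()
src.read_string("[db]\nhost = localhost\nport = 5432\n")
dst = configparser.ConfigParser()
merge_section(dst, src, "db")
print(dict(dst["db"]))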