repo
stringlengths 7
54
| path
stringlengths 4
192
| url
stringlengths 87
284
| code
stringlengths 78
104k
| code_tokens
list | docstring
stringlengths 1
46.9k
| docstring_tokens
list | language
stringclasses 1
value | partition
stringclasses 3
values |
---|---|---|---|---|---|---|---|---|
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L637-L641
|
def OnMoreSquareToggle( self, event ):
    """Flip the map's "more square" rendering flag and refresh the view.

    The more-square layout looks better but is more likely to filter
    records.  The menu item's check mark is kept in sync with the flag.
    """
    new_style = not self.squareMap.square_style
    self.squareMap.square_style = new_style
    self.squareMap.Refresh()
    self.moreSquareViewItem.Check(new_style)
|
[
"def",
"OnMoreSquareToggle",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"squareMap",
".",
"square_style",
"=",
"not",
"self",
".",
"squareMap",
".",
"square_style",
"self",
".",
"squareMap",
".",
"Refresh",
"(",
")",
"self",
".",
"moreSquareViewItem",
".",
"Check",
"(",
"self",
".",
"squareMap",
".",
"square_style",
")"
] |
Toggle the more-square view (better looking, but more likely to filter records)
|
[
"Toggle",
"the",
"more",
"-",
"square",
"view",
"(",
"better",
"looking",
"but",
"more",
"likely",
"to",
"filter",
"records",
")"
] |
python
|
train
|
openpermissions/perch
|
perch/organisation.py
|
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/organisation.py#L475-L488
|
def can_update(self, user, **kwargs):
    """Org admins may not update organisation_id or service_type.

    Signals the outcome by raising ``Return((allowed, fields))`` (presumably
    the Tornado coroutine-return idiom -- ``Return`` is defined elsewhere):
    ``allowed`` is a bool and ``fields`` is the set of offending field names
    (empty when none).
    """
    # System admins may update anything.
    if user.is_admin():
        raise Return((True, set()))

    # Only the org admin or the record's creator may proceed at all.
    is_creator = self.created_by == user.id
    if not (user.is_org_admin(self.organisation_id) or is_creator):
        raise Return((False, set()))

    # Even authorised users may not touch these protected fields.
    blocked = {'service_type', 'organisation_id'} & set(kwargs.keys())
    if blocked:
        raise Return((False, blocked))
    raise Return((True, set()))
|
[
"def",
"can_update",
"(",
"self",
",",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"user",
".",
"is_admin",
"(",
")",
":",
"raise",
"Return",
"(",
"(",
"True",
",",
"set",
"(",
"[",
"]",
")",
")",
")",
"is_creator",
"=",
"self",
".",
"created_by",
"==",
"user",
".",
"id",
"if",
"not",
"(",
"user",
".",
"is_org_admin",
"(",
"self",
".",
"organisation_id",
")",
"or",
"is_creator",
")",
":",
"raise",
"Return",
"(",
"(",
"False",
",",
"set",
"(",
"[",
"]",
")",
")",
")",
"fields",
"=",
"(",
"{",
"'service_type'",
",",
"'organisation_id'",
"}",
"&",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
"if",
"fields",
":",
"raise",
"Return",
"(",
"(",
"False",
",",
"fields",
")",
")",
"else",
":",
"raise",
"Return",
"(",
"(",
"True",
",",
"set",
"(",
"[",
"]",
")",
")",
")"
] |
Org admins may not update organisation_id or service_type
|
[
"Org",
"admins",
"may",
"not",
"update",
"organisation_id",
"or",
"service_type"
] |
python
|
train
|
twisted/mantissa
|
xmantissa/sharing.py
|
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/sharing.py#L470-L487
|
def _interfacesToNames(interfaces):
    """
    Convert an iterable of interfaces into a comma-separated unicode string
    of qualified names suitable for storage in the database.
    @param interfaces: an iterable of Interface objects, or the
        ALL_IMPLEMENTED sentinel.
    @return: a unicode string, a comma-separated list of names of interfaces
        (or the ALL_IMPLEMENTED_DB marker when the sentinel was passed).
    @raise ConflictingNames: if any of the names conflict: see
        L{_checkConflictingNames}.
    """
    # The sentinel bypasses name validation entirely and maps to a fixed
    # database marker.
    if interfaces is ALL_IMPLEMENTED:
        return ALL_IMPLEMENTED_DB
    _checkConflictingNames(interfaces)
    return u','.join(qual(iface) for iface in interfaces)
|
[
"def",
"_interfacesToNames",
"(",
"interfaces",
")",
":",
"if",
"interfaces",
"is",
"ALL_IMPLEMENTED",
":",
"names",
"=",
"ALL_IMPLEMENTED_DB",
"else",
":",
"_checkConflictingNames",
"(",
"interfaces",
")",
"names",
"=",
"u','",
".",
"join",
"(",
"map",
"(",
"qual",
",",
"interfaces",
")",
")",
"return",
"names"
] |
Convert from a list of interfaces to a unicode string of names suitable for
storage in the database.
@param interfaces: an iterable of Interface objects.
@return: a unicode string, a comma-separated list of names of interfaces.
@raise ConflictingNames: if any of the names conflict: see
L{_checkConflictingNames}.
|
[
"Convert",
"from",
"a",
"list",
"of",
"interfaces",
"to",
"a",
"unicode",
"string",
"of",
"names",
"suitable",
"for",
"storage",
"in",
"the",
"database",
"."
] |
python
|
train
|
google/prettytensor
|
prettytensor/scopes.py
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/scopes.py#L27-L74
|
def var_and_name_scope(names):
  """Creates a variable scope and a name scope.

  If a variable_scope is provided, this will reenter that variable scope.
  However, if none is provided then the variable scope will match the generated
  part of the name scope.

  Args:
    names: A tuple of name_scope, variable_scope or None.

  Yields:
    The result of name_scope and variable_scope as a tuple.
  """
  # pylint: disable=protected-access
  if not names:
    # Nothing to scope: yield a no-op pair so callers can use this
    # uniformly in a `with` regardless of whether scoping was requested.
    yield None, None
  else:
    name, var_scope = names
    with tf.name_scope(name) as scope:
      # TODO(eiderman): This is a workaround until the variable_scope updates land
      # in a TF release.
      old_vs = tf.get_variable_scope()
      if var_scope is None:
        # Derive the variable-scope name from the part of the (absolute)
        # name scope that tf.name_scope just generated for `name`, which
        # may itself contain '/' separators -- hence splitting by the
        # component count of `name`.
        count = len(name.split('/'))
        scoped_name = '/'.join(scope.split('/')[-count - 1:-1])
        full_name = (old_vs.name + '/' + scoped_name).lstrip('/')
      else:
        full_name = var_scope.name
      # HACK: swap the VariableScope stored in the graph collection so code
      # inside the `with` body observes the desired scope; restored in the
      # `finally` below.
      vs_key = tf.get_collection_ref(variable_scope._VARSCOPE_KEY)
      try:
        # TODO(eiderman): Remove this hack or fix the full file.
        try:
          vs_key[0] = tf.VariableScope(
              old_vs.reuse,
              name=full_name,
              initializer=old_vs.initializer,
              regularizer=old_vs.regularizer,
              caching_device=old_vs.caching_device)
        except AttributeError:
          # Fallback for older TF where VariableScope lives in the
          # variable_scope module and takes fewer constructor arguments.
          vs_key[0] = variable_scope._VariableScope(
              old_vs.reuse,
              name=full_name,
              initializer=old_vs.initializer)
        vs_key[0].name_scope = scope
        yield scope, vs_key[0]
      finally:
        # Always restore the caller's original variable scope, even if the
        # body raised.
        vs_key[0] = old_vs
|
[
"def",
"var_and_name_scope",
"(",
"names",
")",
":",
"# pylint: disable=protected-access",
"if",
"not",
"names",
":",
"yield",
"None",
",",
"None",
"else",
":",
"name",
",",
"var_scope",
"=",
"names",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
"as",
"scope",
":",
"# TODO(eiderman): This is a workaround until the variable_scope updates land",
"# in a TF release.",
"old_vs",
"=",
"tf",
".",
"get_variable_scope",
"(",
")",
"if",
"var_scope",
"is",
"None",
":",
"count",
"=",
"len",
"(",
"name",
".",
"split",
"(",
"'/'",
")",
")",
"scoped_name",
"=",
"'/'",
".",
"join",
"(",
"scope",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"count",
"-",
"1",
":",
"-",
"1",
"]",
")",
"full_name",
"=",
"(",
"old_vs",
".",
"name",
"+",
"'/'",
"+",
"scoped_name",
")",
".",
"lstrip",
"(",
"'/'",
")",
"else",
":",
"full_name",
"=",
"var_scope",
".",
"name",
"vs_key",
"=",
"tf",
".",
"get_collection_ref",
"(",
"variable_scope",
".",
"_VARSCOPE_KEY",
")",
"try",
":",
"# TODO(eiderman): Remove this hack or fix the full file.",
"try",
":",
"vs_key",
"[",
"0",
"]",
"=",
"tf",
".",
"VariableScope",
"(",
"old_vs",
".",
"reuse",
",",
"name",
"=",
"full_name",
",",
"initializer",
"=",
"old_vs",
".",
"initializer",
",",
"regularizer",
"=",
"old_vs",
".",
"regularizer",
",",
"caching_device",
"=",
"old_vs",
".",
"caching_device",
")",
"except",
"AttributeError",
":",
"vs_key",
"[",
"0",
"]",
"=",
"variable_scope",
".",
"_VariableScope",
"(",
"old_vs",
".",
"reuse",
",",
"name",
"=",
"full_name",
",",
"initializer",
"=",
"old_vs",
".",
"initializer",
")",
"vs_key",
"[",
"0",
"]",
".",
"name_scope",
"=",
"scope",
"yield",
"scope",
",",
"vs_key",
"[",
"0",
"]",
"finally",
":",
"vs_key",
"[",
"0",
"]",
"=",
"old_vs"
] |
Creates a variable scope and a name scope.
If a variable_scope is provided, this will reenter that variable scope.
However, if none is provided then the variable scope will match the generated
part of the name scope.
Args:
names: A tuple of name_scope, variable_scope or None.
Yields:
The result of name_scope and variable_scope as a tuple.
|
[
"Creates",
"a",
"variable",
"scope",
"and",
"a",
"name",
"scope",
"."
] |
python
|
train
|
iterative/dvc
|
dvc/cli.py
|
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/cli.py#L122-L167
|
def parse_args(argv=None):
    """Parses CLI arguments.

    Args:
        argv: optional list of arguments to parse. sys.argv is used by default.

    Raises:
        dvc.exceptions.DvcParserError: raised for argument parsing errors.
    """
    parent_parser = get_parent_parser()

    # Main parser
    parser = DvcParser(
        prog="dvc",
        description="Data Version Control",
        parents=[parent_parser],
        formatter_class=argparse.RawTextHelpFormatter,
    )

    # NOTE: On some python versions action='version' prints to stderr
    # instead of stdout https://bugs.python.org/issue18920
    parser.add_argument(
        "-V",
        "--version",
        action=VersionAction,
        nargs=0,
        help="Show program's version.",
    )

    # Sub commands
    subparsers = parser.add_subparsers(
        title="Available Commands",
        metavar="COMMAND",
        dest="cmd",
        help="Use dvc COMMAND --help for command-specific help.",
    )
    fix_subparsers(subparsers)

    # Let every registered command attach its own sub-parser.
    for command in COMMANDS:
        command.add_parser(subparsers, parent_parser)

    return parser.parse_args(argv)
|
[
"def",
"parse_args",
"(",
"argv",
"=",
"None",
")",
":",
"parent_parser",
"=",
"get_parent_parser",
"(",
")",
"# Main parser",
"desc",
"=",
"\"Data Version Control\"",
"parser",
"=",
"DvcParser",
"(",
"prog",
"=",
"\"dvc\"",
",",
"description",
"=",
"desc",
",",
"parents",
"=",
"[",
"parent_parser",
"]",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
",",
")",
"# NOTE: On some python versions action='version' prints to stderr",
"# instead of stdout https://bugs.python.org/issue18920",
"parser",
".",
"add_argument",
"(",
"\"-V\"",
",",
"\"--version\"",
",",
"action",
"=",
"VersionAction",
",",
"nargs",
"=",
"0",
",",
"help",
"=",
"\"Show program's version.\"",
",",
")",
"# Sub commands",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"\"Available Commands\"",
",",
"metavar",
"=",
"\"COMMAND\"",
",",
"dest",
"=",
"\"cmd\"",
",",
"help",
"=",
"\"Use dvc COMMAND --help for command-specific help.\"",
",",
")",
"fix_subparsers",
"(",
"subparsers",
")",
"for",
"cmd",
"in",
"COMMANDS",
":",
"cmd",
".",
"add_parser",
"(",
"subparsers",
",",
"parent_parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"return",
"args"
] |
Parses CLI arguments.
Args:
argv: optional list of arguments to parse. sys.argv is used by default.
Raises:
dvc.exceptions.DvcParserError: raised for argument parsing errors.
|
[
"Parses",
"CLI",
"arguments",
"."
] |
python
|
train
|
google/textfsm
|
textfsm/parser.py
|
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L659-L680
|
def _Parse(self, template):
  """Parses template file for FSM structure.

  Args:
    template: Valid template file.

  Raises:
    TextFSMTemplateError: If template file syntax is invalid.
  """
  if not template:
    raise TextFSMTemplateError('Null template.')

  # The header (variable declarations) always comes first.
  self._ParseFSMVariables(template)

  # Consume state definitions until none remain.
  more_states = True
  while more_states:
    more_states = self._ParseFSMState(template)

  # Finally, check that every rule's destination state exists.
  self._ValidateFSM()
|
[
"def",
"_Parse",
"(",
"self",
",",
"template",
")",
":",
"if",
"not",
"template",
":",
"raise",
"TextFSMTemplateError",
"(",
"'Null template.'",
")",
"# Parse header with Variables.",
"self",
".",
"_ParseFSMVariables",
"(",
"template",
")",
"# Parse States.",
"while",
"self",
".",
"_ParseFSMState",
"(",
"template",
")",
":",
"pass",
"# Validate destination states.",
"self",
".",
"_ValidateFSM",
"(",
")"
] |
Parses template file for FSM structure.
Args:
template: Valid template file.
Raises:
TextFSMTemplateError: If template file syntax is invalid.
|
[
"Parses",
"template",
"file",
"for",
"FSM",
"structure",
"."
] |
python
|
train
|
CiscoUcs/UcsPythonSDK
|
src/UcsSdk/UcsHandle_Edit.py
|
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsHandle_Edit.py#L2157-L2281
|
def SetManagedObject(self, inMo, classId=None, params=None, dumpXml=None):
    """
    Modifies Managed Object in UCS.
    - inMo, if provided, it acts as the target object for the present operation. It should be None unless a user wants to provide an
      inMo. It can be a single MO or a list containing multiple managed objects.
    - classId of the managed object/s to be modified.
    - params contains semicolon (;) separated list of key/value pairs(key=value), that are used as filters for selecting specific managed
      objects. The key should be a valid property of the managed object to be modified.
    """
    from UcsBase import UcsUtils, ManagedObject, WriteUcsWarning, WriteObject, UcsException, UcsValidationException
    from Ucs import ClassFactory, Pair, ConfigMap

    # Property names supplied by the caller (empty when params is None).
    if params != None:
        keys = params.keys()
    else:
        keys = []
    unknownMo = False
    dn = None
    obj = None
    configMap = None
    dnParamSet = False

    # Normalise classId against the MO meta data, when one was supplied.
    if (classId != None and classId != ""):
        metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(classId)
        if (metaClassId != None):
            classId = metaClassId
            moMeta = UcsUtils.GetUcsPropertyMeta(classId, "Meta")
        else:
            # classId is unknown to the meta data; all of its properties will
            # be stored as "extra" properties below.
            unknownMo = True

    # Case 1: caller supplied an explicit "Dn" in params.  Build the config
    # map directly from classId + params; no parent MO (inMo) is needed.
    for k in keys:
        if (k.lower() == "dn"):
            # ClassId And Dn Specified - No Parent Necessary
            dnParamSet = True
            dn = params[k]
            obj = ManagedObject(classId)
            for prop in keys:
                propMoMeta = UcsUtils.IsPropertyInMetaIgnoreCase(classId, prop)
                if (propMoMeta != None):
                    if (prop.lower() == "rn" or prop.lower() == "dn"):
                        pass
                    elif (propMoMeta.access == UcsPropertyMeta.ReadOnly):
                        # TODO: Add Warning/Error messages in Logger.
                        WriteUcsWarning(
                            "[Warning]: SetManagedObject [Description] Attempt to set non-writable property %s in Class %s" % (
                                prop, classId))
                    # NOTE(review): this assignment sits after the if/elif, so
                    # it also runs for rn/dn and for read-only properties (the
                    # warning above does not skip it); "Dn" is overwritten
                    # explicitly below -- confirm this is intended.
                    obj.setattr(propMoMeta.name, str(params[prop]))
                else:
                    # Sets the unknown property/value as XtraProperty in obj
                    obj.setattr(UcsUtils.WordL(prop), str(params[prop]))
            obj.setattr("Dn", dn)
            obj.setattr("Status", Status().MODIFIED)
            pair = Pair()
            pair.setattr("Key", obj.getattr("Dn"))
            pair.AddChild(obj)
            configMap = ConfigMap()
            configMap.AddChild(pair)

    # Case 2: no "Dn" key was present -- derive the target objects from inMo.
    if (dnParamSet == False):
        # ClassId is None, inMo Necessary
        if ((inMo == None) or (not isinstance(inMo, list)) or (len(inMo) == 0)):
            if (classId == None or classId == ""):
                raise UcsValidationException(
                    '[Error]: SetManagedObject [Description]: inMo and ClassId are both not specified')
                # raise Exception('[Error]: SetManagedObject [Description]: inMo and ClassId are both not specified')
            else:
                raise UcsValidationException(
                    '[Error]: SetManagedObject [Description]: inMo and Dn are both not specified')
                # raise Exception('[Error]: SetManagedObject [Description]: inMo and Dn are both not specified')
        configMap = ConfigMap()
        for mo in inMo:
            obj = ManagedObject(mo.propMoMeta.name)
            dn = mo.getattr("Dn")
            if (classId == None or classId == ""):
                classId = mo.propMoMeta.name
            elif (classId.lower() != (
                    mo.propMoMeta.name).lower()):  # check that classId(just in case classId have value) and parentMo's classId is equal.
                # TODO: Add Warning/Error messages in Logger.
                WriteUcsWarning(
                    "[Warning]: SetManagedObject [Description] ClassId does not match with inMo's classId.")
                classId = mo.propMoMeta.name
            for prop in keys:
                propMoMeta = UcsUtils.IsPropertyInMetaIgnoreCase(classId, prop)
                if (propMoMeta != None):
                    if (prop.lower() == "rn" or prop.lower() == "dn"):
                        pass
                    elif (propMoMeta.access == UcsPropertyMeta.ReadOnly):
                        # TODO: Add Warning/Error messages in Logger.
                        WriteUcsWarning(
                            "[Warning]: SetManagedObject [Description] Attempt to set non-writeable property %s in Class %s" % (
                                prop, classId))
                    # NOTE(review): as in the Dn branch above, this also runs
                    # for rn/dn and read-only properties -- confirm intended.
                    obj.setattr(propMoMeta.name, str(params[prop]))
                else:
                    # Sets the unknown property/value as XtraProperty in obj
                    obj.setattr(UcsUtils.WordL(prop), str(params[prop]))
            obj.setattr("Dn", dn)
            obj.setattr("Status", Status().MODIFIED)
            pair = Pair()
            pair.setattr("Key", obj.getattr("Dn"))
            pair.AddChild(obj)
            configMap.AddChild(pair)

    # Push the accumulated config map to UCS and collect the resulting MOs.
    if (configMap != None):
        ccm = self.ConfigConfMos(configMap, YesOrNo.FALSE, dumpXml)
        if ccm.errorCode == 0:
            moList = []
            for child in ccm.OutConfigs.GetChild():
                if (isinstance(child, Pair) == True):
                    for mo in child.GetChild():
                        moList.append(mo)
                elif (isinstance(child, ManagedObject) == True):
                    moList.append(child)
            # WriteObject(moList)
            return moList
        else:
            raise UcsException(ccm.errorCode, ccm.errorDescr)
|
[
"def",
"SetManagedObject",
"(",
"self",
",",
"inMo",
",",
"classId",
"=",
"None",
",",
"params",
"=",
"None",
",",
"dumpXml",
"=",
"None",
")",
":",
"from",
"UcsBase",
"import",
"UcsUtils",
",",
"ManagedObject",
",",
"WriteUcsWarning",
",",
"WriteObject",
",",
"UcsException",
",",
"UcsValidationException",
"from",
"Ucs",
"import",
"ClassFactory",
",",
"Pair",
",",
"ConfigMap",
"if",
"params",
"!=",
"None",
":",
"keys",
"=",
"params",
".",
"keys",
"(",
")",
"else",
":",
"keys",
"=",
"[",
"]",
"unknownMo",
"=",
"False",
"dn",
"=",
"None",
"obj",
"=",
"None",
"configMap",
"=",
"None",
"dnParamSet",
"=",
"False",
"if",
"(",
"classId",
"!=",
"None",
"and",
"classId",
"!=",
"\"\"",
")",
":",
"metaClassId",
"=",
"UcsUtils",
".",
"FindClassIdInMoMetaIgnoreCase",
"(",
"classId",
")",
"if",
"(",
"metaClassId",
"!=",
"None",
")",
":",
"classId",
"=",
"metaClassId",
"moMeta",
"=",
"UcsUtils",
".",
"GetUcsPropertyMeta",
"(",
"classId",
",",
"\"Meta\"",
")",
"else",
":",
"unknownMo",
"=",
"True",
"for",
"k",
"in",
"keys",
":",
"if",
"(",
"k",
".",
"lower",
"(",
")",
"==",
"\"dn\"",
")",
":",
"# ClassId And Dn Specified - No Parent Necessary",
"dnParamSet",
"=",
"True",
"dn",
"=",
"params",
"[",
"k",
"]",
"obj",
"=",
"ManagedObject",
"(",
"classId",
")",
"for",
"prop",
"in",
"keys",
":",
"propMoMeta",
"=",
"UcsUtils",
".",
"IsPropertyInMetaIgnoreCase",
"(",
"classId",
",",
"prop",
")",
"if",
"(",
"propMoMeta",
"!=",
"None",
")",
":",
"if",
"(",
"prop",
".",
"lower",
"(",
")",
"==",
"\"rn\"",
"or",
"prop",
".",
"lower",
"(",
")",
"==",
"\"dn\"",
")",
":",
"pass",
"elif",
"(",
"propMoMeta",
".",
"access",
"==",
"UcsPropertyMeta",
".",
"ReadOnly",
")",
":",
"# TODO: Add Warning/Error messages in Logger.",
"WriteUcsWarning",
"(",
"\"[Warning]: SetManagedObject [Description] Attempt to set non-writable property %s in Class %s\"",
"%",
"(",
"prop",
",",
"classId",
")",
")",
"obj",
".",
"setattr",
"(",
"propMoMeta",
".",
"name",
",",
"str",
"(",
"params",
"[",
"prop",
"]",
")",
")",
"else",
":",
"# Sets the unknown property/value as XtraProperty in obj",
"obj",
".",
"setattr",
"(",
"UcsUtils",
".",
"WordL",
"(",
"prop",
")",
",",
"str",
"(",
"params",
"[",
"prop",
"]",
")",
")",
"obj",
".",
"setattr",
"(",
"\"Dn\"",
",",
"dn",
")",
"obj",
".",
"setattr",
"(",
"\"Status\"",
",",
"Status",
"(",
")",
".",
"MODIFIED",
")",
"pair",
"=",
"Pair",
"(",
")",
"pair",
".",
"setattr",
"(",
"\"Key\"",
",",
"obj",
".",
"getattr",
"(",
"\"Dn\"",
")",
")",
"pair",
".",
"AddChild",
"(",
"obj",
")",
"configMap",
"=",
"ConfigMap",
"(",
")",
"configMap",
".",
"AddChild",
"(",
"pair",
")",
"if",
"(",
"dnParamSet",
"==",
"False",
")",
":",
"# ClassId is None, inMo Necessary",
"if",
"(",
"(",
"inMo",
"==",
"None",
")",
"or",
"(",
"not",
"isinstance",
"(",
"inMo",
",",
"list",
")",
")",
"or",
"(",
"len",
"(",
"inMo",
")",
"==",
"0",
")",
")",
":",
"if",
"(",
"classId",
"==",
"None",
"or",
"classId",
"==",
"\"\"",
")",
":",
"raise",
"UcsValidationException",
"(",
"'[Error]: SetManagedObject [Description]: inMo and ClassId are both not specified'",
")",
"# raise Exception('[Error]: SetManagedObject [Description]: inMo and ClassId are both not specified')",
"else",
":",
"raise",
"UcsValidationException",
"(",
"'[Error]: SetManagedObject [Description]: inMo and Dn are both not specified'",
")",
"# raise Exception('[Error]: SetManagedObject [Description]: inMo and Dn are both not specified')",
"configMap",
"=",
"ConfigMap",
"(",
")",
"for",
"mo",
"in",
"inMo",
":",
"obj",
"=",
"ManagedObject",
"(",
"mo",
".",
"propMoMeta",
".",
"name",
")",
"dn",
"=",
"mo",
".",
"getattr",
"(",
"\"Dn\"",
")",
"if",
"(",
"classId",
"==",
"None",
"or",
"classId",
"==",
"\"\"",
")",
":",
"classId",
"=",
"mo",
".",
"propMoMeta",
".",
"name",
"elif",
"(",
"classId",
".",
"lower",
"(",
")",
"!=",
"(",
"mo",
".",
"propMoMeta",
".",
"name",
")",
".",
"lower",
"(",
")",
")",
":",
"# check that classId(just in case classId have value) and parentMo's classId is equal.",
"# TODO: Add Warning/Error messages in Logger.",
"WriteUcsWarning",
"(",
"\"[Warning]: SetManagedObject [Description] ClassId does not match with inMo's classId.\"",
")",
"classId",
"=",
"mo",
".",
"propMoMeta",
".",
"name",
"for",
"prop",
"in",
"keys",
":",
"propMoMeta",
"=",
"UcsUtils",
".",
"IsPropertyInMetaIgnoreCase",
"(",
"classId",
",",
"prop",
")",
"if",
"(",
"propMoMeta",
"!=",
"None",
")",
":",
"if",
"(",
"prop",
".",
"lower",
"(",
")",
"==",
"\"rn\"",
"or",
"prop",
".",
"lower",
"(",
")",
"==",
"\"dn\"",
")",
":",
"pass",
"elif",
"(",
"propMoMeta",
".",
"access",
"==",
"UcsPropertyMeta",
".",
"ReadOnly",
")",
":",
"# TODO: Add Warning/Error messages in Logger.",
"WriteUcsWarning",
"(",
"\"[Warning]: SetManagedObject [Description] Attempt to set non-writeable property %s in Class %s\"",
"%",
"(",
"prop",
",",
"classId",
")",
")",
"obj",
".",
"setattr",
"(",
"propMoMeta",
".",
"name",
",",
"str",
"(",
"params",
"[",
"prop",
"]",
")",
")",
"else",
":",
"# Sets the unknown property/value as XtraProperty in obj",
"obj",
".",
"setattr",
"(",
"UcsUtils",
".",
"WordL",
"(",
"prop",
")",
",",
"str",
"(",
"params",
"[",
"prop",
"]",
")",
")",
"obj",
".",
"setattr",
"(",
"\"Dn\"",
",",
"dn",
")",
"obj",
".",
"setattr",
"(",
"\"Status\"",
",",
"Status",
"(",
")",
".",
"MODIFIED",
")",
"pair",
"=",
"Pair",
"(",
")",
"pair",
".",
"setattr",
"(",
"\"Key\"",
",",
"obj",
".",
"getattr",
"(",
"\"Dn\"",
")",
")",
"pair",
".",
"AddChild",
"(",
"obj",
")",
"configMap",
".",
"AddChild",
"(",
"pair",
")",
"if",
"(",
"configMap",
"!=",
"None",
")",
":",
"ccm",
"=",
"self",
".",
"ConfigConfMos",
"(",
"configMap",
",",
"YesOrNo",
".",
"FALSE",
",",
"dumpXml",
")",
"if",
"ccm",
".",
"errorCode",
"==",
"0",
":",
"moList",
"=",
"[",
"]",
"for",
"child",
"in",
"ccm",
".",
"OutConfigs",
".",
"GetChild",
"(",
")",
":",
"if",
"(",
"isinstance",
"(",
"child",
",",
"Pair",
")",
"==",
"True",
")",
":",
"for",
"mo",
"in",
"child",
".",
"GetChild",
"(",
")",
":",
"moList",
".",
"append",
"(",
"mo",
")",
"elif",
"(",
"isinstance",
"(",
"child",
",",
"ManagedObject",
")",
"==",
"True",
")",
":",
"moList",
".",
"append",
"(",
"child",
")",
"# WriteObject(moList)",
"return",
"moList",
"else",
":",
"raise",
"UcsException",
"(",
"ccm",
".",
"errorCode",
",",
"ccm",
".",
"errorDescr",
")"
] |
Modifies Managed Object in UCS.
- inMo, if provided, it acts as the target object for the present operation. It should be None unless a user wants to provide an
inMo. It can be a single MO or a list containing multiple managed objects.
- classId of the managed object/s to be modified.
- params contains semicolon (;) separated list of key/value pairs(key=value), that are used as filters for selecting specific managed
objects. The key should be a valid property of the managed object to be modified.
|
[
"Modifies",
"Managed",
"Object",
"in",
"UCS",
".",
"-",
"inMo",
"if",
"provided",
"it",
"acts",
"as",
"the",
"target",
"object",
"for",
"the",
"present",
"operation",
".",
"It",
"should",
"be",
"None",
"unless",
"a",
"user",
"wants",
"to",
"provide",
"an",
"inMo",
".",
"It",
"can",
"be",
"a",
"single",
"MO",
"or",
"a",
"list",
"containing",
"multiple",
"managed",
"objects",
".",
"-",
"classId",
"of",
"the",
"managed",
"object",
"/",
"s",
"to",
"be",
"modified",
".",
"-",
"params",
"contains",
"semicolon",
"(",
";",
")",
"separated",
"list",
"of",
"key",
"/",
"value",
"pairs",
"(",
"key",
"=",
"value",
")",
"that",
"are",
"used",
"as",
"filters",
"for",
"selecting",
"specific",
"managed",
"objects",
".",
"The",
"key",
"should",
"be",
"a",
"valid",
"property",
"of",
"the",
"managed",
"object",
"to",
"be",
"modified",
"."
] |
python
|
train
|
Kunstmord/datalib
|
src/dataset.py
|
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L723-L740
|
def return_features_numpy(self, names='all'):
    """
    Returns a 2d numpy array of extracted features
    Parameters
    ----------
    names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
    all features will be returned, default value: 'all'
    Returns
    -------
    A numpy array of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array,
    then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported.
    """
    # Guard clause: the database must have been populated first.
    if self._prepopulated is False:
        raise errors.EmptyDatabase(self.dbpath)
    return return_features_numpy_base(
        self.dbpath, self._set_object, self.points_amt, names)
|
[
"def",
"return_features_numpy",
"(",
"self",
",",
"names",
"=",
"'all'",
")",
":",
"if",
"self",
".",
"_prepopulated",
"is",
"False",
":",
"raise",
"errors",
".",
"EmptyDatabase",
"(",
"self",
".",
"dbpath",
")",
"else",
":",
"return",
"return_features_numpy_base",
"(",
"self",
".",
"dbpath",
",",
"self",
".",
"_set_object",
",",
"self",
".",
"points_amt",
",",
"names",
")"
] |
Returns a 2d numpy array of extracted features
Parameters
----------
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned, default value: 'all'
Returns
-------
A numpy array of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array,
then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported.
|
[
"Returns",
"a",
"2d",
"numpy",
"array",
"of",
"extracted",
"features"
] |
python
|
train
|
swharden/SWHLab
|
doc/oldcode/swhlab/core/abf.py
|
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/abf.py#L506-L517
|
def loadThing(self,fname,ext=".pkl"):
    """Load any object previously saved to /swhlab4/ID_[fname].pkl.

    Args:
        fname: base file name; the output prefix (self.outpre) is prepended,
            and `ext` is appended when not already part of the name.
        ext: expected file extension (default ".pkl").

    Returns:
        The unpickled object.
    """
    if ext and ext not in fname:
        fname+=ext
    fname=self.outpre+fname
    time1=cm.timethis()
    # Use a context manager so the file handle is closed even if unpickling
    # raises (the original leaked the handle from pickle.load(open(...))).
    # NOTE: pickle.load is unsafe on untrusted files; these are expected to
    # be this tool's own saved outputs.
    with open(fname,"rb") as f:
        thing = pickle.load(f)
    print(" -> loading [%s] (%.01f kB) took %.02f ms"%(
        os.path.basename(fname),
        sys.getsizeof(pickle.dumps(thing, -1))/1e3,
        cm.timethis(time1)))
    return thing
|
[
"def",
"loadThing",
"(",
"self",
",",
"fname",
",",
"ext",
"=",
"\".pkl\"",
")",
":",
"if",
"ext",
"and",
"not",
"ext",
"in",
"fname",
":",
"fname",
"+=",
"ext",
"fname",
"=",
"self",
".",
"outpre",
"+",
"fname",
"time1",
"=",
"cm",
".",
"timethis",
"(",
")",
"thing",
"=",
"pickle",
".",
"load",
"(",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
")",
"print",
"(",
"\" -> loading [%s] (%.01f kB) took %.02f ms\"",
"%",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
",",
"sys",
".",
"getsizeof",
"(",
"pickle",
".",
"dumps",
"(",
"thing",
",",
"-",
"1",
")",
")",
"/",
"1e3",
",",
"cm",
".",
"timethis",
"(",
"time1",
")",
")",
")",
"return",
"thing"
] |
save any object from /swhlab4/ID_[fname].pkl
|
[
"save",
"any",
"object",
"from",
"/",
"swhlab4",
"/",
"ID_",
"[",
"fname",
"]",
".",
"pkl"
] |
python
|
valid
|
dwavesystems/dwave_embedding_utilities
|
dwave_embedding_utilities.py
|
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L163-L278
|
def embed_ising(source_linear, source_quadratic, embedding, target_adjacency, chain_strength=1.0):
"""Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
is the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0}
"""
# store variables in the target graph that the embedding hasn't used
unused = {v for v in target_adjacency} - set().union(*embedding.values())
# ok, let's begin with the linear biases.
# we spread the value of h evenly over the chain
target_linear = {v: 0. for v in target_adjacency}
for v, bias in iteritems(source_linear):
try:
chain_variables = embedding[v]
except KeyError:
# if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of
# the unused variables. if this turns out to not be an isolated vertex, it will be caught below when
# handling quadratic biases
try:
embedding[v] = {unused.pop()}
except KeyError:
raise ValueError('no embedding provided for source variable {}'.format(v))
chain_variables = embedding[v]
b = bias / len(chain_variables)
for s in chain_variables:
try:
target_linear[s] += b
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
# next up the quadratic biases.
# We spread the quadratic biases evenly over the edges
target_quadratic = {}
for (u, v), bias in iteritems(source_quadratic):
edges = set()
if u not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(u))
if v not in embedding:
raise ValueError('no embedding provided for source variable {}'.format(v))
for s in embedding[u]:
for t in embedding[v]:
try:
if s in target_adjacency[t] and (t, s) not in edges:
edges.add((s, t))
except KeyError:
raise ValueError('chain variable {} not in target_adjacency'.format(s))
if not edges:
raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
b = bias / len(edges)
# in some cases the logical J can have (u, v) and (v, u) as inputs, so make
# sure we are not doubling them up with our choice of ordering
for s, t in edges:
if (s, t) in target_quadratic:
target_quadratic[(s, t)] += b
elif (t, s) in target_quadratic:
target_quadratic[(t, s)] += b
else:
target_quadratic[(s, t)] = b
# finally we need to connect the nodes in the chains
chain_quadratic = {}
for chain in itervalues(embedding):
chain_quadratic.update(chain_to_quadratic(chain, target_adjacency, chain_strength))
return target_linear, target_quadratic, chain_quadratic
|
[
"def",
"embed_ising",
"(",
"source_linear",
",",
"source_quadratic",
",",
"embedding",
",",
"target_adjacency",
",",
"chain_strength",
"=",
"1.0",
")",
":",
"# store variables in the target graph that the embedding hasn't used",
"unused",
"=",
"{",
"v",
"for",
"v",
"in",
"target_adjacency",
"}",
"-",
"set",
"(",
")",
".",
"union",
"(",
"*",
"embedding",
".",
"values",
"(",
")",
")",
"# ok, let's begin with the linear biases.",
"# we spread the value of h evenly over the chain",
"target_linear",
"=",
"{",
"v",
":",
"0.",
"for",
"v",
"in",
"target_adjacency",
"}",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"source_linear",
")",
":",
"try",
":",
"chain_variables",
"=",
"embedding",
"[",
"v",
"]",
"except",
"KeyError",
":",
"# if our embedding doesn't deal with this variable, assume it's an isolated vertex and embed it to one of",
"# the unused variables. if this turns out to not be an isolated vertex, it will be caught below when",
"# handling quadratic biases",
"try",
":",
"embedding",
"[",
"v",
"]",
"=",
"{",
"unused",
".",
"pop",
"(",
")",
"}",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"v",
")",
")",
"chain_variables",
"=",
"embedding",
"[",
"v",
"]",
"b",
"=",
"bias",
"/",
"len",
"(",
"chain_variables",
")",
"for",
"s",
"in",
"chain_variables",
":",
"try",
":",
"target_linear",
"[",
"s",
"]",
"+=",
"b",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'chain variable {} not in target_adjacency'",
".",
"format",
"(",
"s",
")",
")",
"# next up the quadratic biases.",
"# We spread the quadratic biases evenly over the edges",
"target_quadratic",
"=",
"{",
"}",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"source_quadratic",
")",
":",
"edges",
"=",
"set",
"(",
")",
"if",
"u",
"not",
"in",
"embedding",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"u",
")",
")",
"if",
"v",
"not",
"in",
"embedding",
":",
"raise",
"ValueError",
"(",
"'no embedding provided for source variable {}'",
".",
"format",
"(",
"v",
")",
")",
"for",
"s",
"in",
"embedding",
"[",
"u",
"]",
":",
"for",
"t",
"in",
"embedding",
"[",
"v",
"]",
":",
"try",
":",
"if",
"s",
"in",
"target_adjacency",
"[",
"t",
"]",
"and",
"(",
"t",
",",
"s",
")",
"not",
"in",
"edges",
":",
"edges",
".",
"add",
"(",
"(",
"s",
",",
"t",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'chain variable {} not in target_adjacency'",
".",
"format",
"(",
"s",
")",
")",
"if",
"not",
"edges",
":",
"raise",
"ValueError",
"(",
"\"no edges in target graph between source variables {}, {}\"",
".",
"format",
"(",
"u",
",",
"v",
")",
")",
"b",
"=",
"bias",
"/",
"len",
"(",
"edges",
")",
"# in some cases the logical J can have (u, v) and (v, u) as inputs, so make",
"# sure we are not doubling them up with our choice of ordering",
"for",
"s",
",",
"t",
"in",
"edges",
":",
"if",
"(",
"s",
",",
"t",
")",
"in",
"target_quadratic",
":",
"target_quadratic",
"[",
"(",
"s",
",",
"t",
")",
"]",
"+=",
"b",
"elif",
"(",
"t",
",",
"s",
")",
"in",
"target_quadratic",
":",
"target_quadratic",
"[",
"(",
"t",
",",
"s",
")",
"]",
"+=",
"b",
"else",
":",
"target_quadratic",
"[",
"(",
"s",
",",
"t",
")",
"]",
"=",
"b",
"# finally we need to connect the nodes in the chains",
"chain_quadratic",
"=",
"{",
"}",
"for",
"chain",
"in",
"itervalues",
"(",
"embedding",
")",
":",
"chain_quadratic",
".",
"update",
"(",
"chain_to_quadratic",
"(",
"chain",
",",
"target_adjacency",
",",
"chain_strength",
")",
")",
"return",
"target_linear",
",",
"target_quadratic",
",",
"chain_quadratic"
] |
Embeds a logical Ising model onto another graph via an embedding.
Args:
source_linear (dict): The linear biases to be embedded. Should be a dict of
the form {v: bias, ...} where v is a variable in the source model
and bias is the linear bias associated with v.
source_quadratic (dict): The quadratic biases to be embedded. Should be a dict
of the form {(u, v): bias, ...} where u, v are variables in the
source model and bias is the quadratic bias associated with (u, v).
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
target_adjacency (dict/:class:`networkx.Graph`): The adjacency dict of the target
graph. Should be a dict of the form {s: Ns, ...} where s is a variable
in the target graph and Ns is the set of neighbours of s.
chain_strength (float, optional): The quadratic bias that should be used
to create chains.
Returns:
(dict, dict, dict): A 3-tuple containing:
dict: The linear biases of the target problem. In the form {s: bias, ...}
where s is a node in the target graph and bias is the associated linear bias.
dict: The quadratic biases of the target problem. A dict of the form
{(s, t): bias, ...} where (s, t) is an edge in the target graph and bias is
the associated quadratic bias.
dict: The quadratic biases that induce the variables in the target problem to
act as one. A dict of the form {(s, t): -chain_strength, ...} which
is the quadratic biases associated with the chains.
Examples:
>>> source_linear = {'a': 1, 'b': 1}
>>> source_quadratic = {('a', 'b'): -1}
>>> embedding = {'a': [0, 1], 'b': [2]}
>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
>>> target_linear, target_quadratic, chain_quadratic = embed_ising(
... source_linear, source_quadratic, embedding, target_adjacency)
>>> target_linear
{0: 0.5, 1: 0.5, 2: 1.0}
>>> target_quadratic
{(0, 2): -0.5, (1, 2): -0.5}
>>> chain_quadratic
{(0, 1): -1.0}
|
[
"Embeds",
"a",
"logical",
"Ising",
"model",
"onto",
"another",
"graph",
"via",
"an",
"embedding",
"."
] |
python
|
train
|
flying-sheep/bcode
|
bcoding.py
|
https://github.com/flying-sheep/bcode/blob/a50996aa1741685c2daba6a9b4893692f377695a/bcoding.py#L181-L195
|
def bencode(data, f=None):
"""
Writes a serializable data piece to f
The order of tests is nonarbitrary,
as strings and mappings are iterable.
If f is None, it writes to a byte buffer
and returns a bytestring
"""
if f is None:
f = BytesIO()
_bencode_to_file(data, f)
return f.getvalue()
else:
_bencode_to_file(data, f)
|
[
"def",
"bencode",
"(",
"data",
",",
"f",
"=",
"None",
")",
":",
"if",
"f",
"is",
"None",
":",
"f",
"=",
"BytesIO",
"(",
")",
"_bencode_to_file",
"(",
"data",
",",
"f",
")",
"return",
"f",
".",
"getvalue",
"(",
")",
"else",
":",
"_bencode_to_file",
"(",
"data",
",",
"f",
")"
] |
Writes a serializable data piece to f
The order of tests is nonarbitrary,
as strings and mappings are iterable.
If f is None, it writes to a byte buffer
and returns a bytestring
|
[
"Writes",
"a",
"serializable",
"data",
"piece",
"to",
"f",
"The",
"order",
"of",
"tests",
"is",
"nonarbitrary",
"as",
"strings",
"and",
"mappings",
"are",
"iterable",
".",
"If",
"f",
"is",
"None",
"it",
"writes",
"to",
"a",
"byte",
"buffer",
"and",
"returns",
"a",
"bytestring"
] |
python
|
train
|
Yelp/kafka-utils
|
kafka_utils/kafka_rolling_restart/main.py
|
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_rolling_restart/main.py#L489-L514
|
def get_task_class(tasks, task_args):
"""Reads in a list of tasks provided by the user,
loads the appropiate task, and returns two lists,
pre_stop_tasks and post_stop_tasks
:param tasks: list of strings locating tasks to load
:type tasks: list
:param task_args: list of strings to be used as args
:type task_args: list
"""
pre_stop_tasks = []
post_stop_tasks = []
task_to_task_args = dict(list(zip(tasks, task_args)))
tasks_classes = [PreStopTask, PostStopTask]
for func, task_args in task_to_task_args.items():
for task_class in tasks_classes:
imported_class = dynamic_import(func, task_class)
if imported_class:
if task_class is PreStopTask:
pre_stop_tasks.append(imported_class(task_args))
elif task_class is PostStopTask:
post_stop_tasks.append(imported_class(task_args))
else:
print("ERROR: Class is not a type of Pre/Post StopTask:" + func)
sys.exit(1)
return pre_stop_tasks, post_stop_tasks
|
[
"def",
"get_task_class",
"(",
"tasks",
",",
"task_args",
")",
":",
"pre_stop_tasks",
"=",
"[",
"]",
"post_stop_tasks",
"=",
"[",
"]",
"task_to_task_args",
"=",
"dict",
"(",
"list",
"(",
"zip",
"(",
"tasks",
",",
"task_args",
")",
")",
")",
"tasks_classes",
"=",
"[",
"PreStopTask",
",",
"PostStopTask",
"]",
"for",
"func",
",",
"task_args",
"in",
"task_to_task_args",
".",
"items",
"(",
")",
":",
"for",
"task_class",
"in",
"tasks_classes",
":",
"imported_class",
"=",
"dynamic_import",
"(",
"func",
",",
"task_class",
")",
"if",
"imported_class",
":",
"if",
"task_class",
"is",
"PreStopTask",
":",
"pre_stop_tasks",
".",
"append",
"(",
"imported_class",
"(",
"task_args",
")",
")",
"elif",
"task_class",
"is",
"PostStopTask",
":",
"post_stop_tasks",
".",
"append",
"(",
"imported_class",
"(",
"task_args",
")",
")",
"else",
":",
"print",
"(",
"\"ERROR: Class is not a type of Pre/Post StopTask:\"",
"+",
"func",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"pre_stop_tasks",
",",
"post_stop_tasks"
] |
Reads in a list of tasks provided by the user,
loads the appropiate task, and returns two lists,
pre_stop_tasks and post_stop_tasks
:param tasks: list of strings locating tasks to load
:type tasks: list
:param task_args: list of strings to be used as args
:type task_args: list
|
[
"Reads",
"in",
"a",
"list",
"of",
"tasks",
"provided",
"by",
"the",
"user",
"loads",
"the",
"appropiate",
"task",
"and",
"returns",
"two",
"lists",
"pre_stop_tasks",
"and",
"post_stop_tasks",
":",
"param",
"tasks",
":",
"list",
"of",
"strings",
"locating",
"tasks",
"to",
"load",
":",
"type",
"tasks",
":",
"list",
":",
"param",
"task_args",
":",
"list",
"of",
"strings",
"to",
"be",
"used",
"as",
"args",
":",
"type",
"task_args",
":",
"list"
] |
python
|
train
|
senaite/senaite.core
|
bika/lims/content/arimport.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/arimport.py#L712-L776
|
def validate_headers(self):
"""Validate headers fields from schema
"""
pc = getToolByName(self, 'portal_catalog')
pu = getToolByName(self, "plone_utils")
client = self.aq_parent
# Verify Client Name
if self.getClientName() != client.Title():
self.error("%s: value is invalid (%s)." % (
'Client name', self.getClientName()))
# Verify Client ID
if self.getClientID() != client.getClientID():
self.error("%s: value is invalid (%s)." % (
'Client ID', self.getClientID()))
existing_arimports = pc(portal_type='ARImport',
review_state=['valid', 'imported'])
# Verify Client Order Number
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientOrderNumber():
continue
arimport = arimport.getObject()
if arimport.getClientOrderNumber() == self.getClientOrderNumber():
self.error('%s: already used by existing ARImport.' %
'ClientOrderNumber')
break
# Verify Client Reference
for arimport in existing_arimports:
if arimport.UID == self.UID() \
or not arimport.getClientReference():
continue
arimport = arimport.getObject()
if arimport.getClientReference() == self.getClientReference():
self.error('%s: already used by existing ARImport.' %
'ClientReference')
break
# getCCContacts has no value if object is not complete (eg during test)
if self.getCCContacts():
cc_contacts = self.getCCContacts()[0]
contacts = [x for x in client.objectValues('Contact')]
contact_names = [c.Title() for c in contacts]
# validate Contact existence in this Client
for k in ['CCNamesReport', 'CCNamesInvoice']:
for val in cc_contacts[k]:
if val and val not in contact_names:
self.error('%s: value is invalid (%s)' % (k, val))
else:
cc_contacts = {'CCNamesReport': [],
'CCEmailsReport': [],
'CCNamesInvoice': [],
'CCEmailsInvoice': []
}
# validate Contact existence in this Client
for k in ['CCEmailsReport', 'CCEmailsInvoice']:
for val in cc_contacts.get(k, []):
if val and not pu.validateSingleNormalizedEmailAddress(val):
self.error('%s: value is invalid (%s)' % (k, val))
|
[
"def",
"validate_headers",
"(",
"self",
")",
":",
"pc",
"=",
"getToolByName",
"(",
"self",
",",
"'portal_catalog'",
")",
"pu",
"=",
"getToolByName",
"(",
"self",
",",
"\"plone_utils\"",
")",
"client",
"=",
"self",
".",
"aq_parent",
"# Verify Client Name",
"if",
"self",
".",
"getClientName",
"(",
")",
"!=",
"client",
".",
"Title",
"(",
")",
":",
"self",
".",
"error",
"(",
"\"%s: value is invalid (%s).\"",
"%",
"(",
"'Client name'",
",",
"self",
".",
"getClientName",
"(",
")",
")",
")",
"# Verify Client ID",
"if",
"self",
".",
"getClientID",
"(",
")",
"!=",
"client",
".",
"getClientID",
"(",
")",
":",
"self",
".",
"error",
"(",
"\"%s: value is invalid (%s).\"",
"%",
"(",
"'Client ID'",
",",
"self",
".",
"getClientID",
"(",
")",
")",
")",
"existing_arimports",
"=",
"pc",
"(",
"portal_type",
"=",
"'ARImport'",
",",
"review_state",
"=",
"[",
"'valid'",
",",
"'imported'",
"]",
")",
"# Verify Client Order Number",
"for",
"arimport",
"in",
"existing_arimports",
":",
"if",
"arimport",
".",
"UID",
"==",
"self",
".",
"UID",
"(",
")",
"or",
"not",
"arimport",
".",
"getClientOrderNumber",
"(",
")",
":",
"continue",
"arimport",
"=",
"arimport",
".",
"getObject",
"(",
")",
"if",
"arimport",
".",
"getClientOrderNumber",
"(",
")",
"==",
"self",
".",
"getClientOrderNumber",
"(",
")",
":",
"self",
".",
"error",
"(",
"'%s: already used by existing ARImport.'",
"%",
"'ClientOrderNumber'",
")",
"break",
"# Verify Client Reference",
"for",
"arimport",
"in",
"existing_arimports",
":",
"if",
"arimport",
".",
"UID",
"==",
"self",
".",
"UID",
"(",
")",
"or",
"not",
"arimport",
".",
"getClientReference",
"(",
")",
":",
"continue",
"arimport",
"=",
"arimport",
".",
"getObject",
"(",
")",
"if",
"arimport",
".",
"getClientReference",
"(",
")",
"==",
"self",
".",
"getClientReference",
"(",
")",
":",
"self",
".",
"error",
"(",
"'%s: already used by existing ARImport.'",
"%",
"'ClientReference'",
")",
"break",
"# getCCContacts has no value if object is not complete (eg during test)",
"if",
"self",
".",
"getCCContacts",
"(",
")",
":",
"cc_contacts",
"=",
"self",
".",
"getCCContacts",
"(",
")",
"[",
"0",
"]",
"contacts",
"=",
"[",
"x",
"for",
"x",
"in",
"client",
".",
"objectValues",
"(",
"'Contact'",
")",
"]",
"contact_names",
"=",
"[",
"c",
".",
"Title",
"(",
")",
"for",
"c",
"in",
"contacts",
"]",
"# validate Contact existence in this Client",
"for",
"k",
"in",
"[",
"'CCNamesReport'",
",",
"'CCNamesInvoice'",
"]",
":",
"for",
"val",
"in",
"cc_contacts",
"[",
"k",
"]",
":",
"if",
"val",
"and",
"val",
"not",
"in",
"contact_names",
":",
"self",
".",
"error",
"(",
"'%s: value is invalid (%s)'",
"%",
"(",
"k",
",",
"val",
")",
")",
"else",
":",
"cc_contacts",
"=",
"{",
"'CCNamesReport'",
":",
"[",
"]",
",",
"'CCEmailsReport'",
":",
"[",
"]",
",",
"'CCNamesInvoice'",
":",
"[",
"]",
",",
"'CCEmailsInvoice'",
":",
"[",
"]",
"}",
"# validate Contact existence in this Client",
"for",
"k",
"in",
"[",
"'CCEmailsReport'",
",",
"'CCEmailsInvoice'",
"]",
":",
"for",
"val",
"in",
"cc_contacts",
".",
"get",
"(",
"k",
",",
"[",
"]",
")",
":",
"if",
"val",
"and",
"not",
"pu",
".",
"validateSingleNormalizedEmailAddress",
"(",
"val",
")",
":",
"self",
".",
"error",
"(",
"'%s: value is invalid (%s)'",
"%",
"(",
"k",
",",
"val",
")",
")"
] |
Validate headers fields from schema
|
[
"Validate",
"headers",
"fields",
"from",
"schema"
] |
python
|
train
|
aleju/imgaug
|
imgaug/augmenters/contrast.py
|
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/contrast.py#L109-L181
|
def adjust_contrast_sigmoid(arr, gain, cutoff):
"""
Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gain : number
Multiplier for the sigmoid function's output.
Higher values lead to quicker changes from dark to light pixels.
cutoff : number
Cutoff that shifts the sigmoid function in horizontal direction.
Higher values mean that the switch from dark to light pixels happens later, i.e.
the pixels will remain darker.
Returns
-------
numpy.ndarray
Array with adjusted contrast.
"""
# int8 is also possible according to docs
# https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed
# like `d` was 0 for CV_8S, causing that to fail
if arr.dtype.name == "uint8":
min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
dynamic_range = max_value - min_value
value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32)
# 255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))
# using np.float32(.) here still works when the input is a numpy array of size 1
gain = np.float32(gain)
cutoff = np.float32(cutoff)
table = min_value + dynamic_range * 1/(1 + np.exp(gain * (cutoff - value_range)))
arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
if arr.ndim == 3 and arr_aug.ndim == 2:
return arr_aug[..., np.newaxis]
return arr_aug
else:
return ski_exposure.adjust_sigmoid(arr, cutoff=cutoff, gain=gain)
|
[
"def",
"adjust_contrast_sigmoid",
"(",
"arr",
",",
"gain",
",",
"cutoff",
")",
":",
"# int8 is also possible according to docs",
"# https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed",
"# like `d` was 0 for CV_8S, causing that to fail",
"if",
"arr",
".",
"dtype",
".",
"name",
"==",
"\"uint8\"",
":",
"min_value",
",",
"_center_value",
",",
"max_value",
"=",
"iadt",
".",
"get_value_range_of_dtype",
"(",
"arr",
".",
"dtype",
")",
"dynamic_range",
"=",
"max_value",
"-",
"min_value",
"value_range",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1.0",
",",
"num",
"=",
"dynamic_range",
"+",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# 255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))",
"# using np.float32(.) here still works when the input is a numpy array of size 1",
"gain",
"=",
"np",
".",
"float32",
"(",
"gain",
")",
"cutoff",
"=",
"np",
".",
"float32",
"(",
"cutoff",
")",
"table",
"=",
"min_value",
"+",
"dynamic_range",
"*",
"1",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"gain",
"*",
"(",
"cutoff",
"-",
"value_range",
")",
")",
")",
"arr_aug",
"=",
"cv2",
".",
"LUT",
"(",
"arr",
",",
"np",
".",
"clip",
"(",
"table",
",",
"min_value",
",",
"max_value",
")",
".",
"astype",
"(",
"arr",
".",
"dtype",
")",
")",
"if",
"arr",
".",
"ndim",
"==",
"3",
"and",
"arr_aug",
".",
"ndim",
"==",
"2",
":",
"return",
"arr_aug",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"return",
"arr_aug",
"else",
":",
"return",
"ski_exposure",
".",
"adjust_sigmoid",
"(",
"arr",
",",
"cutoff",
"=",
"cutoff",
",",
"gain",
"=",
"gain",
")"
] |
Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.
dtype support::
* ``uint8``: yes; fully tested (1) (2) (3)
* ``uint16``: yes; tested (2) (3)
* ``uint32``: yes; tested (2) (3)
* ``uint64``: yes; tested (2) (3) (4)
* ``int8``: limited; tested (2) (3) (5)
* ``int16``: limited; tested (2) (3) (5)
* ``int32``: limited; tested (2) (3) (5)
* ``int64``: limited; tested (2) (3) (4) (5)
* ``float16``: limited; tested (5)
* ``float32``: limited; tested (5)
* ``float64``: limited; tested (5)
* ``float128``: no (6)
* ``bool``: no (7)
- (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
- (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
e.g. ``result*255`` for ``uint8``.
- (3) Integer-like values are not rounded after applying the contrast adjustment equation
(before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
space to discrete happens according to floor function.
- (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
applying the contrast normalization method. This might lead to inaccuracies for large
64bit integer values. Tests showed no indication of that happening though.
- (5) Must not contain negative values. Values >=0 are fully supported.
- (6) Leads to error in scikit-image.
- (7) Does not make sense for contrast adjustments.
Parameters
----------
arr : numpy.ndarray
Array for which to adjust the contrast. Dtype ``uint8`` is fastest.
gain : number
Multiplier for the sigmoid function's output.
Higher values lead to quicker changes from dark to light pixels.
cutoff : number
Cutoff that shifts the sigmoid function in horizontal direction.
Higher values mean that the switch from dark to light pixels happens later, i.e.
the pixels will remain darker.
Returns
-------
numpy.ndarray
Array with adjusted contrast.
|
[
"Adjust",
"contrast",
"by",
"scaling",
"each",
"pixel",
"value",
"to",
"255",
"*",
"1",
"/",
"(",
"1",
"+",
"exp",
"(",
"gain",
"*",
"(",
"cutoff",
"-",
"I_ij",
"/",
"255",
")))",
"."
] |
python
|
valid
|
DLR-RM/RAFCON
|
source/rafcon/core/states/container_state.py
|
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1404-L1427
|
def add_scoped_variable(self, name, data_type=None, default_value=None, scoped_variable_id=None):
""" Adds a scoped variable to the container state
:param name: The name of the scoped variable
:param data_type: An optional data type of the scoped variable
:param default_value: An optional default value of the scoped variable
:param scoped_variable_id: An optional scoped variable id of the
:return: the unique id of the added scoped variable
:raises exceptions.ValueError: if the scoped variable is not valid
"""
if scoped_variable_id is None:
# All data port ids have to passed to the id generation as the data port id has to be unique inside a state
scoped_variable_id = generate_data_port_id(self.get_data_port_ids())
self._scoped_variables[scoped_variable_id] = ScopedVariable(name, data_type, default_value,
scoped_variable_id, self)
# Check for name uniqueness
valid, message = self._check_data_port_name(self._scoped_variables[scoped_variable_id])
if not valid:
self._scoped_variables[scoped_variable_id].parent = None
del self._scoped_variables[scoped_variable_id]
raise ValueError(message)
return scoped_variable_id
|
[
"def",
"add_scoped_variable",
"(",
"self",
",",
"name",
",",
"data_type",
"=",
"None",
",",
"default_value",
"=",
"None",
",",
"scoped_variable_id",
"=",
"None",
")",
":",
"if",
"scoped_variable_id",
"is",
"None",
":",
"# All data port ids have to passed to the id generation as the data port id has to be unique inside a state",
"scoped_variable_id",
"=",
"generate_data_port_id",
"(",
"self",
".",
"get_data_port_ids",
"(",
")",
")",
"self",
".",
"_scoped_variables",
"[",
"scoped_variable_id",
"]",
"=",
"ScopedVariable",
"(",
"name",
",",
"data_type",
",",
"default_value",
",",
"scoped_variable_id",
",",
"self",
")",
"# Check for name uniqueness",
"valid",
",",
"message",
"=",
"self",
".",
"_check_data_port_name",
"(",
"self",
".",
"_scoped_variables",
"[",
"scoped_variable_id",
"]",
")",
"if",
"not",
"valid",
":",
"self",
".",
"_scoped_variables",
"[",
"scoped_variable_id",
"]",
".",
"parent",
"=",
"None",
"del",
"self",
".",
"_scoped_variables",
"[",
"scoped_variable_id",
"]",
"raise",
"ValueError",
"(",
"message",
")",
"return",
"scoped_variable_id"
] |
Adds a scoped variable to the container state
:param name: The name of the scoped variable
:param data_type: An optional data type of the scoped variable
:param default_value: An optional default value of the scoped variable
:param scoped_variable_id: An optional scoped variable id of the
:return: the unique id of the added scoped variable
:raises exceptions.ValueError: if the scoped variable is not valid
|
[
"Adds",
"a",
"scoped",
"variable",
"to",
"the",
"container",
"state"
] |
python
|
train
|
palantir/python-language-server
|
pyls/plugins/jedi_completion.py
|
https://github.com/palantir/python-language-server/blob/96e08d85635382d17024c352306c4759f124195d/pyls/plugins/jedi_completion.py#L95-L102
|
def _sort_text(definition):
""" Ensure builtins appear at the bottom.
Description is of format <type>: <module>.<item>
"""
# If its 'hidden', put it next last
prefix = 'z{}' if definition.name.startswith('_') else 'a{}'
return prefix.format(definition.name)
|
[
"def",
"_sort_text",
"(",
"definition",
")",
":",
"# If its 'hidden', put it next last",
"prefix",
"=",
"'z{}'",
"if",
"definition",
".",
"name",
".",
"startswith",
"(",
"'_'",
")",
"else",
"'a{}'",
"return",
"prefix",
".",
"format",
"(",
"definition",
".",
"name",
")"
] |
Ensure builtins appear at the bottom.
Description is of format <type>: <module>.<item>
|
[
"Ensure",
"builtins",
"appear",
"at",
"the",
"bottom",
".",
"Description",
"is",
"of",
"format",
"<type",
">",
":",
"<module",
">",
".",
"<item",
">"
] |
python
|
train
|
pixelogik/NearPy
|
nearpy/hashes/permutation/permutedIndex.py
|
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/hashes/permutation/permutedIndex.py#L123-L148
|
def get_neighbour_keys(self, bucket_key, k):
"""
The computing complexity is O( np*beam*log(np*beam) )
where,
np = number of permutations
beam = self.beam_size
Make sure np*beam is much less than the number of bucket keys,
otherwise we could use brute-force to get the neighbours
"""
# convert query_key into bitarray
query_key = bitarray(bucket_key)
topk = set()
for i in xrange(len(self.permutes)):
p = self.permutes[i]
plist = self.permuted_lists[i]
candidates = p.search_revert(plist, query_key, self.beam_size)
topk = topk.union(set(candidates))
topk = list(topk)
# sort the topk neighbour keys according to the Hamming distance to qurey key
topk = sorted(topk, key=lambda x: self.hamming_distance(x, query_key))
# return the top k items
topk_bin = [x.to01() for x in topk[:k]]
return topk_bin
|
[
"def",
"get_neighbour_keys",
"(",
"self",
",",
"bucket_key",
",",
"k",
")",
":",
"# convert query_key into bitarray",
"query_key",
"=",
"bitarray",
"(",
"bucket_key",
")",
"topk",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"permutes",
")",
")",
":",
"p",
"=",
"self",
".",
"permutes",
"[",
"i",
"]",
"plist",
"=",
"self",
".",
"permuted_lists",
"[",
"i",
"]",
"candidates",
"=",
"p",
".",
"search_revert",
"(",
"plist",
",",
"query_key",
",",
"self",
".",
"beam_size",
")",
"topk",
"=",
"topk",
".",
"union",
"(",
"set",
"(",
"candidates",
")",
")",
"topk",
"=",
"list",
"(",
"topk",
")",
"# sort the topk neighbour keys according to the Hamming distance to qurey key",
"topk",
"=",
"sorted",
"(",
"topk",
",",
"key",
"=",
"lambda",
"x",
":",
"self",
".",
"hamming_distance",
"(",
"x",
",",
"query_key",
")",
")",
"# return the top k items",
"topk_bin",
"=",
"[",
"x",
".",
"to01",
"(",
")",
"for",
"x",
"in",
"topk",
"[",
":",
"k",
"]",
"]",
"return",
"topk_bin"
] |
The computing complexity is O( np*beam*log(np*beam) )
where,
np = number of permutations
beam = self.beam_size
Make sure np*beam is much less than the number of bucket keys,
otherwise we could use brute-force to get the neighbours
|
[
"The",
"computing",
"complexity",
"is",
"O",
"(",
"np",
"*",
"beam",
"*",
"log",
"(",
"np",
"*",
"beam",
")",
")",
"where",
"np",
"=",
"number",
"of",
"permutations",
"beam",
"=",
"self",
".",
"beam_size"
] |
python
|
train
|
edx/edx-enterprise
|
enterprise/utils.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L485-L502
|
def get_enterprise_customer_or_404(enterprise_uuid):
"""
Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.
Arguments:
enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.
Returns:
(EnterpriseCustomer): The EnterpriseCustomer given the UUID.
"""
EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name
try:
enterprise_uuid = UUID(enterprise_uuid)
return EnterpriseCustomer.objects.get(uuid=enterprise_uuid) # pylint: disable=no-member
except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):
LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)
raise Http404
|
[
"def",
"get_enterprise_customer_or_404",
"(",
"enterprise_uuid",
")",
":",
"EnterpriseCustomer",
"=",
"apps",
".",
"get_model",
"(",
"'enterprise'",
",",
"'EnterpriseCustomer'",
")",
"# pylint: disable=invalid-name",
"try",
":",
"enterprise_uuid",
"=",
"UUID",
"(",
"enterprise_uuid",
")",
"return",
"EnterpriseCustomer",
".",
"objects",
".",
"get",
"(",
"uuid",
"=",
"enterprise_uuid",
")",
"# pylint: disable=no-member",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"EnterpriseCustomer",
".",
"DoesNotExist",
")",
":",
"LOGGER",
".",
"error",
"(",
"'Unable to find enterprise customer for UUID: [%s]'",
",",
"enterprise_uuid",
")",
"raise",
"Http404"
] |
Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.
Arguments:
enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.
Returns:
(EnterpriseCustomer): The EnterpriseCustomer given the UUID.
|
[
"Given",
"an",
"EnterpriseCustomer",
"UUID",
"return",
"the",
"corresponding",
"EnterpriseCustomer",
"or",
"raise",
"a",
"404",
"."
] |
python
|
valid
|
RiotGames/cloud-inquisitor
|
backend/cloud_inquisitor/app.py
|
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/app.py#L339-L356
|
def before_request():
"""Checks to ensure that the session is valid and validates the users CSRF token is present
Returns:
`None`
"""
if not request.path.startswith('/saml') and not request.path.startswith('/auth'):
# Validate the session has the items we need
if 'accounts' not in session:
logger.debug('Missing \'accounts\' from session object, sending user to login page')
return BaseView.make_unauth_response()
# Require the CSRF token to be present if we are performing a change action (add, delete or modify objects)
# but exclude the SAML endpoints from the CSRF check
if request.method in ('POST', 'PUT', 'DELETE',):
if session['csrf_token'] != request.headers.get('X-Csrf-Token'):
logger.info('CSRF Token is missing or incorrect, sending user to login page')
abort(403)
|
[
"def",
"before_request",
"(",
")",
":",
"if",
"not",
"request",
".",
"path",
".",
"startswith",
"(",
"'/saml'",
")",
"and",
"not",
"request",
".",
"path",
".",
"startswith",
"(",
"'/auth'",
")",
":",
"# Validate the session has the items we need",
"if",
"'accounts'",
"not",
"in",
"session",
":",
"logger",
".",
"debug",
"(",
"'Missing \\'accounts\\' from session object, sending user to login page'",
")",
"return",
"BaseView",
".",
"make_unauth_response",
"(",
")",
"# Require the CSRF token to be present if we are performing a change action (add, delete or modify objects)",
"# but exclude the SAML endpoints from the CSRF check",
"if",
"request",
".",
"method",
"in",
"(",
"'POST'",
",",
"'PUT'",
",",
"'DELETE'",
",",
")",
":",
"if",
"session",
"[",
"'csrf_token'",
"]",
"!=",
"request",
".",
"headers",
".",
"get",
"(",
"'X-Csrf-Token'",
")",
":",
"logger",
".",
"info",
"(",
"'CSRF Token is missing or incorrect, sending user to login page'",
")",
"abort",
"(",
"403",
")"
] |
Checks to ensure that the session is valid and validates the users CSRF token is present
Returns:
`None`
|
[
"Checks",
"to",
"ensure",
"that",
"the",
"session",
"is",
"valid",
"and",
"validates",
"the",
"users",
"CSRF",
"token",
"is",
"present"
] |
python
|
train
|
iDigBio/idigbio-python-client
|
idigbio/json_client.py
|
https://github.com/iDigBio/idigbio-python-client/blob/e896075b9fed297fc420caf303b3bb5a2298d969/idigbio/json_client.py#L315-L332
|
def search_records(self, rq={}, limit=100, offset=0, sort=None,
fields=None, fields_exclude=FIELDS_EXCLUDE_DEFAULT):
"""
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
if fields is not None and fields_exclude is FIELDS_EXCLUDE_DEFAULT:
fields_exclude = None
return self._api_post("/v2/search/records",
rq=rq, limit=limit, offset=offset, sort=sort,
fields=fields, fields_exclude=fields_exclude)
|
[
"def",
"search_records",
"(",
"self",
",",
"rq",
"=",
"{",
"}",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"0",
",",
"sort",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"fields_exclude",
"=",
"FIELDS_EXCLUDE_DEFAULT",
")",
":",
"if",
"fields",
"is",
"not",
"None",
"and",
"fields_exclude",
"is",
"FIELDS_EXCLUDE_DEFAULT",
":",
"fields_exclude",
"=",
"None",
"return",
"self",
".",
"_api_post",
"(",
"\"/v2/search/records\"",
",",
"rq",
"=",
"rq",
",",
"limit",
"=",
"limit",
",",
"offset",
"=",
"offset",
",",
"sort",
"=",
"sort",
",",
"fields",
"=",
"fields",
",",
"fields_exclude",
"=",
"fields_exclude",
")"
] |
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
|
[
"rq",
"Search",
"Query",
"in",
"iDigBio",
"Query",
"Format",
"using",
"Record",
"Query",
"Fields",
"sort",
"field",
"to",
"sort",
"on",
"pick",
"from",
"Record",
"Query",
"Fields",
"fields",
"a",
"list",
"of",
"fields",
"to",
"return",
"specified",
"using",
"the",
"fieldName",
"parameter",
"from",
"Fields",
"with",
"type",
"records",
"fields_exclude",
"a",
"list",
"of",
"fields",
"to",
"exclude",
"specified",
"using",
"the",
"fieldName",
"parameter",
"from",
"Fields",
"with",
"type",
"records",
"limit",
"max",
"results",
"offset",
"skip",
"results"
] |
python
|
train
|
ska-sa/katcp-python
|
katcp/kattypes.py
|
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/kattypes.py#L116-L142
|
def unpack(self, packed_value, major=DEFAULT_KATCP_MAJOR):
"""Parse a KATCP parameter into an object.
Parameters
----------
packed_value : str
The unescaped KATCP string to parse into a value.
major : int, optional
Major version of KATCP to use when interpreting types.
Defaults to latest implemented KATCP version.
Returns
-------
value : object
The value the KATCP string represented.
"""
if packed_value is None:
value = self.get_default()
else:
try:
value = self.decode(packed_value, major)
except Exception:
raise
if value is not None:
self.check(value, major)
return value
|
[
"def",
"unpack",
"(",
"self",
",",
"packed_value",
",",
"major",
"=",
"DEFAULT_KATCP_MAJOR",
")",
":",
"if",
"packed_value",
"is",
"None",
":",
"value",
"=",
"self",
".",
"get_default",
"(",
")",
"else",
":",
"try",
":",
"value",
"=",
"self",
".",
"decode",
"(",
"packed_value",
",",
"major",
")",
"except",
"Exception",
":",
"raise",
"if",
"value",
"is",
"not",
"None",
":",
"self",
".",
"check",
"(",
"value",
",",
"major",
")",
"return",
"value"
] |
Parse a KATCP parameter into an object.
Parameters
----------
packed_value : str
The unescaped KATCP string to parse into a value.
major : int, optional
Major version of KATCP to use when interpreting types.
Defaults to latest implemented KATCP version.
Returns
-------
value : object
The value the KATCP string represented.
|
[
"Parse",
"a",
"KATCP",
"parameter",
"into",
"an",
"object",
"."
] |
python
|
train
|
mgoral/subconvert
|
src/subconvert/utils/VideoPlayer.py
|
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/utils/VideoPlayer.py#L144-L148
|
def stop(self):
"""Stops playback"""
if self.isPlaying is True:
self._execute("stop")
self._changePlayingState(False)
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"isPlaying",
"is",
"True",
":",
"self",
".",
"_execute",
"(",
"\"stop\"",
")",
"self",
".",
"_changePlayingState",
"(",
"False",
")"
] |
Stops playback
|
[
"Stops",
"playback"
] |
python
|
train
|
ktdreyer/txkoji
|
txkoji/connection.py
|
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L232-L247
|
def getBuild(self, build_id, **kwargs):
"""
Load all information about a build and return a custom Build class.
Calls "getBuild" XML-RPC.
:param build_id: ``int``, for example 12345
:returns: deferred that when fired returns a Build (Munch, dict-like)
object representing this Koji build, or None if no build was
found.
"""
buildinfo = yield self.call('getBuild', build_id, **kwargs)
build = Build.fromDict(buildinfo)
if build:
build.connection = self
defer.returnValue(build)
|
[
"def",
"getBuild",
"(",
"self",
",",
"build_id",
",",
"*",
"*",
"kwargs",
")",
":",
"buildinfo",
"=",
"yield",
"self",
".",
"call",
"(",
"'getBuild'",
",",
"build_id",
",",
"*",
"*",
"kwargs",
")",
"build",
"=",
"Build",
".",
"fromDict",
"(",
"buildinfo",
")",
"if",
"build",
":",
"build",
".",
"connection",
"=",
"self",
"defer",
".",
"returnValue",
"(",
"build",
")"
] |
Load all information about a build and return a custom Build class.
Calls "getBuild" XML-RPC.
:param build_id: ``int``, for example 12345
:returns: deferred that when fired returns a Build (Munch, dict-like)
object representing this Koji build, or None if no build was
found.
|
[
"Load",
"all",
"information",
"about",
"a",
"build",
"and",
"return",
"a",
"custom",
"Build",
"class",
"."
] |
python
|
train
|
IntegralDefense/splunklib
|
splunklib/__init__.py
|
https://github.com/IntegralDefense/splunklib/blob/c3a02c83daad20cf24838f52b22cd2476f062eed/splunklib/__init__.py#L15-L30
|
def create_timedelta(timespec):
"""Utility function to translate DD:HH:MM:SS into a timedelta object."""
duration = timespec.split(':')
seconds = int(duration[-1])
minutes = 0
hours = 0
days = 0
if len(duration) > 1:
minutes = int(duration[-2])
if len(duration) > 2:
hours = int(duration[-3])
if len(duration) > 3:
days = int(duration[-4])
return datetime.timedelta(days=days, seconds=seconds, minutes=minutes, hours=hours)
|
[
"def",
"create_timedelta",
"(",
"timespec",
")",
":",
"duration",
"=",
"timespec",
".",
"split",
"(",
"':'",
")",
"seconds",
"=",
"int",
"(",
"duration",
"[",
"-",
"1",
"]",
")",
"minutes",
"=",
"0",
"hours",
"=",
"0",
"days",
"=",
"0",
"if",
"len",
"(",
"duration",
")",
">",
"1",
":",
"minutes",
"=",
"int",
"(",
"duration",
"[",
"-",
"2",
"]",
")",
"if",
"len",
"(",
"duration",
")",
">",
"2",
":",
"hours",
"=",
"int",
"(",
"duration",
"[",
"-",
"3",
"]",
")",
"if",
"len",
"(",
"duration",
")",
">",
"3",
":",
"days",
"=",
"int",
"(",
"duration",
"[",
"-",
"4",
"]",
")",
"return",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"days",
",",
"seconds",
"=",
"seconds",
",",
"minutes",
"=",
"minutes",
",",
"hours",
"=",
"hours",
")"
] |
Utility function to translate DD:HH:MM:SS into a timedelta object.
|
[
"Utility",
"function",
"to",
"translate",
"DD",
":",
"HH",
":",
"MM",
":",
"SS",
"into",
"a",
"timedelta",
"object",
"."
] |
python
|
train
|
nchopin/particles
|
book/pmcmc/pmmh_lingauss_varying_scale.py
|
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/book/pmcmc/pmmh_lingauss_varying_scale.py#L31-L37
|
def msjd(theta):
"""Mean squared jumping distance.
"""
s = 0.
for p in theta.dtype.names:
s += np.sum(np.diff(theta[p], axis=0) ** 2)
return s
|
[
"def",
"msjd",
"(",
"theta",
")",
":",
"s",
"=",
"0.",
"for",
"p",
"in",
"theta",
".",
"dtype",
".",
"names",
":",
"s",
"+=",
"np",
".",
"sum",
"(",
"np",
".",
"diff",
"(",
"theta",
"[",
"p",
"]",
",",
"axis",
"=",
"0",
")",
"**",
"2",
")",
"return",
"s"
] |
Mean squared jumping distance.
|
[
"Mean",
"squared",
"jumping",
"distance",
"."
] |
python
|
train
|
snare/scruffy
|
scruffy/plugin.py
|
https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/plugin.py#L38-L65
|
def load_plugins(self, directory):
"""
Loads plugins from the specified directory.
`directory` is the full path to a directory containing python modules
which each contain a subclass of the Plugin class.
There is no criteria for a valid plugin at this level - any python
module found in the directory will be loaded. Only modules that
implement a subclass of the Plugin class above will be collected.
The directory will be traversed recursively.
"""
# walk directory
for filename in os.listdir(directory):
# path to file
filepath = os.path.join(directory, filename)
# if it's a file, load it
modname, ext = os.path.splitext(filename)
if os.path.isfile(filepath) and ext == '.py':
file, path, descr = imp.find_module(modname, [directory])
if file:
mod = imp.load_module(modname, file, path, descr)
# if it's a directory, recurse into it
if os.path.isdir(filepath):
self.load_plugins(filepath)
|
[
"def",
"load_plugins",
"(",
"self",
",",
"directory",
")",
":",
"# walk directory",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"# path to file",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"# if it's a file, load it",
"modname",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
"and",
"ext",
"==",
"'.py'",
":",
"file",
",",
"path",
",",
"descr",
"=",
"imp",
".",
"find_module",
"(",
"modname",
",",
"[",
"directory",
"]",
")",
"if",
"file",
":",
"mod",
"=",
"imp",
".",
"load_module",
"(",
"modname",
",",
"file",
",",
"path",
",",
"descr",
")",
"# if it's a directory, recurse into it",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filepath",
")",
":",
"self",
".",
"load_plugins",
"(",
"filepath",
")"
] |
Loads plugins from the specified directory.
`directory` is the full path to a directory containing python modules
which each contain a subclass of the Plugin class.
There is no criteria for a valid plugin at this level - any python
module found in the directory will be loaded. Only modules that
implement a subclass of the Plugin class above will be collected.
The directory will be traversed recursively.
|
[
"Loads",
"plugins",
"from",
"the",
"specified",
"directory",
"."
] |
python
|
test
|
meejah/txtorcon
|
txtorcon/torconfig.py
|
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/torconfig.py#L393-L408
|
def config_attributes(self):
"""
Helper method used by TorConfig when generating a torrc file.
"""
rtn = [('HiddenServiceDir', str(self.dir))]
if self.conf._supports['HiddenServiceDirGroupReadable'] \
and self.group_readable:
rtn.append(('HiddenServiceDirGroupReadable', str(1)))
for port in self.ports:
rtn.append(('HiddenServicePort', str(port)))
if self.version:
rtn.append(('HiddenServiceVersion', str(self.version)))
for authline in self.authorize_client:
rtn.append(('HiddenServiceAuthorizeClient', str(authline)))
return rtn
|
[
"def",
"config_attributes",
"(",
"self",
")",
":",
"rtn",
"=",
"[",
"(",
"'HiddenServiceDir'",
",",
"str",
"(",
"self",
".",
"dir",
")",
")",
"]",
"if",
"self",
".",
"conf",
".",
"_supports",
"[",
"'HiddenServiceDirGroupReadable'",
"]",
"and",
"self",
".",
"group_readable",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceDirGroupReadable'",
",",
"str",
"(",
"1",
")",
")",
")",
"for",
"port",
"in",
"self",
".",
"ports",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServicePort'",
",",
"str",
"(",
"port",
")",
")",
")",
"if",
"self",
".",
"version",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceVersion'",
",",
"str",
"(",
"self",
".",
"version",
")",
")",
")",
"for",
"authline",
"in",
"self",
".",
"authorize_client",
":",
"rtn",
".",
"append",
"(",
"(",
"'HiddenServiceAuthorizeClient'",
",",
"str",
"(",
"authline",
")",
")",
")",
"return",
"rtn"
] |
Helper method used by TorConfig when generating a torrc file.
|
[
"Helper",
"method",
"used",
"by",
"TorConfig",
"when",
"generating",
"a",
"torrc",
"file",
"."
] |
python
|
train
|
spyder-ide/spyder
|
spyder/widgets/fileswitcher.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/fileswitcher.py#L543-L552
|
def get_plugin_data(self, plugin):
"""Get the data object of the plugin's current tab manager."""
# The data object is named "data" in the editor plugin while it is
# named "clients" in the notebook plugin.
try:
data = plugin.get_current_tab_manager().data
except AttributeError:
data = plugin.get_current_tab_manager().clients
return data
|
[
"def",
"get_plugin_data",
"(",
"self",
",",
"plugin",
")",
":",
"# The data object is named \"data\" in the editor plugin while it is",
"# named \"clients\" in the notebook plugin.",
"try",
":",
"data",
"=",
"plugin",
".",
"get_current_tab_manager",
"(",
")",
".",
"data",
"except",
"AttributeError",
":",
"data",
"=",
"plugin",
".",
"get_current_tab_manager",
"(",
")",
".",
"clients",
"return",
"data"
] |
Get the data object of the plugin's current tab manager.
|
[
"Get",
"the",
"data",
"object",
"of",
"the",
"plugin",
"s",
"current",
"tab",
"manager",
"."
] |
python
|
train
|
crackinglandia/pype32
|
pype32/pype32.py
|
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L860-L870
|
def isPeBounded(self):
"""
Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}.
@rtype: bool
@return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}.
"""
boundImportsDir = self.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY]
if boundImportsDir.rva.value and boundImportsDir.size.value:
return True
return False
|
[
"def",
"isPeBounded",
"(",
"self",
")",
":",
"boundImportsDir",
"=",
"self",
".",
"ntHeaders",
".",
"optionalHeader",
".",
"dataDirectory",
"[",
"consts",
".",
"BOUND_IMPORT_DIRECTORY",
"]",
"if",
"boundImportsDir",
".",
"rva",
".",
"value",
"and",
"boundImportsDir",
".",
"size",
".",
"value",
":",
"return",
"True",
"return",
"False"
] |
Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}.
@rtype: bool
@return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}.
|
[
"Determines",
"if",
"the",
"current",
"L",
"{",
"PE",
"}",
"instance",
"is",
"bounded",
"i",
".",
"e",
".",
"has",
"a",
"C",
"{",
"BOUND_IMPORT_DIRECTORY",
"}",
"."
] |
python
|
train
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L1009-L1020
|
def create_albaran_automatic(pk, list_lines):
"""
creamos de forma automatica el albaran
"""
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
# solo aquellas lineas de pedidos que no estan ya albarandas
if line_bd.count() != 0:
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
GenLineProduct.create_albaran_from_order(pk, list_lines)
|
[
"def",
"create_albaran_automatic",
"(",
"pk",
",",
"list_lines",
")",
":",
"line_bd",
"=",
"SalesLineAlbaran",
".",
"objects",
".",
"filter",
"(",
"line_order__pk__in",
"=",
"list_lines",
")",
".",
"values_list",
"(",
"'line_order__pk'",
")",
"if",
"line_bd",
".",
"count",
"(",
")",
"==",
"0",
"or",
"len",
"(",
"list_lines",
")",
"!=",
"len",
"(",
"line_bd",
"[",
"0",
"]",
")",
":",
"# solo aquellas lineas de pedidos que no estan ya albarandas",
"if",
"line_bd",
".",
"count",
"(",
")",
"!=",
"0",
":",
"for",
"x",
"in",
"line_bd",
"[",
"0",
"]",
":",
"list_lines",
".",
"pop",
"(",
"list_lines",
".",
"index",
"(",
"x",
")",
")",
"GenLineProduct",
".",
"create_albaran_from_order",
"(",
"pk",
",",
"list_lines",
")"
] |
creamos de forma automatica el albaran
|
[
"creamos",
"de",
"forma",
"automatica",
"el",
"albaran"
] |
python
|
train
|
ccubed/PyMoe
|
Pymoe/Kitsu/auth.py
|
https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/auth.py#L50-L66
|
def refresh(self, refresh_token):
"""
Renew an oauth token given an appropriate refresh token.
:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time in unix time stamp)
"""
r = requests.post(self.apiurl + "/token", params={"grant_type": "refresh_token", "client_id": self.cid,
"client_secret": self.csecret,
"refresh_token": refresh_token})
if r.status_code != 200:
raise ServerError
jsd = r.json()
return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at'])
|
[
"def",
"refresh",
"(",
"self",
",",
"refresh_token",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"apiurl",
"+",
"\"/token\"",
",",
"params",
"=",
"{",
"\"grant_type\"",
":",
"\"refresh_token\"",
",",
"\"client_id\"",
":",
"self",
".",
"cid",
",",
"\"client_secret\"",
":",
"self",
".",
"csecret",
",",
"\"refresh_token\"",
":",
"refresh_token",
"}",
")",
"if",
"r",
".",
"status_code",
"!=",
"200",
":",
"raise",
"ServerError",
"jsd",
"=",
"r",
".",
"json",
"(",
")",
"return",
"jsd",
"[",
"'access_token'",
"]",
",",
"int",
"(",
"jsd",
"[",
"'expires_in'",
"]",
")",
"+",
"int",
"(",
"jsd",
"[",
"'created_at'",
"]",
")"
] |
Renew an oauth token given an appropriate refresh token.
:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time in unix time stamp)
|
[
"Renew",
"an",
"oauth",
"token",
"given",
"an",
"appropriate",
"refresh",
"token",
"."
] |
python
|
train
|
saltstack/salt
|
salt/runners/digicertapi.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/digicertapi.py#L92-L120
|
def _paginate(url, topkey, *args, **kwargs):
'''
Wrapper to assist with paginated responses from Digicert's REST API.
'''
ret = salt.utils.http.query(url, **kwargs)
if 'errors' in ret['dict']:
return ret['dict']
lim = int(ret['dict']['page']['limit'])
total = int(ret['dict']['page']['total'])
if total == 0:
return {}
numpages = (total / lim) + 1
# If the count returned is less than the page size, just return the dict
if numpages == 1:
return ret['dict'][topkey]
aggregate_ret = ret['dict'][topkey]
url = args[0]
for p in range(2, numpages):
param_url = url + '?offset={0}'.format(lim * (p - 1))
next_ret = salt.utils.http.query(param_url, kwargs)
aggregate_ret[topkey].extend(next_ret['dict'][topkey])
return aggregate_ret
|
[
"def",
"_paginate",
"(",
"url",
",",
"topkey",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"if",
"'errors'",
"in",
"ret",
"[",
"'dict'",
"]",
":",
"return",
"ret",
"[",
"'dict'",
"]",
"lim",
"=",
"int",
"(",
"ret",
"[",
"'dict'",
"]",
"[",
"'page'",
"]",
"[",
"'limit'",
"]",
")",
"total",
"=",
"int",
"(",
"ret",
"[",
"'dict'",
"]",
"[",
"'page'",
"]",
"[",
"'total'",
"]",
")",
"if",
"total",
"==",
"0",
":",
"return",
"{",
"}",
"numpages",
"=",
"(",
"total",
"/",
"lim",
")",
"+",
"1",
"# If the count returned is less than the page size, just return the dict",
"if",
"numpages",
"==",
"1",
":",
"return",
"ret",
"[",
"'dict'",
"]",
"[",
"topkey",
"]",
"aggregate_ret",
"=",
"ret",
"[",
"'dict'",
"]",
"[",
"topkey",
"]",
"url",
"=",
"args",
"[",
"0",
"]",
"for",
"p",
"in",
"range",
"(",
"2",
",",
"numpages",
")",
":",
"param_url",
"=",
"url",
"+",
"'?offset={0}'",
".",
"format",
"(",
"lim",
"*",
"(",
"p",
"-",
"1",
")",
")",
"next_ret",
"=",
"salt",
".",
"utils",
".",
"http",
".",
"query",
"(",
"param_url",
",",
"kwargs",
")",
"aggregate_ret",
"[",
"topkey",
"]",
".",
"extend",
"(",
"next_ret",
"[",
"'dict'",
"]",
"[",
"topkey",
"]",
")",
"return",
"aggregate_ret"
] |
Wrapper to assist with paginated responses from Digicert's REST API.
|
[
"Wrapper",
"to",
"assist",
"with",
"paginated",
"responses",
"from",
"Digicert",
"s",
"REST",
"API",
"."
] |
python
|
train
|
twaldear/flask-secure-headers
|
flask_secure_headers/core.py
|
https://github.com/twaldear/flask-secure-headers/blob/3eca972b369608a7669b67cbe66679570a6505ce/flask_secure_headers/core.py#L99-L111
|
def wrapper(self, updateParams=None):
""" create wrapper for flask app route """
def decorator(f):
_headers = self._getHeaders(updateParams)
""" flask decorator to include headers """
@wraps(f)
def decorated_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
self._setRespHeader(resp, _headers)
resp.has_secure_headers = True
return resp
return decorated_function
return decorator
|
[
"def",
"wrapper",
"(",
"self",
",",
"updateParams",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"_headers",
"=",
"self",
".",
"_getHeaders",
"(",
"updateParams",
")",
"\"\"\" flask decorator to include headers \"\"\"",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorated_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"resp",
"=",
"make_response",
"(",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"_setRespHeader",
"(",
"resp",
",",
"_headers",
")",
"resp",
".",
"has_secure_headers",
"=",
"True",
"return",
"resp",
"return",
"decorated_function",
"return",
"decorator"
] |
create wrapper for flask app route
|
[
"create",
"wrapper",
"for",
"flask",
"app",
"route"
] |
python
|
train
|
MolSSI-BSE/basis_set_exchange
|
basis_set_exchange/cli/bsecurate_handlers.py
|
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bsecurate_handlers.py#L78-L82
|
def _bsecurate_cli_view_graph(args):
'''Handles the view-graph subcommand'''
curate.view_graph(args.basis, args.version, args.data_dir)
return ''
|
[
"def",
"_bsecurate_cli_view_graph",
"(",
"args",
")",
":",
"curate",
".",
"view_graph",
"(",
"args",
".",
"basis",
",",
"args",
".",
"version",
",",
"args",
".",
"data_dir",
")",
"return",
"''"
] |
Handles the view-graph subcommand
|
[
"Handles",
"the",
"view",
"-",
"graph",
"subcommand"
] |
python
|
train
|
marcomusy/vtkplotter
|
vtkplotter/vtkio.py
|
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L635-L678
|
def convertNeutral2Xml(infile, outfile=None):
"""Convert Neutral file format to Dolfin XML."""
f = open(infile, "r")
lines = f.readlines()
f.close()
ncoords = int(lines[0])
fdolf_coords = []
for i in range(1, ncoords + 1):
x, y, z = lines[i].split()
fdolf_coords.append([float(x), float(y), float(z)])
ntets = int(lines[ncoords + 1])
idolf_tets = []
for i in range(ncoords + 2, ncoords + ntets + 2):
text = lines[i].split()
v0, v1, v2, v3 = text[1], text[2], text[3], text[4]
idolf_tets.append([int(v0) - 1, int(v1) - 1, int(v2) - 1, int(v3) - 1])
if outfile: # write dolfin xml
outF = open(outfile, "w")
outF.write('<?xml version="1.0" encoding="UTF-8"?>\n')
outF.write('<dolfin xmlns:dolfin="http://www.fenicsproject.org">\n')
outF.write(' <mesh celltype="tetrahedron" dim="3">\n')
outF.write(' <vertices size="' + str(ncoords) + '">\n')
for i in range(ncoords):
x, y, z = fdolf_coords[i]
outF.write(' <vertex index="'+str(i)
+ '" x="'+str(x)+'" y="'+str(y)+'" z="'+str(z)+'"/>\n')
outF.write(' </vertices>\n')
outF.write(' <cells size="' + str(ntets) + '">\n')
for i in range(ntets):
v0, v1, v2, v3 = idolf_tets[i]
outF.write(' <tetrahedron index="'+str(i)
+ '" v0="'+str(v0)+'" v1="'+str(v1)+'" v2="'+str(v2)+'" v3="'+str(v3)+'"/>\n')
outF.write(' </cells>\n')
outF.write(" </mesh>\n")
outF.write("</dolfin>\n")
outF.close()
return fdolf_coords, idolf_tets
|
[
"def",
"convertNeutral2Xml",
"(",
"infile",
",",
"outfile",
"=",
"None",
")",
":",
"f",
"=",
"open",
"(",
"infile",
",",
"\"r\"",
")",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"f",
".",
"close",
"(",
")",
"ncoords",
"=",
"int",
"(",
"lines",
"[",
"0",
"]",
")",
"fdolf_coords",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"ncoords",
"+",
"1",
")",
":",
"x",
",",
"y",
",",
"z",
"=",
"lines",
"[",
"i",
"]",
".",
"split",
"(",
")",
"fdolf_coords",
".",
"append",
"(",
"[",
"float",
"(",
"x",
")",
",",
"float",
"(",
"y",
")",
",",
"float",
"(",
"z",
")",
"]",
")",
"ntets",
"=",
"int",
"(",
"lines",
"[",
"ncoords",
"+",
"1",
"]",
")",
"idolf_tets",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"ncoords",
"+",
"2",
",",
"ncoords",
"+",
"ntets",
"+",
"2",
")",
":",
"text",
"=",
"lines",
"[",
"i",
"]",
".",
"split",
"(",
")",
"v0",
",",
"v1",
",",
"v2",
",",
"v3",
"=",
"text",
"[",
"1",
"]",
",",
"text",
"[",
"2",
"]",
",",
"text",
"[",
"3",
"]",
",",
"text",
"[",
"4",
"]",
"idolf_tets",
".",
"append",
"(",
"[",
"int",
"(",
"v0",
")",
"-",
"1",
",",
"int",
"(",
"v1",
")",
"-",
"1",
",",
"int",
"(",
"v2",
")",
"-",
"1",
",",
"int",
"(",
"v3",
")",
"-",
"1",
"]",
")",
"if",
"outfile",
":",
"# write dolfin xml",
"outF",
"=",
"open",
"(",
"outfile",
",",
"\"w\"",
")",
"outF",
".",
"write",
"(",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'",
")",
"outF",
".",
"write",
"(",
"'<dolfin xmlns:dolfin=\"http://www.fenicsproject.org\">\\n'",
")",
"outF",
".",
"write",
"(",
"' <mesh celltype=\"tetrahedron\" dim=\"3\">\\n'",
")",
"outF",
".",
"write",
"(",
"' <vertices size=\"'",
"+",
"str",
"(",
"ncoords",
")",
"+",
"'\">\\n'",
")",
"for",
"i",
"in",
"range",
"(",
"ncoords",
")",
":",
"x",
",",
"y",
",",
"z",
"=",
"fdolf_coords",
"[",
"i",
"]",
"outF",
".",
"write",
"(",
"' <vertex index=\"'",
"+",
"str",
"(",
"i",
")",
"+",
"'\" x=\"'",
"+",
"str",
"(",
"x",
")",
"+",
"'\" y=\"'",
"+",
"str",
"(",
"y",
")",
"+",
"'\" z=\"'",
"+",
"str",
"(",
"z",
")",
"+",
"'\"/>\\n'",
")",
"outF",
".",
"write",
"(",
"' </vertices>\\n'",
")",
"outF",
".",
"write",
"(",
"' <cells size=\"'",
"+",
"str",
"(",
"ntets",
")",
"+",
"'\">\\n'",
")",
"for",
"i",
"in",
"range",
"(",
"ntets",
")",
":",
"v0",
",",
"v1",
",",
"v2",
",",
"v3",
"=",
"idolf_tets",
"[",
"i",
"]",
"outF",
".",
"write",
"(",
"' <tetrahedron index=\"'",
"+",
"str",
"(",
"i",
")",
"+",
"'\" v0=\"'",
"+",
"str",
"(",
"v0",
")",
"+",
"'\" v1=\"'",
"+",
"str",
"(",
"v1",
")",
"+",
"'\" v2=\"'",
"+",
"str",
"(",
"v2",
")",
"+",
"'\" v3=\"'",
"+",
"str",
"(",
"v3",
")",
"+",
"'\"/>\\n'",
")",
"outF",
".",
"write",
"(",
"' </cells>\\n'",
")",
"outF",
".",
"write",
"(",
"\" </mesh>\\n\"",
")",
"outF",
".",
"write",
"(",
"\"</dolfin>\\n\"",
")",
"outF",
".",
"close",
"(",
")",
"return",
"fdolf_coords",
",",
"idolf_tets"
] |
Convert Neutral file format to Dolfin XML.
|
[
"Convert",
"Neutral",
"file",
"format",
"to",
"Dolfin",
"XML",
"."
] |
python
|
train
|
limix/numpy-sugar
|
numpy_sugar/linalg/solve.py
|
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L31-L98
|
def hsolve(A, y):
r"""Solver for the linear equations of two variables and equations only.
It uses Householder reductions to solve ``Ax = y`` in a robust manner.
Parameters
----------
A : array_like
Coefficient matrix.
y : array_like
Ordinate values.
Returns
-------
:class:`numpy.ndarray` Solution ``x``.
"""
n = _norm(A[0, 0], A[1, 0])
u0 = A[0, 0] - n
u1 = A[1, 0]
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
B00 = 1 - 2 * v0 * v0
B01 = 0 - 2 * v0 * v1
B11 = 1 - 2 * v1 * v1
D00 = B00 * A[0, 0] + B01 * A[1, 0]
D01 = B00 * A[0, 1] + B01 * A[1, 1]
D11 = B01 * A[0, 1] + B11 * A[1, 1]
b0 = y[0] - 2 * y[0] * v0 * v0 - 2 * y[1] * v0 * v1
b1 = y[1] - 2 * y[0] * v1 * v0 - 2 * y[1] * v1 * v1
n = _norm(D00, D01)
u0 = D00 - n
u1 = D01
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
E00 = 1 - 2 * v0 * v0
E01 = 0 - 2 * v0 * v1
E11 = 1 - 2 * v1 * v1
F00 = E00 * D00 + E01 * D01
F01 = E01 * D11
F11 = E11 * D11
F11 = (npy_abs(F11) > epsilon.small) * F11
with errstate(divide="ignore", invalid="ignore"):
Fi00 = nan_to_num(F00 / F00 / F00)
Fi11 = nan_to_num(F11 / F11 / F11)
Fi10 = nan_to_num(-(F01 / F00) * Fi11)
c0 = Fi00 * b0
c1 = Fi10 * b0 + Fi11 * b1
x0 = E00 * c0 + E01 * c1
x1 = E01 * c0 + E11 * c1
return array([x0, x1])
|
[
"def",
"hsolve",
"(",
"A",
",",
"y",
")",
":",
"n",
"=",
"_norm",
"(",
"A",
"[",
"0",
",",
"0",
"]",
",",
"A",
"[",
"1",
",",
"0",
"]",
")",
"u0",
"=",
"A",
"[",
"0",
",",
"0",
"]",
"-",
"n",
"u1",
"=",
"A",
"[",
"1",
",",
"0",
"]",
"nu",
"=",
"_norm",
"(",
"u0",
",",
"u1",
")",
"with",
"errstate",
"(",
"invalid",
"=",
"\"ignore\"",
",",
"divide",
"=",
"\"ignore\"",
")",
":",
"v0",
"=",
"nan_to_num",
"(",
"u0",
"/",
"nu",
")",
"v1",
"=",
"nan_to_num",
"(",
"u1",
"/",
"nu",
")",
"B00",
"=",
"1",
"-",
"2",
"*",
"v0",
"*",
"v0",
"B01",
"=",
"0",
"-",
"2",
"*",
"v0",
"*",
"v1",
"B11",
"=",
"1",
"-",
"2",
"*",
"v1",
"*",
"v1",
"D00",
"=",
"B00",
"*",
"A",
"[",
"0",
",",
"0",
"]",
"+",
"B01",
"*",
"A",
"[",
"1",
",",
"0",
"]",
"D01",
"=",
"B00",
"*",
"A",
"[",
"0",
",",
"1",
"]",
"+",
"B01",
"*",
"A",
"[",
"1",
",",
"1",
"]",
"D11",
"=",
"B01",
"*",
"A",
"[",
"0",
",",
"1",
"]",
"+",
"B11",
"*",
"A",
"[",
"1",
",",
"1",
"]",
"b0",
"=",
"y",
"[",
"0",
"]",
"-",
"2",
"*",
"y",
"[",
"0",
"]",
"*",
"v0",
"*",
"v0",
"-",
"2",
"*",
"y",
"[",
"1",
"]",
"*",
"v0",
"*",
"v1",
"b1",
"=",
"y",
"[",
"1",
"]",
"-",
"2",
"*",
"y",
"[",
"0",
"]",
"*",
"v1",
"*",
"v0",
"-",
"2",
"*",
"y",
"[",
"1",
"]",
"*",
"v1",
"*",
"v1",
"n",
"=",
"_norm",
"(",
"D00",
",",
"D01",
")",
"u0",
"=",
"D00",
"-",
"n",
"u1",
"=",
"D01",
"nu",
"=",
"_norm",
"(",
"u0",
",",
"u1",
")",
"with",
"errstate",
"(",
"invalid",
"=",
"\"ignore\"",
",",
"divide",
"=",
"\"ignore\"",
")",
":",
"v0",
"=",
"nan_to_num",
"(",
"u0",
"/",
"nu",
")",
"v1",
"=",
"nan_to_num",
"(",
"u1",
"/",
"nu",
")",
"E00",
"=",
"1",
"-",
"2",
"*",
"v0",
"*",
"v0",
"E01",
"=",
"0",
"-",
"2",
"*",
"v0",
"*",
"v1",
"E11",
"=",
"1",
"-",
"2",
"*",
"v1",
"*",
"v1",
"F00",
"=",
"E00",
"*",
"D00",
"+",
"E01",
"*",
"D01",
"F01",
"=",
"E01",
"*",
"D11",
"F11",
"=",
"E11",
"*",
"D11",
"F11",
"=",
"(",
"npy_abs",
"(",
"F11",
")",
">",
"epsilon",
".",
"small",
")",
"*",
"F11",
"with",
"errstate",
"(",
"divide",
"=",
"\"ignore\"",
",",
"invalid",
"=",
"\"ignore\"",
")",
":",
"Fi00",
"=",
"nan_to_num",
"(",
"F00",
"/",
"F00",
"/",
"F00",
")",
"Fi11",
"=",
"nan_to_num",
"(",
"F11",
"/",
"F11",
"/",
"F11",
")",
"Fi10",
"=",
"nan_to_num",
"(",
"-",
"(",
"F01",
"/",
"F00",
")",
"*",
"Fi11",
")",
"c0",
"=",
"Fi00",
"*",
"b0",
"c1",
"=",
"Fi10",
"*",
"b0",
"+",
"Fi11",
"*",
"b1",
"x0",
"=",
"E00",
"*",
"c0",
"+",
"E01",
"*",
"c1",
"x1",
"=",
"E01",
"*",
"c0",
"+",
"E11",
"*",
"c1",
"return",
"array",
"(",
"[",
"x0",
",",
"x1",
"]",
")"
] |
r"""Solver for the linear equations of two variables and equations only.
It uses Householder reductions to solve ``Ax = y`` in a robust manner.
Parameters
----------
A : array_like
Coefficient matrix.
y : array_like
Ordinate values.
Returns
-------
:class:`numpy.ndarray` Solution ``x``.
|
[
"r",
"Solver",
"for",
"the",
"linear",
"equations",
"of",
"two",
"variables",
"and",
"equations",
"only",
"."
] |
python
|
train
|
apache/incubator-mxnet
|
example/ctc/lstm_ocr_infer.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/lstm_ocr_infer.py#L65-L88
|
def main():
"""Program entry point"""
parser = argparse.ArgumentParser()
parser.add_argument("path", help="Path to the CAPTCHA image file")
parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100)
args = parser.parse_args()
init_state_names, init_state_arrays = lstm_init_states(batch_size=1)
img = read_img(args.path)
sample = SimpleBatch(
data_names=['data'] + init_state_names,
data=[mx.nd.array(img)] + init_state_arrays)
mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data)
mod.forward(sample)
prob = mod.get_outputs()[0].asnumpy()
prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist())
# Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)
prediction = [p - 1 for p in prediction]
print("Digits:", prediction)
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"path\"",
",",
"help",
"=",
"\"Path to the CAPTCHA image file\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--prefix\"",
",",
"help",
"=",
"\"Checkpoint prefix [Default 'ocr']\"",
",",
"default",
"=",
"'ocr'",
")",
"parser",
".",
"add_argument",
"(",
"\"--epoch\"",
",",
"help",
"=",
"\"Checkpoint epoch [Default 100]\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"init_state_names",
",",
"init_state_arrays",
"=",
"lstm_init_states",
"(",
"batch_size",
"=",
"1",
")",
"img",
"=",
"read_img",
"(",
"args",
".",
"path",
")",
"sample",
"=",
"SimpleBatch",
"(",
"data_names",
"=",
"[",
"'data'",
"]",
"+",
"init_state_names",
",",
"data",
"=",
"[",
"mx",
".",
"nd",
".",
"array",
"(",
"img",
")",
"]",
"+",
"init_state_arrays",
")",
"mod",
"=",
"load_module",
"(",
"args",
".",
"prefix",
",",
"args",
".",
"epoch",
",",
"sample",
".",
"data_names",
",",
"sample",
".",
"provide_data",
")",
"mod",
".",
"forward",
"(",
"sample",
")",
"prob",
"=",
"mod",
".",
"get_outputs",
"(",
")",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
"prediction",
"=",
"CtcMetrics",
".",
"ctc_label",
"(",
"np",
".",
"argmax",
"(",
"prob",
",",
"axis",
"=",
"-",
"1",
")",
".",
"tolist",
"(",
")",
")",
"# Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)",
"prediction",
"=",
"[",
"p",
"-",
"1",
"for",
"p",
"in",
"prediction",
"]",
"print",
"(",
"\"Digits:\"",
",",
"prediction",
")"
] |
Program entry point
|
[
"Program",
"entry",
"point"
] |
python
|
train
|
mongodb/mongo-python-driver
|
pymongo/mongo_client.py
|
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/mongo_client.py#L1941-L1989
|
def get_default_database(self, default=None, codec_options=None,
read_preference=None, write_concern=None, read_concern=None):
"""Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
>>> db = client.get_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
:Parameters:
- `default` (optional): the database name to use if no database name
was provided in the URI.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`MongoClient` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`MongoClient` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`MongoClient` is
used.
.. versionchanged:: 3.8
Undeprecated. Added the ``default``, ``codec_options``,
``read_preference``, ``write_concern`` and ``read_concern``
parameters.
.. versionchanged:: 3.5
Deprecated, use :meth:`get_database` instead.
"""
if self.__default_database_name is None and default is None:
raise ConfigurationError(
'No default database name defined or provided.')
return database.Database(
self, self.__default_database_name or default, codec_options,
read_preference, write_concern, read_concern)
|
[
"def",
"get_default_database",
"(",
"self",
",",
"default",
"=",
"None",
",",
"codec_options",
"=",
"None",
",",
"read_preference",
"=",
"None",
",",
"write_concern",
"=",
"None",
",",
"read_concern",
"=",
"None",
")",
":",
"if",
"self",
".",
"__default_database_name",
"is",
"None",
"and",
"default",
"is",
"None",
":",
"raise",
"ConfigurationError",
"(",
"'No default database name defined or provided.'",
")",
"return",
"database",
".",
"Database",
"(",
"self",
",",
"self",
".",
"__default_database_name",
"or",
"default",
",",
"codec_options",
",",
"read_preference",
",",
"write_concern",
",",
"read_concern",
")"
] |
Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
>>> db = client.get_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
:Parameters:
- `default` (optional): the database name to use if no database name
was provided in the URI.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`MongoClient` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`MongoClient` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`MongoClient` is
used.
.. versionchanged:: 3.8
Undeprecated. Added the ``default``, ``codec_options``,
``read_preference``, ``write_concern`` and ``read_concern``
parameters.
.. versionchanged:: 3.5
Deprecated, use :meth:`get_database` instead.
|
[
"Get",
"the",
"database",
"named",
"in",
"the",
"MongoDB",
"connection",
"URI",
"."
] |
python
|
train
|
fermiPy/fermipy
|
fermipy/scripts/cluster_sources.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/cluster_sources.py#L157-L184
|
def make_rev_dict_unique(cdict):
""" Make a reverse dictionary
Parameters
----------
in_dict : dict(int:dict(int:True))
A dictionary of clusters. Each cluster is a source index and
the dictionary of other sources in the cluster.
Returns
-------
rev_dict : dict(int:dict(int:True))
A dictionary pointing from source index to the clusters it is
included in.
"""
rev_dict = {}
for k, v in cdict.items():
if k in rev_dict:
rev_dict[k][k] = True
else:
rev_dict[k] = {k: True}
for vv in v.keys():
if vv in rev_dict:
rev_dict[vv][k] = True
else:
rev_dict[vv] = {k: True}
return rev_dict
|
[
"def",
"make_rev_dict_unique",
"(",
"cdict",
")",
":",
"rev_dict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"cdict",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"rev_dict",
":",
"rev_dict",
"[",
"k",
"]",
"[",
"k",
"]",
"=",
"True",
"else",
":",
"rev_dict",
"[",
"k",
"]",
"=",
"{",
"k",
":",
"True",
"}",
"for",
"vv",
"in",
"v",
".",
"keys",
"(",
")",
":",
"if",
"vv",
"in",
"rev_dict",
":",
"rev_dict",
"[",
"vv",
"]",
"[",
"k",
"]",
"=",
"True",
"else",
":",
"rev_dict",
"[",
"vv",
"]",
"=",
"{",
"k",
":",
"True",
"}",
"return",
"rev_dict"
] |
Make a reverse dictionary
Parameters
----------
in_dict : dict(int:dict(int:True))
A dictionary of clusters. Each cluster is a source index and
the dictionary of other sources in the cluster.
Returns
-------
rev_dict : dict(int:dict(int:True))
A dictionary pointing from source index to the clusters it is
included in.
|
[
"Make",
"a",
"reverse",
"dictionary"
] |
python
|
train
|
TUT-ARG/sed_eval
|
sed_eval/sound_event.py
|
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/sound_event.py#L1705-L1741
|
def overall_error_rate(self):
"""Overall error rate metrics (error_rate, substitution_rate, deletion_rate, and insertion_rate)
Returns
-------
dict
results in a dictionary format
"""
substitution_rate = metric.substitution_rate(
Nref=self.overall['Nref'],
Nsubstitutions=self.overall['Nsubs']
)
deletion_rate = metric.deletion_rate(
Nref=self.overall['Nref'],
Ndeletions=self.overall['Nfn']
)
insertion_rate = metric.insertion_rate(
Nref=self.overall['Nref'],
Ninsertions=self.overall['Nfp']
)
error_rate = metric.error_rate(
substitution_rate_value=substitution_rate,
deletion_rate_value=deletion_rate,
insertion_rate_value=insertion_rate
)
return {
'error_rate': error_rate,
'substitution_rate': substitution_rate,
'deletion_rate': deletion_rate,
'insertion_rate': insertion_rate
}
|
[
"def",
"overall_error_rate",
"(",
"self",
")",
":",
"substitution_rate",
"=",
"metric",
".",
"substitution_rate",
"(",
"Nref",
"=",
"self",
".",
"overall",
"[",
"'Nref'",
"]",
",",
"Nsubstitutions",
"=",
"self",
".",
"overall",
"[",
"'Nsubs'",
"]",
")",
"deletion_rate",
"=",
"metric",
".",
"deletion_rate",
"(",
"Nref",
"=",
"self",
".",
"overall",
"[",
"'Nref'",
"]",
",",
"Ndeletions",
"=",
"self",
".",
"overall",
"[",
"'Nfn'",
"]",
")",
"insertion_rate",
"=",
"metric",
".",
"insertion_rate",
"(",
"Nref",
"=",
"self",
".",
"overall",
"[",
"'Nref'",
"]",
",",
"Ninsertions",
"=",
"self",
".",
"overall",
"[",
"'Nfp'",
"]",
")",
"error_rate",
"=",
"metric",
".",
"error_rate",
"(",
"substitution_rate_value",
"=",
"substitution_rate",
",",
"deletion_rate_value",
"=",
"deletion_rate",
",",
"insertion_rate_value",
"=",
"insertion_rate",
")",
"return",
"{",
"'error_rate'",
":",
"error_rate",
",",
"'substitution_rate'",
":",
"substitution_rate",
",",
"'deletion_rate'",
":",
"deletion_rate",
",",
"'insertion_rate'",
":",
"insertion_rate",
"}"
] |
Overall error rate metrics (error_rate, substitution_rate, deletion_rate, and insertion_rate)
Returns
-------
dict
results in a dictionary format
|
[
"Overall",
"error",
"rate",
"metrics",
"(",
"error_rate",
"substitution_rate",
"deletion_rate",
"and",
"insertion_rate",
")"
] |
python
|
train
|
ModisWorks/modis
|
modis/discord_modis/modules/music/_musicplayer.py
|
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/music/_musicplayer.py#L401-L468
|
async def remove(self, index=""):
"""
The remove command
Args:
index (str): The index to remove, can be either a number, or a range in the for '##-##'
"""
if not self.state == 'ready':
logger.debug("Trying to remove from wrong state '{}'".format(self.state))
return
if index == "":
self.statuslog.error("Must provide index to remove")
return
elif index == "all":
self.queue = []
self.update_queue()
self.statuslog.info("Removed all songs")
return
indexes = index.split("-")
self.logger.debug("Removing {}".format(indexes))
try:
if len(indexes) == 0:
self.statuslog.error("Remove must specify an index or range")
return
elif len(indexes) == 1:
num_lower = int(indexes[0]) - 1
num_upper = num_lower + 1
elif len(indexes) == 2:
num_lower = int(indexes[0]) - 1
num_upper = int(indexes[1])
else:
self.statuslog.error("Cannot have more than 2 indexes for remove range")
return
except TypeError:
self.statuslog.error("Remove index must be a number")
return
except ValueError:
self.statuslog.error("Remove index must be a number")
return
if num_lower < 0 or num_lower >= len(self.queue) or num_upper > len(self.queue):
if len(self.queue) == 0:
self.statuslog.warning("No songs in queue")
elif len(self.queue) == 1:
self.statuslog.error("Remove index must be 1 (only 1 song in queue)")
else:
self.statuslog.error("Remove index must be between 1 and {}".format(len(self.queue)))
return
if num_upper <= num_lower:
self.statuslog.error("Second index in range must be greater than first")
return
lower_songname = self.queue[num_lower][1]
for num in range(0, num_upper - num_lower):
self.logger.debug("Removed {}".format(self.queue[num_lower][1]))
self.queue.pop(num_lower)
if len(indexes) == 1:
self.statuslog.info("Removed {}".format(lower_songname))
else:
self.statuslog.info("Removed songs {}-{}".format(num_lower + 1, num_upper))
self.update_queue()
|
[
"async",
"def",
"remove",
"(",
"self",
",",
"index",
"=",
"\"\"",
")",
":",
"if",
"not",
"self",
".",
"state",
"==",
"'ready'",
":",
"logger",
".",
"debug",
"(",
"\"Trying to remove from wrong state '{}'\"",
".",
"format",
"(",
"self",
".",
"state",
")",
")",
"return",
"if",
"index",
"==",
"\"\"",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Must provide index to remove\"",
")",
"return",
"elif",
"index",
"==",
"\"all\"",
":",
"self",
".",
"queue",
"=",
"[",
"]",
"self",
".",
"update_queue",
"(",
")",
"self",
".",
"statuslog",
".",
"info",
"(",
"\"Removed all songs\"",
")",
"return",
"indexes",
"=",
"index",
".",
"split",
"(",
"\"-\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Removing {}\"",
".",
"format",
"(",
"indexes",
")",
")",
"try",
":",
"if",
"len",
"(",
"indexes",
")",
"==",
"0",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Remove must specify an index or range\"",
")",
"return",
"elif",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"num_lower",
"=",
"int",
"(",
"indexes",
"[",
"0",
"]",
")",
"-",
"1",
"num_upper",
"=",
"num_lower",
"+",
"1",
"elif",
"len",
"(",
"indexes",
")",
"==",
"2",
":",
"num_lower",
"=",
"int",
"(",
"indexes",
"[",
"0",
"]",
")",
"-",
"1",
"num_upper",
"=",
"int",
"(",
"indexes",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Cannot have more than 2 indexes for remove range\"",
")",
"return",
"except",
"TypeError",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Remove index must be a number\"",
")",
"return",
"except",
"ValueError",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Remove index must be a number\"",
")",
"return",
"if",
"num_lower",
"<",
"0",
"or",
"num_lower",
">=",
"len",
"(",
"self",
".",
"queue",
")",
"or",
"num_upper",
">",
"len",
"(",
"self",
".",
"queue",
")",
":",
"if",
"len",
"(",
"self",
".",
"queue",
")",
"==",
"0",
":",
"self",
".",
"statuslog",
".",
"warning",
"(",
"\"No songs in queue\"",
")",
"elif",
"len",
"(",
"self",
".",
"queue",
")",
"==",
"1",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Remove index must be 1 (only 1 song in queue)\"",
")",
"else",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Remove index must be between 1 and {}\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"queue",
")",
")",
")",
"return",
"if",
"num_upper",
"<=",
"num_lower",
":",
"self",
".",
"statuslog",
".",
"error",
"(",
"\"Second index in range must be greater than first\"",
")",
"return",
"lower_songname",
"=",
"self",
".",
"queue",
"[",
"num_lower",
"]",
"[",
"1",
"]",
"for",
"num",
"in",
"range",
"(",
"0",
",",
"num_upper",
"-",
"num_lower",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Removed {}\"",
".",
"format",
"(",
"self",
".",
"queue",
"[",
"num_lower",
"]",
"[",
"1",
"]",
")",
")",
"self",
".",
"queue",
".",
"pop",
"(",
"num_lower",
")",
"if",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"self",
".",
"statuslog",
".",
"info",
"(",
"\"Removed {}\"",
".",
"format",
"(",
"lower_songname",
")",
")",
"else",
":",
"self",
".",
"statuslog",
".",
"info",
"(",
"\"Removed songs {}-{}\"",
".",
"format",
"(",
"num_lower",
"+",
"1",
",",
"num_upper",
")",
")",
"self",
".",
"update_queue",
"(",
")"
] |
The remove command
Args:
index (str): The index to remove, can be either a number, or a range in the for '##-##'
|
[
"The",
"remove",
"command"
] |
python
|
train
|
xeroc/python-graphenelib
|
graphenecommon/blockchain.py
|
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/blockchain.py#L224-L248
|
def stream(self, opNames=[], *args, **kwargs):
""" Yield specific operations (e.g. comments) only
:param array opNames: List of operations to filter for
:param int start: Start at this block
:param int stop: Stop at this block
:param str mode: We here have the choice between
* "head": the last block
* "irreversible": the block that is confirmed by 2/3 of all
block producers and is thus irreversible!
The dict output is formated such that ``type`` caries the
operation type, timestamp and block_num are taken from the
block the operation was stored in and the other key depend
on the actualy operation.
"""
for op in self.ops(**kwargs):
if not opNames or op["op"][0] in opNames:
r = {
"type": op["op"][0],
"timestamp": op.get("timestamp"),
"block_num": op.get("block_num"),
}
r.update(op["op"][1])
yield r
|
[
"def",
"stream",
"(",
"self",
",",
"opNames",
"=",
"[",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"op",
"in",
"self",
".",
"ops",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"opNames",
"or",
"op",
"[",
"\"op\"",
"]",
"[",
"0",
"]",
"in",
"opNames",
":",
"r",
"=",
"{",
"\"type\"",
":",
"op",
"[",
"\"op\"",
"]",
"[",
"0",
"]",
",",
"\"timestamp\"",
":",
"op",
".",
"get",
"(",
"\"timestamp\"",
")",
",",
"\"block_num\"",
":",
"op",
".",
"get",
"(",
"\"block_num\"",
")",
",",
"}",
"r",
".",
"update",
"(",
"op",
"[",
"\"op\"",
"]",
"[",
"1",
"]",
")",
"yield",
"r"
] |
Yield specific operations (e.g. comments) only
:param array opNames: List of operations to filter for
:param int start: Start at this block
:param int stop: Stop at this block
:param str mode: We here have the choice between
* "head": the last block
* "irreversible": the block that is confirmed by 2/3 of all
block producers and is thus irreversible!
The dict output is formated such that ``type`` caries the
operation type, timestamp and block_num are taken from the
block the operation was stored in and the other key depend
on the actualy operation.
|
[
"Yield",
"specific",
"operations",
"(",
"e",
".",
"g",
".",
"comments",
")",
"only"
] |
python
|
valid
|
docker/docker-py
|
docker/models/images.py
|
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/images.py#L41-L48
|
def tags(self):
"""
The image's tags.
"""
tags = self.attrs.get('RepoTags')
if tags is None:
tags = []
return [tag for tag in tags if tag != '<none>:<none>']
|
[
"def",
"tags",
"(",
"self",
")",
":",
"tags",
"=",
"self",
".",
"attrs",
".",
"get",
"(",
"'RepoTags'",
")",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"return",
"[",
"tag",
"for",
"tag",
"in",
"tags",
"if",
"tag",
"!=",
"'<none>:<none>'",
"]"
] |
The image's tags.
|
[
"The",
"image",
"s",
"tags",
"."
] |
python
|
train
|
spantaleev/flask-sijax
|
flask_sijax.py
|
https://github.com/spantaleev/flask-sijax/blob/df9f6d9b8385b3375c119a51aa100491d5445e17/flask_sijax.py#L85-L97
|
def register_comet_callback(self, *args, **kwargs):
"""Registers a single Comet callback function
(see :ref:`comet-plugin`).
Refer to :func:`sijax.plugin.comet.register_comet_callback`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_callback`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it.
"""
sijax.plugin.comet.register_comet_callback(self._sijax, *args, **kwargs)
|
[
"def",
"register_comet_callback",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"sijax",
".",
"plugin",
".",
"comet",
".",
"register_comet_callback",
"(",
"self",
".",
"_sijax",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Registers a single Comet callback function
(see :ref:`comet-plugin`).
Refer to :func:`sijax.plugin.comet.register_comet_callback`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_callback`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it.
|
[
"Registers",
"a",
"single",
"Comet",
"callback",
"function",
"(",
"see",
":",
"ref",
":",
"comet",
"-",
"plugin",
")",
"."
] |
python
|
train
|
hyperledger/indy-sdk
|
wrappers/python/indy/did.py
|
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/did.py#L245-L275
|
async def get_key_metadata(wallet_handle: int,
verkey: str) -> str:
"""
Retrieves the meta information for the giving key in the wallet.
:param wallet_handle: Wallet handle (created by open_wallet).
:param verkey: The key (verkey, key id) to retrieve metadata.
:return: metadata: The meta information stored with the key; Can be null if no metadata was saved for this key.
"""
logger = logging.getLogger(__name__)
logger.debug("get_key_metadata: >>> wallet_handle: %r, verkey: %r",
wallet_handle,
verkey)
if not hasattr(get_key_metadata, "cb"):
logger.debug("get_key_metadata: Creating callback")
get_key_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_verkey = c_char_p(verkey.encode('utf-8'))
metadata = await do_call('indy_get_key_metadata',
c_wallet_handle,
c_verkey,
get_key_metadata.cb)
res = metadata.decode()
logger.debug("get_key_metadata: <<< res: %r", res)
return res
|
[
"async",
"def",
"get_key_metadata",
"(",
"wallet_handle",
":",
"int",
",",
"verkey",
":",
"str",
")",
"->",
"str",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"get_key_metadata: >>> wallet_handle: %r, verkey: %r\"",
",",
"wallet_handle",
",",
"verkey",
")",
"if",
"not",
"hasattr",
"(",
"get_key_metadata",
",",
"\"cb\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"get_key_metadata: Creating callback\"",
")",
"get_key_metadata",
".",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_int32",
",",
"c_int32",
",",
"c_char_p",
")",
")",
"c_wallet_handle",
"=",
"c_int32",
"(",
"wallet_handle",
")",
"c_verkey",
"=",
"c_char_p",
"(",
"verkey",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"metadata",
"=",
"await",
"do_call",
"(",
"'indy_get_key_metadata'",
",",
"c_wallet_handle",
",",
"c_verkey",
",",
"get_key_metadata",
".",
"cb",
")",
"res",
"=",
"metadata",
".",
"decode",
"(",
")",
"logger",
".",
"debug",
"(",
"\"get_key_metadata: <<< res: %r\"",
",",
"res",
")",
"return",
"res"
] |
Retrieves the meta information for the giving key in the wallet.
:param wallet_handle: Wallet handle (created by open_wallet).
:param verkey: The key (verkey, key id) to retrieve metadata.
:return: metadata: The meta information stored with the key; Can be null if no metadata was saved for this key.
|
[
"Retrieves",
"the",
"meta",
"information",
"for",
"the",
"giving",
"key",
"in",
"the",
"wallet",
"."
] |
python
|
train
|
basho/riak-python-client
|
riak/transports/tcp/transport.py
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/transport.py#L286-L299
|
def set_bucket_props(self, bucket, props):
"""
Serialize set bucket property request and deserialize response
"""
if not self.pb_all_bucket_props():
for key in props:
if key not in ('n_val', 'allow_mult'):
raise NotImplementedError('Server only supports n_val and '
'allow_mult properties over PBC')
msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ
codec = self._get_codec(msg_code)
msg = codec.encode_set_bucket_props(bucket, props)
resp_code, resp = self._request(msg, codec)
return True
|
[
"def",
"set_bucket_props",
"(",
"self",
",",
"bucket",
",",
"props",
")",
":",
"if",
"not",
"self",
".",
"pb_all_bucket_props",
"(",
")",
":",
"for",
"key",
"in",
"props",
":",
"if",
"key",
"not",
"in",
"(",
"'n_val'",
",",
"'allow_mult'",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Server only supports n_val and '",
"'allow_mult properties over PBC'",
")",
"msg_code",
"=",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_SET_BUCKET_REQ",
"codec",
"=",
"self",
".",
"_get_codec",
"(",
"msg_code",
")",
"msg",
"=",
"codec",
".",
"encode_set_bucket_props",
"(",
"bucket",
",",
"props",
")",
"resp_code",
",",
"resp",
"=",
"self",
".",
"_request",
"(",
"msg",
",",
"codec",
")",
"return",
"True"
] |
Serialize set bucket property request and deserialize response
|
[
"Serialize",
"set",
"bucket",
"property",
"request",
"and",
"deserialize",
"response"
] |
python
|
train
|
B2W-BIT/aiologger
|
aiologger/handlers/files.py
|
https://github.com/B2W-BIT/aiologger/blob/0b366597a8305d5577a267305e81d5e4784cd398/aiologger/handlers/files.py#L106-L120
|
async def emit(self, record: LogRecord): # type: ignore
"""
Emit a record.
Output the record to the file, catering for rollover as described
in `do_rollover`.
"""
try:
if self.should_rollover(record):
async with self._rollover_lock:
if self.should_rollover(record):
await self.do_rollover()
await super().emit(record)
except Exception as e:
await self.handleError(record)
|
[
"async",
"def",
"emit",
"(",
"self",
",",
"record",
":",
"LogRecord",
")",
":",
"# type: ignore",
"try",
":",
"if",
"self",
".",
"should_rollover",
"(",
"record",
")",
":",
"async",
"with",
"self",
".",
"_rollover_lock",
":",
"if",
"self",
".",
"should_rollover",
"(",
"record",
")",
":",
"await",
"self",
".",
"do_rollover",
"(",
")",
"await",
"super",
"(",
")",
".",
"emit",
"(",
"record",
")",
"except",
"Exception",
"as",
"e",
":",
"await",
"self",
".",
"handleError",
"(",
"record",
")"
] |
Emit a record.
Output the record to the file, catering for rollover as described
in `do_rollover`.
|
[
"Emit",
"a",
"record",
"."
] |
python
|
train
|
elastic/elasticsearch-py
|
elasticsearch/client/xpack/ml.py
|
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ml.py#L231-L255
|
def flush_job(self, job_id, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_
:arg job_id: The name of the job to flush
:arg body: Flush parameters
:arg advance_time: Advances time to the given value generating results
and updating the model for the advanced interval
:arg calc_interim: Calculates interim results for the most recent bucket
or all buckets within the latency period
:arg end: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
:arg skip_time: Skips time to the given value without generating results
or updating the model for the skipped interval
:arg start: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
"""
if job_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'job_id'.")
return self.transport.perform_request(
"POST",
_make_path("_ml", "anomaly_detectors", job_id, "_flush"),
params=params,
body=body,
)
|
[
"def",
"flush_job",
"(",
"self",
",",
"job_id",
",",
"body",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"job_id",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument 'job_id'.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"POST\"",
",",
"_make_path",
"(",
"\"_ml\"",
",",
"\"anomaly_detectors\"",
",",
"job_id",
",",
"\"_flush\"",
")",
",",
"params",
"=",
"params",
",",
"body",
"=",
"body",
",",
")"
] |
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_
:arg job_id: The name of the job to flush
:arg body: Flush parameters
:arg advance_time: Advances time to the given value generating results
and updating the model for the advanced interval
:arg calc_interim: Calculates interim results for the most recent bucket
or all buckets within the latency period
:arg end: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
:arg skip_time: Skips time to the given value without generating results
or updating the model for the skipped interval
:arg start: When used in conjunction with calc_interim, specifies the
range of buckets on which to calculate interim results
|
[
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"ml",
"-",
"flush",
"-",
"job",
".",
"html",
">",
"_"
] |
python
|
train
|
jorgenschaefer/elpy
|
elpy/server.py
|
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L210-L215
|
def rpc_fix_code(self, source, directory):
"""Formats Python code to conform to the PEP 8 style guide.
"""
source = get_source(source)
return fix_code(source, directory)
|
[
"def",
"rpc_fix_code",
"(",
"self",
",",
"source",
",",
"directory",
")",
":",
"source",
"=",
"get_source",
"(",
"source",
")",
"return",
"fix_code",
"(",
"source",
",",
"directory",
")"
] |
Formats Python code to conform to the PEP 8 style guide.
|
[
"Formats",
"Python",
"code",
"to",
"conform",
"to",
"the",
"PEP",
"8",
"style",
"guide",
"."
] |
python
|
train
|
apple/turicreate
|
src/unity/python/turicreate/toolkits/distances/_util.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L24-L126
|
def compute_composite_distance(distance, x, y):
"""
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
"""
## Validate inputs
_validate_composite_distance(distance)
distance = _convert_distance_names_to_functions(distance)
if not isinstance(x, dict) or not isinstance(y, dict):
raise TypeError("Inputs 'x' and 'y' must be in dictionary form. " +
"Selecting individual rows of an SFrame yields the " +
"correct format.")
ans = 0.
for d in distance:
ftrs, dist, weight = d
## Special check for multiple columns with levenshtein distance.
if dist == _tc.distances.levenshtein and len(ftrs) > 1:
raise ValueError("levenshtein distance cannot be used with multiple" +
"columns. Please concatenate strings into a single " +
"column before computing the distance.")
## Extract values for specified features.
a = {}
b = {}
for ftr in ftrs:
if type(x[ftr]) != type(y[ftr]):
if not isinstance(x[ftr], (int, float)) or not isinstance(y[ftr], (int, float)):
raise ValueError("Input data has different types.")
if isinstance(x[ftr], (int, float, str)):
a[ftr] = x[ftr]
b[ftr] = y[ftr]
elif isinstance(x[ftr], dict):
for key, val in _six.iteritems(x[ftr]):
a['{}.{}'.format(ftr, key)] = val
for key, val in _six.iteritems(y[ftr]):
b['{}.{}'.format(ftr, key)] = val
elif isinstance(x[ftr], (list, _array.array)):
for i, val in enumerate(x[ftr]):
a[i] = val
for i, val in enumerate(y[ftr]):
b[i] = val
else:
raise TypeError("Type of feature '{}' not understood.".format(ftr))
## Pull out the raw values for levenshtein
if dist == _tc.distances.levenshtein:
a = list(a.values())[0]
b = list(b.values())[0]
## Compute component distance and add to the total distance.
ans += weight * dist(a, b)
return ans
|
[
"def",
"compute_composite_distance",
"(",
"distance",
",",
"x",
",",
"y",
")",
":",
"## Validate inputs",
"_validate_composite_distance",
"(",
"distance",
")",
"distance",
"=",
"_convert_distance_names_to_functions",
"(",
"distance",
")",
"if",
"not",
"isinstance",
"(",
"x",
",",
"dict",
")",
"or",
"not",
"isinstance",
"(",
"y",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"Inputs 'x' and 'y' must be in dictionary form. \"",
"+",
"\"Selecting individual rows of an SFrame yields the \"",
"+",
"\"correct format.\"",
")",
"ans",
"=",
"0.",
"for",
"d",
"in",
"distance",
":",
"ftrs",
",",
"dist",
",",
"weight",
"=",
"d",
"## Special check for multiple columns with levenshtein distance.",
"if",
"dist",
"==",
"_tc",
".",
"distances",
".",
"levenshtein",
"and",
"len",
"(",
"ftrs",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"levenshtein distance cannot be used with multiple\"",
"+",
"\"columns. Please concatenate strings into a single \"",
"+",
"\"column before computing the distance.\"",
")",
"## Extract values for specified features.",
"a",
"=",
"{",
"}",
"b",
"=",
"{",
"}",
"for",
"ftr",
"in",
"ftrs",
":",
"if",
"type",
"(",
"x",
"[",
"ftr",
"]",
")",
"!=",
"type",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
")",
")",
"or",
"not",
"isinstance",
"(",
"y",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Input data has different types.\"",
")",
"if",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
",",
"str",
")",
")",
":",
"a",
"[",
"ftr",
"]",
"=",
"x",
"[",
"ftr",
"]",
"b",
"[",
"ftr",
"]",
"=",
"y",
"[",
"ftr",
"]",
"elif",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"dict",
")",
":",
"for",
"key",
",",
"val",
"in",
"_six",
".",
"iteritems",
"(",
"x",
"[",
"ftr",
"]",
")",
":",
"a",
"[",
"'{}.{}'",
".",
"format",
"(",
"ftr",
",",
"key",
")",
"]",
"=",
"val",
"for",
"key",
",",
"val",
"in",
"_six",
".",
"iteritems",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"b",
"[",
"'{}.{}'",
".",
"format",
"(",
"ftr",
",",
"key",
")",
"]",
"=",
"val",
"elif",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"list",
",",
"_array",
".",
"array",
")",
")",
":",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"x",
"[",
"ftr",
"]",
")",
":",
"a",
"[",
"i",
"]",
"=",
"val",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"b",
"[",
"i",
"]",
"=",
"val",
"else",
":",
"raise",
"TypeError",
"(",
"\"Type of feature '{}' not understood.\"",
".",
"format",
"(",
"ftr",
")",
")",
"## Pull out the raw values for levenshtein",
"if",
"dist",
"==",
"_tc",
".",
"distances",
".",
"levenshtein",
":",
"a",
"=",
"list",
"(",
"a",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"b",
"=",
"list",
"(",
"b",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"## Compute component distance and add to the total distance.",
"ans",
"+=",
"weight",
"*",
"dist",
"(",
"a",
",",
"b",
")",
"return",
"ans"
] |
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
|
[
"Compute",
"the",
"value",
"of",
"a",
"composite",
"distance",
"function",
"on",
"two",
"dictionaries",
"typically",
"SFrame",
"rows",
"."
] |
python
|
train
|
PyMySQL/PyMySQL
|
pymysql/protocol.py
|
https://github.com/PyMySQL/PyMySQL/blob/3674bc6fd064bf88524e839c07690e8c35223709/pymysql/protocol.py#L63-L75
|
def read(self, size):
"""Read the first 'size' bytes in packet and advance cursor past them."""
result = self._data[self._position:(self._position+size)]
if len(result) != size:
error = ('Result length not requested length:\n'
'Expected=%s. Actual=%s. Position: %s. Data Length: %s'
% (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result
|
[
"def",
"read",
"(",
"self",
",",
"size",
")",
":",
"result",
"=",
"self",
".",
"_data",
"[",
"self",
".",
"_position",
":",
"(",
"self",
".",
"_position",
"+",
"size",
")",
"]",
"if",
"len",
"(",
"result",
")",
"!=",
"size",
":",
"error",
"=",
"(",
"'Result length not requested length:\\n'",
"'Expected=%s. Actual=%s. Position: %s. Data Length: %s'",
"%",
"(",
"size",
",",
"len",
"(",
"result",
")",
",",
"self",
".",
"_position",
",",
"len",
"(",
"self",
".",
"_data",
")",
")",
")",
"if",
"DEBUG",
":",
"print",
"(",
"error",
")",
"self",
".",
"dump",
"(",
")",
"raise",
"AssertionError",
"(",
"error",
")",
"self",
".",
"_position",
"+=",
"size",
"return",
"result"
] |
Read the first 'size' bytes in packet and advance cursor past them.
|
[
"Read",
"the",
"first",
"size",
"bytes",
"in",
"packet",
"and",
"advance",
"cursor",
"past",
"them",
"."
] |
python
|
train
|
AlecAivazis/graphql-over-kafka
|
nautilus/services/apiGateway.py
|
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/services/apiGateway.py#L210-L293
|
async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters):
"""
This function resolves a given object in the remote backend services
"""
try:
# check if an object with that name has been registered
registered = [model for model in self._external_service_data['models'] \
if model['name']==object_name][0]
# if there is no connection data yet
except AttributeError:
raise ValueError("No objects are registered with this schema yet.")
# if we dont recognize the model that was requested
except IndexError:
raise ValueError("Cannot query for object {} on this service.".format(object_name))
# the valid fields for this object
valid_fields = [field['name'] for field in registered['fields']]
# figure out if any invalid fields were requested
invalid_fields = [field for field in fields if field not in valid_fields]
try:
# make sure we never treat pk as invalid
invalid_fields.remove('pk')
# if they weren't asking for pk as a field
except ValueError:
pass
# if there were
if invalid_fields:
# yell loudly
raise ValueError("Cannot query for fields {!r} on {}".format(
invalid_fields, registered['name']
))
# make sure we include the id in the request
fields.append('pk')
# the query for model records
query = query_for_model(fields, **filters)
# the action type for the question
action_type = get_crud_action('read', object_name)
# query the appropriate stream for the information
response = await self.event_broker.ask(
action_type=action_type,
payload=query
)
# treat the reply like a json object
response_data = json.loads(response)
# if something went wrong
if 'errors' in response_data and response_data['errors']:
# return an empty response
raise ValueError(','.join(response_data['errors']))
# grab the valid list of matches
result = response_data['data'][root_query()]
# grab the auth handler for the object
auth_criteria = self.auth_criteria.get(object_name)
# if we care about auth requirements and there is one for this object
if obey_auth and auth_criteria:
# build a second list of authorized entries
authorized_results = []
# for each query result
for query_result in result:
# create a graph entity for the model
graph_entity = GraphEntity(self, model_type=object_name, id=query_result['pk'])
# if the auth handler passes
if await auth_criteria(model=graph_entity, user_id=current_user):
# add the result to the final list
authorized_results.append(query_result)
# overwrite the query result
result = authorized_results
# apply the auth handler to the result
return result
|
[
"async",
"def",
"object_resolver",
"(",
"self",
",",
"object_name",
",",
"fields",
",",
"obey_auth",
"=",
"False",
",",
"current_user",
"=",
"None",
",",
"*",
"*",
"filters",
")",
":",
"try",
":",
"# check if an object with that name has been registered",
"registered",
"=",
"[",
"model",
"for",
"model",
"in",
"self",
".",
"_external_service_data",
"[",
"'models'",
"]",
"if",
"model",
"[",
"'name'",
"]",
"==",
"object_name",
"]",
"[",
"0",
"]",
"# if there is no connection data yet",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"No objects are registered with this schema yet.\"",
")",
"# if we dont recognize the model that was requested",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"Cannot query for object {} on this service.\"",
".",
"format",
"(",
"object_name",
")",
")",
"# the valid fields for this object",
"valid_fields",
"=",
"[",
"field",
"[",
"'name'",
"]",
"for",
"field",
"in",
"registered",
"[",
"'fields'",
"]",
"]",
"# figure out if any invalid fields were requested",
"invalid_fields",
"=",
"[",
"field",
"for",
"field",
"in",
"fields",
"if",
"field",
"not",
"in",
"valid_fields",
"]",
"try",
":",
"# make sure we never treat pk as invalid",
"invalid_fields",
".",
"remove",
"(",
"'pk'",
")",
"# if they weren't asking for pk as a field",
"except",
"ValueError",
":",
"pass",
"# if there were",
"if",
"invalid_fields",
":",
"# yell loudly",
"raise",
"ValueError",
"(",
"\"Cannot query for fields {!r} on {}\"",
".",
"format",
"(",
"invalid_fields",
",",
"registered",
"[",
"'name'",
"]",
")",
")",
"# make sure we include the id in the request",
"fields",
".",
"append",
"(",
"'pk'",
")",
"# the query for model records",
"query",
"=",
"query_for_model",
"(",
"fields",
",",
"*",
"*",
"filters",
")",
"# the action type for the question",
"action_type",
"=",
"get_crud_action",
"(",
"'read'",
",",
"object_name",
")",
"# query the appropriate stream for the information",
"response",
"=",
"await",
"self",
".",
"event_broker",
".",
"ask",
"(",
"action_type",
"=",
"action_type",
",",
"payload",
"=",
"query",
")",
"# treat the reply like a json object",
"response_data",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"# if something went wrong",
"if",
"'errors'",
"in",
"response_data",
"and",
"response_data",
"[",
"'errors'",
"]",
":",
"# return an empty response",
"raise",
"ValueError",
"(",
"','",
".",
"join",
"(",
"response_data",
"[",
"'errors'",
"]",
")",
")",
"# grab the valid list of matches",
"result",
"=",
"response_data",
"[",
"'data'",
"]",
"[",
"root_query",
"(",
")",
"]",
"# grab the auth handler for the object",
"auth_criteria",
"=",
"self",
".",
"auth_criteria",
".",
"get",
"(",
"object_name",
")",
"# if we care about auth requirements and there is one for this object",
"if",
"obey_auth",
"and",
"auth_criteria",
":",
"# build a second list of authorized entries",
"authorized_results",
"=",
"[",
"]",
"# for each query result",
"for",
"query_result",
"in",
"result",
":",
"# create a graph entity for the model",
"graph_entity",
"=",
"GraphEntity",
"(",
"self",
",",
"model_type",
"=",
"object_name",
",",
"id",
"=",
"query_result",
"[",
"'pk'",
"]",
")",
"# if the auth handler passes",
"if",
"await",
"auth_criteria",
"(",
"model",
"=",
"graph_entity",
",",
"user_id",
"=",
"current_user",
")",
":",
"# add the result to the final list",
"authorized_results",
".",
"append",
"(",
"query_result",
")",
"# overwrite the query result",
"result",
"=",
"authorized_results",
"# apply the auth handler to the result",
"return",
"result"
] |
This function resolves a given object in the remote backend services
|
[
"This",
"function",
"resolves",
"a",
"given",
"object",
"in",
"the",
"remote",
"backend",
"services"
] |
python
|
train
|
SeabornGames/Table
|
seaborn_table/table.py
|
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L1579-L1589
|
def _key_on_columns(key_on, columns):
"""
:param key_on: str of column
:param columns: list of str of columns
:return: list of str with the key_on in the front of the list
"""
if key_on is not None:
if key_on in columns:
columns.remove(key_on)
columns = [key_on] + columns
return columns
|
[
"def",
"_key_on_columns",
"(",
"key_on",
",",
"columns",
")",
":",
"if",
"key_on",
"is",
"not",
"None",
":",
"if",
"key_on",
"in",
"columns",
":",
"columns",
".",
"remove",
"(",
"key_on",
")",
"columns",
"=",
"[",
"key_on",
"]",
"+",
"columns",
"return",
"columns"
] |
:param key_on: str of column
:param columns: list of str of columns
:return: list of str with the key_on in the front of the list
|
[
":",
"param",
"key_on",
":",
"str",
"of",
"column",
":",
"param",
"columns",
":",
"list",
"of",
"str",
"of",
"columns",
":",
"return",
":",
"list",
"of",
"str",
"with",
"the",
"key_on",
"in",
"the",
"front",
"of",
"the",
"list"
] |
python
|
train
|
gem/oq-engine
|
openquake/hazardlib/gsim/campbell_bozorgnia_2014.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2014.py#L243-L252
|
def _get_hanging_wall_coeffs_mag(self, C, mag):
"""
Returns the hanging wall magnitude term defined in equation 14
"""
if mag < 5.5:
return 0.0
elif mag > 6.5:
return 1.0 + C["a2"] * (mag - 6.5)
else:
return (mag - 5.5) * (1.0 + C["a2"] * (mag - 6.5))
|
[
"def",
"_get_hanging_wall_coeffs_mag",
"(",
"self",
",",
"C",
",",
"mag",
")",
":",
"if",
"mag",
"<",
"5.5",
":",
"return",
"0.0",
"elif",
"mag",
">",
"6.5",
":",
"return",
"1.0",
"+",
"C",
"[",
"\"a2\"",
"]",
"*",
"(",
"mag",
"-",
"6.5",
")",
"else",
":",
"return",
"(",
"mag",
"-",
"5.5",
")",
"*",
"(",
"1.0",
"+",
"C",
"[",
"\"a2\"",
"]",
"*",
"(",
"mag",
"-",
"6.5",
")",
")"
] |
Returns the hanging wall magnitude term defined in equation 14
|
[
"Returns",
"the",
"hanging",
"wall",
"magnitude",
"term",
"defined",
"in",
"equation",
"14"
] |
python
|
train
|
arista-eosplus/pyeapi
|
pyeapi/api/vrrp.py
|
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vrrp.py#L462-L507
|
def set_primary_ip(self, name, vrid, value=None, disable=False,
default=False, run=True):
"""Set the primary_ip property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (string): IP address to be set.
disable (boolean): Unset primary ip if True.
default (boolean): Set primary ip to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
"""
if default is True:
vrrps = self.get(name)
primary_ip = vrrps[vrid]['primary_ip']
cmd = "default vrrp %d ip %s" % (vrid, primary_ip)
elif disable is True or value is None:
vrrps = self.get(name)
primary_ip = vrrps[vrid]['primary_ip']
cmd = "no vrrp %d ip %s" % (vrid, primary_ip)
elif re.match(r'^\d+\.\d+\.\d+\.\d+$', str(value)):
cmd = "vrrp %d ip %s" % (vrid, value)
else:
raise ValueError("vrrp property 'primary_ip' must be "
"a properly formatted IP address")
# Run the command if requested
if run:
result = self.configure_interface(name, cmd)
# And verify the command succeeded
if result is False:
return self.error
return result
# Otherwise return the formatted command
return cmd
|
[
"def",
"set_primary_ip",
"(",
"self",
",",
"name",
",",
"vrid",
",",
"value",
"=",
"None",
",",
"disable",
"=",
"False",
",",
"default",
"=",
"False",
",",
"run",
"=",
"True",
")",
":",
"if",
"default",
"is",
"True",
":",
"vrrps",
"=",
"self",
".",
"get",
"(",
"name",
")",
"primary_ip",
"=",
"vrrps",
"[",
"vrid",
"]",
"[",
"'primary_ip'",
"]",
"cmd",
"=",
"\"default vrrp %d ip %s\"",
"%",
"(",
"vrid",
",",
"primary_ip",
")",
"elif",
"disable",
"is",
"True",
"or",
"value",
"is",
"None",
":",
"vrrps",
"=",
"self",
".",
"get",
"(",
"name",
")",
"primary_ip",
"=",
"vrrps",
"[",
"vrid",
"]",
"[",
"'primary_ip'",
"]",
"cmd",
"=",
"\"no vrrp %d ip %s\"",
"%",
"(",
"vrid",
",",
"primary_ip",
")",
"elif",
"re",
".",
"match",
"(",
"r'^\\d+\\.\\d+\\.\\d+\\.\\d+$'",
",",
"str",
"(",
"value",
")",
")",
":",
"cmd",
"=",
"\"vrrp %d ip %s\"",
"%",
"(",
"vrid",
",",
"value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"vrrp property 'primary_ip' must be \"",
"\"a properly formatted IP address\"",
")",
"# Run the command if requested",
"if",
"run",
":",
"result",
"=",
"self",
".",
"configure_interface",
"(",
"name",
",",
"cmd",
")",
"# And verify the command succeeded",
"if",
"result",
"is",
"False",
":",
"return",
"self",
".",
"error",
"return",
"result",
"# Otherwise return the formatted command",
"return",
"cmd"
] |
Set the primary_ip property of the vrrp
Args:
name (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be managed.
value (string): IP address to be set.
disable (boolean): Unset primary ip if True.
default (boolean): Set primary ip to default if True.
run (boolean): Set to True to execute the command, False to
return a string with the formatted command.
Returns:
If run is True, returns True if the command executed successfully,
error if failure.
If run is False, returns the formatted command string which can
be passed to the node
|
[
"Set",
"the",
"primary_ip",
"property",
"of",
"the",
"vrrp"
] |
python
|
train
|
chrippa/python-librtmp
|
librtmp/stream.py
|
https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L45-L67
|
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
|
[
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytearray",
")",
":",
"data",
"=",
"bytes",
"(",
"data",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"byte_types",
")",
":",
"raise",
"ValueError",
"(",
"\"A bytes argument is required\"",
")",
"res",
"=",
"librtmp",
".",
"RTMP_Write",
"(",
"self",
".",
"client",
".",
"rtmp",
",",
"data",
",",
"len",
"(",
"data",
")",
")",
"if",
"res",
"<",
"0",
":",
"raise",
"IOError",
"(",
"\"Failed to write data\"",
")",
"return",
"res"
] |
Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
|
[
"Writes",
"data",
"to",
"the",
"stream",
"."
] |
python
|
train
|
CZ-NIC/yangson
|
yangson/schemanode.py
|
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L1138-L1143
|
def _active_case(self, value: ObjectValue) -> Optional["CaseNode"]:
"""Return receiver's case that's active in an instance node value."""
for c in self.children:
for cc in c.data_children():
if cc.iname() in value:
return c
|
[
"def",
"_active_case",
"(",
"self",
",",
"value",
":",
"ObjectValue",
")",
"->",
"Optional",
"[",
"\"CaseNode\"",
"]",
":",
"for",
"c",
"in",
"self",
".",
"children",
":",
"for",
"cc",
"in",
"c",
".",
"data_children",
"(",
")",
":",
"if",
"cc",
".",
"iname",
"(",
")",
"in",
"value",
":",
"return",
"c"
] |
Return receiver's case that's active in an instance node value.
|
[
"Return",
"receiver",
"s",
"case",
"that",
"s",
"active",
"in",
"an",
"instance",
"node",
"value",
"."
] |
python
|
train
|
grigi/talkey
|
talkey/utils.py
|
https://github.com/grigi/talkey/blob/5d2d4a1f7001744c4fd9a79a883a3f2001522329/talkey/utils.py#L40-L63
|
def check_network_connection(server, port):
'''
Checks if jasper can connect a network server.
Arguments:
server -- (optional) the server to connect with (Default:
"www.google.com")
Returns:
True or False
'''
logger = logging.getLogger(__name__)
logger.debug("Checking network connection to server '%s'...", server)
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(server)
# connect to the host -- tells us if the host is actually
# reachable
sock = socket.create_connection((host, port), 2)
sock.close()
except Exception: # pragma: no cover
logger.debug("Network connection not working")
return False
logger.debug("Network connection working")
return True
|
[
"def",
"check_network_connection",
"(",
"server",
",",
"port",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Checking network connection to server '%s'...\"",
",",
"server",
")",
"try",
":",
"# see if we can resolve the host name -- tells us if there is",
"# a DNS listening",
"host",
"=",
"socket",
".",
"gethostbyname",
"(",
"server",
")",
"# connect to the host -- tells us if the host is actually",
"# reachable",
"sock",
"=",
"socket",
".",
"create_connection",
"(",
"(",
"host",
",",
"port",
")",
",",
"2",
")",
"sock",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"# pragma: no cover",
"logger",
".",
"debug",
"(",
"\"Network connection not working\"",
")",
"return",
"False",
"logger",
".",
"debug",
"(",
"\"Network connection working\"",
")",
"return",
"True"
] |
Checks if jasper can connect a network server.
Arguments:
server -- (optional) the server to connect with (Default:
"www.google.com")
Returns:
True or False
|
[
"Checks",
"if",
"jasper",
"can",
"connect",
"a",
"network",
"server",
".",
"Arguments",
":",
"server",
"--",
"(",
"optional",
")",
"the",
"server",
"to",
"connect",
"with",
"(",
"Default",
":",
"www",
".",
"google",
".",
"com",
")",
"Returns",
":",
"True",
"or",
"False"
] |
python
|
train
|
BeyondTheClouds/enoslib
|
enoslib/infra/enos_g5k/g5k_api_utils.py
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/g5k_api_utils.py#L372-L380
|
def get_nodes(cluster):
"""Get all the nodes of a given cluster.
Args:
cluster(string): uid of the cluster (e.g 'rennes')
"""
gk = get_api_client()
site = get_cluster_site(cluster)
return gk.sites[site].clusters[cluster].nodes.list()
|
[
"def",
"get_nodes",
"(",
"cluster",
")",
":",
"gk",
"=",
"get_api_client",
"(",
")",
"site",
"=",
"get_cluster_site",
"(",
"cluster",
")",
"return",
"gk",
".",
"sites",
"[",
"site",
"]",
".",
"clusters",
"[",
"cluster",
"]",
".",
"nodes",
".",
"list",
"(",
")"
] |
Get all the nodes of a given cluster.
Args:
cluster(string): uid of the cluster (e.g 'rennes')
|
[
"Get",
"all",
"the",
"nodes",
"of",
"a",
"given",
"cluster",
"."
] |
python
|
train
|
a1ezzz/wasp-general
|
wasp_general/task/registry.py
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/registry.py#L201-L211
|
def tasks_by_tag(self, registry_tag):
""" Get tasks from registry by its tag
:param registry_tag: any hash-able object
:return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or \
list of tasks
"""
if registry_tag not in self.__registry.keys():
return None
tasks = self.__registry[registry_tag]
return tasks if self.__multiple_tasks_per_tag__ is True else tasks[0]
|
[
"def",
"tasks_by_tag",
"(",
"self",
",",
"registry_tag",
")",
":",
"if",
"registry_tag",
"not",
"in",
"self",
".",
"__registry",
".",
"keys",
"(",
")",
":",
"return",
"None",
"tasks",
"=",
"self",
".",
"__registry",
"[",
"registry_tag",
"]",
"return",
"tasks",
"if",
"self",
".",
"__multiple_tasks_per_tag__",
"is",
"True",
"else",
"tasks",
"[",
"0",
"]"
] |
Get tasks from registry by its tag
:param registry_tag: any hash-able object
:return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or \
list of tasks
|
[
"Get",
"tasks",
"from",
"registry",
"by",
"its",
"tag"
] |
python
|
train
|
jmgilman/Neolib
|
neolib/pyamf/remoting/gateway/__init__.py
|
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L451-L472
|
def authenticateRequest(self, service_request, username, password, **kwargs):
"""
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
"""
authenticator = self.getAuthenticator(service_request)
if authenticator is None:
return True
args = (username, password)
if hasattr(authenticator, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return authenticator(*args) == True
|
[
"def",
"authenticateRequest",
"(",
"self",
",",
"service_request",
",",
"username",
",",
"password",
",",
"*",
"*",
"kwargs",
")",
":",
"authenticator",
"=",
"self",
".",
"getAuthenticator",
"(",
"service_request",
")",
"if",
"authenticator",
"is",
"None",
":",
"return",
"True",
"args",
"=",
"(",
"username",
",",
"password",
")",
"if",
"hasattr",
"(",
"authenticator",
",",
"'_pyamf_expose_request'",
")",
":",
"http_request",
"=",
"kwargs",
".",
"get",
"(",
"'http_request'",
",",
"None",
")",
"args",
"=",
"(",
"http_request",
",",
")",
"+",
"args",
"return",
"authenticator",
"(",
"*",
"args",
")",
"==",
"True"
] |
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
|
[
"Processes",
"an",
"authentication",
"request",
".",
"If",
"no",
"authenticator",
"is",
"supplied",
"then",
"authentication",
"succeeds",
"."
] |
python
|
train
|
deepmind/pysc2
|
pysc2/lib/renderer_human.py
|
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L934-L941
|
def _abilities(self, fn=None):
"""Return the list of abilities filtered by `fn`."""
out = {}
for cmd in self._obs.observation.abilities:
ability = _Ability(cmd, self._static_data.abilities)
if not fn or fn(ability):
out[ability.ability_id] = ability
return list(out.values())
|
[
"def",
"_abilities",
"(",
"self",
",",
"fn",
"=",
"None",
")",
":",
"out",
"=",
"{",
"}",
"for",
"cmd",
"in",
"self",
".",
"_obs",
".",
"observation",
".",
"abilities",
":",
"ability",
"=",
"_Ability",
"(",
"cmd",
",",
"self",
".",
"_static_data",
".",
"abilities",
")",
"if",
"not",
"fn",
"or",
"fn",
"(",
"ability",
")",
":",
"out",
"[",
"ability",
".",
"ability_id",
"]",
"=",
"ability",
"return",
"list",
"(",
"out",
".",
"values",
"(",
")",
")"
] |
Return the list of abilities filtered by `fn`.
|
[
"Return",
"the",
"list",
"of",
"abilities",
"filtered",
"by",
"fn",
"."
] |
python
|
train
|
etcher-be/epab
|
epab/utils/_repo.py
|
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L301-L341
|
def commit(
self,
message: str,
files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
allow_empty: bool = False,
):
"""
Commits changes to the repo
:param message: first line of the message
:type message: str
:param files_to_add: files to commit
:type files_to_add: optional list of str
:param allow_empty: allow dummy commit
:type allow_empty: bool
"""
message = str(message)
LOGGER.debug('message: %s', message)
files_to_add = self._sanitize_files_to_add(files_to_add)
LOGGER.debug('files to add: %s', files_to_add)
if not message:
LOGGER.error('empty commit message')
sys.exit(-1)
if os.getenv('APPVEYOR'):
LOGGER.info('committing on AV, adding skip_ci tag')
message = self.add_skip_ci_to_commit_msg(message)
if files_to_add is None:
self.stage_all()
else:
self.reset_index()
self.stage_subset(*files_to_add)
if self.index_is_empty() and not allow_empty:
LOGGER.error('empty commit')
sys.exit(-1)
self.repo.index.commit(message=message)
|
[
"def",
"commit",
"(",
"self",
",",
"message",
":",
"str",
",",
"files_to_add",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Union",
"[",
"typing",
".",
"List",
"[",
"str",
"]",
",",
"str",
"]",
"]",
"=",
"None",
",",
"allow_empty",
":",
"bool",
"=",
"False",
",",
")",
":",
"message",
"=",
"str",
"(",
"message",
")",
"LOGGER",
".",
"debug",
"(",
"'message: %s'",
",",
"message",
")",
"files_to_add",
"=",
"self",
".",
"_sanitize_files_to_add",
"(",
"files_to_add",
")",
"LOGGER",
".",
"debug",
"(",
"'files to add: %s'",
",",
"files_to_add",
")",
"if",
"not",
"message",
":",
"LOGGER",
".",
"error",
"(",
"'empty commit message'",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"if",
"os",
".",
"getenv",
"(",
"'APPVEYOR'",
")",
":",
"LOGGER",
".",
"info",
"(",
"'committing on AV, adding skip_ci tag'",
")",
"message",
"=",
"self",
".",
"add_skip_ci_to_commit_msg",
"(",
"message",
")",
"if",
"files_to_add",
"is",
"None",
":",
"self",
".",
"stage_all",
"(",
")",
"else",
":",
"self",
".",
"reset_index",
"(",
")",
"self",
".",
"stage_subset",
"(",
"*",
"files_to_add",
")",
"if",
"self",
".",
"index_is_empty",
"(",
")",
"and",
"not",
"allow_empty",
":",
"LOGGER",
".",
"error",
"(",
"'empty commit'",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"self",
".",
"repo",
".",
"index",
".",
"commit",
"(",
"message",
"=",
"message",
")"
] |
Commits changes to the repo
:param message: first line of the message
:type message: str
:param files_to_add: files to commit
:type files_to_add: optional list of str
:param allow_empty: allow dummy commit
:type allow_empty: bool
|
[
"Commits",
"changes",
"to",
"the",
"repo"
] |
python
|
train
|
geopy/geopy
|
geopy/geocoders/arcgis.py
|
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/arcgis.py#L308-L337
|
def _refresh_authentication_token(self):
"""
POST to ArcGIS requesting a new token.
"""
if self.retry == self._MAX_RETRIES:
raise GeocoderAuthenticationFailure(
'Too many retries for auth: %s' % self.retry
)
token_request_arguments = {
'username': self.username,
'password': self.password,
'referer': self.referer,
'expiration': self.token_lifetime,
'f': 'json'
}
url = "?".join((self.auth_api, urlencode(token_request_arguments)))
logger.debug(
"%s._refresh_authentication_token: %s",
self.__class__.__name__, url
)
self.token_expiry = int(time()) + self.token_lifetime
response = self._base_call_geocoder(url)
if 'token' not in response:
raise GeocoderAuthenticationFailure(
'Missing token in auth request.'
'Request URL: %s; response JSON: %s' %
(url, json.dumps(response))
)
self.retry = 0
self.token = response['token']
|
[
"def",
"_refresh_authentication_token",
"(",
"self",
")",
":",
"if",
"self",
".",
"retry",
"==",
"self",
".",
"_MAX_RETRIES",
":",
"raise",
"GeocoderAuthenticationFailure",
"(",
"'Too many retries for auth: %s'",
"%",
"self",
".",
"retry",
")",
"token_request_arguments",
"=",
"{",
"'username'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
",",
"'referer'",
":",
"self",
".",
"referer",
",",
"'expiration'",
":",
"self",
".",
"token_lifetime",
",",
"'f'",
":",
"'json'",
"}",
"url",
"=",
"\"?\"",
".",
"join",
"(",
"(",
"self",
".",
"auth_api",
",",
"urlencode",
"(",
"token_request_arguments",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s._refresh_authentication_token: %s\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"url",
")",
"self",
".",
"token_expiry",
"=",
"int",
"(",
"time",
"(",
")",
")",
"+",
"self",
".",
"token_lifetime",
"response",
"=",
"self",
".",
"_base_call_geocoder",
"(",
"url",
")",
"if",
"'token'",
"not",
"in",
"response",
":",
"raise",
"GeocoderAuthenticationFailure",
"(",
"'Missing token in auth request.'",
"'Request URL: %s; response JSON: %s'",
"%",
"(",
"url",
",",
"json",
".",
"dumps",
"(",
"response",
")",
")",
")",
"self",
".",
"retry",
"=",
"0",
"self",
".",
"token",
"=",
"response",
"[",
"'token'",
"]"
] |
POST to ArcGIS requesting a new token.
|
[
"POST",
"to",
"ArcGIS",
"requesting",
"a",
"new",
"token",
"."
] |
python
|
train
|
gwastro/pycbc
|
pycbc/tmpltbank/partitioned_bank.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/partitioned_bank.py#L611-L640
|
def output_all_points(self):
"""Return all points in the bank.
Return all points in the bank as lists of m1, m2, spin1z, spin2z.
Returns
-------
mass1 : list
List of mass1 values.
mass2 : list
List of mass2 values.
spin1z : list
List of spin1z values.
spin2z : list
List of spin2z values.
"""
mass1 = []
mass2 = []
spin1z = []
spin2z = []
for i in self.massbank.keys():
for j in self.massbank[i].keys():
for k in xrange(len(self.massbank[i][j]['mass1s'])):
curr_bank = self.massbank[i][j]
mass1.append(curr_bank['mass1s'][k])
mass2.append(curr_bank['mass2s'][k])
spin1z.append(curr_bank['spin1s'][k])
spin2z.append(curr_bank['spin2s'][k])
return mass1, mass2, spin1z, spin2z
|
[
"def",
"output_all_points",
"(",
"self",
")",
":",
"mass1",
"=",
"[",
"]",
"mass2",
"=",
"[",
"]",
"spin1z",
"=",
"[",
"]",
"spin2z",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"massbank",
".",
"keys",
"(",
")",
":",
"for",
"j",
"in",
"self",
".",
"massbank",
"[",
"i",
"]",
".",
"keys",
"(",
")",
":",
"for",
"k",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"massbank",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"'mass1s'",
"]",
")",
")",
":",
"curr_bank",
"=",
"self",
".",
"massbank",
"[",
"i",
"]",
"[",
"j",
"]",
"mass1",
".",
"append",
"(",
"curr_bank",
"[",
"'mass1s'",
"]",
"[",
"k",
"]",
")",
"mass2",
".",
"append",
"(",
"curr_bank",
"[",
"'mass2s'",
"]",
"[",
"k",
"]",
")",
"spin1z",
".",
"append",
"(",
"curr_bank",
"[",
"'spin1s'",
"]",
"[",
"k",
"]",
")",
"spin2z",
".",
"append",
"(",
"curr_bank",
"[",
"'spin2s'",
"]",
"[",
"k",
"]",
")",
"return",
"mass1",
",",
"mass2",
",",
"spin1z",
",",
"spin2z"
] |
Return all points in the bank.
Return all points in the bank as lists of m1, m2, spin1z, spin2z.
Returns
-------
mass1 : list
List of mass1 values.
mass2 : list
List of mass2 values.
spin1z : list
List of spin1z values.
spin2z : list
List of spin2z values.
|
[
"Return",
"all",
"points",
"in",
"the",
"bank",
"."
] |
python
|
train
|
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L460-L473
|
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
ext_community_action = ET.SubElement(extcommunity_list, "ext-community-action")
ext_community_action.text = kwargs.pop('ext_community_action')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"ip",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"ip\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-common-def\"",
")",
"hide_ext_community_list_holder",
"=",
"ET",
".",
"SubElement",
"(",
"ip",
",",
"\"hide-ext-community-list-holder\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-ip-policy\"",
")",
"extcommunity_list",
"=",
"ET",
".",
"SubElement",
"(",
"hide_ext_community_list_holder",
",",
"\"extcommunity-list\"",
")",
"extcommunity_list_num_key",
"=",
"ET",
".",
"SubElement",
"(",
"extcommunity_list",
",",
"\"extcommunity-list-num\"",
")",
"extcommunity_list_num_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'extcommunity_list_num'",
")",
"ext_community_action",
"=",
"ET",
".",
"SubElement",
"(",
"extcommunity_list",
",",
"\"ext-community-action\"",
")",
"ext_community_action",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'ext_community_action'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
aiortc/aiortc
|
aiortc/rtcsctptransport.py
|
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcsctptransport.py#L734-L744
|
def _set_extensions(self, params):
"""
Sets what extensions are supported by the local party.
"""
extensions = []
if self._local_partial_reliability:
params.append((SCTP_PRSCTP_SUPPORTED, b''))
extensions.append(ForwardTsnChunk.type)
extensions.append(ReconfigChunk.type)
params.append((SCTP_SUPPORTED_CHUNK_EXT, bytes(extensions)))
|
[
"def",
"_set_extensions",
"(",
"self",
",",
"params",
")",
":",
"extensions",
"=",
"[",
"]",
"if",
"self",
".",
"_local_partial_reliability",
":",
"params",
".",
"append",
"(",
"(",
"SCTP_PRSCTP_SUPPORTED",
",",
"b''",
")",
")",
"extensions",
".",
"append",
"(",
"ForwardTsnChunk",
".",
"type",
")",
"extensions",
".",
"append",
"(",
"ReconfigChunk",
".",
"type",
")",
"params",
".",
"append",
"(",
"(",
"SCTP_SUPPORTED_CHUNK_EXT",
",",
"bytes",
"(",
"extensions",
")",
")",
")"
] |
Sets what extensions are supported by the local party.
|
[
"Sets",
"what",
"extensions",
"are",
"supported",
"by",
"the",
"local",
"party",
"."
] |
python
|
train
|
EndurantDevs/webargs-sanic
|
webargs_sanic/sanicparser.py
|
https://github.com/EndurantDevs/webargs-sanic/blob/8861a3b7d16d43a0b7e6669115eb93b0553f1b63/webargs_sanic/sanicparser.py#L74-L81
|
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
if not (req.body and is_json_request(req)):
return core.missing
json_data = req.json
if json_data is None:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True)
|
[
"def",
"parse_json",
"(",
"self",
",",
"req",
",",
"name",
",",
"field",
")",
":",
"if",
"not",
"(",
"req",
".",
"body",
"and",
"is_json_request",
"(",
"req",
")",
")",
":",
"return",
"core",
".",
"missing",
"json_data",
"=",
"req",
".",
"json",
"if",
"json_data",
"is",
"None",
":",
"return",
"core",
".",
"missing",
"return",
"core",
".",
"get_value",
"(",
"json_data",
",",
"name",
",",
"field",
",",
"allow_many_nested",
"=",
"True",
")"
] |
Pull a json value from the request.
|
[
"Pull",
"a",
"json",
"value",
"from",
"the",
"request",
"."
] |
python
|
train
|
ninuxorg/nodeshot
|
nodeshot/interop/sync/management/commands/sync.py
|
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/sync/management/commands/sync.py#L76-L129
|
def handle(self, *args, **options):
""" execute sync command """
# store verbosity level in instance attribute for later use
self.verbosity = int(options.get('verbosity'))
# blank line
self.stdout.write('\r\n')
# retrieve layers
layers = self.retrieve_layers(*args, **options)
if len(layers) < 1:
self.stdout.write('no layers to process\n\r')
return
else:
self.verbose('going to process %d layers...' % len(layers))
# loop over
for layer in layers:
# retrieve interop class if available
try:
synchronizer_path = layer.external.synchronizer_path
except (ObjectDoesNotExist, AttributeError):
self.stdout.write('External Layer %s does not have a synchronizer class specified\n\r' % layer.name)
continue
# if no synchronizer_path jump to next layer
if synchronizer_path == 'None':
self.stdout.write('External Layer %s does not have a synchronizer class specified\n\r' % layer.name)
continue
if layer.external.config is None:
self.stdout.write('Layer %s does not have a config yet\n\r' % layer.name)
continue
# retrieve class
synchronizer = import_by_path(synchronizer_path)
self.stdout.write('imported module %s\r\n' % synchronizer.__name__)
# try running
try:
instance = synchronizer(layer, verbosity=self.verbosity)
self.stdout.write('Processing layer "%s"\r\n' % layer.slug)
messages = instance.sync()
except ImproperlyConfigured as e:
self.stdout.write('Validation error: %s\r\n' % e)
continue
except Exception as e:
self.stdout.write('Got Exception: %s\r\n' % e)
exception(e)
continue
for message in messages:
self.stdout.write('%s\n\r' % message)
self.stdout.write('\r\n')
|
[
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"# store verbosity level in instance attribute for later use",
"self",
".",
"verbosity",
"=",
"int",
"(",
"options",
".",
"get",
"(",
"'verbosity'",
")",
")",
"# blank line",
"self",
".",
"stdout",
".",
"write",
"(",
"'\\r\\n'",
")",
"# retrieve layers",
"layers",
"=",
"self",
".",
"retrieve_layers",
"(",
"*",
"args",
",",
"*",
"*",
"options",
")",
"if",
"len",
"(",
"layers",
")",
"<",
"1",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'no layers to process\\n\\r'",
")",
"return",
"else",
":",
"self",
".",
"verbose",
"(",
"'going to process %d layers...'",
"%",
"len",
"(",
"layers",
")",
")",
"# loop over",
"for",
"layer",
"in",
"layers",
":",
"# retrieve interop class if available",
"try",
":",
"synchronizer_path",
"=",
"layer",
".",
"external",
".",
"synchronizer_path",
"except",
"(",
"ObjectDoesNotExist",
",",
"AttributeError",
")",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'External Layer %s does not have a synchronizer class specified\\n\\r'",
"%",
"layer",
".",
"name",
")",
"continue",
"# if no synchronizer_path jump to next layer",
"if",
"synchronizer_path",
"==",
"'None'",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'External Layer %s does not have a synchronizer class specified\\n\\r'",
"%",
"layer",
".",
"name",
")",
"continue",
"if",
"layer",
".",
"external",
".",
"config",
"is",
"None",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'Layer %s does not have a config yet\\n\\r'",
"%",
"layer",
".",
"name",
")",
"continue",
"# retrieve class",
"synchronizer",
"=",
"import_by_path",
"(",
"synchronizer_path",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"'imported module %s\\r\\n'",
"%",
"synchronizer",
".",
"__name__",
")",
"# try running",
"try",
":",
"instance",
"=",
"synchronizer",
"(",
"layer",
",",
"verbosity",
"=",
"self",
".",
"verbosity",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"'Processing layer \"%s\"\\r\\n'",
"%",
"layer",
".",
"slug",
")",
"messages",
"=",
"instance",
".",
"sync",
"(",
")",
"except",
"ImproperlyConfigured",
"as",
"e",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'Validation error: %s\\r\\n'",
"%",
"e",
")",
"continue",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'Got Exception: %s\\r\\n'",
"%",
"e",
")",
"exception",
"(",
"e",
")",
"continue",
"for",
"message",
"in",
"messages",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'%s\\n\\r'",
"%",
"message",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"'\\r\\n'",
")"
] |
execute sync command
|
[
"execute",
"sync",
"command"
] |
python
|
train
|
datacamp/pythonwhat
|
pythonwhat/sct_syntax.py
|
https://github.com/datacamp/pythonwhat/blob/ffbf7f8436a51f77c22f3bed75ba3bc37a5c666f/pythonwhat/sct_syntax.py#L13-L27
|
def multi_dec(f):
"""Decorator for multi to remove nodes for original test functions from root node"""
@wraps(f)
def wrapper(*args, **kwargs):
args = (
args[0] if len(args) == 1 and isinstance(args[0], (list, tuple)) else args
)
for arg in args:
if isinstance(arg, Node) and arg.parent.name is "root":
arg.parent.remove_child(arg)
arg.update_child_calls()
return f(*args, **kwargs)
return wrapper
|
[
"def",
"multi_dec",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"args",
"[",
"0",
"]",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"args",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"Node",
")",
"and",
"arg",
".",
"parent",
".",
"name",
"is",
"\"root\"",
":",
"arg",
".",
"parent",
".",
"remove_child",
"(",
"arg",
")",
"arg",
".",
"update_child_calls",
"(",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
Decorator for multi to remove nodes for original test functions from root node
|
[
"Decorator",
"for",
"multi",
"to",
"remove",
"nodes",
"for",
"original",
"test",
"functions",
"from",
"root",
"node"
] |
python
|
test
|
reiinakano/xcessiv
|
xcessiv/views.py
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L428-L476
|
def get_automated_runs():
"""Return all automated runs"""
path = functions.get_path_from_query_string(request)
if request.method == 'GET':
with functions.DBContextManager(path) as session:
automated_runs = session.query(models.AutomatedRun).all()
return jsonify(list(map(lambda x: x.serialize, automated_runs)))
if request.method == 'POST':
req_body = request.get_json()
with functions.DBContextManager(path) as session:
base_learner_origin = None
if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search':
base_learner_origin = session.query(models.BaseLearnerOrigin).\
filter_by(id=req_body['base_learner_origin_id']).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(
req_body['base_learner_origin_id']
), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(
req_body['base_learner_origin_id']
))
elif req_body['category'] == 'tpot':
pass
else:
raise exceptions.UserError('Automated run category'
' {} not recognized'.format(req_body['category']))
# Check for any syntax errors
module = functions.import_string_code_as_module(req_body['source'])
del module
automated_run = models.AutomatedRun(req_body['source'],
'queued',
req_body['category'],
base_learner_origin)
session.add(automated_run)
session.commit()
with Connection(get_redis_connection()):
rqtasks.start_automated_run.delay(path, automated_run.id)
return jsonify(automated_run.serialize)
|
[
"def",
"get_automated_runs",
"(",
")",
":",
"path",
"=",
"functions",
".",
"get_path_from_query_string",
"(",
"request",
")",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"automated_runs",
"=",
"session",
".",
"query",
"(",
"models",
".",
"AutomatedRun",
")",
".",
"all",
"(",
")",
"return",
"jsonify",
"(",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"serialize",
",",
"automated_runs",
")",
")",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"req_body",
"=",
"request",
".",
"get_json",
"(",
")",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"base_learner_origin",
"=",
"None",
"if",
"req_body",
"[",
"'category'",
"]",
"==",
"'bayes'",
"or",
"req_body",
"[",
"'category'",
"]",
"==",
"'greedy_ensemble_search'",
":",
"base_learner_origin",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearnerOrigin",
")",
".",
"filter_by",
"(",
"id",
"=",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
".",
"first",
"(",
")",
"if",
"base_learner_origin",
"is",
"None",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} not found'",
".",
"format",
"(",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
",",
"404",
")",
"if",
"not",
"base_learner_origin",
".",
"final",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} is not final'",
".",
"format",
"(",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
")",
"elif",
"req_body",
"[",
"'category'",
"]",
"==",
"'tpot'",
":",
"pass",
"else",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Automated run category'",
"' {} not recognized'",
".",
"format",
"(",
"req_body",
"[",
"'category'",
"]",
")",
")",
"# Check for any syntax errors",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"req_body",
"[",
"'source'",
"]",
")",
"del",
"module",
"automated_run",
"=",
"models",
".",
"AutomatedRun",
"(",
"req_body",
"[",
"'source'",
"]",
",",
"'queued'",
",",
"req_body",
"[",
"'category'",
"]",
",",
"base_learner_origin",
")",
"session",
".",
"add",
"(",
"automated_run",
")",
"session",
".",
"commit",
"(",
")",
"with",
"Connection",
"(",
"get_redis_connection",
"(",
")",
")",
":",
"rqtasks",
".",
"start_automated_run",
".",
"delay",
"(",
"path",
",",
"automated_run",
".",
"id",
")",
"return",
"jsonify",
"(",
"automated_run",
".",
"serialize",
")"
] |
Return all automated runs
|
[
"Return",
"all",
"automated",
"runs"
] |
python
|
train
|
user-cont/conu
|
conu/backend/docker/container.py
|
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/container.py#L350-L363
|
def get_ports(self):
"""
get ports specified in container metadata
:return: list of str
"""
ports = []
container_ports = self.inspect(refresh=True)["NetworkSettings"]["Ports"]
if not container_ports:
return ports
for p in container_ports:
# TODO: gracefullness, error handling
ports.append(p.split("/")[0])
return ports
|
[
"def",
"get_ports",
"(",
"self",
")",
":",
"ports",
"=",
"[",
"]",
"container_ports",
"=",
"self",
".",
"inspect",
"(",
"refresh",
"=",
"True",
")",
"[",
"\"NetworkSettings\"",
"]",
"[",
"\"Ports\"",
"]",
"if",
"not",
"container_ports",
":",
"return",
"ports",
"for",
"p",
"in",
"container_ports",
":",
"# TODO: gracefullness, error handling",
"ports",
".",
"append",
"(",
"p",
".",
"split",
"(",
"\"/\"",
")",
"[",
"0",
"]",
")",
"return",
"ports"
] |
get ports specified in container metadata
:return: list of str
|
[
"get",
"ports",
"specified",
"in",
"container",
"metadata"
] |
python
|
train
|
Kronuz/pyScss
|
scss/cssdefs.py
|
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/cssdefs.py#L208-L224
|
def convert_units_to_base_units(units):
"""Convert a set of units into a set of "base" units.
Returns a 2-tuple of `factor, new_units`.
"""
total_factor = 1
new_units = []
for unit in units:
if unit not in BASE_UNIT_CONVERSIONS:
continue
factor, new_unit = BASE_UNIT_CONVERSIONS[unit]
total_factor *= factor
new_units.append(new_unit)
new_units.sort()
return total_factor, tuple(new_units)
|
[
"def",
"convert_units_to_base_units",
"(",
"units",
")",
":",
"total_factor",
"=",
"1",
"new_units",
"=",
"[",
"]",
"for",
"unit",
"in",
"units",
":",
"if",
"unit",
"not",
"in",
"BASE_UNIT_CONVERSIONS",
":",
"continue",
"factor",
",",
"new_unit",
"=",
"BASE_UNIT_CONVERSIONS",
"[",
"unit",
"]",
"total_factor",
"*=",
"factor",
"new_units",
".",
"append",
"(",
"new_unit",
")",
"new_units",
".",
"sort",
"(",
")",
"return",
"total_factor",
",",
"tuple",
"(",
"new_units",
")"
] |
Convert a set of units into a set of "base" units.
Returns a 2-tuple of `factor, new_units`.
|
[
"Convert",
"a",
"set",
"of",
"units",
"into",
"a",
"set",
"of",
"base",
"units",
"."
] |
python
|
train
|
cqparts/cqparts
|
src/cqparts/utils/wrappers.py
|
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/utils/wrappers.py#L2-L42
|
def as_part(func):
"""
Converts a function to a :class:`Part <cqparts.Part>` instance.
So the conventionally defined *part*::
import cadquery
from cqparts import Part
from cqparts.params import Float
class Box(Part):
x = Float(1)
y = Float(2)
z = Float(4)
def make(self):
return cadquery.Workplane('XY').box(self.x, self.y, self.z)
box = Box(x=6, y=3, z=1)
May also be written as::
import cadquery
from cqparts.utils.wrappers import as_part
@as_part
def make_box(x=1, y=2, z=4):
return cadquery.Workplane('XY').box(x, y, z)
box = make_box(x=6, y=3, z=1)
In both cases, ``box`` is a :class:`Part <cqparts.Part>` instance.
"""
from .. import Part
def inner(*args, **kwargs):
part_class = type(func.__name__, (Part,), {
'make': lambda self: func(*args, **kwargs),
})
return part_class()
inner.__doc__ = func.__doc__
return inner
|
[
"def",
"as_part",
"(",
"func",
")",
":",
"from",
".",
".",
"import",
"Part",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"part_class",
"=",
"type",
"(",
"func",
".",
"__name__",
",",
"(",
"Part",
",",
")",
",",
"{",
"'make'",
":",
"lambda",
"self",
":",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"}",
")",
"return",
"part_class",
"(",
")",
"inner",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
"return",
"inner"
] |
Converts a function to a :class:`Part <cqparts.Part>` instance.
So the conventionally defined *part*::
import cadquery
from cqparts import Part
from cqparts.params import Float
class Box(Part):
x = Float(1)
y = Float(2)
z = Float(4)
def make(self):
return cadquery.Workplane('XY').box(self.x, self.y, self.z)
box = Box(x=6, y=3, z=1)
May also be written as::
import cadquery
from cqparts.utils.wrappers import as_part
@as_part
def make_box(x=1, y=2, z=4):
return cadquery.Workplane('XY').box(x, y, z)
box = make_box(x=6, y=3, z=1)
In both cases, ``box`` is a :class:`Part <cqparts.Part>` instance.
|
[
"Converts",
"a",
"function",
"to",
"a",
":",
"class",
":",
"Part",
"<cqparts",
".",
"Part",
">",
"instance",
"."
] |
python
|
train
|
mlperf/training
|
compliance/mlperf_compliance/tf_mlperf_log.py
|
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/compliance/mlperf_compliance/tf_mlperf_log.py#L33-L62
|
def log_deferred(op, log_id, every_n=1, first_n=None):
"""Helper method inserting compliance logging ops.
Note: This helper is not guaranteed to be efficient, as it will insert ops
and control dependencies. If this proves to be a bottleneck, submitters
may wish to consider other methods such as extracting values from an
.events file.
Args:
op: A tf op to be printed.
log_id: a uuid provided by the logger in mlperf_log.py
every_n: If repeat is True, with what frequency should the input op be '
logged. If repeat is False, this argument is ignored.
first_n: Only log this many values. This arg does not interact with every_n.
The first_n refers to the first n that would have been logged.
"""
prefix = ":::MLPv0.5.0 [{}]".format(log_id)
if not first_n is not None and first_n == 1:
return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)
counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
aggregation=tf.VariableAggregation.MEAN)
increment = tf.assign_add(counter, 1, use_locking=True)
return tf.cond(
tf.equal(tf.mod(increment, every_n), 0),
lambda :tf.Print(op, [tf.timestamp(), op], message=prefix,
first_n=first_n),
lambda :op
)
|
[
"def",
"log_deferred",
"(",
"op",
",",
"log_id",
",",
"every_n",
"=",
"1",
",",
"first_n",
"=",
"None",
")",
":",
"prefix",
"=",
"\":::MLPv0.5.0 [{}]\"",
".",
"format",
"(",
"log_id",
")",
"if",
"not",
"first_n",
"is",
"not",
"None",
"and",
"first_n",
"==",
"1",
":",
"return",
"tf",
".",
"Print",
"(",
"op",
",",
"[",
"tf",
".",
"timestamp",
"(",
")",
",",
"op",
"]",
",",
"message",
"=",
"prefix",
",",
"first_n",
"=",
"1",
")",
"counter",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"-",
"1",
",",
"aggregation",
"=",
"tf",
".",
"VariableAggregation",
".",
"MEAN",
")",
"increment",
"=",
"tf",
".",
"assign_add",
"(",
"counter",
",",
"1",
",",
"use_locking",
"=",
"True",
")",
"return",
"tf",
".",
"cond",
"(",
"tf",
".",
"equal",
"(",
"tf",
".",
"mod",
"(",
"increment",
",",
"every_n",
")",
",",
"0",
")",
",",
"lambda",
":",
"tf",
".",
"Print",
"(",
"op",
",",
"[",
"tf",
".",
"timestamp",
"(",
")",
",",
"op",
"]",
",",
"message",
"=",
"prefix",
",",
"first_n",
"=",
"first_n",
")",
",",
"lambda",
":",
"op",
")"
] |
Helper method inserting compliance logging ops.
Note: This helper is not guaranteed to be efficient, as it will insert ops
and control dependencies. If this proves to be a bottleneck, submitters
may wish to consider other methods such as extracting values from an
.events file.
Args:
op: A tf op to be printed.
log_id: a uuid provided by the logger in mlperf_log.py
every_n: If repeat is True, with what frequency should the input op be '
logged. If repeat is False, this argument is ignored.
first_n: Only log this many values. This arg does not interact with every_n.
The first_n refers to the first n that would have been logged.
|
[
"Helper",
"method",
"inserting",
"compliance",
"logging",
"ops",
"."
] |
python
|
train
|
bitesofcode/projexui
|
projexui/widgets/xchartwidget/xchartruler.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartruler.py#L355-L419
|
def percentAt( self, value ):
"""
Returns the percentage where the given value lies between this rulers
minimum and maximum values. If the value equals the minimum, then the
percent is 0, if it equals the maximum, then the percent is 1 - any
value between will be a floating point. If the ruler is a custom type,
then only if the value matches a notch will be successful.
:param value | <variant>
:return <float>
"""
if ( value is None ):
return 0.0
minim = self.minimum()
maxim = self.maximum()
rtype = self.rulerType()
# simple minimum
if ( value == minim and not self.padStart() ):
perc = 0.0
# simple maximum
elif ( value == maxim and not self.padEnd() ):
perc = 1.0
# calculate a numeric percentage value
elif ( rtype == XChartRuler.Type.Number ):
perc = float(value - minim) / float(maxim - minim)
# calculate a time percentage value
elif ( rtype in (XChartRuler.Type.Datetime, XChartRuler.Type.Time) ):
maxsecs = minim.secsTo(maxim)
valsecs = minim.secsTo(value)
perc = float(valsecs) / maxsecs
# calculate a date percentage value
elif ( rtype == XChartRuler.Type.Date ):
maxdays = minim.daysTo(maxim)
valdays = minim.daysTo(value)
perc = float(valdays) / maxdays
# otherwise, compare against the notches
else:
perc = 0.0
notches = self.notches()
count = len(notches)
count += self.padStart() + self.padEnd()
count = max(1, count - 1)
perc = float(self.padStart()) / count
for i, notch in enumerate(notches):
if ( notch == value ):
perc += float(i) / count
break
# normalize the percentage
perc = min(perc, 1.0)
perc = max(0, perc)
return perc
|
[
"def",
"percentAt",
"(",
"self",
",",
"value",
")",
":",
"if",
"(",
"value",
"is",
"None",
")",
":",
"return",
"0.0",
"minim",
"=",
"self",
".",
"minimum",
"(",
")",
"maxim",
"=",
"self",
".",
"maximum",
"(",
")",
"rtype",
"=",
"self",
".",
"rulerType",
"(",
")",
"# simple minimum\r",
"if",
"(",
"value",
"==",
"minim",
"and",
"not",
"self",
".",
"padStart",
"(",
")",
")",
":",
"perc",
"=",
"0.0",
"# simple maximum\r",
"elif",
"(",
"value",
"==",
"maxim",
"and",
"not",
"self",
".",
"padEnd",
"(",
")",
")",
":",
"perc",
"=",
"1.0",
"# calculate a numeric percentage value\r",
"elif",
"(",
"rtype",
"==",
"XChartRuler",
".",
"Type",
".",
"Number",
")",
":",
"perc",
"=",
"float",
"(",
"value",
"-",
"minim",
")",
"/",
"float",
"(",
"maxim",
"-",
"minim",
")",
"# calculate a time percentage value\r",
"elif",
"(",
"rtype",
"in",
"(",
"XChartRuler",
".",
"Type",
".",
"Datetime",
",",
"XChartRuler",
".",
"Type",
".",
"Time",
")",
")",
":",
"maxsecs",
"=",
"minim",
".",
"secsTo",
"(",
"maxim",
")",
"valsecs",
"=",
"minim",
".",
"secsTo",
"(",
"value",
")",
"perc",
"=",
"float",
"(",
"valsecs",
")",
"/",
"maxsecs",
"# calculate a date percentage value\r",
"elif",
"(",
"rtype",
"==",
"XChartRuler",
".",
"Type",
".",
"Date",
")",
":",
"maxdays",
"=",
"minim",
".",
"daysTo",
"(",
"maxim",
")",
"valdays",
"=",
"minim",
".",
"daysTo",
"(",
"value",
")",
"perc",
"=",
"float",
"(",
"valdays",
")",
"/",
"maxdays",
"# otherwise, compare against the notches\r",
"else",
":",
"perc",
"=",
"0.0",
"notches",
"=",
"self",
".",
"notches",
"(",
")",
"count",
"=",
"len",
"(",
"notches",
")",
"count",
"+=",
"self",
".",
"padStart",
"(",
")",
"+",
"self",
".",
"padEnd",
"(",
")",
"count",
"=",
"max",
"(",
"1",
",",
"count",
"-",
"1",
")",
"perc",
"=",
"float",
"(",
"self",
".",
"padStart",
"(",
")",
")",
"/",
"count",
"for",
"i",
",",
"notch",
"in",
"enumerate",
"(",
"notches",
")",
":",
"if",
"(",
"notch",
"==",
"value",
")",
":",
"perc",
"+=",
"float",
"(",
"i",
")",
"/",
"count",
"break",
"# normalize the percentage\r",
"perc",
"=",
"min",
"(",
"perc",
",",
"1.0",
")",
"perc",
"=",
"max",
"(",
"0",
",",
"perc",
")",
"return",
"perc"
] |
Returns the percentage where the given value lies between this rulers
minimum and maximum values. If the value equals the minimum, then the
percent is 0, if it equals the maximum, then the percent is 1 - any
value between will be a floating point. If the ruler is a custom type,
then only if the value matches a notch will be successful.
:param value | <variant>
:return <float>
|
[
"Returns",
"the",
"percentage",
"where",
"the",
"given",
"value",
"lies",
"between",
"this",
"rulers",
"minimum",
"and",
"maximum",
"values",
".",
"If",
"the",
"value",
"equals",
"the",
"minimum",
"then",
"the",
"percent",
"is",
"0",
"if",
"it",
"equals",
"the",
"maximum",
"then",
"the",
"percent",
"is",
"1",
"-",
"any",
"value",
"between",
"will",
"be",
"a",
"floating",
"point",
".",
"If",
"the",
"ruler",
"is",
"a",
"custom",
"type",
"then",
"only",
"if",
"the",
"value",
"matches",
"a",
"notch",
"will",
"be",
"successful",
".",
":",
"param",
"value",
"|",
"<variant",
">",
":",
"return",
"<float",
">"
] |
python
|
train
|
godaddy/gdapi-python
|
gdapi.py
|
https://github.com/godaddy/gdapi-python/blob/79d7784df9d9aae92c1c808c3e4936970ad72abf/gdapi.py#L544-L586
|
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
'''Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function.'''
# closure for breaking logical rows to physical, using wrapfunc
def rowWrapper(row):
newRows = [wrapfunc(item).split('\n') for item in row]
return [[substr or '' for substr in item] for item in map(None, *newRows)] # NOQA
# break each logical row into one or more physical ones
logicalRows = [rowWrapper(row) for row in rows]
# columns of physical rows
columns = map(None, *reduce(operator.add, logicalRows))
# get the maximum of each column by the string length of its items
maxWidths = [max([len(str(item)) for item in column])
for column in columns]
rowSeparator = headerChar * (len(prefix) + len(postfix) +
sum(maxWidths) +
len(delim)*(len(maxWidths)-1))
# select the appropriate justify method
justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()] # NOQA
output = cStringIO.StringIO()
if separateRows:
print >> output, rowSeparator
for physicalRows in logicalRows:
for row in physicalRows:
print >> output, prefix \
+ delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) + postfix # NOQA
if separateRows or hasHeader:
print >> output, rowSeparator
hasHeader = False
return output.getvalue()
|
[
"def",
"indent",
"(",
"rows",
",",
"hasHeader",
"=",
"False",
",",
"headerChar",
"=",
"'-'",
",",
"delim",
"=",
"' | '",
",",
"justify",
"=",
"'left'",
",",
"separateRows",
"=",
"False",
",",
"prefix",
"=",
"''",
",",
"postfix",
"=",
"''",
",",
"wrapfunc",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"# closure for breaking logical rows to physical, using wrapfunc",
"def",
"rowWrapper",
"(",
"row",
")",
":",
"newRows",
"=",
"[",
"wrapfunc",
"(",
"item",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"item",
"in",
"row",
"]",
"return",
"[",
"[",
"substr",
"or",
"''",
"for",
"substr",
"in",
"item",
"]",
"for",
"item",
"in",
"map",
"(",
"None",
",",
"*",
"newRows",
")",
"]",
"# NOQA",
"# break each logical row into one or more physical ones",
"logicalRows",
"=",
"[",
"rowWrapper",
"(",
"row",
")",
"for",
"row",
"in",
"rows",
"]",
"# columns of physical rows",
"columns",
"=",
"map",
"(",
"None",
",",
"*",
"reduce",
"(",
"operator",
".",
"add",
",",
"logicalRows",
")",
")",
"# get the maximum of each column by the string length of its items",
"maxWidths",
"=",
"[",
"max",
"(",
"[",
"len",
"(",
"str",
"(",
"item",
")",
")",
"for",
"item",
"in",
"column",
"]",
")",
"for",
"column",
"in",
"columns",
"]",
"rowSeparator",
"=",
"headerChar",
"*",
"(",
"len",
"(",
"prefix",
")",
"+",
"len",
"(",
"postfix",
")",
"+",
"sum",
"(",
"maxWidths",
")",
"+",
"len",
"(",
"delim",
")",
"*",
"(",
"len",
"(",
"maxWidths",
")",
"-",
"1",
")",
")",
"# select the appropriate justify method",
"justify",
"=",
"{",
"'center'",
":",
"str",
".",
"center",
",",
"'right'",
":",
"str",
".",
"rjust",
",",
"'left'",
":",
"str",
".",
"ljust",
"}",
"[",
"justify",
".",
"lower",
"(",
")",
"]",
"# NOQA",
"output",
"=",
"cStringIO",
".",
"StringIO",
"(",
")",
"if",
"separateRows",
":",
"print",
">>",
"output",
",",
"rowSeparator",
"for",
"physicalRows",
"in",
"logicalRows",
":",
"for",
"row",
"in",
"physicalRows",
":",
"print",
">>",
"output",
",",
"prefix",
"+",
"delim",
".",
"join",
"(",
"[",
"justify",
"(",
"str",
"(",
"item",
")",
",",
"width",
")",
"for",
"(",
"item",
",",
"width",
")",
"in",
"zip",
"(",
"row",
",",
"maxWidths",
")",
"]",
")",
"+",
"postfix",
"# NOQA",
"if",
"separateRows",
"or",
"hasHeader",
":",
"print",
">>",
"output",
",",
"rowSeparator",
"hasHeader",
"=",
"False",
"return",
"output",
".",
"getvalue",
"(",
")"
] |
Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function.
|
[
"Indents",
"a",
"table",
"by",
"column",
".",
"-",
"rows",
":",
"A",
"sequence",
"of",
"sequences",
"of",
"items",
"one",
"sequence",
"per",
"row",
".",
"-",
"hasHeader",
":",
"True",
"if",
"the",
"first",
"row",
"consists",
"of",
"the",
"columns",
"names",
".",
"-",
"headerChar",
":",
"Character",
"to",
"be",
"used",
"for",
"the",
"row",
"separator",
"line",
"(",
"if",
"hasHeader",
"==",
"True",
"or",
"separateRows",
"==",
"True",
")",
".",
"-",
"delim",
":",
"The",
"column",
"delimiter",
".",
"-",
"justify",
":",
"Determines",
"how",
"are",
"data",
"justified",
"in",
"their",
"column",
".",
"Valid",
"values",
"are",
"left",
"right",
"and",
"center",
".",
"-",
"separateRows",
":",
"True",
"if",
"rows",
"are",
"to",
"be",
"separated",
"by",
"a",
"line",
"of",
"headerChar",
"s",
".",
"-",
"prefix",
":",
"A",
"string",
"prepended",
"to",
"each",
"printed",
"row",
".",
"-",
"postfix",
":",
"A",
"string",
"appended",
"to",
"each",
"printed",
"row",
".",
"-",
"wrapfunc",
":",
"A",
"function",
"f",
"(",
"text",
")",
"for",
"wrapping",
"text",
";",
"each",
"element",
"in",
"the",
"table",
"is",
"first",
"wrapped",
"by",
"this",
"function",
"."
] |
python
|
train
|
tanghaibao/jcvi
|
jcvi/assembly/gaps.py
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/gaps.py#L284-L330
|
def flanks(args):
"""
%prog flanks gaps.bed fastafile
Create sequences flanking the gaps.
"""
p = OptionParser(flanks.__doc__)
p.add_option("--extend", default=2000, type="int",
help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gapsbed, fastafile = args
Ext = opts.extend
sizes = Sizes(fastafile).mapping
bed = Bed(gapsbed)
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
fw = open(extbed, "w")
for i, b in enumerate(bed):
seqid = b.seqid
gapname = b.accn
size = sizes[seqid]
prev_b = bed[i - 1] if i > 0 else None
next_b = bed[i + 1] if i + 1 < len(bed) else None
if prev_b and prev_b.seqid != seqid:
prev_b = None
if next_b and next_b.seqid != seqid:
next_b = None
start = prev_b.end + 1 if prev_b else 1
start, end = max(start, b.start - Ext), b.start - 1
print("\t".join(str(x) for x in \
(b.seqid, start - 1, end, gapname + "L")), file=fw)
end = next_b.start - 1 if next_b else size
start, end = b.end + 1, min(end, b.end + Ext)
print("\t".join(str(x) for x in \
(b.seqid, start - 1, end, gapname + "R")), file=fw)
fw.close()
extfasta = fastaFromBed(extbed, fastafile, name=True)
return extbed, extfasta
|
[
"def",
"flanks",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"flanks",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--extend\"",
",",
"default",
"=",
"2000",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Extend seq flanking the gaps [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"gapsbed",
",",
"fastafile",
"=",
"args",
"Ext",
"=",
"opts",
".",
"extend",
"sizes",
"=",
"Sizes",
"(",
"fastafile",
")",
".",
"mapping",
"bed",
"=",
"Bed",
"(",
"gapsbed",
")",
"pf",
"=",
"gapsbed",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"extbed",
"=",
"pf",
"+",
"\".ext.bed\"",
"fw",
"=",
"open",
"(",
"extbed",
",",
"\"w\"",
")",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"bed",
")",
":",
"seqid",
"=",
"b",
".",
"seqid",
"gapname",
"=",
"b",
".",
"accn",
"size",
"=",
"sizes",
"[",
"seqid",
"]",
"prev_b",
"=",
"bed",
"[",
"i",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"else",
"None",
"next_b",
"=",
"bed",
"[",
"i",
"+",
"1",
"]",
"if",
"i",
"+",
"1",
"<",
"len",
"(",
"bed",
")",
"else",
"None",
"if",
"prev_b",
"and",
"prev_b",
".",
"seqid",
"!=",
"seqid",
":",
"prev_b",
"=",
"None",
"if",
"next_b",
"and",
"next_b",
".",
"seqid",
"!=",
"seqid",
":",
"next_b",
"=",
"None",
"start",
"=",
"prev_b",
".",
"end",
"+",
"1",
"if",
"prev_b",
"else",
"1",
"start",
",",
"end",
"=",
"max",
"(",
"start",
",",
"b",
".",
"start",
"-",
"Ext",
")",
",",
"b",
".",
"start",
"-",
"1",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"b",
".",
"seqid",
",",
"start",
"-",
"1",
",",
"end",
",",
"gapname",
"+",
"\"L\"",
")",
")",
",",
"file",
"=",
"fw",
")",
"end",
"=",
"next_b",
".",
"start",
"-",
"1",
"if",
"next_b",
"else",
"size",
"start",
",",
"end",
"=",
"b",
".",
"end",
"+",
"1",
",",
"min",
"(",
"end",
",",
"b",
".",
"end",
"+",
"Ext",
")",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"b",
".",
"seqid",
",",
"start",
"-",
"1",
",",
"end",
",",
"gapname",
"+",
"\"R\"",
")",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"extfasta",
"=",
"fastaFromBed",
"(",
"extbed",
",",
"fastafile",
",",
"name",
"=",
"True",
")",
"return",
"extbed",
",",
"extfasta"
] |
%prog flanks gaps.bed fastafile
Create sequences flanking the gaps.
|
[
"%prog",
"flanks",
"gaps",
".",
"bed",
"fastafile"
] |
python
|
train
|
mbedmicro/pyOCD
|
pyocd/flash/flash.py
|
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/flash/flash.py#L340-L353
|
def erase_sector(self, address):
"""!
@brief Erase one sector.
@exception FlashEraseFailure
"""
assert self._active_operation == self.Operation.ERASE
# update core register to execute the erase_sector subroutine
result = self._call_function_and_wait(self.flash_algo['pc_erase_sector'], address)
# check the return code
if result != 0:
raise FlashEraseFailure('erase_sector(0x%x) error: %i' % (address, result), address, result)
|
[
"def",
"erase_sector",
"(",
"self",
",",
"address",
")",
":",
"assert",
"self",
".",
"_active_operation",
"==",
"self",
".",
"Operation",
".",
"ERASE",
"# update core register to execute the erase_sector subroutine",
"result",
"=",
"self",
".",
"_call_function_and_wait",
"(",
"self",
".",
"flash_algo",
"[",
"'pc_erase_sector'",
"]",
",",
"address",
")",
"# check the return code",
"if",
"result",
"!=",
"0",
":",
"raise",
"FlashEraseFailure",
"(",
"'erase_sector(0x%x) error: %i'",
"%",
"(",
"address",
",",
"result",
")",
",",
"address",
",",
"result",
")"
] |
!
@brief Erase one sector.
@exception FlashEraseFailure
|
[
"!"
] |
python
|
train
|
xeroc/python-graphenelib
|
graphenecommon/account.py
|
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/account.py#L101-L110
|
def balances(self):
""" List balances of an account. This call returns instances of
:class:`amount.Amount`.
"""
balances = self.blockchain.rpc.get_account_balances(self["id"], [])
return [
self.amount_class(b, blockchain_instance=self.blockchain)
for b in balances
if int(b["amount"]) > 0
]
|
[
"def",
"balances",
"(",
"self",
")",
":",
"balances",
"=",
"self",
".",
"blockchain",
".",
"rpc",
".",
"get_account_balances",
"(",
"self",
"[",
"\"id\"",
"]",
",",
"[",
"]",
")",
"return",
"[",
"self",
".",
"amount_class",
"(",
"b",
",",
"blockchain_instance",
"=",
"self",
".",
"blockchain",
")",
"for",
"b",
"in",
"balances",
"if",
"int",
"(",
"b",
"[",
"\"amount\"",
"]",
")",
">",
"0",
"]"
] |
List balances of an account. This call returns instances of
:class:`amount.Amount`.
|
[
"List",
"balances",
"of",
"an",
"account",
".",
"This",
"call",
"returns",
"instances",
"of",
":",
"class",
":",
"amount",
".",
"Amount",
"."
] |
python
|
valid
|
hyperledger/indy-sdk
|
wrappers/python/indy/wallet.py
|
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/wallet.py#L9-L63
|
async def create_wallet(config: str,
credentials: str) -> None:
"""
Creates a new secure wallet with the given unique name.
:param config: Wallet configuration json.
{
"id": string, Identifier of the wallet.
Configured storage uses this identifier to lookup exact wallet data placement.
"storage_type": optional<string>, Type of the wallet storage. Defaults to 'default'.
'Default' storage type allows to store wallet data in the local file.
Custom storage types can be registered with indy_register_wallet_storage call.
"storage_config": optional<object>, Storage configuration json. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type configuration is:
{
"path": optional<string>, Path to the directory with wallet files.
Defaults to $HOME/.indy_client/wallet.
Wallet will be stored in the file {path}/{id}/sqlite.db
}
}
:param credentials: Wallet credentials json
{
"key": string, Key or passphrase used for wallet key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"storage_credentials": optional<object> Credentials for wallet storage. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type should be empty.
"key_derivation_method": optional<string> Algorithm to use for wallet key derivation:
ARGON2I_MOD - derive secured wallet master key (used by default)
ARGON2I_INT - derive secured wallet master key (less secured but faster)
RAW - raw wallet key master provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("create_wallet: >>> config: %r, credentials: %r",
config,
credentials)
if not hasattr(create_wallet, "cb"):
logger.debug("create_wallet: Creating callback")
create_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_config = c_char_p(config.encode('utf-8'))
c_credentials = c_char_p(credentials.encode('utf-8'))
await do_call('indy_create_wallet',
c_config,
c_credentials,
create_wallet.cb)
logger.debug("create_wallet: <<<")
|
[
"async",
"def",
"create_wallet",
"(",
"config",
":",
"str",
",",
"credentials",
":",
"str",
")",
"->",
"None",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"create_wallet: >>> config: %r, credentials: %r\"",
",",
"config",
",",
"credentials",
")",
"if",
"not",
"hasattr",
"(",
"create_wallet",
",",
"\"cb\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"create_wallet: Creating callback\"",
")",
"create_wallet",
".",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_int32",
",",
"c_int32",
")",
")",
"c_config",
"=",
"c_char_p",
"(",
"config",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"c_credentials",
"=",
"c_char_p",
"(",
"credentials",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"await",
"do_call",
"(",
"'indy_create_wallet'",
",",
"c_config",
",",
"c_credentials",
",",
"create_wallet",
".",
"cb",
")",
"logger",
".",
"debug",
"(",
"\"create_wallet: <<<\"",
")"
] |
Creates a new secure wallet with the given unique name.
:param config: Wallet configuration json.
{
"id": string, Identifier of the wallet.
Configured storage uses this identifier to lookup exact wallet data placement.
"storage_type": optional<string>, Type of the wallet storage. Defaults to 'default'.
'Default' storage type allows to store wallet data in the local file.
Custom storage types can be registered with indy_register_wallet_storage call.
"storage_config": optional<object>, Storage configuration json. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type configuration is:
{
"path": optional<string>, Path to the directory with wallet files.
Defaults to $HOME/.indy_client/wallet.
Wallet will be stored in the file {path}/{id}/sqlite.db
}
}
:param credentials: Wallet credentials json
{
"key": string, Key or passphrase used for wallet key derivation.
Look to key_derivation_method param for information about supported key derivation methods.
"storage_credentials": optional<object> Credentials for wallet storage. Storage type defines set of supported keys.
Can be optional if storage supports default configuration.
For 'default' storage type should be empty.
"key_derivation_method": optional<string> Algorithm to use for wallet key derivation:
ARGON2I_MOD - derive secured wallet master key (used by default)
ARGON2I_INT - derive secured wallet master key (less secured but faster)
RAW - raw wallet key master provided (skip derivation).
RAW keys can be generated with generate_wallet_key call
}
:return: Error code
|
[
"Creates",
"a",
"new",
"secure",
"wallet",
"with",
"the",
"given",
"unique",
"name",
"."
] |
python
|
train
|
DataONEorg/d1_python
|
lib_common/src/d1_common/multipart.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/multipart.py#L44-L68
|
def parse_str(mmp_bytes, content_type, encoding='utf-8'):
"""Parse multipart document bytes into a tuple of BodyPart objects.
Args:
mmp_bytes: bytes
Multipart document.
content_type : str
Must be on the form, ``multipart/form-data; boundary=<BOUNDARY>``, where
``<BOUNDARY>`` is the string that separates the parts of the multipart document
in ``mmp_bytes``. In HTTP requests and responses, it is passed in the
Content-Type header.
encoding : str
The coding used for the text in the HTML body.
Returns:
tuple of BodyPart
Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode),
encoding (str).
"""
return requests_toolbelt.multipart.decoder.MultipartDecoder(
mmp_bytes, content_type, encoding
).parts
|
[
"def",
"parse_str",
"(",
"mmp_bytes",
",",
"content_type",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"return",
"requests_toolbelt",
".",
"multipart",
".",
"decoder",
".",
"MultipartDecoder",
"(",
"mmp_bytes",
",",
"content_type",
",",
"encoding",
")",
".",
"parts"
] |
Parse multipart document bytes into a tuple of BodyPart objects.
Args:
mmp_bytes: bytes
Multipart document.
content_type : str
Must be on the form, ``multipart/form-data; boundary=<BOUNDARY>``, where
``<BOUNDARY>`` is the string that separates the parts of the multipart document
in ``mmp_bytes``. In HTTP requests and responses, it is passed in the
Content-Type header.
encoding : str
The coding used for the text in the HTML body.
Returns:
tuple of BodyPart
Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode),
encoding (str).
|
[
"Parse",
"multipart",
"document",
"bytes",
"into",
"a",
"tuple",
"of",
"BodyPart",
"objects",
"."
] |
python
|
train
|
fabioz/PyDev.Debugger
|
pydevd_attach_to_process/winappdbg/module.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L1999-L2016
|
def _notify_unload_dll(self, event):
"""
Notify the release of a loaded module.
This is done automatically by the L{Debug} class, you shouldn't need
to call it yourself.
@type event: L{UnloadDLLEvent}
@param event: Unload DLL event.
@rtype: bool
@return: C{True} to call the user-defined handle, C{False} otherwise.
"""
lpBaseOfDll = event.get_module_base()
## if self.has_module(lpBaseOfDll): # XXX this would trigger a scan
if lpBaseOfDll in self.__moduleDict:
self._del_module(lpBaseOfDll)
return True
|
[
"def",
"_notify_unload_dll",
"(",
"self",
",",
"event",
")",
":",
"lpBaseOfDll",
"=",
"event",
".",
"get_module_base",
"(",
")",
"## if self.has_module(lpBaseOfDll): # XXX this would trigger a scan",
"if",
"lpBaseOfDll",
"in",
"self",
".",
"__moduleDict",
":",
"self",
".",
"_del_module",
"(",
"lpBaseOfDll",
")",
"return",
"True"
] |
Notify the release of a loaded module.
This is done automatically by the L{Debug} class, you shouldn't need
to call it yourself.
@type event: L{UnloadDLLEvent}
@param event: Unload DLL event.
@rtype: bool
@return: C{True} to call the user-defined handle, C{False} otherwise.
|
[
"Notify",
"the",
"release",
"of",
"a",
"loaded",
"module",
"."
] |
python
|
train
|
havardgulldahl/jottalib
|
src/jottalib/JFS.py
|
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L92-L105
|
def calculate_md5(fileobject, size=2**16):
"""Utility function to calculate md5 hashes while being light on memory usage.
By reading the fileobject piece by piece, we are able to process content that
is larger than available memory"""
fileobject.seek(0)
md5 = hashlib.md5()
for data in iter(lambda: fileobject.read(size), b''):
if not data: break
if isinstance(data, six.text_type):
data = data.encode('utf-8') # md5 needs a byte string
md5.update(data)
fileobject.seek(0) # rewind read head
return md5.hexdigest()
|
[
"def",
"calculate_md5",
"(",
"fileobject",
",",
"size",
"=",
"2",
"**",
"16",
")",
":",
"fileobject",
".",
"seek",
"(",
"0",
")",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"for",
"data",
"in",
"iter",
"(",
"lambda",
":",
"fileobject",
".",
"read",
"(",
"size",
")",
",",
"b''",
")",
":",
"if",
"not",
"data",
":",
"break",
"if",
"isinstance",
"(",
"data",
",",
"six",
".",
"text_type",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
"# md5 needs a byte string",
"md5",
".",
"update",
"(",
"data",
")",
"fileobject",
".",
"seek",
"(",
"0",
")",
"# rewind read head",
"return",
"md5",
".",
"hexdigest",
"(",
")"
] |
Utility function to calculate md5 hashes while being light on memory usage.
By reading the fileobject piece by piece, we are able to process content that
is larger than available memory
|
[
"Utility",
"function",
"to",
"calculate",
"md5",
"hashes",
"while",
"being",
"light",
"on",
"memory",
"usage",
"."
] |
python
|
train
|
streamlink/streamlink
|
src/streamlink_cli/utils/progress.py
|
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink_cli/utils/progress.py#L83-L97
|
def format_time(elapsed):
"""Formats elapsed seconds into a human readable format."""
hours = int(elapsed / (60 * 60))
minutes = int((elapsed % (60 * 60)) / 60)
seconds = int(elapsed % 60)
rval = ""
if hours:
rval += "{0}h".format(hours)
if elapsed > 60:
rval += "{0}m".format(minutes)
rval += "{0}s".format(seconds)
return rval
|
[
"def",
"format_time",
"(",
"elapsed",
")",
":",
"hours",
"=",
"int",
"(",
"elapsed",
"/",
"(",
"60",
"*",
"60",
")",
")",
"minutes",
"=",
"int",
"(",
"(",
"elapsed",
"%",
"(",
"60",
"*",
"60",
")",
")",
"/",
"60",
")",
"seconds",
"=",
"int",
"(",
"elapsed",
"%",
"60",
")",
"rval",
"=",
"\"\"",
"if",
"hours",
":",
"rval",
"+=",
"\"{0}h\"",
".",
"format",
"(",
"hours",
")",
"if",
"elapsed",
">",
"60",
":",
"rval",
"+=",
"\"{0}m\"",
".",
"format",
"(",
"minutes",
")",
"rval",
"+=",
"\"{0}s\"",
".",
"format",
"(",
"seconds",
")",
"return",
"rval"
] |
Formats elapsed seconds into a human readable format.
|
[
"Formats",
"elapsed",
"seconds",
"into",
"a",
"human",
"readable",
"format",
"."
] |
python
|
test
|
globus/globus-cli
|
globus_cli/commands/endpoint/my_shared_endpoint_list.py
|
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/endpoint/my_shared_endpoint_list.py#L14-L21
|
def my_shared_endpoint_list(endpoint_id):
"""
Executor for `globus endpoint my-shared-endpoint-list`
"""
client = get_client()
ep_iterator = client.my_shared_endpoint_list(endpoint_id)
formatted_print(ep_iterator, fields=ENDPOINT_LIST_FIELDS)
|
[
"def",
"my_shared_endpoint_list",
"(",
"endpoint_id",
")",
":",
"client",
"=",
"get_client",
"(",
")",
"ep_iterator",
"=",
"client",
".",
"my_shared_endpoint_list",
"(",
"endpoint_id",
")",
"formatted_print",
"(",
"ep_iterator",
",",
"fields",
"=",
"ENDPOINT_LIST_FIELDS",
")"
] |
Executor for `globus endpoint my-shared-endpoint-list`
|
[
"Executor",
"for",
"globus",
"endpoint",
"my",
"-",
"shared",
"-",
"endpoint",
"-",
"list"
] |
python
|
train
|
raiden-network/raiden-contracts
|
raiden_contracts/utils/transaction.py
|
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/transaction.py#L7-L22
|
def check_successful_tx(web3: Web3, txid: str, timeout=180) -> Tuple[dict, dict]:
"""See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
"""
receipt = wait_for_transaction_receipt(web3=web3, txid=txid, timeout=timeout)
txinfo = web3.eth.getTransaction(txid)
if 'status' not in receipt:
raise KeyError(
'A transaction receipt does not contain the "status" field. '
'Does your chain have Byzantium rules enabled?',
)
if receipt['status'] == 0:
raise ValueError(f'Status 0 indicates failure')
if txinfo['gas'] == receipt['gasUsed']:
raise ValueError(f'Gas is completely used ({txinfo["gas"]}). Failure?')
return (receipt, txinfo)
|
[
"def",
"check_successful_tx",
"(",
"web3",
":",
"Web3",
",",
"txid",
":",
"str",
",",
"timeout",
"=",
"180",
")",
"->",
"Tuple",
"[",
"dict",
",",
"dict",
"]",
":",
"receipt",
"=",
"wait_for_transaction_receipt",
"(",
"web3",
"=",
"web3",
",",
"txid",
"=",
"txid",
",",
"timeout",
"=",
"timeout",
")",
"txinfo",
"=",
"web3",
".",
"eth",
".",
"getTransaction",
"(",
"txid",
")",
"if",
"'status'",
"not",
"in",
"receipt",
":",
"raise",
"KeyError",
"(",
"'A transaction receipt does not contain the \"status\" field. '",
"'Does your chain have Byzantium rules enabled?'",
",",
")",
"if",
"receipt",
"[",
"'status'",
"]",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"f'Status 0 indicates failure'",
")",
"if",
"txinfo",
"[",
"'gas'",
"]",
"==",
"receipt",
"[",
"'gasUsed'",
"]",
":",
"raise",
"ValueError",
"(",
"f'Gas is completely used ({txinfo[\"gas\"]}). Failure?'",
")",
"return",
"(",
"receipt",
",",
"txinfo",
")"
] |
See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
|
[
"See",
"if",
"transaction",
"went",
"through",
"(",
"Solidity",
"code",
"did",
"not",
"throw",
")",
".",
":",
"return",
":",
"Transaction",
"receipt",
"and",
"transaction",
"info"
] |
python
|
train
|
bwohlberg/sporco
|
docs/source/docntbk.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L253-L304
|
def rst_to_docs_rst(infile, outfile):
"""Convert an rst file to a sphinx docs rst file."""
# Read infile into a list of lines
with open(infile, 'r') as fin:
rst = fin.readlines()
# Inspect outfile path components to determine whether outfile
# is in the root of the examples directory or in a subdirectory
# thererof
ps = pathsplit(outfile)[-3:]
if ps[-2] == 'examples':
ps = ps[-2:]
idx = 'index'
else:
idx = ''
# Output string starts with a cross-reference anchor constructed from
# the file name and path
out = '.. _' + '_'.join(ps) + ':\n\n'
# Iterate over lines from infile
it = iter(rst)
for line in it:
if line[0:12] == '.. toc-start': # Line has start of toc marker
# Initialise current toc array and iterate over lines until
# end of toc marker encountered
toc = []
for line in it:
if line == '\n': # Drop newline lines
continue
elif line[0:10] == '.. toc-end': # End of toc marker
# Add toctree section to output string
out += '.. toctree::\n :maxdepth: 1\n\n'
for c in toc:
out += ' %s <%s>\n' % c
break
else: # Still within toc section
# Extract link text and target url and append to
# toc array
m = re.search(r'`(.*?)\s*<(.*?)(?:.py)?>`', line)
if m:
if idx == '':
toc.append((m.group(1), m.group(2)))
else:
toc.append((m.group(1),
os.path.join(m.group(2), idx)))
else: # Not within toc section
out += line
with open(outfile, 'w') as fout:
fout.write(out)
|
[
"def",
"rst_to_docs_rst",
"(",
"infile",
",",
"outfile",
")",
":",
"# Read infile into a list of lines",
"with",
"open",
"(",
"infile",
",",
"'r'",
")",
"as",
"fin",
":",
"rst",
"=",
"fin",
".",
"readlines",
"(",
")",
"# Inspect outfile path components to determine whether outfile",
"# is in the root of the examples directory or in a subdirectory",
"# thererof",
"ps",
"=",
"pathsplit",
"(",
"outfile",
")",
"[",
"-",
"3",
":",
"]",
"if",
"ps",
"[",
"-",
"2",
"]",
"==",
"'examples'",
":",
"ps",
"=",
"ps",
"[",
"-",
"2",
":",
"]",
"idx",
"=",
"'index'",
"else",
":",
"idx",
"=",
"''",
"# Output string starts with a cross-reference anchor constructed from",
"# the file name and path",
"out",
"=",
"'.. _'",
"+",
"'_'",
".",
"join",
"(",
"ps",
")",
"+",
"':\\n\\n'",
"# Iterate over lines from infile",
"it",
"=",
"iter",
"(",
"rst",
")",
"for",
"line",
"in",
"it",
":",
"if",
"line",
"[",
"0",
":",
"12",
"]",
"==",
"'.. toc-start'",
":",
"# Line has start of toc marker",
"# Initialise current toc array and iterate over lines until",
"# end of toc marker encountered",
"toc",
"=",
"[",
"]",
"for",
"line",
"in",
"it",
":",
"if",
"line",
"==",
"'\\n'",
":",
"# Drop newline lines",
"continue",
"elif",
"line",
"[",
"0",
":",
"10",
"]",
"==",
"'.. toc-end'",
":",
"# End of toc marker",
"# Add toctree section to output string",
"out",
"+=",
"'.. toctree::\\n :maxdepth: 1\\n\\n'",
"for",
"c",
"in",
"toc",
":",
"out",
"+=",
"' %s <%s>\\n'",
"%",
"c",
"break",
"else",
":",
"# Still within toc section",
"# Extract link text and target url and append to",
"# toc array",
"m",
"=",
"re",
".",
"search",
"(",
"r'`(.*?)\\s*<(.*?)(?:.py)?>`'",
",",
"line",
")",
"if",
"m",
":",
"if",
"idx",
"==",
"''",
":",
"toc",
".",
"append",
"(",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"else",
":",
"toc",
".",
"append",
"(",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"m",
".",
"group",
"(",
"2",
")",
",",
"idx",
")",
")",
")",
"else",
":",
"# Not within toc section",
"out",
"+=",
"line",
"with",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"out",
")"
] |
Convert an rst file to a sphinx docs rst file.
|
[
"Convert",
"an",
"rst",
"file",
"to",
"a",
"sphinx",
"docs",
"rst",
"file",
"."
] |
python
|
train
|
sdispater/pendulum
|
pendulum/datetime.py
|
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/datetime.py#L779-L793
|
def diff(self, dt=None, abs=True):
"""
Returns the difference between two DateTime objects represented as a Duration.
:type dt: DateTime or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
"""
if dt is None:
dt = self.now(self.tz)
return Period(self, dt, absolute=abs)
|
[
"def",
"diff",
"(",
"self",
",",
"dt",
"=",
"None",
",",
"abs",
"=",
"True",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"self",
".",
"now",
"(",
"self",
".",
"tz",
")",
"return",
"Period",
"(",
"self",
",",
"dt",
",",
"absolute",
"=",
"abs",
")"
] |
Returns the difference between two DateTime objects represented as a Duration.
:type dt: DateTime or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
|
[
"Returns",
"the",
"difference",
"between",
"two",
"DateTime",
"objects",
"represented",
"as",
"a",
"Duration",
"."
] |
python
|
train
|
eventbrite/conformity
|
conformity/fields/structures.py
|
https://github.com/eventbrite/conformity/blob/12014fe4e14f66869ffda9f9ca09cd20a985c769/conformity/fields/structures.py#L170-L205
|
def extend(
self,
contents=None,
optional_keys=None,
allow_extra_keys=None,
description=None,
replace_optional_keys=False,
):
"""
This method allows you to create a new `Dictionary` that extends the current `Dictionary` with additional
contents and/or optional keys, and/or replaces the `allow_extra_keys` and/or `description` attributes.
:param contents: More contents, if any, to extend the current contents
:type contents: dict
:param optional_keys: More optional keys, if any, to extend the current optional keys
:type optional_keys: union[set, list, tuple]
:param allow_extra_keys: If non-`None`, this overrides the current `allow_extra_keys` attribute
:type allow_extra_keys: bool
:param description: If non-`None`, this overrides the current `description` attribute
:type description: union[str, unicode]
:param replace_optional_keys: If `True`, then the `optional_keys` argument will completely replace, instead of
extend, the current optional keys
:type replace_optional_keys: bool
:return: A new `Dictionary` extended from the current `Dictionary` based on the supplied arguments
:rtype: Dictionary
"""
optional_keys = set(optional_keys or [])
return Dictionary(
contents=type(self.contents)(
(k, v) for d in (self.contents, contents) for k, v in six.iteritems(d)
) if contents else self.contents,
optional_keys=optional_keys if replace_optional_keys else self.optional_keys | optional_keys,
allow_extra_keys=self.allow_extra_keys if allow_extra_keys is None else allow_extra_keys,
description=self.description if description is None else description,
)
|
[
"def",
"extend",
"(",
"self",
",",
"contents",
"=",
"None",
",",
"optional_keys",
"=",
"None",
",",
"allow_extra_keys",
"=",
"None",
",",
"description",
"=",
"None",
",",
"replace_optional_keys",
"=",
"False",
",",
")",
":",
"optional_keys",
"=",
"set",
"(",
"optional_keys",
"or",
"[",
"]",
")",
"return",
"Dictionary",
"(",
"contents",
"=",
"type",
"(",
"self",
".",
"contents",
")",
"(",
"(",
"k",
",",
"v",
")",
"for",
"d",
"in",
"(",
"self",
".",
"contents",
",",
"contents",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"d",
")",
")",
"if",
"contents",
"else",
"self",
".",
"contents",
",",
"optional_keys",
"=",
"optional_keys",
"if",
"replace_optional_keys",
"else",
"self",
".",
"optional_keys",
"|",
"optional_keys",
",",
"allow_extra_keys",
"=",
"self",
".",
"allow_extra_keys",
"if",
"allow_extra_keys",
"is",
"None",
"else",
"allow_extra_keys",
",",
"description",
"=",
"self",
".",
"description",
"if",
"description",
"is",
"None",
"else",
"description",
",",
")"
] |
This method allows you to create a new `Dictionary` that extends the current `Dictionary` with additional
contents and/or optional keys, and/or replaces the `allow_extra_keys` and/or `description` attributes.
:param contents: More contents, if any, to extend the current contents
:type contents: dict
:param optional_keys: More optional keys, if any, to extend the current optional keys
:type optional_keys: union[set, list, tuple]
:param allow_extra_keys: If non-`None`, this overrides the current `allow_extra_keys` attribute
:type allow_extra_keys: bool
:param description: If non-`None`, this overrides the current `description` attribute
:type description: union[str, unicode]
:param replace_optional_keys: If `True`, then the `optional_keys` argument will completely replace, instead of
extend, the current optional keys
:type replace_optional_keys: bool
:return: A new `Dictionary` extended from the current `Dictionary` based on the supplied arguments
:rtype: Dictionary
|
[
"This",
"method",
"allows",
"you",
"to",
"create",
"a",
"new",
"Dictionary",
"that",
"extends",
"the",
"current",
"Dictionary",
"with",
"additional",
"contents",
"and",
"/",
"or",
"optional",
"keys",
"and",
"/",
"or",
"replaces",
"the",
"allow_extra_keys",
"and",
"/",
"or",
"description",
"attributes",
"."
] |
python
|
train
|
buildbot/buildbot
|
master/buildbot/changes/mail.py
|
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/changes/mail.py#L96-L263
|
def parse(self, m, prefix=None):
"""Parse messages sent by the 'buildbot-cvs-mail' program.
"""
# The mail is sent from the person doing the checkin. Assume that the
# local username is enough to identify them (this assumes a one-server
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
# model)
name, addr = parseaddr(m["from"])
if not addr:
# no From means this message isn't from buildbot-cvs-mail
return None
at = addr.find("@")
if at == -1:
author = addr # might still be useful
else:
author = addr[:at]
author = util.bytes2unicode(author, encoding="ascii")
# CVS accepts RFC822 dates. buildbot-cvs-mail adds the date as
# part of the mail header, so use that.
# This assumes cvs is being access via ssh or pserver, so the time
# will be the CVS server's time.
# calculate a "revision" based on that timestamp, or the current time
# if we're unable to parse the date.
log.msg('Processing CVS mail')
dateTuple = parsedate_tz(m["date"])
if dateTuple is None:
when = util.now()
else:
when = mktime_tz(dateTuple)
theTime = datetime.datetime.utcfromtimestamp(float(when))
rev = theTime.strftime('%Y-%m-%d %H:%M:%S')
catRE = re.compile(r'^Category:\s*(\S.*)')
cvsRE = re.compile(r'^CVSROOT:\s*(\S.*)')
cvsmodeRE = re.compile(r'^Cvsmode:\s*(\S.*)')
filesRE = re.compile(r'^Files:\s*(\S.*)')
modRE = re.compile(r'^Module:\s*(\S.*)')
pathRE = re.compile(r'^Path:\s*(\S.*)')
projRE = re.compile(r'^Project:\s*(\S.*)')
singleFileRE = re.compile(r'(.*) (NONE|\d(\.|\d)+) (NONE|\d(\.|\d)+)')
tagRE = re.compile(r'^\s+Tag:\s*(\S.*)')
updateRE = re.compile(r'^Update of:\s*(\S.*)')
comments = ""
branch = None
cvsroot = None
fileList = None
files = []
isdir = 0
path = None
project = None
lines = list(body_line_iterator(m))
while lines:
line = lines.pop(0)
m = catRE.match(line)
if m:
category = m.group(1)
continue
m = cvsRE.match(line)
if m:
cvsroot = m.group(1)
continue
m = cvsmodeRE.match(line)
if m:
cvsmode = m.group(1)
continue
m = filesRE.match(line)
if m:
fileList = m.group(1)
continue
m = modRE.match(line)
if m:
# We don't actually use this
# module = m.group(1)
continue
m = pathRE.match(line)
if m:
path = m.group(1)
continue
m = projRE.match(line)
if m:
project = m.group(1)
continue
m = tagRE.match(line)
if m:
branch = m.group(1)
continue
m = updateRE.match(line)
if m:
# We don't actually use this
# updateof = m.group(1)
continue
if line == "Log Message:\n":
break
# CVS 1.11 lists files as:
# repo/path file,old-version,new-version file2,old-version,new-version
# Version 1.12 lists files as:
# file1 old-version new-version file2 old-version new-version
#
# files consists of tuples of 'file-name old-version new-version'
# The versions are either dotted-decimal version numbers, ie 1.1
# or NONE. New files are of the form 'NONE NUMBER', while removed
# files are 'NUMBER NONE'. 'NONE' is a literal string
# Parsing this instead of files list in 'Added File:' etc
# makes it possible to handle files with embedded spaces, though
# it could fail if the filename was 'bad 1.1 1.2'
# For cvs version 1.11, we expect
# my_module new_file.c,NONE,1.1
# my_module removed.txt,1.2,NONE
# my_module modified_file.c,1.1,1.2
# While cvs version 1.12 gives us
# new_file.c NONE 1.1
# removed.txt 1.2 NONE
# modified_file.c 1.1,1.2
if fileList is None:
log.msg('CVSMaildirSource Mail with no files. Ignoring')
return None # We don't have any files. Email not from CVS
if cvsmode == '1.11':
# Please, no repo paths with spaces!
m = re.search('([^ ]*) ', fileList)
if m:
path = m.group(1)
else:
log.msg(
'CVSMaildirSource can\'t get path from file list. Ignoring mail')
return
fileList = fileList[len(path):].strip()
singleFileRE = re.compile(
r'(.+?),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
elif cvsmode == '1.12':
singleFileRE = re.compile(
r'(.+?) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
if path is None:
raise ValueError(
'CVSMaildirSource cvs 1.12 require path. Check cvs loginfo config')
else:
raise ValueError(
'Expected cvsmode 1.11 or 1.12. got: %s' % cvsmode)
log.msg("CVSMaildirSource processing filelist: %s" % fileList)
while(fileList):
m = singleFileRE.match(fileList)
if m:
curFile = path + '/' + m.group(1)
files.append(curFile)
fileList = fileList[m.end():]
else:
log.msg('CVSMaildirSource no files matched regex. Ignoring')
return None # bail - we couldn't parse the files that changed
# Now get comments
while lines:
line = lines.pop(0)
comments += line
comments = comments.rstrip() + "\n"
if comments == '\n':
comments = None
return ('cvs', dict(author=author, files=files, comments=comments,
isdir=isdir, when=when, branch=branch,
revision=rev, category=category,
repository=cvsroot, project=project,
properties=self.properties))
|
[
"def",
"parse",
"(",
"self",
",",
"m",
",",
"prefix",
"=",
"None",
")",
":",
"# The mail is sent from the person doing the checkin. Assume that the",
"# local username is enough to identify them (this assumes a one-server",
"# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS",
"# model)",
"name",
",",
"addr",
"=",
"parseaddr",
"(",
"m",
"[",
"\"from\"",
"]",
")",
"if",
"not",
"addr",
":",
"# no From means this message isn't from buildbot-cvs-mail",
"return",
"None",
"at",
"=",
"addr",
".",
"find",
"(",
"\"@\"",
")",
"if",
"at",
"==",
"-",
"1",
":",
"author",
"=",
"addr",
"# might still be useful",
"else",
":",
"author",
"=",
"addr",
"[",
":",
"at",
"]",
"author",
"=",
"util",
".",
"bytes2unicode",
"(",
"author",
",",
"encoding",
"=",
"\"ascii\"",
")",
"# CVS accepts RFC822 dates. buildbot-cvs-mail adds the date as",
"# part of the mail header, so use that.",
"# This assumes cvs is being access via ssh or pserver, so the time",
"# will be the CVS server's time.",
"# calculate a \"revision\" based on that timestamp, or the current time",
"# if we're unable to parse the date.",
"log",
".",
"msg",
"(",
"'Processing CVS mail'",
")",
"dateTuple",
"=",
"parsedate_tz",
"(",
"m",
"[",
"\"date\"",
"]",
")",
"if",
"dateTuple",
"is",
"None",
":",
"when",
"=",
"util",
".",
"now",
"(",
")",
"else",
":",
"when",
"=",
"mktime_tz",
"(",
"dateTuple",
")",
"theTime",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"when",
")",
")",
"rev",
"=",
"theTime",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"catRE",
"=",
"re",
".",
"compile",
"(",
"r'^Category:\\s*(\\S.*)'",
")",
"cvsRE",
"=",
"re",
".",
"compile",
"(",
"r'^CVSROOT:\\s*(\\S.*)'",
")",
"cvsmodeRE",
"=",
"re",
".",
"compile",
"(",
"r'^Cvsmode:\\s*(\\S.*)'",
")",
"filesRE",
"=",
"re",
".",
"compile",
"(",
"r'^Files:\\s*(\\S.*)'",
")",
"modRE",
"=",
"re",
".",
"compile",
"(",
"r'^Module:\\s*(\\S.*)'",
")",
"pathRE",
"=",
"re",
".",
"compile",
"(",
"r'^Path:\\s*(\\S.*)'",
")",
"projRE",
"=",
"re",
".",
"compile",
"(",
"r'^Project:\\s*(\\S.*)'",
")",
"singleFileRE",
"=",
"re",
".",
"compile",
"(",
"r'(.*) (NONE|\\d(\\.|\\d)+) (NONE|\\d(\\.|\\d)+)'",
")",
"tagRE",
"=",
"re",
".",
"compile",
"(",
"r'^\\s+Tag:\\s*(\\S.*)'",
")",
"updateRE",
"=",
"re",
".",
"compile",
"(",
"r'^Update of:\\s*(\\S.*)'",
")",
"comments",
"=",
"\"\"",
"branch",
"=",
"None",
"cvsroot",
"=",
"None",
"fileList",
"=",
"None",
"files",
"=",
"[",
"]",
"isdir",
"=",
"0",
"path",
"=",
"None",
"project",
"=",
"None",
"lines",
"=",
"list",
"(",
"body_line_iterator",
"(",
"m",
")",
")",
"while",
"lines",
":",
"line",
"=",
"lines",
".",
"pop",
"(",
"0",
")",
"m",
"=",
"catRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"category",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"cvsRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"cvsroot",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"cvsmodeRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"cvsmode",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"filesRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"fileList",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"modRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"# We don't actually use this",
"# module = m.group(1)",
"continue",
"m",
"=",
"pathRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"path",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"projRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"project",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"tagRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"branch",
"=",
"m",
".",
"group",
"(",
"1",
")",
"continue",
"m",
"=",
"updateRE",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"# We don't actually use this",
"# updateof = m.group(1)",
"continue",
"if",
"line",
"==",
"\"Log Message:\\n\"",
":",
"break",
"# CVS 1.11 lists files as:",
"# repo/path file,old-version,new-version file2,old-version,new-version",
"# Version 1.12 lists files as:",
"# file1 old-version new-version file2 old-version new-version",
"#",
"# files consists of tuples of 'file-name old-version new-version'",
"# The versions are either dotted-decimal version numbers, ie 1.1",
"# or NONE. New files are of the form 'NONE NUMBER', while removed",
"# files are 'NUMBER NONE'. 'NONE' is a literal string",
"# Parsing this instead of files list in 'Added File:' etc",
"# makes it possible to handle files with embedded spaces, though",
"# it could fail if the filename was 'bad 1.1 1.2'",
"# For cvs version 1.11, we expect",
"# my_module new_file.c,NONE,1.1",
"# my_module removed.txt,1.2,NONE",
"# my_module modified_file.c,1.1,1.2",
"# While cvs version 1.12 gives us",
"# new_file.c NONE 1.1",
"# removed.txt 1.2 NONE",
"# modified_file.c 1.1,1.2",
"if",
"fileList",
"is",
"None",
":",
"log",
".",
"msg",
"(",
"'CVSMaildirSource Mail with no files. Ignoring'",
")",
"return",
"None",
"# We don't have any files. Email not from CVS",
"if",
"cvsmode",
"==",
"'1.11'",
":",
"# Please, no repo paths with spaces!",
"m",
"=",
"re",
".",
"search",
"(",
"'([^ ]*) '",
",",
"fileList",
")",
"if",
"m",
":",
"path",
"=",
"m",
".",
"group",
"(",
"1",
")",
"else",
":",
"log",
".",
"msg",
"(",
"'CVSMaildirSource can\\'t get path from file list. Ignoring mail'",
")",
"return",
"fileList",
"=",
"fileList",
"[",
"len",
"(",
"path",
")",
":",
"]",
".",
"strip",
"(",
")",
"singleFileRE",
"=",
"re",
".",
"compile",
"(",
"r'(.+?),(NONE|(?:\\d+\\.(?:\\d+\\.\\d+\\.)*\\d+)),(NONE|(?:\\d+\\.(?:\\d+\\.\\d+\\.)*\\d+))(?: |$)'",
")",
"elif",
"cvsmode",
"==",
"'1.12'",
":",
"singleFileRE",
"=",
"re",
".",
"compile",
"(",
"r'(.+?) (NONE|(?:\\d+\\.(?:\\d+\\.\\d+\\.)*\\d+)) (NONE|(?:\\d+\\.(?:\\d+\\.\\d+\\.)*\\d+))(?: |$)'",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'CVSMaildirSource cvs 1.12 require path. Check cvs loginfo config'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Expected cvsmode 1.11 or 1.12. got: %s'",
"%",
"cvsmode",
")",
"log",
".",
"msg",
"(",
"\"CVSMaildirSource processing filelist: %s\"",
"%",
"fileList",
")",
"while",
"(",
"fileList",
")",
":",
"m",
"=",
"singleFileRE",
".",
"match",
"(",
"fileList",
")",
"if",
"m",
":",
"curFile",
"=",
"path",
"+",
"'/'",
"+",
"m",
".",
"group",
"(",
"1",
")",
"files",
".",
"append",
"(",
"curFile",
")",
"fileList",
"=",
"fileList",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"else",
":",
"log",
".",
"msg",
"(",
"'CVSMaildirSource no files matched regex. Ignoring'",
")",
"return",
"None",
"# bail - we couldn't parse the files that changed",
"# Now get comments",
"while",
"lines",
":",
"line",
"=",
"lines",
".",
"pop",
"(",
"0",
")",
"comments",
"+=",
"line",
"comments",
"=",
"comments",
".",
"rstrip",
"(",
")",
"+",
"\"\\n\"",
"if",
"comments",
"==",
"'\\n'",
":",
"comments",
"=",
"None",
"return",
"(",
"'cvs'",
",",
"dict",
"(",
"author",
"=",
"author",
",",
"files",
"=",
"files",
",",
"comments",
"=",
"comments",
",",
"isdir",
"=",
"isdir",
",",
"when",
"=",
"when",
",",
"branch",
"=",
"branch",
",",
"revision",
"=",
"rev",
",",
"category",
"=",
"category",
",",
"repository",
"=",
"cvsroot",
",",
"project",
"=",
"project",
",",
"properties",
"=",
"self",
".",
"properties",
")",
")"
] |
Parse messages sent by the 'buildbot-cvs-mail' program.
|
[
"Parse",
"messages",
"sent",
"by",
"the",
"buildbot",
"-",
"cvs",
"-",
"mail",
"program",
"."
] |
python
|
train
|
CalebBell/thermo
|
thermo/heat_capacity.py
|
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/heat_capacity.py#L662-L711
|
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists for.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
Tmins, Tmaxs = [], []
if self.CASRN in TRC_gas_data.index:
methods.append(TRCIG)
_, self.TRCIG_Tmin, self.TRCIG_Tmax, a0, a1, a2, a3, a4, a5, a6, a7, _, _, _ = _TRC_gas_data_values[TRC_gas_data.index.get_loc(self.CASRN)].tolist()
self.TRCIG_coefs = [a0, a1, a2, a3, a4, a5, a6, a7]
Tmins.append(self.TRCIG_Tmin); Tmaxs.append(self.TRCIG_Tmax)
if self.CASRN in Poling_data.index and not np.isnan(Poling_data.at[self.CASRN, 'a0']):
_, self.POLING_Tmin, self.POLING_Tmax, a0, a1, a2, a3, a4, Cpg, Cpl = _Poling_data_values[Poling_data.index.get_loc(self.CASRN)].tolist()
methods.append(POLING)
self.POLING_coefs = [a0, a1, a2, a3, a4]
Tmins.append(self.POLING_Tmin); Tmaxs.append(self.POLING_Tmax)
if self.CASRN in Poling_data.index and not np.isnan(Poling_data.at[self.CASRN, 'Cpg']):
methods.append(POLING_CONST)
self.POLING_T = 298.15
self.POLING_constant = float(Poling_data.at[self.CASRN, 'Cpg'])
if self.CASRN in CRC_standard_data.index and not np.isnan(CRC_standard_data.at[self.CASRN, 'Cpg']):
methods.append(CRCSTD)
self.CRCSTD_T = 298.15
self.CRCSTD_constant = float(CRC_standard_data.at[self.CASRN, 'Cpg'])
if self.CASRN in _VDISaturationDict:
# NOTE: VDI data is for the saturation curve, i.e. at increasing
# pressure; it is normally substantially higher than the ideal gas
# value
methods.append(VDI_TABULAR)
Ts, props = VDI_tabular_data(self.CASRN, 'Cp (g)')
self.VDI_Tmin = Ts[0]
self.VDI_Tmax = Ts[-1]
self.tabular_data[VDI_TABULAR] = (Ts, props)
Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
if has_CoolProp and self.CASRN in coolprop_dict:
methods.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)
if self.MW and self.similarity_variable:
methods.append(LASTOVKA_SHAW)
self.all_methods = set(methods)
if Tmins and Tmaxs:
self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
|
[
"def",
"load_all_methods",
"(",
"self",
")",
":",
"methods",
"=",
"[",
"]",
"Tmins",
",",
"Tmaxs",
"=",
"[",
"]",
",",
"[",
"]",
"if",
"self",
".",
"CASRN",
"in",
"TRC_gas_data",
".",
"index",
":",
"methods",
".",
"append",
"(",
"TRCIG",
")",
"_",
",",
"self",
".",
"TRCIG_Tmin",
",",
"self",
".",
"TRCIG_Tmax",
",",
"a0",
",",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
",",
"a5",
",",
"a6",
",",
"a7",
",",
"_",
",",
"_",
",",
"_",
"=",
"_TRC_gas_data_values",
"[",
"TRC_gas_data",
".",
"index",
".",
"get_loc",
"(",
"self",
".",
"CASRN",
")",
"]",
".",
"tolist",
"(",
")",
"self",
".",
"TRCIG_coefs",
"=",
"[",
"a0",
",",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
",",
"a5",
",",
"a6",
",",
"a7",
"]",
"Tmins",
".",
"append",
"(",
"self",
".",
"TRCIG_Tmin",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"TRCIG_Tmax",
")",
"if",
"self",
".",
"CASRN",
"in",
"Poling_data",
".",
"index",
"and",
"not",
"np",
".",
"isnan",
"(",
"Poling_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'a0'",
"]",
")",
":",
"_",
",",
"self",
".",
"POLING_Tmin",
",",
"self",
".",
"POLING_Tmax",
",",
"a0",
",",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
",",
"Cpg",
",",
"Cpl",
"=",
"_Poling_data_values",
"[",
"Poling_data",
".",
"index",
".",
"get_loc",
"(",
"self",
".",
"CASRN",
")",
"]",
".",
"tolist",
"(",
")",
"methods",
".",
"append",
"(",
"POLING",
")",
"self",
".",
"POLING_coefs",
"=",
"[",
"a0",
",",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
"]",
"Tmins",
".",
"append",
"(",
"self",
".",
"POLING_Tmin",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"POLING_Tmax",
")",
"if",
"self",
".",
"CASRN",
"in",
"Poling_data",
".",
"index",
"and",
"not",
"np",
".",
"isnan",
"(",
"Poling_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Cpg'",
"]",
")",
":",
"methods",
".",
"append",
"(",
"POLING_CONST",
")",
"self",
".",
"POLING_T",
"=",
"298.15",
"self",
".",
"POLING_constant",
"=",
"float",
"(",
"Poling_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Cpg'",
"]",
")",
"if",
"self",
".",
"CASRN",
"in",
"CRC_standard_data",
".",
"index",
"and",
"not",
"np",
".",
"isnan",
"(",
"CRC_standard_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Cpg'",
"]",
")",
":",
"methods",
".",
"append",
"(",
"CRCSTD",
")",
"self",
".",
"CRCSTD_T",
"=",
"298.15",
"self",
".",
"CRCSTD_constant",
"=",
"float",
"(",
"CRC_standard_data",
".",
"at",
"[",
"self",
".",
"CASRN",
",",
"'Cpg'",
"]",
")",
"if",
"self",
".",
"CASRN",
"in",
"_VDISaturationDict",
":",
"# NOTE: VDI data is for the saturation curve, i.e. at increasing",
"# pressure; it is normally substantially higher than the ideal gas",
"# value",
"methods",
".",
"append",
"(",
"VDI_TABULAR",
")",
"Ts",
",",
"props",
"=",
"VDI_tabular_data",
"(",
"self",
".",
"CASRN",
",",
"'Cp (g)'",
")",
"self",
".",
"VDI_Tmin",
"=",
"Ts",
"[",
"0",
"]",
"self",
".",
"VDI_Tmax",
"=",
"Ts",
"[",
"-",
"1",
"]",
"self",
".",
"tabular_data",
"[",
"VDI_TABULAR",
"]",
"=",
"(",
"Ts",
",",
"props",
")",
"Tmins",
".",
"append",
"(",
"self",
".",
"VDI_Tmin",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"VDI_Tmax",
")",
"if",
"has_CoolProp",
"and",
"self",
".",
"CASRN",
"in",
"coolprop_dict",
":",
"methods",
".",
"append",
"(",
"COOLPROP",
")",
"self",
".",
"CP_f",
"=",
"coolprop_fluids",
"[",
"self",
".",
"CASRN",
"]",
"Tmins",
".",
"append",
"(",
"self",
".",
"CP_f",
".",
"Tt",
")",
"Tmaxs",
".",
"append",
"(",
"self",
".",
"CP_f",
".",
"Tc",
")",
"if",
"self",
".",
"MW",
"and",
"self",
".",
"similarity_variable",
":",
"methods",
".",
"append",
"(",
"LASTOVKA_SHAW",
")",
"self",
".",
"all_methods",
"=",
"set",
"(",
"methods",
")",
"if",
"Tmins",
"and",
"Tmaxs",
":",
"self",
".",
"Tmin",
",",
"self",
".",
"Tmax",
"=",
"min",
"(",
"Tmins",
")",
",",
"max",
"(",
"Tmaxs",
")"
] |
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
and :obj:`all_methods` as a set of methods for which the data exists for.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
|
[
"r",
"Method",
"which",
"picks",
"out",
"coefficients",
"for",
"the",
"specified",
"chemical",
"from",
"the",
"various",
"dictionaries",
"and",
"DataFrames",
"storing",
"it",
".",
"All",
"data",
"is",
"stored",
"as",
"attributes",
".",
"This",
"method",
"also",
"sets",
":",
"obj",
":",
"Tmin",
":",
"obj",
":",
"Tmax",
"and",
":",
"obj",
":",
"all_methods",
"as",
"a",
"set",
"of",
"methods",
"for",
"which",
"the",
"data",
"exists",
"for",
"."
] |
python
|
valid
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Dir.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Dir.py#L32-L37
|
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
|
[
"def",
"DirScanner",
"(",
"*",
"*",
"kw",
")",
":",
"kw",
"[",
"'node_factory'",
"]",
"=",
"SCons",
".",
"Node",
".",
"FS",
".",
"Entry",
"kw",
"[",
"'recursive'",
"]",
"=",
"only_dirs",
"return",
"SCons",
".",
"Scanner",
".",
"Base",
"(",
"scan_on_disk",
",",
"\"DirScanner\"",
",",
"*",
"*",
"kw",
")"
] |
Return a prototype Scanner instance for scanning
directories for on-disk files
|
[
"Return",
"a",
"prototype",
"Scanner",
"instance",
"for",
"scanning",
"directories",
"for",
"on",
"-",
"disk",
"files"
] |
python
|
train
|
autokey/autokey
|
lib/autokey/scripting.py
|
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L349-L382
|
def list_menu_multi(self, options, title="Choose one or more values", message="Choose one or more values",
defaults: list=None, **kwargs):
"""
Show a multiple-selection list menu
Usage: C{dialog.list_menu_multi(options, title="Choose one or more values", message="Choose one or more values", defaults=[], **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param defaults: list of default values to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, List[str])}
"""
if defaults is None:
defaults = []
choices = []
optionNum = 0
for option in options:
choices.append(str(optionNum))
choices.append(option)
if option in defaults:
choices.append("on")
else:
choices.append("off")
optionNum += 1
return_code, output = self._run_kdialog(title, ["--separate-output", "--checklist", message] + choices, kwargs)
results = output.split()
choices = [options[int(choice_index)] for choice_index in results]
return DialogData(return_code, choices)
|
[
"def",
"list_menu_multi",
"(",
"self",
",",
"options",
",",
"title",
"=",
"\"Choose one or more values\"",
",",
"message",
"=",
"\"Choose one or more values\"",
",",
"defaults",
":",
"list",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"[",
"]",
"choices",
"=",
"[",
"]",
"optionNum",
"=",
"0",
"for",
"option",
"in",
"options",
":",
"choices",
".",
"append",
"(",
"str",
"(",
"optionNum",
")",
")",
"choices",
".",
"append",
"(",
"option",
")",
"if",
"option",
"in",
"defaults",
":",
"choices",
".",
"append",
"(",
"\"on\"",
")",
"else",
":",
"choices",
".",
"append",
"(",
"\"off\"",
")",
"optionNum",
"+=",
"1",
"return_code",
",",
"output",
"=",
"self",
".",
"_run_kdialog",
"(",
"title",
",",
"[",
"\"--separate-output\"",
",",
"\"--checklist\"",
",",
"message",
"]",
"+",
"choices",
",",
"kwargs",
")",
"results",
"=",
"output",
".",
"split",
"(",
")",
"choices",
"=",
"[",
"options",
"[",
"int",
"(",
"choice_index",
")",
"]",
"for",
"choice_index",
"in",
"results",
"]",
"return",
"DialogData",
"(",
"return_code",
",",
"choices",
")"
] |
Show a multiple-selection list menu
Usage: C{dialog.list_menu_multi(options, title="Choose one or more values", message="Choose one or more values", defaults=[], **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param defaults: list of default values to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, List[str])}
|
[
"Show",
"a",
"multiple",
"-",
"selection",
"list",
"menu",
"Usage",
":",
"C",
"{",
"dialog",
".",
"list_menu_multi",
"(",
"options",
"title",
"=",
"Choose",
"one",
"or",
"more",
"values",
"message",
"=",
"Choose",
"one",
"or",
"more",
"values",
"defaults",
"=",
"[]",
"**",
"kwargs",
")",
"}"
] |
python
|
train
|
Accelize/pycosio
|
pycosio/storage/azure.py
|
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/azure.py#L48-L68
|
def _properties_model_to_dict(properties):
"""
Convert properties model to dict.
Args:
properties: Properties model.
Returns:
dict: Converted model.
"""
result = {}
for attr in properties.__dict__:
value = getattr(properties, attr)
if hasattr(value, '__module__') and 'models' in value.__module__:
value = _properties_model_to_dict(value)
if not (value is None or (isinstance(value, dict) and not value)):
result[attr] = value
return result
|
[
"def",
"_properties_model_to_dict",
"(",
"properties",
")",
":",
"result",
"=",
"{",
"}",
"for",
"attr",
"in",
"properties",
".",
"__dict__",
":",
"value",
"=",
"getattr",
"(",
"properties",
",",
"attr",
")",
"if",
"hasattr",
"(",
"value",
",",
"'__module__'",
")",
"and",
"'models'",
"in",
"value",
".",
"__module__",
":",
"value",
"=",
"_properties_model_to_dict",
"(",
"value",
")",
"if",
"not",
"(",
"value",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"not",
"value",
")",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"value",
"return",
"result"
] |
Convert properties model to dict.
Args:
properties: Properties model.
Returns:
dict: Converted model.
|
[
"Convert",
"properties",
"model",
"to",
"dict",
"."
] |
python
|
train
|
limodou/uliweb
|
uliweb/contrib/auth/__init__.py
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/auth/__init__.py#L90-L125
|
def set_user_session(user):
"""
Set user session
:param user: user object chould be model instance or dict
:return:
"""
from uliweb import settings, request
user_fieldname = settings.get_var('AUTH/GET_AUTH_USER_FIELDNAME', 'id')
share_session = settings.get_var('AUTH/AUTH_SHARE_USER_SESSION', False)
if isinstance(user, dict):
user_id = user[user_fieldname]
else:
user_id = getattr(user, user_fieldname)
if share_session:
cache = functions.get_cache()
key = get_user_session_key(user_id)
session_id = cache.get(key, None)
log.debug('Auth: user session user_id={}, session_id={}, key={}'.format(user_id, session_id, key))
if not session_id:
request.session.save()
log.debug('Auth: set user session mapping userid={}, '
'session_id={}, expiry time={}'.format(user_id,
request.session.key,
request.session.expiry_time))
cache.set(key, request.session.key, expire=request.session.expiry_time)
elif session_id != request.session.key:
log.debug('Auth: load oldkey={}, key={}'.format(request.session.key, session_id))
request.session.delete()
request.session.load(session_id)
if isinstance(user, dict):
request.session[_get_auth_key()] = user
else:
request.session[_get_auth_key()] = user_id
request.user = user
|
[
"def",
"set_user_session",
"(",
"user",
")",
":",
"from",
"uliweb",
"import",
"settings",
",",
"request",
"user_fieldname",
"=",
"settings",
".",
"get_var",
"(",
"'AUTH/GET_AUTH_USER_FIELDNAME'",
",",
"'id'",
")",
"share_session",
"=",
"settings",
".",
"get_var",
"(",
"'AUTH/AUTH_SHARE_USER_SESSION'",
",",
"False",
")",
"if",
"isinstance",
"(",
"user",
",",
"dict",
")",
":",
"user_id",
"=",
"user",
"[",
"user_fieldname",
"]",
"else",
":",
"user_id",
"=",
"getattr",
"(",
"user",
",",
"user_fieldname",
")",
"if",
"share_session",
":",
"cache",
"=",
"functions",
".",
"get_cache",
"(",
")",
"key",
"=",
"get_user_session_key",
"(",
"user_id",
")",
"session_id",
"=",
"cache",
".",
"get",
"(",
"key",
",",
"None",
")",
"log",
".",
"debug",
"(",
"'Auth: user session user_id={}, session_id={}, key={}'",
".",
"format",
"(",
"user_id",
",",
"session_id",
",",
"key",
")",
")",
"if",
"not",
"session_id",
":",
"request",
".",
"session",
".",
"save",
"(",
")",
"log",
".",
"debug",
"(",
"'Auth: set user session mapping userid={}, '",
"'session_id={}, expiry time={}'",
".",
"format",
"(",
"user_id",
",",
"request",
".",
"session",
".",
"key",
",",
"request",
".",
"session",
".",
"expiry_time",
")",
")",
"cache",
".",
"set",
"(",
"key",
",",
"request",
".",
"session",
".",
"key",
",",
"expire",
"=",
"request",
".",
"session",
".",
"expiry_time",
")",
"elif",
"session_id",
"!=",
"request",
".",
"session",
".",
"key",
":",
"log",
".",
"debug",
"(",
"'Auth: load oldkey={}, key={}'",
".",
"format",
"(",
"request",
".",
"session",
".",
"key",
",",
"session_id",
")",
")",
"request",
".",
"session",
".",
"delete",
"(",
")",
"request",
".",
"session",
".",
"load",
"(",
"session_id",
")",
"if",
"isinstance",
"(",
"user",
",",
"dict",
")",
":",
"request",
".",
"session",
"[",
"_get_auth_key",
"(",
")",
"]",
"=",
"user",
"else",
":",
"request",
".",
"session",
"[",
"_get_auth_key",
"(",
")",
"]",
"=",
"user_id",
"request",
".",
"user",
"=",
"user"
] |
Set user session
:param user: user object chould be model instance or dict
:return:
|
[
"Set",
"user",
"session",
":",
"param",
"user",
":",
"user",
"object",
"chould",
"be",
"model",
"instance",
"or",
"dict",
":",
"return",
":"
] |
python
|
train
|
Erotemic/ubelt
|
ubelt/util_str.py
|
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_str.py#L148-L169
|
def ensure_unicode(text):
r"""
Casts bytes into utf8 (mostly for python2 compatibility)
References:
http://stackoverflow.com/questions/12561063/extract-data-from-file
Example:
>>> from ubelt.util_str import *
>>> import codecs # NOQA
>>> assert ensure_unicode('my ünicôdé strįng') == 'my ünicôdé strįng'
>>> assert ensure_unicode('text1') == 'text1'
>>> assert ensure_unicode('text1'.encode('utf8')) == 'text1'
>>> assert ensure_unicode('text1'.encode('utf8')) == 'text1'
>>> assert (codecs.BOM_UTF8 + 'text»¿'.encode('utf8')).decode('utf8')
"""
if isinstance(text, six.text_type):
return text
elif isinstance(text, six.binary_type):
return text.decode('utf8')
else: # nocover
raise ValueError('unknown input type {!r}'.format(text))
|
[
"def",
"ensure_unicode",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"six",
".",
"text_type",
")",
":",
"return",
"text",
"elif",
"isinstance",
"(",
"text",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"text",
".",
"decode",
"(",
"'utf8'",
")",
"else",
":",
"# nocover",
"raise",
"ValueError",
"(",
"'unknown input type {!r}'",
".",
"format",
"(",
"text",
")",
")"
] |
r"""
Casts bytes into utf8 (mostly for python2 compatibility)
References:
http://stackoverflow.com/questions/12561063/extract-data-from-file
Example:
>>> from ubelt.util_str import *
>>> import codecs # NOQA
>>> assert ensure_unicode('my ünicôdé strįng') == 'my ünicôdé strįng'
>>> assert ensure_unicode('text1') == 'text1'
>>> assert ensure_unicode('text1'.encode('utf8')) == 'text1'
>>> assert ensure_unicode('text1'.encode('utf8')) == 'text1'
>>> assert (codecs.BOM_UTF8 + 'text»¿'.encode('utf8')).decode('utf8')
|
[
"r",
"Casts",
"bytes",
"into",
"utf8",
"(",
"mostly",
"for",
"python2",
"compatibility",
")"
] |
python
|
valid
|
knipknap/SpiffWorkflow
|
SpiffWorkflow/bpmn/parser/ProcessParser.py
|
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/parser/ProcessParser.py#L118-L129
|
def get_spec(self):
"""
Parse this process (if it has not already been parsed), and return the
workflow spec.
"""
if self.is_parsed:
return self.spec
if self.parsing_started:
raise NotImplementedError(
'Recursive call Activities are not supported.')
self._parse()
return self.get_spec()
|
[
"def",
"get_spec",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_parsed",
":",
"return",
"self",
".",
"spec",
"if",
"self",
".",
"parsing_started",
":",
"raise",
"NotImplementedError",
"(",
"'Recursive call Activities are not supported.'",
")",
"self",
".",
"_parse",
"(",
")",
"return",
"self",
".",
"get_spec",
"(",
")"
] |
Parse this process (if it has not already been parsed), and return the
workflow spec.
|
[
"Parse",
"this",
"process",
"(",
"if",
"it",
"has",
"not",
"already",
"been",
"parsed",
")",
"and",
"return",
"the",
"workflow",
"spec",
"."
] |
python
|
valid
|
oasis-open/cti-stix-validator
|
stix2validator/v21/shoulds.py
|
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L450-L464
|
def vocab_account_type(instance):
"""Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
"""
for key, obj in instance['objects'].items():
if 'type' in obj and obj['type'] == 'user-account':
try:
acct_type = obj['account_type']
except KeyError:
continue
if acct_type not in enums.ACCOUNT_TYPE_OV:
yield JSONError("Object '%s' is a User Account Object "
"with an 'account_type' of '%s', which is not a "
"value in the account-type-ov vocabulary."
% (key, acct_type), instance['id'], 'account-type')
|
[
"def",
"vocab_account_type",
"(",
"instance",
")",
":",
"for",
"key",
",",
"obj",
"in",
"instance",
"[",
"'objects'",
"]",
".",
"items",
"(",
")",
":",
"if",
"'type'",
"in",
"obj",
"and",
"obj",
"[",
"'type'",
"]",
"==",
"'user-account'",
":",
"try",
":",
"acct_type",
"=",
"obj",
"[",
"'account_type'",
"]",
"except",
"KeyError",
":",
"continue",
"if",
"acct_type",
"not",
"in",
"enums",
".",
"ACCOUNT_TYPE_OV",
":",
"yield",
"JSONError",
"(",
"\"Object '%s' is a User Account Object \"",
"\"with an 'account_type' of '%s', which is not a \"",
"\"value in the account-type-ov vocabulary.\"",
"%",
"(",
"key",
",",
"acct_type",
")",
",",
"instance",
"[",
"'id'",
"]",
",",
"'account-type'",
")"
] |
Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
|
[
"Ensure",
"a",
"user",
"-",
"account",
"objects",
"account",
"-",
"type",
"property",
"is",
"from",
"the",
"account",
"-",
"type",
"-",
"ov",
"vocabulary",
"."
] |
python
|
train
|
ARMmbed/mbed-cloud-sdk-python
|
src/mbed_cloud/_backends/update_service/apis/default_api.py
|
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/update_service/apis/default_api.py#L1563-L1583
|
def update_campaign_retrieve(self, campaign_id, **kwargs): # noqa: E501
"""Get a campaign. # noqa: E501
Get an update campaign. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_campaign_retrieve(campaign_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str campaign_id: The campaign ID (required)
:return: UpdateCampaign
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_campaign_retrieve_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.update_campaign_retrieve_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
|
[
"def",
"update_campaign_retrieve",
"(",
"self",
",",
"campaign_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"update_campaign_retrieve_with_http_info",
"(",
"campaign_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"update_campaign_retrieve_with_http_info",
"(",
"campaign_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] |
Get a campaign. # noqa: E501
Get an update campaign. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_campaign_retrieve(campaign_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str campaign_id: The campaign ID (required)
:return: UpdateCampaign
If the method is called asynchronously,
returns the request thread.
|
[
"Get",
"a",
"campaign",
".",
"#",
"noqa",
":",
"E501"
] |
python
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.