nwo
stringlengths 5
106
| sha
stringlengths 40
40
| path
stringlengths 4
174
| language
stringclasses 1
value | identifier
stringlengths 1
140
| parameters
stringlengths 0
87.7k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
426k
| docstring
stringlengths 0
64.3k
| docstring_summary
stringlengths 0
26.3k
| docstring_tokens
list | function
stringlengths 18
4.83M
| function_tokens
list | url
stringlengths 83
304
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Grunny/zap-cli
|
d58d4850ecfc5467badfac5e5bcc841d064bd419
|
zapcli/commands/scripts.py
|
python
|
remove_script
|
(zap_helper, script_name)
|
Remove a script.
|
Remove a script.
|
[
"Remove",
"a",
"script",
"."
] |
def remove_script(zap_helper, script_name):
    """Remove a script."""
    with zap_error_handler():
        console.debug('Removing script "{0}"'.format(script_name))
        status = zap_helper.zap.script.remove(script_name)
        # The ZAP API signals success with the literal string 'OK'.
        if status != 'OK':
            raise ZAPError('Error removing script: {0}'.format(status))

    console.info('Script "{0}" removed'.format(script_name))
|
[
"def",
"remove_script",
"(",
"zap_helper",
",",
"script_name",
")",
":",
"with",
"zap_error_handler",
"(",
")",
":",
"console",
".",
"debug",
"(",
"'Removing script \"{0}\"'",
".",
"format",
"(",
"script_name",
")",
")",
"result",
"=",
"zap_helper",
".",
"zap",
".",
"script",
".",
"remove",
"(",
"script_name",
")",
"if",
"result",
"!=",
"'OK'",
":",
"raise",
"ZAPError",
"(",
"'Error removing script: {0}'",
".",
"format",
"(",
"result",
")",
")",
"console",
".",
"info",
"(",
"'Script \"{0}\" removed'",
".",
"format",
"(",
"script_name",
")",
")"
] |
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/scripts.py#L83-L92
|
||
codemeta/codemeta
|
22cf4c2b836a0026565792b5d1b3c5ff6c1fc82b
|
scripts/aggregate.py
|
python
|
list_crosswalks
|
()
|
Returns the list of crosswalk files. If USE_OLD_ORDER, returns
OLD_ORDER. Otherwise, auto-discovers them from the crosswalk/
directory.
|
Returns the list of crosswalk files. If USE_OLD_ORDER, returns
OLD_ORDER. Otherwise, auto-discovers them from the crosswalk/
directory.
|
[
"Returns",
"the",
"list",
"of",
"crosswalk",
"files",
".",
"If",
"USE_OLD_ORDER",
"returns",
"OLD_ORDER",
".",
"Otherwise",
"auto",
"-",
"discovers",
"them",
"from",
"the",
"crosswalk",
"/",
"directory",
"."
] |
def list_crosswalks():
    """Return the ordered list of crosswalk files.

    When USE_OLD_ORDER is set, the hard-coded OLD_ORDER listing is
    returned; otherwise the files in the crosswalk/ source directory
    are auto-discovered and sorted alphabetically.
    """
    if USE_OLD_ORDER:
        return OLD_ORDER
    return sorted(os.listdir(SOURCE_DIR))
|
[
"def",
"list_crosswalks",
"(",
")",
":",
"if",
"USE_OLD_ORDER",
":",
"return",
"OLD_ORDER",
"else",
":",
"return",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"SOURCE_DIR",
")",
")"
] |
https://github.com/codemeta/codemeta/blob/22cf4c2b836a0026565792b5d1b3c5ff6c1fc82b/scripts/aggregate.py#L82-L89
|
||
beeware/briefcase
|
4cbb0ba9ad5ffac4323ab6e1a65d22fde6eb67ec
|
src/briefcase/platforms/macOS/__init__.py
|
python
|
macOSPackageMixin.select_identity
|
(self, identity=None)
|
return identity
|
Get the codesigning identity to use.
:param identity: A pre-specified identity (either the 40-digit
hex checksum, or the string name of the identity). If provided, it
will be validated against the list of available identities to
confirm that it is a valid codesigning identity.
:returns: The final identity to use
|
Get the codesigning identity to use.
|
[
"Get",
"the",
"codesigning",
"identity",
"to",
"use",
"."
] |
def select_identity(self, identity=None):
    """
    Get the codesigning identity to use.
    :param identity: A pre-specified identity (either the 40-digit
    hex checksum, or the string name of the identity). If provided, it
    will be validated against the list of available identities to
    confirm that it is a valid codesigning identity.
    :returns: The final identity to use
    """
    # Obtain the valid codesigning identities.
    # NOTE(review): `self` is passed explicitly even though the call is
    # already bound — presumably get_identities is a plain function stored
    # as a class attribute; confirm this is intentional.
    identities = self.get_identities(self, 'codesigning')
    if identity:
        try:
            # Try to look up the identity as a hex checksum
            return identities[identity]
        except KeyError:
            # It's not a valid checksum; try to use it as a value.
            if identity in identities.values():
                return identity
            # Neither a known checksum nor a known name: reject it.
            raise BriefcaseCommandError(
                "Invalid code signing identity {identity!r}".format(
                    identity=identity
                )
            )
    if len(identities) == 0:
        raise BriefcaseCommandError(
            "No code signing identities are available."
        )
    elif len(identities) == 1:
        # Exactly one identity is installed; use it without prompting.
        identity = list(identities.items())[0][1]
    else:
        # Several identities available: ask the user interactively.
        print()
        print("Select code signing identity to use:")
        print()
        selection = select_option(identities, input=self.input)
        identity = identities[selection]
    # NOTE(review): looks like leftover debug output; consider routing
    # through the tool's normal logging/console layer.
    print("selected", identity)
    return identity
|
[
"def",
"select_identity",
"(",
"self",
",",
"identity",
"=",
"None",
")",
":",
"# Obtain the valid codesigning identities.",
"identities",
"=",
"self",
".",
"get_identities",
"(",
"self",
",",
"'codesigning'",
")",
"if",
"identity",
":",
"try",
":",
"# Try to look up the identity as a hex checksum",
"return",
"identities",
"[",
"identity",
"]",
"except",
"KeyError",
":",
"# It's not a valid checksum; try to use it as a value.",
"if",
"identity",
"in",
"identities",
".",
"values",
"(",
")",
":",
"return",
"identity",
"raise",
"BriefcaseCommandError",
"(",
"\"Invalid code signing identity {identity!r}\"",
".",
"format",
"(",
"identity",
"=",
"identity",
")",
")",
"if",
"len",
"(",
"identities",
")",
"==",
"0",
":",
"raise",
"BriefcaseCommandError",
"(",
"\"No code signing identities are available.\"",
")",
"elif",
"len",
"(",
"identities",
")",
"==",
"1",
":",
"identity",
"=",
"list",
"(",
"identities",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"else",
":",
"print",
"(",
")",
"print",
"(",
"\"Select code signing identity to use:\"",
")",
"print",
"(",
")",
"selection",
"=",
"select_option",
"(",
"identities",
",",
"input",
"=",
"self",
".",
"input",
")",
"identity",
"=",
"identities",
"[",
"selection",
"]",
"print",
"(",
"\"selected\"",
",",
"identity",
")",
"return",
"identity"
] |
https://github.com/beeware/briefcase/blob/4cbb0ba9ad5ffac4323ab6e1a65d22fde6eb67ec/src/briefcase/platforms/macOS/__init__.py#L123-L165
|
|
Rockhopper-Technologies/enlighten
|
e98ea52b4aebd9a8f458c98a54453c385f4867dc
|
examples/multicolored.py
|
python
|
Node._state
|
(self, variable, num)
|
return False
|
Generic method to randomly determine if state is reached
|
Generic method to randomly determine if state is reached
|
[
"Generic",
"method",
"to",
"randomly",
"determine",
"if",
"state",
"is",
"reached"
] |
def _state(self, variable, num):
"""
Generic method to randomly determine if state is reached
"""
value = getattr(self, variable)
if value is None:
return False
if value is True:
return True
if random.randint(1, num) == num:
setattr(self, variable, True)
return True
return False
|
[
"def",
"_state",
"(",
"self",
",",
"variable",
",",
"num",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"variable",
")",
"if",
"value",
"is",
"None",
":",
"return",
"False",
"if",
"value",
"is",
"True",
":",
"return",
"True",
"if",
"random",
".",
"randint",
"(",
"1",
",",
"num",
")",
"==",
"num",
":",
"setattr",
"(",
"self",
",",
"variable",
",",
"True",
")",
"return",
"True",
"return",
"False"
] |
https://github.com/Rockhopper-Technologies/enlighten/blob/e98ea52b4aebd9a8f458c98a54453c385f4867dc/examples/multicolored.py#L64-L81
|
|
openfisca/openfisca-france
|
207a58191be6830716693f94d37846f1e5037b51
|
openfisca_france/model/prelevements_obligatoires/impot_revenu/ir.py
|
python
|
rag.formula
|
(individu, period, parameters)
|
return (
frag_exon + frag_impo + mrag_exon + mrag_impo
+ arag_exon + arag_impg - arag_defi
+ nrag_exon + nrag_impg - nrag_defi
+ nrag_ajag
)
|
Revenus agricoles
|
Revenus agricoles
|
[
"Revenus",
"agricoles"
] |
def formula(individu, period, parameters):
    """Revenus agricoles.

    Sums the exempt/taxable farm-income components and subtracts the
    deficit components, all evaluated for the same period.
    """
    # (variable name, sign) pairs, kept in the original evaluation order.
    terms = (
        ('frag_exon', 1), ('frag_impo', 1), ('mrag_exon', 1), ('mrag_impo', 1),
        ('arag_exon', 1), ('arag_impg', 1), ('arag_defi', -1),
        ('nrag_exon', 1), ('nrag_impg', 1), ('nrag_defi', -1),
        ('nrag_ajag', 1),
    )
    return sum(sign * individu(name, period) for name, sign in terms)
|
[
"def",
"formula",
"(",
"individu",
",",
"period",
",",
"parameters",
")",
":",
"frag_exon",
"=",
"individu",
"(",
"'frag_exon'",
",",
"period",
")",
"frag_impo",
"=",
"individu",
"(",
"'frag_impo'",
",",
"period",
")",
"mrag_exon",
"=",
"individu",
"(",
"'mrag_exon'",
",",
"period",
")",
"mrag_impo",
"=",
"individu",
"(",
"'mrag_impo'",
",",
"period",
")",
"arag_exon",
"=",
"individu",
"(",
"'arag_exon'",
",",
"period",
")",
"arag_impg",
"=",
"individu",
"(",
"'arag_impg'",
",",
"period",
")",
"arag_defi",
"=",
"individu",
"(",
"'arag_defi'",
",",
"period",
")",
"nrag_exon",
"=",
"individu",
"(",
"'nrag_exon'",
",",
"period",
")",
"nrag_impg",
"=",
"individu",
"(",
"'nrag_impg'",
",",
"period",
")",
"nrag_defi",
"=",
"individu",
"(",
"'nrag_defi'",
",",
"period",
")",
"nrag_ajag",
"=",
"individu",
"(",
"'nrag_ajag'",
",",
"period",
")",
"return",
"(",
"frag_exon",
"+",
"frag_impo",
"+",
"mrag_exon",
"+",
"mrag_impo",
"+",
"arag_exon",
"+",
"arag_impg",
"-",
"arag_defi",
"+",
"nrag_exon",
"+",
"nrag_impg",
"-",
"nrag_defi",
"+",
"nrag_ajag",
")"
] |
https://github.com/openfisca/openfisca-france/blob/207a58191be6830716693f94d37846f1e5037b51/openfisca_france/model/prelevements_obligatoires/impot_revenu/ir.py#L2519-L2540
|
|
mozilla-services/autopush
|
87e273c4581af88478d9e2658aa51d8c82a6d630
|
autopush/websocket.py
|
python
|
PushServerProtocol._sendAutoPing
|
(self)
|
return WebSocketServerProtocol._sendAutoPing(self)
|
Override for sanity checking during auto-ping interval
|
Override for sanity checking during auto-ping interval
|
[
"Override",
"for",
"sanity",
"checking",
"during",
"auto",
"-",
"ping",
"interval"
] |
def _sendAutoPing(self):
    """Override for sanity checking during auto-ping interval"""
    # Note: it's possible (but tracking information has yet to prove) that
    # a websocket connection could persist longer than the message record
    # expiration time (~30d), which might cause some problems. Most
    # websocket connections time out far, far earlier than that, which
    # resets the record expiration times.
    if not self.ps.uaid:
        # No uaid yet, drop the connection
        self.sendClose()
    elif self.factory.clients.get(self.ps.uaid) != self:
        # UAID, but we're not in clients anymore for some reason
        self.sendClose()
    # NOTE(review): sendClose() only initiates the closing handshake, so
    # the base-class auto-ping below still runs on the drop paths —
    # confirm this ordering is intended.
    return WebSocketServerProtocol._sendAutoPing(self)
|
[
"def",
"_sendAutoPing",
"(",
"self",
")",
":",
"# Note: it's possible (but tracking information has yet to prove) that",
"# a websocket connection could persist longer than the message record",
"# expiration time (~30d), which might cause some problems. Most",
"# websocket connections time out far, far earlier than that, which",
"# resets the record expiration times.",
"if",
"not",
"self",
".",
"ps",
".",
"uaid",
":",
"# No uaid yet, drop the connection",
"self",
".",
"sendClose",
"(",
")",
"elif",
"self",
".",
"factory",
".",
"clients",
".",
"get",
"(",
"self",
".",
"ps",
".",
"uaid",
")",
"!=",
"self",
":",
"# UAID, but we're not in clients anymore for some reason",
"self",
".",
"sendClose",
"(",
")",
"return",
"WebSocketServerProtocol",
".",
"_sendAutoPing",
"(",
"self",
")"
] |
https://github.com/mozilla-services/autopush/blob/87e273c4581af88478d9e2658aa51d8c82a6d630/autopush/websocket.py#L422-L435
|
|
spectacles/CodeComplice
|
8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62
|
libs/codeintel2/lang_javascript.py
|
python
|
JSObject.toElementTree
|
(self, cixelement)
|
return cixobject
|
[] |
def toElementTree(self, cixelement):
    """Serialize this JS object (and its visible children) into CIX.

    :param cixelement: the parent CIX element the new node is appended to.
    :returns: the created CIX element, or None when the object has no
        name or when its citdl type already fully describes it.
    """
    if not self.name:
        log.debug("%s has no name, line: %d, ignoring it.",
                  self.cixname, self.line)
        return None
    if self.cixname == "function":
        cixobject = createCixFunction(cixelement, self.name)
    elif self.cixname in ("object", "variable"):
        cixobject = createCixVariable(cixelement, self.name)
    elif self.cixname in ("class",):
        # Bug fix: the original test was `in ("class")` — a parenthesized
        # string, not a tuple — so it performed a substring check. The
        # 1-tuple makes it the intended exact match.
        cixobject = createCixClass(cixelement, self.name)
    # else:
    #    print "self.cixname: %r" %(self.cixname)
    # NOTE(review): if cixname matches none of the branches above,
    # `cixobject` is unbound and the next line raises — presumably the
    # cixname set is closed; confirm.
    cixobject.attrib["line"] = str(self.line)
    if self.lineend >= 0:
        cixobject.attrib["lineend"] = str(self.lineend)
    if ADD_PATH_CIX_INFO and self.path:
        cixobject.attrib["path"] = self.path
    jsdoc = self.jsdoc
    if jsdoc:
        # Fold JSDoc markers into both the doc text and the attribute list.
        attributeDocs = []
        if jsdoc.isDeprecated():
            attributeDocs.append("DEPRECATED")
            self.attributes.append("deprecated")
        if jsdoc.isPrivate():
            attributeDocs.append("PRIVATE")
            if "private" not in self.attributes:
                self.attributes.append("private")
        if jsdoc.isStatic():
            attributeDocs.append("STATIC")
            if "__static__" not in self.attributes:
                self.attributes.append("__static__")
        if jsdoc.isConstant():
            attributeDocs.append("CONSTANT")
            if "constant" not in self.attributes:
                self.attributes.append("constant")
        if jsdoc.isConstructor():
            attributeDocs.append("CONSTRUCTOR")
            if "__ctor__" not in self.attributes:
                self.attributes.append("__ctor__")
        if jsdoc.is__local__():
            attributeDocs.append("__LOCAL__")
            if "__local__" not in self.attributes:
                self.attributes.append("__local__")
        if jsdoc.tags:
            cixobject.attrib["tags"] = jsdoc.tags
        if jsdoc.doc:
            if attributeDocs:
                setCixDoc(cixobject, "%s: %s" % (
                    " ".join(attributeDocs), jsdoc.doc))
            else:
                setCixDoc(cixobject, jsdoc.doc)
    # Additional one-off attributes
    if self.attributes:
        cixobject.attrib["attributes"] = " ".join(self.attributes)
    # Additional meta-data.
    if self.metadata:
        for key, value in list(self.metadata.items()):
            cixobject.attrib[key] = value
    # Add the type information, JSDoc overrides whatever the ciler found
    if jsdoc and jsdoc.type:
        # Convert the value into a standard name
        addCixType(cixobject, standardizeJSType(jsdoc.type))
    elif self.type:
        assert isinstance(self.type, str), \
            "self.type %r is not a str" % (self.type)
        addCixType(cixobject, standardizeJSType(self.type))
    if isinstance(self, JSFunction):
        signature = "%s(" % (self.name)
        # Add function arguments
        if self.args:
            signature += ", ".join(self.args)
        # Add function arguments to tree
        # Add signature - calltip
        signature += ")"
        cixobject.attrib["signature"] = signature
        # Add return type for functions, JSDoc gets precedence
        returnType = self.getReturnType()
        if returnType:
            addCixReturns(cixobject, returnType)
        # Add a "this" member for class functions
        if self._class:
            createCixVariable(cixobject, "this", vartype=self._class.name)
        elif self.parent and self.parent.cixname in ("object", "variable"):
            createCixVariable(cixobject, "this", vartype=self.parent.name)
    if self.cixname == "class":
        for baseclass in self.classrefs:
            addClassRef(cixobject, baseclass)
        if self.jsdoc and self.jsdoc.baseclasses:
            for baseclass in self.jsdoc.baseclasses:
                if baseclass not in self.classrefs:
                    addClassRef(cixobject, baseclass)
    # Note that arguments must be kept in the order they were defined.
    variables = list(self.variables.values())
    arguments = [x for x in variables if isinstance(x, JSArgument)]
    variables = [x for x in variables if not isinstance(x, JSArgument)]
    allValues = sorted(arguments, key=operator.attrgetter("pos", "name")) + \
        sorted(list(self.functions.values()) + list(self.members.values()) +
               list(self.classes.values()) + variables +
               self.anonymous_functions,
               key=operator.attrgetter("line", "name"))
    # If this is a variable with child elements, yet has a citdl type of
    # something that is not an "Object", don't bother to adding these child
    # elements, as we will just go with what the citdl information holds.
    # http://bugs.activestate.com/show_bug.cgi?id=78484
    # Ideally the ciler should include this information and have the tree
    # handler combine the completions from the citdl and also the child
    # elements, but this is not yet possible.
    if allValues and self.cixname == 'variable' and \
            cixobject.get("citdl") and cixobject.get("citdl") not in ("Object", "require()"):
        log.debug("Variable of type: %r contains %d child elements, "
                  "ignoring them.", cixobject.get("citdl"), len(allValues))
        return None
    # Sort and include contents
    for v in allValues:
        if not v.isHidden:
            v.toElementTree(cixobject)
    return cixobject
|
[
"def",
"toElementTree",
"(",
"self",
",",
"cixelement",
")",
":",
"if",
"not",
"self",
".",
"name",
":",
"log",
".",
"debug",
"(",
"\"%s has no name, line: %d, ignoring it.\"",
",",
"self",
".",
"cixname",
",",
"self",
".",
"line",
")",
"return",
"None",
"if",
"self",
".",
"cixname",
"==",
"\"function\"",
":",
"cixobject",
"=",
"createCixFunction",
"(",
"cixelement",
",",
"self",
".",
"name",
")",
"elif",
"self",
".",
"cixname",
"in",
"(",
"\"object\"",
",",
"\"variable\"",
")",
":",
"cixobject",
"=",
"createCixVariable",
"(",
"cixelement",
",",
"self",
".",
"name",
")",
"elif",
"self",
".",
"cixname",
"in",
"(",
"\"class\"",
")",
":",
"cixobject",
"=",
"createCixClass",
"(",
"cixelement",
",",
"self",
".",
"name",
")",
"# else:",
"# print \"self.cixname: %r\" %(self.cixname)",
"cixobject",
".",
"attrib",
"[",
"\"line\"",
"]",
"=",
"str",
"(",
"self",
".",
"line",
")",
"if",
"self",
".",
"lineend",
">=",
"0",
":",
"cixobject",
".",
"attrib",
"[",
"\"lineend\"",
"]",
"=",
"str",
"(",
"self",
".",
"lineend",
")",
"if",
"ADD_PATH_CIX_INFO",
"and",
"self",
".",
"path",
":",
"cixobject",
".",
"attrib",
"[",
"\"path\"",
"]",
"=",
"self",
".",
"path",
"jsdoc",
"=",
"self",
".",
"jsdoc",
"if",
"jsdoc",
":",
"# print \"jsdoc: %r\" % (jsdoc)",
"# the docstring",
"# docElem.text = self.doc",
"attributeDocs",
"=",
"[",
"]",
"if",
"jsdoc",
".",
"isDeprecated",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"DEPRECATED\"",
")",
"self",
".",
"attributes",
".",
"append",
"(",
"\"deprecated\"",
")",
"if",
"jsdoc",
".",
"isPrivate",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"PRIVATE\"",
")",
"if",
"\"private\"",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"\"private\"",
")",
"if",
"jsdoc",
".",
"isStatic",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"STATIC\"",
")",
"if",
"\"__static__\"",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"\"__static__\"",
")",
"if",
"jsdoc",
".",
"isConstant",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"CONSTANT\"",
")",
"if",
"\"constant\"",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"\"constant\"",
")",
"if",
"jsdoc",
".",
"isConstructor",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"CONSTRUCTOR\"",
")",
"if",
"\"__ctor__\"",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"\"__ctor__\"",
")",
"if",
"jsdoc",
".",
"is__local__",
"(",
")",
":",
"attributeDocs",
".",
"append",
"(",
"\"__LOCAL__\"",
")",
"if",
"\"__local__\"",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"\"__local__\"",
")",
"if",
"jsdoc",
".",
"tags",
":",
"cixobject",
".",
"attrib",
"[",
"\"tags\"",
"]",
"=",
"jsdoc",
".",
"tags",
"if",
"jsdoc",
".",
"doc",
":",
"if",
"attributeDocs",
":",
"setCixDoc",
"(",
"cixobject",
",",
"\"%s: %s\"",
"%",
"(",
"\" \"",
".",
"join",
"(",
"attributeDocs",
")",
",",
"jsdoc",
".",
"doc",
")",
")",
"else",
":",
"setCixDoc",
"(",
"cixobject",
",",
"jsdoc",
".",
"doc",
")",
"# Additional one-off attributes",
"if",
"self",
".",
"attributes",
":",
"cixobject",
".",
"attrib",
"[",
"\"attributes\"",
"]",
"=",
"\" \"",
".",
"join",
"(",
"self",
".",
"attributes",
")",
"# Additional meta-data.",
"if",
"self",
".",
"metadata",
":",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"self",
".",
"metadata",
".",
"items",
"(",
")",
")",
":",
"cixobject",
".",
"attrib",
"[",
"key",
"]",
"=",
"value",
"# Add the type information, JSDoc overrides whatever the ciler found",
"if",
"jsdoc",
"and",
"jsdoc",
".",
"type",
":",
"# Convert the value into a standard name",
"addCixType",
"(",
"cixobject",
",",
"standardizeJSType",
"(",
"jsdoc",
".",
"type",
")",
")",
"elif",
"self",
".",
"type",
":",
"assert",
"isinstance",
"(",
"self",
".",
"type",
",",
"str",
")",
",",
"\"self.type %r is not a str\"",
"%",
"(",
"self",
".",
"type",
")",
"addCixType",
"(",
"cixobject",
",",
"standardizeJSType",
"(",
"self",
".",
"type",
")",
")",
"if",
"isinstance",
"(",
"self",
",",
"JSFunction",
")",
":",
"signature",
"=",
"\"%s(\"",
"%",
"(",
"self",
".",
"name",
")",
"# Add function arguments",
"if",
"self",
".",
"args",
":",
"signature",
"+=",
"\", \"",
".",
"join",
"(",
"self",
".",
"args",
")",
"# Add function arguments to tree",
"# Add signature - calltip",
"signature",
"+=",
"\")\"",
"cixobject",
".",
"attrib",
"[",
"\"signature\"",
"]",
"=",
"signature",
"# Add return type for functions, JSDoc gets precedence",
"returnType",
"=",
"self",
".",
"getReturnType",
"(",
")",
"if",
"returnType",
":",
"addCixReturns",
"(",
"cixobject",
",",
"returnType",
")",
"# Add a \"this\" member for class functions",
"if",
"self",
".",
"_class",
":",
"createCixVariable",
"(",
"cixobject",
",",
"\"this\"",
",",
"vartype",
"=",
"self",
".",
"_class",
".",
"name",
")",
"elif",
"self",
".",
"parent",
"and",
"self",
".",
"parent",
".",
"cixname",
"in",
"(",
"\"object\"",
",",
"\"variable\"",
")",
":",
"createCixVariable",
"(",
"cixobject",
",",
"\"this\"",
",",
"vartype",
"=",
"self",
".",
"parent",
".",
"name",
")",
"if",
"self",
".",
"cixname",
"==",
"\"class\"",
":",
"for",
"baseclass",
"in",
"self",
".",
"classrefs",
":",
"addClassRef",
"(",
"cixobject",
",",
"baseclass",
")",
"if",
"self",
".",
"jsdoc",
"and",
"self",
".",
"jsdoc",
".",
"baseclasses",
":",
"for",
"baseclass",
"in",
"self",
".",
"jsdoc",
".",
"baseclasses",
":",
"if",
"baseclass",
"not",
"in",
"self",
".",
"classrefs",
":",
"addClassRef",
"(",
"cixobject",
",",
"baseclass",
")",
"# Note that arguments must be kept in the order they were defined.",
"variables",
"=",
"list",
"(",
"self",
".",
"variables",
".",
"values",
"(",
")",
")",
"arguments",
"=",
"[",
"x",
"for",
"x",
"in",
"variables",
"if",
"isinstance",
"(",
"x",
",",
"JSArgument",
")",
"]",
"variables",
"=",
"[",
"x",
"for",
"x",
"in",
"variables",
"if",
"not",
"isinstance",
"(",
"x",
",",
"JSArgument",
")",
"]",
"allValues",
"=",
"sorted",
"(",
"arguments",
",",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"\"pos\"",
",",
"\"name\"",
")",
")",
"+",
"sorted",
"(",
"list",
"(",
"self",
".",
"functions",
".",
"values",
"(",
")",
")",
"+",
"list",
"(",
"self",
".",
"members",
".",
"values",
"(",
")",
")",
"+",
"list",
"(",
"self",
".",
"classes",
".",
"values",
"(",
")",
")",
"+",
"variables",
"+",
"self",
".",
"anonymous_functions",
",",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"\"line\"",
",",
"\"name\"",
")",
")",
"# If this is a variable with child elements, yet has a citdl type of",
"# something that is not an \"Object\", don't bother to adding these child",
"# elements, as we will just go with what the citdl information holds.",
"# http://bugs.activestate.com/show_bug.cgi?id=78484",
"# Ideally the ciler should include this information and have the tree",
"# handler combine the completions from the citdl and also the child",
"# elements, but this is not yet possible.",
"if",
"allValues",
"and",
"self",
".",
"cixname",
"==",
"'variable'",
"and",
"cixobject",
".",
"get",
"(",
"\"citdl\"",
")",
"and",
"cixobject",
".",
"get",
"(",
"\"citdl\"",
")",
"not",
"in",
"(",
"\"Object\"",
",",
"\"require()\"",
")",
":",
"log",
".",
"debug",
"(",
"\"Variable of type: %r contains %d child elements, \"",
"\"ignoring them.\"",
",",
"cixobject",
".",
"get",
"(",
"\"citdl\"",
")",
",",
"len",
"(",
"allValues",
")",
")",
"return",
"None",
"# Sort and include contents",
"for",
"v",
"in",
"allValues",
":",
"if",
"not",
"v",
".",
"isHidden",
":",
"v",
".",
"toElementTree",
"(",
"cixobject",
")",
"return",
"cixobject"
] |
https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/codeintel2/lang_javascript.py#L1297-L1429
|
|||
CvvT/dumpDex
|
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
|
python/idaapi.py
|
python
|
strvec_t.erase
|
(self, *args)
|
return _idaapi.strvec_t_erase(self, *args)
|
erase(self, it) -> simpleline_t
erase(self, first, last) -> simpleline_t
|
erase(self, it) -> simpleline_t
erase(self, first, last) -> simpleline_t
|
[
"erase",
"(",
"self",
"it",
")",
"-",
">",
"simpleline_t",
"erase",
"(",
"self",
"first",
"last",
")",
"-",
">",
"simpleline_t"
] |
def erase(self, *args):
    """
    erase(self, it) -> simpleline_t
    erase(self, first, last) -> simpleline_t
    """
    # SWIG-generated binding: forward both overloads to the native
    # _idaapi implementation, which dispatches on the argument count.
    return _idaapi.strvec_t_erase(self, *args)
|
[
"def",
"erase",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_idaapi",
".",
"strvec_t_erase",
"(",
"self",
",",
"*",
"args",
")"
] |
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L2127-L2132
|
|
mahmoudimus/nose-timer
|
472d8b514c4acc1824827ea193b686d75524a252
|
nosetimer/plugin.py
|
python
|
TimerPlugin._parse_filter
|
(value)
|
return value.split(',') if value is not None else None
|
Parse timer filters.
|
Parse timer filters.
|
[
"Parse",
"timer",
"filters",
"."
] |
def _parse_filter(value):
"""Parse timer filters."""
return value.split(',') if value is not None else None
|
[
"def",
"_parse_filter",
"(",
"value",
")",
":",
"return",
"value",
".",
"split",
"(",
"','",
")",
"if",
"value",
"is",
"not",
"None",
"else",
"None"
] |
https://github.com/mahmoudimus/nose-timer/blob/472d8b514c4acc1824827ea193b686d75524a252/nosetimer/plugin.py#L139-L141
|
|
wechatpy/wechatpy
|
5f693a7e90156786c2540ad3c941d12cdf6d88ef
|
wechatpy/work/client/api/appchat.py
|
python
|
WeChatAppChat._build_msg_content
|
(self, msgtype="text", **kwargs)
|
return data
|
构造消息内容
:param content: 消息内容,最长不超过2048个字节
:param msgtype: 消息类型,可以为text/image/voice/video/file/textcard/news/mpnews/markdown
:param kwargs: 具体消息类型的扩展参数
:return:
|
构造消息内容
|
[
"构造消息内容"
] |
def _build_msg_content(self, msgtype="text", **kwargs):
"""
构造消息内容
:param content: 消息内容,最长不超过2048个字节
:param msgtype: 消息类型,可以为text/image/voice/video/file/textcard/news/mpnews/markdown
:param kwargs: 具体消息类型的扩展参数
:return:
"""
data = {"msgtype": msgtype}
if msgtype == "text":
data[msgtype] = {"content": kwargs.get("content")}
elif msgtype == "image" or msgtype == "voice" or msgtype == "file":
data[msgtype] = {"media_id": kwargs.get("media_id")}
elif msgtype == "video":
data[msgtype] = {
"media_id": kwargs.get("media_id"),
"title": kwargs.get("title"),
"description": kwargs.get("description"),
}
elif msgtype == "textcard":
data[msgtype] = {
"title": kwargs.get("title"),
"description": kwargs.get("description"),
"url": kwargs.get("url"),
"btntxt": kwargs.get("btntxt"),
}
elif msgtype == "news":
# {
# "articles" :
# [
# {
# "title" : "中秋节礼品领取",
# "description" : "今年中秋节公司有豪礼相送",
# "url":"https://zhidao.baidu.com/question/2073647112026042748.html",
# "picurl":"http://res.mail.qq.com/node/ww/wwopenmng/images/independent/doc/test_pic_msg1.png"
# }
# ]
# }
data[msgtype] = kwargs
elif msgtype == "mpnews":
# {
# "articles":[
# {
# "title": "地球一小时",
# "thumb_media_id": "biz_get(image)",
# "author": "Author",
# "content_source_url": "https://work.weixin.qq.com",
# "content": "3月24日20:30-21:30 \n办公区将关闭照明一小时,请各部门同事相互转告",
# "digest": "3月24日20:30-21:30 \n办公区将关闭照明一小时"
# }
# ]
# }
data[msgtype] = kwargs
elif msgtype == "markdown":
# {
# "content": "您的会议室已经预定,稍后会同步到`邮箱`
# >**事项详情**
# >事 项:<font color=\"info\">开会</font>
# >组织者:@miglioguan
# >参与者:@miglioguan、@kunliu、@jamdeezhou、@kanexiong、@kisonwang
# >
# >会议室:<font color=\"info\">广州TIT 1楼 301</font>
# >日 期:<font color=\"warning\">2018年5月18日</font>
# >时 间:<font color=\"comment\">上午9:00-11:00</font>
# >
# >请准时参加会议。
# >
# >如需修改会议信息,请点击:[修改会议信息](https://work.weixin.qq.com)"
# }
data[msgtype] = kwargs
else:
raise TypeError(f"不能识别的msgtype: {msgtype}")
return data
|
[
"def",
"_build_msg_content",
"(",
"self",
",",
"msgtype",
"=",
"\"text\"",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"\"msgtype\"",
":",
"msgtype",
"}",
"if",
"msgtype",
"==",
"\"text\"",
":",
"data",
"[",
"msgtype",
"]",
"=",
"{",
"\"content\"",
":",
"kwargs",
".",
"get",
"(",
"\"content\"",
")",
"}",
"elif",
"msgtype",
"==",
"\"image\"",
"or",
"msgtype",
"==",
"\"voice\"",
"or",
"msgtype",
"==",
"\"file\"",
":",
"data",
"[",
"msgtype",
"]",
"=",
"{",
"\"media_id\"",
":",
"kwargs",
".",
"get",
"(",
"\"media_id\"",
")",
"}",
"elif",
"msgtype",
"==",
"\"video\"",
":",
"data",
"[",
"msgtype",
"]",
"=",
"{",
"\"media_id\"",
":",
"kwargs",
".",
"get",
"(",
"\"media_id\"",
")",
",",
"\"title\"",
":",
"kwargs",
".",
"get",
"(",
"\"title\"",
")",
",",
"\"description\"",
":",
"kwargs",
".",
"get",
"(",
"\"description\"",
")",
",",
"}",
"elif",
"msgtype",
"==",
"\"textcard\"",
":",
"data",
"[",
"msgtype",
"]",
"=",
"{",
"\"title\"",
":",
"kwargs",
".",
"get",
"(",
"\"title\"",
")",
",",
"\"description\"",
":",
"kwargs",
".",
"get",
"(",
"\"description\"",
")",
",",
"\"url\"",
":",
"kwargs",
".",
"get",
"(",
"\"url\"",
")",
",",
"\"btntxt\"",
":",
"kwargs",
".",
"get",
"(",
"\"btntxt\"",
")",
",",
"}",
"elif",
"msgtype",
"==",
"\"news\"",
":",
"# {",
"# \"articles\" :",
"# [",
"# {",
"# \"title\" : \"中秋节礼品领取\",",
"# \"description\" : \"今年中秋节公司有豪礼相送\",",
"# \"url\":\"https://zhidao.baidu.com/question/2073647112026042748.html\",",
"# \"picurl\":\"http://res.mail.qq.com/node/ww/wwopenmng/images/independent/doc/test_pic_msg1.png\"",
"# }",
"# ]",
"# }",
"data",
"[",
"msgtype",
"]",
"=",
"kwargs",
"elif",
"msgtype",
"==",
"\"mpnews\"",
":",
"# {",
"# \"articles\":[",
"# {",
"# \"title\": \"地球一小时\",",
"# \"thumb_media_id\": \"biz_get(image)\",",
"# \"author\": \"Author\",",
"# \"content_source_url\": \"https://work.weixin.qq.com\",",
"# \"content\": \"3月24日20:30-21:30 \\n办公区将关闭照明一小时,请各部门同事相互转告\",",
"# \"digest\": \"3月24日20:30-21:30 \\n办公区将关闭照明一小时\"",
"# }",
"# ]",
"# }",
"data",
"[",
"msgtype",
"]",
"=",
"kwargs",
"elif",
"msgtype",
"==",
"\"markdown\"",
":",
"# {",
"# \"content\": \"您的会议室已经预定,稍后会同步到`邮箱`",
"# >**事项详情**",
"# >事 项:<font color=\\\"info\\\">开会</font>",
"# >组织者:@miglioguan",
"# >参与者:@miglioguan、@kunliu、@jamdeezhou、@kanexiong、@kisonwang",
"# >",
"# >会议室:<font color=\\\"info\\\">广州TIT 1楼 301</font>",
"# >日 期:<font color=\\\"warning\\\">2018年5月18日</font>",
"# >时 间:<font color=\\\"comment\\\">上午9:00-11:00</font>",
"# >",
"# >请准时参加会议。",
"# >",
"# >如需修改会议信息,请点击:[修改会议信息](https://work.weixin.qq.com)\"",
"# }",
"data",
"[",
"msgtype",
"]",
"=",
"kwargs",
"else",
":",
"raise",
"TypeError",
"(",
"f\"不能识别的msgtype: {msgtype}\")",
"",
"return",
"data"
] |
https://github.com/wechatpy/wechatpy/blob/5f693a7e90156786c2540ad3c941d12cdf6d88ef/wechatpy/work/client/api/appchat.py#L104-L177
|
|
minio/minio-py
|
b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3
|
minio/datatypes.py
|
python
|
Object.owner_name
|
(self)
|
return self._owner_name
|
Get owner name.
|
Get owner name.
|
[
"Get",
"owner",
"name",
"."
] |
def owner_name(self):
    """Return the owner's display name."""
    # Plain accessor over the private attribute set during parsing.
    name = self._owner_name
    return name
|
[
"def",
"owner_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_owner_name"
] |
https://github.com/minio/minio-py/blob/b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3/minio/datatypes.py#L184-L186
|
|
haiwen/seafile-docker
|
2d2461d4c8cab3458ec9832611c419d47506c300
|
cluster/scripts/setup-seafile-mysql.py
|
python
|
NewDBConfigurator.ask_db_name
|
(self, program, default)
|
return Utils.ask_question(question,
key=key,
default=default,
validate=self.validate_db_name)
|
[] |
def ask_db_name(self, program, default):
    """Prompt the user for the database name used by *program*."""
    prompt = 'Enter the database name for %s:' % program
    # Delegate prompting/validation to the shared question helper.
    return Utils.ask_question(prompt,
                              key='%s database' % program,
                              default=default,
                              validate=self.validate_db_name)
|
[
"def",
"ask_db_name",
"(",
"self",
",",
"program",
",",
"default",
")",
":",
"question",
"=",
"'Enter the database name for %s:'",
"%",
"program",
"key",
"=",
"'%s database'",
"%",
"program",
"return",
"Utils",
".",
"ask_question",
"(",
"question",
",",
"key",
"=",
"key",
",",
"default",
"=",
"default",
",",
"validate",
"=",
"self",
".",
"validate_db_name",
")"
] |
https://github.com/haiwen/seafile-docker/blob/2d2461d4c8cab3458ec9832611c419d47506c300/cluster/scripts/setup-seafile-mysql.py#L633-L639
|
|||
tensorflow/quantum
|
864f9ce0774d7e58fb3b4c7d0b12d810042a2dbd
|
tensorflow_quantum/python/differentiators/differentiator.py
|
python
|
Differentiator.get_gradient_circuits
|
(self, programs, symbol_names, symbol_values)
|
Return circuits to compute gradients for given forward pass circuits.
Prepares (but does not execute) all intermediate circuits needed to
calculate the gradients for the given forward pass circuits specified by
`programs`, `symbol_names`, and `symbol_values`. The returned
`tf.Tensor` objects give all necessary information to recreate the
internal logic of the differentiator.
This base class defines the standard way to use the outputs of this
function to obtain either analytic gradients or sample gradients.
Below is code that is copied directly from the `differentiate_analytic`
default implementation, which is then compared to how one could
automatically get this gradient. The point is that the derivatives of
some functions cannot be calculated via the available auto-diff (such
as when the function is not expressible efficiently as a PauliSum),
and then one would need to use `get_gradient_circuits` the manual way.
Suppose we have some inputs `programs`, `symbol_names`, and
`symbol_values`. To get the derivative of the expectation values of a
tensor of PauliSums `pauli_sums` with respect to these inputs, do:
>>> diff = <some differentiator>()
>>> (
... batch_programs, new_symbol_names, batch_symbol_values,
... batch_weights, batch_mapper
... ) = diff.get_gradient_circuits(
... programs, symbol_names, symbol_values)
>>> exp_layer = tfq.layers.Expectation()
>>> batch_pauli_sums = tf.tile(
... tf.expand_dims(pauli_sums, 1),
... [1, tf.shape(batch_programs)[1], 1])
>>> n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
>>> n_symbols = tf.shape(new_symbol_names)[0]
>>> n_ops = tf.shape(pauli_sums)[1]
>>> batch_expectations = tfq.layers.Expectation()(
... tf.reshape(batch_programs, [n_batch_programs]),
... symbol_names=new_symbol_names,
... symbol_values=tf.reshape(
... batch_symbol_values, [n_batch_programs, n_symbols]),
... operators=tf.reshape(
... batch_pauli_sums, [n_batch_programs, n_ops]))
>>> batch_expectations = tf.reshape(
... batch_expectations, tf.shape(batch_pauli_sums))
>>> batch_jacobian = tf.map_fn(
... lambda x: tf.einsum('km,kmp->kp', x[0], tf.gather(x[1], x[2])),
... (batch_weights, batch_expectations, batch_mapper),
... fn_output_signature=tf.float32)
>>> grad_manual = tf.reduce_sum(batch_jacobian, -1)
To perform the same gradient calculation automatically:
>>> with tf.GradientTape() as g:
>>> g.watch(symbol_values)
>>> exact_outputs = tfq.layers.Expectation()(
... programs, symbol_names=symbol_names,
... symbol_values=symbol_values, operators=pauli_sums)
>>> grad_auto = g.gradient(exact_outputs, symbol_values)
>>> tf.math.reduce_all(grad_manual == grad_auto).numpy()
True
NOTE: this feature is intended for advanced users who need more
flexibility than the standard workflow allows.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed during
the forward pass.
symbol_names: `tf.Tensor` of strings with shape [n_params], which is
used to specify the order in which the values in `symbol_values`
should be placed inside of the circuits in `programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
into the circuits specified by programs during the forward pass,
following the ordering dictated by `symbol_names`.
Returns:
batch_programs: 2-D `tf.Tensor` of strings representing circuits to
run to evaluate the gradients. The first dimension is the length
of the input `programs`. At each index `i` in the first
dimension is the tensor of circuits required to evaluate the
gradient of the input circuit `programs[i]`. The size of the
second dimension is determined by the inheriting differentiator.
new_symbol_names: `tf.Tensor` of strings, containing the name of
every symbol used in every circuit in `batch_programs`. The
length is determined by the inheriting differentiator.
batch_symbol_values: 3-D `tf.Tensor` of DType `tf.float32`
containing values to fill in to every parameter in every
circuit. The first two dimensions are the same shape as
`batch_programs`; the last dimension is the length of
`new_symbol_names`. Thus, at each index `i` in the first
dimension is the 2-D tensor of parameter values to fill in to
`batch_programs[i]`.
batch_weights: 3-D `tf.Tensor` of DType `tf.float32` which defines
how much weight to give to each program when computing the
derivatives. First dimension is the length of the input
`programs`, second dimension is the length of the input
`symbol_names`, and the third dimension is determined by the
inheriting differentiator.
batch_mapper: 3-D `tf.Tensor` of DType `tf.int32` which defines
how to map expectation values of the circuits generated by this
differentiator to the derivatives of the original circuits.
It says which indices of the returned programs are relevant for
the derivative of each symbol, for use by `tf.gather`.
The first dimension is the length of the input `programs`, the
second dimension is the length of the input `symbol_names`,
and the third dimension is the length of the last dimension of
the output `batch_weights`.
|
Return circuits to compute gradients for given forward pass circuits.
|
[
"Return",
"circuits",
"to",
"compute",
"gradients",
"for",
"given",
"forward",
"pass",
"circuits",
"."
] |
def get_gradient_circuits(self, programs, symbol_names, symbol_values):
"""Return circuits to compute gradients for given forward pass circuits.
Prepares (but does not execute) all intermediate circuits needed to
calculate the gradients for the given forward pass circuits specified by
`programs`, `symbol_names`, and `symbol_values`. The returned
`tf.Tensor` objects give all necessary information to recreate the
internal logic of the differentiator.
This base class defines the standard way to use the outputs of this
function to obtain either analytic gradients or sample gradients.
Below is code that is copied directly from the `differentiate_analytic`
default implementation, which is then compared to how one could
automatically get this gradient. The point is that the derivatives of
some functions cannot be calculated via the available auto-diff (such
as when the function is not expressible efficiently as a PauliSum),
and then one would need to use `get_gradient_circuits` the manual way.
Suppose we have some inputs `programs`, `symbol_names`, and
`symbol_values`. To get the derivative of the expectation values of a
tensor of PauliSums `pauli_sums` with respect to these inputs, do:
>>> diff = <some differentiator>()
>>> (
... batch_programs, new_symbol_names, batch_symbol_values,
... batch_weights, batch_mapper
... ) = diff.get_gradient_circuits(
... programs, symbol_names, symbol_values)
>>> exp_layer = tfq.layers.Expectation()
>>> batch_pauli_sums = tf.tile(
... tf.expand_dims(pauli_sums, 1),
... [1, tf.shape(batch_programs)[1], 1])
>>> n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
>>> n_symbols = tf.shape(new_symbol_names)[0]
>>> n_ops = tf.shape(pauli_sums)[1]
>>> batch_expectations = tfq.layers.Expectation()(
... tf.reshape(batch_programs, [n_batch_programs]),
... symbol_names=new_symbol_names,
... symbol_values=tf.reshape(
... batch_symbol_values, [n_batch_programs, n_symbols]),
... operators=tf.reshape(
... batch_pauli_sums, [n_batch_programs, n_ops]))
>>> batch_expectations = tf.reshape(
... batch_expectations, tf.shape(batch_pauli_sums))
>>> batch_jacobian = tf.map_fn(
... lambda x: tf.einsum('km,kmp->kp', x[0], tf.gather(x[1], x[2])),
... (batch_weights, batch_expectations, batch_mapper),
... fn_output_signature=tf.float32)
>>> grad_manual = tf.reduce_sum(batch_jacobian, -1)
To perform the same gradient calculation automatically:
>>> with tf.GradientTape() as g:
>>> g.watch(symbol_values)
>>> exact_outputs = tfq.layers.Expectation()(
... programs, symbol_names=symbol_names,
... symbol_values=symbol_values, operators=pauli_sums)
>>> grad_auto = g.gradient(exact_outputs, symbol_values)
>>> tf.math.reduce_all(grad_manual == grad_auto).numpy()
True
NOTE: this feature is intended for advanced users who need more
flexibility than the standard workflow allows.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits to be executed during
the forward pass.
symbol_names: `tf.Tensor` of strings with shape [n_params], which is
used to specify the order in which the values in `symbol_values`
should be placed inside of the circuits in `programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
into the circuits specified by programs during the forward pass,
following the ordering dictated by `symbol_names`.
Returns:
batch_programs: 2-D `tf.Tensor` of strings representing circuits to
run to evaluate the gradients. The first dimension is the length
of the input `programs`. At each index `i` in the first
dimension is the tensor of circuits required to evaluate the
gradient of the input circuit `programs[i]`. The size of the
second dimension is determined by the inheriting differentiator.
new_symbol_names: `tf.Tensor` of strings, containing the name of
every symbol used in every circuit in `batch_programs`. The
length is determined by the inheriting differentiator.
batch_symbol_values: 3-D `tf.Tensor` of DType `tf.float32`
containing values to fill in to every parameter in every
circuit. The first two dimensions are the same shape as
`batch_programs`; the last dimension is the length of
`new_symbol_names`. Thus, at each index `i` in the first
dimension is the 2-D tensor of parameter values to fill in to
`batch_programs[i]`.
batch_weights: 3-D `tf.Tensor` of DType `tf.float32` which defines
how much weight to give to each program when computing the
derivatives. First dimension is the length of the input
`programs`, second dimension is the length of the input
`symbol_names`, and the third dimension is determined by the
inheriting differentiator.
batch_mapper: 3-D `tf.Tensor` of DType `tf.int32` which defines
how to map expectation values of the circuits generated by this
differentiator to the derivatives of the original circuits.
It says which indices of the returned programs are relevant for
the derivative of each symbol, for use by `tf.gather`.
The first dimension is the length of the input `programs`, the
second dimension is the length of the input `symbol_names`,
and the third dimension is the length of the last dimension of
the output `batch_weights`.
"""
|
[
"def",
"get_gradient_circuits",
"(",
"self",
",",
"programs",
",",
"symbol_names",
",",
"symbol_values",
")",
":"
] |
https://github.com/tensorflow/quantum/blob/864f9ce0774d7e58fb3b4c7d0b12d810042a2dbd/tensorflow_quantum/python/differentiators/differentiator.py#L212-L325
|
||
IJDykeman/wangTiles
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py
|
python
|
Wheel.is_mountable
|
(self)
|
return True
|
Determine if a wheel is asserted as mountable by its metadata.
|
Determine if a wheel is asserted as mountable by its metadata.
|
[
"Determine",
"if",
"a",
"wheel",
"is",
"asserted",
"as",
"mountable",
"by",
"its",
"metadata",
"."
] |
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True
|
[
"def",
"is_mountable",
"(",
"self",
")",
":",
"return",
"True"
] |
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py#L713-L717
|
|
sqlalchemy/alembic
|
85152025ddba1dbeb51b467f40eb36b795d2ca37
|
alembic/autogenerate/api.py
|
python
|
AutogenContext.sorted_tables
|
(self)
|
return result
|
Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s).
For a sequence of :class:`.MetaData` objects, this
concatenates the :attr:`.MetaData.sorted_tables` collection
for each individual :class:`.MetaData` in the order of the
sequence. It does **not** collate the sorted tables collections.
|
Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s).
|
[
"Return",
"an",
"aggregate",
"of",
"the",
":",
"attr",
":",
".",
"MetaData",
".",
"sorted_tables",
"collection",
"(",
"s",
")",
"."
] |
def sorted_tables(self):
"""Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s).
For a sequence of :class:`.MetaData` objects, this
concatenates the :attr:`.MetaData.sorted_tables` collection
for each individual :class:`.MetaData` in the order of the
sequence. It does **not** collate the sorted tables collections.
"""
result = []
for m in util.to_list(self.metadata):
result.extend(m.sorted_tables)
return result
|
[
"def",
"sorted_tables",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"m",
"in",
"util",
".",
"to_list",
"(",
"self",
".",
"metadata",
")",
":",
"result",
".",
"extend",
"(",
"m",
".",
"sorted_tables",
")",
"return",
"result"
] |
https://github.com/sqlalchemy/alembic/blob/85152025ddba1dbeb51b467f40eb36b795d2ca37/alembic/autogenerate/api.py#L431-L443
|
|
cogitas3d/OrtogOnBlender
|
881e93f5beb2263e44c270974dd0e81deca44762
|
CompareTools.py
|
python
|
B_point_soft_real_pt.poll
|
(cls, context)
|
[] |
def poll(cls, context):
found = 'B point soft real' in bpy.data.objects
if found == False:
return True
else:
if found == True:
return False
|
[
"def",
"poll",
"(",
"cls",
",",
"context",
")",
":",
"found",
"=",
"'B point soft real'",
"in",
"bpy",
".",
"data",
".",
"objects",
"if",
"found",
"==",
"False",
":",
"return",
"True",
"else",
":",
"if",
"found",
"==",
"True",
":",
"return",
"False"
] |
https://github.com/cogitas3d/OrtogOnBlender/blob/881e93f5beb2263e44c270974dd0e81deca44762/CompareTools.py#L792-L800
|
||||
kubernetes-client/python
|
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
|
kubernetes/client/models/v1_subject_rules_review_status.py
|
python
|
V1SubjectRulesReviewStatus.__eq__
|
(self, other)
|
return self.to_dict() == other.to_dict()
|
Returns true if both objects are equal
|
Returns true if both objects are equal
|
[
"Returns",
"true",
"if",
"both",
"objects",
"are",
"equal"
] |
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SubjectRulesReviewStatus):
return False
return self.to_dict() == other.to_dict()
|
[
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"V1SubjectRulesReviewStatus",
")",
":",
"return",
"False",
"return",
"self",
".",
"to_dict",
"(",
")",
"==",
"other",
".",
"to_dict",
"(",
")"
] |
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_subject_rules_review_status.py#L197-L202
|
|
chribsen/simple-machine-learning-examples
|
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
|
venv/lib/python2.7/site-packages/pandas/stats/ols.py
|
python
|
MovingOLS._df_raw
|
(self)
|
return self._rank_raw
|
Returns the degrees of freedom.
|
Returns the degrees of freedom.
|
[
"Returns",
"the",
"degrees",
"of",
"freedom",
"."
] |
def _df_raw(self):
"""Returns the degrees of freedom."""
return self._rank_raw
|
[
"def",
"_df_raw",
"(",
"self",
")",
":",
"return",
"self",
".",
"_rank_raw"
] |
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/stats/ols.py#L928-L930
|
|
unias/docklet
|
70c089a6a5bb186dc3f898127af84d79b4dfab2d
|
src/master/userManager.py
|
python
|
userManager.__init__
|
(self, username = 'root', password = None)
|
Try to create the database when there is none
initialize 'root' user and 'root' & 'primary' group
|
Try to create the database when there is none
initialize 'root' user and 'root' & 'primary' group
|
[
"Try",
"to",
"create",
"the",
"database",
"when",
"there",
"is",
"none",
"initialize",
"root",
"user",
"and",
"root",
"&",
"primary",
"group"
] |
def __init__(self, username = 'root', password = None):
'''
Try to create the database when there is none
initialize 'root' user and 'root' & 'primary' group
'''
try:
User.query.all()
except:
db.create_all()
if password == None:
#set a random password
password = os.urandom(16)
password = b64encode(password).decode('utf-8')
fsdir = env.getenv('FS_PREFIX')
f = open(fsdir + '/local/generated_password.txt', 'w')
f.write("User=%s\nPass=%s\n"%(username, password))
f.close()
sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest())
sys_admin.status = 'normal'
sys_admin.nickname = 'root'
sys_admin.description = 'Root_User'
sys_admin.user_group = 'root'
sys_admin.auth_method = 'local'
db.session.add(sys_admin)
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/master/userinit.sh", username])
db.session.commit()
if not os.path.exists(fspath+"/global/sys/quota"):
groupfile = open(fspath+"/global/sys/quota",'w')
groups = []
groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groupfile.write(json.dumps(groups))
groupfile.close()
if not os.path.exists(fspath+"/global/sys/quotainfo"):
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotas = {}
quotas['default'] = 'foundation'
quotas['quotainfo'] = []
quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'})
quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'})
quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'})
quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'})
quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'})
quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'})
quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'})
quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'})
quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotafile.write(json.dumps(quotas))
quotafile.close()
if not os.path.exists(fspath+"/global/sys/lxc.default"):
settingfile = open(fspath+"/global/sys/lxc.default", 'w')
settings = {}
settings['cpu'] = "2"
settings["memory"] = "2000"
settings["disk"] = "2000"
settingfile.write(json.dumps(settings))
settingfile.close()
try:
UserUsage.query.all()
LoginMsg.query.all()
LoginFailMsg.query.all()
except:
db.create_all()
|
[
"def",
"__init__",
"(",
"self",
",",
"username",
"=",
"'root'",
",",
"password",
"=",
"None",
")",
":",
"try",
":",
"User",
".",
"query",
".",
"all",
"(",
")",
"except",
":",
"db",
".",
"create_all",
"(",
")",
"if",
"password",
"==",
"None",
":",
"#set a random password",
"password",
"=",
"os",
".",
"urandom",
"(",
"16",
")",
"password",
"=",
"b64encode",
"(",
"password",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"fsdir",
"=",
"env",
".",
"getenv",
"(",
"'FS_PREFIX'",
")",
"f",
"=",
"open",
"(",
"fsdir",
"+",
"'/local/generated_password.txt'",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"\"User=%s\\nPass=%s\\n\"",
"%",
"(",
"username",
",",
"password",
")",
")",
"f",
".",
"close",
"(",
")",
"sys_admin",
"=",
"User",
"(",
"username",
",",
"hashlib",
".",
"sha512",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
")",
"sys_admin",
".",
"status",
"=",
"'normal'",
"sys_admin",
".",
"nickname",
"=",
"'root'",
"sys_admin",
".",
"description",
"=",
"'Root_User'",
"sys_admin",
".",
"user_group",
"=",
"'root'",
"sys_admin",
".",
"auth_method",
"=",
"'local'",
"db",
".",
"session",
".",
"add",
"(",
"sys_admin",
")",
"path",
"=",
"env",
".",
"getenv",
"(",
"'DOCKLET_LIB'",
")",
"subprocess",
".",
"call",
"(",
"[",
"path",
"+",
"\"/master/userinit.sh\"",
",",
"username",
"]",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fspath",
"+",
"\"/global/sys/quota\"",
")",
":",
"groupfile",
"=",
"open",
"(",
"fspath",
"+",
"\"/global/sys/quota\"",
",",
"'w'",
")",
"groups",
"=",
"[",
"]",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"'root'",
",",
"'quotas'",
":",
"{",
"'cpu'",
":",
"'4'",
",",
"'disk'",
":",
"'2000'",
",",
"'data'",
":",
"'100'",
",",
"'memory'",
":",
"'2000'",
",",
"'image'",
":",
"'10'",
",",
"'idletime'",
":",
"'24'",
",",
"'vnode'",
":",
"'8'",
",",
"'portmapping'",
":",
"'8'",
",",
"'input_rate_limit'",
":",
"'10000'",
",",
"'output_rate_limit'",
":",
"'10000'",
"}",
"}",
")",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"'admin'",
",",
"'quotas'",
":",
"{",
"'cpu'",
":",
"'4'",
",",
"'disk'",
":",
"'2000'",
",",
"'data'",
":",
"'100'",
",",
"'memory'",
":",
"'2000'",
",",
"'image'",
":",
"'10'",
",",
"'idletime'",
":",
"'24'",
",",
"'vnode'",
":",
"'8'",
",",
"'portmapping'",
":",
"'8'",
",",
"'input_rate_limit'",
":",
"'10000'",
",",
"'output_rate_limit'",
":",
"'10000'",
"}",
"}",
")",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"'primary'",
",",
"'quotas'",
":",
"{",
"'cpu'",
":",
"'4'",
",",
"'disk'",
":",
"'2000'",
",",
"'data'",
":",
"'100'",
",",
"'memory'",
":",
"'2000'",
",",
"'image'",
":",
"'10'",
",",
"'idletime'",
":",
"'24'",
",",
"'vnode'",
":",
"'8'",
",",
"'portmapping'",
":",
"'8'",
",",
"'input_rate_limit'",
":",
"'10000'",
",",
"'output_rate_limit'",
":",
"'10000'",
"}",
"}",
")",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"'foundation'",
",",
"'quotas'",
":",
"{",
"'cpu'",
":",
"'4'",
",",
"'disk'",
":",
"'2000'",
",",
"'data'",
":",
"'100'",
",",
"'memory'",
":",
"'2000'",
",",
"'image'",
":",
"'10'",
",",
"'idletime'",
":",
"'24'",
",",
"'vnode'",
":",
"'8'",
",",
"'portmapping'",
":",
"'8'",
",",
"'input_rate_limit'",
":",
"'10000'",
",",
"'output_rate_limit'",
":",
"'10000'",
"}",
"}",
")",
"groupfile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"groups",
")",
")",
"groupfile",
".",
"close",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fspath",
"+",
"\"/global/sys/quotainfo\"",
")",
":",
"quotafile",
"=",
"open",
"(",
"fspath",
"+",
"\"/global/sys/quotainfo\"",
",",
"'w'",
")",
"quotas",
"=",
"{",
"}",
"quotas",
"[",
"'default'",
"]",
"=",
"'foundation'",
"quotas",
"[",
"'quotainfo'",
"]",
"=",
"[",
"]",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'cpu'",
",",
"'hint'",
":",
"'the cpu quota, number of cores, e.g. 4'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'memory'",
",",
"'hint'",
":",
"'the memory quota, number of MB , e.g. 4000'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'disk'",
",",
"'hint'",
":",
"'the disk quota, number of MB, e.g. 4000'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'data'",
",",
"'hint'",
":",
"'the quota of data space, number of GB, e.g. 100'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'image'",
",",
"'hint'",
":",
"'how many images the user can save, e.g. 10'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'idletime'",
",",
"'hint'",
":",
"'will stop cluster after idletime, number of hours, e.g. 24'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'vnode'",
",",
"'hint'",
":",
"'how many containers the user can have, e.g. 8'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'portmapping'",
",",
"'hint'",
":",
"'how many ports the user can map, e.g. 8'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'input_rate_limit'",
",",
"'hint'",
":",
"'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'",
"}",
")",
"quotas",
"[",
"'quotainfo'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"'output_rate_limit'",
",",
"'hint'",
":",
"'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'",
"}",
")",
"quotafile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"quotas",
")",
")",
"quotafile",
".",
"close",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fspath",
"+",
"\"/global/sys/lxc.default\"",
")",
":",
"settingfile",
"=",
"open",
"(",
"fspath",
"+",
"\"/global/sys/lxc.default\"",
",",
"'w'",
")",
"settings",
"=",
"{",
"}",
"settings",
"[",
"'cpu'",
"]",
"=",
"\"2\"",
"settings",
"[",
"\"memory\"",
"]",
"=",
"\"2000\"",
"settings",
"[",
"\"disk\"",
"]",
"=",
"\"2000\"",
"settingfile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"settings",
")",
")",
"settingfile",
".",
"close",
"(",
")",
"try",
":",
"UserUsage",
".",
"query",
".",
"all",
"(",
")",
"LoginMsg",
".",
"query",
".",
"all",
"(",
")",
"LoginFailMsg",
".",
"query",
".",
"all",
"(",
")",
"except",
":",
"db",
".",
"create_all",
"(",
")"
] |
https://github.com/unias/docklet/blob/70c089a6a5bb186dc3f898127af84d79b4dfab2d/src/master/userManager.py#L140-L207
|
||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/boto/cloudfront/invalidation.py
|
python
|
InvalidationBatch.add
|
(self, path)
|
return self.paths.append(path)
|
Add another path to this invalidation request
|
Add another path to this invalidation request
|
[
"Add",
"another",
"path",
"to",
"this",
"invalidation",
"request"
] |
def add(self, path):
"""Add another path to this invalidation request"""
return self.paths.append(path)
|
[
"def",
"add",
"(",
"self",
",",
"path",
")",
":",
"return",
"self",
".",
"paths",
".",
"append",
"(",
"path",
")"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/boto/cloudfront/invalidation.py#L53-L55
|
|
posativ/acrylamid
|
222e2eb7b33924138498ff8186dff9f0aeb78cea
|
acrylamid/readers.py
|
python
|
MetadataMixin.month
|
(self)
|
return '%02d' % self.imonth
|
entry's month as zero padded string
|
entry's month as zero padded string
|
[
"entry",
"s",
"month",
"as",
"zero",
"padded",
"string"
] |
def month(self):
"""entry's month as zero padded string"""
return '%02d' % self.imonth
|
[
"def",
"month",
"(",
"self",
")",
":",
"return",
"'%02d'",
"%",
"self",
".",
"imonth"
] |
https://github.com/posativ/acrylamid/blob/222e2eb7b33924138498ff8186dff9f0aeb78cea/acrylamid/readers.py#L423-L425
|
|
PaddlePaddle/PGL
|
e48545f2814523c777b8a9a9188bf5a7f00d6e52
|
examples/kddcup2021/PCQM4M/utils/config.py
|
python
|
save_files
|
(config)
|
Save config file so that we can know the config when we look back
|
Save config file so that we can know the config when we look back
|
[
"Save",
"config",
"file",
"so",
"that",
"we",
"can",
"know",
"the",
"config",
"when",
"we",
"look",
"back"
] |
def save_files(config):
"""Save config file so that we can know the config when we look back
"""
filelist = config.files2saved
targetpath = config.log_dir
if filelist is not None:
for file_or_dir in filelist:
if os.path.isdir(file_or_dir):
last_name = get_last_dir(file_or_dir)
dst = os.path.join(targetpath, last_name)
try:
copy_and_overwrite(file_or_dir, dst)
except Exception as e:
print(e)
print("backup %s to %s" % (file_or_dir, targetpath))
else:
for filename in files(files=file_or_dir):
if os.path.isfile(filename):
print("backup %s to %s" % (filename, targetpath))
shutil.copy2(filename, targetpath)
else:
print("%s is not existed." % filename)
|
[
"def",
"save_files",
"(",
"config",
")",
":",
"filelist",
"=",
"config",
".",
"files2saved",
"targetpath",
"=",
"config",
".",
"log_dir",
"if",
"filelist",
"is",
"not",
"None",
":",
"for",
"file_or_dir",
"in",
"filelist",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"file_or_dir",
")",
":",
"last_name",
"=",
"get_last_dir",
"(",
"file_or_dir",
")",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"targetpath",
",",
"last_name",
")",
"try",
":",
"copy_and_overwrite",
"(",
"file_or_dir",
",",
"dst",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"\"backup %s to %s\"",
"%",
"(",
"file_or_dir",
",",
"targetpath",
")",
")",
"else",
":",
"for",
"filename",
"in",
"files",
"(",
"files",
"=",
"file_or_dir",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"print",
"(",
"\"backup %s to %s\"",
"%",
"(",
"filename",
",",
"targetpath",
")",
")",
"shutil",
".",
"copy2",
"(",
"filename",
",",
"targetpath",
")",
"else",
":",
"print",
"(",
"\"%s is not existed.\"",
"%",
"filename",
")"
] |
https://github.com/PaddlePaddle/PGL/blob/e48545f2814523c777b8a9a9188bf5a7f00d6e52/examples/kddcup2021/PCQM4M/utils/config.py#L84-L106
|
||
uqfoundation/multiprocess
|
028cc73f02655e6451d92e5147d19d8c10aebe50
|
py3.3/multiprocess/managers.py
|
python
|
BaseManager._number_of_objects
|
(self)
|
Return the number of shared objects
|
Return the number of shared objects
|
[
"Return",
"the",
"number",
"of",
"shared",
"objects"
] |
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
|
[
"def",
"_number_of_objects",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"_Client",
"(",
"self",
".",
"_address",
",",
"authkey",
"=",
"self",
".",
"_authkey",
")",
"try",
":",
"return",
"dispatch",
"(",
"conn",
",",
"None",
",",
"'number_of_objects'",
")",
"finally",
":",
"conn",
".",
"close",
"(",
")"
] |
https://github.com/uqfoundation/multiprocess/blob/028cc73f02655e6451d92e5147d19d8c10aebe50/py3.3/multiprocess/managers.py#L553-L561
|
||
iclavera/learning_to_adapt
|
bd7d99ba402521c96631e7d09714128f549db0f1
|
learning_to_adapt/mujoco_py/mjtypes.py
|
python
|
MjModelWrapper.light_cutoff
|
(self)
|
return arr
|
[] |
def light_cutoff(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_cutoff, dtype=np.float, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
|
[
"def",
"light_cutoff",
"(",
"self",
")",
":",
"arr",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"fromiter",
"(",
"self",
".",
"_wrapped",
".",
"contents",
".",
"light_cutoff",
",",
"dtype",
"=",
"np",
".",
"float",
",",
"count",
"=",
"(",
"self",
".",
"nlight",
"*",
"1",
")",
")",
",",
"(",
"self",
".",
"nlight",
",",
"1",
",",
")",
")",
"arr",
".",
"setflags",
"(",
"write",
"=",
"False",
")",
"return",
"arr"
] |
https://github.com/iclavera/learning_to_adapt/blob/bd7d99ba402521c96631e7d09714128f549db0f1/learning_to_adapt/mujoco_py/mjtypes.py#L4755-L4758
|
|||
amanusk/s-tui
|
d7a9ee4efbfc6f56b373a16dcd578881c534b2ce
|
s_tui/sturwid/complex_bar_graph.py
|
python
|
LabeledBarGraphVector.set_title
|
(self, title)
|
[] |
def set_title(self, title):
if not title:
return
title_text_w = urwid.Text(title, align="center")
list_w = urwid.SimpleFocusListWalker([title_text_w])
self.title.original_widget = urwid.ListBox(list_w)
|
[
"def",
"set_title",
"(",
"self",
",",
"title",
")",
":",
"if",
"not",
"title",
":",
"return",
"title_text_w",
"=",
"urwid",
".",
"Text",
"(",
"title",
",",
"align",
"=",
"\"center\"",
")",
"list_w",
"=",
"urwid",
".",
"SimpleFocusListWalker",
"(",
"[",
"title_text_w",
"]",
")",
"self",
".",
"title",
".",
"original_widget",
"=",
"urwid",
".",
"ListBox",
"(",
"list_w",
")"
] |
https://github.com/amanusk/s-tui/blob/d7a9ee4efbfc6f56b373a16dcd578881c534b2ce/s_tui/sturwid/complex_bar_graph.py#L109-L114
|
||||
kubeflow/pipelines
|
bea751c9259ff0ae85290f873170aae89284ba8e
|
backend/api/python_http_client/kfp_server_api/models/api_list_jobs_response.py
|
python
|
ApiListJobsResponse.total_size
|
(self)
|
return self._total_size
|
Gets the total_size of this ApiListJobsResponse. # noqa: E501
The total number of jobs for the given query. # noqa: E501
:return: The total_size of this ApiListJobsResponse. # noqa: E501
:rtype: int
|
Gets the total_size of this ApiListJobsResponse. # noqa: E501
|
[
"Gets",
"the",
"total_size",
"of",
"this",
"ApiListJobsResponse",
".",
"#",
"noqa",
":",
"E501"
] |
def total_size(self):
"""Gets the total_size of this ApiListJobsResponse. # noqa: E501
The total number of jobs for the given query. # noqa: E501
:return: The total_size of this ApiListJobsResponse. # noqa: E501
:rtype: int
"""
return self._total_size
|
[
"def",
"total_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"_total_size"
] |
https://github.com/kubeflow/pipelines/blob/bea751c9259ff0ae85290f873170aae89284ba8e/backend/api/python_http_client/kfp_server_api/models/api_list_jobs_response.py#L89-L97
|
|
ywangd/stash
|
773d15b8fb3853a65c15fe160bf5584c99437170
|
system/shio.py
|
python
|
ShIO.close
|
(self)
|
This IO object cannot be closed.
|
This IO object cannot be closed.
|
[
"This",
"IO",
"object",
"cannot",
"be",
"closed",
"."
] |
def close(self):
"""
This IO object cannot be closed.
"""
pass
|
[
"def",
"close",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/ywangd/stash/blob/773d15b8fb3853a65c15fe160bf5584c99437170/system/shio.py#L41-L45
|
||
ethereum/lahja
|
f51c0b738a7dfd8b2b08a021cb31849792422625
|
lahja/base.py
|
python
|
EndpointAPI.wait_until_all_endpoints_subscribed_to
|
(
self, event: Type[BaseEvent], *, include_self: bool = True
)
|
Block until all currently connected remote endpoints are subscribed to the specified
event type from this endpoint.
|
Block until all currently connected remote endpoints are subscribed to the specified
event type from this endpoint.
|
[
"Block",
"until",
"all",
"currently",
"connected",
"remote",
"endpoints",
"are",
"subscribed",
"to",
"the",
"specified",
"event",
"type",
"from",
"this",
"endpoint",
"."
] |
async def wait_until_all_endpoints_subscribed_to(
self, event: Type[BaseEvent], *, include_self: bool = True
) -> None:
"""
Block until all currently connected remote endpoints are subscribed to the specified
event type from this endpoint.
"""
...
|
[
"async",
"def",
"wait_until_all_endpoints_subscribed_to",
"(",
"self",
",",
"event",
":",
"Type",
"[",
"BaseEvent",
"]",
",",
"*",
",",
"include_self",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"..."
] |
https://github.com/ethereum/lahja/blob/f51c0b738a7dfd8b2b08a021cb31849792422625/lahja/base.py#L579-L586
|
||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/sympy/sympy/mpmath/libmp/libmpc.py
|
python
|
mpc_exp
|
(z, prec, rnd=round_fast)
|
return re, im
|
Complex exponential function.
We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
for the computation. This formula is very nice because it is
pefectly stable; since we just do real multiplications, the only
numerical errors that can creep in are single-ulp rounding errors.
The formula is efficient since mpmath's real exp is quite fast and
since we can compute cos and sin simultaneously.
It is no problem if a and b are large; if the implementations of
exp/cos/sin are accurate and efficient for all real numbers, then
so is this function for all complex numbers.
|
Complex exponential function.
|
[
"Complex",
"exponential",
"function",
"."
] |
def mpc_exp(z, prec, rnd=round_fast):
"""
Complex exponential function.
We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
for the computation. This formula is very nice because it is
pefectly stable; since we just do real multiplications, the only
numerical errors that can creep in are single-ulp rounding errors.
The formula is efficient since mpmath's real exp is quite fast and
since we can compute cos and sin simultaneously.
It is no problem if a and b are large; if the implementations of
exp/cos/sin are accurate and efficient for all real numbers, then
so is this function for all complex numbers.
"""
a, b = z
if a == fzero:
return mpf_cos_sin(b, prec, rnd)
if b == fzero:
return mpf_exp(a, prec, rnd), fzero
mag = mpf_exp(a, prec+4, rnd)
c, s = mpf_cos_sin(b, prec+4, rnd)
re = mpf_mul(mag, c, prec, rnd)
im = mpf_mul(mag, s, prec, rnd)
return re, im
|
[
"def",
"mpc_exp",
"(",
"z",
",",
"prec",
",",
"rnd",
"=",
"round_fast",
")",
":",
"a",
",",
"b",
"=",
"z",
"if",
"a",
"==",
"fzero",
":",
"return",
"mpf_cos_sin",
"(",
"b",
",",
"prec",
",",
"rnd",
")",
"if",
"b",
"==",
"fzero",
":",
"return",
"mpf_exp",
"(",
"a",
",",
"prec",
",",
"rnd",
")",
",",
"fzero",
"mag",
"=",
"mpf_exp",
"(",
"a",
",",
"prec",
"+",
"4",
",",
"rnd",
")",
"c",
",",
"s",
"=",
"mpf_cos_sin",
"(",
"b",
",",
"prec",
"+",
"4",
",",
"rnd",
")",
"re",
"=",
"mpf_mul",
"(",
"mag",
",",
"c",
",",
"prec",
",",
"rnd",
")",
"im",
"=",
"mpf_mul",
"(",
"mag",
",",
"s",
",",
"prec",
",",
"rnd",
")",
"return",
"re",
",",
"im"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/mpmath/libmp/libmpc.py#L417-L442
|
|
SteveDoyle2/pyNastran
|
eda651ac2d4883d95a34951f8a002ff94f642a1a
|
pyNastran/gui/utils/qt/qtreeview2.py
|
python
|
get_many_cases
|
(data)
|
return cases
|
Get the result case ids that are a subset of the data/form list
data = [
(u'Element Checks', None, [
(u'ElementDim', 5, []),
(u'Min Edge Length', 6, []),
(u'Min Interior Angle', 7, []),
(u'Max Interior Angle', 8, [])],
),
]
>>> get_many_cases(data)
[5, 6, 7, 8]
>>> data = [(u'Max Interior Angle', 8, [])]
[8]
|
Get the result case ids that are a subset of the data/form list
|
[
"Get",
"the",
"result",
"case",
"ids",
"that",
"are",
"a",
"subset",
"of",
"the",
"data",
"/",
"form",
"list"
] |
def get_many_cases(data):
"""
Get the result case ids that are a subset of the data/form list
data = [
(u'Element Checks', None, [
(u'ElementDim', 5, []),
(u'Min Edge Length', 6, []),
(u'Min Interior Angle', 7, []),
(u'Max Interior Angle', 8, [])],
),
]
>>> get_many_cases(data)
[5, 6, 7, 8]
>>> data = [(u'Max Interior Angle', 8, [])]
[8]
"""
unused_name, case, rows = data
if case is None:
# remove many results
# (Geometry, None, [results...])
cases = []
for unused_irow, row in enumerate(rows):
unused_name, unused_row_id, unused_data2 = row
cases += get_many_cases(row)
else:
cases = [case]
return cases
|
[
"def",
"get_many_cases",
"(",
"data",
")",
":",
"unused_name",
",",
"case",
",",
"rows",
"=",
"data",
"if",
"case",
"is",
"None",
":",
"# remove many results",
"# (Geometry, None, [results...])",
"cases",
"=",
"[",
"]",
"for",
"unused_irow",
",",
"row",
"in",
"enumerate",
"(",
"rows",
")",
":",
"unused_name",
",",
"unused_row_id",
",",
"unused_data2",
"=",
"row",
"cases",
"+=",
"get_many_cases",
"(",
"row",
")",
"else",
":",
"cases",
"=",
"[",
"case",
"]",
"return",
"cases"
] |
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/gui/utils/qt/qtreeview2.py#L432-L461
|
|
imageio/imageio
|
51f552ba5ed4aa771f205a2798e1fd3859fb2942
|
imageio/plugins/_tifffile.py
|
python
|
TiffFile.nih_metadata
|
(self)
|
return self.pages[0].tags["NIHImageHeader"].value
|
Return NIH Image metadata from NIHImageHeader tag as dict.
|
Return NIH Image metadata from NIHImageHeader tag as dict.
|
[
"Return",
"NIH",
"Image",
"metadata",
"from",
"NIHImageHeader",
"tag",
"as",
"dict",
"."
] |
def nih_metadata(self):
"""Return NIH Image metadata from NIHImageHeader tag as dict."""
if not self.is_nih:
return
return self.pages[0].tags["NIHImageHeader"].value
|
[
"def",
"nih_metadata",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_nih",
":",
"return",
"return",
"self",
".",
"pages",
"[",
"0",
"]",
".",
"tags",
"[",
"\"NIHImageHeader\"",
"]",
".",
"value"
] |
https://github.com/imageio/imageio/blob/51f552ba5ed4aa771f205a2798e1fd3859fb2942/imageio/plugins/_tifffile.py#L2844-L2848
|
|
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
NLP/MRQA2019-D-NET/server/ernie_server/task_reader/tokenization.py
|
python
|
_is_whitespace
|
(char)
|
return False
|
Checks whether `chars` is a whitespace character.
|
Checks whether `chars` is a whitespace character.
|
[
"Checks",
"whether",
"chars",
"is",
"a",
"whitespace",
"character",
"."
] |
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
|
[
"def",
"_is_whitespace",
"(",
"char",
")",
":",
"# \\t, \\n, and \\r are technically contorl characters but we treat them",
"# as whitespace since they are generally considered as such.",
"if",
"char",
"==",
"\" \"",
"or",
"char",
"==",
"\"\\t\"",
"or",
"char",
"==",
"\"\\n\"",
"or",
"char",
"==",
"\"\\r\"",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"==",
"\"Zs\"",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/NLP/MRQA2019-D-NET/server/ernie_server/task_reader/tokenization.py#L337-L346
|
|
earwig/mwparserfromhell
|
abcf6298aa53f536f6626be1d8fd4a863afed878
|
src/mwparserfromhell/nodes/extras/attribute.py
|
python
|
Attribute._set_padding
|
(self, attr, value)
|
Setter for the value of a padding attribute.
|
Setter for the value of a padding attribute.
|
[
"Setter",
"for",
"the",
"value",
"of",
"a",
"padding",
"attribute",
"."
] |
def _set_padding(self, attr, value):
"""Setter for the value of a padding attribute."""
if not value:
setattr(self, attr, "")
else:
value = str(value)
if not value.isspace():
raise ValueError("padding must be entirely whitespace")
setattr(self, attr, value)
|
[
"def",
"_set_padding",
"(",
"self",
",",
"attr",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"setattr",
"(",
"self",
",",
"attr",
",",
"\"\"",
")",
"else",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"not",
"value",
".",
"isspace",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"padding must be entirely whitespace\"",
")",
"setattr",
"(",
"self",
",",
"attr",
",",
"value",
")"
] |
https://github.com/earwig/mwparserfromhell/blob/abcf6298aa53f536f6626be1d8fd4a863afed878/src/mwparserfromhell/nodes/extras/attribute.py#L77-L85
|
||
google-research/tf-slim
|
e00575ad39d19112a4b1342930825258316cf233
|
tf_slim/data/parallel_reader.py
|
python
|
single_pass_read
|
(data_sources, reader_class, reader_kwargs=None,
scope=None)
|
Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
|
Reads sequentially the data_sources using the reader, doing a single pass.
|
[
"Reads",
"sequentially",
"the",
"data_sources",
"using",
"the",
"reader",
"doing",
"a",
"single",
"pass",
"."
] |
def single_pass_read(data_sources, reader_class, reader_kwargs=None,
scope=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'single_pass_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=1, shuffle=False, capacity=1, name='filenames')
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
|
[
"def",
"single_pass_read",
"(",
"data_sources",
",",
"reader_class",
",",
"reader_kwargs",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"data_files",
"=",
"get_data_files",
"(",
"data_sources",
")",
"with",
"ops",
".",
"name_scope",
"(",
"scope",
",",
"'single_pass_read'",
")",
":",
"filename_queue",
"=",
"tf_input",
".",
"string_input_producer",
"(",
"data_files",
",",
"num_epochs",
"=",
"1",
",",
"shuffle",
"=",
"False",
",",
"capacity",
"=",
"1",
",",
"name",
"=",
"'filenames'",
")",
"reader_kwargs",
"=",
"reader_kwargs",
"or",
"{",
"}",
"return",
"reader_class",
"(",
"*",
"*",
"reader_kwargs",
")",
".",
"read",
"(",
"filename_queue",
")"
] |
https://github.com/google-research/tf-slim/blob/e00575ad39d19112a4b1342930825258316cf233/tf_slim/data/parallel_reader.py#L273-L292
|
||
Esri/ArcREST
|
ab240fde2b0200f61d4a5f6df033516e53f2f416
|
src/arcrest/manageags/parameters.py
|
python
|
Extension.properties
|
(self)
|
return self._properties
|
gets/sets the extension properties
|
gets/sets the extension properties
|
[
"gets",
"/",
"sets",
"the",
"extension",
"properties"
] |
def properties(self):
"""gets/sets the extension properties"""
return self._properties
|
[
"def",
"properties",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties"
] |
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/parameters.py#L34-L36
|
|
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
twistlock/datadog_checks/twistlock/config_models/defaults.py
|
python
|
instance_proxy
|
(field, value)
|
return get_default_field_value(field, value)
|
[] |
def instance_proxy(field, value):
return get_default_field_value(field, value)
|
[
"def",
"instance_proxy",
"(",
"field",
",",
"value",
")",
":",
"return",
"get_default_field_value",
"(",
"field",
",",
"value",
")"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/twistlock/datadog_checks/twistlock/config_models/defaults.py#L125-L126
|
|||
PaddlePaddle/PaddleX
|
2bab73f81ab54e328204e7871e6ae4a82e719f5d
|
paddlex/ppdet/modeling/losses/varifocal_loss.py
|
python
|
varifocal_loss
|
(pred,
target,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
use_sigmoid=True)
|
return loss
|
`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
|
`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
|
[
"Varifocal",
"Loss",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"2008",
".",
"13367",
">",
"_"
] |
def varifocal_loss(pred,
target,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
use_sigmoid=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
"""
# pred and target should be of the same size
assert pred.shape == target.shape
if use_sigmoid:
pred_new = F.sigmoid(pred)
else:
pred_new = pred
target = target.cast(pred.dtype)
if iou_weighted:
focal_weight = target * (target > 0.0).cast('float32') + \
alpha * (pred_new - target).abs().pow(gamma) * \
(target <= 0.0).cast('float32')
else:
focal_weight = (target > 0.0).cast('float32') + \
alpha * (pred_new - target).abs().pow(gamma) * \
(target <= 0.0).cast('float32')
if use_sigmoid:
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
else:
loss = F.binary_cross_entropy(
pred, target, reduction='none') * focal_weight
loss = loss.sum(axis=1)
return loss
|
[
"def",
"varifocal_loss",
"(",
"pred",
",",
"target",
",",
"alpha",
"=",
"0.75",
",",
"gamma",
"=",
"2.0",
",",
"iou_weighted",
"=",
"True",
",",
"use_sigmoid",
"=",
"True",
")",
":",
"# pred and target should be of the same size",
"assert",
"pred",
".",
"shape",
"==",
"target",
".",
"shape",
"if",
"use_sigmoid",
":",
"pred_new",
"=",
"F",
".",
"sigmoid",
"(",
"pred",
")",
"else",
":",
"pred_new",
"=",
"pred",
"target",
"=",
"target",
".",
"cast",
"(",
"pred",
".",
"dtype",
")",
"if",
"iou_weighted",
":",
"focal_weight",
"=",
"target",
"*",
"(",
"target",
">",
"0.0",
")",
".",
"cast",
"(",
"'float32'",
")",
"+",
"alpha",
"*",
"(",
"pred_new",
"-",
"target",
")",
".",
"abs",
"(",
")",
".",
"pow",
"(",
"gamma",
")",
"*",
"(",
"target",
"<=",
"0.0",
")",
".",
"cast",
"(",
"'float32'",
")",
"else",
":",
"focal_weight",
"=",
"(",
"target",
">",
"0.0",
")",
".",
"cast",
"(",
"'float32'",
")",
"+",
"alpha",
"*",
"(",
"pred_new",
"-",
"target",
")",
".",
"abs",
"(",
")",
".",
"pow",
"(",
"gamma",
")",
"*",
"(",
"target",
"<=",
"0.0",
")",
".",
"cast",
"(",
"'float32'",
")",
"if",
"use_sigmoid",
":",
"loss",
"=",
"F",
".",
"binary_cross_entropy_with_logits",
"(",
"pred",
",",
"target",
",",
"reduction",
"=",
"'none'",
")",
"*",
"focal_weight",
"else",
":",
"loss",
"=",
"F",
".",
"binary_cross_entropy",
"(",
"pred",
",",
"target",
",",
"reduction",
"=",
"'none'",
")",
"*",
"focal_weight",
"loss",
"=",
"loss",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"return",
"loss"
] |
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/ppdet/modeling/losses/varifocal_loss.py#L31-L75
|
|
horazont/aioxmpp
|
c701e6399c90a6bb9bec0349018a03bd7b644cde
|
aioxmpp/security_layer.py
|
python
|
security_layer
|
(tls_provider, sasl_providers)
|
return result
|
.. deprecated:: 0.6
Replaced by :class:`SecurityLayer`.
Return a configured :class:`SecurityLayer`. `tls_provider` must be a
:class:`STARTTLSProvider`.
The return value can be passed to the constructor of
:class:`~.node.Client`.
Some very basic checking on the input is also performed.
|
.. deprecated:: 0.6
|
[
"..",
"deprecated",
"::",
"0",
".",
"6"
] |
def security_layer(tls_provider, sasl_providers):
"""
.. deprecated:: 0.6
Replaced by :class:`SecurityLayer`.
Return a configured :class:`SecurityLayer`. `tls_provider` must be a
:class:`STARTTLSProvider`.
The return value can be passed to the constructor of
:class:`~.node.Client`.
Some very basic checking on the input is also performed.
"""
sasl_providers = tuple(sasl_providers)
if not sasl_providers:
raise ValueError("At least one SASL provider must be given.")
for sasl_provider in sasl_providers:
sasl_provider.execute # check that sasl_provider has execute method
result = SecurityLayer(
tls_provider.ssl_context_factory,
tls_provider.certificate_verifier_factory,
tls_provider.tls_required,
sasl_providers
)
return result
|
[
"def",
"security_layer",
"(",
"tls_provider",
",",
"sasl_providers",
")",
":",
"sasl_providers",
"=",
"tuple",
"(",
"sasl_providers",
")",
"if",
"not",
"sasl_providers",
":",
"raise",
"ValueError",
"(",
"\"At least one SASL provider must be given.\"",
")",
"for",
"sasl_provider",
"in",
"sasl_providers",
":",
"sasl_provider",
".",
"execute",
"# check that sasl_provider has execute method",
"result",
"=",
"SecurityLayer",
"(",
"tls_provider",
".",
"ssl_context_factory",
",",
"tls_provider",
".",
"certificate_verifier_factory",
",",
"tls_provider",
".",
"tls_required",
",",
"sasl_providers",
")",
"return",
"result"
] |
https://github.com/horazont/aioxmpp/blob/c701e6399c90a6bb9bec0349018a03bd7b644cde/aioxmpp/security_layer.py#L1203-L1232
|
|
khamidou/kite
|
c049faf8522c8346c22c70f2a35a35db6b4a155d
|
src/back/kite/users.py
|
python
|
get_username_from_folder
|
(email_path)
|
return os.path.basename(os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(email_path), os.path.pardir))))
|
This function get the username from a maildir.
ex: get_username_from_folder("/home/kite/Maildirs/testuser/new/1234563.mail") => "testuser"
|
This function get the username from a maildir.
ex: get_username_from_folder("/home/kite/Maildirs/testuser/new/1234563.mail") => "testuser"
|
[
"This",
"function",
"get",
"the",
"username",
"from",
"a",
"maildir",
".",
"ex",
":",
"get_username_from_folder",
"(",
"/",
"home",
"/",
"kite",
"/",
"Maildirs",
"/",
"testuser",
"/",
"new",
"/",
"1234563",
".",
"mail",
")",
"=",
">",
"testuser"
] |
def get_username_from_folder(email_path):
"""This function get the username from a maildir.
ex: get_username_from_folder("/home/kite/Maildirs/testuser/new/1234563.mail") => "testuser" """
# FIXME: refactor this monstrosity
return os.path.basename(os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(email_path), os.path.pardir))))
|
[
"def",
"get_username_from_folder",
"(",
"email_path",
")",
":",
"# FIXME: refactor this monstrosity",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"email_path",
")",
",",
"os",
".",
"path",
".",
"pardir",
")",
")",
")",
")"
] |
https://github.com/khamidou/kite/blob/c049faf8522c8346c22c70f2a35a35db6b4a155d/src/back/kite/users.py#L10-L14
|
|
qiucheng025/zao-
|
3a5edf3607b3a523f95746bc69b688090c76d89a
|
plugins/convert/writer/_base.py
|
python
|
Output.output_filename
|
(self, filename)
|
return out_filename
|
Return the output filename with the correct folder and extension
NB: The plugin must have a config item 'format' that contains the
file extension to use this method
|
Return the output filename with the correct folder and extension
NB: The plugin must have a config item 'format' that contains the
file extension to use this method
|
[
"Return",
"the",
"output",
"filename",
"with",
"the",
"correct",
"folder",
"and",
"extension",
"NB",
":",
"The",
"plugin",
"must",
"have",
"a",
"config",
"item",
"format",
"that",
"contains",
"the",
"file",
"extension",
"to",
"use",
"this",
"method"
] |
def output_filename(self, filename):
""" Return the output filename with the correct folder and extension
NB: The plugin must have a config item 'format' that contains the
file extension to use this method """
filename = os.path.splitext(os.path.basename(filename))[0]
out_filename = "{}.{}".format(filename, self.config["format"])
out_filename = os.path.join(self.output_folder, out_filename)
logger.trace("in filename: '%s', out filename: '%s'", filename, out_filename)
return out_filename
|
[
"def",
"output_filename",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
")",
"[",
"0",
"]",
"out_filename",
"=",
"\"{}.{}\"",
".",
"format",
"(",
"filename",
",",
"self",
".",
"config",
"[",
"\"format\"",
"]",
")",
"out_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_folder",
",",
"out_filename",
")",
"logger",
".",
"trace",
"(",
"\"in filename: '%s', out filename: '%s'\"",
",",
"filename",
",",
"out_filename",
")",
"return",
"out_filename"
] |
https://github.com/qiucheng025/zao-/blob/3a5edf3607b3a523f95746bc69b688090c76d89a/plugins/convert/writer/_base.py#L41-L49
|
|
riptideio/pymodbus
|
c5772b35ae3f29d1947f3ab453d8d00df846459f
|
pymodbus/datastore/context.py
|
python
|
ModbusServerContext.__contains__
|
(self, slave)
|
Check if the given slave is in this list
:param slave: slave The slave to check for existence
:returns: True if the slave exists, False otherwise
|
Check if the given slave is in this list
|
[
"Check",
"if",
"the",
"given",
"slave",
"is",
"in",
"this",
"list"
] |
def __contains__(self, slave):
''' Check if the given slave is in this list
:param slave: slave The slave to check for existence
:returns: True if the slave exists, False otherwise
'''
if self.single and self._slaves:
return True
else:
return slave in self._slaves
|
[
"def",
"__contains__",
"(",
"self",
",",
"slave",
")",
":",
"if",
"self",
".",
"single",
"and",
"self",
".",
"_slaves",
":",
"return",
"True",
"else",
":",
"return",
"slave",
"in",
"self",
".",
"_slaves"
] |
https://github.com/riptideio/pymodbus/blob/c5772b35ae3f29d1947f3ab453d8d00df846459f/pymodbus/datastore/context.py#L132-L141
|
||
yuzhoujr/leetcode
|
6a2ad1fc11225db18f68bfadd21a7419d2cb52a4
|
stack_queue/minStack.py
|
python
|
MinStack.pop
|
(self)
|
[] |
def pop(self):
self.minstack.pop()
self.stack.pop()
|
[
"def",
"pop",
"(",
"self",
")",
":",
"self",
".",
"minstack",
".",
"pop",
"(",
")",
"self",
".",
"stack",
".",
"pop",
"(",
")"
] |
https://github.com/yuzhoujr/leetcode/blob/6a2ad1fc11225db18f68bfadd21a7419d2cb52a4/stack_queue/minStack.py#L40-L42
|
||||
sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
controllers/org.py
|
python
|
booking_mode
|
()
|
return s3_rest_controller()
|
RESTful CRUD controller
|
RESTful CRUD controller
|
[
"RESTful",
"CRUD",
"controller"
] |
def booking_mode():
""" RESTful CRUD controller """
return s3_rest_controller()
|
[
"def",
"booking_mode",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/controllers/org.py#L453-L456
|
|
tensorflow/datasets
|
2e496976d7d45550508395fb2f35cf958c8a3414
|
tensorflow_datasets/core/utils/py_utils.py
|
python
|
rgetattr
|
(obj, attr, *args)
|
return functools.reduce(_getattr, [obj] + attr.split('.'))
|
Get attr that handles dots in attr name.
|
Get attr that handles dots in attr name.
|
[
"Get",
"attr",
"that",
"handles",
"dots",
"in",
"attr",
"name",
"."
] |
def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
|
[
"def",
"rgetattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
":",
"def",
"_getattr",
"(",
"obj",
",",
"attr",
")",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"*",
"args",
")",
"return",
"functools",
".",
"reduce",
"(",
"_getattr",
",",
"[",
"obj",
"]",
"+",
"attr",
".",
"split",
"(",
"'.'",
")",
")"
] |
https://github.com/tensorflow/datasets/blob/2e496976d7d45550508395fb2f35cf958c8a3414/tensorflow_datasets/core/utils/py_utils.py#L402-L408
|
|
AXErunners/electrum-axe
|
7ef05088c0edaf0688fb167df353d6da619ebf2f
|
electrum_axe/commands.py
|
python
|
Commands.getseed
|
(self, password=None)
|
return s
|
Get seed phrase. Print the generation seed of your wallet.
|
Get seed phrase. Print the generation seed of your wallet.
|
[
"Get",
"seed",
"phrase",
".",
"Print",
"the",
"generation",
"seed",
"of",
"your",
"wallet",
"."
] |
def getseed(self, password=None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(password)
return s
|
[
"def",
"getseed",
"(",
"self",
",",
"password",
"=",
"None",
")",
":",
"s",
"=",
"self",
".",
"wallet",
".",
"get_seed",
"(",
"password",
")",
"return",
"s"
] |
https://github.com/AXErunners/electrum-axe/blob/7ef05088c0edaf0688fb167df353d6da619ebf2f/electrum_axe/commands.py#L413-L416
|
|
jpadilla/pyjwt
|
77d791681fa3d0ba65a648de42dd3d671138cb95
|
jwt/api_jwk.py
|
python
|
PyJWKSet.from_dict
|
(obj)
|
return PyJWKSet(keys)
|
[] |
def from_dict(obj):
keys = obj.get("keys", [])
return PyJWKSet(keys)
|
[
"def",
"from_dict",
"(",
"obj",
")",
":",
"keys",
"=",
"obj",
".",
"get",
"(",
"\"keys\"",
",",
"[",
"]",
")",
"return",
"PyJWKSet",
"(",
"keys",
")"
] |
https://github.com/jpadilla/pyjwt/blob/77d791681fa3d0ba65a648de42dd3d671138cb95/jwt/api_jwk.py#L90-L92
|
|||
stephenmcd/cartridge
|
1f2feb5e3d3d246b604b8c2c885161eb29af9bce
|
cartridge/shop/views.py
|
python
|
cart
|
(request, template="shop/cart.html",
cart_formset_class=CartItemFormSet,
discount_form_class=DiscountForm,
extra_context=None)
|
return TemplateResponse(request, template, context)
|
Display cart and handle removing items from the cart.
|
Display cart and handle removing items from the cart.
|
[
"Display",
"cart",
"and",
"handle",
"removing",
"items",
"from",
"the",
"cart",
"."
] |
def cart(request, template="shop/cart.html",
cart_formset_class=CartItemFormSet,
discount_form_class=DiscountForm,
extra_context=None):
"""
Display cart and handle removing items from the cart.
"""
cart_formset = cart_formset_class(instance=request.cart)
discount_form = discount_form_class(request, request.POST or None)
if request.method == "POST":
valid = True
if request.POST.get("update_cart"):
valid = request.cart.has_items()
if not valid:
# Session timed out.
info(request, _("Your cart has expired"))
else:
cart_formset = cart_formset_class(request.POST,
instance=request.cart)
valid = cart_formset.is_valid()
if valid:
cart_formset.save()
recalculate_cart(request)
info(request, _("Cart updated"))
else:
# Reset the cart formset so that the cart
# always indicates the correct quantities.
# The user is shown their invalid quantity
# via the error message, which we need to
# copy over to the new formset here.
errors = cart_formset._errors
cart_formset = cart_formset_class(instance=request.cart)
cart_formset._errors = errors
else:
valid = discount_form.is_valid()
if valid:
discount_form.set_discount()
# Potentially need to set shipping if a discount code
# was previously entered with free shipping, and then
# another was entered (replacing the old) without
# free shipping, *and* the user has already progressed
# to the final checkout step, which they'd go straight
# to when returning to checkout, bypassing billing and
# shipping details step where shipping is normally set.
recalculate_cart(request)
if valid:
return redirect("shop_cart")
context = {"cart_formset": cart_formset}
context.update(extra_context or {})
settings.clear_cache()
if (settings.SHOP_DISCOUNT_FIELD_IN_CART and
DiscountCode.objects.active().exists()):
context["discount_form"] = discount_form
return TemplateResponse(request, template, context)
|
[
"def",
"cart",
"(",
"request",
",",
"template",
"=",
"\"shop/cart.html\"",
",",
"cart_formset_class",
"=",
"CartItemFormSet",
",",
"discount_form_class",
"=",
"DiscountForm",
",",
"extra_context",
"=",
"None",
")",
":",
"cart_formset",
"=",
"cart_formset_class",
"(",
"instance",
"=",
"request",
".",
"cart",
")",
"discount_form",
"=",
"discount_form_class",
"(",
"request",
",",
"request",
".",
"POST",
"or",
"None",
")",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"valid",
"=",
"True",
"if",
"request",
".",
"POST",
".",
"get",
"(",
"\"update_cart\"",
")",
":",
"valid",
"=",
"request",
".",
"cart",
".",
"has_items",
"(",
")",
"if",
"not",
"valid",
":",
"# Session timed out.",
"info",
"(",
"request",
",",
"_",
"(",
"\"Your cart has expired\"",
")",
")",
"else",
":",
"cart_formset",
"=",
"cart_formset_class",
"(",
"request",
".",
"POST",
",",
"instance",
"=",
"request",
".",
"cart",
")",
"valid",
"=",
"cart_formset",
".",
"is_valid",
"(",
")",
"if",
"valid",
":",
"cart_formset",
".",
"save",
"(",
")",
"recalculate_cart",
"(",
"request",
")",
"info",
"(",
"request",
",",
"_",
"(",
"\"Cart updated\"",
")",
")",
"else",
":",
"# Reset the cart formset so that the cart",
"# always indicates the correct quantities.",
"# The user is shown their invalid quantity",
"# via the error message, which we need to",
"# copy over to the new formset here.",
"errors",
"=",
"cart_formset",
".",
"_errors",
"cart_formset",
"=",
"cart_formset_class",
"(",
"instance",
"=",
"request",
".",
"cart",
")",
"cart_formset",
".",
"_errors",
"=",
"errors",
"else",
":",
"valid",
"=",
"discount_form",
".",
"is_valid",
"(",
")",
"if",
"valid",
":",
"discount_form",
".",
"set_discount",
"(",
")",
"# Potentially need to set shipping if a discount code",
"# was previously entered with free shipping, and then",
"# another was entered (replacing the old) without",
"# free shipping, *and* the user has already progressed",
"# to the final checkout step, which they'd go straight",
"# to when returning to checkout, bypassing billing and",
"# shipping details step where shipping is normally set.",
"recalculate_cart",
"(",
"request",
")",
"if",
"valid",
":",
"return",
"redirect",
"(",
"\"shop_cart\"",
")",
"context",
"=",
"{",
"\"cart_formset\"",
":",
"cart_formset",
"}",
"context",
".",
"update",
"(",
"extra_context",
"or",
"{",
"}",
")",
"settings",
".",
"clear_cache",
"(",
")",
"if",
"(",
"settings",
".",
"SHOP_DISCOUNT_FIELD_IN_CART",
"and",
"DiscountCode",
".",
"objects",
".",
"active",
"(",
")",
".",
"exists",
"(",
")",
")",
":",
"context",
"[",
"\"discount_form\"",
"]",
"=",
"discount_form",
"return",
"TemplateResponse",
"(",
"request",
",",
"template",
",",
"context",
")"
] |
https://github.com/stephenmcd/cartridge/blob/1f2feb5e3d3d246b604b8c2c885161eb29af9bce/cartridge/shop/views.py#L156-L209
|
|
DingGuodong/LinuxBashShellScriptForOps
|
d5727b985f920292a10698a3c9751d5dff5fc1a3
|
projects/WeChatOps/WXBizMsgCrypt/WXBizMsgCrypt.py
|
python
|
XMLParse.extract
|
(xmltext)
|
提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
|
提取出xml数据包中的加密消息
|
[
"提取出xml数据包中的加密消息"
] |
def extract(xmltext):
"""提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
"""
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
touser_name = xml_tree.find("ToUserName") # 开发者微信号
return WXBizMsgCrypt_OK, encrypt.text, touser_name.text
except Exception as e:
# print e
return WXBizMsgCrypt_ParseXml_Error, None, None
|
[
"def",
"extract",
"(",
"xmltext",
")",
":",
"try",
":",
"xml_tree",
"=",
"ET",
".",
"fromstring",
"(",
"xmltext",
")",
"encrypt",
"=",
"xml_tree",
".",
"find",
"(",
"\"Encrypt\"",
")",
"touser_name",
"=",
"xml_tree",
".",
"find",
"(",
"\"ToUserName\"",
")",
"# 开发者微信号",
"return",
"WXBizMsgCrypt_OK",
",",
"encrypt",
".",
"text",
",",
"touser_name",
".",
"text",
"except",
"Exception",
"as",
"e",
":",
"# print e",
"return",
"WXBizMsgCrypt_ParseXml_Error",
",",
"None",
",",
"None"
] |
https://github.com/DingGuodong/LinuxBashShellScriptForOps/blob/d5727b985f920292a10698a3c9751d5dff5fc1a3/projects/WeChatOps/WXBizMsgCrypt/WXBizMsgCrypt.py#L118-L130
|
||
mtianyan/OnlineMooc
|
51a910e27c8d2808a8a5198b4db31f463e646bf6
|
tyadmin_api/auto_serializers.py
|
python
|
ChapterListSerializer.get_ty_options_display_txt
|
(obj)
|
return str(obj)
|
[] |
def get_ty_options_display_txt(obj):
return str(obj)
|
[
"def",
"get_ty_options_display_txt",
"(",
"obj",
")",
":",
"return",
"str",
"(",
"obj",
")"
] |
https://github.com/mtianyan/OnlineMooc/blob/51a910e27c8d2808a8a5198b4db31f463e646bf6/tyadmin_api/auto_serializers.py#L1837-L1838
|
|||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
ansible/roles/lib_git/build/lib/ssh_agent.py
|
python
|
SshAgent.add_key
|
(self, key)
|
Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
|
Add a key to the running agent.
|
[
"Add",
"a",
"key",
"to",
"the",
"running",
"agent",
"."
] |
def add_key(self, key):
"""Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
"""
#if self.ssh_agent.poll() is None:
# raise SshAgentException("Unable to add ssh key. Did agent die?")
named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." + str(random.getrandbits(64)))
try:
os.mkfifo(named_pipe_path, 0600)
except OSError, exception:
print "Failed to create FIFO: %s" % exception
devnull = open(os.devnull, 'w')
ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull)
fifo = open(named_pipe_path, 'w')
print >> fifo, key
fifo.close()
#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(
start_time = datetime.datetime.now()
while ssh_add.poll() is None:
if (datetime.datetime.now() - start_time).total_seconds() > 5:
try:
ssh_add.kill()
except OSError:
pass
raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?")
time.sleep(0.1)
os.remove(named_pipe_path)
|
[
"def",
"add_key",
"(",
"self",
",",
"key",
")",
":",
"#if self.ssh_agent.poll() is None:",
"# raise SshAgentException(\"Unable to add ssh key. Did agent die?\")",
"named_pipe_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"ssh_auth_sock_dir",
",",
"\"keypipe.\"",
"+",
"str",
"(",
"random",
".",
"getrandbits",
"(",
"64",
")",
")",
")",
"try",
":",
"os",
".",
"mkfifo",
"(",
"named_pipe_path",
",",
"0600",
")",
"except",
"OSError",
",",
"exception",
":",
"print",
"\"Failed to create FIFO: %s\"",
"%",
"exception",
"devnull",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"ssh_add",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"ssh-add\"",
",",
"named_pipe_path",
"]",
",",
"stdout",
"=",
"devnull",
",",
"stderr",
"=",
"devnull",
")",
"fifo",
"=",
"open",
"(",
"named_pipe_path",
",",
"'w'",
")",
"print",
">>",
"fifo",
",",
"key",
"fifo",
".",
"close",
"(",
")",
"#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(",
"start_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"while",
"ssh_add",
".",
"poll",
"(",
")",
"is",
"None",
":",
"if",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"start_time",
")",
".",
"total_seconds",
"(",
")",
">",
"5",
":",
"try",
":",
"ssh_add",
".",
"kill",
"(",
")",
"except",
"OSError",
":",
"pass",
"raise",
"SshAgentException",
"(",
"\"Unable to add ssh key. Timed out. Does key have a passphrase?\"",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"os",
".",
"remove",
"(",
"named_pipe_path",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_git/build/lib/ssh_agent.py#L95-L130
|
||
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
tools/sqlmap/thirdparty/oset/_abc.py
|
python
|
ABCMeta._dump_registry
|
(cls, file=None)
|
Debug helper to print the ABC registry.
|
Debug helper to print the ABC registry.
|
[
"Debug",
"helper",
"to",
"print",
"the",
"ABC",
"registry",
"."
] |
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
|
[
"def",
"_dump_registry",
"(",
"cls",
",",
"file",
"=",
"None",
")",
":",
"print",
">>",
"file",
",",
"\"Class: %s.%s\"",
"%",
"(",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
")",
"print",
">>",
"file",
",",
"\"Inv.counter: %s\"",
"%",
"ABCMeta",
".",
"_abc_invalidation_counter",
"for",
"name",
"in",
"sorted",
"(",
"cls",
".",
"__dict__",
".",
"keys",
"(",
")",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"_abc_\"",
")",
":",
"value",
"=",
"getattr",
"(",
"cls",
",",
"name",
")",
"print",
">>",
"file",
",",
"\"%s: %r\"",
"%",
"(",
"name",
",",
"value",
")"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/tools/sqlmap/thirdparty/oset/_abc.py#L97-L104
|
||
pwnieexpress/pwn_plug_sources
|
1a23324f5dc2c3de20f9c810269b6a29b2758cad
|
src/metagoofil/hachoir_parser/container/swf.py
|
python
|
parseExport
|
(parent, size)
|
[] |
def parseExport(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield Export(parent, "export[]")
|
[
"def",
"parseExport",
"(",
"parent",
",",
"size",
")",
":",
"yield",
"UInt16",
"(",
"parent",
",",
"\"count\"",
")",
"for",
"index",
"in",
"xrange",
"(",
"parent",
"[",
"\"count\"",
"]",
".",
"value",
")",
":",
"yield",
"Export",
"(",
"parent",
",",
"\"export[]\"",
")"
] |
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/metagoofil/hachoir_parser/container/swf.py#L204-L207
|
||||
swz30/CycleISP
|
76a52aa1d0c45f41aa7cdaa7deebecb10530c748
|
utils/dataset_utils.py
|
python
|
bayer_unify
|
(raw: np.ndarray, rgb: np.ndarray, input_pattern: str, target_pattern: str, mode: str)
|
return out1, out2
|
Convert a bayer raw image from one bayer pattern to another.
Parameters
----------
raw : np.ndarray in shape (H, W)
Bayer raw image to be unified.
input_pattern : {"RGGB", "BGGR", "GRBG", "GBRG"}
The bayer pattern of the input image.
target_pattern : {"RGGB", "BGGR", "GRBG", "GBRG"}
The expected output pattern.
mode: {"crop", "pad"}
The way to handle submosaic shift. "crop" abandons the outmost pixels,
and "pad" introduces extra pixels. Use "crop" in training and "pad" in
testing.
|
Convert a bayer raw image from one bayer pattern to another.
|
[
"Convert",
"a",
"bayer",
"raw",
"image",
"from",
"one",
"bayer",
"pattern",
"to",
"another",
"."
] |
def bayer_unify(raw: np.ndarray, rgb: np.ndarray, input_pattern: str, target_pattern: str, mode: str) -> np.ndarray:
"""
Convert a bayer raw image from one bayer pattern to another.
Parameters
----------
raw : np.ndarray in shape (H, W)
Bayer raw image to be unified.
input_pattern : {"RGGB", "BGGR", "GRBG", "GBRG"}
The bayer pattern of the input image.
target_pattern : {"RGGB", "BGGR", "GRBG", "GBRG"}
The expected output pattern.
mode: {"crop", "pad"}
The way to handle submosaic shift. "crop" abandons the outmost pixels,
and "pad" introduces extra pixels. Use "crop" in training and "pad" in
testing.
"""
if input_pattern not in BAYER_PATTERNS:
raise ValueError('Unknown input bayer pattern!')
if target_pattern not in BAYER_PATTERNS:
raise ValueError('Unknown target bayer pattern!')
if mode not in NORMALIZATION_MODE:
raise ValueError('Unknown normalization mode!')
if not isinstance(raw, np.ndarray) or len(raw.shape) != 2:
raise ValueError('raw should be a 2-dimensional numpy.ndarray!')
if not isinstance(rgb, np.ndarray) or len(rgb.shape) != 3:
raise ValueError('rgb should be a 3-dimensional numpy.ndarray!')
if input_pattern == target_pattern:
h_offset, w_offset = 0, 0
elif input_pattern[0] == target_pattern[2] and input_pattern[1] == target_pattern[3]:
h_offset, w_offset = 1, 0
elif input_pattern[0] == target_pattern[1] and input_pattern[2] == target_pattern[3]:
h_offset, w_offset = 0, 1
elif input_pattern[0] == target_pattern[3] and input_pattern[1] == target_pattern[2]:
h_offset, w_offset = 1, 1
else: # This is not happening in ["RGGB", "BGGR", "GRBG", "GBRG"]
raise RuntimeError('Unexpected pair of input and target bayer pattern!')
if mode == "pad":
out1 = np.pad(raw, [[h_offset, h_offset], [w_offset, w_offset]], 'reflect')
out2_1 = np.pad(rgb[:,:,0], [[h_offset, h_offset], [w_offset, w_offset]], 'reflect')
out2_2 = np.pad(rgb[:,:,1], [[h_offset, h_offset], [w_offset, w_offset]], 'reflect')
out2_3 = np.pad(rgb[:,:,2], [[h_offset, h_offset], [w_offset, w_offset]], 'reflect')
out2 = np.dstack((out2_1, out2_2, out2_3))
elif mode == "crop":
h, w = raw.shape
out1 = raw[h_offset:h - h_offset, w_offset:w - w_offset]
out2 = rgb[h_offset:h - h_offset, w_offset:w - w_offset, :]
else:
raise ValueError('Unknown normalization mode!')
return out1, out2
|
[
"def",
"bayer_unify",
"(",
"raw",
":",
"np",
".",
"ndarray",
",",
"rgb",
":",
"np",
".",
"ndarray",
",",
"input_pattern",
":",
"str",
",",
"target_pattern",
":",
"str",
",",
"mode",
":",
"str",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"input_pattern",
"not",
"in",
"BAYER_PATTERNS",
":",
"raise",
"ValueError",
"(",
"'Unknown input bayer pattern!'",
")",
"if",
"target_pattern",
"not",
"in",
"BAYER_PATTERNS",
":",
"raise",
"ValueError",
"(",
"'Unknown target bayer pattern!'",
")",
"if",
"mode",
"not",
"in",
"NORMALIZATION_MODE",
":",
"raise",
"ValueError",
"(",
"'Unknown normalization mode!'",
")",
"if",
"not",
"isinstance",
"(",
"raw",
",",
"np",
".",
"ndarray",
")",
"or",
"len",
"(",
"raw",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'raw should be a 2-dimensional numpy.ndarray!'",
")",
"if",
"not",
"isinstance",
"(",
"rgb",
",",
"np",
".",
"ndarray",
")",
"or",
"len",
"(",
"rgb",
".",
"shape",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'rgb should be a 3-dimensional numpy.ndarray!'",
")",
"if",
"input_pattern",
"==",
"target_pattern",
":",
"h_offset",
",",
"w_offset",
"=",
"0",
",",
"0",
"elif",
"input_pattern",
"[",
"0",
"]",
"==",
"target_pattern",
"[",
"2",
"]",
"and",
"input_pattern",
"[",
"1",
"]",
"==",
"target_pattern",
"[",
"3",
"]",
":",
"h_offset",
",",
"w_offset",
"=",
"1",
",",
"0",
"elif",
"input_pattern",
"[",
"0",
"]",
"==",
"target_pattern",
"[",
"1",
"]",
"and",
"input_pattern",
"[",
"2",
"]",
"==",
"target_pattern",
"[",
"3",
"]",
":",
"h_offset",
",",
"w_offset",
"=",
"0",
",",
"1",
"elif",
"input_pattern",
"[",
"0",
"]",
"==",
"target_pattern",
"[",
"3",
"]",
"and",
"input_pattern",
"[",
"1",
"]",
"==",
"target_pattern",
"[",
"2",
"]",
":",
"h_offset",
",",
"w_offset",
"=",
"1",
",",
"1",
"else",
":",
"# This is not happening in [\"RGGB\", \"BGGR\", \"GRBG\", \"GBRG\"]",
"raise",
"RuntimeError",
"(",
"'Unexpected pair of input and target bayer pattern!'",
")",
"if",
"mode",
"==",
"\"pad\"",
":",
"out1",
"=",
"np",
".",
"pad",
"(",
"raw",
",",
"[",
"[",
"h_offset",
",",
"h_offset",
"]",
",",
"[",
"w_offset",
",",
"w_offset",
"]",
"]",
",",
"'reflect'",
")",
"out2_1",
"=",
"np",
".",
"pad",
"(",
"rgb",
"[",
":",
",",
":",
",",
"0",
"]",
",",
"[",
"[",
"h_offset",
",",
"h_offset",
"]",
",",
"[",
"w_offset",
",",
"w_offset",
"]",
"]",
",",
"'reflect'",
")",
"out2_2",
"=",
"np",
".",
"pad",
"(",
"rgb",
"[",
":",
",",
":",
",",
"1",
"]",
",",
"[",
"[",
"h_offset",
",",
"h_offset",
"]",
",",
"[",
"w_offset",
",",
"w_offset",
"]",
"]",
",",
"'reflect'",
")",
"out2_3",
"=",
"np",
".",
"pad",
"(",
"rgb",
"[",
":",
",",
":",
",",
"2",
"]",
",",
"[",
"[",
"h_offset",
",",
"h_offset",
"]",
",",
"[",
"w_offset",
",",
"w_offset",
"]",
"]",
",",
"'reflect'",
")",
"out2",
"=",
"np",
".",
"dstack",
"(",
"(",
"out2_1",
",",
"out2_2",
",",
"out2_3",
")",
")",
"elif",
"mode",
"==",
"\"crop\"",
":",
"h",
",",
"w",
"=",
"raw",
".",
"shape",
"out1",
"=",
"raw",
"[",
"h_offset",
":",
"h",
"-",
"h_offset",
",",
"w_offset",
":",
"w",
"-",
"w_offset",
"]",
"out2",
"=",
"rgb",
"[",
"h_offset",
":",
"h",
"-",
"h_offset",
",",
"w_offset",
":",
"w",
"-",
"w_offset",
",",
":",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown normalization mode!'",
")",
"return",
"out1",
",",
"out2"
] |
https://github.com/swz30/CycleISP/blob/76a52aa1d0c45f41aa7cdaa7deebecb10530c748/utils/dataset_utils.py#L8-L61
|
|
googleads/google-ads-python
|
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
|
google/ads/googleads/v9/services/services/shared_criterion_service/client.py
|
python
|
SharedCriterionServiceClient.parse_common_location_path
|
(path: str)
|
return m.groupdict() if m else {}
|
Parse a location path into its component segments.
|
Parse a location path into its component segments.
|
[
"Parse",
"a",
"location",
"path",
"into",
"its",
"component",
"segments",
"."
] |
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
|
[
"def",
"parse_common_location_path",
"(",
"path",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r\"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$\"",
",",
"path",
")",
"return",
"m",
".",
"groupdict",
"(",
")",
"if",
"m",
"else",
"{",
"}"
] |
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/shared_criterion_service/client.py#L268-L273
|
|
pulb/mailnag
|
7ef91050cf3ccea2eeca13aefdbac29716806487
|
Mailnag/common/imaplib2.py
|
python
|
IMAP4.store
|
(self, message_set, command, flags, **kw)
|
return self._simple_command('STORE', message_set, command, flags, **kw)
|
(typ, [data]) = store(message_set, command, flags)
Alters flag dispositions for messages in mailbox.
|
(typ, [data]) = store(message_set, command, flags)
Alters flag dispositions for messages in mailbox.
|
[
"(",
"typ",
"[",
"data",
"]",
")",
"=",
"store",
"(",
"message_set",
"command",
"flags",
")",
"Alters",
"flag",
"dispositions",
"for",
"messages",
"in",
"mailbox",
"."
] |
def store(self, message_set, command, flags, **kw):
"""(typ, [data]) = store(message_set, command, flags)
Alters flag dispositions for messages in mailbox."""
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags # Avoid quoting the flags
kw['untagged_response'] = 'FETCH'
return self._simple_command('STORE', message_set, command, flags, **kw)
|
[
"def",
"store",
"(",
"self",
",",
"message_set",
",",
"command",
",",
"flags",
",",
"*",
"*",
"kw",
")",
":",
"if",
"(",
"flags",
"[",
"0",
"]",
",",
"flags",
"[",
"-",
"1",
"]",
")",
"!=",
"(",
"'('",
",",
"')'",
")",
":",
"flags",
"=",
"'(%s)'",
"%",
"flags",
"# Avoid quoting the flags",
"kw",
"[",
"'untagged_response'",
"]",
"=",
"'FETCH'",
"return",
"self",
".",
"_simple_command",
"(",
"'STORE'",
",",
"message_set",
",",
"command",
",",
"flags",
",",
"*",
"*",
"kw",
")"
] |
https://github.com/pulb/mailnag/blob/7ef91050cf3ccea2eeca13aefdbac29716806487/Mailnag/common/imaplib2.py#L1169-L1176
|
|
foamliu/InsightFace-v2
|
e07b738adecb69b81ac9b8750db964cee673e175
|
optimizer.py
|
python
|
InsightFaceOptimizer.zero_grad
|
(self)
|
[] |
def zero_grad(self):
self.optimizer.zero_grad()
|
[
"def",
"zero_grad",
"(",
"self",
")",
":",
"self",
".",
"optimizer",
".",
"zero_grad",
"(",
")"
] |
https://github.com/foamliu/InsightFace-v2/blob/e07b738adecb69b81ac9b8750db964cee673e175/optimizer.py#L9-L10
|
||||
debian-calibre/calibre
|
020fc81d3936a64b2ac51459ecb796666ab6a051
|
src/calibre/ebooks/rtf2xml/colors.py
|
python
|
Colors.__default_color_func
|
(self, line)
|
Requires:
line
Returns:
nothing
Logic:
get the hex number from the line and add it to the color string.
|
Requires:
line
Returns:
nothing
Logic:
get the hex number from the line and add it to the color string.
|
[
"Requires",
":",
"line",
"Returns",
":",
"nothing",
"Logic",
":",
"get",
"the",
"hex",
"number",
"from",
"the",
"line",
"and",
"add",
"it",
"to",
"the",
"color",
"string",
"."
] |
def __default_color_func(self, line):
"""
Requires:
line
Returns:
nothing
Logic:
get the hex number from the line and add it to the color string.
"""
hex_num = line[-3:-1]
self.__color_string += hex_num
|
[
"def",
"__default_color_func",
"(",
"self",
",",
"line",
")",
":",
"hex_num",
"=",
"line",
"[",
"-",
"3",
":",
"-",
"1",
"]",
"self",
".",
"__color_string",
"+=",
"hex_num"
] |
https://github.com/debian-calibre/calibre/blob/020fc81d3936a64b2ac51459ecb796666ab6a051/src/calibre/ebooks/rtf2xml/colors.py#L84-L94
|
||
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
ambari/datadog_checks/ambari/config_models/defaults.py
|
python
|
instance_aws_service
|
(field, value)
|
return get_default_field_value(field, value)
|
[] |
def instance_aws_service(field, value):
return get_default_field_value(field, value)
|
[
"def",
"instance_aws_service",
"(",
"field",
",",
"value",
")",
":",
"return",
"get_default_field_value",
"(",
"field",
",",
"value",
")"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/ambari/datadog_checks/ambari/config_models/defaults.py#L57-L58
|
|||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/misc/prandom.py
|
python
|
gammavariate
|
(alpha, beta)
|
return _pyrand().gammavariate(alpha, beta)
|
r"""
Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
EXAMPLES::
sage: sample = gammavariate(1.0, 3.0); sample # random
6.58282586130638
sage: sample > 0
True
sage: sample = gammavariate(3.0, 1.0); sample # random
3.07801512341612
sage: sample > 0
True
|
r"""
Gamma distribution. Not the gamma function!
|
[
"r",
"Gamma",
"distribution",
".",
"Not",
"the",
"gamma",
"function!"
] |
def gammavariate(alpha, beta):
r"""
Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
EXAMPLES::
sage: sample = gammavariate(1.0, 3.0); sample # random
6.58282586130638
sage: sample > 0
True
sage: sample = gammavariate(3.0, 1.0); sample # random
3.07801512341612
sage: sample > 0
True
"""
return _pyrand().gammavariate(alpha, beta)
|
[
"def",
"gammavariate",
"(",
"alpha",
",",
"beta",
")",
":",
"return",
"_pyrand",
"(",
")",
".",
"gammavariate",
"(",
"alpha",
",",
"beta",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/misc/prandom.py#L285-L302
|
|
ConvLab/ConvLab
|
a04582a77537c1a706fbf64715baa9ad0be1301a
|
convlab/lib/util.py
|
python
|
get_ts
|
(pattern=FILE_TS_FORMAT)
|
return ts
|
Get current ts, defaults to format used for filename
@param {str} pattern To format the ts
@returns {str} ts
@example
util.get_ts()
# => '2017_10_17_084739'
|
Get current ts, defaults to format used for filename
@param {str} pattern To format the ts
@returns {str} ts
@example
|
[
"Get",
"current",
"ts",
"defaults",
"to",
"format",
"used",
"for",
"filename",
"@param",
"{",
"str",
"}",
"pattern",
"To",
"format",
"the",
"ts",
"@returns",
"{",
"str",
"}",
"ts",
"@example"
] |
def get_ts(pattern=FILE_TS_FORMAT):
'''
Get current ts, defaults to format used for filename
@param {str} pattern To format the ts
@returns {str} ts
@example
util.get_ts()
# => '2017_10_17_084739'
'''
ts_obj = datetime.now()
ts = ts_obj.strftime(pattern)
assert RE_FILE_TS.search(ts)
return ts
|
[
"def",
"get_ts",
"(",
"pattern",
"=",
"FILE_TS_FORMAT",
")",
":",
"ts_obj",
"=",
"datetime",
".",
"now",
"(",
")",
"ts",
"=",
"ts_obj",
".",
"strftime",
"(",
"pattern",
")",
"assert",
"RE_FILE_TS",
".",
"search",
"(",
"ts",
")",
"return",
"ts"
] |
https://github.com/ConvLab/ConvLab/blob/a04582a77537c1a706fbf64715baa9ad0be1301a/convlab/lib/util.py#L233-L246
|
|
JimmXinu/FanFicFare
|
bc149a2deb2636320fe50a3e374af6eef8f61889
|
fanficfare/fetcher.py
|
python
|
BasicCache.set_autosave
|
(self,autosave=False,filename=None)
|
[] |
def set_autosave(self,autosave=False,filename=None):
self.autosave = autosave
self.filename = filename
|
[
"def",
"set_autosave",
"(",
"self",
",",
"autosave",
"=",
"False",
",",
"filename",
"=",
"None",
")",
":",
"self",
".",
"autosave",
"=",
"autosave",
"self",
".",
"filename",
"=",
"filename"
] |
https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/fanficfare/fetcher.py#L175-L177
|
||||
mandiant/flare-ida
|
b5b3993bdd224b7d6d7bd524045195550c156c44
|
examples/argtracker_example1.py
|
python
|
getFunctionArgumentCount
|
(ea)
|
return argCount
|
Bit of a hack, since IDA doesn't seem to have a good way to get this information.
Gets the frame for a given function, and counts named members following the 'r'
member.
Note: IDA won't create a frame member for an unreferenced function arg... so you've
been warned.
|
Bit of a hack, since IDA doesn't seem to have a good way to get this information.
Gets the frame for a given function, and counts named members following the 'r'
member.
Note: IDA won't create a frame member for an unreferenced function arg... so you've
been warned.
|
[
"Bit",
"of",
"a",
"hack",
"since",
"IDA",
"doesn",
"t",
"seem",
"to",
"have",
"a",
"good",
"way",
"to",
"get",
"this",
"information",
".",
"Gets",
"the",
"frame",
"for",
"a",
"given",
"function",
"and",
"counts",
"named",
"members",
"following",
"the",
"r",
"member",
".",
"Note",
":",
"IDA",
"won",
"t",
"create",
"a",
"frame",
"member",
"for",
"an",
"unreferenced",
"function",
"arg",
"...",
"so",
"you",
"ve",
"been",
"warned",
"."
] |
def getFunctionArgumentCount(ea):
'''
Bit of a hack, since IDA doesn't seem to have a good way to get this information.
Gets the frame for a given function, and counts named members following the 'r'
member.
Note: IDA won't create a frame member for an unreferenced function arg... so you've
been warned.
'''
rFound = False
argCount = 0
sid = idc.GetFrame(ea)
midx = idc.GetFirstMember(sid)
while midx != idc.BADADDR:
name = idc.GetMemberName(sid, midx)
if rFound and name is not None:
argCount += 1
#print 'Found arg at 0x%x: "%s"' % (midx, name)
elif name == ' r':
#print 'Found r at 0x%x:' % midx
rFound = True
else:
#print 'Found nonarg at 0x%x: "%s"' % (midx, name)
pass
midx = idc.GetStrucNextOff(sid, midx)
return argCount
|
[
"def",
"getFunctionArgumentCount",
"(",
"ea",
")",
":",
"rFound",
"=",
"False",
"argCount",
"=",
"0",
"sid",
"=",
"idc",
".",
"GetFrame",
"(",
"ea",
")",
"midx",
"=",
"idc",
".",
"GetFirstMember",
"(",
"sid",
")",
"while",
"midx",
"!=",
"idc",
".",
"BADADDR",
":",
"name",
"=",
"idc",
".",
"GetMemberName",
"(",
"sid",
",",
"midx",
")",
"if",
"rFound",
"and",
"name",
"is",
"not",
"None",
":",
"argCount",
"+=",
"1",
"#print 'Found arg at 0x%x: \"%s\"' % (midx, name)",
"elif",
"name",
"==",
"' r'",
":",
"#print 'Found r at 0x%x:' % midx",
"rFound",
"=",
"True",
"else",
":",
"#print 'Found nonarg at 0x%x: \"%s\"' % (midx, name)",
"pass",
"midx",
"=",
"idc",
".",
"GetStrucNextOff",
"(",
"sid",
",",
"midx",
")",
"return",
"argCount"
] |
https://github.com/mandiant/flare-ida/blob/b5b3993bdd224b7d6d7bd524045195550c156c44/examples/argtracker_example1.py#L51-L75
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/ipykernel/gui/gtkembed.py
|
python
|
GTKEmbed._wire_kernel
|
(self)
|
return False
|
Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
|
Initializes the kernel inside GTK.
|
[
"Initializes",
"the",
"kernel",
"inside",
"GTK",
"."
] |
def _wire_kernel(self):
"""Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
"""
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
gobject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
|
[
"def",
"_wire_kernel",
"(",
"self",
")",
":",
"self",
".",
"gtk_main",
",",
"self",
".",
"gtk_main_quit",
"=",
"self",
".",
"_hijack_gtk",
"(",
")",
"gobject",
".",
"timeout_add",
"(",
"int",
"(",
"1000",
"*",
"self",
".",
"kernel",
".",
"_poll_interval",
")",
",",
"self",
".",
"iterate_kernel",
")",
"return",
"False"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/ipykernel/gui/gtkembed.py#L40-L49
|
|
w3h/isf
|
6faf0a3df185465ec17369c90ccc16e2a03a1870
|
lib/thirdparty/scapy/contrib/gsm_um.py
|
python
|
holdReject
|
()
|
return packet
|
HOLD REJECT Section 9.3.12
|
HOLD REJECT Section 9.3.12
|
[
"HOLD",
"REJECT",
"Section",
"9",
".",
"3",
".",
"12"
] |
def holdReject():
"""HOLD REJECT Section 9.3.12"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x1a) # 00011010
c = Cause()
packet = a / b / c
return packet
|
[
"def",
"holdReject",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x1a",
")",
"# 00011010",
"c",
"=",
"Cause",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"return",
"packet"
] |
https://github.com/w3h/isf/blob/6faf0a3df185465ec17369c90ccc16e2a03a1870/lib/thirdparty/scapy/contrib/gsm_um.py#L1811-L1817
|
|
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/security/linking/accountlinker.py
|
python
|
BasicAccountLinkerService.__init__
|
(self, storage_engine)
|
[] |
def __init__(self, storage_engine):
self._storage_engine = storage_engine
|
[
"def",
"__init__",
"(",
"self",
",",
"storage_engine",
")",
":",
"self",
".",
"_storage_engine",
"=",
"storage_engine"
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/security/linking/accountlinker.py#L68-L69
|
||||
JiYou/openstack
|
8607dd488bde0905044b303eb6e52bdea6806923
|
packages/source/cinder/cinder/volume/drivers/storwize_svc.py
|
python
|
StorwizeSVCDriver._get_chap_secret_for_host
|
(self, host_name)
|
return chap_secret
|
Return the CHAP secret for the given host.
|
Return the CHAP secret for the given host.
|
[
"Return",
"the",
"CHAP",
"secret",
"for",
"the",
"given",
"host",
"."
] |
def _get_chap_secret_for_host(self, host_name):
"""Return the CHAP secret for the given host."""
LOG.debug(_('enter: _get_chap_secret_for_host: host name %s')
% host_name)
ssh_cmd = 'lsiscsiauth -delim !'
out, err = self._run_ssh(ssh_cmd)
if not len(out.strip()):
return None
host_lines = out.strip().split('\n')
self._assert_ssh_return(len(host_lines), '_get_chap_secret_for_host',
ssh_cmd, out, err)
header = host_lines.pop(0).split('!')
self._assert_ssh_return('name' in header, '_get_chap_secret_for_host',
ssh_cmd, out, err)
self._assert_ssh_return('iscsi_auth_method' in header,
'_get_chap_secret_for_host', ssh_cmd, out, err)
self._assert_ssh_return('iscsi_chap_secret' in header,
'_get_chap_secret_for_host', ssh_cmd, out, err)
name_index = header.index('name')
method_index = header.index('iscsi_auth_method')
secret_index = header.index('iscsi_chap_secret')
chap_secret = None
host_found = False
for line in host_lines:
info = line.split('!')
if info[name_index] == host_name:
host_found = True
if info[method_index] == 'chap':
chap_secret = info[secret_index]
self._assert_ssh_return(host_found, '_get_chap_secret_for_host',
ssh_cmd, out, err)
LOG.debug(_('leave: _get_chap_secret_for_host: host name '
'%(host_name)s with secret %(chap_secret)s')
% {'host_name': host_name, 'chap_secret': chap_secret})
return chap_secret
|
[
"def",
"_get_chap_secret_for_host",
"(",
"self",
",",
"host_name",
")",
":",
"LOG",
".",
"debug",
"(",
"_",
"(",
"'enter: _get_chap_secret_for_host: host name %s'",
")",
"%",
"host_name",
")",
"ssh_cmd",
"=",
"'lsiscsiauth -delim !'",
"out",
",",
"err",
"=",
"self",
".",
"_run_ssh",
"(",
"ssh_cmd",
")",
"if",
"not",
"len",
"(",
"out",
".",
"strip",
"(",
")",
")",
":",
"return",
"None",
"host_lines",
"=",
"out",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"self",
".",
"_assert_ssh_return",
"(",
"len",
"(",
"host_lines",
")",
",",
"'_get_chap_secret_for_host'",
",",
"ssh_cmd",
",",
"out",
",",
"err",
")",
"header",
"=",
"host_lines",
".",
"pop",
"(",
"0",
")",
".",
"split",
"(",
"'!'",
")",
"self",
".",
"_assert_ssh_return",
"(",
"'name'",
"in",
"header",
",",
"'_get_chap_secret_for_host'",
",",
"ssh_cmd",
",",
"out",
",",
"err",
")",
"self",
".",
"_assert_ssh_return",
"(",
"'iscsi_auth_method'",
"in",
"header",
",",
"'_get_chap_secret_for_host'",
",",
"ssh_cmd",
",",
"out",
",",
"err",
")",
"self",
".",
"_assert_ssh_return",
"(",
"'iscsi_chap_secret'",
"in",
"header",
",",
"'_get_chap_secret_for_host'",
",",
"ssh_cmd",
",",
"out",
",",
"err",
")",
"name_index",
"=",
"header",
".",
"index",
"(",
"'name'",
")",
"method_index",
"=",
"header",
".",
"index",
"(",
"'iscsi_auth_method'",
")",
"secret_index",
"=",
"header",
".",
"index",
"(",
"'iscsi_chap_secret'",
")",
"chap_secret",
"=",
"None",
"host_found",
"=",
"False",
"for",
"line",
"in",
"host_lines",
":",
"info",
"=",
"line",
".",
"split",
"(",
"'!'",
")",
"if",
"info",
"[",
"name_index",
"]",
"==",
"host_name",
":",
"host_found",
"=",
"True",
"if",
"info",
"[",
"method_index",
"]",
"==",
"'chap'",
":",
"chap_secret",
"=",
"info",
"[",
"secret_index",
"]",
"self",
".",
"_assert_ssh_return",
"(",
"host_found",
",",
"'_get_chap_secret_for_host'",
",",
"ssh_cmd",
",",
"out",
",",
"err",
")",
"LOG",
".",
"debug",
"(",
"_",
"(",
"'leave: _get_chap_secret_for_host: host name '",
"'%(host_name)s with secret %(chap_secret)s'",
")",
"%",
"{",
"'host_name'",
":",
"host_name",
",",
"'chap_secret'",
":",
"chap_secret",
"}",
")",
"return",
"chap_secret"
] |
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/cinder/cinder/volume/drivers/storwize_svc.py#L352-L395
|
|
Asana/python-asana
|
9b54ab99423208bd6aa87dbfaa628c069430b127
|
asana/resources/gen/goals.py
|
python
|
_Goals.update_goal
|
(self, goal_gid, params=None, **options)
|
return self.client.put(path, params, **options)
|
Update a goal
:param str goal_gid: (required) Globally unique identifier for the goal.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
|
Update a goal
:param str goal_gid: (required) Globally unique identifier for the goal.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
|
[
"Update",
"a",
"goal",
":",
"param",
"str",
"goal_gid",
":",
"(",
"required",
")",
"Globally",
"unique",
"identifier",
"for",
"the",
"goal",
".",
":",
"param",
"Object",
"params",
":",
"Parameters",
"for",
"the",
"request",
":",
"param",
"**",
"options",
"-",
"opt_fields",
"{",
"list",
"[",
"str",
"]",
"}",
":",
"Defines",
"fields",
"to",
"return",
".",
"Some",
"requests",
"return",
"*",
"compact",
"*",
"representations",
"of",
"objects",
"in",
"order",
"to",
"conserve",
"resources",
"and",
"complete",
"the",
"request",
"more",
"efficiently",
".",
"Other",
"times",
"requests",
"return",
"more",
"information",
"than",
"you",
"may",
"need",
".",
"This",
"option",
"allows",
"you",
"to",
"list",
"the",
"exact",
"set",
"of",
"fields",
"that",
"the",
"API",
"should",
"be",
"sure",
"to",
"return",
"for",
"the",
"objects",
".",
"The",
"field",
"names",
"should",
"be",
"provided",
"as",
"paths",
"described",
"below",
".",
"The",
"id",
"of",
"included",
"objects",
"will",
"always",
"be",
"returned",
"regardless",
"of",
"the",
"field",
"options",
".",
"-",
"opt_pretty",
"{",
"bool",
"}",
":",
"Provides",
"“pretty”",
"output",
".",
"Provides",
"the",
"response",
"in",
"a",
"“pretty”",
"format",
".",
"In",
"the",
"case",
"of",
"JSON",
"this",
"means",
"doing",
"proper",
"line",
"breaking",
"and",
"indentation",
"to",
"make",
"it",
"readable",
".",
"This",
"will",
"take",
"extra",
"time",
"and",
"increase",
"the",
"response",
"size",
"so",
"it",
"is",
"advisable",
"only",
"to",
"use",
"this",
"during",
"debugging",
".",
":",
"return",
":",
"Object"
] |
def update_goal(self, goal_gid, params=None, **options):
"""Update a goal
:param str goal_gid: (required) Globally unique identifier for the goal.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/goals/{goal_gid}".replace("{goal_gid}", goal_gid)
return self.client.put(path, params, **options)
|
[
"def",
"update_goal",
"(",
"self",
",",
"goal_gid",
",",
"params",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"path",
"=",
"\"/goals/{goal_gid}\"",
".",
"replace",
"(",
"\"{goal_gid}\"",
",",
"goal_gid",
")",
"return",
"self",
".",
"client",
".",
"put",
"(",
"path",
",",
"params",
",",
"*",
"*",
"options",
")"
] |
https://github.com/Asana/python-asana/blob/9b54ab99423208bd6aa87dbfaa628c069430b127/asana/resources/gen/goals.py#L201-L213
|
|
Emptyset110/dHydra
|
8ec44994ff4dda8bf1ec40e38dd068b757945933
|
dHydra/Vendor/SinaL2/Sina/Vendor.py
|
python
|
Vendor.__init__
|
(self, logLevel=logging.INFO)
|
[] |
def __init__(self, logLevel=logging.INFO):
self.logger = self.get_logger()
|
[
"def",
"__init__",
"(",
"self",
",",
"logLevel",
"=",
"logging",
".",
"INFO",
")",
":",
"self",
".",
"logger",
"=",
"self",
".",
"get_logger",
"(",
")"
] |
https://github.com/Emptyset110/dHydra/blob/8ec44994ff4dda8bf1ec40e38dd068b757945933/dHydra/Vendor/SinaL2/Sina/Vendor.py#L7-L8
|
||||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAswTenanBlogspotCom.py
|
python
|
extractAswTenanBlogspotCom
|
(item)
|
return False
|
Parser for 'asw-tenan.blogspot.com'
|
Parser for 'asw-tenan.blogspot.com'
|
[
"Parser",
"for",
"asw",
"-",
"tenan",
".",
"blogspot",
".",
"com"
] |
def extractAswTenanBlogspotCom(item):
'''
Parser for 'asw-tenan.blogspot.com'
'''
if 'English' not in item['tags']:
return None
if 'Bahasa Indonesia' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('mushoku no eiyuu', 'Mushoku no Eiyuu ~Betsu ni Skill Nanka Iranakattan daga~', 'translated'),
('s-rank girls', 'S Rank Boukensha de aru Ore no Musume-tachi wa Juudo no Father Con deshita', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"def",
"extractAswTenanBlogspotCom",
"(",
"item",
")",
":",
"if",
"'English'",
"not",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"None",
"if",
"'Bahasa Indonesia'",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"None",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
")",
"or",
"\"preview\"",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"tagmap",
"=",
"[",
"(",
"'mushoku no eiyuu'",
",",
"'Mushoku no Eiyuu ~Betsu ni Skill Nanka Iranakattan daga~'",
",",
"'translated'",
")",
",",
"(",
"'s-rank girls'",
",",
"'S Rank Boukensha de aru Ore no Musume-tachi wa Juudo no Father Con deshita'",
",",
"'translated'",
")",
",",
"(",
"'PRC'",
",",
"'PRC'",
",",
"'translated'",
")",
",",
"(",
"'Loiterous'",
",",
"'Loiterous'",
",",
"'oel'",
")",
",",
"]",
"for",
"tagname",
",",
"name",
",",
"tl_type",
"in",
"tagmap",
":",
"if",
"tagname",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractAswTenanBlogspotCom.py#L1-L26
|
|
Ha0Tang/SelectionGAN
|
80aa7ad9f79f643c28633c40c621f208f3fb0121
|
selectiongan_v2/data/image_folder.py
|
python
|
ImageFolder.__len__
|
(self)
|
return len(self.imgs)
|
[] |
def __len__(self):
return len(self.imgs)
|
[
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"imgs",
")"
] |
https://github.com/Ha0Tang/SelectionGAN/blob/80aa7ad9f79f643c28633c40c621f208f3fb0121/selectiongan_v2/data/image_folder.py#L67-L68
|
|||
glitchdotcom/WebPutty
|
4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7
|
ziplibs/BeautifulSoup.py
|
python
|
Tag._getAttrMap
|
(self)
|
return self.attrMap
|
Initializes a map representation of this tag's attributes,
if not already initialized.
|
Initializes a map representation of this tag's attributes,
if not already initialized.
|
[
"Initializes",
"a",
"map",
"representation",
"of",
"this",
"tag",
"s",
"attributes",
"if",
"not",
"already",
"initialized",
"."
] |
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
|
[
"def",
"_getAttrMap",
"(",
"self",
")",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"'attrMap'",
")",
":",
"self",
".",
"attrMap",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"attrs",
":",
"self",
".",
"attrMap",
"[",
"key",
"]",
"=",
"value",
"return",
"self",
".",
"attrMap"
] |
https://github.com/glitchdotcom/WebPutty/blob/4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7/ziplibs/BeautifulSoup.py#L861-L868
|
|
biolab/orange3
|
41685e1c7b1d1babe680113685a2d44bcc9fec0b
|
Orange/widgets/data/utils/pythoneditor/editor.py
|
python
|
LineNumberArea.setVisible
|
(self, val)
|
Override the QWidget::setVisible(bool) method to properly
recalculate the editor viewport.
|
Override the QWidget::setVisible(bool) method to properly
recalculate the editor viewport.
|
[
"Override",
"the",
"QWidget",
"::",
"setVisible",
"(",
"bool",
")",
"method",
"to",
"properly",
"recalculate",
"the",
"editor",
"viewport",
"."
] |
def setVisible(self, val):
"""Override the QWidget::setVisible(bool) method to properly
recalculate the editor viewport.
"""
if val != self.isVisible():
if val:
super().setVisible(True)
else:
super().setVisible(False)
self._editor.updateViewport()
|
[
"def",
"setVisible",
"(",
"self",
",",
"val",
")",
":",
"if",
"val",
"!=",
"self",
".",
"isVisible",
"(",
")",
":",
"if",
"val",
":",
"super",
"(",
")",
".",
"setVisible",
"(",
"True",
")",
"else",
":",
"super",
"(",
")",
".",
"setVisible",
"(",
"False",
")",
"self",
".",
"_editor",
".",
"updateViewport",
"(",
")"
] |
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/data/utils/pythoneditor/editor.py#L1806-L1815
|
||
craftGBD/craftGBD
|
96d70519fbd02a86c3b5e0ddbedb2d67b5b6a99a
|
evaluation/lib/tmp/rpn/generate_anchors.py
|
python
|
_whctrs
|
(anchor)
|
return w, h, x_ctr, y_ctr
|
Return width, height, x center, and y center for an anchor (window).
|
Return width, height, x center, and y center for an anchor (window).
|
[
"Return",
"width",
"height",
"x",
"center",
"and",
"y",
"center",
"for",
"an",
"anchor",
"(",
"window",
")",
"."
] |
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
|
[
"def",
"_whctrs",
"(",
"anchor",
")",
":",
"w",
"=",
"anchor",
"[",
"2",
"]",
"-",
"anchor",
"[",
"0",
"]",
"+",
"1",
"h",
"=",
"anchor",
"[",
"3",
"]",
"-",
"anchor",
"[",
"1",
"]",
"+",
"1",
"x_ctr",
"=",
"anchor",
"[",
"0",
"]",
"+",
"0.5",
"*",
"(",
"w",
"-",
"1",
")",
"y_ctr",
"=",
"anchor",
"[",
"1",
"]",
"+",
"0.5",
"*",
"(",
"h",
"-",
"1",
")",
"return",
"w",
",",
"h",
",",
"x_ctr",
",",
"y_ctr"
] |
https://github.com/craftGBD/craftGBD/blob/96d70519fbd02a86c3b5e0ddbedb2d67b5b6a99a/evaluation/lib/tmp/rpn/generate_anchors.py#L50-L59
|
|
HewlettPackard/dlcookbook-dlbs
|
863ac1d7e72ad2fcafc78d8a13f67d35bc00c235
|
python/tf_cnn_benchmarks/allreduce.py
|
python
|
contains_any
|
(haystack, needles)
|
return False
|
Tests if any needle is a substring of haystack.
Args:
haystack: a string
needles: list of strings
Returns:
True if any element of needles is a substring of haystack,
False otherwise.
|
Tests if any needle is a substring of haystack.
|
[
"Tests",
"if",
"any",
"needle",
"is",
"a",
"substring",
"of",
"haystack",
"."
] |
def contains_any(haystack, needles):
"""Tests if any needle is a substring of haystack.
Args:
haystack: a string
needles: list of strings
Returns:
True if any element of needles is a substring of haystack,
False otherwise.
"""
for n in needles:
if n in haystack:
return True
return False
|
[
"def",
"contains_any",
"(",
"haystack",
",",
"needles",
")",
":",
"for",
"n",
"in",
"needles",
":",
"if",
"n",
"in",
"haystack",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/HewlettPackard/dlcookbook-dlbs/blob/863ac1d7e72ad2fcafc78d8a13f67d35bc00c235/python/tf_cnn_benchmarks/allreduce.py#L264-L278
|
|
librosa/librosa
|
76029d35ce4c76a7475f07aab67fe2df3f73c25c
|
librosa/util/_nnls.py
|
python
|
_nnls_obj
|
(x, shape, A, B)
|
return value, grad.flatten()
|
Compute the objective and gradient for NNLS
|
Compute the objective and gradient for NNLS
|
[
"Compute",
"the",
"objective",
"and",
"gradient",
"for",
"NNLS"
] |
def _nnls_obj(x, shape, A, B):
"""Compute the objective and gradient for NNLS"""
# Scipy's lbfgs flattens all arrays, so we first reshape
# the iterate x
x = x.reshape(shape)
# Compute the difference matrix
diff = np.einsum("mf,...ft->...mt", A, x, optimize=True) - B
# Compute the objective value
value = (1 / B.size) * 0.5 * np.sum(diff ** 2)
# And the gradient
grad = (1 / B.size) * np.einsum("mf,...mt->...ft", A, diff, optimize=True)
# Flatten the gradient
return value, grad.flatten()
|
[
"def",
"_nnls_obj",
"(",
"x",
",",
"shape",
",",
"A",
",",
"B",
")",
":",
"# Scipy's lbfgs flattens all arrays, so we first reshape",
"# the iterate x",
"x",
"=",
"x",
".",
"reshape",
"(",
"shape",
")",
"# Compute the difference matrix",
"diff",
"=",
"np",
".",
"einsum",
"(",
"\"mf,...ft->...mt\"",
",",
"A",
",",
"x",
",",
"optimize",
"=",
"True",
")",
"-",
"B",
"# Compute the objective value",
"value",
"=",
"(",
"1",
"/",
"B",
".",
"size",
")",
"*",
"0.5",
"*",
"np",
".",
"sum",
"(",
"diff",
"**",
"2",
")",
"# And the gradient",
"grad",
"=",
"(",
"1",
"/",
"B",
".",
"size",
")",
"*",
"np",
".",
"einsum",
"(",
"\"mf,...mt->...ft\"",
",",
"A",
",",
"diff",
",",
"optimize",
"=",
"True",
")",
"# Flatten the gradient",
"return",
"value",
",",
"grad",
".",
"flatten",
"(",
")"
] |
https://github.com/librosa/librosa/blob/76029d35ce4c76a7475f07aab67fe2df3f73c25c/librosa/util/_nnls.py#L20-L37
|
|
rubik/xenon
|
2991cb8a4b0a2a9242ccc3ebc4ee316293e58bc2
|
xenon/core.py
|
python
|
find_infractions
|
(args, logger, results)
|
return infractions
|
Analyze the results and find if the thresholds are surpassed.
*args* and *logger* are the same as in :func:`~xenon.core.analyze`, while
*results* is a dictionary holding the results of the complexity analysis.
The number of infractions with respect to the threshold values is returned.
|
Analyze the results and find if the thresholds are surpassed.
|
[
"Analyze",
"the",
"results",
"and",
"find",
"if",
"the",
"thresholds",
"are",
"surpassed",
"."
] |
def find_infractions(args, logger, results):
'''Analyze the results and find if the thresholds are surpassed.
*args* and *logger* are the same as in :func:`~xenon.core.analyze`, while
*results* is a dictionary holding the results of the complexity analysis.
The number of infractions with respect to the threshold values is returned.
'''
infractions = 0
module_averages = []
total_cc = 0.
total_blocks = 0
for module, blocks in results.items():
module_cc = 0.
if isinstance(blocks, dict) and blocks.get('error'):
logger.warning('cannot parse %s: %s', module, blocks['error'])
continue
for block in blocks:
module_cc += block['complexity']
r = cc_rank(block['complexity'])
if check(r, args.absolute):
logger.error('block "%s:%s %s" has a rank of %s', module,
block['lineno'], block['name'], r)
infractions += 1
module_averages.append((module, av(module_cc, len(blocks))))
total_cc += module_cc
total_blocks += len(blocks)
av_cc = av(total_cc, total_blocks)
ar = cc_rank(av_cc)
if args.averagenum is not None and av_cc > args.averagenum:
logger.error('total average complexity is %s', av_cc)
infractions += 1
if check(ar, args.average):
logger.error('average complexity is ranked %s', ar)
infractions += 1
for module, ma in module_averages:
mar = cc_rank(ma)
if check(mar, args.modules):
logger.error('module %r has a rank of %s', module, mar)
infractions += 1
return infractions
|
[
"def",
"find_infractions",
"(",
"args",
",",
"logger",
",",
"results",
")",
":",
"infractions",
"=",
"0",
"module_averages",
"=",
"[",
"]",
"total_cc",
"=",
"0.",
"total_blocks",
"=",
"0",
"for",
"module",
",",
"blocks",
"in",
"results",
".",
"items",
"(",
")",
":",
"module_cc",
"=",
"0.",
"if",
"isinstance",
"(",
"blocks",
",",
"dict",
")",
"and",
"blocks",
".",
"get",
"(",
"'error'",
")",
":",
"logger",
".",
"warning",
"(",
"'cannot parse %s: %s'",
",",
"module",
",",
"blocks",
"[",
"'error'",
"]",
")",
"continue",
"for",
"block",
"in",
"blocks",
":",
"module_cc",
"+=",
"block",
"[",
"'complexity'",
"]",
"r",
"=",
"cc_rank",
"(",
"block",
"[",
"'complexity'",
"]",
")",
"if",
"check",
"(",
"r",
",",
"args",
".",
"absolute",
")",
":",
"logger",
".",
"error",
"(",
"'block \"%s:%s %s\" has a rank of %s'",
",",
"module",
",",
"block",
"[",
"'lineno'",
"]",
",",
"block",
"[",
"'name'",
"]",
",",
"r",
")",
"infractions",
"+=",
"1",
"module_averages",
".",
"append",
"(",
"(",
"module",
",",
"av",
"(",
"module_cc",
",",
"len",
"(",
"blocks",
")",
")",
")",
")",
"total_cc",
"+=",
"module_cc",
"total_blocks",
"+=",
"len",
"(",
"blocks",
")",
"av_cc",
"=",
"av",
"(",
"total_cc",
",",
"total_blocks",
")",
"ar",
"=",
"cc_rank",
"(",
"av_cc",
")",
"if",
"args",
".",
"averagenum",
"is",
"not",
"None",
"and",
"av_cc",
">",
"args",
".",
"averagenum",
":",
"logger",
".",
"error",
"(",
"'total average complexity is %s'",
",",
"av_cc",
")",
"infractions",
"+=",
"1",
"if",
"check",
"(",
"ar",
",",
"args",
".",
"average",
")",
":",
"logger",
".",
"error",
"(",
"'average complexity is ranked %s'",
",",
"ar",
")",
"infractions",
"+=",
"1",
"for",
"module",
",",
"ma",
"in",
"module_averages",
":",
"mar",
"=",
"cc_rank",
"(",
"ma",
")",
"if",
"check",
"(",
"mar",
",",
"args",
".",
"modules",
")",
":",
"logger",
".",
"error",
"(",
"'module %r has a rank of %s'",
",",
"module",
",",
"mar",
")",
"infractions",
"+=",
"1",
"return",
"infractions"
] |
https://github.com/rubik/xenon/blob/2991cb8a4b0a2a9242ccc3ebc4ee316293e58bc2/xenon/core.py#L47-L90
|
|
Trusted-AI/AIX360
|
36459f2a585d0e2a2e8582562bf226d4402b57d6
|
aix360/algorithms/lime/lime_wrapper.py
|
python
|
LimeTextExplainer.set_params
|
(self, *argv, **kwargs)
|
Optionally, set parameters for the explainer.
|
Optionally, set parameters for the explainer.
|
[
"Optionally",
"set",
"parameters",
"for",
"the",
"explainer",
"."
] |
def set_params(self, *argv, **kwargs):
"""
Optionally, set parameters for the explainer.
"""
pass
|
[
"def",
"set_params",
"(",
"self",
",",
"*",
"argv",
",",
"*",
"*",
"kwargs",
")",
":",
"pass"
] |
https://github.com/Trusted-AI/AIX360/blob/36459f2a585d0e2a2e8582562bf226d4402b57d6/aix360/algorithms/lime/lime_wrapper.py#L23-L27
|
||
mrlesmithjr/Ansible
|
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
|
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/urllib3/packages/six.py
|
python
|
add_metaclass
|
(metaclass)
|
return wrapper
|
Class decorator for creating a class with a metaclass.
|
Class decorator for creating a class with a metaclass.
|
[
"Class",
"decorator",
"for",
"creating",
"a",
"class",
"with",
"a",
"metaclass",
"."
] |
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
|
[
"def",
"add_metaclass",
"(",
"metaclass",
")",
":",
"def",
"wrapper",
"(",
"cls",
")",
":",
"orig_vars",
"=",
"cls",
".",
"__dict__",
".",
"copy",
"(",
")",
"slots",
"=",
"orig_vars",
".",
"get",
"(",
"'__slots__'",
")",
"if",
"slots",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"slots",
",",
"str",
")",
":",
"slots",
"=",
"[",
"slots",
"]",
"for",
"slots_var",
"in",
"slots",
":",
"orig_vars",
".",
"pop",
"(",
"slots_var",
")",
"orig_vars",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"orig_vars",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"return",
"metaclass",
"(",
"cls",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"orig_vars",
")",
"return",
"wrapper"
] |
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/urllib3/packages/six.py#L812-L825
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/turtle.py
|
python
|
TPen.pencolor
|
(self, *args)
|
Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
|
Return or set the pencolor.
|
[
"Return",
"or",
"set",
"the",
"pencolor",
"."
] |
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
|
[
"def",
"pencolor",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"args",
":",
"color",
"=",
"self",
".",
"_colorstr",
"(",
"args",
")",
"if",
"color",
"==",
"self",
".",
"_pencolor",
":",
"return",
"self",
".",
"pen",
"(",
"pencolor",
"=",
"color",
")",
"else",
":",
"return",
"self",
".",
"_color",
"(",
"self",
".",
"_pencolor",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/turtle.py#L2222-L2257
|
||
zetaops/ulakbus
|
bcc05abf17bbd6dbeec93809e4ad30885e94e83e
|
ulakbus/views/ders_programi/derslik_sinav_programlari.py
|
python
|
DerslikSinavProgramlari.bilgi_ver
|
(self)
|
Yayınlananmış sınavlar yok ise bilgi mesajı ekrana basılır.
|
Yayınlananmış sınavlar yok ise bilgi mesajı ekrana basılır.
|
[
"Yayınlananmış",
"sınavlar",
"yok",
"ise",
"bilgi",
"mesajı",
"ekrana",
"basılır",
"."
] |
def bilgi_ver(self):
"""
Yayınlananmış sınavlar yok ise bilgi mesajı ekrana basılır.
"""
self.current.output['msgbox'] = {
'type': 'info', "title": _(u'Yayınlanmamış Sınavlar'),
'msg': _(u"Yayınlanmış sınavlar bulunmamaktadır.")
}
|
[
"def",
"bilgi_ver",
"(",
"self",
")",
":",
"self",
".",
"current",
".",
"output",
"[",
"'msgbox'",
"]",
"=",
"{",
"'type'",
":",
"'info'",
",",
"\"title\"",
":",
"_",
"(",
"u'Yayınlanmamış Sınavlar'),",
"",
"",
"'msg'",
":",
"_",
"(",
"u\"Yayınlanmış sınavlar bulunmamaktadır.\")",
"",
"}"
] |
https://github.com/zetaops/ulakbus/blob/bcc05abf17bbd6dbeec93809e4ad30885e94e83e/ulakbus/views/ders_programi/derslik_sinav_programlari.py#L58-L66
|
||
windelbouwman/ppci
|
915c069e0667042c085ec42c78e9e3c9a5295324
|
ppci/arch/arch_info.py
|
python
|
ArchInfo.get_register
|
(self, name)
|
return self._registers_by_name[name]
|
Retrieve the machine register by name.
|
Retrieve the machine register by name.
|
[
"Retrieve",
"the",
"machine",
"register",
"by",
"name",
"."
] |
def get_register(self, name):
""" Retrieve the machine register by name. """
return self._registers_by_name[name]
|
[
"def",
"get_register",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_registers_by_name",
"[",
"name",
"]"
] |
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/arch/arch_info.py#L60-L62
|
|
open-io/oio-sds
|
16041950b6056a55d5ce7ca77795defe6dfa6c61
|
oio/common/green.py
|
python
|
eventlet_yield
|
()
|
Swith to another eventlet coroutine.
|
Swith to another eventlet coroutine.
|
[
"Swith",
"to",
"another",
"eventlet",
"coroutine",
"."
] |
def eventlet_yield():
"""Swith to another eventlet coroutine."""
sleep(0)
|
[
"def",
"eventlet_yield",
"(",
")",
":",
"sleep",
"(",
"0",
")"
] |
https://github.com/open-io/oio-sds/blob/16041950b6056a55d5ce7ca77795defe6dfa6c61/oio/common/green.py#L73-L75
|
||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/nose-1.3.7/nose/plugins/logcapture.py
|
python
|
LogCapture.afterTest
|
(self, test)
|
Clear buffers after test.
|
Clear buffers after test.
|
[
"Clear",
"buffers",
"after",
"test",
"."
] |
def afterTest(self, test):
"""Clear buffers after test.
"""
self.handler.truncate()
|
[
"def",
"afterTest",
"(",
"self",
",",
"test",
")",
":",
"self",
".",
"handler",
".",
"truncate",
"(",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/nose-1.3.7/nose/plugins/logcapture.py#L219-L222
|
||
PaddlePaddle/ERNIE
|
15eddb022ce1beb281777e9ab8807a1bdfa7a76e
|
propeller/paddle/train/monitored_executor.py
|
python
|
RunState.__repr__
|
(self)
|
return repr(self.state_dict())
|
doc
|
doc
|
[
"doc"
] |
def __repr__(self):
"""doc"""
return repr(self.state_dict())
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"repr",
"(",
"self",
".",
"state_dict",
"(",
")",
")"
] |
https://github.com/PaddlePaddle/ERNIE/blob/15eddb022ce1beb281777e9ab8807a1bdfa7a76e/propeller/paddle/train/monitored_executor.py#L99-L101
|
|
jantman/awslimitchecker
|
411ad9e734ddb16d87720ff5b994f19f47b8b098
|
awslimitchecker/services/apigateway.py
|
python
|
_ApigatewayService._find_usage_vpc_links
|
(self)
|
Find usage on VPC Links. Update `self.limits`.
|
Find usage on VPC Links. Update `self.limits`.
|
[
"Find",
"usage",
"on",
"VPC",
"Links",
".",
"Update",
"self",
".",
"limits",
"."
] |
def _find_usage_vpc_links(self):
"""
Find usage on VPC Links. Update `self.limits`.
"""
logger.debug('Finding usage for VPC Links')
link_count = 0
paginator = self.conn.get_paginator('get_vpc_links')
for resp in paginator.paginate():
link_count += len(resp['items'])
self.limits['VPC Links per account']._add_current_usage(
link_count, aws_type='AWS::ApiGateway::VpcLink'
)
|
[
"def",
"_find_usage_vpc_links",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Finding usage for VPC Links'",
")",
"link_count",
"=",
"0",
"paginator",
"=",
"self",
".",
"conn",
".",
"get_paginator",
"(",
"'get_vpc_links'",
")",
"for",
"resp",
"in",
"paginator",
".",
"paginate",
"(",
")",
":",
"link_count",
"+=",
"len",
"(",
"resp",
"[",
"'items'",
"]",
")",
"self",
".",
"limits",
"[",
"'VPC Links per account'",
"]",
".",
"_add_current_usage",
"(",
"link_count",
",",
"aws_type",
"=",
"'AWS::ApiGateway::VpcLink'",
")"
] |
https://github.com/jantman/awslimitchecker/blob/411ad9e734ddb16d87720ff5b994f19f47b8b098/awslimitchecker/services/apigateway.py#L180-L191
|
||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractRosynightWordpressCom.py
|
python
|
extractRosynightWordpressCom
|
(item)
|
return False
|
Parser for 'rosynight.wordpress.com'
|
Parser for 'rosynight.wordpress.com'
|
[
"Parser",
"for",
"rosynight",
".",
"wordpress",
".",
"com"
] |
def extractRosynightWordpressCom(item):
'''
Parser for 'rosynight.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('CC', 'Cāi Cāi', 'translated'),
('tswwt', 'Those Steps We Walked Together', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"def",
"extractRosynightWordpressCom",
"(",
"item",
")",
":",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
")",
"or",
"\"preview\"",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"tagmap",
"=",
"[",
"(",
"'CC'",
",",
"'Cāi Cāi', ",
" ",
"ranslated'),",
"",
"",
"(",
"'tswwt'",
",",
"'Those Steps We Walked Together'",
",",
"'translated'",
")",
",",
"(",
"'PRC'",
",",
"'PRC'",
",",
"'translated'",
")",
",",
"(",
"'Loiterous'",
",",
"'Loiterous'",
",",
"'oel'",
")",
",",
"]",
"for",
"tagname",
",",
"name",
",",
"tl_type",
"in",
"tagmap",
":",
"if",
"tagname",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractRosynightWordpressCom.py#L1-L22
|
|
onaio/onadata
|
89ad16744e8f247fb748219476f6ac295869a95f
|
onadata/apps/logger/models/submission_review.py
|
python
|
SubmissionReview.set_deleted
|
(self, deleted_at=timezone.now(), user=None)
|
Sets the deleted_at and deleted_by fields
|
Sets the deleted_at and deleted_by fields
|
[
"Sets",
"the",
"deleted_at",
"and",
"deleted_by",
"fields"
] |
def set_deleted(self, deleted_at=timezone.now(), user=None):
"""
Sets the deleted_at and deleted_by fields
"""
if user:
self.deleted_by = user
self.deleted_at = deleted_at
self.save()
|
[
"def",
"set_deleted",
"(",
"self",
",",
"deleted_at",
"=",
"timezone",
".",
"now",
"(",
")",
",",
"user",
"=",
"None",
")",
":",
"if",
"user",
":",
"self",
".",
"deleted_by",
"=",
"user",
"self",
".",
"deleted_at",
"=",
"deleted_at",
"self",
".",
"save",
"(",
")"
] |
https://github.com/onaio/onadata/blob/89ad16744e8f247fb748219476f6ac295869a95f/onadata/apps/logger/models/submission_review.py#L80-L87
|
||
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/tornado/web.py
|
python
|
StaticFileHandler.get
|
(self, path: str, include_body: bool = True)
|
[] |
async def get(self, path: str, include_body: bool = True) -> None:
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if start is not None and start < 0:
start += size
if start < 0:
start = 0
if (
start is not None
and (start >= size or (end is not None and start >= end))
) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified.
# https://tools.ietf.org/html/rfc7233#section-2.1
# A byte-range-spec is invalid if the last-byte-pos value is present
# and less than the first-byte-pos.
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size,))
return
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header(
"Content-Range", httputil._get_content_range(start, end, size)
)
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
await self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
|
[
"async",
"def",
"get",
"(",
"self",
",",
"path",
":",
"str",
",",
"include_body",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"# Set up our path instance variables.",
"self",
".",
"path",
"=",
"self",
".",
"parse_url_path",
"(",
"path",
")",
"del",
"path",
"# make sure we don't refer to path instead of self.path again",
"absolute_path",
"=",
"self",
".",
"get_absolute_path",
"(",
"self",
".",
"root",
",",
"self",
".",
"path",
")",
"self",
".",
"absolute_path",
"=",
"self",
".",
"validate_absolute_path",
"(",
"self",
".",
"root",
",",
"absolute_path",
")",
"if",
"self",
".",
"absolute_path",
"is",
"None",
":",
"return",
"self",
".",
"modified",
"=",
"self",
".",
"get_modified_time",
"(",
")",
"self",
".",
"set_headers",
"(",
")",
"if",
"self",
".",
"should_return_304",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"304",
")",
"return",
"request_range",
"=",
"None",
"range_header",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Range\"",
")",
"if",
"range_header",
":",
"# As per RFC 2616 14.16, if an invalid Range header is specified,",
"# the request will be treated as if the header didn't exist.",
"request_range",
"=",
"httputil",
".",
"_parse_request_range",
"(",
"range_header",
")",
"size",
"=",
"self",
".",
"get_content_size",
"(",
")",
"if",
"request_range",
":",
"start",
",",
"end",
"=",
"request_range",
"if",
"start",
"is",
"not",
"None",
"and",
"start",
"<",
"0",
":",
"start",
"+=",
"size",
"if",
"start",
"<",
"0",
":",
"start",
"=",
"0",
"if",
"(",
"start",
"is",
"not",
"None",
"and",
"(",
"start",
">=",
"size",
"or",
"(",
"end",
"is",
"not",
"None",
"and",
"start",
">=",
"end",
")",
")",
")",
"or",
"end",
"==",
"0",
":",
"# As per RFC 2616 14.35.1, a range is not satisfiable only: if",
"# the first requested byte is equal to or greater than the",
"# content, or when a suffix with length 0 is specified.",
"# https://tools.ietf.org/html/rfc7233#section-2.1",
"# A byte-range-spec is invalid if the last-byte-pos value is present",
"# and less than the first-byte-pos.",
"self",
".",
"set_status",
"(",
"416",
")",
"# Range Not Satisfiable",
"self",
".",
"set_header",
"(",
"\"Content-Type\"",
",",
"\"text/plain\"",
")",
"self",
".",
"set_header",
"(",
"\"Content-Range\"",
",",
"\"bytes */%s\"",
"%",
"(",
"size",
",",
")",
")",
"return",
"if",
"end",
"is",
"not",
"None",
"and",
"end",
">",
"size",
":",
"# Clients sometimes blindly use a large range to limit their",
"# download size; cap the endpoint at the actual file size.",
"end",
"=",
"size",
"# Note: only return HTTP 206 if less than the entire range has been",
"# requested. Not only is this semantically correct, but Chrome",
"# refuses to play audio if it gets an HTTP 206 in response to",
"# ``Range: bytes=0-``.",
"if",
"size",
"!=",
"(",
"end",
"or",
"size",
")",
"-",
"(",
"start",
"or",
"0",
")",
":",
"self",
".",
"set_status",
"(",
"206",
")",
"# Partial Content",
"self",
".",
"set_header",
"(",
"\"Content-Range\"",
",",
"httputil",
".",
"_get_content_range",
"(",
"start",
",",
"end",
",",
"size",
")",
")",
"else",
":",
"start",
"=",
"end",
"=",
"None",
"if",
"start",
"is",
"not",
"None",
"and",
"end",
"is",
"not",
"None",
":",
"content_length",
"=",
"end",
"-",
"start",
"elif",
"end",
"is",
"not",
"None",
":",
"content_length",
"=",
"end",
"elif",
"start",
"is",
"not",
"None",
":",
"content_length",
"=",
"size",
"-",
"start",
"else",
":",
"content_length",
"=",
"size",
"self",
".",
"set_header",
"(",
"\"Content-Length\"",
",",
"content_length",
")",
"if",
"include_body",
":",
"content",
"=",
"self",
".",
"get_content",
"(",
"self",
".",
"absolute_path",
",",
"start",
",",
"end",
")",
"if",
"isinstance",
"(",
"content",
",",
"bytes",
")",
":",
"content",
"=",
"[",
"content",
"]",
"for",
"chunk",
"in",
"content",
":",
"try",
":",
"self",
".",
"write",
"(",
"chunk",
")",
"await",
"self",
".",
"flush",
"(",
")",
"except",
"iostream",
".",
"StreamClosedError",
":",
"return",
"else",
":",
"assert",
"self",
".",
"request",
".",
"method",
"==",
"\"HEAD\""
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/tornado/web.py#L2576-L2657
|
||||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_obj.py
|
python
|
Yedit._write
|
(filename, contents)
|
Actually write the file contents to disk. This helps with mocking.
|
Actually write the file contents to disk. This helps with mocking.
|
[
"Actually",
"write",
"the",
"file",
"contents",
"to",
"disk",
".",
"This",
"helps",
"with",
"mocking",
"."
] |
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
|
[
"def",
"_write",
"(",
"filename",
",",
"contents",
")",
":",
"tmp_filename",
"=",
"filename",
"+",
"'.yedit'",
"with",
"open",
"(",
"tmp_filename",
",",
"'w'",
")",
"as",
"yfd",
":",
"fcntl",
".",
"flock",
"(",
"yfd",
",",
"fcntl",
".",
"LOCK_EX",
"|",
"fcntl",
".",
"LOCK_NB",
")",
"yfd",
".",
"write",
"(",
"contents",
")",
"fcntl",
".",
"flock",
"(",
"yfd",
",",
"fcntl",
".",
"LOCK_UN",
")",
"os",
".",
"rename",
"(",
"tmp_filename",
",",
"filename",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_obj.py#L361-L371
|
||
tobegit3hub/deep_image_model
|
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
|
java_predict_client/src/main/proto/tensorflow/python/ops/linalg_grad.py
|
python
|
_CholeskyGrad
|
(op, grad)
|
return linalg_ops.cholesky_grad(op.outputs[0], grad)
|
Gradient for Cholesky.
|
Gradient for Cholesky.
|
[
"Gradient",
"for",
"Cholesky",
"."
] |
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
return linalg_ops.cholesky_grad(op.outputs[0], grad)
|
[
"def",
"_CholeskyGrad",
"(",
"op",
",",
"grad",
")",
":",
"return",
"linalg_ops",
".",
"cholesky_grad",
"(",
"op",
".",
"outputs",
"[",
"0",
"]",
",",
"grad",
")"
] |
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/python/ops/linalg_grad.py#L58-L60
|
|
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/kubernetes/client/apis/apiregistration_v1beta1_api.py
|
python
|
ApiregistrationV1beta1Api.get_api_resources
|
(self, **kwargs)
|
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
|
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources(callback=callback_function)
|
[
"get",
"available",
"resources",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"define",
"a",
"callback",
"function",
"to",
"be",
"invoked",
"when",
"receiving",
"the",
"response",
".",
">>>",
"def",
"callback_function",
"(",
"response",
")",
":",
">>>",
"pprint",
"(",
"response",
")",
">>>",
">>>",
"thread",
"=",
"api",
".",
"get_api_resources",
"(",
"callback",
"=",
"callback_function",
")"
] |
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
|
[
"def",
"get_api_resources",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"get_api_resources_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_api_resources_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/apis/apiregistration_v1beta1_api.py#L406-L428
|
||
twisted/twisted
|
dee676b040dd38b847ea6fb112a712cb5e119490
|
src/twisted/conch/ssh/connection.py
|
python
|
SSHConnection.ssh_CHANNEL_OPEN_FAILURE
|
(self, packet)
|
The other side did not accept our MSG_CHANNEL_OPEN request. Payload::
uint32 local channel number
uint32 reason code
string reason description
Find the channel using the local channel number and notify it by
calling its openFailed() method.
|
The other side did not accept our MSG_CHANNEL_OPEN request. Payload::
uint32 local channel number
uint32 reason code
string reason description
|
[
"The",
"other",
"side",
"did",
"not",
"accept",
"our",
"MSG_CHANNEL_OPEN",
"request",
".",
"Payload",
"::",
"uint32",
"local",
"channel",
"number",
"uint32",
"reason",
"code",
"string",
"reason",
"description"
] |
def ssh_CHANNEL_OPEN_FAILURE(self, packet):
"""
The other side did not accept our MSG_CHANNEL_OPEN request. Payload::
uint32 local channel number
uint32 reason code
string reason description
Find the channel using the local channel number and notify it by
calling its openFailed() method.
"""
localChannel, reasonCode = struct.unpack(">2L", packet[:8])
reasonDesc = common.getNS(packet[8:])[0]
channel = self.channels[localChannel]
del self.channels[localChannel]
channel.conn = self
reason = error.ConchError(reasonDesc, reasonCode)
channel.openFailed(reason)
|
[
"def",
"ssh_CHANNEL_OPEN_FAILURE",
"(",
"self",
",",
"packet",
")",
":",
"localChannel",
",",
"reasonCode",
"=",
"struct",
".",
"unpack",
"(",
"\">2L\"",
",",
"packet",
"[",
":",
"8",
"]",
")",
"reasonDesc",
"=",
"common",
".",
"getNS",
"(",
"packet",
"[",
"8",
":",
"]",
")",
"[",
"0",
"]",
"channel",
"=",
"self",
".",
"channels",
"[",
"localChannel",
"]",
"del",
"self",
".",
"channels",
"[",
"localChannel",
"]",
"channel",
".",
"conn",
"=",
"self",
"reason",
"=",
"error",
".",
"ConchError",
"(",
"reasonDesc",
",",
"reasonCode",
")",
"channel",
".",
"openFailed",
"(",
"reason",
")"
] |
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/conch/ssh/connection.py#L207-L223
|
||
PennyLaneAI/pennylane
|
1275736f790ced1d778858ed383448d4a43a4cdd
|
pennylane/devices/default_gaussian.py
|
python
|
squeezed_cov
|
(r, phi, hbar=2)
|
return R @ cov @ R.T
|
r"""Returns the squeezed covariance matrix of a squeezed state.
Args:
r (float): the squeezing magnitude
p (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed state
|
r"""Returns the squeezed covariance matrix of a squeezed state.
|
[
"r",
"Returns",
"the",
"squeezed",
"covariance",
"matrix",
"of",
"a",
"squeezed",
"state",
"."
] |
def squeezed_cov(r, phi, hbar=2):
r"""Returns the squeezed covariance matrix of a squeezed state.
Args:
r (float): the squeezing magnitude
p (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed state
"""
cov = np.array([[math.exp(-2 * r), 0], [0, math.exp(2 * r)]]) * hbar / 2
R = rotation(phi / 2)
return R @ cov @ R.T
|
[
"def",
"squeezed_cov",
"(",
"r",
",",
"phi",
",",
"hbar",
"=",
"2",
")",
":",
"cov",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"math",
".",
"exp",
"(",
"-",
"2",
"*",
"r",
")",
",",
"0",
"]",
",",
"[",
"0",
",",
"math",
".",
"exp",
"(",
"2",
"*",
"r",
")",
"]",
"]",
")",
"*",
"hbar",
"/",
"2",
"R",
"=",
"rotation",
"(",
"phi",
"/",
"2",
")",
"return",
"R",
"@",
"cov",
"@",
"R",
".",
"T"
] |
https://github.com/PennyLaneAI/pennylane/blob/1275736f790ced1d778858ed383448d4a43a4cdd/pennylane/devices/default_gaussian.py#L332-L347
|
|
dropbox/dropbox-sdk-python
|
015437429be224732990041164a21a0501235db1
|
dropbox/base_team.py
|
python
|
DropboxTeamBase.file_properties_templates_list_for_team
|
(self)
|
return r
|
Get the template identifiers for a team. To get the schema of each
template use :meth:`file_properties_templates_get_for_team`.
:rtype: :class:`dropbox.file_properties.ListTemplateResult`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.file_properties.TemplateError`
|
Get the template identifiers for a team. To get the schema of each
template use :meth:`file_properties_templates_get_for_team`.
|
[
"Get",
"the",
"template",
"identifiers",
"for",
"a",
"team",
".",
"To",
"get",
"the",
"schema",
"of",
"each",
"template",
"use",
":",
"meth",
":",
"file_properties_templates_get_for_team",
"."
] |
def file_properties_templates_list_for_team(self):
"""
Get the template identifiers for a team. To get the schema of each
template use :meth:`file_properties_templates_get_for_team`.
:rtype: :class:`dropbox.file_properties.ListTemplateResult`
:raises: :class:`.exceptions.ApiError`
If this raises, ApiError will contain:
:class:`dropbox.file_properties.TemplateError`
"""
arg = None
r = self.request(
file_properties.templates_list_for_team,
'file_properties',
arg,
None,
)
return r
|
[
"def",
"file_properties_templates_list_for_team",
"(",
"self",
")",
":",
"arg",
"=",
"None",
"r",
"=",
"self",
".",
"request",
"(",
"file_properties",
".",
"templates_list_for_team",
",",
"'file_properties'",
",",
"arg",
",",
"None",
",",
")",
"return",
"r"
] |
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/base_team.py#L101-L119
|
|
smart-mobile-software/gitstack
|
d9fee8f414f202143eb6e620529e8e5539a2af56
|
python/Lib/site-packages/django/db/backends/__init__.py
|
python
|
BaseDatabaseOperations.value_to_db_decimal
|
(self, value, max_digits, decimal_places)
|
return util.format_number(value, max_digits, decimal_places)
|
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
|
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
|
[
"Transform",
"a",
"decimal",
".",
"Decimal",
"value",
"to",
"an",
"object",
"compatible",
"with",
"what",
"is",
"expected",
"by",
"the",
"backend",
"driver",
"for",
"decimal",
"(",
"numeric",
")",
"columns",
"."
] |
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
|
[
"def",
"value_to_db_decimal",
"(",
"self",
",",
"value",
",",
"max_digits",
",",
"decimal_places",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"return",
"util",
".",
"format_number",
"(",
"value",
",",
"max_digits",
",",
"decimal_places",
")"
] |
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/site-packages/django/db/backends/__init__.py#L802-L809
|
|
apache/tvm
|
6eb4ed813ebcdcd9558f0906a1870db8302ff1e0
|
python/tvm/auto_scheduler/relay_integration.py
|
python
|
exit_layout_rewrite
|
()
|
Exit layout rewrite tracing environment
|
Exit layout rewrite tracing environment
|
[
"Exit",
"layout",
"rewrite",
"tracing",
"environment"
] |
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
|
[
"def",
"exit_layout_rewrite",
"(",
")",
":",
"env",
"=",
"TracingEnvironment",
".",
"current",
"env",
".",
"__exit__",
"(",
"None",
",",
"None",
",",
"None",
")"
] |
https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/auto_scheduler/relay_integration.py#L249-L252
|
||
meiqua/6DPose
|
619be5790448b4cd13290cf7727b35f1265e69e0
|
cxxLCHF/setup.py
|
python
|
CMakeBuild.build_extension
|
(self, ext)
|
[] |
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
# cmake twice then everything is OK, due to proto's strange bug
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
|
[
"def",
"build_extension",
"(",
"self",
",",
"ext",
")",
":",
"extdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"get_ext_fullpath",
"(",
"ext",
".",
"name",
")",
")",
")",
"cmake_args",
"=",
"[",
"'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY='",
"+",
"extdir",
",",
"'-DPYTHON_EXECUTABLE='",
"+",
"sys",
".",
"executable",
"]",
"cfg",
"=",
"'Debug'",
"if",
"self",
".",
"debug",
"else",
"'Release'",
"build_args",
"=",
"[",
"'--config'",
",",
"cfg",
"]",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"\"Windows\"",
":",
"cmake_args",
"+=",
"[",
"'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'",
".",
"format",
"(",
"cfg",
".",
"upper",
"(",
")",
",",
"extdir",
")",
"]",
"if",
"sys",
".",
"maxsize",
">",
"2",
"**",
"32",
":",
"cmake_args",
"+=",
"[",
"'-A'",
",",
"'x64'",
"]",
"build_args",
"+=",
"[",
"'--'",
",",
"'/m'",
"]",
"else",
":",
"cmake_args",
"+=",
"[",
"'-DCMAKE_BUILD_TYPE='",
"+",
"cfg",
"]",
"build_args",
"+=",
"[",
"'--'",
",",
"'-j2'",
"]",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"env",
"[",
"'CXXFLAGS'",
"]",
"=",
"'{} -DVERSION_INFO=\\\\\"{}\\\\\"'",
".",
"format",
"(",
"env",
".",
"get",
"(",
"'CXXFLAGS'",
",",
"''",
")",
",",
"self",
".",
"distribution",
".",
"get_version",
"(",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"build_temp",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"build_temp",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'cmake'",
",",
"ext",
".",
"sourcedir",
"]",
"+",
"cmake_args",
",",
"cwd",
"=",
"self",
".",
"build_temp",
",",
"env",
"=",
"env",
")",
"# cmake twice then everything is OK, due to proto's strange bug",
"subprocess",
".",
"check_call",
"(",
"[",
"'cmake'",
",",
"ext",
".",
"sourcedir",
"]",
"+",
"cmake_args",
",",
"cwd",
"=",
"self",
".",
"build_temp",
",",
"env",
"=",
"env",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'cmake'",
",",
"'--build'",
",",
"'.'",
"]",
"+",
"build_args",
",",
"cwd",
"=",
"self",
".",
"build_temp",
")"
] |
https://github.com/meiqua/6DPose/blob/619be5790448b4cd13290cf7727b35f1265e69e0/cxxLCHF/setup.py#L35-L61
|
||||
JacquesLucke/animation_nodes
|
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
|
animation_nodes/nodes/object/object_id_key.py
|
python
|
ObjectIDKeyNode.getExecutionCode_Base
|
(self, keyName, required)
|
[] |
def getExecutionCode_Base(self, keyName, required):
dataType = self.keyDataType
if "exists" in required:
yield "exists = _key.exists(object, %s)" % keyName
yield "data = _key.get(object, %s)" % keyName
if dataType == "Transforms":
yield "location, rotation, scale = data"
if "matrix" in required:
yield "matrix = AN.utils.math.composeMatrix(location, rotation, scale)"
elif dataType == "Text":
yield "text = data"
elif dataType in ("Integer", "Float"):
yield "number = data"
|
[
"def",
"getExecutionCode_Base",
"(",
"self",
",",
"keyName",
",",
"required",
")",
":",
"dataType",
"=",
"self",
".",
"keyDataType",
"if",
"\"exists\"",
"in",
"required",
":",
"yield",
"\"exists = _key.exists(object, %s)\"",
"%",
"keyName",
"yield",
"\"data = _key.get(object, %s)\"",
"%",
"keyName",
"if",
"dataType",
"==",
"\"Transforms\"",
":",
"yield",
"\"location, rotation, scale = data\"",
"if",
"\"matrix\"",
"in",
"required",
":",
"yield",
"\"matrix = AN.utils.math.composeMatrix(location, rotation, scale)\"",
"elif",
"dataType",
"==",
"\"Text\"",
":",
"yield",
"\"text = data\"",
"elif",
"dataType",
"in",
"(",
"\"Integer\"",
",",
"\"Float\"",
")",
":",
"yield",
"\"number = data\""
] |
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/nodes/object/object_id_key.py#L87-L102
|
||||
crossbario/crossbar
|
ed350b7ba1c8421f3640b9c2e94a21ed4cfdff64
|
crossbar/router/broker.py
|
python
|
Broker.detach
|
(self, session)
|
Implements :func:`crossbar.router.interfaces.IBroker.detach`
|
Implements :func:`crossbar.router.interfaces.IBroker.detach`
|
[
"Implements",
":",
"func",
":",
"crossbar",
".",
"router",
".",
"interfaces",
".",
"IBroker",
".",
"detach"
] |
def detach(self, session):
"""
Implements :func:`crossbar.router.interfaces.IBroker.detach`
"""
if session in self._session_to_subscriptions:
for subscription in self._session_to_subscriptions[session]:
was_subscribed, was_last_subscriber = self._subscription_map.drop_observer(session, subscription)
was_deleted = False
# delete it if there are no subscribers and no retained events
#
if was_subscribed and was_last_subscriber and not subscription.extra.retained_events:
was_deleted = True
self._subscription_map.delete_observation(subscription)
# publish WAMP meta events, if we have a service session, but
# not for the meta API itself!
#
if self._router._realm and \
self._router._realm.session and \
not subscription.uri.startswith('wamp.'):
def _publish(subscription):
service_session = self._router._realm.session
# FIXME: what about exclude_authid as colleced from forward_for? like we do elsewhere in this file!
options = types.PublishOptions(correlation_id=None,
correlation_is_anchor=True,
correlation_is_last=False)
if was_subscribed:
service_session.publish(
'wamp.subscription.on_unsubscribe',
session._session_id,
subscription.id,
options=options,
)
if was_deleted:
options.correlation_is_last = True
service_session.publish(
'wamp.subscription.on_delete',
session._session_id,
subscription.id,
options=options,
)
# we postpone actual sending of meta events until we return to this client session
self._reactor.callLater(0, _publish, subscription)
del self._session_to_subscriptions[session]
else:
raise NotAttached("session with ID {} not attached".format(session._session_id))
|
[
"def",
"detach",
"(",
"self",
",",
"session",
")",
":",
"if",
"session",
"in",
"self",
".",
"_session_to_subscriptions",
":",
"for",
"subscription",
"in",
"self",
".",
"_session_to_subscriptions",
"[",
"session",
"]",
":",
"was_subscribed",
",",
"was_last_subscriber",
"=",
"self",
".",
"_subscription_map",
".",
"drop_observer",
"(",
"session",
",",
"subscription",
")",
"was_deleted",
"=",
"False",
"# delete it if there are no subscribers and no retained events",
"#",
"if",
"was_subscribed",
"and",
"was_last_subscriber",
"and",
"not",
"subscription",
".",
"extra",
".",
"retained_events",
":",
"was_deleted",
"=",
"True",
"self",
".",
"_subscription_map",
".",
"delete_observation",
"(",
"subscription",
")",
"# publish WAMP meta events, if we have a service session, but",
"# not for the meta API itself!",
"#",
"if",
"self",
".",
"_router",
".",
"_realm",
"and",
"self",
".",
"_router",
".",
"_realm",
".",
"session",
"and",
"not",
"subscription",
".",
"uri",
".",
"startswith",
"(",
"'wamp.'",
")",
":",
"def",
"_publish",
"(",
"subscription",
")",
":",
"service_session",
"=",
"self",
".",
"_router",
".",
"_realm",
".",
"session",
"# FIXME: what about exclude_authid as colleced from forward_for? like we do elsewhere in this file!",
"options",
"=",
"types",
".",
"PublishOptions",
"(",
"correlation_id",
"=",
"None",
",",
"correlation_is_anchor",
"=",
"True",
",",
"correlation_is_last",
"=",
"False",
")",
"if",
"was_subscribed",
":",
"service_session",
".",
"publish",
"(",
"'wamp.subscription.on_unsubscribe'",
",",
"session",
".",
"_session_id",
",",
"subscription",
".",
"id",
",",
"options",
"=",
"options",
",",
")",
"if",
"was_deleted",
":",
"options",
".",
"correlation_is_last",
"=",
"True",
"service_session",
".",
"publish",
"(",
"'wamp.subscription.on_delete'",
",",
"session",
".",
"_session_id",
",",
"subscription",
".",
"id",
",",
"options",
"=",
"options",
",",
")",
"# we postpone actual sending of meta events until we return to this client session",
"self",
".",
"_reactor",
".",
"callLater",
"(",
"0",
",",
"_publish",
",",
"subscription",
")",
"del",
"self",
".",
"_session_to_subscriptions",
"[",
"session",
"]",
"else",
":",
"raise",
"NotAttached",
"(",
"\"session with ID {} not attached\"",
".",
"format",
"(",
"session",
".",
"_session_id",
")",
")"
] |
https://github.com/crossbario/crossbar/blob/ed350b7ba1c8421f3640b9c2e94a21ed4cfdff64/crossbar/router/broker.py#L119-L174
|
||
CGATOxford/cgat
|
326aad4694bdfae8ddc194171bb5d73911243947
|
CGAT/Bed.py
|
python
|
Bed.fromIntervals
|
(self, intervals)
|
Fill co-ordinates from list of intervals.
If multiple intervals are provided and entry is BED12 then the
blocks are automatically set.
Arguments
---------
intervals : list
List of tuples (start, end) with block coordinates.
|
Fill co-ordinates from list of intervals.
|
[
"Fill",
"co",
"-",
"ordinates",
"from",
"list",
"of",
"intervals",
"."
] |
def fromIntervals(self, intervals):
"""Fill co-ordinates from list of intervals.
If multiple intervals are provided and entry is BED12 then the
blocks are automatically set.
Arguments
---------
intervals : list
List of tuples (start, end) with block coordinates.
"""
intervals = sorted(intervals)
self.start = intervals[0][0]
self.end = intervals[-1][1]
if self.columns >= 12:
self["thickStart"] = self.start
self["thickEnd"] = self.end
blockStarts = [interval[0] - self.start for interval in intervals]
blockSizes = [end - start for start, end in intervals]
blockCount = len(intervals)
self["blockStarts"] = ",".join(map(str, blockStarts))
self["blockSizes"] = ",".join(map(str, blockSizes))
self["blockCount"] = str(blockCount)
else:
if len(intervals) > 1:
raise ValueError(
"Multiple intervals provided to non-bed12 entry")
|
[
"def",
"fromIntervals",
"(",
"self",
",",
"intervals",
")",
":",
"intervals",
"=",
"sorted",
"(",
"intervals",
")",
"self",
".",
"start",
"=",
"intervals",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"end",
"=",
"intervals",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"if",
"self",
".",
"columns",
">=",
"12",
":",
"self",
"[",
"\"thickStart\"",
"]",
"=",
"self",
".",
"start",
"self",
"[",
"\"thickEnd\"",
"]",
"=",
"self",
".",
"end",
"blockStarts",
"=",
"[",
"interval",
"[",
"0",
"]",
"-",
"self",
".",
"start",
"for",
"interval",
"in",
"intervals",
"]",
"blockSizes",
"=",
"[",
"end",
"-",
"start",
"for",
"start",
",",
"end",
"in",
"intervals",
"]",
"blockCount",
"=",
"len",
"(",
"intervals",
")",
"self",
"[",
"\"blockStarts\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"blockStarts",
")",
")",
"self",
"[",
"\"blockSizes\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"blockSizes",
")",
")",
"self",
"[",
"\"blockCount\"",
"]",
"=",
"str",
"(",
"blockCount",
")",
"else",
":",
"if",
"len",
"(",
"intervals",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Multiple intervals provided to non-bed12 entry\"",
")"
] |
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/Bed.py#L167-L199
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.