Dataset columns (name: type and value-length statistics):

nwo: string (lengths 5 to 86)
sha: string (length 40)
path: string (lengths 4 to 189)
language: string (1 class)
identifier: string (lengths 1 to 94)
parameters: string (lengths 2 to 4.03k)
argument_list: string (1 class)
return_statement: string (lengths 0 to 11.5k)
docstring: string (lengths 1 to 33.2k)
docstring_summary: string (lengths 0 to 5.15k)
docstring_tokens: sequence
function: string (lengths 34 to 151k)
function_tokens: sequence
url: string (lengths 90 to 278)

Each record below gives the repository (nwo), commit sha, file path, language, fully qualified identifier, source URL, and the function body.

nwo: ETH3D/badslam
sha: 36cacb24565897190a2d985c60eb4848c174a1fc
path: libvis/third_party/flann/src/python/pyflann/index.py
language: python
identifier: FLANN.nn_index
url: https://github.com/ETH3D/badslam/blob/36cacb24565897190a2d985c60eb4848c174a1fc/libvis/third_party/flann/src/python/pyflann/index.py#L215-L260
function:

    def nn_index(self, qpts, num_neighbors=1, **kwargs):
        """
        For each point in querypts, (which may be a single point), it
        returns the num_neighbors nearest points in the index built by
        calling build_index.
        """
        if self.__curindex is None:
            raise FLANNException(
                'build_index(...) method not called first or current index deleted.')
        if qpts.dtype.type not in allowed_types:
            raise FLANNException('Cannot handle type: %s' % qpts.dtype)
        if self.__curindex_type != qpts.dtype.type:
            raise FLANNException('Index and query must have the same type')
        qpts = ensure_2d_array(qpts, default_flags)
        npts, dim = self.__curindex_data.shape
        if qpts.size == dim:
            qpts.reshape(1, dim)
        nqpts = qpts.shape[0]
        assert qpts.shape[1] == dim, 'data and query must have the same dims'
        assert npts >= num_neighbors, 'more neighbors than there are points'
        result = np.empty((nqpts, num_neighbors), dtype=index_type)
        if self.__curindex_type == np.float64:
            dists = np.empty((nqpts, num_neighbors), dtype=np.float64)
        else:
            dists = np.empty((nqpts, num_neighbors), dtype=np.float32)
        self.__flann_parameters.update(kwargs)
        flann.find_nearest_neighbors_index[
            self.__curindex_type](
                self.__curindex, qpts, nqpts, result, dists, num_neighbors,
                pointer(self.__flann_parameters))
        if num_neighbors == 1:
            return (result.reshape(nqpts), dists.reshape(nqpts))
        else:
            return (result, dists)

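A minimal usage sketch of the build_index / nn_index flow described by this record's docstring. It is an illustration only: it assumes pyflann is importable, that float32 is among its allowed dtypes, and the array sizes are made up.

    import numpy as np
    from pyflann import FLANN

    dataset = np.random.rand(1000, 128).astype(np.float32)   # reference points
    queries = np.random.rand(10, 128).astype(np.float32)     # query points

    flann = FLANN()
    flann.build_index(dataset)                                # must be called before nn_index
    idx, dists = flann.nn_index(queries, num_neighbors=5)
    print(idx.shape, dists.shape)                             # expected: (10, 5) (10, 5)
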
nwo: mantidproject/mantid
sha: 03deeb89254ec4289edb8771e0188c2090a02f32
path: qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_view.py
language: python
identifier: BasicFittingView.start_x
url: https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_view.py#L207-L209
function:

    def start_x(self, value: float) -> None:
        """Sets the selected start X."""
        self.fit_function_options.start_x = value

nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/command/install_lib.py
language: python
identifier: install_lib.get_exclusions
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/command/install_lib.py#L17-L29
function:

    def get_exclusions(self):
        """
        Return a collections.Sized collections.Container of paths to be
        excluded for single_version_externally_managed installations.
        """
        all_packages = (
            pkg
            for ns_pkg in self._get_SVEM_NSPs()
            for pkg in self._all_packages(ns_pkg)
        )
        excl_specs = product(all_packages, self._gen_exclusion_paths())
        return set(starmap(self._exclude_pkg_path, excl_specs))

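The exclusion set above is a cross product of namespace-package names and per-package exclusion paths. A small self-contained sketch of the same itertools pattern; the package names, suffixes, and the exclude_pkg_path helper below are made up for illustration and are not part of setuptools.

    from itertools import product, starmap
    import os

    packages = ['pkg_a', 'pkg_a.sub', 'pkg_b']       # hypothetical namespace packages
    suffixes = ['__init__.py', '__init__.pyc']       # hypothetical exclusion paths

    def exclude_pkg_path(pkg, suffix):
        # Dots in the package name become directory separators.
        return os.path.join(pkg.replace('.', os.sep), suffix)

    exclusions = set(starmap(exclude_pkg_path, product(packages, suffixes)))
    print(sorted(exclusions))
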
nwo: indutny/candor
sha: 48e7260618f5091c80a3416828e2808cad3ea22e
path: tools/gyp/pylib/gyp/mac_tool.py
language: python
identifier: MacTool.ExecCopyInfoPlist
url: https://github.com/indutny/candor/blob/48e7260618f5091c80a3416828e2808cad3ea22e/tools/gyp/pylib/gyp/mac_tool.py#L108-L130
function:

    def ExecCopyInfoPlist(self, source, dest):
      """Copies the |source| Info.plist to the destination directory |dest|."""
      # Read the source Info.plist into memory.
      fd = open(source, 'r')
      lines = fd.read()
      fd.close()

      # Go through all the environment variables and replace them as variables in
      # the file.
      for key in os.environ:
        if key.startswith('_'):
          continue
        evar = '${%s}' % key
        lines = string.replace(lines, evar, os.environ[key])

      # Write out the file with variables replaced.
      fd = open(dest, 'w')
      fd.write(lines)
      fd.close()

      # Now write out PkgInfo file now that the Info.plist file has been
      # "compiled".
      self._WritePkgInfo(dest)

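The substitution step above expands ${VAR} placeholders from the environment (the original uses the Python 2 string.replace helper). A standalone Python 3 sketch of the same idea; the plist snippet and the PRODUCT_BUNDLE_IDENTIFIER variable are hypothetical examples, not part of the gyp tool.

    import os

    def expand_env_placeholders(text):
        # Replace every ${KEY} with os.environ[KEY], skipping private '_' keys
        # as the gyp tool does.
        for key, value in os.environ.items():
            if key.startswith('_'):
                continue
            text = text.replace('${%s}' % key, value)
        return text

    os.environ.setdefault('PRODUCT_BUNDLE_IDENTIFIER', 'com.example.demo')
    plist = '<key>CFBundleIdentifier</key><string>${PRODUCT_BUNDLE_IDENTIFIER}</string>'
    print(expand_env_placeholders(plist))
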
nwo: ablab/spades
sha: 3a754192b88540524ce6fb69eef5ea9273a38465
path: ioncommunity/AssemblerPlus/lib/hypertext.py
language: python
identifier: ATTR
url: https://github.com/ablab/spades/blob/3a754192b88540524ce6fb69eef5ea9273a38465/ioncommunity/AssemblerPlus/lib/hypertext.py#L232-L234
function:

    def ATTR(**kwargs):
        """Sets the given keyword-arguments as attributes of the current HTML element"""
        THIS()(**kwargs)

nwo: rdiankov/openrave
sha: d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7
path: python/interfaces/visualfeedback.py
language: python
identifier: VisualFeedback.SampleVisibilityGoal
url: https://github.com/rdiankov/openrave/blob/d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7/python/interfaces/visualfeedback.py#L145-L156
function:

    def SampleVisibilityGoal(self, numsamples=None):
        """See :ref:`module-visualfeedback-samplevisibilitygoal`
        """
        cmd = 'SampleVisibilityGoal '
        if numsamples is not None:
            cmd += 'numsamples %d ' % numsamples
        res = self.prob.SendCommand(cmd)
        if res is None:
            raise PlanningError()
        samples = [float(s) for s in res.split()]
        returnedsamples = int(samples[0])
        return numpy.reshape(numpy.array(samples[1:], float), (returnedsamples, (len(samples) - 1) / returnedsamples))

nwo: InsightSoftwareConsortium/ITK
sha: 87acfce9a93d928311c38bc371b666b515b9f19d
path: Modules/ThirdParty/pygccxml/src/pygccxml/declarations/container_traits.py
language: python
identifier: find_container_traits
url: https://github.com/InsightSoftwareConsortium/ITK/blob/87acfce9a93d928311c38bc371b666b515b9f19d/Modules/ThirdParty/pygccxml/src/pygccxml/declarations/container_traits.py#L697-L732
function:

    def find_container_traits(cls_or_string):
        """
        Find the container traits type of a declaration.

        Args:
            cls_or_string (str | declarations.declaration_t): a string

        Returns:
            declarations.container_traits: a container traits
        """
        if utils.is_str(cls_or_string):
            if not templates.is_instantiation(cls_or_string):
                return None
            name = templates.name(cls_or_string)
            if name.startswith('std::'):
                name = name[len('std::'):]
            if name.startswith('std::tr1::'):
                name = name[len('std::tr1::'):]
            for cls_traits in all_container_traits:
                if cls_traits.name() == name:
                    return cls_traits
        else:
            if isinstance(cls_or_string, class_declaration.class_types):
                # Look in the cache.
                if cls_or_string.cache.container_traits is not None:
                    return cls_or_string.cache.container_traits

            # Look for a container traits
            for cls_traits in all_container_traits:
                if cls_traits.is_my_case(cls_or_string):
                    # Store in the cache
                    if isinstance(cls_or_string, class_declaration.class_types):
                        cls_or_string.cache.container_traits = cls_traits
                    return cls_traits

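A hedged sketch of the string-based lookup path shown above, assuming pygccxml is installed and that find_container_traits is reachable through the declarations package (the exact export location is internal to pygccxml):

    from pygccxml import declarations

    # A template instantiation string is matched against the known traits by name.
    traits = declarations.find_container_traits('std::vector<int>')
    if traits is not None:
        print(traits.name())                                 # expected: 'vector'

    # A plain type name is not an instantiation, so the lookup returns None.
    print(declarations.find_container_traits('int'))         # None
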
nwo: hanpfei/chromium-net
sha: 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
path: tools/site_compare/site_compare.py
language: python
identifier: main
url: https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/site_compare/site_compare.py#L161-L172
function:

    def main():
      """Main executable. Parse the command line and invoke the command."""
      cmdline = command_line.CommandLine()

      # The below two commands are currently unstable so have been disabled
      # commands.compare2.CreateCommand(cmdline)
      # commands.maskmaker.CreateCommand(cmdline)
      commands.measure.CreateCommand(cmdline)
      commands.scrape.CreateCommand(cmdline)

      cmdline.ParseCommandLine()
      return 0

nwo: daijifeng001/caffe-rfcn
sha: 543f8f6a4b7c88256ea1445ae951a12d1ad9cffd
path: scripts/cpp_lint.py
language: python
identifier: CheckCheck
url: https://github.com/daijifeng001/caffe-rfcn/blob/543f8f6a4b7c88256ea1445ae951a12d1ad9cffd/scripts/cpp_lint.py#L3278-L3402
function:

    def CheckCheck(filename, clean_lines, linenum, error):
      """Checks the use of CHECK and EXPECT macros.

      Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
      """
      # Decide the set of replacement macros that should be suggested
      lines = clean_lines.elided
      check_macro = None
      start_pos = -1
      for macro in _CHECK_MACROS:
        i = lines[linenum].find(macro)
        if i >= 0:
          check_macro = macro

          # Find opening parenthesis.  Do a regular expression match here
          # to make sure that we are matching the expected CHECK macro, as
          # opposed to some other macro that happens to contain the CHECK
          # substring.
          matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
          if not matched:
            continue
          start_pos = len(matched.group(1))
          break
      if not check_macro or start_pos < 0:
        # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
        return

      # Find end of the boolean expression by matching parentheses
      (last_line, end_line, end_pos) = CloseExpression(
          clean_lines, linenum, start_pos)
      if end_pos < 0:
        return
      if linenum == end_line:
        expression = lines[linenum][start_pos + 1:end_pos - 1]
      else:
        expression = lines[linenum][start_pos + 1:]
        for i in xrange(linenum + 1, end_line):
          expression += lines[i]
        expression += last_line[0:end_pos - 1]

      # Parse expression so that we can take parentheses into account.
      # This avoids false positives for inputs like "CHECK((a < 4) == b)",
      # which is not replaceable by CHECK_LE.
      lhs = ''
      rhs = ''
      operator = None
      while expression:
        matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                        r'==|!=|>=|>|<=|<|\()(.*)$', expression)
        if matched:
          token = matched.group(1)
          if token == '(':
            # Parenthesized operand
            expression = matched.group(2)
            (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
            if end < 0:
              return  # Unmatched parenthesis
            lhs += '(' + expression[0:end]
            expression = expression[end:]
          elif token in ('&&', '||'):
            # Logical and/or operators.  This means the expression
            # contains more than one term, for example:
            #   CHECK(42 < a && a < b);
            #
            # These are not replaceable with CHECK_LE, so bail out early.
            return
          elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
            # Non-relational operator
            lhs += token
            expression = matched.group(2)
          else:
            # Relational operator
            operator = token
            rhs = matched.group(2)
            break
        else:
          # Unparenthesized operand.  Instead of appending to lhs one character
          # at a time, we do another regular expression match to consume several
          # characters at once if possible.  Trivial benchmark shows that this
          # is more efficient when the operands are longer than a single
          # character, which is generally the case.
          matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
          if not matched:
            matched = Match(r'^(\s*\S)(.*)$', expression)
            if not matched:
              break
          lhs += matched.group(1)
          expression = matched.group(2)

      # Only apply checks if we got all parts of the boolean expression
      if not (lhs and operator and rhs):
        return

      # Check that rhs do not contain logical operators.  We already know
      # that lhs is fine since the loop above parses out && and ||.
      if rhs.find('&&') > -1 or rhs.find('||') > -1:
        return

      # At least one of the operands must be a constant literal.  This is
      # to avoid suggesting replacements for unprintable things like
      # CHECK(variable != iterator)
      #
      # The following pattern matches decimal, hex integers, strings, and
      # characters (in that order).
      lhs = lhs.strip()
      rhs = rhs.strip()
      match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
      if Match(match_constant, lhs) or Match(match_constant, rhs):
        # Note: since we know both lhs and rhs, we can provide a more
        # descriptive error message like:
        #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
        # Instead of:
        #   Consider using CHECK_EQ instead of CHECK(a == b)
        #
        # We are still keeping the less descriptive message because if lhs
        # or rhs gets long, the error message might become unreadable.
        error(filename, linenum, 'readability/check', 2,
              'Consider using %s instead of %s(a %s b)' % (
                  _CHECK_REPLACEMENT[check_macro][operator],
                  check_macro, operator))

nwo: microsoft/Azure-Kinect-Sensor-SDK
sha: d87ef578676c05b9a5d23c097502942753bf3777
path: examples/calibration_registration/camera_tools.py
language: python
identifier: r_as_matrix
url: https://github.com/microsoft/Azure-Kinect-Sensor-SDK/blob/d87ef578676c05b9a5d23c097502942753bf3777/examples/calibration_registration/camera_tools.py#L64-L75
function:

    def r_as_matrix(rotation: np.array):
        """Convert a 3vec rotation array to a rotation matrix.

        Args:
            rotation (np.array): 3 vector array representing rotation.

        Returns:
            [np.array]: Rotation matrix.
        """
        rmat = np.zeros(shape=(3, 3))
        cv2.Rodrigues(rotation, rmat)
        return rmat

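A small standalone sketch of the Rodrigues conversion used above, calling OpenCV and NumPy directly rather than the camera_tools module; the rotation vector is an arbitrary example.

    import numpy as np
    import cv2

    rvec = np.array([0.0, 0.0, np.pi / 2.0])   # 90 degree rotation about Z
    rmat, _ = cv2.Rodrigues(rvec)              # cv2.Rodrigues also returns the Jacobian
    print(np.round(rmat, 3))                   # approx [[0,-1,0],[1,0,0],[0,0,1]]
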
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: wx/lib/agw/hyperlink.py
language: python
identifier: HyperLinkCtrl.GetURL
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/hyperlink.py#L561-L564
function:

    def GetURL(self):
        """ Retrieve the URL associated to the :class:`HyperLinkCtrl`. """
        return self._URL

nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/msw/richtext.py
language: python
identifier: RichTextDrawingContext.GetVirtualAttributes
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L1094-L1096
function:

    def GetVirtualAttributes(*args, **kwargs):
        """GetVirtualAttributes(self, RichTextObject obj) -> RichTextAttr"""
        return _richtext.RichTextDrawingContext_GetVirtualAttributes(*args, **kwargs)

nwo: hpi-xnor/BMXNet-v2
sha: af2b1859eafc5c721b1397cef02f946aaf2ce20d
path: python/mxnet/gluon/trainer.py
language: python
identifier: Trainer.load_states
url: https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/gluon/trainer.py#L465-L495
function:

    def load_states(self, fname):
        """Loads trainer states (e.g. optimizer, momentum) from a file.

        Parameters
        ----------
        fname : str
            Path to input states file.

        Note
        ----
        `optimizer.param_dict`, which contains Parameter information (such as
        `lr_mult` and `wd_mult`) will not be loaded from the file, but rather set
        based on current Trainer's parameters.
        """
        if not self._kv_initialized:
            self._init_kvstore()
        if self._params_to_init:
            self._init_params()

        if self._update_on_kvstore:
            self._kvstore.load_optimizer_states(fname)
            self._optimizer = self._kvstore._updater.optimizer
        else:
            with open(fname, 'rb') as f:
                states = f.read()
            for updater in self._updaters:
                updater.set_states(states)
                updater.optimizer = self._updaters[0].optimizer
            self._optimizer = self._updaters[0].optimizer
        param_dict = {i: param for i, param in enumerate(self._params)}
        self._optimizer.param_dict = param_dict

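A minimal round-trip sketch for the save_states / load_states pair on a Gluon Trainer, assuming MXNet is installed; the network, optimizer settings, and file name are placeholders.

    import mxnet as mx
    from mxnet import gluon

    net = gluon.nn.Dense(1)
    net.initialize()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.1, 'momentum': 0.9})

    # ... run some training steps, then checkpoint the optimizer state ...
    trainer.save_states('trainer.states')

    # Later, after rebuilding net and trainer the same way:
    trainer.load_states('trainer.states')
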
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/msw/_controls.py
language: python
identifier: ToolBarToolBase.GetDisabledBitmap
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L3501-L3503
function:

    def GetDisabledBitmap(*args, **kwargs):
        """GetDisabledBitmap(self) -> Bitmap"""
        return _controls_.ToolBarToolBase_GetDisabledBitmap(*args, **kwargs)

nwo: mysql/mysql-router
sha: cc0179f982bb9739a834eb6fd205a56224616133
path: ext/gmock/scripts/upload.py
language: python
identifier: HttpRpcServer._Authenticate
url: https://github.com/mysql/mysql-router/blob/cc0179f982bb9739a834eb6fd205a56224616133/ext/gmock/scripts/upload.py#L347-L352
function:

    def _Authenticate(self):
      """Save the cookie jar after authentication."""
      super(HttpRpcServer, self)._Authenticate()
      if self.save_cookies:
        StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
        self.cookie_jar.save()

nwo: miyosuda/TensorFlowAndroidMNIST
sha: 7b5a4603d2780a8a2834575706e9001977524007
path: jni-build/jni/include/external/bazel_tools/tools/android/merge_manifests.py
language: python
identifier: MergeManifests._ReplaceArgumentPlaceholders
url: https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/external/bazel_tools/tools/android/merge_manifests.py#L253-L270
function:

    def _ReplaceArgumentPlaceholders(self, dom):
      """Replaces argument placeholders with their values.

      Modifies the attribute values of the input node.

      Args:
        dom: Xml node that should get placeholders replaced.
      """
      placeholders = {
          'packageName': self._merger_dom.getElementsByTagName(
              self._MANIFEST).item(0).getAttribute(self._PACKAGE),
      }

      for element in dom.getElementsByTagName('*'):
        for i in range(element.attributes.length):
          attr = element.attributes.item(i)
          attr.value = self._ReplaceArgumentHelper(placeholders, attr.value)

nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/wheel/bdist_wheel.py
language: python
identifier: bdist_wheel.egg2dist
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/wheel/bdist_wheel.py#L437-L492
function:

    def egg2dist(self, egginfo_path, distinfo_path):
        """Convert an .egg-info directory into a .dist-info directory"""
        def adios(p):
            """Appropriately delete directory, file or link."""
            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.exists(p):
                os.unlink(p)

        adios(distinfo_path)

        if not os.path.exists(egginfo_path):
            # There is no egg-info. This is probably because the egg-info
            # file/directory is not named matching the distribution name used
            # to name the archive file. Check for this case and report
            # accordingly.
            import glob
            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
            possible = glob.glob(pat)
            err = "Egg metadata expected at %s but not found" % (egginfo_path,)
            if possible:
                alt = os.path.basename(possible[0])
                err += " (%s found - possible misnamed archive file?)" % (alt,)
            raise ValueError(err)

        if os.path.isfile(egginfo_path):
            # .egg-info is a single file
            pkginfo_path = egginfo_path
            pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
            os.mkdir(distinfo_path)
        else:
            # .egg-info is a directory
            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
            pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)

            # ignore common egg metadata that is useless to wheel
            shutil.copytree(egginfo_path, distinfo_path,
                            ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
                                                 'not-zip-safe'}
                            )

            # delete dependency_links if it is only whitespace
            dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
            with open(dependency_links_path, 'r') as dependency_links_file:
                dependency_links = dependency_links_file.read().strip()
            if not dependency_links:
                adios(dependency_links_path)

        write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)

        for license_path in self.license_paths:
            filename = os.path.basename(license_path)
            shutil.copy(license_path, os.path.join(distinfo_path, filename))

        adios(egginfo_path)

nwo: hpi-xnor/BMXNet-v2
sha: af2b1859eafc5c721b1397cef02f946aaf2ce20d
path: python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
language: python
identifier: add_n
url: https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L141-L143
function:

    def add_n(attrs, inputs, proto_obj):
        """Elementwise sum of arrays"""
        return 'add_n', attrs, inputs

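The translation above simply hands the inputs to MXNet's add_n operator. For reference, a direct call to that operator, assuming MXNet is installed; the arrays are arbitrary examples.

    import mxnet as mx

    a = mx.nd.array([1, 2, 3])
    b = mx.nd.array([10, 20, 30])
    c = mx.nd.array([100, 200, 300])
    print(mx.nd.add_n(a, b, c).asnumpy())   # expected: [111. 222. 333.]
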
nwo: v8mips/v8mips
sha: f0c9cc0bbfd461c7f516799d9a58e9a7395f737e
path: tools/stats-viewer.py
language: python
identifier: ChromeCounter.Value
url: https://github.com/v8mips/v8mips/blob/f0c9cc0bbfd461c7f516799d9a58e9a7395f737e/tools/stats-viewer.py#L398-L400
function:

    def Value(self):
      """Return the integer value of this counter."""
      return self.data.IntAt(self.value_offset)

nwo: hughperkins/tf-coriander
sha: 970d3df6c11400ad68405f22b0c42a52374e94ca
path: tensorflow/contrib/learn/python/learn/preprocessing/text.py
language: python
identifier: VocabularyProcessor.save
url: https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/learn/python/learn/preprocessing/text.py#L206-L213
function:

    def save(self, filename):
      """Saves vocabulary processor into given file.

      Args:
        filename: Path to output file.
      """
      with gfile.Open(filename, 'wb') as f:
        f.write(pickle.dumps(self))

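A hedged usage sketch for the save path above, assuming the tf.contrib.learn preprocessing module is available (it was removed in later TensorFlow releases); restore is the matching classmethod and the file name is a placeholder.

    from tensorflow.contrib.learn.python.learn.preprocessing import text

    vp = text.VocabularyProcessor(max_document_length=5)
    ids = list(vp.fit_transform(['the quick brown fox', 'the lazy dog']))
    vp.save('vocab.pkl')

    # Later / elsewhere, load the pickled processor back:
    vp2 = text.VocabularyProcessor.restore('vocab.pkl')
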
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/osx_cocoa/_windows.py
language: python
identifier: Printout.GetPaperRectPixels
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L5367-L5369
function:

    def GetPaperRectPixels(*args, **kwargs):
        """GetPaperRectPixels(self) -> Rect"""
        return _windows_.Printout_GetPaperRectPixels(*args, **kwargs)

nwo: idaholab/moose
sha: 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
path: python/FactorySystem/Parser.py
language: python
identifier: Parser.check
url: https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/FactorySystem/Parser.py#L61-L64
function:

    def check(self):
        """Perform error checking on the loaded hit tree"""
        for err in Parser.checkDuplicates(self.root):
            self.error(*err)

nwo: panda3d/panda3d
sha: 833ad89ebad58395d0af0b7ec08538e5e4308265
path: direct/src/gui/DirectOptionMenu.py
language: python
identifier: DirectOptionMenu.hidePopupMenu
url: https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/gui/DirectOptionMenu.py#L250-L253
function:

    def hidePopupMenu(self, event=None):
        """ Put away popup and cancel frame """
        self.popupMenu.hide()
        self.cancelFrame.hide()

nwo: baidu-research/tensorflow-allreduce
sha: 66d5b855e90b0949e9fa5cca5599fd729a70e874
path: tensorflow/contrib/linalg/python/ops/linear_operator.py
language: python
identifier: LinearOperator._check_input_dtype
url: https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/linalg/python/ops/linear_operator.py#L573-L578
function:

    def _check_input_dtype(self, arg):
      """Check that arg.dtype == self.dtype."""
      if arg.dtype != self.dtype:
        raise TypeError(
            "Expected argument to have dtype %s. Found: %s in tensor %s" %
            (self.dtype, arg.dtype, arg))

nwo: mantidproject/mantid
sha: 03deeb89254ec4289edb8771e0188c2090a02f32
path: Framework/PythonInterface/plugins/algorithms/LoadVesuvio.py
language: python
identifier: LoadVesuvio._get_foil_periods
url: https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/LoadVesuvio.py#L783-L808
function:

    def _get_foil_periods(self):
        """
        Return the period numbers (starting from 1) that contribute to the
        respective foil states
        """
        if self._nperiods == 2:
            foil_out_periods = (2,)
            foil_thin_periods = (1,)
            foil_thick_periods = None
        elif self._nperiods == 3:
            foil_out_periods = (3,)
            foil_thin_periods = (2,)
            foil_thick_periods = (1,)
        elif self._nperiods == 6:
            if self._spectra_type == BACKWARD:
                foil_out_periods = (5, 6)
                foil_thin_periods = (3, 4)
                foil_thick_periods = (1, 2)
            else:
                foil_out_periods = (4, 5, 6)
                foil_thin_periods = (1, 2, 3)
                foil_thick_periods = (1, 2)
        else:
            pass

        return foil_out_periods, foil_thin_periods, foil_thick_periods

nwo: apple/turicreate
sha: cce55aa5311300e3ce6af93cb45ba791fd1bdf49
path: deps/src/libxml2-2.9.1/python/libxml2class.py
language: python
identifier: resetLastError
url: https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L1146-L1149
function:

    def resetLastError():
        """Cleanup the last global error registered. For parsing error
           this does not change the well-formedness result. """
        libxml2mod.xmlResetLastError()

nwo: intel/llvm
sha: e6d0547e9d99b5a56430c4749f6c7e328bf221ab
path: llvm/tools/sancov/coverage-report-server.py
language: python
identifier: SymcovData.compute_filecoverage
url: https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/llvm/tools/sancov/coverage-report-server.py#L107-L117
function:

    def compute_filecoverage(self):
        """Build a filename->pct coverage."""
        result = dict()
        for filename, fns in self.point_symbol_info.items():
            file_points = []
            for fn, points in fns.items():
                file_points.extend(points.keys())
            covered_points = self.covered_points & set(file_points)
            result[filename] = int(math.ceil(
                len(covered_points) * 100 / len(file_points)))
        return result

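The per-file percentage above is just covered points divided by total instrumented points, rounded up. A standalone sketch with made-up point data showing the same computation:

    import math

    point_symbol_info = {
        'foo.cc': {'foo()': {'0x1': '1:1', '0x2': '2:1'},
                   'bar()': {'0x3': '9:1', '0x4': '12:1'}},
    }
    covered_points = {'0x1', '0x3', '0x4'}

    for filename, fns in point_symbol_info.items():
        file_points = [p for points in fns.values() for p in points]
        covered = covered_points & set(file_points)
        pct = int(math.ceil(len(covered) * 100 / len(file_points)))
        print(filename, pct)   # expected: foo.cc 75
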
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/gtk/aui.py
language: python
identifier: AuiDockInfo.IsHorizontal
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L885-L887
function:

    def IsHorizontal(*args, **kwargs):
        """IsHorizontal(self) -> bool"""
        return _aui.AuiDockInfo_IsHorizontal(*args, **kwargs)

nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: wx/lib/agw/ultimatelistctrl.py
language: python
identifier: UltimateListMainWindow.GetSelectedItemCount
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L9489-L9508
function:

    def GetSelectedItemCount(self):
        """ Returns the number of selected items in :class:`UltimateListCtrl`. """
        # deal with the quick case first
        if self.IsSingleSel():
            return (self.HasCurrent() and [self.IsHighlighted(self._current)] or [False])[0]

        # virtual controls remember all their selections themselves
        if self.IsVirtual():
            return self._selStore.GetSelectedCount()

        # TODO: we probably should maintain the number of items selected even for
        # non virtual controls as enumerating all lines is really slow...
        countSel = 0
        count = self.GetItemCount()
        for line in xrange(count):
            if self.GetLine(line).IsHighlighted():
                countSel += 1

        return countSel

nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/configdialog.py
language: python
identifier: ConfigDialog.deactivate_current_config
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/configdialog.py#L217-L226
function:

    def deactivate_current_config(self):
        """Remove current key bindings.

        Iterate over window instances defined in parent and remove
        the keybindings.
        """
        # Before a config is saved, some cleanup of current
        # config must be done - remove the previous keybindings.
        win_instances = self.parent.instance_dict.keys()
        for instance in win_instances:
            instance.RemoveKeybindings()

nwo: apache/qpid-proton
sha: 6bcdfebb55ea3554bc29b1901422532db331a591
path: python/proton/_delivery.py
language: python
identifier: Delivery.local_state
url: https://github.com/apache/qpid-proton/blob/6bcdfebb55ea3554bc29b1901422532db331a591/python/proton/_delivery.py#L380-L382
function:

    def local_state(self) -> DispositionType:
        """A local state of the delivery."""
        return DispositionType.get(pn_delivery_local_state(self._impl))

Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/cross_device_ops.py | python | AllReduceCrossDeviceOps._do_batch_all_reduce_sparse | (self, reduce_op, sparse_values) | return self._simple_cross_replica_ops.batch_reduce(
reduce_op, zip(sparse_values, sparse_values)) | Run batch all-reduce for sparse values. | Run batch all-reduce for sparse values. | [
"Run",
"batch",
"all",
"-",
"reduce",
"for",
"sparse",
"values",
"."
] | def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):
"""Run batch all-reduce for sparse values."""
logging.log_first_n(
logging.WARN,
"Efficient allreduce is not supported for %d IndexedSlices" %
len(sparse_values), 10)
# Use `sparse_values` as destinations to do all-reduces. It is effectively
# an allgather under the hood but not an efficient one.
return self._simple_cross_replica_ops.batch_reduce(
reduce_op, zip(sparse_values, sparse_values)) | [
"def",
"_do_batch_all_reduce_sparse",
"(",
"self",
",",
"reduce_op",
",",
"sparse_values",
")",
":",
"logging",
".",
"log_first_n",
"(",
"logging",
".",
"WARN",
",",
"\"Efficient allreduce is not supported for %d IndexedSlices\"",
"%",
"len",
"(",
"sparse_values",
")",
",",
"10",
")",
"# Use `sparse_values` as destinations to do all-reduces. It is effectively",
"# an allgather under the hood but not an efficient one.",
"return",
"self",
".",
"_simple_cross_replica_ops",
".",
"batch_reduce",
"(",
"reduce_op",
",",
"zip",
"(",
"sparse_values",
",",
"sparse_values",
")",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/cross_device_ops.py#L774-L783 |
|
gnuradio/gnuradio | 09c3c4fa4bfb1a02caac74cb5334dfe065391e3b | gr-digital/python/digital/qa_ofdm_txrx.py | python | test_ofdm_txrx.test_004_tx1packet_large_fO | (self) | Transmit one packet, with slight AWGN and large frequency offset.
Check packet is received and no bit errors have occurred. | Transmit one packet, with slight AWGN and large frequency offset.
Check packet is received and no bit errors have occurred. | [
"Transmit",
"one",
"packet",
"with",
"slight",
"AWGN",
"and",
"large",
"frequency",
"offset",
".",
"Check",
"packet",
"is",
"received",
"and",
"no",
"bit",
"errors",
"have",
"occurred",
"."
] | def test_004_tx1packet_large_fO(self):
""" Transmit one packet, with slight AWGN and large frequency offset.
Check packet is received and no bit errors have occurred. """
fft_len = 64
len_tag_key = 'frame_len'
n_bytes = 21
test_data = list([random.randint(0, 255) for x in range(n_bytes)])
#test_data = tuple([255 for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier
frequency_offset = 1.0 / fft_len * 2.5
channel = channels.channel_model(0.00001, frequency_offset)
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(test_data, rx_data) | [
"def",
"test_004_tx1packet_large_fO",
"(",
"self",
")",
":",
"fft_len",
"=",
"64",
"len_tag_key",
"=",
"'frame_len'",
"n_bytes",
"=",
"21",
"test_data",
"=",
"list",
"(",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"255",
")",
"for",
"x",
"in",
"range",
"(",
"n_bytes",
")",
"]",
")",
"#test_data = tuple([255 for x in range(n_bytes)])",
"# 1.0/fft_len is one sub-carrier",
"frequency_offset",
"=",
"1.0",
"/",
"fft_len",
"*",
"2.5",
"channel",
"=",
"channels",
".",
"channel_model",
"(",
"0.00001",
",",
"frequency_offset",
")",
"# Tx",
"tx_fg",
"=",
"ofdm_tx_fg",
"(",
"test_data",
",",
"len_tag_key",
")",
"tx_fg",
".",
"run",
"(",
")",
"tx_samples",
"=",
"tx_fg",
".",
"get_tx_samples",
"(",
")",
"# Rx",
"rx_fg",
"=",
"ofdm_rx_fg",
"(",
"tx_samples",
",",
"len_tag_key",
",",
"channel",
",",
"prepend_zeros",
"=",
"100",
")",
"rx_fg",
".",
"run",
"(",
")",
"rx_data",
"=",
"rx_fg",
".",
"get_rx_bytes",
"(",
")",
"self",
".",
"assertEqual",
"(",
"test_data",
",",
"rx_data",
")"
] | https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/gr-digital/python/digital/qa_ofdm_txrx.py#L172-L191 |
||
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/roslisp/rosbuild/scripts/genmsg_lisp.py | python | write_defclass | (s, spec) | Writes the defclass that defines the message type | Writes the defclass that defines the message type | [
"Writes",
"the",
"defclass",
"that",
"defines",
"the",
"message",
"type"
] | def write_defclass(s, spec):
"Writes the defclass that defines the message type"
cl = message_class(spec)
new_cl = new_message_class(spec)
suffix = 'srv' if spec.component_type == 'service' else 'msg'
s.write('(cl:defclass %s (roslisp-msg-protocol:ros-message)'%cl)
with Indent(s):
s.write('(')
with Indent(s, inc=1, indent_first=False):
for field in spec.parsed_fields():
write_slot_definition(s, field)
s.write(')', indent=False)
s.write(')')
s.newline()
s.write('(cl:defclass %s (%s)'%(new_cl, cl))
with Indent(s):
s.write('())')
s.newline()
s.write('(cl:defmethod cl:initialize-instance :after ((m %s) cl:&rest args)'%cl)
with Indent(s):
s.write('(cl:declare (cl:ignorable args))')
s.write('(cl:unless (cl:typep m \'%s)'%new_cl)
with Indent(s):
s.write('(roslisp-msg-protocol:msg-deprecation-warning "using old message class name %s-%s:%s is deprecated: use %s-%s:%s instead.")))'%(spec.package, suffix, cl, spec.package, suffix, new_cl)) | [
"def",
"write_defclass",
"(",
"s",
",",
"spec",
")",
":",
"cl",
"=",
"message_class",
"(",
"spec",
")",
"new_cl",
"=",
"new_message_class",
"(",
"spec",
")",
"suffix",
"=",
"'srv'",
"if",
"spec",
".",
"component_type",
"==",
"'service'",
"else",
"'msg'",
"s",
".",
"write",
"(",
"'(cl:defclass %s (roslisp-msg-protocol:ros-message)'",
"%",
"cl",
")",
"with",
"Indent",
"(",
"s",
")",
":",
"s",
".",
"write",
"(",
"'('",
")",
"with",
"Indent",
"(",
"s",
",",
"inc",
"=",
"1",
",",
"indent_first",
"=",
"False",
")",
":",
"for",
"field",
"in",
"spec",
".",
"parsed_fields",
"(",
")",
":",
"write_slot_definition",
"(",
"s",
",",
"field",
")",
"s",
".",
"write",
"(",
"')'",
",",
"indent",
"=",
"False",
")",
"s",
".",
"write",
"(",
"')'",
")",
"s",
".",
"newline",
"(",
")",
"s",
".",
"write",
"(",
"'(cl:defclass %s (%s)'",
"%",
"(",
"new_cl",
",",
"cl",
")",
")",
"with",
"Indent",
"(",
"s",
")",
":",
"s",
".",
"write",
"(",
"'())'",
")",
"s",
".",
"newline",
"(",
")",
"s",
".",
"write",
"(",
"'(cl:defmethod cl:initialize-instance :after ((m %s) cl:&rest args)'",
"%",
"cl",
")",
"with",
"Indent",
"(",
"s",
")",
":",
"s",
".",
"write",
"(",
"'(cl:declare (cl:ignorable args))'",
")",
"s",
".",
"write",
"(",
"'(cl:unless (cl:typep m \\'%s)'",
"%",
"new_cl",
")",
"with",
"Indent",
"(",
"s",
")",
":",
"s",
".",
"write",
"(",
"'(roslisp-msg-protocol:msg-deprecation-warning \"using old message class name %s-%s:%s is deprecated: use %s-%s:%s instead.\")))'",
"%",
"(",
"spec",
".",
"package",
",",
"suffix",
",",
"cl",
",",
"spec",
".",
"package",
",",
"suffix",
",",
"new_cl",
")",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/roslisp/rosbuild/scripts/genmsg_lisp.py#L248-L271 |
||
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | clang/bindings/python/clang/cindex.py | python | CursorKind.is_declaration | (self) | return conf.lib.clang_isDeclaration(self) | Test if this is a declaration kind. | Test if this is a declaration kind. | [
"Test",
"if",
"this",
"is",
"a",
"declaration",
"kind",
"."
] | def is_declaration(self):
"""Test if this is a declaration kind."""
return conf.lib.clang_isDeclaration(self) | [
"def",
"is_declaration",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_isDeclaration",
"(",
"self",
")"
] | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/clang/bindings/python/clang/cindex.py#L671-L673 |
|
HKUST-Aerial-Robotics/Fast-Planner | 2ddd7793eecd573dbb5b47e2c985aa06606df3cf | uav_simulator/Utils/multi_map_server/quadrotor_msgs/build/catkin_generated/installspace/_setup_util.py | python | rollback_env_variables | (environ, env_var_subfolders) | return lines | Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks. | Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks. | [
"Generate",
"shell",
"code",
"to",
"reset",
"environment",
"variables",
"by",
"unrolling",
"modifications",
"based",
"on",
"all",
"workspaces",
"in",
"CMAKE_PREFIX_PATH",
".",
"This",
"does",
"not",
"cover",
"modifications",
"performed",
"by",
"environment",
"hooks",
"."
] | def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines | [
"def",
"rollback_env_variables",
"(",
"environ",
",",
"env_var_subfolders",
")",
":",
"lines",
"=",
"[",
"]",
"unmodified_environ",
"=",
"copy",
".",
"copy",
"(",
"environ",
")",
"for",
"key",
"in",
"sorted",
"(",
"env_var_subfolders",
".",
"keys",
"(",
")",
")",
":",
"subfolders",
"=",
"env_var_subfolders",
"[",
"key",
"]",
"if",
"not",
"isinstance",
"(",
"subfolders",
",",
"list",
")",
":",
"subfolders",
"=",
"[",
"subfolders",
"]",
"for",
"subfolder",
"in",
"subfolders",
":",
"value",
"=",
"_rollback_env_variable",
"(",
"unmodified_environ",
",",
"key",
",",
"subfolder",
")",
"if",
"value",
"is",
"not",
"None",
":",
"environ",
"[",
"key",
"]",
"=",
"value",
"lines",
".",
"append",
"(",
"assignment",
"(",
"key",
",",
"value",
")",
")",
"if",
"lines",
":",
"lines",
".",
"insert",
"(",
"0",
",",
"comment",
"(",
"'reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'",
")",
")",
"return",
"lines"
] | https://github.com/HKUST-Aerial-Robotics/Fast-Planner/blob/2ddd7793eecd573dbb5b47e2c985aa06606df3cf/uav_simulator/Utils/multi_map_server/quadrotor_msgs/build/catkin_generated/installspace/_setup_util.py#L62-L81 |
|
genn-team/genn | 75e1eb218cafa228bf36ae4613d1ce26e877b12c | pygenn/genn_groups.py | python | SynapseGroup.push_connectivity_to_device | (self) | Wrapper around GeNNModel.push_connectivity_to_device | Wrapper around GeNNModel.push_connectivity_to_device | [
"Wrapper",
"around",
"GeNNModel",
".",
"push_connectivity_to_device"
] | def push_connectivity_to_device(self):
"""Wrapper around GeNNModel.push_connectivity_to_device"""
self._model.push_connectivity_to_device(self.name) | [
"def",
"push_connectivity_to_device",
"(",
"self",
")",
":",
"self",
".",
"_model",
".",
"push_connectivity_to_device",
"(",
"self",
".",
"name",
")"
] | https://github.com/genn-team/genn/blob/75e1eb218cafa228bf36ae4613d1ce26e877b12c/pygenn/genn_groups.py#L1138-L1140 |
||
RcppCore/RcppParallel | ff49e84602a1771c06bc39fdea995447564f2b7f | src/tbb/python/tbb/__init__.py | python | TBBProcessPool3._repopulate_pool | (self) | Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited. | Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited. | [
"Bring",
"the",
"number",
"of",
"pool",
"processes",
"up",
"to",
"the",
"specified",
"number",
"for",
"use",
"after",
"reaping",
"workers",
"which",
"have",
"exited",
"."
] | def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
from multiprocessing.util import debug
for i in range(self._processes - len(self._pool)):
w = self.Process(target=tbb_process_pool_worker3,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild,
self._wrap_exception)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker') | [
"def",
"_repopulate_pool",
"(",
"self",
")",
":",
"from",
"multiprocessing",
".",
"util",
"import",
"debug",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_processes",
"-",
"len",
"(",
"self",
".",
"_pool",
")",
")",
":",
"w",
"=",
"self",
".",
"Process",
"(",
"target",
"=",
"tbb_process_pool_worker3",
",",
"args",
"=",
"(",
"self",
".",
"_inqueue",
",",
"self",
".",
"_outqueue",
",",
"self",
".",
"_initializer",
",",
"self",
".",
"_initargs",
",",
"self",
".",
"_maxtasksperchild",
",",
"self",
".",
"_wrap_exception",
")",
")",
"self",
".",
"_pool",
".",
"append",
"(",
"w",
")",
"w",
".",
"name",
"=",
"w",
".",
"name",
".",
"replace",
"(",
"'Process'",
",",
"'PoolWorker'",
")",
"w",
".",
"daemon",
"=",
"True",
"w",
".",
"start",
"(",
")",
"debug",
"(",
"'added worker'",
")"
] | https://github.com/RcppCore/RcppParallel/blob/ff49e84602a1771c06bc39fdea995447564f2b7f/src/tbb/python/tbb/__init__.py#L117-L134 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/apitools/ez_setup.py | python | use_setuptools | (
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
) | return do_download() | Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script. | Automatically find/download setuptools and make it available on sys.path | [
"Automatically",
"find",
"/",
"download",
"setuptools",
"and",
"make",
"it",
"available",
"on",
"sys",
".",
"path"
] | def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download() | [
"def",
"use_setuptools",
"(",
"version",
"=",
"DEFAULT_VERSION",
",",
"download_base",
"=",
"DEFAULT_URL",
",",
"to_dir",
"=",
"os",
".",
"curdir",
",",
"download_delay",
"=",
"15",
")",
":",
"was_imported",
"=",
"'pkg_resources'",
"in",
"sys",
".",
"modules",
"or",
"'setuptools'",
"in",
"sys",
".",
"modules",
"def",
"do_download",
"(",
")",
":",
"egg",
"=",
"download_setuptools",
"(",
"version",
",",
"download_base",
",",
"to_dir",
",",
"download_delay",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"egg",
")",
"import",
"setuptools",
"setuptools",
".",
"bootstrap_install_from",
"=",
"egg",
"try",
":",
"import",
"pkg_resources",
"except",
"ImportError",
":",
"return",
"do_download",
"(",
")",
"try",
":",
"pkg_resources",
".",
"require",
"(",
"\"setuptools>=\"",
"+",
"version",
")",
"return",
"except",
"pkg_resources",
".",
"VersionConflict",
",",
"e",
":",
"if",
"was_imported",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"The required version of setuptools (>=%s) is not available, and\\n\"",
"\"can't be installed while this script is running. Please install\\n\"",
"\" a more recent version first, using 'easy_install -U setuptools'.\"",
"\"\\n\\n(Currently using %r)\"",
")",
"%",
"(",
"version",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"except",
"pkg_resources",
".",
"DistributionNotFound",
":",
"pass",
"del",
"pkg_resources",
",",
"sys",
".",
"modules",
"[",
"'pkg_resources'",
"]",
"# reload ok",
"return",
"do_download",
"(",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/apitools/ez_setup.py#L53-L92 |
|
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/peacock/base/PreferenceWidget.py | python | PreferenceWidget.count | (self) | return len(self._widgets) | Returns the number of preferences widgets | Returns the number of preferences widgets | [
"Returns",
"the",
"number",
"of",
"preferences",
"widgets"
] | def count(self):
"""
Returns the number of preferences widgets
"""
return len(self._widgets) | [
"def",
"count",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"_widgets",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/base/PreferenceWidget.py#L64-L68 |
|
raymondlu/super-animation-samples | 04234269112ff0dc32447f27a761dbbb00b8ba17 | samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py | python | SourceRange.end | (self) | return conf.lib.clang_getRangeEnd(self) | Return a SourceLocation representing the last character within a
source range. | Return a SourceLocation representing the last character within a
source range. | [
"Return",
"a",
"SourceLocation",
"representing",
"the",
"last",
"character",
"within",
"a",
"source",
"range",
"."
] | def end(self):
"""
Return a SourceLocation representing the last character within a
source range.
"""
return conf.lib.clang_getRangeEnd(self) | [
"def",
"end",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getRangeEnd",
"(",
"self",
")"
] | https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L256-L261 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/richtext.py | python | RichTextObjectList.__getitem__ | (*args, **kwargs) | return _richtext.RichTextObjectList___getitem__(*args, **kwargs) | __getitem__(self, size_t index) -> RichTextObject | __getitem__(self, size_t index) -> RichTextObject | [
"__getitem__",
"(",
"self",
"size_t",
"index",
")",
"-",
">",
"RichTextObject"
] | def __getitem__(*args, **kwargs):
"""__getitem__(self, size_t index) -> RichTextObject"""
return _richtext.RichTextObjectList___getitem__(*args, **kwargs) | [
"def",
"__getitem__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"RichTextObjectList___getitem__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L1530-L1532 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/series.py | python | Series.axes | (self) | return [self.index] | Return a list of the row axis labels. | Return a list of the row axis labels. | [
"Return",
"a",
"list",
"of",
"the",
"row",
"axis",
"labels",
"."
] | def axes(self):
"""
Return a list of the row axis labels.
"""
return [self.index] | [
"def",
"axes",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"index",
"]"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/series.py#L797-L801 |
|
etternagame/etterna | 8775f74ac9c353320128609d4b4150672e9a6d04 | extern/crashpad/buildtools/checkdeps/builddeps.py | python | DepsBuilder._ApplyRules | (self, existing_rules, includes, specific_includes,
cur_dir_norm) | return rules | Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
include: The list of rules from the "include_rules" section of DEPS.
specific_includes: E.g. {'.*_unittest\.cc': ['+foo', '-blat']} rules
from the "specific_include_rules" section of DEPS.
cur_dir_norm: The current directory, normalized path. We will create an
implicit rule that allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments. | Applies the given include rules, returning the new rules. | [
"Applies",
"the",
"given",
"include",
"rules",
"returning",
"the",
"new",
"rules",
"."
] | def _ApplyRules(self, existing_rules, includes, specific_includes,
cur_dir_norm):
"""Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
include: The list of rules from the "include_rules" section of DEPS.
specific_includes: E.g. {'.*_unittest\.cc': ['+foo', '-blat']} rules
from the "specific_include_rules" section of DEPS.
cur_dir_norm: The current directory, normalized path. We will create an
implicit rule that allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.deepcopy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
base_dir_norm = NormalizePath(self.base_directory)
if not cur_dir_norm.startswith(base_dir_norm):
raise Exception(
'Internal error: base directory is not at the beginning for\n'
' %s and base dir\n'
' %s' % (cur_dir_norm, base_dir_norm))
relative_dir = posixpath.relpath(cur_dir_norm, base_dir_norm)
# Make the help string a little more meaningful.
source = relative_dir or 'top level'
rules.AddRule('+' + relative_dir,
relative_dir,
'Default rule for ' + source)
def ApplyOneRule(rule_str, dependee_regexp=None):
"""Deduces a sensible description for the rule being added, and
adds the rule with its description to |rules|.
If we are ignoring temporary rules, this function does nothing
for rules beginning with the Rule.TEMP_ALLOW character.
"""
if self._ignore_temp_rules and rule_str.startswith(Rule.TEMP_ALLOW):
return
rule_block_name = 'include_rules'
if dependee_regexp:
rule_block_name = 'specific_include_rules'
if relative_dir:
rule_description = relative_dir + "'s %s" % rule_block_name
else:
rule_description = 'the top level %s' % rule_block_name
rules.AddRule(rule_str, relative_dir, rule_description, dependee_regexp)
# Apply the additional explicit rules.
for rule_str in includes:
ApplyOneRule(rule_str)
# Finally, apply the specific rules.
if self._ignore_specific_rules:
return rules
for regexp, specific_rules in specific_includes.iteritems():
for rule_str in specific_rules:
ApplyOneRule(rule_str, regexp)
return rules | [
"def",
"_ApplyRules",
"(",
"self",
",",
"existing_rules",
",",
"includes",
",",
"specific_includes",
",",
"cur_dir_norm",
")",
":",
"rules",
"=",
"copy",
".",
"deepcopy",
"(",
"existing_rules",
")",
"# First apply the implicit \"allow\" rule for the current directory.",
"base_dir_norm",
"=",
"NormalizePath",
"(",
"self",
".",
"base_directory",
")",
"if",
"not",
"cur_dir_norm",
".",
"startswith",
"(",
"base_dir_norm",
")",
":",
"raise",
"Exception",
"(",
"'Internal error: base directory is not at the beginning for\\n'",
"' %s and base dir\\n'",
"' %s'",
"%",
"(",
"cur_dir_norm",
",",
"base_dir_norm",
")",
")",
"relative_dir",
"=",
"posixpath",
".",
"relpath",
"(",
"cur_dir_norm",
",",
"base_dir_norm",
")",
"# Make the help string a little more meaningful.",
"source",
"=",
"relative_dir",
"or",
"'top level'",
"rules",
".",
"AddRule",
"(",
"'+'",
"+",
"relative_dir",
",",
"relative_dir",
",",
"'Default rule for '",
"+",
"source",
")",
"def",
"ApplyOneRule",
"(",
"rule_str",
",",
"dependee_regexp",
"=",
"None",
")",
":",
"\"\"\"Deduces a sensible description for the rule being added, and\n adds the rule with its description to |rules|.\n\n If we are ignoring temporary rules, this function does nothing\n for rules beginning with the Rule.TEMP_ALLOW character.\n \"\"\"",
"if",
"self",
".",
"_ignore_temp_rules",
"and",
"rule_str",
".",
"startswith",
"(",
"Rule",
".",
"TEMP_ALLOW",
")",
":",
"return",
"rule_block_name",
"=",
"'include_rules'",
"if",
"dependee_regexp",
":",
"rule_block_name",
"=",
"'specific_include_rules'",
"if",
"relative_dir",
":",
"rule_description",
"=",
"relative_dir",
"+",
"\"'s %s\"",
"%",
"rule_block_name",
"else",
":",
"rule_description",
"=",
"'the top level %s'",
"%",
"rule_block_name",
"rules",
".",
"AddRule",
"(",
"rule_str",
",",
"relative_dir",
",",
"rule_description",
",",
"dependee_regexp",
")",
"# Apply the additional explicit rules.",
"for",
"rule_str",
"in",
"includes",
":",
"ApplyOneRule",
"(",
"rule_str",
")",
"# Finally, apply the specific rules.",
"if",
"self",
".",
"_ignore_specific_rules",
":",
"return",
"rules",
"for",
"regexp",
",",
"specific_rules",
"in",
"specific_includes",
".",
"iteritems",
"(",
")",
":",
"for",
"rule_str",
"in",
"specific_rules",
":",
"ApplyOneRule",
"(",
"rule_str",
",",
"regexp",
")",
"return",
"rules"
] | https://github.com/etternagame/etterna/blob/8775f74ac9c353320128609d4b4150672e9a6d04/extern/crashpad/buildtools/checkdeps/builddeps.py#L124-L187 |
|
greenheartgames/greenworks | 3ea4ab490b56676de3f0a237c74bcfdb17323e60 | deps/cpplint/cpplint.py | python | ParseNolintSuppressions | (filename, raw_line, linenum, error) | Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler. | Updates the global list of line error-suppressions. | [
"Updates",
"the",
"global",
"list",
"of",
"line",
"error",
"-",
"suppressions",
"."
] | def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category) | [
"def",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_line",
",",
"linenum",
",",
"error",
")",
":",
"matched",
"=",
"Search",
"(",
"r'\\bNOLINT(NEXTLINE)?\\b(\\([^)]+\\))?'",
",",
"raw_line",
")",
"if",
"matched",
":",
"if",
"matched",
".",
"group",
"(",
"1",
")",
":",
"suppressed_line",
"=",
"linenum",
"+",
"1",
"else",
":",
"suppressed_line",
"=",
"linenum",
"category",
"=",
"matched",
".",
"group",
"(",
"2",
")",
"if",
"category",
"in",
"(",
"None",
",",
"'(*)'",
")",
":",
"# => \"suppress all\"",
"_error_suppressions",
".",
"setdefault",
"(",
"None",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"suppressed_line",
")",
"else",
":",
"if",
"category",
".",
"startswith",
"(",
"'('",
")",
"and",
"category",
".",
"endswith",
"(",
"')'",
")",
":",
"category",
"=",
"category",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"category",
"in",
"_ERROR_CATEGORIES",
":",
"_error_suppressions",
".",
"setdefault",
"(",
"category",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"suppressed_line",
")",
"elif",
"category",
"not",
"in",
"_LEGACY_ERROR_CATEGORIES",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/nolint'",
",",
"5",
",",
"'Unknown NOLINT error category: %s'",
"%",
"category",
")"
] | https://github.com/greenheartgames/greenworks/blob/3ea4ab490b56676de3f0a237c74bcfdb17323e60/deps/cpplint/cpplint.py#L571-L600 |
||
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/framework/function.py | python | _DefinedFunction.definition | (self) | return self._definition | Function definition proto. | Function definition proto. | [
"Function",
"definition",
"proto",
"."
] | def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
return self._definition | [
"def",
"definition",
"(",
"self",
")",
":",
"self",
".",
"_create_definition_if_needed",
"(",
")",
"return",
"self",
".",
"_definition"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/function.py#L461-L464 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/stc.py | python | StyledTextCtrl.SetCaretWidth | (*args, **kwargs) | return _stc.StyledTextCtrl_SetCaretWidth(*args, **kwargs) | SetCaretWidth(self, int pixelWidth)
Set the width of the insert mode caret. | SetCaretWidth(self, int pixelWidth) | [
"SetCaretWidth",
"(",
"self",
"int",
"pixelWidth",
")"
] | def SetCaretWidth(*args, **kwargs):
"""
SetCaretWidth(self, int pixelWidth)
Set the width of the insert mode caret.
"""
return _stc.StyledTextCtrl_SetCaretWidth(*args, **kwargs) | [
"def",
"SetCaretWidth",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetCaretWidth",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L3697-L3703 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/sessions.py | python | SessionRedirectMixin.rebuild_auth | (self, prepared_request, response) | return | When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss. | When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss. | [
"When",
"being",
"redirected",
"we",
"may",
"want",
"to",
"strip",
"authentication",
"from",
"the",
"request",
"to",
"avoid",
"leaking",
"credentials",
".",
"This",
"method",
"intelligently",
"removes",
"and",
"reapplies",
"authentication",
"where",
"possible",
"to",
"avoid",
"credential",
"loss",
"."
] | def rebuild_auth(self, prepared_request, response):
"""
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return | [
"def",
"rebuild_auth",
"(",
"self",
",",
"prepared_request",
",",
"response",
")",
":",
"headers",
"=",
"prepared_request",
".",
"headers",
"url",
"=",
"prepared_request",
".",
"url",
"if",
"'Authorization'",
"in",
"headers",
":",
"# If we get redirected to a new host, we should strip out any",
"# authentication headers.",
"original_parsed",
"=",
"urlparse",
"(",
"response",
".",
"request",
".",
"url",
")",
"redirect_parsed",
"=",
"urlparse",
"(",
"url",
")",
"if",
"(",
"original_parsed",
".",
"hostname",
"!=",
"redirect_parsed",
".",
"hostname",
")",
":",
"del",
"headers",
"[",
"'Authorization'",
"]",
"# .netrc might have more auth for us on our new host.",
"new_auth",
"=",
"get_netrc_auth",
"(",
"url",
")",
"if",
"self",
".",
"trust_env",
"else",
"None",
"if",
"new_auth",
"is",
"not",
"None",
":",
"prepared_request",
".",
"prepare_auth",
"(",
"new_auth",
")",
"return"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/sessions.py#L204-L227 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/ops/composite/multitype_ops/add_impl.py | python | _tensor_add_tensor | (x, y) | return F.add(x, y) | Returns x + y element-wise.
Args:
x (Tensor): x
y (Tensor): The dtype is same as x.
Returns:
Tensor, has the same dtype as x. | Returns x + y element-wise. | [
"Returns",
"x",
"+",
"y",
"element",
"-",
"wise",
"."
] | def _tensor_add_tensor(x, y):
"""
Returns x + y element-wise.
Args:
x (Tensor): x
y (Tensor): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
return F.add(x, y) | [
"def",
"_tensor_add_tensor",
"(",
"x",
",",
"y",
")",
":",
"return",
"F",
".",
"add",
"(",
"x",
",",
"y",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/composite/multitype_ops/add_impl.py#L201-L212 |
|
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/serial/serialwin32.py | python | Serial._reconfigure_port | (self) | Set communication parameters on opened port. | Set communication parameters on opened port. | [
"Set",
"communication",
"parameters",
"on",
"opened",
"port",
"."
] | def _reconfigure_port(self):
"""Set communication parameters on opened port."""
if not self._port_handle:
raise SerialException("Can only operate on a valid port handle")
# Set Windows timeout values
# timeouts is a tuple with the following items:
# (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
timeouts = win32.COMMTIMEOUTS()
if self._timeout is None:
pass # default of all zeros is OK
elif self._timeout == 0:
timeouts.ReadIntervalTimeout = win32.MAXDWORD
else:
timeouts.ReadTotalTimeoutConstant = max(int(self._timeout * 1000), 1)
if self._timeout != 0 and self._inter_byte_timeout is not None:
timeouts.ReadIntervalTimeout = max(int(self._inter_byte_timeout * 1000), 1)
if self._write_timeout is None:
pass
elif self._write_timeout == 0:
timeouts.WriteTotalTimeoutConstant = win32.MAXDWORD
else:
timeouts.WriteTotalTimeoutConstant = max(int(self._write_timeout * 1000), 1)
win32.SetCommTimeouts(self._port_handle, ctypes.byref(timeouts))
win32.SetCommMask(self._port_handle, win32.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32.DCB()
win32.GetCommState(self._port_handle, ctypes.byref(comDCB))
comDCB.BaudRate = self._baudrate
if self._bytesize == serial.FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == serial.SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == serial.SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == serial.EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: {!r}".format(self._bytesize))
if self._parity == serial.PARITY_NONE:
comDCB.Parity = win32.NOPARITY
comDCB.fParity = 0 # Disable Parity Check
elif self._parity == serial.PARITY_EVEN:
comDCB.Parity = win32.EVENPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_ODD:
comDCB.Parity = win32.ODDPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_MARK:
comDCB.Parity = win32.MARKPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == serial.PARITY_SPACE:
comDCB.Parity = win32.SPACEPARITY
comDCB.fParity = 1 # Enable Parity Check
else:
raise ValueError("Unsupported parity mode: {!r}".format(self._parity))
if self._stopbits == serial.STOPBITS_ONE:
comDCB.StopBits = win32.ONESTOPBIT
elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
comDCB.StopBits = win32.ONE5STOPBITS
elif self._stopbits == serial.STOPBITS_TWO:
comDCB.StopBits = win32.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: {!r}".format(self._stopbits))
comDCB.fBinary = 1 # Enable Binary Transmission
# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
if self._rs485_mode is None:
if self._rtscts:
comDCB.fRtsControl = win32.RTS_CONTROL_HANDSHAKE
else:
comDCB.fRtsControl = win32.RTS_CONTROL_ENABLE if self._rts_state else win32.RTS_CONTROL_DISABLE
comDCB.fOutxCtsFlow = self._rtscts
else:
# checks for unsupported settings
# XXX verify if platform really does not have a setting for those
if not self._rs485_mode.rts_level_for_tx:
raise ValueError(
'Unsupported value for RS485Settings.rts_level_for_tx: {!r}'.format(
self._rs485_mode.rts_level_for_tx,))
if self._rs485_mode.rts_level_for_rx:
raise ValueError(
'Unsupported value for RS485Settings.rts_level_for_rx: {!r}'.format(
self._rs485_mode.rts_level_for_rx,))
if self._rs485_mode.delay_before_tx is not None:
raise ValueError(
'Unsupported value for RS485Settings.delay_before_tx: {!r}'.format(
self._rs485_mode.delay_before_tx,))
if self._rs485_mode.delay_before_rx is not None:
raise ValueError(
'Unsupported value for RS485Settings.delay_before_rx: {!r}'.format(
self._rs485_mode.delay_before_rx,))
if self._rs485_mode.loopback:
raise ValueError(
'Unsupported value for RS485Settings.loopback: {!r}'.format(
self._rs485_mode.loopback,))
comDCB.fRtsControl = win32.RTS_CONTROL_TOGGLE
comDCB.fOutxCtsFlow = 0
if self._dsrdtr:
comDCB.fDtrControl = win32.DTR_CONTROL_HANDSHAKE
else:
comDCB.fDtrControl = win32.DTR_CONTROL_ENABLE if self._dtr_state else win32.DTR_CONTROL_DISABLE
comDCB.fOutxDsrFlow = self._dsrdtr
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
comDCB.XonChar = serial.XON
comDCB.XoffChar = serial.XOFF
if not win32.SetCommState(self._port_handle, ctypes.byref(comDCB)):
raise SerialException(
'Cannot configure port, something went wrong. '
'Original message: {!r}'.format(ctypes.WinError())) | [
"def",
"_reconfigure_port",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_port_handle",
":",
"raise",
"SerialException",
"(",
"\"Can only operate on a valid port handle\"",
")",
"# Set Windows timeout values",
"# timeouts is a tuple with the following items:",
"# (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,",
"# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,",
"# WriteTotalTimeoutConstant)",
"timeouts",
"=",
"win32",
".",
"COMMTIMEOUTS",
"(",
")",
"if",
"self",
".",
"_timeout",
"is",
"None",
":",
"pass",
"# default of all zeros is OK",
"elif",
"self",
".",
"_timeout",
"==",
"0",
":",
"timeouts",
".",
"ReadIntervalTimeout",
"=",
"win32",
".",
"MAXDWORD",
"else",
":",
"timeouts",
".",
"ReadTotalTimeoutConstant",
"=",
"max",
"(",
"int",
"(",
"self",
".",
"_timeout",
"*",
"1000",
")",
",",
"1",
")",
"if",
"self",
".",
"_timeout",
"!=",
"0",
"and",
"self",
".",
"_inter_byte_timeout",
"is",
"not",
"None",
":",
"timeouts",
".",
"ReadIntervalTimeout",
"=",
"max",
"(",
"int",
"(",
"self",
".",
"_inter_byte_timeout",
"*",
"1000",
")",
",",
"1",
")",
"if",
"self",
".",
"_write_timeout",
"is",
"None",
":",
"pass",
"elif",
"self",
".",
"_write_timeout",
"==",
"0",
":",
"timeouts",
".",
"WriteTotalTimeoutConstant",
"=",
"win32",
".",
"MAXDWORD",
"else",
":",
"timeouts",
".",
"WriteTotalTimeoutConstant",
"=",
"max",
"(",
"int",
"(",
"self",
".",
"_write_timeout",
"*",
"1000",
")",
",",
"1",
")",
"win32",
".",
"SetCommTimeouts",
"(",
"self",
".",
"_port_handle",
",",
"ctypes",
".",
"byref",
"(",
"timeouts",
")",
")",
"win32",
".",
"SetCommMask",
"(",
"self",
".",
"_port_handle",
",",
"win32",
".",
"EV_ERR",
")",
"# Setup the connection info.",
"# Get state and modify it:",
"comDCB",
"=",
"win32",
".",
"DCB",
"(",
")",
"win32",
".",
"GetCommState",
"(",
"self",
".",
"_port_handle",
",",
"ctypes",
".",
"byref",
"(",
"comDCB",
")",
")",
"comDCB",
".",
"BaudRate",
"=",
"self",
".",
"_baudrate",
"if",
"self",
".",
"_bytesize",
"==",
"serial",
".",
"FIVEBITS",
":",
"comDCB",
".",
"ByteSize",
"=",
"5",
"elif",
"self",
".",
"_bytesize",
"==",
"serial",
".",
"SIXBITS",
":",
"comDCB",
".",
"ByteSize",
"=",
"6",
"elif",
"self",
".",
"_bytesize",
"==",
"serial",
".",
"SEVENBITS",
":",
"comDCB",
".",
"ByteSize",
"=",
"7",
"elif",
"self",
".",
"_bytesize",
"==",
"serial",
".",
"EIGHTBITS",
":",
"comDCB",
".",
"ByteSize",
"=",
"8",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported number of data bits: {!r}\"",
".",
"format",
"(",
"self",
".",
"_bytesize",
")",
")",
"if",
"self",
".",
"_parity",
"==",
"serial",
".",
"PARITY_NONE",
":",
"comDCB",
".",
"Parity",
"=",
"win32",
".",
"NOPARITY",
"comDCB",
".",
"fParity",
"=",
"0",
"# Disable Parity Check",
"elif",
"self",
".",
"_parity",
"==",
"serial",
".",
"PARITY_EVEN",
":",
"comDCB",
".",
"Parity",
"=",
"win32",
".",
"EVENPARITY",
"comDCB",
".",
"fParity",
"=",
"1",
"# Enable Parity Check",
"elif",
"self",
".",
"_parity",
"==",
"serial",
".",
"PARITY_ODD",
":",
"comDCB",
".",
"Parity",
"=",
"win32",
".",
"ODDPARITY",
"comDCB",
".",
"fParity",
"=",
"1",
"# Enable Parity Check",
"elif",
"self",
".",
"_parity",
"==",
"serial",
".",
"PARITY_MARK",
":",
"comDCB",
".",
"Parity",
"=",
"win32",
".",
"MARKPARITY",
"comDCB",
".",
"fParity",
"=",
"1",
"# Enable Parity Check",
"elif",
"self",
".",
"_parity",
"==",
"serial",
".",
"PARITY_SPACE",
":",
"comDCB",
".",
"Parity",
"=",
"win32",
".",
"SPACEPARITY",
"comDCB",
".",
"fParity",
"=",
"1",
"# Enable Parity Check",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported parity mode: {!r}\"",
".",
"format",
"(",
"self",
".",
"_parity",
")",
")",
"if",
"self",
".",
"_stopbits",
"==",
"serial",
".",
"STOPBITS_ONE",
":",
"comDCB",
".",
"StopBits",
"=",
"win32",
".",
"ONESTOPBIT",
"elif",
"self",
".",
"_stopbits",
"==",
"serial",
".",
"STOPBITS_ONE_POINT_FIVE",
":",
"comDCB",
".",
"StopBits",
"=",
"win32",
".",
"ONE5STOPBITS",
"elif",
"self",
".",
"_stopbits",
"==",
"serial",
".",
"STOPBITS_TWO",
":",
"comDCB",
".",
"StopBits",
"=",
"win32",
".",
"TWOSTOPBITS",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported number of stop bits: {!r}\"",
".",
"format",
"(",
"self",
".",
"_stopbits",
")",
")",
"comDCB",
".",
"fBinary",
"=",
"1",
"# Enable Binary Transmission",
"# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)",
"if",
"self",
".",
"_rs485_mode",
"is",
"None",
":",
"if",
"self",
".",
"_rtscts",
":",
"comDCB",
".",
"fRtsControl",
"=",
"win32",
".",
"RTS_CONTROL_HANDSHAKE",
"else",
":",
"comDCB",
".",
"fRtsControl",
"=",
"win32",
".",
"RTS_CONTROL_ENABLE",
"if",
"self",
".",
"_rts_state",
"else",
"win32",
".",
"RTS_CONTROL_DISABLE",
"comDCB",
".",
"fOutxCtsFlow",
"=",
"self",
".",
"_rtscts",
"else",
":",
"# checks for unsupported settings",
"# XXX verify if platform really does not have a setting for those",
"if",
"not",
"self",
".",
"_rs485_mode",
".",
"rts_level_for_tx",
":",
"raise",
"ValueError",
"(",
"'Unsupported value for RS485Settings.rts_level_for_tx: {!r}'",
".",
"format",
"(",
"self",
".",
"_rs485_mode",
".",
"rts_level_for_tx",
",",
")",
")",
"if",
"self",
".",
"_rs485_mode",
".",
"rts_level_for_rx",
":",
"raise",
"ValueError",
"(",
"'Unsupported value for RS485Settings.rts_level_for_rx: {!r}'",
".",
"format",
"(",
"self",
".",
"_rs485_mode",
".",
"rts_level_for_rx",
",",
")",
")",
"if",
"self",
".",
"_rs485_mode",
".",
"delay_before_tx",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Unsupported value for RS485Settings.delay_before_tx: {!r}'",
".",
"format",
"(",
"self",
".",
"_rs485_mode",
".",
"delay_before_tx",
",",
")",
")",
"if",
"self",
".",
"_rs485_mode",
".",
"delay_before_rx",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Unsupported value for RS485Settings.delay_before_rx: {!r}'",
".",
"format",
"(",
"self",
".",
"_rs485_mode",
".",
"delay_before_rx",
",",
")",
")",
"if",
"self",
".",
"_rs485_mode",
".",
"loopback",
":",
"raise",
"ValueError",
"(",
"'Unsupported value for RS485Settings.loopback: {!r}'",
".",
"format",
"(",
"self",
".",
"_rs485_mode",
".",
"loopback",
",",
")",
")",
"comDCB",
".",
"fRtsControl",
"=",
"win32",
".",
"RTS_CONTROL_TOGGLE",
"comDCB",
".",
"fOutxCtsFlow",
"=",
"0",
"if",
"self",
".",
"_dsrdtr",
":",
"comDCB",
".",
"fDtrControl",
"=",
"win32",
".",
"DTR_CONTROL_HANDSHAKE",
"else",
":",
"comDCB",
".",
"fDtrControl",
"=",
"win32",
".",
"DTR_CONTROL_ENABLE",
"if",
"self",
".",
"_dtr_state",
"else",
"win32",
".",
"DTR_CONTROL_DISABLE",
"comDCB",
".",
"fOutxDsrFlow",
"=",
"self",
".",
"_dsrdtr",
"comDCB",
".",
"fOutX",
"=",
"self",
".",
"_xonxoff",
"comDCB",
".",
"fInX",
"=",
"self",
".",
"_xonxoff",
"comDCB",
".",
"fNull",
"=",
"0",
"comDCB",
".",
"fErrorChar",
"=",
"0",
"comDCB",
".",
"fAbortOnError",
"=",
"0",
"comDCB",
".",
"XonChar",
"=",
"serial",
".",
"XON",
"comDCB",
".",
"XoffChar",
"=",
"serial",
".",
"XOFF",
"if",
"not",
"win32",
".",
"SetCommState",
"(",
"self",
".",
"_port_handle",
",",
"ctypes",
".",
"byref",
"(",
"comDCB",
")",
")",
":",
"raise",
"SerialException",
"(",
"'Cannot configure port, something went wrong. '",
"'Original message: {!r}'",
".",
"format",
"(",
"ctypes",
".",
"WinError",
"(",
")",
")",
")"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/serial/serialwin32.py#L98-L222 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/utils/unpacking.py | python | untar_file | (filename, location) | Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs. | Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs. | [
"Untar",
"the",
"file",
"(",
"with",
"path",
"filename",
")",
"to",
"the",
"destination",
"location",
".",
"All",
"files",
"are",
"written",
"based",
"on",
"system",
"defaults",
"and",
"umask",
"(",
"i",
".",
"e",
".",
"permissions",
"are",
"not",
"preserved",
")",
"except",
"that",
"regular",
"file",
"members",
"with",
"any",
"execute",
"permissions",
"(",
"user",
"group",
"or",
"world",
")",
"have",
"chmod",
"+",
"x",
"applied",
"after",
"being",
"written",
".",
"Note",
"that",
"for",
"windows",
"any",
"execute",
"changes",
"using",
"os",
".",
"chmod",
"are",
"no",
"-",
"ops",
"per",
"the",
"python",
"docs",
"."
] | def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([
member.name for member in tar.getmembers()
])
for member in tar.getmembers():
fn = member.name
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
'The tar file ({}) has a file ({}) trying to install '
'outside target directory ({})'
)
raise InstallationError(
message.format(filename, path, location)
)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
assert fp is not None
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
set_extracted_file_to_default_mode_plus_executable(path)
finally:
tar.close() | [
"def",
"untar_file",
"(",
"filename",
",",
"location",
")",
":",
"# type: (str, str) -> None",
"ensure_dir",
"(",
"location",
")",
"if",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.gz'",
")",
"or",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tgz'",
")",
":",
"mode",
"=",
"'r:gz'",
"elif",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"BZ2_EXTENSIONS",
")",
":",
"mode",
"=",
"'r:bz2'",
"elif",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"XZ_EXTENSIONS",
")",
":",
"mode",
"=",
"'r:xz'",
"elif",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"mode",
"=",
"'r'",
"else",
":",
"logger",
".",
"warning",
"(",
"'Cannot determine compression type for file %s'",
",",
"filename",
",",
")",
"mode",
"=",
"'r:*'",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"filename",
",",
"mode",
")",
"try",
":",
"leading",
"=",
"has_leading_dir",
"(",
"[",
"member",
".",
"name",
"for",
"member",
"in",
"tar",
".",
"getmembers",
"(",
")",
"]",
")",
"for",
"member",
"in",
"tar",
".",
"getmembers",
"(",
")",
":",
"fn",
"=",
"member",
".",
"name",
"if",
"leading",
":",
"fn",
"=",
"split_leading_dir",
"(",
"fn",
")",
"[",
"1",
"]",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"fn",
")",
"if",
"not",
"is_within_directory",
"(",
"location",
",",
"path",
")",
":",
"message",
"=",
"(",
"'The tar file ({}) has a file ({}) trying to install '",
"'outside target directory ({})'",
")",
"raise",
"InstallationError",
"(",
"message",
".",
"format",
"(",
"filename",
",",
"path",
",",
"location",
")",
")",
"if",
"member",
".",
"isdir",
"(",
")",
":",
"ensure_dir",
"(",
"path",
")",
"elif",
"member",
".",
"issym",
"(",
")",
":",
"try",
":",
"# https://github.com/python/typeshed/issues/2673",
"tar",
".",
"_extract_member",
"(",
"member",
",",
"path",
")",
"# type: ignore",
"except",
"Exception",
"as",
"exc",
":",
"# Some corrupt tar files seem to produce this",
"# (specifically bad symlinks)",
"logger",
".",
"warning",
"(",
"'In the tar file %s the member %s is invalid: %s'",
",",
"filename",
",",
"member",
".",
"name",
",",
"exc",
",",
")",
"continue",
"else",
":",
"try",
":",
"fp",
"=",
"tar",
".",
"extractfile",
"(",
"member",
")",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
"as",
"exc",
":",
"# Some corrupt tar files seem to produce this",
"# (specifically bad symlinks)",
"logger",
".",
"warning",
"(",
"'In the tar file %s the member %s is invalid: %s'",
",",
"filename",
",",
"member",
".",
"name",
",",
"exc",
",",
")",
"continue",
"ensure_dir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"assert",
"fp",
"is",
"not",
"None",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"destfp",
":",
"shutil",
".",
"copyfileobj",
"(",
"fp",
",",
"destfp",
")",
"fp",
".",
"close",
"(",
")",
"# Update the timestamp (useful for cython compiled files)",
"tar",
".",
"utime",
"(",
"member",
",",
"path",
")",
"# member have any execute permissions for user/group/world?",
"if",
"member",
".",
"mode",
"&",
"0o111",
":",
"set_extracted_file_to_default_mode_plus_executable",
"(",
"path",
")",
"finally",
":",
"tar",
".",
"close",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/utils/unpacking.py#L161-L239 |
||
rbgirshick/caffe-fast-rcnn | 28a579eaf0668850705598b3075b8969f22226d9 | scripts/cpp_lint.py | python | _IncludeState.CanonicalizeAlphabeticalOrder | (self, header_path) | return header_path.replace('-inl.h', '.h').replace('-', '_').lower() | Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path. | Returns a path canonicalized for alphabetical comparison. | [
"Returns",
"a",
"path",
"canonicalized",
"for",
"alphabetical",
"comparison",
"."
] | def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower() | [
"def",
"CanonicalizeAlphabeticalOrder",
"(",
"self",
",",
"header_path",
")",
":",
"return",
"header_path",
".",
"replace",
"(",
"'-inl.h'",
",",
"'.h'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
".",
"lower",
"(",
")"
] | https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/scripts/cpp_lint.py#L597-L610 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib2to3/fixer_util.py | python | FromImport | (package_name, name_leafs) | return imp | Return an import statement in the form:
from package import name_leafs | Return an import statement in the form:
from package import name_leafs | [
"Return",
"an",
"import",
"statement",
"in",
"the",
"form",
":",
"from",
"package",
"import",
"name_leafs"
] | def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, u"from"),
Leaf(token.NAME, package_name, prefix=u" "),
Leaf(token.NAME, u"import", prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp | [
"def",
"FromImport",
"(",
"package_name",
",",
"name_leafs",
")",
":",
"# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')",
"#assert package_name == '.' or '.' not in package_name, \"FromImport has \"\\",
"# \"not been tested with dotted package names -- use at your own \"\\",
"# \"peril!\"",
"for",
"leaf",
"in",
"name_leafs",
":",
"# Pull the leaves out of their old tree",
"leaf",
".",
"remove",
"(",
")",
"children",
"=",
"[",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"u\"from\"",
")",
",",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"package_name",
",",
"prefix",
"=",
"u\" \"",
")",
",",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"u\"import\"",
",",
"prefix",
"=",
"u\" \"",
")",
",",
"Node",
"(",
"syms",
".",
"import_as_names",
",",
"name_leafs",
")",
"]",
"imp",
"=",
"Node",
"(",
"syms",
".",
"import_from",
",",
"children",
")",
"return",
"imp"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib2to3/fixer_util.py#L113-L130 |
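A hedged usage sketch for `FromImport`: the name leaves must be lib2to3 `Leaf` nodes, and the leading space lives in each leaf's `prefix`. The module name `collections` and the imported name below are made up for illustration; lib2to3 is deprecated in recent CPython releases, but the call shape is the one defined above.

```python
from lib2to3.fixer_util import FromImport
from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf

# The prefix (" ") supplies the space between 'import' and the name.
names = [Leaf(token.NAME, "OrderedDict", prefix=" ")]
node = FromImport("collections", names)
print(str(node))  # -> "from collections import OrderedDict"
```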
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | build/android/gradle/generate_gradle.py | python | _CreateJavaSourceDir | (entry_output_dir, java_sources_file) | return java_dirs | Computes and constructs when necessary the list of java source directories.
1. Computes the root java source directories from the list of files.
2. Determines whether there are any .java files in them that are not included
in |java_sources_file|.
3. If not, returns the list of java source directories. If so, constructs a
tree of symlinks within |entry_output_dir| of all files in
|java_sources_file|. | Computes and constructs when necessary the list of java source directories. | [
"Computes",
"and",
"constructs",
"when",
"necessary",
"the",
"list",
"of",
"java",
"source",
"directories",
"."
] | def _CreateJavaSourceDir(entry_output_dir, java_sources_file):
"""Computes and constructs when necessary the list of java source directories.
1. Computes the root java source directories from the list of files.
2. Determines whether there are any .java files in them that are not included
in |java_sources_file|.
3. If not, returns the list of java source directories. If so, constructs a
tree of symlinks within |entry_output_dir| of all files in
|java_sources_file|.
"""
java_dirs = []
if java_sources_file:
java_files = _RebasePath(build_utils.ReadSourcesList(java_sources_file))
java_dirs = _ComputeJavaSourceDirs(java_files)
found_java_files = build_utils.FindInDirectories(java_dirs, '*.java')
unwanted_java_files = set(found_java_files) - set(java_files)
missing_java_files = set(java_files) - set(found_java_files)
if unwanted_java_files:
logging.debug('Target requires .java symlinks: %s', entry_output_dir)
symlink_dir = os.path.join(entry_output_dir, _JAVA_SUBDIR)
_CreateSymlinkTree(entry_output_dir, symlink_dir, java_files, java_dirs)
java_dirs = [symlink_dir]
if missing_java_files:
logging.warning('Some java files were not found: %s', missing_java_files)
return java_dirs | [
"def",
"_CreateJavaSourceDir",
"(",
"entry_output_dir",
",",
"java_sources_file",
")",
":",
"java_dirs",
"=",
"[",
"]",
"if",
"java_sources_file",
":",
"java_files",
"=",
"_RebasePath",
"(",
"build_utils",
".",
"ReadSourcesList",
"(",
"java_sources_file",
")",
")",
"java_dirs",
"=",
"_ComputeJavaSourceDirs",
"(",
"java_files",
")",
"found_java_files",
"=",
"build_utils",
".",
"FindInDirectories",
"(",
"java_dirs",
",",
"'*.java'",
")",
"unwanted_java_files",
"=",
"set",
"(",
"found_java_files",
")",
"-",
"set",
"(",
"java_files",
")",
"missing_java_files",
"=",
"set",
"(",
"java_files",
")",
"-",
"set",
"(",
"found_java_files",
")",
"if",
"unwanted_java_files",
":",
"logging",
".",
"debug",
"(",
"'Target requires .java symlinks: %s'",
",",
"entry_output_dir",
")",
"symlink_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"entry_output_dir",
",",
"_JAVA_SUBDIR",
")",
"_CreateSymlinkTree",
"(",
"entry_output_dir",
",",
"symlink_dir",
",",
"java_files",
",",
"java_dirs",
")",
"java_dirs",
"=",
"[",
"symlink_dir",
"]",
"if",
"missing_java_files",
":",
"logging",
".",
"warning",
"(",
"'Some java files were not found: %s'",
",",
"missing_java_files",
")",
"return",
"java_dirs"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/build/android/gradle/generate_gradle.py#L167-L193 |
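The core decision in the helper above (reuse the computed source directories unless they contain stray `.java` files) reduces to two set differences. Below is a self-contained sketch of just that check using `pathlib` instead of the Chromium `build_utils` helpers; the paths in the example call are hypothetical.

```python
from pathlib import Path

def java_source_dir_mismatch(java_files, java_dirs):
    """Return (unwanted, missing) sets, mirroring the check above."""
    wanted = {Path(f).resolve() for f in java_files}
    found = {p.resolve() for d in java_dirs for p in Path(d).rglob("*.java")}
    unwanted = found - wanted  # extra sources found on disk but not listed
    missing = wanted - found   # listed sources that are not on disk
    return unwanted, missing

# A symlink tree is only needed when 'unwanted' is non-empty, exactly as
# in _CreateJavaSourceDir above (hypothetical paths):
unwanted, missing = java_source_dir_mismatch(
    ["src/org/example/Foo.java"], ["src"])
```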
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/richtext.py | python | RichTextBuffer.EndFontSize | (*args, **kwargs) | return _richtext.RichTextBuffer_EndFontSize(*args, **kwargs) | EndFontSize(self) -> bool | EndFontSize(self) -> bool | [
"EndFontSize",
"(",
"self",
")",
"-",
">",
"bool"
] | def EndFontSize(*args, **kwargs):
"""EndFontSize(self) -> bool"""
return _richtext.RichTextBuffer_EndFontSize(*args, **kwargs) | [
"def",
"EndFontSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"RichTextBuffer_EndFontSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L2361-L2363 |
|
musescore/MuseScore | a817fea23e3c2be30847b7fde5b01746222c252e | tools/crashdump/posix/generate_breakpad_symbols.py | python | Resolve | (path, exe_path, loader_path, rpaths) | return path | Resolve a dyld path.
@executable_path is replaced with |exe_path|
@loader_path is replaced with |loader_path|
@rpath is replaced with the first path in |rpaths| where the referenced file
is found | Resolve a dyld path. | [
"Resolve",
"a",
"dyld",
"path",
"."
] | def Resolve(path, exe_path, loader_path, rpaths):
"""Resolve a dyld path.
@executable_path is replaced with |exe_path|
@loader_path is replaced with |loader_path|
@rpath is replaced with the first path in |rpaths| where the referenced file
is found
"""
path = path.replace('@loader_path', loader_path)
path = path.replace('@executable_path', exe_path)
if path.find('@rpath') != -1:
for rpath in rpaths:
new_path = path.replace('@rpath', rpath)
if os.access(new_path, os.X_OK):
return new_path
return ''
return path | [
"def",
"Resolve",
"(",
"path",
",",
"exe_path",
",",
"loader_path",
",",
"rpaths",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"'@loader_path'",
",",
"loader_path",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'@executable_path'",
",",
"exe_path",
")",
"if",
"path",
".",
"find",
"(",
"'@rpath'",
")",
"!=",
"-",
"1",
":",
"for",
"rpath",
"in",
"rpaths",
":",
"new_path",
"=",
"path",
".",
"replace",
"(",
"'@rpath'",
",",
"rpath",
")",
"if",
"os",
".",
"access",
"(",
"new_path",
",",
"os",
".",
"X_OK",
")",
":",
"return",
"new_path",
"return",
"''",
"return",
"path"
] | https://github.com/musescore/MuseScore/blob/a817fea23e3c2be30847b7fde5b01746222c252e/tools/crashdump/posix/generate_breakpad_symbols.py#L47-L63 |
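Since `Resolve` is mostly plain substitution (only the `@rpath` branch consults the filesystem through `os.access`), a couple of calls show the intent. The paths are made up, and the snippet assumes `Resolve` from the script above is in scope:

```python
# @loader_path and @executable_path are substituted textually:
Resolve("@loader_path/libfoo.dylib", "/opt/app/bin", "/opt/app/lib", [])
# -> "/opt/app/lib/libfoo.dylib"
Resolve("@executable_path/../Frameworks/Bar", "/opt/app/bin", "/opt/app/lib", [])
# -> "/opt/app/bin/../Frameworks/Bar"

# @rpath is tried against each rpath entry and kept only if the resolved
# file is executable, so the result here depends on the local filesystem:
Resolve("@rpath/libfoo.dylib", "/opt/app/bin", "/opt/app/lib", ["/opt/app/lib"])
```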
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | SizerItemWindow | (*args, **kwargs) | return val | SizerItemWindow(Window window, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> SizerItem
Constructs a `wx.SizerItem` for tracking a window. | SizerItemWindow(Window window, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> SizerItem | [
"SizerItemWindow",
"(",
"Window",
"window",
"int",
"proportion",
"=",
"0",
"int",
"flag",
"=",
"0",
"int",
"border",
"=",
"0",
"PyObject",
"userData",
"=",
"None",
")",
"-",
">",
"SizerItem"
] | def SizerItemWindow(*args, **kwargs):
"""
SizerItemWindow(Window window, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> SizerItem
Constructs a `wx.SizerItem` for tracking a window.
"""
val = _core_.new_SizerItemWindow(*args, **kwargs)
return val | [
"def",
"SizerItemWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"val",
"=",
"_core_",
".",
"new_SizerItemWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"val"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L14385-L14393 |
|
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBStructuredData.GetValueForKey | (self, key) | return _lldb.SBStructuredData_GetValueForKey(self, key) | GetValueForKey(SBStructuredData self, char const * key) -> SBStructuredData | GetValueForKey(SBStructuredData self, char const * key) -> SBStructuredData | [
"GetValueForKey",
"(",
"SBStructuredData",
"self",
"char",
"const",
"*",
"key",
")",
"-",
">",
"SBStructuredData"
] | def GetValueForKey(self, key):
"""GetValueForKey(SBStructuredData self, char const * key) -> SBStructuredData"""
return _lldb.SBStructuredData_GetValueForKey(self, key) | [
"def",
"GetValueForKey",
"(",
"self",
",",
"key",
")",
":",
"return",
"_lldb",
".",
"SBStructuredData_GetValueForKey",
"(",
"self",
",",
"key",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L9713-L9715 |
|
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | clang/tools/scan-build-py/libscanbuild/report.py | python | create_counters | () | return predicate | Create counters for bug statistics.
Two entries are maintained: 'total' is an integer, represents the
number of bugs. The 'categories' is a two level categorisation of bug
counters. The first level is 'bug category' the second is 'bug type'.
Each entry in this classification is a dictionary of 'count', 'type'
and 'label'. | Create counters for bug statistics. | [
"Create",
"counters",
"for",
"bug",
"statistics",
"."
] | def create_counters():
""" Create counters for bug statistics.
Two entries are maintained: 'total' is an integer, represents the
number of bugs. The 'categories' is a two level categorisation of bug
counters. The first level is 'bug category' the second is 'bug type'.
Each entry in this classification is a dictionary of 'count', 'type'
and 'label'. """
def predicate(bug):
bug_category = bug['bug_category']
bug_type = bug['bug_type']
current_category = predicate.categories.get(bug_category, dict())
current_type = current_category.get(bug_type, {
'bug_type': bug_type,
'bug_type_class': category_type_name(bug),
'bug_count': 0
})
current_type.update({'bug_count': current_type['bug_count'] + 1})
current_category.update({bug_type: current_type})
predicate.categories.update({bug_category: current_category})
predicate.total += 1
predicate.total = 0
predicate.categories = dict()
return predicate | [
"def",
"create_counters",
"(",
")",
":",
"def",
"predicate",
"(",
"bug",
")",
":",
"bug_category",
"=",
"bug",
"[",
"'bug_category'",
"]",
"bug_type",
"=",
"bug",
"[",
"'bug_type'",
"]",
"current_category",
"=",
"predicate",
".",
"categories",
".",
"get",
"(",
"bug_category",
",",
"dict",
"(",
")",
")",
"current_type",
"=",
"current_category",
".",
"get",
"(",
"bug_type",
",",
"{",
"'bug_type'",
":",
"bug_type",
",",
"'bug_type_class'",
":",
"category_type_name",
"(",
"bug",
")",
",",
"'bug_count'",
":",
"0",
"}",
")",
"current_type",
".",
"update",
"(",
"{",
"'bug_count'",
":",
"current_type",
"[",
"'bug_count'",
"]",
"+",
"1",
"}",
")",
"current_category",
".",
"update",
"(",
"{",
"bug_type",
":",
"current_type",
"}",
")",
"predicate",
".",
"categories",
".",
"update",
"(",
"{",
"bug_category",
":",
"current_category",
"}",
")",
"predicate",
".",
"total",
"+=",
"1",
"predicate",
".",
"total",
"=",
"0",
"predicate",
".",
"categories",
"=",
"dict",
"(",
")",
"return",
"predicate"
] | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/clang/tools/scan-build-py/libscanbuild/report.py#L369-L394 |
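A hedged usage sketch: `create_counters` returns a predicate that carries its own totals as function attributes. It assumes the surrounding `libscanbuild.report` module (which also supplies the `category_type_name` helper called above) is importable, and the bug dictionaries below are illustrative, carrying only the `bug_category`/`bug_type` keys the predicate reads.

```python
count_bug = create_counters()
for bug in ({'bug_category': 'Logic error', 'bug_type': 'Division by zero'},
            {'bug_category': 'Logic error', 'bug_type': 'Division by zero'},
            {'bug_category': 'Memory error', 'bug_type': 'Memory leak'}):
    count_bug(bug)

assert count_bug.total == 3
assert count_bug.categories['Logic error']['Division by zero']['bug_count'] == 2
```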
|
microsoft/clang | 86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5 | bindings/python/clang/cindex.py | python | TranslationUnit.from_ast_file | (cls, filename, index=None) | return cls(ptr=ptr, index=index) | Create a TranslationUnit instance from a saved AST file.
A previously-saved AST file (provided with -emit-ast or
TranslationUnit.save()) is loaded from the filename specified.
If the file cannot be loaded, a TranslationUnitLoadError will be
raised.
index is optional and is the Index instance to use. If not provided,
a default Index will be created. | Create a TranslationUnit instance from a saved AST file. | [
"Create",
"a",
"TranslationUnit",
"instance",
"from",
"a",
"saved",
"AST",
"file",
"."
] | def from_ast_file(cls, filename, index=None):
"""Create a TranslationUnit instance from a saved AST file.
A previously-saved AST file (provided with -emit-ast or
TranslationUnit.save()) is loaded from the filename specified.
If the file cannot be loaded, a TranslationUnitLoadError will be
raised.
index is optional and is the Index instance to use. If not provided,
a default Index will be created.
"""
if index is None:
index = Index.create()
ptr = conf.lib.clang_createTranslationUnit(index, filename)
if not ptr:
raise TranslationUnitLoadError(filename)
return cls(ptr=ptr, index=index) | [
"def",
"from_ast_file",
"(",
"cls",
",",
"filename",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"Index",
".",
"create",
"(",
")",
"ptr",
"=",
"conf",
".",
"lib",
".",
"clang_createTranslationUnit",
"(",
"index",
",",
"filename",
")",
"if",
"not",
"ptr",
":",
"raise",
"TranslationUnitLoadError",
"(",
"filename",
")",
"return",
"cls",
"(",
"ptr",
"=",
"ptr",
",",
"index",
"=",
"index",
")"
] | https://github.com/microsoft/clang/blob/86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5/bindings/python/clang/cindex.py#L2818-L2837 |
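A hedged usage sketch for `from_ast_file`: it needs a libclang installation that `clang.cindex` can locate, and the file name below is hypothetical (such a file would normally come from `clang -emit-ast` or from `TranslationUnit.save()`).

```python
from clang.cindex import Index, TranslationUnit

index = Index.create()  # optional; a default Index is created otherwise
tu = TranslationUnit.from_ast_file("sample.ast", index=index)
print(tu.spelling)      # source file the saved AST was built from
```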
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/telemetry/third_party/mox3/mox3/mox.py | python | MockAnything._Reset | (self) | Reset the state of this mock to record mode with an empty queue. | Reset the state of this mock to record mode with an empty queue. | [
"Reset",
"the",
"state",
"of",
"this",
"mock",
"to",
"record",
"mode",
"with",
"an",
"empty",
"queue",
"."
] | def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = collections.deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False | [
"def",
"_Reset",
"(",
"self",
")",
":",
"# Maintain a list of method calls we are expecting",
"self",
".",
"_expected_calls_queue",
"=",
"collections",
".",
"deque",
"(",
")",
"# Make sure we are in setup mode, not replay mode",
"self",
".",
"_replay_mode",
"=",
"False"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/mox3/mox3/mox.py#L545-L552 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/multiprocessing/util.py | python | get_logger | () | return _logger | Returns logger used by multiprocessing | Returns logger used by multiprocessing | [
"Returns",
"logger",
"used",
"by",
"multiprocessing"
] | def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger | [
"def",
"get_logger",
"(",
")",
":",
"global",
"_logger",
"import",
"logging",
"logging",
".",
"_acquireLock",
"(",
")",
"try",
":",
"if",
"not",
"_logger",
":",
"_logger",
"=",
"logging",
".",
"getLogger",
"(",
"LOGGER_NAME",
")",
"_logger",
".",
"propagate",
"=",
"0",
"# XXX multiprocessing should cleanup before logging",
"if",
"hasattr",
"(",
"atexit",
",",
"'unregister'",
")",
":",
"atexit",
".",
"unregister",
"(",
"_exit_function",
")",
"atexit",
".",
"register",
"(",
"_exit_function",
")",
"else",
":",
"atexit",
".",
"_exithandlers",
".",
"remove",
"(",
"(",
"_exit_function",
",",
"(",
")",
",",
"{",
"}",
")",
")",
"atexit",
".",
"_exithandlers",
".",
"append",
"(",
"(",
"_exit_function",
",",
"(",
")",
",",
"{",
"}",
")",
")",
"finally",
":",
"logging",
".",
"_releaseLock",
"(",
")",
"return",
"_logger"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/multiprocessing/util.py#L60-L85 |
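From user code this logger is normally reached through `multiprocessing.get_logger()`, which forwards to the `util.get_logger()` shown above. A minimal way to exercise it (the handler choice is up to the caller):

```python
import logging
import multiprocessing

logger = multiprocessing.get_logger()        # the logger created above
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())   # or use multiprocessing.log_to_stderr()
logger.info("worker pool starting")
```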
|
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/contrib/tensorrt.py | python | set_use_fp16 | (status) | Set an environment variable which will enable or disable the use of FP16 precision in
TensorRT
Note: The mode FP16 forces the whole TRT node to be executed in FP16
:param status: Boolean, True if TensorRT should run in FP16, False for FP32 | Set an environment variable which will enable or disable the use of FP16 precision in
TensorRT
Note: The mode FP16 forces the whole TRT node to be executed in FP16
:param status: Boolean, True if TensorRT should run in FP16, False for FP32 | [
"Set",
"an",
"environment",
"variable",
"which",
"will",
"enable",
"or",
"disable",
"the",
"use",
"of",
"FP16",
"precision",
"in",
"TensorRT",
"Note",
":",
"The",
"mode",
"FP16",
"force",
"the",
"whole",
"TRT",
"node",
"to",
"be",
"executed",
"in",
"FP16",
":",
"param",
"status",
":",
"Boolean",
"True",
"if",
"TensorRT",
"should",
"run",
"in",
"FP16",
"False",
"for",
"FP32"
] | def set_use_fp16(status):
"""
Set an environment variable which will enable or disable the use of FP16 precision in
TensorRT
Note: The mode FP16 force the whole TRT node to be executed in FP16
:param status: Boolean, True if TensorRT should run in FP16, False for FP32
"""
os.environ["MXNET_TENSORRT_USE_FP16"] = str(int(status)) | [
"def",
"set_use_fp16",
"(",
"status",
")",
":",
"os",
".",
"environ",
"[",
"\"MXNET_TENSORRT_USE_FP16\"",
"]",
"=",
"str",
"(",
"int",
"(",
"status",
")",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/contrib/tensorrt.py#L21-L28 |
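A short usage sketch; it assumes an MXNet build with the TensorRT integration so that `mxnet.contrib.tensorrt` imports. The assertion simply restates the environment-variable effect visible in the body above.

```python
import os
from mxnet.contrib import tensorrt

tensorrt.set_use_fp16(True)                           # request FP16 TRT engines
assert os.environ["MXNET_TENSORRT_USE_FP16"] == "1"   # str(int(True))
tensorrt.set_use_fp16(False)                          # back to FP32
```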
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/layers/python/layers/layers.py | python | batch_norm | (inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=None,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99,
adjustment=None) | Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance. Try zero_debias_moving_mean=True for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_decay: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `decay` is still applied
to get the means and variances for inference.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined. | Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167. | [
"Adds",
"a",
"Batch",
"Normalization",
"layer",
"from",
"http",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1502",
".",
"03167",
"."
] | def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=None,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99,
adjustment=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance. Try zero_debias_moving_mean=True for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_decay: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `decay` is still applied
to get the means and variances for inference.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
"""
if fused is None:
fused = True
# Only use _fused_batch_norm if all of the following three
# conditions are true:
# (1) fused is set True;
# (2) it is possible to use (currently it doesn't support batch weights,
# renorm, and the case when rank is neither 2 nor 4);
# (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,
# or non-default updates_collections (not implemented in
# normalization_layers.BatchNormalization yet); otherwise use the fused
# implementation in normalization_layers.BatchNormalization.
inputs = ops.convert_to_tensor(inputs)
rank = inputs.get_shape().ndims
possible_to_fuse = (batch_weights is None and
not renorm and
rank in [2, 4] and
adjustment is None)
if fused and possible_to_fuse and (
zero_debias_moving_mean or rank == 2 or
updates_collections is not ops.GraphKeys.UPDATE_OPS):
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter()
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
# Determine whether we can use the core layer class.
if (batch_weights is None and
updates_collections is ops.GraphKeys.UPDATE_OPS and
not zero_debias_moving_mean):
# Use the core layer class.
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_decay,
adjustment=adjustment,
name=sc.name,
_scope=sc,
_reuse=reuse,
fused=fused)
outputs = layer.apply(inputs, training=is_training)
# Add variables to collections.
_add_variable_to_collections(
layer.moving_mean, variables_collections, 'moving_mean')
_add_variable_to_collections(
layer.moving_variance, variables_collections, 'moving_variance')
if layer.beta is not None:
_add_variable_to_collections(layer.beta, variables_collections, 'beta')
if layer.gamma is not None:
_add_variable_to_collections(
layer.gamma, variables_collections, 'gamma')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
# Not supported by layer class: batch_weights argument,
# and custom updates_collections. In that case, use the legacy BN
# implementation.
# Custom updates collections are not supported because the update logic
# is different in this case, in particular w.r.t. "forced updates" and
# update op reuse.
if renorm:
raise ValueError('renorm is not supported with batch_weights, '
'updates_collections or zero_debias_moving_mean')
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
if data_format == DATA_FORMAT_NCHW:
moments_axes = [0] + list(range(2, inputs_rank))
params_shape = inputs_shape[1:2]
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list(
[1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
else:
moments_axes = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
params_shape_broadcast = None
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables (this needs to be handled carefully, as it may break
# the checkpoint backward compatibility).
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as local_scope:
local_scope.set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `needs_moments` will be true.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.moments(inputs, moments_axes)
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
if data_format == DATA_FORMAT_NCHW:
mean = array_ops.reshape(mean, params_shape_broadcast)
variance = array_ops.reshape(variance, params_shape_broadcast)
if beta is not None:
beta = array_ops.reshape(beta, params_shape_broadcast)
if gamma is not None:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs) | [
"def",
"batch_norm",
"(",
"inputs",
",",
"decay",
"=",
"0.999",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"False",
",",
"epsilon",
"=",
"0.001",
",",
"activation_fn",
"=",
"None",
",",
"param_initializers",
"=",
"None",
",",
"param_regularizers",
"=",
"None",
",",
"updates_collections",
"=",
"ops",
".",
"GraphKeys",
".",
"UPDATE_OPS",
",",
"is_training",
"=",
"True",
",",
"reuse",
"=",
"None",
",",
"variables_collections",
"=",
"None",
",",
"outputs_collections",
"=",
"None",
",",
"trainable",
"=",
"True",
",",
"batch_weights",
"=",
"None",
",",
"fused",
"=",
"None",
",",
"data_format",
"=",
"DATA_FORMAT_NHWC",
",",
"zero_debias_moving_mean",
"=",
"False",
",",
"scope",
"=",
"None",
",",
"renorm",
"=",
"False",
",",
"renorm_clipping",
"=",
"None",
",",
"renorm_decay",
"=",
"0.99",
",",
"adjustment",
"=",
"None",
")",
":",
"if",
"fused",
"is",
"None",
":",
"fused",
"=",
"True",
"# Only use _fused_batch_norm if all of the following three",
"# conditions are true:",
"# (1) fused is set True;",
"# (2) it is possible to use (currently it doesn't support batch weights,",
"# renorm, and the case when rank is neither 2 nor 4);",
"# (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,",
"# or non-default updates_collections (not implemented in",
"# normalization_layers.BatchNormalization yet); otherwise use the fused",
"# implementation in normalization_layers.BatchNormalization.",
"inputs",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"inputs",
")",
"rank",
"=",
"inputs",
".",
"get_shape",
"(",
")",
".",
"ndims",
"possible_to_fuse",
"=",
"(",
"batch_weights",
"is",
"None",
"and",
"not",
"renorm",
"and",
"rank",
"in",
"[",
"2",
",",
"4",
"]",
"and",
"adjustment",
"is",
"None",
")",
"if",
"fused",
"and",
"possible_to_fuse",
"and",
"(",
"zero_debias_moving_mean",
"or",
"rank",
"==",
"2",
"or",
"updates_collections",
"is",
"not",
"ops",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
":",
"return",
"_fused_batch_norm",
"(",
"inputs",
",",
"decay",
"=",
"decay",
",",
"center",
"=",
"center",
",",
"scale",
"=",
"scale",
",",
"epsilon",
"=",
"epsilon",
",",
"activation_fn",
"=",
"activation_fn",
",",
"param_initializers",
"=",
"param_initializers",
",",
"updates_collections",
"=",
"updates_collections",
",",
"is_training",
"=",
"is_training",
",",
"reuse",
"=",
"reuse",
",",
"variables_collections",
"=",
"variables_collections",
",",
"outputs_collections",
"=",
"outputs_collections",
",",
"trainable",
"=",
"trainable",
",",
"data_format",
"=",
"data_format",
",",
"zero_debias_moving_mean",
"=",
"zero_debias_moving_mean",
",",
"scope",
"=",
"scope",
")",
"if",
"data_format",
"not",
"in",
"(",
"DATA_FORMAT_NCHW",
",",
"DATA_FORMAT_NHWC",
")",
":",
"raise",
"ValueError",
"(",
"'data_format has to be either NCHW or NHWC.'",
")",
"layer_variable_getter",
"=",
"_build_variable_getter",
"(",
")",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"scope",
",",
"'BatchNorm'",
",",
"[",
"inputs",
"]",
",",
"reuse",
"=",
"reuse",
",",
"custom_getter",
"=",
"layer_variable_getter",
")",
"as",
"sc",
":",
"inputs",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"inputs",
")",
"# Determine whether we can use the core layer class.",
"if",
"(",
"batch_weights",
"is",
"None",
"and",
"updates_collections",
"is",
"ops",
".",
"GraphKeys",
".",
"UPDATE_OPS",
"and",
"not",
"zero_debias_moving_mean",
")",
":",
"# Use the core layer class.",
"axis",
"=",
"1",
"if",
"data_format",
"==",
"DATA_FORMAT_NCHW",
"else",
"-",
"1",
"if",
"not",
"param_initializers",
":",
"param_initializers",
"=",
"{",
"}",
"beta_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'beta'",
",",
"init_ops",
".",
"zeros_initializer",
"(",
")",
")",
"gamma_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'gamma'",
",",
"init_ops",
".",
"ones_initializer",
"(",
")",
")",
"moving_mean_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'moving_mean'",
",",
"init_ops",
".",
"zeros_initializer",
"(",
")",
")",
"moving_variance_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'moving_variance'",
",",
"init_ops",
".",
"ones_initializer",
"(",
")",
")",
"if",
"not",
"param_regularizers",
":",
"param_regularizers",
"=",
"{",
"}",
"beta_regularizer",
"=",
"param_regularizers",
".",
"get",
"(",
"'beta'",
")",
"gamma_regularizer",
"=",
"param_regularizers",
".",
"get",
"(",
"'gamma'",
")",
"layer",
"=",
"normalization_layers",
".",
"BatchNormalization",
"(",
"axis",
"=",
"axis",
",",
"momentum",
"=",
"decay",
",",
"epsilon",
"=",
"epsilon",
",",
"center",
"=",
"center",
",",
"scale",
"=",
"scale",
",",
"beta_initializer",
"=",
"beta_initializer",
",",
"gamma_initializer",
"=",
"gamma_initializer",
",",
"moving_mean_initializer",
"=",
"moving_mean_initializer",
",",
"moving_variance_initializer",
"=",
"moving_variance_initializer",
",",
"beta_regularizer",
"=",
"beta_regularizer",
",",
"gamma_regularizer",
"=",
"gamma_regularizer",
",",
"trainable",
"=",
"trainable",
",",
"renorm",
"=",
"renorm",
",",
"renorm_clipping",
"=",
"renorm_clipping",
",",
"renorm_momentum",
"=",
"renorm_decay",
",",
"adjustment",
"=",
"adjustment",
",",
"name",
"=",
"sc",
".",
"name",
",",
"_scope",
"=",
"sc",
",",
"_reuse",
"=",
"reuse",
",",
"fused",
"=",
"fused",
")",
"outputs",
"=",
"layer",
".",
"apply",
"(",
"inputs",
",",
"training",
"=",
"is_training",
")",
"# Add variables to collections.",
"_add_variable_to_collections",
"(",
"layer",
".",
"moving_mean",
",",
"variables_collections",
",",
"'moving_mean'",
")",
"_add_variable_to_collections",
"(",
"layer",
".",
"moving_variance",
",",
"variables_collections",
",",
"'moving_variance'",
")",
"if",
"layer",
".",
"beta",
"is",
"not",
"None",
":",
"_add_variable_to_collections",
"(",
"layer",
".",
"beta",
",",
"variables_collections",
",",
"'beta'",
")",
"if",
"layer",
".",
"gamma",
"is",
"not",
"None",
":",
"_add_variable_to_collections",
"(",
"layer",
".",
"gamma",
",",
"variables_collections",
",",
"'gamma'",
")",
"if",
"activation_fn",
"is",
"not",
"None",
":",
"outputs",
"=",
"activation_fn",
"(",
"outputs",
")",
"return",
"utils",
".",
"collect_named_outputs",
"(",
"outputs_collections",
",",
"sc",
".",
"name",
",",
"outputs",
")",
"# Not supported by layer class: batch_weights argument,",
"# and custom updates_collections. In that case, use the legacy BN",
"# implementation.",
"# Custom updates collections are not supported because the update logic",
"# is different in this case, in particular w.r.t. \"forced updates\" and",
"# update op reuse.",
"if",
"renorm",
":",
"raise",
"ValueError",
"(",
"'renorm is not supported with batch_weights, '",
"'updates_collections or zero_debias_moving_mean'",
")",
"inputs_shape",
"=",
"inputs",
".",
"get_shape",
"(",
")",
"inputs_rank",
"=",
"inputs_shape",
".",
"ndims",
"if",
"inputs_rank",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Inputs %s has undefined rank.'",
"%",
"inputs",
".",
"name",
")",
"dtype",
"=",
"inputs",
".",
"dtype",
".",
"base_dtype",
"if",
"batch_weights",
"is",
"not",
"None",
":",
"batch_weights",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"batch_weights",
")",
"inputs_shape",
"[",
"0",
":",
"1",
"]",
".",
"assert_is_compatible_with",
"(",
"batch_weights",
".",
"get_shape",
"(",
")",
")",
"# Reshape batch weight values so they broadcast across inputs.",
"nshape",
"=",
"[",
"-",
"1",
"]",
"+",
"[",
"1",
"for",
"_",
"in",
"range",
"(",
"inputs_rank",
"-",
"1",
")",
"]",
"batch_weights",
"=",
"array_ops",
".",
"reshape",
"(",
"batch_weights",
",",
"nshape",
")",
"if",
"data_format",
"==",
"DATA_FORMAT_NCHW",
":",
"moments_axes",
"=",
"[",
"0",
"]",
"+",
"list",
"(",
"range",
"(",
"2",
",",
"inputs_rank",
")",
")",
"params_shape",
"=",
"inputs_shape",
"[",
"1",
":",
"2",
"]",
"# For NCHW format, rather than relying on implicit broadcasting, we",
"# explicitly reshape the params to params_shape_broadcast when computing",
"# the moments and the batch normalization.",
"params_shape_broadcast",
"=",
"list",
"(",
"[",
"1",
",",
"inputs_shape",
"[",
"1",
"]",
".",
"value",
"]",
"+",
"[",
"1",
"for",
"_",
"in",
"range",
"(",
"2",
",",
"inputs_rank",
")",
"]",
")",
"else",
":",
"moments_axes",
"=",
"list",
"(",
"range",
"(",
"inputs_rank",
"-",
"1",
")",
")",
"params_shape",
"=",
"inputs_shape",
"[",
"-",
"1",
":",
"]",
"params_shape_broadcast",
"=",
"None",
"if",
"not",
"params_shape",
".",
"is_fully_defined",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Inputs %s has undefined channels dimension %s.'",
"%",
"(",
"inputs",
".",
"name",
",",
"params_shape",
")",
")",
"# Allocate parameters for the beta and gamma of the normalization.",
"beta",
",",
"gamma",
"=",
"None",
",",
"None",
"if",
"not",
"param_initializers",
":",
"param_initializers",
"=",
"{",
"}",
"if",
"center",
":",
"beta_collections",
"=",
"utils",
".",
"get_variable_collections",
"(",
"variables_collections",
",",
"'beta'",
")",
"beta_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'beta'",
",",
"init_ops",
".",
"zeros_initializer",
"(",
")",
")",
"beta",
"=",
"variables",
".",
"model_variable",
"(",
"'beta'",
",",
"shape",
"=",
"params_shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"beta_initializer",
",",
"collections",
"=",
"beta_collections",
",",
"trainable",
"=",
"trainable",
")",
"if",
"scale",
":",
"gamma_collections",
"=",
"utils",
".",
"get_variable_collections",
"(",
"variables_collections",
",",
"'gamma'",
")",
"gamma_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'gamma'",
",",
"init_ops",
".",
"ones_initializer",
"(",
")",
")",
"gamma",
"=",
"variables",
".",
"model_variable",
"(",
"'gamma'",
",",
"shape",
"=",
"params_shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"gamma_initializer",
",",
"collections",
"=",
"gamma_collections",
",",
"trainable",
"=",
"trainable",
")",
"# Create moving_mean and moving_variance variables and add them to the",
"# appropriate collections. We disable variable partitioning while creating",
"# them, because assign_moving_average is not yet supported for partitioned",
"# variables (this needs to be handled carefully, as it may break",
"# the checkpoint backward compatibility).",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"variable_scope",
".",
"get_variable_scope",
"(",
")",
")",
"as",
"local_scope",
":",
"local_scope",
".",
"set_partitioner",
"(",
"None",
")",
"moving_mean_collections",
"=",
"utils",
".",
"get_variable_collections",
"(",
"variables_collections",
",",
"'moving_mean'",
")",
"moving_mean_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'moving_mean'",
",",
"init_ops",
".",
"zeros_initializer",
"(",
")",
")",
"moving_mean",
"=",
"variables",
".",
"model_variable",
"(",
"'moving_mean'",
",",
"shape",
"=",
"params_shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"moving_mean_initializer",
",",
"trainable",
"=",
"False",
",",
"collections",
"=",
"moving_mean_collections",
")",
"moving_variance_collections",
"=",
"utils",
".",
"get_variable_collections",
"(",
"variables_collections",
",",
"'moving_variance'",
")",
"moving_variance_initializer",
"=",
"param_initializers",
".",
"get",
"(",
"'moving_variance'",
",",
"init_ops",
".",
"ones_initializer",
"(",
")",
")",
"moving_variance",
"=",
"variables",
".",
"model_variable",
"(",
"'moving_variance'",
",",
"shape",
"=",
"params_shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"moving_variance_initializer",
",",
"trainable",
"=",
"False",
",",
"collections",
"=",
"moving_variance_collections",
")",
"# If `is_training` doesn't have a constant value, because it is a `Tensor`,",
"# a `Variable` or `Placeholder` then is_training_value will be None and",
"# `needs_moments` will be true.",
"is_training_value",
"=",
"utils",
".",
"constant_value",
"(",
"is_training",
")",
"need_moments",
"=",
"is_training_value",
"is",
"None",
"or",
"is_training_value",
"if",
"need_moments",
":",
"# Calculate the moments based on the individual batch.",
"if",
"batch_weights",
"is",
"None",
":",
"if",
"data_format",
"==",
"DATA_FORMAT_NCHW",
":",
"mean",
",",
"variance",
"=",
"nn",
".",
"moments",
"(",
"inputs",
",",
"moments_axes",
",",
"keep_dims",
"=",
"True",
")",
"mean",
"=",
"array_ops",
".",
"reshape",
"(",
"mean",
",",
"[",
"-",
"1",
"]",
")",
"variance",
"=",
"array_ops",
".",
"reshape",
"(",
"variance",
",",
"[",
"-",
"1",
"]",
")",
"else",
":",
"mean",
",",
"variance",
"=",
"nn",
".",
"moments",
"(",
"inputs",
",",
"moments_axes",
")",
"else",
":",
"if",
"data_format",
"==",
"DATA_FORMAT_NCHW",
":",
"mean",
",",
"variance",
"=",
"nn",
".",
"weighted_moments",
"(",
"inputs",
",",
"moments_axes",
",",
"batch_weights",
",",
"keep_dims",
"=",
"True",
")",
"mean",
"=",
"array_ops",
".",
"reshape",
"(",
"mean",
",",
"[",
"-",
"1",
"]",
")",
"variance",
"=",
"array_ops",
".",
"reshape",
"(",
"variance",
",",
"[",
"-",
"1",
"]",
")",
"else",
":",
"mean",
",",
"variance",
"=",
"nn",
".",
"weighted_moments",
"(",
"inputs",
",",
"moments_axes",
",",
"batch_weights",
")",
"moving_vars_fn",
"=",
"lambda",
":",
"(",
"moving_mean",
",",
"moving_variance",
")",
"if",
"updates_collections",
"is",
"None",
":",
"def",
"_force_updates",
"(",
")",
":",
"\"\"\"Internal function forces updates moving_vars if is_training.\"\"\"",
"update_moving_mean",
"=",
"moving_averages",
".",
"assign_moving_average",
"(",
"moving_mean",
",",
"mean",
",",
"decay",
",",
"zero_debias",
"=",
"zero_debias_moving_mean",
")",
"update_moving_variance",
"=",
"moving_averages",
".",
"assign_moving_average",
"(",
"moving_variance",
",",
"variance",
",",
"decay",
",",
"zero_debias",
"=",
"False",
")",
"with",
"ops",
".",
"control_dependencies",
"(",
"[",
"update_moving_mean",
",",
"update_moving_variance",
"]",
")",
":",
"return",
"array_ops",
".",
"identity",
"(",
"mean",
")",
",",
"array_ops",
".",
"identity",
"(",
"variance",
")",
"mean",
",",
"variance",
"=",
"utils",
".",
"smart_cond",
"(",
"is_training",
",",
"_force_updates",
",",
"moving_vars_fn",
")",
"else",
":",
"def",
"_delay_updates",
"(",
")",
":",
"\"\"\"Internal function that delay updates moving_vars if is_training.\"\"\"",
"update_moving_mean",
"=",
"moving_averages",
".",
"assign_moving_average",
"(",
"moving_mean",
",",
"mean",
",",
"decay",
",",
"zero_debias",
"=",
"zero_debias_moving_mean",
")",
"update_moving_variance",
"=",
"moving_averages",
".",
"assign_moving_average",
"(",
"moving_variance",
",",
"variance",
",",
"decay",
",",
"zero_debias",
"=",
"False",
")",
"return",
"update_moving_mean",
",",
"update_moving_variance",
"update_mean",
",",
"update_variance",
"=",
"utils",
".",
"smart_cond",
"(",
"is_training",
",",
"_delay_updates",
",",
"moving_vars_fn",
")",
"ops",
".",
"add_to_collections",
"(",
"updates_collections",
",",
"update_mean",
")",
"ops",
".",
"add_to_collections",
"(",
"updates_collections",
",",
"update_variance",
")",
"# Use computed moments during training and moving_vars otherwise.",
"vars_fn",
"=",
"lambda",
":",
"(",
"mean",
",",
"variance",
")",
"mean",
",",
"variance",
"=",
"utils",
".",
"smart_cond",
"(",
"is_training",
",",
"vars_fn",
",",
"moving_vars_fn",
")",
"else",
":",
"mean",
",",
"variance",
"=",
"moving_mean",
",",
"moving_variance",
"if",
"data_format",
"==",
"DATA_FORMAT_NCHW",
":",
"mean",
"=",
"array_ops",
".",
"reshape",
"(",
"mean",
",",
"params_shape_broadcast",
")",
"variance",
"=",
"array_ops",
".",
"reshape",
"(",
"variance",
",",
"params_shape_broadcast",
")",
"if",
"beta",
"is",
"not",
"None",
":",
"beta",
"=",
"array_ops",
".",
"reshape",
"(",
"beta",
",",
"params_shape_broadcast",
")",
"if",
"gamma",
"is",
"not",
"None",
":",
"gamma",
"=",
"array_ops",
".",
"reshape",
"(",
"gamma",
",",
"params_shape_broadcast",
")",
"# Compute batch_normalization.",
"outputs",
"=",
"nn",
".",
"batch_normalization",
"(",
"inputs",
",",
"mean",
",",
"variance",
",",
"beta",
",",
"gamma",
",",
"epsilon",
")",
"outputs",
".",
"set_shape",
"(",
"inputs_shape",
")",
"if",
"activation_fn",
"is",
"not",
"None",
":",
"outputs",
"=",
"activation_fn",
"(",
"outputs",
")",
"return",
"utils",
".",
"collect_named_outputs",
"(",
"outputs_collections",
",",
"sc",
".",
"name",
",",
"outputs",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/layers/python/layers/layers.py#L445-L842 |
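A hedged TensorFlow 1.x usage sketch (this is the `tf.contrib.layers` API, so it does not apply to TF 2.x): a small conv/batch-norm stack with the `UPDATE_OPS` dependency that the docstring above insists on. The shapes, `decay=0.9`, and the optimizer are illustrative choices, not recommendations from the source.

```python
import tensorflow as tf  # TensorFlow 1.x

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
is_training = tf.placeholder(tf.bool, [])

net = tf.contrib.layers.conv2d(images, 64, 3)
net = tf.contrib.layers.batch_norm(net, decay=0.9, is_training=is_training)
loss = tf.reduce_mean(net)

# The moving-average updates are collected in UPDATE_OPS and must be made
# a dependency of the train op, exactly as the docstring above shows:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
```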
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/input.py | python | _SparseMetaData.__init__ | (self, sparse, map_op, rank) | Create the metadata.
Args:
sparse: Python boolean.
map_op: The `Operation` that created the `SparseTensorsMap` in question.
This Op contains information about the underlying Map object and the
dtype of the original data.
rank: The statically known rank of the `SparseTensor`. | Create the metadata. | [
"Create",
"the",
"metadata",
"."
] | def __init__(self, sparse, map_op, rank):
"""Create the metadata.
Args:
sparse: Python boolean.
map_op: The `Operation` that created the `SparseTensorsMap` in question.
This Op contains information about the underlying Map object and the
dtype of the original data.
rank: The statically known rank of the `SparseTensor`.
"""
self._sparse = sparse
self._map_op = map_op
self._rank = tensor_shape.Dimension(rank) | [
"def",
"__init__",
"(",
"self",
",",
"sparse",
",",
"map_op",
",",
"rank",
")",
":",
"self",
".",
"_sparse",
"=",
"sparse",
"self",
".",
"_map_op",
"=",
"map_op",
"self",
".",
"_rank",
"=",
"tensor_shape",
".",
"Dimension",
"(",
"rank",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/input.py#L389-L401 |
||
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/psutil/psutil/__init__.py | python | disk_usage | (path) | return _psplatform.get_disk_usage(path) | Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage. | Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage. | [
"Return",
"disk",
"usage",
"statistics",
"about",
"the",
"given",
"path",
"as",
"a",
"namedtuple",
"including",
"total",
"used",
"and",
"free",
"space",
"expressed",
"in",
"bytes",
"plus",
"the",
"percentage",
"usage",
"."
] | def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
"""
return _psplatform.get_disk_usage(path) | [
"def",
"disk_usage",
"(",
"path",
")",
":",
"return",
"_psplatform",
".",
"get_disk_usage",
"(",
"path",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/psutil/psutil/__init__.py#L1191-L1196 |
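
A brief usage sketch for the disk_usage helper documented in the record above; the path is only an example and the exact numbers depend on the machine:

import psutil

usage = psutil.disk_usage('/')   # namedtuple with total, used, free, percent
print(usage.total, usage.used, usage.free, usage.percent)
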
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/plot.py | python | PlotCanvas.SetPointLabelFunc | (self, func) | Sets the function with custom code for pointLabel drawing
******** more info needed *************** | Sets the function with custom code for pointLabel drawing
******** more info needed *************** | [
"Sets",
"the",
"function",
"with",
"custom",
"code",
"for",
"pointLabel",
"drawing",
"********",
"more",
"info",
"needed",
"***************"
] | def SetPointLabelFunc(self, func):
"""Sets the function with custom code for pointLabel drawing
******** more info needed ***************
"""
self._pointLabelFunc = func | [
"def",
"SetPointLabelFunc",
"(",
"self",
",",
"func",
")",
":",
"self",
".",
"_pointLabelFunc",
"=",
"func"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/plot.py#L988-L992 |
||
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/requests/sessions.py | python | Session.request | (self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None) | return resp | Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response | Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object. | [
"Constructs",
"a",
":",
"class",
":",
"Request",
"<Request",
">",
"prepares",
"it",
"and",
"sends",
"it",
".",
"Returns",
":",
"class",
":",
"Response",
"<Response",
">",
"object",
"."
] | def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"url",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"cookies",
"=",
"None",
",",
"files",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"allow_redirects",
"=",
"True",
",",
"proxies",
"=",
"None",
",",
"hooks",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"verify",
"=",
"None",
",",
"cert",
"=",
"None",
",",
"json",
"=",
"None",
")",
":",
"# Create the Request.",
"req",
"=",
"Request",
"(",
"method",
"=",
"method",
".",
"upper",
"(",
")",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"files",
"=",
"files",
",",
"data",
"=",
"data",
"or",
"{",
"}",
",",
"json",
"=",
"json",
",",
"params",
"=",
"params",
"or",
"{",
"}",
",",
"auth",
"=",
"auth",
",",
"cookies",
"=",
"cookies",
",",
"hooks",
"=",
"hooks",
",",
")",
"prep",
"=",
"self",
".",
"prepare_request",
"(",
"req",
")",
"proxies",
"=",
"proxies",
"or",
"{",
"}",
"settings",
"=",
"self",
".",
"merge_environment_settings",
"(",
"prep",
".",
"url",
",",
"proxies",
",",
"stream",
",",
"verify",
",",
"cert",
")",
"# Send the request.",
"send_kwargs",
"=",
"{",
"'timeout'",
":",
"timeout",
",",
"'allow_redirects'",
":",
"allow_redirects",
",",
"}",
"send_kwargs",
".",
"update",
"(",
"settings",
")",
"resp",
"=",
"self",
".",
"send",
"(",
"prep",
",",
"*",
"*",
"send_kwargs",
")",
"return",
"resp"
] | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/requests/sessions.py#L466-L535 |
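
A minimal usage sketch of the Session.request API described above (this is the public requests interface; the URL, query parameters and timeouts are placeholders):

import requests

with requests.Session() as s:
    resp = s.request(
        'GET',
        'https://httpbin.org/get',   # example endpoint
        params={'q': 'demo'},        # encoded into the query string
        timeout=(3.05, 27),          # (connect timeout, read timeout)
        allow_redirects=True,
    )
    print(resp.status_code, resp.url)
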
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/flatmenu.py | python | FlatToolbarItem.Select | (self, select=True) | Selects or checks a radio or check item.
:param bool `select`: ``True`` to select or check a tool, ``False`` to unselect
or uncheck it. | Selects or checks a radio or check item. | [
"Selects",
"or",
"checks",
"a",
"radio",
"or",
"check",
"item",
"."
] | def Select(self, select=True):
"""
Selects or checks a radio or check item.
:param bool `select`: ``True`` to select or check a tool, ``False`` to unselect
or uncheck it.
"""
self._selected = select | [
"def",
"Select",
"(",
"self",
",",
"select",
"=",
"True",
")",
":",
"self",
".",
"_selected",
"=",
"select"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/flatmenu.py#L4702-L4710 |
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/debug/lib/debug_data.py | python | DebugDumpDir.transitive_inputs | (self,
node_name,
include_control=True,
include_reversed_ref=False,
device_name=None,) | return tracer.inputs() | Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
include_control: Include control inputs (True by default).
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally let the recipient (e.g., B in this case) mutate the value of
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all transitive inputs to the node, as a list of node
names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet. | Get the transitive inputs of given node according to partition graphs. | [
"Get",
"the",
"transitive",
"inputs",
"of",
"given",
"node",
"according",
"to",
"partition",
"graphs",
"."
] | def transitive_inputs(self,
node_name,
include_control=True,
include_reversed_ref=False,
device_name=None,):
"""Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
include_control: Include control inputs (True by default).
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally let the recipient (e.g., B in this case) mutate the value of
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all transitive inputs to the node, as a list of node
names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
input_lists = [self._debug_graphs[device_name].node_inputs]
if include_control:
input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs)
if include_reversed_ref:
input_lists.append(
self._debug_graphs[device_name].node_reversed_ref_inputs)
tracer = debug_graphs.DFSGraphTracer(
input_lists,
skip_node_names=self._get_merge_node_names(device_name))
tracer.trace(node_name)
return tracer.inputs() | [
"def",
"transitive_inputs",
"(",
"self",
",",
"node_name",
",",
"include_control",
"=",
"True",
",",
"include_reversed_ref",
"=",
"False",
",",
"device_name",
"=",
"None",
",",
")",
":",
"if",
"not",
"self",
".",
"_debug_graphs",
":",
"raise",
"LookupError",
"(",
"\"Node inputs are not loaded from partition graphs yet.\"",
")",
"device_name",
"=",
"self",
".",
"_infer_device_name",
"(",
"device_name",
",",
"node_name",
")",
"input_lists",
"=",
"[",
"self",
".",
"_debug_graphs",
"[",
"device_name",
"]",
".",
"node_inputs",
"]",
"if",
"include_control",
":",
"input_lists",
".",
"append",
"(",
"self",
".",
"_debug_graphs",
"[",
"device_name",
"]",
".",
"node_ctrl_inputs",
")",
"if",
"include_reversed_ref",
":",
"input_lists",
".",
"append",
"(",
"self",
".",
"_debug_graphs",
"[",
"device_name",
"]",
".",
"node_reversed_ref_inputs",
")",
"tracer",
"=",
"debug_graphs",
".",
"DFSGraphTracer",
"(",
"input_lists",
",",
"skip_node_names",
"=",
"self",
".",
"_get_merge_node_names",
"(",
"device_name",
")",
")",
"tracer",
".",
"trace",
"(",
"node_name",
")",
"return",
"tracer",
".",
"inputs",
"(",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/debug/lib/debug_data.py#L1093-L1135 |
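
Exercising DebugDumpDir.transitive_inputs needs an on-disk tfdbg dump, so as a stand-in the sketch below applies the same depth-first idea to a toy adjacency dict; none of these names come from the TensorFlow debugger API:

def transitive_inputs(node_inputs, node_name):
    # Depth-first walk over input edges, collecting every reachable node.
    seen, stack = set(), list(node_inputs.get(node_name, []))
    while stack:
        n = stack.pop()
        if n in seen:
            continue
        seen.add(n)
        stack.extend(node_inputs.get(n, []))
    return sorted(seen)

graph = {'loss': ['matmul'], 'matmul': ['x', 'w'], 'x': [], 'w': []}
print(transitive_inputs(graph, 'loss'))   # ['matmul', 'w', 'x']
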
|
psi4/psi4 | be533f7f426b6ccc263904e55122899b16663395 | psi4/driver/qcdb/libmintsmolecule.py | python | LibmintsMolecule.full_geometry | (self, np_out=False) | Returns the full (dummies included) geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.full_geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [0.0, 0.0, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]] | Returns the full (dummies included) geometry in Bohr as a N X 3 array. | [
"Returns",
"the",
"full",
"(",
"dummies",
"included",
")",
"geometry",
"in",
"Bohr",
"as",
"a",
"N",
"X",
"3",
"array",
"."
] | def full_geometry(self, np_out=False):
"""Returns the full (dummies included) geometry in Bohr as a N X 3 array.
>>> print(H2OH2O.full_geometry())
[[-2.930978460188563, -0.21641143673806384, 0.0], [-3.655219780069251, 1.4409218455037016, 0.0], [-1.1332252981904638, 0.0769345303220403, 0.0], [0.0, 0.0, 0.0], [2.5523113582286716, 0.21064588230662976, 0.0], [3.175492014248769, -0.7062681346308132, -1.4334725450878665], [3.175492014248769, -0.7062681346308132, 1.4334725450878665]]
"""
geom = np.asarray([self.full_atoms[at].compute() for at in range(self.nallatom())])
geom *= self.input_units_to_au()
if np_out:
return geom
else:
return geom.tolist() | [
"def",
"full_geometry",
"(",
"self",
",",
"np_out",
"=",
"False",
")",
":",
"geom",
"=",
"np",
".",
"asarray",
"(",
"[",
"self",
".",
"full_atoms",
"[",
"at",
"]",
".",
"compute",
"(",
")",
"for",
"at",
"in",
"range",
"(",
"self",
".",
"nallatom",
"(",
")",
")",
"]",
")",
"geom",
"*=",
"self",
".",
"input_units_to_au",
"(",
")",
"if",
"np_out",
":",
"return",
"geom",
"else",
":",
"return",
"geom",
".",
"tolist",
"(",
")"
] | https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/libmintsmolecule.py#L1249-L1261 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/shape_base.py | python | get_array_prepare | (*args) | return None | Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None | Find the wrapper for the array with the highest priority. | [
"Find",
"the",
"wrapper",
"for",
"the",
"array",
"with",
"the",
"highest",
"priority",
"."
] | def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None | [
"def",
"get_array_prepare",
"(",
"*",
"args",
")",
":",
"wrappers",
"=",
"sorted",
"(",
"(",
"getattr",
"(",
"x",
",",
"'__array_priority__'",
",",
"0",
")",
",",
"-",
"i",
",",
"x",
".",
"__array_prepare__",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"args",
")",
"if",
"hasattr",
"(",
"x",
",",
"'__array_prepare__'",
")",
")",
"if",
"wrappers",
":",
"return",
"wrappers",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"return",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/shape_base.py#L1036-L1046 |
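
get_array_prepare picks the __array_prepare__ of the argument with the highest __array_priority__, leftmost winning ties. A standalone sketch of that selection rule with made-up stand-in classes (not NumPy internals):

class Wrapped:
    def __init__(self, name, priority):
        self.name = name
        self.__array_priority__ = priority

    def __array_prepare__(self, arr, context=None):
        print('prepared by', self.name)
        return arr

def pick_prepare(*args):
    # Highest priority wins; -i breaks ties in favour of the leftmost argument.
    wrappers = sorted((getattr(x, '__array_priority__', 0), -i, x.__array_prepare__)
                      for i, x in enumerate(args) if hasattr(x, '__array_prepare__'))
    return wrappers[-1][-1] if wrappers else None

prep = pick_prepare(Wrapped('a', 1.0), Wrapped('b', 10.0), Wrapped('c', 10.0))
prep(None)   # prints "prepared by b": highest priority, leftmost of the tied pair
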
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/dataview.py | python | DataViewModelNotifier.SetOwner | (*args, **kwargs) | return _dataview.DataViewModelNotifier_SetOwner(*args, **kwargs) | SetOwner(self, DataViewModel owner)
Sets the owner (the model) of this notifier. Used internally. | SetOwner(self, DataViewModel owner) | [
"SetOwner",
"(",
"self",
"DataViewModel",
"owner",
")"
] | def SetOwner(*args, **kwargs):
"""
SetOwner(self, DataViewModel owner)
Sets the owner (the model) of this notifier. Used internally.
"""
return _dataview.DataViewModelNotifier_SetOwner(*args, **kwargs) | [
"def",
"SetOwner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewModelNotifier_SetOwner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/dataview.py#L276-L282 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | MenuBar.EnableTop | (*args, **kwargs) | return _core_.MenuBar_EnableTop(*args, **kwargs) | EnableTop(self, size_t pos, bool enable) | EnableTop(self, size_t pos, bool enable) | [
"EnableTop",
"(",
"self",
"size_t",
"pos",
"bool",
"enable",
")"
] | def EnableTop(*args, **kwargs):
"""EnableTop(self, size_t pos, bool enable)"""
return _core_.MenuBar_EnableTop(*args, **kwargs) | [
"def",
"EnableTop",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"MenuBar_EnableTop",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L12296-L12298 |
|
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/mimify.py | python | mimify_part | (ifile, ofile, is_mime) | Convert an 8bit part of a MIME mail message to quoted-printable. | Convert an 8bit part of a MIME mail message to quoted-printable. | [
"Convert",
"an",
"8bit",
"part",
"of",
"a",
"MIME",
"mail",
"message",
"to",
"quoted",
"-",
"printable",
"."
] | def mimify_part(ifile, ofile, is_mime):
"""Convert an 8bit part of a MIME mail message to quoted-printable."""
has_cte = is_qp = is_base64 = 0
multipart = None
must_quote_body = must_quote_header = has_iso_chars = 0
header = []
header_end = ''
message = []
message_end = ''
# read header
hfile = HeaderFile(ifile)
while 1:
line = hfile.readline()
if not line:
break
if not must_quote_header and iso_char.search(line):
must_quote_header = 1
if mv.match(line):
is_mime = 1
if cte.match(line):
has_cte = 1
if qp.match(line):
is_qp = 1
elif base64_re.match(line):
is_base64 = 1
mp_res = mp.match(line)
if mp_res:
multipart = '--' + mp_res.group(1)
if he.match(line):
header_end = line
break
header.append(line)
# read body
while 1:
line = ifile.readline()
if not line:
break
if multipart:
if line == multipart + '--\n':
message_end = line
break
if line == multipart + '\n':
message_end = line
break
if is_base64:
message.append(line)
continue
if is_qp:
while line[-2:] == '=\n':
line = line[:-2]
newline = ifile.readline()
if newline[:len(QUOTE)] == QUOTE:
newline = newline[len(QUOTE):]
line = line + newline
line = mime_decode(line)
message.append(line)
if not has_iso_chars:
if iso_char.search(line):
has_iso_chars = must_quote_body = 1
if not must_quote_body:
if len(line) > MAXLEN:
must_quote_body = 1
# convert and output header and body
for line in header:
if must_quote_header:
line = mime_encode_header(line)
chrset_res = chrset.match(line)
if chrset_res:
if has_iso_chars:
# change us-ascii into iso-8859-1
if chrset_res.group(2).lower() == 'us-ascii':
line = '%s%s%s' % (chrset_res.group(1),
CHARSET,
chrset_res.group(3))
else:
# change iso-8859-* into us-ascii
line = '%sus-ascii%s' % chrset_res.group(1, 3)
if has_cte and cte.match(line):
line = 'Content-Transfer-Encoding: '
if is_base64:
line = line + 'base64\n'
elif must_quote_body:
line = line + 'quoted-printable\n'
else:
line = line + '7bit\n'
ofile.write(line)
if (must_quote_header or must_quote_body) and not is_mime:
ofile.write('Mime-Version: 1.0\n')
ofile.write('Content-Type: text/plain; ')
if has_iso_chars:
ofile.write('charset="%s"\n' % CHARSET)
else:
ofile.write('charset="us-ascii"\n')
if must_quote_body and not has_cte:
ofile.write('Content-Transfer-Encoding: quoted-printable\n')
ofile.write(header_end)
for line in message:
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line)
ofile.write(message_end)
line = message_end
while multipart:
if line == multipart + '--\n':
# read bit after the end of the last part
while 1:
line = ifile.readline()
if not line:
return
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line)
if line == multipart + '\n':
nifile = File(ifile, multipart)
mimify_part(nifile, ofile, 1)
line = nifile.peek
if not line:
# premature end of file
break
ofile.write(line)
continue
# unexpectedly no multipart separator--copy rest of file
while 1:
line = ifile.readline()
if not line:
return
if must_quote_body:
line = mime_encode(line, 0)
ofile.write(line) | [
"def",
"mimify_part",
"(",
"ifile",
",",
"ofile",
",",
"is_mime",
")",
":",
"has_cte",
"=",
"is_qp",
"=",
"is_base64",
"=",
"0",
"multipart",
"=",
"None",
"must_quote_body",
"=",
"must_quote_header",
"=",
"has_iso_chars",
"=",
"0",
"header",
"=",
"[",
"]",
"header_end",
"=",
"''",
"message",
"=",
"[",
"]",
"message_end",
"=",
"''",
"# read header",
"hfile",
"=",
"HeaderFile",
"(",
"ifile",
")",
"while",
"1",
":",
"line",
"=",
"hfile",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"if",
"not",
"must_quote_header",
"and",
"iso_char",
".",
"search",
"(",
"line",
")",
":",
"must_quote_header",
"=",
"1",
"if",
"mv",
".",
"match",
"(",
"line",
")",
":",
"is_mime",
"=",
"1",
"if",
"cte",
".",
"match",
"(",
"line",
")",
":",
"has_cte",
"=",
"1",
"if",
"qp",
".",
"match",
"(",
"line",
")",
":",
"is_qp",
"=",
"1",
"elif",
"base64_re",
".",
"match",
"(",
"line",
")",
":",
"is_base64",
"=",
"1",
"mp_res",
"=",
"mp",
".",
"match",
"(",
"line",
")",
"if",
"mp_res",
":",
"multipart",
"=",
"'--'",
"+",
"mp_res",
".",
"group",
"(",
"1",
")",
"if",
"he",
".",
"match",
"(",
"line",
")",
":",
"header_end",
"=",
"line",
"break",
"header",
".",
"append",
"(",
"line",
")",
"# read body",
"while",
"1",
":",
"line",
"=",
"ifile",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"if",
"multipart",
":",
"if",
"line",
"==",
"multipart",
"+",
"'--\\n'",
":",
"message_end",
"=",
"line",
"break",
"if",
"line",
"==",
"multipart",
"+",
"'\\n'",
":",
"message_end",
"=",
"line",
"break",
"if",
"is_base64",
":",
"message",
".",
"append",
"(",
"line",
")",
"continue",
"if",
"is_qp",
":",
"while",
"line",
"[",
"-",
"2",
":",
"]",
"==",
"'=\\n'",
":",
"line",
"=",
"line",
"[",
":",
"-",
"2",
"]",
"newline",
"=",
"ifile",
".",
"readline",
"(",
")",
"if",
"newline",
"[",
":",
"len",
"(",
"QUOTE",
")",
"]",
"==",
"QUOTE",
":",
"newline",
"=",
"newline",
"[",
"len",
"(",
"QUOTE",
")",
":",
"]",
"line",
"=",
"line",
"+",
"newline",
"line",
"=",
"mime_decode",
"(",
"line",
")",
"message",
".",
"append",
"(",
"line",
")",
"if",
"not",
"has_iso_chars",
":",
"if",
"iso_char",
".",
"search",
"(",
"line",
")",
":",
"has_iso_chars",
"=",
"must_quote_body",
"=",
"1",
"if",
"not",
"must_quote_body",
":",
"if",
"len",
"(",
"line",
")",
">",
"MAXLEN",
":",
"must_quote_body",
"=",
"1",
"# convert and output header and body",
"for",
"line",
"in",
"header",
":",
"if",
"must_quote_header",
":",
"line",
"=",
"mime_encode_header",
"(",
"line",
")",
"chrset_res",
"=",
"chrset",
".",
"match",
"(",
"line",
")",
"if",
"chrset_res",
":",
"if",
"has_iso_chars",
":",
"# change us-ascii into iso-8859-1",
"if",
"chrset_res",
".",
"group",
"(",
"2",
")",
".",
"lower",
"(",
")",
"==",
"'us-ascii'",
":",
"line",
"=",
"'%s%s%s'",
"%",
"(",
"chrset_res",
".",
"group",
"(",
"1",
")",
",",
"CHARSET",
",",
"chrset_res",
".",
"group",
"(",
"3",
")",
")",
"else",
":",
"# change iso-8859-* into us-ascii",
"line",
"=",
"'%sus-ascii%s'",
"%",
"chrset_res",
".",
"group",
"(",
"1",
",",
"3",
")",
"if",
"has_cte",
"and",
"cte",
".",
"match",
"(",
"line",
")",
":",
"line",
"=",
"'Content-Transfer-Encoding: '",
"if",
"is_base64",
":",
"line",
"=",
"line",
"+",
"'base64\\n'",
"elif",
"must_quote_body",
":",
"line",
"=",
"line",
"+",
"'quoted-printable\\n'",
"else",
":",
"line",
"=",
"line",
"+",
"'7bit\\n'",
"ofile",
".",
"write",
"(",
"line",
")",
"if",
"(",
"must_quote_header",
"or",
"must_quote_body",
")",
"and",
"not",
"is_mime",
":",
"ofile",
".",
"write",
"(",
"'Mime-Version: 1.0\\n'",
")",
"ofile",
".",
"write",
"(",
"'Content-Type: text/plain; '",
")",
"if",
"has_iso_chars",
":",
"ofile",
".",
"write",
"(",
"'charset=\"%s\"\\n'",
"%",
"CHARSET",
")",
"else",
":",
"ofile",
".",
"write",
"(",
"'charset=\"us-ascii\"\\n'",
")",
"if",
"must_quote_body",
"and",
"not",
"has_cte",
":",
"ofile",
".",
"write",
"(",
"'Content-Transfer-Encoding: quoted-printable\\n'",
")",
"ofile",
".",
"write",
"(",
"header_end",
")",
"for",
"line",
"in",
"message",
":",
"if",
"must_quote_body",
":",
"line",
"=",
"mime_encode",
"(",
"line",
",",
"0",
")",
"ofile",
".",
"write",
"(",
"line",
")",
"ofile",
".",
"write",
"(",
"message_end",
")",
"line",
"=",
"message_end",
"while",
"multipart",
":",
"if",
"line",
"==",
"multipart",
"+",
"'--\\n'",
":",
"# read bit after the end of the last part",
"while",
"1",
":",
"line",
"=",
"ifile",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"return",
"if",
"must_quote_body",
":",
"line",
"=",
"mime_encode",
"(",
"line",
",",
"0",
")",
"ofile",
".",
"write",
"(",
"line",
")",
"if",
"line",
"==",
"multipart",
"+",
"'\\n'",
":",
"nifile",
"=",
"File",
"(",
"ifile",
",",
"multipart",
")",
"mimify_part",
"(",
"nifile",
",",
"ofile",
",",
"1",
")",
"line",
"=",
"nifile",
".",
"peek",
"if",
"not",
"line",
":",
"# premature end of file",
"break",
"ofile",
".",
"write",
"(",
"line",
")",
"continue",
"# unexpectedly no multipart separator--copy rest of file",
"while",
"1",
":",
"line",
"=",
"ifile",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"return",
"if",
"must_quote_body",
":",
"line",
"=",
"mime_encode",
"(",
"line",
",",
"0",
")",
"ofile",
".",
"write",
"(",
"line",
")"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/mimify.py#L280-L413 |
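
The mimify module above is legacy Python 2 code that no longer ships with Python 3; a rough modern stand-in for the quoted-printable step it performs is the stdlib quopri module (the sample text is arbitrary):

import quopri

raw = 'Grüße aus München\n'.encode('latin-1')   # 8-bit body text
encoded = quopri.encodestring(raw)              # =FC, =DF escape the non-ASCII bytes
print(encoded.decode('ascii'))
print(quopri.decodestring(encoded) == raw)      # True: the encoding round-trips
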
||
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | gpu/command_buffer/build_gles2_cmd_buffer.py | python | DELnHandler.WriteImmediateServiceUnitTest | (self, func, file) | Overrriden from TypeHandler. | Overrriden from TypeHandler. | [
"Overrriden",
"from",
"TypeHandler",
"."
] | def WriteImmediateServiceUnitTest(self, func, file):
"""Overrriden from TypeHandler."""
valid_test = """
TEST_F(%(test_name)s, %(name)sValidArgs) {
EXPECT_CALL(
*gl_,
%(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))
.Times(1);
cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
SpecializedSetup<cmds::%(name)s, 0>(true);
cmd.Init(1, &client_%(resource_name)s_id_);
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd, sizeof(client_%(resource_name)s_id_)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_TRUE(
Get%(upper_resource_name)s(client_%(resource_name)s_id_) == NULL);
}
"""
self.WriteValidUnitTest(func, file, valid_test, {
'resource_name': func.GetInfo('resource_type').lower(),
'upper_resource_name': func.GetInfo('resource_type'),
})
invalid_test = """
TEST_F(%(test_name)s, %(name)sInvalidArgs) {
cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();
SpecializedSetup<cmds::%(name)s, 0>(false);
GLuint temp = kInvalidClientId;
cmd.Init(1, &temp);
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd, sizeof(temp)));
}
"""
self.WriteValidUnitTest(func, file, invalid_test) | [
"def",
"WriteImmediateServiceUnitTest",
"(",
"self",
",",
"func",
",",
"file",
")",
":",
"valid_test",
"=",
"\"\"\"\nTEST_F(%(test_name)s, %(name)sValidArgs) {\n EXPECT_CALL(\n *gl_,\n %(gl_func_name)s(1, Pointee(kService%(upper_resource_name)sId)))\n .Times(1);\n cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();\n SpecializedSetup<cmds::%(name)s, 0>(true);\n cmd.Init(1, &client_%(resource_name)s_id_);\n EXPECT_EQ(error::kNoError,\n ExecuteImmediateCmd(cmd, sizeof(client_%(resource_name)s_id_)));\n EXPECT_EQ(GL_NO_ERROR, GetGLError());\n EXPECT_TRUE(\n Get%(upper_resource_name)s(client_%(resource_name)s_id_) == NULL);\n}\n\"\"\"",
"self",
".",
"WriteValidUnitTest",
"(",
"func",
",",
"file",
",",
"valid_test",
",",
"{",
"'resource_name'",
":",
"func",
".",
"GetInfo",
"(",
"'resource_type'",
")",
".",
"lower",
"(",
")",
",",
"'upper_resource_name'",
":",
"func",
".",
"GetInfo",
"(",
"'resource_type'",
")",
",",
"}",
")",
"invalid_test",
"=",
"\"\"\"\nTEST_F(%(test_name)s, %(name)sInvalidArgs) {\n cmds::%(name)s& cmd = *GetImmediateAs<cmds::%(name)s>();\n SpecializedSetup<cmds::%(name)s, 0>(false);\n GLuint temp = kInvalidClientId;\n cmd.Init(1, &temp);\n EXPECT_EQ(error::kNoError,\n ExecuteImmediateCmd(cmd, sizeof(temp)));\n}\n\"\"\"",
"self",
".",
"WriteValidUnitTest",
"(",
"func",
",",
"file",
",",
"invalid_test",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/gpu/command_buffer/build_gles2_cmd_buffer.py#L4320-L4352 |
||
Z3Prover/z3 | d745d03afdfdf638d66093e2bfbacaf87187f35b | src/api/python/z3/z3.py | python | BoolSortRef.cast | (self, val) | return val | Try to cast `val` as a Boolean.
>>> x = BoolSort().cast(True)
>>> x
True
>>> is_expr(x)
True
>>> is_expr(True)
False
>>> x.sort()
Bool | Try to cast `val` as a Boolean. | [
"Try",
"to",
"cast",
"val",
"as",
"a",
"Boolean",
"."
] | def cast(self, val):
"""Try to cast `val` as a Boolean.
>>> x = BoolSort().cast(True)
>>> x
True
>>> is_expr(x)
True
>>> is_expr(True)
False
>>> x.sort()
Bool
"""
if isinstance(val, bool):
return BoolVal(val, self.ctx)
if z3_debug():
if not is_expr(val):
msg = "True, False or Z3 Boolean expression expected. Received %s of type %s"
_z3_assert(is_expr(val), msg % (val, type(val)))
if not self.eq(val.sort()):
_z3_assert(self.eq(val.sort()), "Value cannot be converted into a Z3 Boolean value")
return val | [
"def",
"cast",
"(",
"self",
",",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"return",
"BoolVal",
"(",
"val",
",",
"self",
".",
"ctx",
")",
"if",
"z3_debug",
"(",
")",
":",
"if",
"not",
"is_expr",
"(",
"val",
")",
":",
"msg",
"=",
"\"True, False or Z3 Boolean expression expected. Received %s of type %s\"",
"_z3_assert",
"(",
"is_expr",
"(",
"val",
")",
",",
"msg",
"%",
"(",
"val",
",",
"type",
"(",
"val",
")",
")",
")",
"if",
"not",
"self",
".",
"eq",
"(",
"val",
".",
"sort",
"(",
")",
")",
":",
"_z3_assert",
"(",
"self",
".",
"eq",
"(",
"val",
".",
"sort",
"(",
")",
")",
",",
"\"Value cannot be converted into a Z3 Boolean value\"",
")",
"return",
"val"
] | https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L1487-L1508 |
|
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/framework/ops.py | python | SparseTensor.values | (self) | return self._values | The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type. | The non-zero values in the represented dense tensor. | [
"The",
"non",
"-",
"zero",
"values",
"in",
"the",
"represented",
"dense",
"tensor",
"."
] | def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values | [
"def",
"values",
"(",
"self",
")",
":",
"return",
"self",
".",
"_values"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/framework/ops.py#L1007-L1013 |
|
netket/netket | 0d534e54ecbf25b677ea72af6b85947979420652 | netket/jax/utils.py | python | tree_leaf_isreal | (pars: PyTree) | return any(jax.tree_leaves(jax.tree_map(is_real, pars))) | Returns true if at least one leaf in the tree has real dtype. | Returns true if at least one leaf in the tree has real dtype. | [
"Returns",
"true",
"if",
"at",
"least",
"one",
"leaf",
"in",
"the",
"tree",
"has",
"real",
"dtype",
"."
] | def tree_leaf_isreal(pars: PyTree) -> bool:
"""
Returns true if at least one leaf in the tree has real dtype.
"""
return any(jax.tree_leaves(jax.tree_map(is_real, pars))) | [
"def",
"tree_leaf_isreal",
"(",
"pars",
":",
"PyTree",
")",
"->",
"bool",
":",
"return",
"any",
"(",
"jax",
".",
"tree_leaves",
"(",
"jax",
".",
"tree_map",
"(",
"is_real",
",",
"pars",
")",
")",
")"
] | https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/jax/utils.py#L97-L101 |
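
As a rough illustration of the same leaf-dtype check without a netket or JAX dependency, the sketch below flattens a nested container and tests each leaf; the helper names and the example tree are invented for this sketch:

import numpy as np

def leaves(tree):
    # Recursively yield array leaves from nested dicts, lists and tuples.
    if isinstance(tree, dict):
        for v in tree.values():
            yield from leaves(v)
    elif isinstance(tree, (list, tuple)):
        for v in tree:
            yield from leaves(v)
    else:
        yield tree

def tree_leaf_isreal(tree):
    # True if at least one leaf holds a real (non-complex) dtype.
    return any(not np.iscomplexobj(np.asarray(leaf)) for leaf in leaves(tree))

params = {'w': np.ones((2, 2)), 'b': 1j * np.ones(2)}
print(tree_leaf_isreal(params))   # True: 'w' is real even though 'b' is complex
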
|
glotzerlab/hoomd-blue | f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a | hoomd/mpcd/data.py | python | system.take_snapshot | (self, particles=True) | return snapshot(self.data.takeSnapshot(particles)) | R""" Takes a snapshot of the current state of the MPCD system
Args:
particles (bool): If true, include particle data in snapshot
Examples::
snap = mpcd_sys.take_snapshot() | R""" Takes a snapshot of the current state of the MPCD system | [
"R",
"Takes",
"a",
"snapshot",
"of",
"the",
"current",
"state",
"of",
"the",
"MPCD",
"system"
] | def take_snapshot(self, particles=True):
R""" Takes a snapshot of the current state of the MPCD system
Args:
particles (bool): If true, include particle data in snapshot
Examples::
snap = mpcd_sys.take_snapshot()
"""
return snapshot(self.data.takeSnapshot(particles)) | [
"def",
"take_snapshot",
"(",
"self",
",",
"particles",
"=",
"True",
")",
":",
"return",
"snapshot",
"(",
"self",
".",
"data",
".",
"takeSnapshot",
"(",
"particles",
")",
")"
] | https://github.com/glotzerlab/hoomd-blue/blob/f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a/hoomd/mpcd/data.py#L260-L271 |
|
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/contributed/sumopy/coremodules/misc/shapefile.py | python | Reader.shapeRecord | (self, i=0) | return _ShapeRecord(shape=self.shape(i),
record=self.record(i)) | Returns a combination geometry and attribute record for the
supplied record index. | Returns a combination geometry and attribute record for the
supplied record index. | [
"Returns",
"a",
"combination",
"geometry",
"and",
"attribute",
"record",
"for",
"the",
"supplied",
"record",
"index",
"."
] | def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i)) | [
"def",
"shapeRecord",
"(",
"self",
",",
"i",
"=",
"0",
")",
":",
"i",
"=",
"self",
".",
"__restrictIndex",
"(",
"i",
")",
"return",
"_ShapeRecord",
"(",
"shape",
"=",
"self",
".",
"shape",
"(",
"i",
")",
",",
"record",
"=",
"self",
".",
"record",
"(",
"i",
")",
")"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/coremodules/misc/shapefile.py#L453-L458 |
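
A short usage sketch for the reader API documented above, written against the standalone pyshp package from which this bundled module derives; the file name is a placeholder and the matching .shx/.dbf files must exist alongside it:

import shapefile   # pyshp

sf = shapefile.Reader('roads.shp')
sr = sf.shapeRecord(0)                        # geometry plus attributes for record 0
print(sr.shape.shapeType, sr.shape.points[:3])
print(sr.record)
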
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/toolkits/regression/linear_regression.py | python | LinearRegression.__repr__ | (self) | return _toolkit_repr_print(self, sections, section_titles, width=30) | Return a string description of the model, including a description of
the training data, training statistics, and model hyper-parameters.
Returns
-------
out : string
A description of the model. | Return a string description of the model, including a description of
the training data, training statistics, and model hyper-parameters. | [
"Return",
"a",
"string",
"description",
"of",
"the",
"model",
"including",
"a",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyper",
"-",
"parameters",
"."
] | def __repr__(self):
"""
Return a string description of the model, including a description of
the training data, training statistics, and model hyper-parameters.
Returns
-------
out : string
A description of the model.
"""
(sections, section_titles) = self._get_summary_struct()
return _toolkit_repr_print(self, sections, section_titles, width=30) | [
"def",
"__repr__",
"(",
"self",
")",
":",
"(",
"sections",
",",
"section_titles",
")",
"=",
"self",
".",
"_get_summary_struct",
"(",
")",
"return",
"_toolkit_repr_print",
"(",
"self",
",",
"sections",
",",
"section_titles",
",",
"width",
"=",
"30",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/toolkits/regression/linear_regression.py#L432-L445 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/signal/ltisys.py | python | TransferFunction.num | (self) | return self._num | Numerator of the `TransferFunction` system. | Numerator of the `TransferFunction` system. | [
"Numerator",
"of",
"the",
"TransferFunction",
"system",
"."
] | def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num | [
"def",
"num",
"(",
"self",
")",
":",
"return",
"self",
".",
"_num"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/signal/ltisys.py#L604-L606 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/credentials.py | python | RefreshableCredentials.get_frozen_credentials | (self) | return self._frozen_credentials | Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request) | Return immutable credentials. | [
"Return",
"immutable",
"credentials",
"."
] | def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials | [
"def",
"get_frozen_credentials",
"(",
"self",
")",
":",
"self",
".",
"_refresh",
"(",
")",
"return",
"self",
".",
"_frozen_credentials"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/credentials.py#L557-L592 |
|
networkit/networkit | 695b7a786a894a303fa8587597d5ef916e797729 | networkit/profiling/plot.py | python | Theme.getDefaultWidth | (self) | return self.__defaultWidth | returns the default width value of the theme | returns the default width value of the theme | [
"returns",
"the",
"default",
"width",
"value",
"of",
"the",
"theme"
] | def getDefaultWidth(self):
""" returns the default width value of the theme """
return self.__defaultWidth | [
"def",
"getDefaultWidth",
"(",
"self",
")",
":",
"return",
"self",
".",
"__defaultWidth"
] | https://github.com/networkit/networkit/blob/695b7a786a894a303fa8587597d5ef916e797729/networkit/profiling/plot.py#L96-L98 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/distlib/index.py | python | PackageIndex._reader | (self, name, stream, outbuf) | Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to. | Thread runner for reading lines of from a subprocess into a buffer. | [
"Thread",
"runner",
"for",
"reading",
"lines",
"of",
"from",
"a",
"subprocess",
"into",
"a",
"buffer",
"."
] | def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close() | [
"def",
"_reader",
"(",
"self",
",",
"name",
",",
"stream",
",",
"outbuf",
")",
":",
"while",
"True",
":",
"s",
"=",
"stream",
".",
"readline",
"(",
")",
"if",
"not",
"s",
":",
"break",
"s",
"=",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"rstrip",
"(",
")",
"outbuf",
".",
"append",
"(",
"s",
")",
"logger",
".",
"debug",
"(",
"'%s: %s'",
"%",
"(",
"name",
",",
"s",
")",
")",
"stream",
".",
"close",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/distlib/index.py#L134-L150 |
||
moai/moai-dev | 0ba7c678311d1fa9dbc091f60665e95e54169fdf | 3rdparty/libwebp-0.4.1/swig/libwebp.py | python | WebPDecodeBGR | (*args) | return _libwebp.WebPDecodeBGR(*args) | WebPDecodeBGR(uint8_t data) -> (rgb, width, height) | WebPDecodeBGR(uint8_t data) -> (rgb, width, height) | [
"WebPDecodeBGR",
"(",
"uint8_t",
"data",
")",
"-",
">",
"(",
"rgb",
"width",
"height",
")"
] | def WebPDecodeBGR(*args):
"""WebPDecodeBGR(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeBGR(*args) | [
"def",
"WebPDecodeBGR",
"(",
"*",
"args",
")",
":",
"return",
"_libwebp",
".",
"WebPDecodeBGR",
"(",
"*",
"args",
")"
] | https://github.com/moai/moai-dev/blob/0ba7c678311d1fa9dbc091f60665e95e54169fdf/3rdparty/libwebp-0.4.1/swig/libwebp.py#L91-L93 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/slim/python/slim/evaluation.py | python | evaluate_once | (master,
checkpoint_path,
logdir,
num_evals=1,
initial_op=None,
initial_op_feed_dict=None,
eval_op=None,
eval_op_feed_dict=None,
final_op=None,
final_op_feed_dict=None,
summary_op=_USE_DEFAULT,
summary_op_feed_dict=None,
variables_to_restore=None,
session_config=None) | return evaluation.evaluate_once(
checkpoint_path,
master=master,
scaffold=monitored_session.Scaffold(
init_op=initial_op, init_feed_dict=initial_op_feed_dict, saver=saver),
eval_ops=eval_op,
feed_dict=eval_op_feed_dict,
final_ops=final_op,
final_ops_feed_dict=final_op_feed_dict,
hooks=hooks,
config=session_config) | Evaluates the model at the given checkpoint path.
Args:
master: The BNS address of the TensorFlow master.
checkpoint_path: The path to a checkpoint to use for evaluation.
logdir: The directory where the TensorFlow summaries are written to.
num_evals: The number of times to run `eval_op`.
initial_op: An operation run at the beginning of evaluation.
initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
eval_op: A operation run `num_evals` times.
eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
final_op: An operation to execute after all of the `eval_op` executions. The
value of `final_op` is returned.
final_op_feed_dict: A feed dictionary to use when executing `final_op`.
summary_op: The summary_op to evaluate after running TF-Slims metric ops. By
default the summary_op is set to tf.summary.merge_all().
summary_op_feed_dict: An optional feed dictionary to use when running the
`summary_op`.
variables_to_restore: A list of TensorFlow variables to restore during
evaluation. If the argument is left as `None` then
slim.variables.GetVariablesToRestore() is used.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The value of `final_op` or `None` if `final_op` is `None`. | Evaluates the model at the given checkpoint path. | [
"Evaluates",
"the",
"model",
"at",
"the",
"given",
"checkpoint",
"path",
"."
] | def evaluate_once(master,
checkpoint_path,
logdir,
num_evals=1,
initial_op=None,
initial_op_feed_dict=None,
eval_op=None,
eval_op_feed_dict=None,
final_op=None,
final_op_feed_dict=None,
summary_op=_USE_DEFAULT,
summary_op_feed_dict=None,
variables_to_restore=None,
session_config=None):
"""Evaluates the model at the given checkpoint path.
Args:
master: The BNS address of the TensorFlow master.
checkpoint_path: The path to a checkpoint to use for evaluation.
logdir: The directory where the TensorFlow summaries are written to.
num_evals: The number of times to run `eval_op`.
initial_op: An operation run at the beginning of evaluation.
initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
eval_op: A operation run `num_evals` times.
eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
final_op: An operation to execute after all of the `eval_op` executions. The
value of `final_op` is returned.
final_op_feed_dict: A feed dictionary to use when executing `final_op`.
summary_op: The summary_op to evaluate after running TF-Slims metric ops. By
default the summary_op is set to tf.summary.merge_all().
summary_op_feed_dict: An optional feed dictionary to use when running the
`summary_op`.
variables_to_restore: A list of TensorFlow variables to restore during
evaluation. If the argument is left as `None` then
slim.variables.GetVariablesToRestore() is used.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The value of `final_op` or `None` if `final_op` is `None`.
"""
if summary_op == _USE_DEFAULT:
summary_op = summary.merge_all()
hooks = [evaluation.StopAfterNEvalsHook(num_evals),]
if summary_op is not None:
hooks.append(evaluation.SummaryAtEndHook(
log_dir=logdir, summary_op=summary_op, feed_dict=summary_op_feed_dict))
saver = None
if variables_to_restore is not None:
saver = tf_saver.Saver(variables_to_restore)
return evaluation.evaluate_once(
checkpoint_path,
master=master,
scaffold=monitored_session.Scaffold(
init_op=initial_op, init_feed_dict=initial_op_feed_dict, saver=saver),
eval_ops=eval_op,
feed_dict=eval_op_feed_dict,
final_ops=final_op,
final_ops_feed_dict=final_op_feed_dict,
hooks=hooks,
config=session_config) | [
"def",
"evaluate_once",
"(",
"master",
",",
"checkpoint_path",
",",
"logdir",
",",
"num_evals",
"=",
"1",
",",
"initial_op",
"=",
"None",
",",
"initial_op_feed_dict",
"=",
"None",
",",
"eval_op",
"=",
"None",
",",
"eval_op_feed_dict",
"=",
"None",
",",
"final_op",
"=",
"None",
",",
"final_op_feed_dict",
"=",
"None",
",",
"summary_op",
"=",
"_USE_DEFAULT",
",",
"summary_op_feed_dict",
"=",
"None",
",",
"variables_to_restore",
"=",
"None",
",",
"session_config",
"=",
"None",
")",
":",
"if",
"summary_op",
"==",
"_USE_DEFAULT",
":",
"summary_op",
"=",
"summary",
".",
"merge_all",
"(",
")",
"hooks",
"=",
"[",
"evaluation",
".",
"StopAfterNEvalsHook",
"(",
"num_evals",
")",
",",
"]",
"if",
"summary_op",
"is",
"not",
"None",
":",
"hooks",
".",
"append",
"(",
"evaluation",
".",
"SummaryAtEndHook",
"(",
"log_dir",
"=",
"logdir",
",",
"summary_op",
"=",
"summary_op",
",",
"feed_dict",
"=",
"summary_op_feed_dict",
")",
")",
"saver",
"=",
"None",
"if",
"variables_to_restore",
"is",
"not",
"None",
":",
"saver",
"=",
"tf_saver",
".",
"Saver",
"(",
"variables_to_restore",
")",
"return",
"evaluation",
".",
"evaluate_once",
"(",
"checkpoint_path",
",",
"master",
"=",
"master",
",",
"scaffold",
"=",
"monitored_session",
".",
"Scaffold",
"(",
"init_op",
"=",
"initial_op",
",",
"init_feed_dict",
"=",
"initial_op_feed_dict",
",",
"saver",
"=",
"saver",
")",
",",
"eval_ops",
"=",
"eval_op",
",",
"feed_dict",
"=",
"eval_op_feed_dict",
",",
"final_ops",
"=",
"final_op",
",",
"final_ops_feed_dict",
"=",
"final_op_feed_dict",
",",
"hooks",
"=",
"hooks",
",",
"config",
"=",
"session_config",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/slim/python/slim/evaluation.py#L143-L207 |
|
google/shaka-packager | e1b0c7c45431327fd3ce193514a5407d07b39b22 | packager/third_party/protobuf/python/google/protobuf/internal/decoder.py | python | MessageSetItemDecoder | (descriptor) | return DecodeItem | Returns a decoder for a MessageSet item.
The parameter is the message Descriptor.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
} | Returns a decoder for a MessageSet item. | [
"Returns",
"a",
"decoder",
"for",
"a",
"MessageSet",
"item",
"."
] | def MessageSetItemDecoder(descriptor):
"""Returns a decoder for a MessageSet item.
The parameter is the message Descriptor.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = message.Extensions._FindExtensionByNumber(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start,message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
buffer[message_set_item_start:pos]))
return pos
return DecodeItem | [
"def",
"MessageSetItemDecoder",
"(",
"descriptor",
")",
":",
"type_id_tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"2",
",",
"wire_format",
".",
"WIRETYPE_VARINT",
")",
"message_tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"3",
",",
"wire_format",
".",
"WIRETYPE_LENGTH_DELIMITED",
")",
"item_end_tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"1",
",",
"wire_format",
".",
"WIRETYPE_END_GROUP",
")",
"local_ReadTag",
"=",
"ReadTag",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"local_SkipField",
"=",
"SkipField",
"def",
"DecodeItem",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"message_set_item_start",
"=",
"pos",
"type_id",
"=",
"-",
"1",
"message_start",
"=",
"-",
"1",
"message_end",
"=",
"-",
"1",
"# Technically, type_id and message can appear in any order, so we need",
"# a little loop here.",
"while",
"1",
":",
"(",
"tag_bytes",
",",
"pos",
")",
"=",
"local_ReadTag",
"(",
"buffer",
",",
"pos",
")",
"if",
"tag_bytes",
"==",
"type_id_tag_bytes",
":",
"(",
"type_id",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"elif",
"tag_bytes",
"==",
"message_tag_bytes",
":",
"(",
"size",
",",
"message_start",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"pos",
"=",
"message_end",
"=",
"message_start",
"+",
"size",
"elif",
"tag_bytes",
"==",
"item_end_tag_bytes",
":",
"break",
"else",
":",
"pos",
"=",
"SkipField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"tag_bytes",
")",
"if",
"pos",
"==",
"-",
"1",
":",
"raise",
"_DecodeError",
"(",
"'Missing group end tag.'",
")",
"if",
"pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"if",
"type_id",
"==",
"-",
"1",
":",
"raise",
"_DecodeError",
"(",
"'MessageSet item missing type_id.'",
")",
"if",
"message_start",
"==",
"-",
"1",
":",
"raise",
"_DecodeError",
"(",
"'MessageSet item missing message.'",
")",
"extension",
"=",
"message",
".",
"Extensions",
".",
"_FindExtensionByNumber",
"(",
"type_id",
")",
"if",
"extension",
"is",
"not",
"None",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"extension",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"extension",
",",
"extension",
".",
"message_type",
".",
"_concrete_class",
"(",
")",
")",
"if",
"value",
".",
"_InternalParse",
"(",
"buffer",
",",
"message_start",
",",
"message_end",
")",
"!=",
"message_end",
":",
"# The only reason _InternalParse would return early is if it encountered",
"# an end-group tag.",
"raise",
"_DecodeError",
"(",
"'Unexpected end-group tag.'",
")",
"else",
":",
"if",
"not",
"message",
".",
"_unknown_fields",
":",
"message",
".",
"_unknown_fields",
"=",
"[",
"]",
"message",
".",
"_unknown_fields",
".",
"append",
"(",
"(",
"MESSAGE_SET_ITEM_TAG",
",",
"buffer",
"[",
"message_set_item_start",
":",
"pos",
"]",
")",
")",
"return",
"pos",
"return",
"DecodeItem"
] | https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/google/protobuf/internal/decoder.py#L645-L715 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/distributions/python/ops/wishart.py | python | _WishartLinearOperator.cholesky_input_output_matrices | (self) | return self._cholesky_input_output_matrices | Boolean indicating if `Tensor` input/outputs are Cholesky factorized. | Boolean indicating if `Tensor` input/outputs are Cholesky factorized. | [
"Boolean",
"indicating",
"if",
"Tensor",
"input",
"/",
"outputs",
"are",
"Cholesky",
"factorized",
"."
] | def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices | [
"def",
"cholesky_input_output_matrices",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cholesky_input_output_matrices"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/distributions/python/ops/wishart.py#L190-L192 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/resources/find_unused_resources.py | python | GetBaseResourceId | (resource_id) | return resource_id | Removes common suffixes from a resource ID.
Removes suffixes that may be added by macros like IMAGE_GRID or IMAGE_BORDER.
For example, converts IDR_FOO_LEFT and IDR_FOO_RIGHT to just IDR_FOO.
Args:
resource_id: String resource ID.
Returns:
A string with the base part of the resource ID. | Removes common suffixes from a resource ID. | [
"Removes",
"common",
"suffixes",
"from",
"a",
"resource",
"ID",
"."
] | def GetBaseResourceId(resource_id):
"""Removes common suffixes from a resource ID.
Removes suffixes that may be added by macros like IMAGE_GRID or IMAGE_BORDER.
For example, converts IDR_FOO_LEFT and IDR_FOO_RIGHT to just IDR_FOO.
Args:
resource_id: String resource ID.
Returns:
A string with the base part of the resource ID.
"""
suffixes = [
'_TOP_LEFT', '_TOP', '_TOP_RIGHT',
'_LEFT', '_CENTER', '_RIGHT',
'_BOTTOM_LEFT', '_BOTTOM', '_BOTTOM_RIGHT',
'_TL', '_T', '_TR',
'_L', '_M', '_R',
'_BL', '_B', '_BR']
# Note: This does not check _HOVER, _PRESSED, _HOT, etc. as those are never
# used in macros.
for suffix in suffixes:
if resource_id.endswith(suffix):
resource_id = resource_id[:-len(suffix)]
return resource_id | [
"def",
"GetBaseResourceId",
"(",
"resource_id",
")",
":",
"suffixes",
"=",
"[",
"'_TOP_LEFT'",
",",
"'_TOP'",
",",
"'_TOP_RIGHT'",
",",
"'_LEFT'",
",",
"'_CENTER'",
",",
"'_RIGHT'",
",",
"'_BOTTOM_LEFT'",
",",
"'_BOTTOM'",
",",
"'_BOTTOM_RIGHT'",
",",
"'_TL'",
",",
"'_T'",
",",
"'_TR'",
",",
"'_L'",
",",
"'_M'",
",",
"'_R'",
",",
"'_BL'",
",",
"'_B'",
",",
"'_BR'",
"]",
"# Note: This does not check _HOVER, _PRESSED, _HOT, etc. as those are never",
"# used in macros.",
"for",
"suffix",
"in",
"suffixes",
":",
"if",
"resource_id",
".",
"endswith",
"(",
"suffix",
")",
":",
"resource_id",
"=",
"resource_id",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"return",
"resource_id"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/resources/find_unused_resources.py#L27-L51 |
|
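A minimal usage sketch for the GetBaseResourceId record above, assuming the function is imported from the listed find_unused_resources.py; the resource IDs below are made up for illustration, not taken from Chromium.
from find_unused_resources import GetBaseResourceId  # import path assumed for illustration
print(GetBaseResourceId('IDR_BUTTON_TOP_LEFT'))  # 'IDR_BUTTON'  - grid/border suffix stripped
print(GetBaseResourceId('IDR_BUTTON_BL'))        # 'IDR_BUTTON'  - short-form suffix stripped
print(GetBaseResourceId('IDR_BUTTON_HOVER'))     # 'IDR_BUTTON_HOVER' - state suffixes are never stripped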
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/cluster/vq.py | python | _missing_raise | () | raise a ClusterError when called. | raise a ClusterError when called. | [
"raise",
"a",
"ClusterError",
"when",
"called",
"."
] | def _missing_raise():
"""raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmeans with a different initialization.") | [
"def",
"_missing_raise",
"(",
")",
":",
"raise",
"ClusterError",
"(",
"\"One of the clusters is empty. \"",
"\"Re-run kmeans with a different initialization.\"",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/cluster/vq.py#L584-L587 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/_collections_abc.py | python | pop | (self, key, default=__marker) | D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised. | D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised. | [
"D",
".",
"pop",
"(",
"k",
"[",
"d",
"]",
")",
"-",
">",
"v",
"remove",
"specified",
"key",
"and",
"return",
"the",
"corresponding",
"value",
".",
"If",
"key",
"is",
"not",
"found",
"d",
"is",
"returned",
"if",
"given",
"otherwise",
"KeyError",
"is",
"raised",
"."
] | def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value | [
"def",
"pop",
"(",
"self",
",",
"key",
",",
"default",
"=",
"__marker",
")",
":",
"try",
":",
"value",
"=",
"self",
"[",
"key",
"]",
"except",
"KeyError",
":",
"if",
"default",
"is",
"self",
".",
"__marker",
":",
"raise",
"return",
"default",
"else",
":",
"del",
"self",
"[",
"key",
"]",
"return",
"value"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/_collections_abc.py#L899-L911 |
||
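A short sketch of the MutableMapping.pop mixin above in action, using a hypothetical subclass (the class name and dict-backed storage are illustrative; only the five abstract methods are implemented so the inherited pop does the work).
from collections.abc import MutableMapping

class DictBacked(MutableMapping):  # hypothetical subclass for illustration
    def __init__(self): self._d = {}
    def __getitem__(self, key): return self._d[key]
    def __setitem__(self, key, value): self._d[key] = value
    def __delitem__(self, key): del self._d[key]
    def __iter__(self): return iter(self._d)
    def __len__(self): return len(self._d)

m = DictBacked()
m['a'] = 1
print(m.pop('a'))        # 1, and 'a' is removed
print(m.pop('a', None))  # None: the default is returned instead of raising
m.pop('a')               # KeyError: no default was given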
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py | python | convert_ceil | (node, **kwargs) | return create_basic_op_node('Ceil', node, kwargs) | Map MXNet's ceil operator attributes to onnx's Ceil operator
and return the created node. | Map MXNet's ceil operator attributes to onnx's Ceil operator
and return the created node. | [
"Map",
"MXNet",
"s",
"ceil",
"operator",
"attributes",
"to",
"onnx",
"s",
"Ceil",
"operator",
"and",
"return",
"the",
"created",
"node",
"."
] | def convert_ceil(node, **kwargs):
"""Map MXNet's ceil operator attributes to onnx's Ceil operator
and return the created node.
"""
return create_basic_op_node('Ceil', node, kwargs) | [
"def",
"convert_ceil",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"create_basic_op_node",
"(",
"'Ceil'",
",",
"node",
",",
"kwargs",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py#L1749-L1753 |
|
llvm/llvm-project | ffa6262cb4e2a335d26416fad39a581b4f98c5f4 | clang/tools/scan-build-py/lib/libscanbuild/analyze.py | python | ctu_collect_phase | (opts) | Preprocess source by generating all data needed by CTU analysis. | Preprocess source by generating all data needed by CTU analysis. | [
"Preprocess",
"source",
"by",
"generating",
"all",
"data",
"needed",
"by",
"CTU",
"analysis",
"."
] | def ctu_collect_phase(opts):
""" Preprocess source by generating all data needed by CTU analysis. """
def generate_ast(triple_arch):
""" Generates ASTs for the current compilation command. """
args = opts['direct_args'] + opts['flags']
ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
os.path.realpath(opts['file'])[1:] +
'.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
# In case an other process already created it.
pass
ast_command = [opts['clang'], '-emit-ast']
ast_command.extend(args)
ast_command.append('-w')
ast_command.append(opts['file'])
ast_command.append('-o')
ast_command.append(ast_path)
logging.debug("Generating AST using '%s'", ast_command)
run_command(ast_command, cwd=opts['directory'])
def map_extdefs(triple_arch):
""" Generate external definition map file for the current source. """
args = opts['direct_args'] + opts['flags']
extdefmap_command = [opts['ctu'].extdef_map_cmd]
extdefmap_command.append(opts['file'])
extdefmap_command.append('--')
extdefmap_command.extend(args)
logging.debug("Generating external definition map using '%s'",
extdefmap_command)
extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
if not os.path.isdir(extern_defs_map_folder):
try:
os.makedirs(extern_defs_map_folder)
except OSError:
# In case an other process already created it.
pass
if extdef_ast_list:
with tempfile.NamedTemporaryFile(mode='w',
dir=extern_defs_map_folder,
delete=False) as out_file:
out_file.write("\n".join(extdef_ast_list) + "\n")
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+ [opts['file']]
triple_arch = get_triple_arch(cmd, cwd)
generate_ast(triple_arch)
map_extdefs(triple_arch) | [
"def",
"ctu_collect_phase",
"(",
"opts",
")",
":",
"def",
"generate_ast",
"(",
"triple_arch",
")",
":",
"\"\"\" Generates ASTs for the current compilation command. \"\"\"",
"args",
"=",
"opts",
"[",
"'direct_args'",
"]",
"+",
"opts",
"[",
"'flags'",
"]",
"ast_joined_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"opts",
"[",
"'ctu'",
"]",
".",
"dir",
",",
"triple_arch",
",",
"'ast'",
",",
"os",
".",
"path",
".",
"realpath",
"(",
"opts",
"[",
"'file'",
"]",
")",
"[",
"1",
":",
"]",
"+",
"'.ast'",
")",
"ast_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"ast_joined_path",
")",
"ast_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"ast_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"ast_dir",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"ast_dir",
")",
"except",
"OSError",
":",
"# In case an other process already created it.",
"pass",
"ast_command",
"=",
"[",
"opts",
"[",
"'clang'",
"]",
",",
"'-emit-ast'",
"]",
"ast_command",
".",
"extend",
"(",
"args",
")",
"ast_command",
".",
"append",
"(",
"'-w'",
")",
"ast_command",
".",
"append",
"(",
"opts",
"[",
"'file'",
"]",
")",
"ast_command",
".",
"append",
"(",
"'-o'",
")",
"ast_command",
".",
"append",
"(",
"ast_path",
")",
"logging",
".",
"debug",
"(",
"\"Generating AST using '%s'\"",
",",
"ast_command",
")",
"run_command",
"(",
"ast_command",
",",
"cwd",
"=",
"opts",
"[",
"'directory'",
"]",
")",
"def",
"map_extdefs",
"(",
"triple_arch",
")",
":",
"\"\"\" Generate external definition map file for the current source. \"\"\"",
"args",
"=",
"opts",
"[",
"'direct_args'",
"]",
"+",
"opts",
"[",
"'flags'",
"]",
"extdefmap_command",
"=",
"[",
"opts",
"[",
"'ctu'",
"]",
".",
"extdef_map_cmd",
"]",
"extdefmap_command",
".",
"append",
"(",
"opts",
"[",
"'file'",
"]",
")",
"extdefmap_command",
".",
"append",
"(",
"'--'",
")",
"extdefmap_command",
".",
"extend",
"(",
"args",
")",
"logging",
".",
"debug",
"(",
"\"Generating external definition map using '%s'\"",
",",
"extdefmap_command",
")",
"extdef_src_list",
"=",
"run_command",
"(",
"extdefmap_command",
",",
"cwd",
"=",
"opts",
"[",
"'directory'",
"]",
")",
"extdef_ast_list",
"=",
"extdef_map_list_src_to_ast",
"(",
"extdef_src_list",
")",
"extern_defs_map_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"opts",
"[",
"'ctu'",
"]",
".",
"dir",
",",
"triple_arch",
",",
"CTU_TEMP_DEFMAP_FOLDER",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"extern_defs_map_folder",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"extern_defs_map_folder",
")",
"except",
"OSError",
":",
"# In case an other process already created it.",
"pass",
"if",
"extdef_ast_list",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w'",
",",
"dir",
"=",
"extern_defs_map_folder",
",",
"delete",
"=",
"False",
")",
"as",
"out_file",
":",
"out_file",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"extdef_ast_list",
")",
"+",
"\"\\n\"",
")",
"cwd",
"=",
"opts",
"[",
"'directory'",
"]",
"cmd",
"=",
"[",
"opts",
"[",
"'clang'",
"]",
",",
"'--analyze'",
"]",
"+",
"opts",
"[",
"'direct_args'",
"]",
"+",
"opts",
"[",
"'flags'",
"]",
"+",
"[",
"opts",
"[",
"'file'",
"]",
"]",
"triple_arch",
"=",
"get_triple_arch",
"(",
"cmd",
",",
"cwd",
")",
"generate_ast",
"(",
"triple_arch",
")",
"map_extdefs",
"(",
"triple_arch",
")"
] | https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/clang/tools/scan-build-py/lib/libscanbuild/analyze.py#L599-L657 |
||
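A hedged sketch of the opts dictionary that ctu_collect_phase above expects, inferred only from the keys the body reads; every concrete value is illustrative and not taken from scan-build.
from collections import namedtuple

CtuConfig = namedtuple('CtuConfig', ['dir', 'extdef_map_cmd'])  # stand-in for the real CTU config object

opts = {
    'clang': '/usr/bin/clang',                          # analyzer binary (illustrative path)
    'directory': '/home/user/project/build',            # working directory of the compilation
    'file': 'src/widget.c',                             # translation unit to collect data for
    'direct_args': ['-Xclang', '-analyzer-output=plist'],  # illustrative analyzer-level args
    'flags': ['-Iinclude', '-DNDEBUG'],                 # flags taken from the compilation command
    'ctu': CtuConfig(dir='/tmp/ctu-data', extdef_map_cmd='clang-extdef-mapping'),
}
# ctu_collect_phase(opts) would then emit <ctu-dir>/<arch>/ast/... files and
# external-definition map fragments under CTU_TEMP_DEFMAP_FOLDER for the later analysis phase.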
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/specs/python/specs.py | python | eval_spec | (spec, environment=None) | return bindings | Evaluates a spec and returns the environment.
This function allows you to use a spec to obtain multiple bindings
in an environment. That is useful if you use the spec language to
specify multiple components of a larger network, for example: "left
= Cr(64, [5,5]); right = Fc(64)" Usually, you will want to use
`create_net` or `create_net_fun` below.
Args:
spec: specification as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by spec.
Raises:
Exception: other exceptions raised during execution of `spec` | Evaluates a spec and returns the environment. | [
"Evaluates",
"a",
"spec",
"and",
"returns",
"the",
"environment",
"."
] | def eval_spec(spec, environment=None):
"""Evaluates a spec and returns the environment.
This function allows you to use a spec to obtain multiple bindings
in an environment. That is useful if you use the spec language to
specify multiple components of a larger network, for example: "left
= Cr(64, [5,5]); right = Fc(64)" Usually, you will want to use
`create_net` or `create_net_fun` below.
Args:
spec: specification as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by spec.
Raises:
Exception: other exceptions raised during execution of `spec`
"""
specs_lib.check_keywords(spec)
bindings = {}
if environment:
bindings.update(environment)
exec_(spec, vars(specs_ops), bindings) # pylint: disable=exec-used
return bindings | [
"def",
"eval_spec",
"(",
"spec",
",",
"environment",
"=",
"None",
")",
":",
"specs_lib",
".",
"check_keywords",
"(",
"spec",
")",
"bindings",
"=",
"{",
"}",
"if",
"environment",
":",
"bindings",
".",
"update",
"(",
"environment",
")",
"exec_",
"(",
"spec",
",",
"vars",
"(",
"specs_ops",
")",
",",
"bindings",
")",
"# pylint: disable=exec-used",
"return",
"bindings"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/specs/python/specs.py#L51-L76 |
|
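A usage sketch for the eval_spec record above, reusing the spec string from its own docstring; the import path assumes the TF 1.x contrib layout of the listed file, and the package is long deprecated, so treat this as illustrative only.
from tensorflow.contrib.specs.python import specs  # assumed import path for tensorflow/contrib/specs/python/specs.py

bindings = specs.eval_spec("left = Cr(64, [5, 5]); right = Fc(64)")
left, right = bindings['left'], bindings['right']  # whatever objects the spec bound to those names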
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/drafttaskpanels/task_shapestring.py | python | ShapeStringTaskPanel.action | (self, arg) | scene event handler | scene event handler | [
"scene",
"event",
"handler"
] | def action(self, arg):
"""scene event handler"""
if arg["Type"] == "SoKeyboardEvent":
if arg["Key"] == "ESCAPE":
self.reject()
elif arg["Type"] == "SoLocation2Event": # mouse movement detection
self.point,ctrlPoint,info = gui_tool_utils.getPoint(self.sourceCmd, arg, noTracker=True)
if not self.pointPicked:
self.setPoint(self.point)
elif arg["Type"] == "SoMouseButtonEvent":
if (arg["State"] == "DOWN") and (arg["Button"] == "BUTTON1"):
self.setPoint(self.point)
self.pointPicked = True | [
"def",
"action",
"(",
"self",
",",
"arg",
")",
":",
"if",
"arg",
"[",
"\"Type\"",
"]",
"==",
"\"SoKeyboardEvent\"",
":",
"if",
"arg",
"[",
"\"Key\"",
"]",
"==",
"\"ESCAPE\"",
":",
"self",
".",
"reject",
"(",
")",
"elif",
"arg",
"[",
"\"Type\"",
"]",
"==",
"\"SoLocation2Event\"",
":",
"# mouse movement detection",
"self",
".",
"point",
",",
"ctrlPoint",
",",
"info",
"=",
"gui_tool_utils",
".",
"getPoint",
"(",
"self",
".",
"sourceCmd",
",",
"arg",
",",
"noTracker",
"=",
"True",
")",
"if",
"not",
"self",
".",
"pointPicked",
":",
"self",
".",
"setPoint",
"(",
"self",
".",
"point",
")",
"elif",
"arg",
"[",
"\"Type\"",
"]",
"==",
"\"SoMouseButtonEvent\"",
":",
"if",
"(",
"arg",
"[",
"\"State\"",
"]",
"==",
"\"DOWN\"",
")",
"and",
"(",
"arg",
"[",
"\"Button\"",
"]",
"==",
"\"BUTTON1\"",
")",
":",
"self",
".",
"setPoint",
"(",
"self",
".",
"point",
")",
"self",
".",
"pointPicked",
"=",
"True"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/drafttaskpanels/task_shapestring.py#L100-L112 |
||
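The ShapeStringTaskPanel.action handler above only inspects a few keys of the Coin3D event dictionary, so hand-built dictionaries of the following shape are enough to trace its branches; the values are illustrative, the real dictionaries come from FreeCAD's scene graph.
key_event   = {"Type": "SoKeyboardEvent", "Key": "ESCAPE"}                           # rejects the task panel
move_event  = {"Type": "SoLocation2Event"}                                           # updates the tracked point
click_event = {"Type": "SoMouseButtonEvent", "State": "DOWN", "Button": "BUTTON1"}   # fixes the picked point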
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/tools/gyp/pylib/gyp/generator/ninja.py | python | OpenOutput | (path, mode='w') | return open(path, mode) | Open |path| for writing, creating directories if necessary. | Open |path| for writing, creating directories if necessary. | [
"Open",
"|path|",
"for",
"writing",
"creating",
"directories",
"if",
"necessary",
"."
] | def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode) | [
"def",
"OpenOutput",
"(",
"path",
",",
"mode",
"=",
"'w'",
")",
":",
"gyp",
".",
"common",
".",
"EnsureDirExists",
"(",
"path",
")",
"return",
"open",
"(",
"path",
",",
"mode",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/pylib/gyp/generator/ninja.py#L1736-L1739 |
|
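A minimal usage sketch for the OpenOutput record above; the output path is illustrative, and the call relies on gyp.common.EnsureDirExists to create any missing directories before opening.
with OpenOutput('out/Release/obj/foo.ninja') as f:  # intermediate directories are created first
    f.write('# generated file\n')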
devsisters/libquic | 8954789a056d8e7d5fcb6452fd1572ca57eb5c4e | src/third_party/protobuf/python/google/protobuf/internal/encoder.py | python | GroupEncoder | (field_number, is_repeated, is_packed) | Returns an encoder for a group field. | Returns an encoder for a group field. | [
"Returns",
"an",
"encoder",
"for",
"a",
"group",
"field",
"."
] | def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField | [
"def",
"GroupEncoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
")",
":",
"start_tag",
"=",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_START_GROUP",
")",
"end_tag",
"=",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_END_GROUP",
")",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"def",
"EncodeRepeatedField",
"(",
"write",
",",
"value",
")",
":",
"for",
"element",
"in",
"value",
":",
"write",
"(",
"start_tag",
")",
"element",
".",
"_InternalSerialize",
"(",
"write",
")",
"write",
"(",
"end_tag",
")",
"return",
"EncodeRepeatedField",
"else",
":",
"def",
"EncodeField",
"(",
"write",
",",
"value",
")",
":",
"write",
"(",
"start_tag",
")",
"value",
".",
"_InternalSerialize",
"(",
"write",
")",
"return",
"write",
"(",
"end_tag",
")",
"return",
"EncodeField"
] | https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/google/protobuf/internal/encoder.py#L725-L743 |
||
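A sketch of how the closure returned by GroupEncoder above is driven; the fake message class and its byte payload are invented for illustration, only the write-callback protocol comes from the excerpt.
class FakeGroup(object):
    def _InternalSerialize(self, write):
        write(b'\x08\x2a')  # pretend nested field: tag 1 (varint) with value 42

encode = GroupEncoder(4, False, False)  # field_number=4, not repeated, not packed
chunks = []
encode(chunks.append, FakeGroup())
# chunks now holds [start-group tag bytes, b'\x08\x2a', end-group tag bytes]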
gem5/gem5 | 141cc37c2d4b93959d4c249b8f7e6a8b2ef75338 | ext/ply/example/ansic/cparse.py | python | p_jump_statement_1 | (t) | jump_statement : GOTO ID SEMI | jump_statement : GOTO ID SEMI | [
"jump_statement",
":",
"GOTO",
"ID",
"SEMI"
] | def p_jump_statement_1(t):
'jump_statement : GOTO ID SEMI'
pass | [
"def",
"p_jump_statement_1",
"(",
"t",
")",
":",
"pass"
] | https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/ext/ply/example/ansic/cparse.py#L541-L543 |