nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/analysis.py | python | _fix_loop_exit | (cfg, loop) | Fixes loop.exits for Py3.8 bytecode CFG changes.
This is to handle `break` inside loops. | Fixes loop.exits for Py3.8 bytecode CFG changes.
This is to handle `break` inside loops. | [
"Fixes",
"loop",
".",
"exits",
"for",
"Py3",
".",
"8",
"bytecode",
"CFG",
"changes",
".",
"This",
"is",
"to",
"handle",
"break",
"inside",
"loops",
"."
] | def _fix_loop_exit(cfg, loop):
"""
Fixes loop.exits for Py3.8 bytecode CFG changes.
This is to handle `break` inside loops.
"""
# Computes the common postdoms of exit nodes
postdoms = cfg.post_dominators()
exits = reduce(
operator.and_,
[postdoms[b] for b in loop.exits],
loop.exits,
)
if exits:
# Put the non-common-exits as body nodes
body = loop.body | loop.exits - exits
return loop._replace(exits=exits, body=body)
else:
return loop | [
"def",
"_fix_loop_exit",
"(",
"cfg",
",",
"loop",
")",
":",
"# Computes the common postdoms of exit nodes",
"postdoms",
"=",
"cfg",
".",
"post_dominators",
"(",
")",
"exits",
"=",
"reduce",
"(",
"operator",
".",
"and_",
",",
"[",
"postdoms",
"[",
"b",
"]",
"for",
"b",
"in",
"loop",
".",
"exits",
"]",
",",
"loop",
".",
"exits",
",",
")",
"if",
"exits",
":",
"# Put the non-common-exits as body nodes",
"body",
"=",
"loop",
".",
"body",
"|",
"loop",
".",
"exits",
"-",
"exits",
"return",
"loop",
".",
"_replace",
"(",
"exits",
"=",
"exits",
",",
"body",
"=",
"body",
")",
"else",
":",
"return",
"loop"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/analysis.py#L266-L283 |
||
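The heart of `_fix_loop_exit` above is the `reduce(operator.and_, ...)` fold, which intersects the post-dominator sets of all exit blocks to find the exits they have in common. A minimal, self-contained sketch of that idiom (the block numbers and post-dominator sets below are hypothetical, not taken from numba):

```python
from functools import reduce
import operator

# Hypothetical CFG: block 2 post-dominates both exit blocks, block 1 does
# not, so block 1 would be folded back into the loop body.
exits = {1, 2}
postdoms = {1: {1, 2, 9}, 2: {2, 9}}
common = reduce(operator.and_, [postdoms[b] for b in exits], exits)
assert common == {2}
```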
bigartm/bigartm | 47e37f982de87aa67bfd475ff1f39da696b181b3 | utils/cpplint.py | python | CheckCStyleCast | (filename, clean_lines, linenum, cast_type, pattern, error) | return True | Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise. | Checks for a C-style cast by looking for the pattern. | [
"Checks",
"for",
"a",
"C",
"-",
"style",
"cast",
"by",
"looking",
"for",
"the",
"pattern",
"."
] | def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
# Try expanding current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
# [](int) -> bool {
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# Function((function_pointer_arg)(int), int param)
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
raw_line = clean_lines.raw_lines[linenum]
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True | [
"def",
"CheckCStyleCast",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"cast_type",
",",
"pattern",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"match",
"=",
"Search",
"(",
"pattern",
",",
"line",
")",
"if",
"not",
"match",
":",
"return",
"False",
"# Exclude lines with keywords that tend to look like casts",
"context",
"=",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"1",
")",
"-",
"1",
"]",
"if",
"Match",
"(",
"r'.*\\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\\s*$'",
",",
"context",
")",
":",
"return",
"False",
"# Try expanding current context to see if we one level of",
"# parentheses inside a macro.",
"if",
"linenum",
">",
"0",
":",
"for",
"i",
"in",
"xrange",
"(",
"linenum",
"-",
"1",
",",
"max",
"(",
"0",
",",
"linenum",
"-",
"5",
")",
",",
"-",
"1",
")",
":",
"context",
"=",
"clean_lines",
".",
"elided",
"[",
"i",
"]",
"+",
"context",
"if",
"Match",
"(",
"r'.*\\b[_A-Z][_A-Z0-9]*\\s*\\((?:\\([^()]*\\)|[^()])*$'",
",",
"context",
")",
":",
"return",
"False",
"# operator++(int) and operator--(int)",
"if",
"context",
".",
"endswith",
"(",
"' operator++'",
")",
"or",
"context",
".",
"endswith",
"(",
"' operator--'",
")",
":",
"return",
"False",
"# A single unnamed argument for a function tends to look like old",
"# style cast. If we see those, don't issue warnings for deprecated",
"# casts, instead issue warnings for unnamed arguments where",
"# appropriate.",
"#",
"# These are things that we want warnings for, since the style guide",
"# explicitly require all parameters to be named:",
"# Function(int);",
"# Function(int) {",
"# ConstMember(int) const;",
"# ConstMember(int) const {",
"# ExceptionMember(int) throw (...);",
"# ExceptionMember(int) throw (...) {",
"# PureVirtual(int) = 0;",
"# [](int) -> bool {",
"#",
"# These are functions of some sort, where the compiler would be fine",
"# if they had named parameters, but people often omit those",
"# identifiers to reduce clutter:",
"# (FunctionPointer)(int);",
"# (FunctionPointer)(int) = value;",
"# Function((function_pointer_arg)(int))",
"# Function((function_pointer_arg)(int), int param)",
"# <TemplateArgument(int)>;",
"# <(FunctionPointerTemplateArgument)(int)>;",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"0",
")",
":",
"]",
"if",
"Match",
"(",
"r'^\\s*(?:;|const\\b|throw\\b|final\\b|override\\b|[=>{),]|->)'",
",",
"remainder",
")",
":",
"# Looks like an unnamed parameter.",
"# Don't warn on any kind of template arguments.",
"if",
"Match",
"(",
"r'^\\s*>'",
",",
"remainder",
")",
":",
"return",
"False",
"# Don't warn on assignments to function pointers, but keep warnings for",
"# unnamed parameters to pure virtual functions. Note that this pattern",
"# will also pass on assignments of \"0\" to function pointers, but the",
"# preferred values for those would be \"nullptr\" or \"NULL\".",
"matched_zero",
"=",
"Match",
"(",
"r'^\\s=\\s*(\\S+)\\s*;'",
",",
"remainder",
")",
"if",
"matched_zero",
"and",
"matched_zero",
".",
"group",
"(",
"1",
")",
"!=",
"'0'",
":",
"return",
"False",
"# Don't warn on function pointer declarations. For this we need",
"# to check what came before the \"(type)\" string.",
"if",
"Match",
"(",
"r'.*\\)\\s*$'",
",",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"0",
")",
"]",
")",
":",
"return",
"False",
"# Don't warn if the parameter is named with block comments, e.g.:",
"# Function(int /*unused_param*/);",
"raw_line",
"=",
"clean_lines",
".",
"raw_lines",
"[",
"linenum",
"]",
"if",
"'/*'",
"in",
"raw_line",
":",
"return",
"False",
"# Passed all filters, issue warning here.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/function'",
",",
"3",
",",
"'All parameters should be named in a function'",
")",
"return",
"True",
"# At this point, all that should be left is actual casts.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/casting'",
",",
"4",
",",
"'Using C-style cast. Use %s<%s>(...) instead'",
"%",
"(",
"cast_type",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"return",
"True"
] | https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/utils/cpplint.py#L5341-L5442 |
|
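`CheckCStyleCast` receives its cast-detecting regex from the caller, so the function itself is pattern-agnostic. A rough, standalone illustration of the kind of pattern it is handed (this regex is simplified and is not the exact one cpplint uses):

```python
import re

# A parenthesized primitive type immediately followed by an operand looks
# like a C-style cast and would trigger the readability/casting warning.
pattern = r'\((int|float|double|char|bool)\)\s*[-\w]'
line = 'int x = (int)3.7;'
m = re.search(pattern, line)
assert m and m.group(1) == 'int'  # -> suggest static_cast<int>(3.7)
```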
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/html.py | python | HtmlHelpFrame.GetHelpWindow | (*args, **kwargs) | return _html.HtmlHelpFrame_GetHelpWindow(*args, **kwargs) | GetHelpWindow(self) -> HtmlHelpWindow | GetHelpWindow(self) -> HtmlHelpWindow | [
"GetHelpWindow",
"(",
"self",
")",
"-",
">",
"HtmlHelpWindow"
] | def GetHelpWindow(*args, **kwargs):
"""GetHelpWindow(self) -> HtmlHelpWindow"""
return _html.HtmlHelpFrame_GetHelpWindow(*args, **kwargs) | [
"def",
"GetHelpWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html",
".",
"HtmlHelpFrame_GetHelpWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/html.py#L1774-L1776 |
|
KhronosGroup/SPIRV-LLVM | 1eb85593f3fe2c39379b9a9b088d51eda4f42b8b | utils/llvm-build/llvmbuild/main.py | python | add_magic_target_components | (parser, project, opts) | add_magic_target_components(project, opts) -> None
Add the "magic" target based components to the project, which can only be
determined based on the target configuration options.
This currently is responsible for populating the required_libraries list of
the "all-targets", "Native", "NativeCodeGen", and "Engine" components. | add_magic_target_components(project, opts) -> None | [
"add_magic_target_components",
"(",
"project",
"opts",
")",
"-",
">",
"None"
] | def add_magic_target_components(parser, project, opts):
"""add_magic_target_components(project, opts) -> None
Add the "magic" target based components to the project, which can only be
determined based on the target configuration options.
This currently is responsible for populating the required_libraries list of
the "all-targets", "Native", "NativeCodeGen", and "Engine" components.
"""
# Determine the available targets.
available_targets = dict((ci.name,ci)
for ci in project.component_infos
if ci.type_name == 'TargetGroup')
# Find the configured native target.
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
native_target_name = { 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
if native_target_name is None:
native_target = None
else:
native_target = available_targets.get(native_target_name)
if native_target is None:
parser.error("invalid native target: %r (not in project)" % (
opts.native_target,))
if native_target.type_name != 'TargetGroup':
parser.error("invalid native target: %r (not a target)" % (
opts.native_target,))
# Find the list of targets to enable.
if opts.enable_targets is None:
enable_targets = available_targets.values()
else:
# We support both space separated and semi-colon separated lists.
if opts.enable_targets == '':
enable_target_names = []
elif ' ' in opts.enable_targets:
enable_target_names = opts.enable_targets.split()
else:
enable_target_names = opts.enable_targets.split(';')
enable_targets = []
for name in enable_target_names:
target = available_targets.get(name)
if target is None:
parser.error("invalid target to enable: %r (not in project)" % (
name,))
if target.type_name != 'TargetGroup':
parser.error("invalid target to enable: %r (not a target)" % (
name,))
enable_targets.append(target)
# Find the special library groups we are going to populate. We enforce that
# these appear in the project (instead of just adding them) so that they at
# least have an explicit representation in the project LLVMBuild files (and
# comments explaining how they are populated).
def find_special_group(name):
info = info_map.get(name)
if info is None:
fatal("expected project to contain special %r component" % (
name,))
if info.type_name != 'LibraryGroup':
fatal("special component %r should be a LibraryGroup" % (
name,))
if info.required_libraries:
fatal("special component %r must have empty %r list" % (
name, 'required_libraries'))
if info.add_to_library_groups:
fatal("special component %r must have empty %r list" % (
name, 'add_to_library_groups'))
info._is_special_group = True
return info
info_map = dict((ci.name, ci) for ci in project.component_infos)
all_targets = find_special_group('all-targets')
native_group = find_special_group('Native')
native_codegen_group = find_special_group('NativeCodeGen')
engine_group = find_special_group('Engine')
# Set the enabled bit in all the target groups, and append to the
# all-targets list.
for ci in enable_targets:
all_targets.required_libraries.append(ci.name)
ci.enabled = True
# If we have a native target, then that defines the native and
# native_codegen libraries.
if native_target and native_target.enabled:
native_group.required_libraries.append(native_target.name)
native_codegen_group.required_libraries.append(
'%sCodeGen' % native_target.name)
# If we have a native target with a JIT, use that for the engine. Otherwise,
# use the interpreter.
if native_target and native_target.enabled and native_target.has_jit:
engine_group.required_libraries.append('MCJIT')
engine_group.required_libraries.append(native_group.name)
else:
engine_group.required_libraries.append('Interpreter') | [
"def",
"add_magic_target_components",
"(",
"parser",
",",
"project",
",",
"opts",
")",
":",
"# Determine the available targets.",
"available_targets",
"=",
"dict",
"(",
"(",
"ci",
".",
"name",
",",
"ci",
")",
"for",
"ci",
"in",
"project",
".",
"component_infos",
"if",
"ci",
".",
"type_name",
"==",
"'TargetGroup'",
")",
"# Find the configured native target.",
"# We handle a few special cases of target names here for historical",
"# reasons, as these are the names configure currently comes up with.",
"native_target_name",
"=",
"{",
"'x86'",
":",
"'X86'",
",",
"'x86_64'",
":",
"'X86'",
",",
"'Unknown'",
":",
"None",
"}",
".",
"get",
"(",
"opts",
".",
"native_target",
",",
"opts",
".",
"native_target",
")",
"if",
"native_target_name",
"is",
"None",
":",
"native_target",
"=",
"None",
"else",
":",
"native_target",
"=",
"available_targets",
".",
"get",
"(",
"native_target_name",
")",
"if",
"native_target",
"is",
"None",
":",
"parser",
".",
"error",
"(",
"\"invalid native target: %r (not in project)\"",
"%",
"(",
"opts",
".",
"native_target",
",",
")",
")",
"if",
"native_target",
".",
"type_name",
"!=",
"'TargetGroup'",
":",
"parser",
".",
"error",
"(",
"\"invalid native target: %r (not a target)\"",
"%",
"(",
"opts",
".",
"native_target",
",",
")",
")",
"# Find the list of targets to enable.",
"if",
"opts",
".",
"enable_targets",
"is",
"None",
":",
"enable_targets",
"=",
"available_targets",
".",
"values",
"(",
")",
"else",
":",
"# We support both space separated and semi-colon separated lists.",
"if",
"opts",
".",
"enable_targets",
"==",
"''",
":",
"enable_target_names",
"=",
"[",
"]",
"elif",
"' '",
"in",
"opts",
".",
"enable_targets",
":",
"enable_target_names",
"=",
"opts",
".",
"enable_targets",
".",
"split",
"(",
")",
"else",
":",
"enable_target_names",
"=",
"opts",
".",
"enable_targets",
".",
"split",
"(",
"';'",
")",
"enable_targets",
"=",
"[",
"]",
"for",
"name",
"in",
"enable_target_names",
":",
"target",
"=",
"available_targets",
".",
"get",
"(",
"name",
")",
"if",
"target",
"is",
"None",
":",
"parser",
".",
"error",
"(",
"\"invalid target to enable: %r (not in project)\"",
"%",
"(",
"name",
",",
")",
")",
"if",
"target",
".",
"type_name",
"!=",
"'TargetGroup'",
":",
"parser",
".",
"error",
"(",
"\"invalid target to enable: %r (not a target)\"",
"%",
"(",
"name",
",",
")",
")",
"enable_targets",
".",
"append",
"(",
"target",
")",
"# Find the special library groups we are going to populate. We enforce that",
"# these appear in the project (instead of just adding them) so that they at",
"# least have an explicit representation in the project LLVMBuild files (and",
"# comments explaining how they are populated).",
"def",
"find_special_group",
"(",
"name",
")",
":",
"info",
"=",
"info_map",
".",
"get",
"(",
"name",
")",
"if",
"info",
"is",
"None",
":",
"fatal",
"(",
"\"expected project to contain special %r component\"",
"%",
"(",
"name",
",",
")",
")",
"if",
"info",
".",
"type_name",
"!=",
"'LibraryGroup'",
":",
"fatal",
"(",
"\"special component %r should be a LibraryGroup\"",
"%",
"(",
"name",
",",
")",
")",
"if",
"info",
".",
"required_libraries",
":",
"fatal",
"(",
"\"special component %r must have empty %r list\"",
"%",
"(",
"name",
",",
"'required_libraries'",
")",
")",
"if",
"info",
".",
"add_to_library_groups",
":",
"fatal",
"(",
"\"special component %r must have empty %r list\"",
"%",
"(",
"name",
",",
"'add_to_library_groups'",
")",
")",
"info",
".",
"_is_special_group",
"=",
"True",
"return",
"info",
"info_map",
"=",
"dict",
"(",
"(",
"ci",
".",
"name",
",",
"ci",
")",
"for",
"ci",
"in",
"project",
".",
"component_infos",
")",
"all_targets",
"=",
"find_special_group",
"(",
"'all-targets'",
")",
"native_group",
"=",
"find_special_group",
"(",
"'Native'",
")",
"native_codegen_group",
"=",
"find_special_group",
"(",
"'NativeCodeGen'",
")",
"engine_group",
"=",
"find_special_group",
"(",
"'Engine'",
")",
"# Set the enabled bit in all the target groups, and append to the",
"# all-targets list.",
"for",
"ci",
"in",
"enable_targets",
":",
"all_targets",
".",
"required_libraries",
".",
"append",
"(",
"ci",
".",
"name",
")",
"ci",
".",
"enabled",
"=",
"True",
"# If we have a native target, then that defines the native and",
"# native_codegen libraries.",
"if",
"native_target",
"and",
"native_target",
".",
"enabled",
":",
"native_group",
".",
"required_libraries",
".",
"append",
"(",
"native_target",
".",
"name",
")",
"native_codegen_group",
".",
"required_libraries",
".",
"append",
"(",
"'%sCodeGen'",
"%",
"native_target",
".",
"name",
")",
"# If we have a native target with a JIT, use that for the engine. Otherwise,",
"# use the interpreter.",
"if",
"native_target",
"and",
"native_target",
".",
"enabled",
"and",
"native_target",
".",
"has_jit",
":",
"engine_group",
".",
"required_libraries",
".",
"append",
"(",
"'MCJIT'",
")",
"engine_group",
".",
"required_libraries",
".",
"append",
"(",
"native_group",
".",
"name",
")",
"else",
":",
"engine_group",
".",
"required_libraries",
".",
"append",
"(",
"'Interpreter'",
")"
] | https://github.com/KhronosGroup/SPIRV-LLVM/blob/1eb85593f3fe2c39379b9a9b088d51eda4f42b8b/utils/llvm-build/llvmbuild/main.py#L702-L808 |
||
SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | xmlNode.xpointerNewRange | (self, startindex, end, endindex) | return xpathObjectRet(ret) | Create a new xmlXPathObjectPtr of type range | Create a new xmlXPathObjectPtr of type range | [
"Create",
"a",
"new",
"xmlXPathObjectPtr",
"of",
"type",
"range"
] | def xpointerNewRange(self, startindex, end, endindex):
"""Create a new xmlXPathObjectPtr of type range """
if end is None: end__o = None
else: end__o = end._o
ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)
if ret is None:raise treeError('xmlXPtrNewRange() failed')
return xpathObjectRet(ret) | [
"def",
"xpointerNewRange",
"(",
"self",
",",
"startindex",
",",
"end",
",",
"endindex",
")",
":",
"if",
"end",
"is",
"None",
":",
"end__o",
"=",
"None",
"else",
":",
"end__o",
"=",
"end",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlXPtrNewRange",
"(",
"self",
".",
"_o",
",",
"startindex",
",",
"end__o",
",",
"endindex",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlXPtrNewRange() failed'",
")",
"return",
"xpathObjectRet",
"(",
"ret",
")"
] | https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L3886-L3892 |
|
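A hedged usage sketch for `xpointerNewRange`; the document, nodes, and indices here are invented for illustration, and whether a given node/offset pair forms a valid XPointer range endpoint is decided by libxml2 at runtime (a failure raises `treeError`, per the code above):

```python
import libxml2

doc = libxml2.parseDoc("<root><a/><b/></root>")
start = doc.getRootElement()
end = start.children  # first child element, <a/>
rng = start.xpointerNewRange(0, end, 0)  # xmlXPathObjectPtr of type range
doc.freeDoc()
```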
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/pep8.py | python | register_check | (check, codes=None) | Register a new check object. | Register a new check object. | [
"Register",
"a",
"new",
"check",
"object",
"."
] | def register_check(check, codes=None):
"""Register a new check object."""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
else:
_checks[kind][check] = (codes or [''], args)
if inspect.isfunction(check):
args = inspect.getargspec(check)[0]
if args and args[0] in ('physical_line', 'logical_line'):
if codes is None:
codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
_add_check(check, args[0], codes, args)
elif inspect.isclass(check):
if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
_add_check(check, 'tree', codes, None) | [
"def",
"register_check",
"(",
"check",
",",
"codes",
"=",
"None",
")",
":",
"def",
"_add_check",
"(",
"check",
",",
"kind",
",",
"codes",
",",
"args",
")",
":",
"if",
"check",
"in",
"_checks",
"[",
"kind",
"]",
":",
"_checks",
"[",
"kind",
"]",
"[",
"check",
"]",
"[",
"0",
"]",
".",
"extend",
"(",
"codes",
"or",
"[",
"]",
")",
"else",
":",
"_checks",
"[",
"kind",
"]",
"[",
"check",
"]",
"=",
"(",
"codes",
"or",
"[",
"''",
"]",
",",
"args",
")",
"if",
"inspect",
".",
"isfunction",
"(",
"check",
")",
":",
"args",
"=",
"inspect",
".",
"getargspec",
"(",
"check",
")",
"[",
"0",
"]",
"if",
"args",
"and",
"args",
"[",
"0",
"]",
"in",
"(",
"'physical_line'",
",",
"'logical_line'",
")",
":",
"if",
"codes",
"is",
"None",
":",
"codes",
"=",
"ERRORCODE_REGEX",
".",
"findall",
"(",
"check",
".",
"__doc__",
"or",
"''",
")",
"_add_check",
"(",
"check",
",",
"args",
"[",
"0",
"]",
",",
"codes",
",",
"args",
")",
"elif",
"inspect",
".",
"isclass",
"(",
"check",
")",
":",
"if",
"inspect",
".",
"getargspec",
"(",
"check",
".",
"__init__",
")",
"[",
"0",
"]",
"[",
":",
"2",
"]",
"==",
"[",
"'self'",
",",
"'tree'",
"]",
":",
"_add_check",
"(",
"check",
",",
"'tree'",
",",
"codes",
",",
"None",
")"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/pep8.py#L1178-L1193 |
||
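Because `register_check` keys the check kind off the first argument name and harvests error codes from the docstring via `ERRORCODE_REGEX`, registering a custom check is just a matter of defining a conventionally-shaped function. The check below is hypothetical, written to the conventions the code above inspects:

```python
def no_fixme_markers(physical_line):
    """Flag FIXME markers left in shipped code.  (W901)"""
    pos = physical_line.find('FIXME')
    if pos >= 0:
        # Physical-line checks report (offset, message).
        return pos, 'W901 FIXME marker found'

register_check(no_fixme_markers)  # code W901 is pulled from the docstring
```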
generalized-intelligence/GAAS | 29ab17d3e8a4ba18edef3a57c36d8db6329fac73 | deprecated/algorithms/sfm/OpenSfM/opensfm/large/metadataset.py | python | MetaDataSet._submodel_images_path | (self, i) | return os.path.join(self.data_path, template % i) | Path to submodel i images folder. | Path to submodel i images folder. | [
"Path",
"to",
"submodel",
"i",
"images",
"folder",
"."
] | def _submodel_images_path(self, i):
"""Path to submodel i images folder."""
template = self.config['submodel_images_relpath_template']
return os.path.join(self.data_path, template % i) | [
"def",
"_submodel_images_path",
"(",
"self",
",",
"i",
")",
":",
"template",
"=",
"self",
".",
"config",
"[",
"'submodel_images_relpath_template'",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"data_path",
",",
"template",
"%",
"i",
")"
] | https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/deprecated/algorithms/sfm/OpenSfM/opensfm/large/metadataset.py#L39-L42 |
|
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | benchmark/opperf/nd_operations/binary_operators.py | python | run_mx_binary_broadcast_operators_benchmarks | (ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100) | return mx_binary_op_results | Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the binary
broadcast operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. | Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the binary
broadcast operators in MXNet. | [
"Runs",
"benchmarks",
"with",
"the",
"given",
"context",
"precision",
"(",
"dtype",
")",
"and",
"input",
"data",
"size",
"(",
"int64_tensor",
")",
"for",
"all",
"the",
"binary",
"broadcast",
"operators",
"in",
"MXNet",
"."
] | def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the binary
broadcast operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all Binary Broadcast Operators
mx_binary_broadcast_ops = get_all_broadcast_binary_operators()
# Run benchmarks
mx_binary_op_results = run_op_benchmarks(mx_binary_broadcast_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_binary_op_results | [
"def",
"run_mx_binary_broadcast_operators_benchmarks",
"(",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
",",
"dtype",
"=",
"'float32'",
",",
"profiler",
"=",
"'native'",
",",
"int64_tensor",
"=",
"'off'",
",",
"warmup",
"=",
"25",
",",
"runs",
"=",
"100",
")",
":",
"# Fetch all Binary Broadcast Operators",
"mx_binary_broadcast_ops",
"=",
"get_all_broadcast_binary_operators",
"(",
")",
"# Run benchmarks",
"mx_binary_op_results",
"=",
"run_op_benchmarks",
"(",
"mx_binary_broadcast_ops",
",",
"dtype",
",",
"ctx",
",",
"profiler",
",",
"int64_tensor",
",",
"warmup",
",",
"runs",
")",
"return",
"mx_binary_op_results"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/benchmark/opperf/nd_operations/binary_operators.py#L72-L100 |
|
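A usage sketch for the benchmark driver above, using only the parameters its docstring documents:

```python
import mxnet as mx

# Benchmark every broadcast binary operator on CPU; the result maps
# operator name -> benchmark measurements.
results = run_mx_binary_broadcast_operators_benchmarks(
    ctx=mx.cpu(), dtype='float32', profiler='native',
    int64_tensor='off', warmup=10, runs=50)
for op_name, measurements in results.items():
    print(op_name, measurements)
```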
CGRU/cgru | 1881a4128530e3d31ac6c25314c18314fc50c2c7 | plugins/houdini/pdg/types/afanasyscheduler.py | python | AfanasyScheduler.onStop | (self) | return True | [virtual] Called by PDG when scheduler is cleaned up. | [virtual] Called by PDG when scheduler is cleaned up. | [
"[",
"virtual",
"]",
"Called",
"by",
"PDG",
"when",
"scheduler",
"is",
"cleaned",
"up",
"."
] | def onStop(self):
"""
[virtual] Called by PDG when scheduler is cleaned up.
"""
self._log('onStop')
self.stopCallbackServer()
self._deleteJob()
return True | [
"def",
"onStop",
"(",
"self",
")",
":",
"self",
".",
"_log",
"(",
"'onStop'",
")",
"self",
".",
"stopCallbackServer",
"(",
")",
"self",
".",
"_deleteJob",
"(",
")",
"return",
"True"
] | https://github.com/CGRU/cgru/blob/1881a4128530e3d31ac6c25314c18314fc50c2c7/plugins/houdini/pdg/types/afanasyscheduler.py#L263-L270 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | python | _get_embeddable_interpretation_doc | (indent=0) | return "\n".join(" " * indent + line for line in output_rows) | Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount. | Returns a list of the available interpretations and what they do. | [
"Returns",
"a",
"list",
"of",
"the",
"available",
"interpretations",
"and",
"what",
"they",
"do",
"."
] | def _get_embeddable_interpretation_doc(indent=0):
"""
Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount.
"""
output_rows = []
# Pull out the doc string and put it in a table.
for name in sorted(dir(_interpretations)):
if name.startswith("_") or "__" not in name:
continue
interpretation, type_str = name.split("__")
func = getattr(_interpretations, name)
output_rows.append("%s (%s type):" % (interpretation, type_str))
output_rows += [
(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")
]
output_rows.append("")
return "\n".join(" " * indent + line for line in output_rows) | [
"def",
"_get_embeddable_interpretation_doc",
"(",
"indent",
"=",
"0",
")",
":",
"output_rows",
"=",
"[",
"]",
"# Pull out the doc string and put it in a table.",
"for",
"name",
"in",
"sorted",
"(",
"dir",
"(",
"_interpretations",
")",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"_\"",
")",
"or",
"\"__\"",
"not",
"in",
"name",
":",
"continue",
"interpretation",
",",
"type_str",
"=",
"name",
".",
"split",
"(",
"\"__\"",
")",
"func",
"=",
"getattr",
"(",
"_interpretations",
",",
"name",
")",
"output_rows",
".",
"append",
"(",
"\"%s (%s type):\"",
"%",
"(",
"interpretation",
",",
"type_str",
")",
")",
"output_rows",
"+=",
"[",
"(",
"\" \"",
"+",
"line",
")",
"for",
"line",
"in",
"_textwrap",
".",
"dedent",
"(",
"func",
".",
"__doc__",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"]",
"output_rows",
".",
"append",
"(",
"\"\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"\" \"",
"*",
"indent",
"+",
"line",
"for",
"line",
"in",
"output_rows",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L455-L480 |
|
microsoft/CNTK | e9396480025b9ca457d26b6f33dd07c474c6aa04 | bindings/python/cntk/random/__init__.py | python | bernoulli | (shape, dtype=default_override_or(np.float32), mean=0.5, seed=auto_select, name='') | return bernoulli_random(shape, dtype, mean, seed, name) | bernoulli(shape, dtype=default_override_or(np.float32), mean=0.5, seed=auto_select, name='')
Generates samples from the Bernoulli distribution with success probability `mean`.
Args:
shape (tuple): shape of the output (entries are independent random draws)
dtype (np.float32 or np.float64 or np.float16): data type. Default is np.float32.
mean (float): success probability
seed (int): pseudo random number generator seed (default: automatically select a unique seed)
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
Examples:
>>> b = C.random.bernoulli((2,3), seed=98052)
>>> b.eval(device=C.cpu()) # explicitly setting cpu because this is tested on multiple platforms; leave it unspecified in your code
array([[ 1., 1., 0.],
[ 1., 0., 0.]], dtype=float32) | bernoulli(shape, dtype=default_override_or(np.float32), mean=0.5, seed=auto_select, name='')
Generates samples from the Bernoulli distribution with success probability `mean`. | [
"bernoulli",
"(",
"shape",
"dtype",
"=",
"default_override_or",
"(",
"np",
".",
"float32",
")",
"mean",
"=",
"0",
".",
"5",
"seed",
"=",
"auto_select",
"name",
"=",
")",
"Generates",
"samples",
"from",
"the",
"Bernoulli",
"distribution",
"with",
"success",
"probability",
"mean",
"."
] | def bernoulli(shape, dtype=default_override_or(np.float32), mean=0.5, seed=auto_select, name=''):
"""bernoulli(shape, dtype=default_override_or(np.float32), mean=0.5, seed=auto_select, name='')
Generates samples from the Bernoulli distribution with success probability `mean`.
Args:
shape (tuple): shape of the output (entries are independent random draws)
dtype (np.float32 or np.float64 or np.float16): data type. Default is np.float32.
mean (float): success probability
seed (int): pseudo random number generator seed (default: automatically select a unique seed)
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function`
Examples:
>>> b = C.random.bernoulli((2,3), seed=98052)
>>> b.eval(device=C.cpu()) # explicitly setting cpu because this is tested on multiple platforms; leave it unspecified in your code
array([[ 1., 1., 0.],
[ 1., 0., 0.]], dtype=float32)
"""
from cntk.cntk_py import bernoulli_random
shape, dtype = sanitize_random_args(shape, dtype)
return bernoulli_random(shape, dtype, mean, seed, name) | [
"def",
"bernoulli",
"(",
"shape",
",",
"dtype",
"=",
"default_override_or",
"(",
"np",
".",
"float32",
")",
",",
"mean",
"=",
"0.5",
",",
"seed",
"=",
"auto_select",
",",
"name",
"=",
"''",
")",
":",
"from",
"cntk",
".",
"cntk_py",
"import",
"bernoulli_random",
"shape",
",",
"dtype",
"=",
"sanitize_random_args",
"(",
"shape",
",",
"dtype",
")",
"return",
"bernoulli_random",
"(",
"shape",
",",
"dtype",
",",
"mean",
",",
"seed",
",",
"name",
")"
] | https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/random/__init__.py#L106-L128 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/grid.py | python | GridUpdateLocker.__init__ | (self, *args, **kwargs) | __init__(self, Grid grid=None) -> GridUpdateLocker | __init__(self, Grid grid=None) -> GridUpdateLocker | [
"__init__",
"(",
"self",
"Grid",
"grid",
"=",
"None",
")",
"-",
">",
"GridUpdateLocker"
] | def __init__(self, *args, **kwargs):
"""__init__(self, Grid grid=None) -> GridUpdateLocker"""
_grid.GridUpdateLocker_swiginit(self,_grid.new_GridUpdateLocker(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_grid",
".",
"GridUpdateLocker_swiginit",
"(",
"self",
",",
"_grid",
".",
"new_GridUpdateLocker",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L2282-L2284 |
||
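`GridUpdateLocker` wraps the C++ RAII helper: constructing it suspends grid repainting, and destroying it resumes repainting, which avoids flicker during bulk edits. A hedged sketch (the grid itself is assumed to exist already):

```python
import wx.grid as gridlib

def fill_first_column(grid):
    # Repainting stays suspended while the locker object is alive.
    locker = gridlib.GridUpdateLocker(grid)
    for row in range(grid.GetNumberRows()):
        grid.SetCellValue(row, 0, str(row))
    del locker  # destruction re-enables updates and triggers a redraw
```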
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/control-examples/klampt_catkin/src/klampt/scripts/controller.py | python | MultiController.map_input | (self,c,regitem,citem=None) | return | Sends register regitem to the input of controller c.
If citem is specified, the data is mapped to name citem.
If this is not called for a given controller, then all items
in the register are automatically sent to the controller. | Sends register regitem to the input of controller c.
If citem is specified, the data is mapped to name citem.
If this is not called for a given controller, then all items
in the register are automatically sent to the controller. | [
"Sends",
"register",
"regitem",
"to",
"the",
"input",
"of",
"controller",
"c",
".",
"If",
"citem",
"is",
"specified",
"the",
"data",
"is",
"is",
"mapped",
"to",
"name",
"citem",
".",
"If",
"this",
"is",
"not",
"called",
"for",
"a",
"given",
"controller",
"then",
"all",
"items",
"in",
"the",
"register",
"are",
"automatically",
"sent",
"to",
"the",
"controller",
"."
] | def map_input(self,c,regitem,citem=None):
"""Sends register regitem to the input of controller c.
If citem is specified, the data is mapped to name citem.
If this is not called for a given controller, then all items
in the register are automatically sent to the controller."""
if self.inmap[c]==None:
self.inmap[c] = {}
if citem == None:
self.inmap[c][regitem]=regitem
else:
self.inmap[c][citem]=regitem
return | [
"def",
"map_input",
"(",
"self",
",",
"c",
",",
"regitem",
",",
"citem",
"=",
"None",
")",
":",
"if",
"self",
".",
"inmap",
"[",
"c",
"]",
"==",
"None",
":",
"self",
".",
"inmap",
"[",
"c",
"]",
"=",
"{",
"}",
"if",
"citem",
"==",
"None",
":",
"self",
".",
"inmap",
"[",
"c",
"]",
"[",
"regitem",
"]",
"=",
"regitem",
"else",
":",
"self",
".",
"inmap",
"[",
"c",
"]",
"[",
"citem",
"]",
"=",
"regitem",
"return"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/control-examples/klampt_catkin/src/klampt/scripts/controller.py#L91-L102 |
|
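A minimal sketch of the register-to-controller wiring that `map_input` records; the `MultiController` instance `multi`, the sub-controller index `c`, and the register names are all hypothetical:

```python
# Forward the 'sensed_q' register to controller c under its own name...
multi.map_input(c, 'sensed_q')
# ...and also expose the same register to c under the name 'q_current'.
multi.map_input(c, 'sensed_q', citem='q_current')
```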
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py | python | LoneCommand.parse | (self,reader) | return [] | Read nothing | Read nothing | [
"Read",
"nothing"
] | def parse(self,reader):
"Read nothing"
return [] | [
"def",
"parse",
"(",
"self",
",",
"reader",
")",
":",
"return",
"[",
"]"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py#L1461-L1463 |
|
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/model/trajectory.py | python | GeodesicHermiteTrajectory.length | (self,metric=None) | return l | Upper bound on the length | Upper bound on the length | [
"Upper",
"bound",
"on",
"the",
"length"
] | def length(self,metric=None):
"""Upper bound on the length"""
if metric is None:
metric = self.geodesic.distance
n = self.geodesic.extrinsicDimension()
l = 0
for i,(a,b) in enumerate(zip(self.milestones[:-1],self.milestones[1:])):
dt = self.times[i+1]-self.times[i]
c0 = a[:n]
v0 = vectorops.mul(a[n:],dt)
c3 = b[:n]
v3 = vectorops.mul(b[n:],dt)
third = 1.0/3.0
c1 = self.geodesic.integrate(c0,v0,third)
c2 = self.geodesic.integrate(c3,v3,-third)
l += metric(c0,c1)
l += metric(c1,c2)
l += metric(c2,c3)
return l | [
"def",
"length",
"(",
"self",
",",
"metric",
"=",
"None",
")",
":",
"if",
"metric",
"is",
"None",
":",
"metric",
"=",
"self",
".",
"geodesic",
".",
"distance",
"n",
"=",
"self",
".",
"geodesic",
".",
"extrinsicDimension",
"(",
")",
"l",
"=",
"0",
"for",
"i",
",",
"(",
"a",
",",
"b",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"milestones",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"milestones",
"[",
"1",
":",
"]",
")",
")",
":",
"dt",
"=",
"self",
".",
"times",
"[",
"i",
"+",
"1",
"]",
"-",
"self",
".",
"times",
"[",
"i",
"]",
"c0",
"=",
"a",
"[",
":",
"n",
"]",
"v0",
"=",
"vectorops",
".",
"mul",
"(",
"a",
"[",
"n",
":",
"]",
",",
"dt",
")",
"c3",
"=",
"b",
"[",
":",
"n",
"]",
"v3",
"=",
"vectorops",
".",
"mul",
"(",
"b",
"[",
"n",
":",
"]",
",",
"dt",
")",
"third",
"=",
"1.0",
"/",
"3.0",
"c1",
"=",
"self",
".",
"geodesic",
".",
"integrate",
"(",
"c0",
",",
"v0",
",",
"third",
")",
"c2",
"=",
"self",
".",
"geodesic",
".",
"integrate",
"(",
"c3",
",",
"v3",
",",
"-",
"third",
")",
"l",
"+=",
"metric",
"(",
"c0",
",",
"c1",
")",
"l",
"+=",
"metric",
"(",
"c1",
",",
"c2",
")",
"l",
"+=",
"metric",
"(",
"c2",
",",
"c3",
")",
"return",
"l"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/model/trajectory.py#L1246-L1264 |
|
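Since the bound sums chord lengths of each segment's cubic control polygon, any distance function with the same signature as `geodesic.distance` can be substituted. A sketch, assuming `traj` is an existing `GeodesicHermiteTrajectory`:

```python
from klampt.math import vectorops

upper = traj.length()                                   # manifold's own metric
upper_euclid = traj.length(metric=vectorops.distance)   # plain Euclidean bound
```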
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/specs/python/summaries.py | python | tf_spec_summary | (spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32) | Output a summary of the specification.
This prints a list of left-most tensor operations and summarized the
variables found in the right branches. This kind of representation
is particularly useful for networks that are generally structured
like pipelines.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor | Output a summary of the specification. | [
"Output",
"a",
"summary",
"of",
"the",
"specification",
"."
] | def tf_spec_summary(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Output a summary of the specification.
This prints a list of left-most tensor operations and summarizes the
variables found in the right branches. This kind of representation
is particularly useful for networks that are generally structured
like pipelines.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_parameter_summary(outputs) | [
"def",
"tf_spec_summary",
"(",
"spec",
",",
"inputs",
"=",
"None",
",",
"input_shape",
"=",
"None",
",",
"input_type",
"=",
"dtypes",
".",
"float32",
")",
":",
"if",
"inputs",
"is",
"None",
":",
"inputs",
"=",
"array_ops",
".",
"placeholder",
"(",
"input_type",
",",
"input_shape",
")",
"outputs",
"=",
"specs",
".",
"create_net",
"(",
"spec",
",",
"inputs",
")",
"tf_parameter_summary",
"(",
"outputs",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/specs/python/summaries.py#L273-L294 |
||
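A usage sketch for `tf_spec_summary`; the spec string below is written in the `specs` mini-language (`Cr` = convolution + relu, `Mp` = max pooling) and is illustrative rather than taken from the source:

```python
# Print a pipeline-style parameter summary for a small convnet spec.
tf_spec_summary('net = Cr(64, [3, 3]) | Mp([2, 2]) | Flat | Fs(10)',
                input_shape=(None, 28, 28, 1))
```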
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py | python | withAttribute | (*args, **attrDict) | return pa | Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1 | Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``. | [
"Helper",
"to",
"create",
"a",
"validating",
"parse",
"action",
"to",
"be",
"used",
"with",
"start",
"tags",
"created",
"with",
":",
"class",
":",
"makeXMLTags",
"or",
":",
"class",
":",
"makeHTMLTags",
".",
"Use",
"withAttribute",
"to",
"qualify",
"a",
"starting",
"tag",
"with",
"a",
"required",
"attribute",
"value",
"to",
"avoid",
"false",
"matches",
"on",
"common",
"tags",
"such",
"as",
"<TD",
">",
"or",
"<DIV",
">",
"."
] | def withAttribute(*args, **attrDict):
"""Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa | [
"def",
"withAttribute",
"(",
"*",
"args",
",",
"*",
"*",
"attrDict",
")",
":",
"if",
"args",
":",
"attrs",
"=",
"args",
"[",
":",
"]",
"else",
":",
"attrs",
"=",
"attrDict",
".",
"items",
"(",
")",
"attrs",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"attrs",
"]",
"def",
"pa",
"(",
"s",
",",
"l",
",",
"tokens",
")",
":",
"for",
"attrName",
",",
"attrValue",
"in",
"attrs",
":",
"if",
"attrName",
"not",
"in",
"tokens",
":",
"raise",
"ParseException",
"(",
"s",
",",
"l",
",",
"\"no matching attribute \"",
"+",
"attrName",
")",
"if",
"attrValue",
"!=",
"withAttribute",
".",
"ANY_VALUE",
"and",
"tokens",
"[",
"attrName",
"]",
"!=",
"attrValue",
":",
"raise",
"ParseException",
"(",
"s",
",",
"l",
",",
"\"attribute '%s' has value '%s', must be '%s'\"",
"%",
"(",
"attrName",
",",
"tokens",
"[",
"attrName",
"]",
",",
"attrValue",
")",
")",
"return",
"pa"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py#L5875-L5943 |
|
dicecco1/fpga_caffe | 7a191704efd7873071cfef35772d7e7bf3e3cfd6 | scripts/cpp_lint.py | python | _NestingState.SeenOpenBrace | (self) | return (not self.stack) or self.stack[-1].seen_open_brace | Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace. | Check if we have seen the opening brace for the innermost block. | [
"Check",
"if",
"we",
"have",
"seen",
"the",
"opening",
"brace",
"for",
"the",
"innermost",
"block",
"."
] | def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace | [
"def",
"SeenOpenBrace",
"(",
"self",
")",
":",
"return",
"(",
"not",
"self",
".",
"stack",
")",
"or",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"seen_open_brace"
] | https://github.com/dicecco1/fpga_caffe/blob/7a191704efd7873071cfef35772d7e7bf3e3cfd6/scripts/cpp_lint.py#L1935-L1942 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/ribbon/art_msw.py | python | RibbonMSWArtProvider.DrawTabSeparator | (self, dc, wnd, rect, visibility) | Draw a separator between two tabs in a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw, which will be entirely
within a rectangle on the same device context previously painted with
:meth:`~RibbonMSWArtProvider.DrawTabCtrlBackground`;
:param `visibility`: The opacity with which to draw the separator. Values
are in the range [0, 1], with 0 being totally transparent, and 1 being totally
opaque. | Draw a separator between two tabs in a ribbon bar. | [
"Draw",
"a",
"separator",
"between",
"two",
"tabs",
"in",
"a",
"ribbon",
"bar",
"."
] | def DrawTabSeparator(self, dc, wnd, rect, visibility):
"""
Draw a separator between two tabs in a ribbon bar.
:param `dc`: The device context to draw onto;
:param `wnd`: The window which is being drawn onto;
:param `rect`: The rectangle within which to draw, which will be entirely
within a rectangle on the same device context previously painted with
:meth:`~RibbonMSWArtProvider.DrawTabCtrlBackground`;
:param `visibility`: The opacity with which to draw the separator. Values
are in the range [0, 1], with 0 being totally transparent, and 1 being totally
opaque.
"""
if visibility <= 0.0:
return
if visibility > 1.0:
visibility = 1.0
# The tab separator is relatively expensive to draw (for its size), and is
# usually drawn multiple times sequentially (in different positions), so it
# makes sense to draw it once and cache it.
if not self._cached_tab_separator.IsOk() or self._cached_tab_separator.GetSize() != rect.GetSize() or \
visibility != self._cached_tab_separator_visibility:
size = wx.Rect(0, 0, *rect.GetSize())
self.ReallyDrawTabSeparator(wnd, size, visibility)
dc.DrawBitmap(self._cached_tab_separator, rect.x, rect.y, False) | [
"def",
"DrawTabSeparator",
"(",
"self",
",",
"dc",
",",
"wnd",
",",
"rect",
",",
"visibility",
")",
":",
"if",
"visibility",
"<=",
"0.0",
":",
"return",
"if",
"visibility",
">",
"1.0",
":",
"visibility",
"=",
"1.0",
"# The tab separator is relatively expensive to draw (for its size), and is",
"# usually drawn multiple times sequentially (in different positions), so it",
"# makes sense to draw it once and cache it.",
"if",
"not",
"self",
".",
"_cached_tab_separator",
".",
"IsOk",
"(",
")",
"or",
"self",
".",
"_cached_tab_separator",
".",
"GetSize",
"(",
")",
"!=",
"rect",
".",
"GetSize",
"(",
")",
"or",
"visibility",
"!=",
"self",
".",
"_cached_tab_separator_visibility",
":",
"size",
"=",
"wx",
".",
"Rect",
"(",
"0",
",",
"0",
",",
"*",
"rect",
".",
"GetSize",
"(",
")",
")",
"self",
".",
"ReallyDrawTabSeparator",
"(",
"wnd",
",",
"size",
",",
"visibility",
")",
"dc",
".",
"DrawBitmap",
"(",
"self",
".",
"_cached_tab_separator",
",",
"rect",
".",
"x",
",",
"rect",
".",
"y",
",",
"False",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ribbon/art_msw.py#L1043-L1073 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_windows.py | python | PyWindow.DoGetPosition | (*args, **kwargs) | return _windows_.PyWindow_DoGetPosition(*args, **kwargs) | DoGetPosition() -> (x,y) | DoGetPosition() -> (x,y) | [
"DoGetPosition",
"()",
"-",
">",
"(",
"x",
"y",
")"
] | def DoGetPosition(*args, **kwargs):
"""DoGetPosition() -> (x,y)"""
return _windows_.PyWindow_DoGetPosition(*args, **kwargs) | [
"def",
"DoGetPosition",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"PyWindow_DoGetPosition",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L4170-L4172 |
|
openMSX/openMSX | c9cfbc0a2a2baaf2c4513c87543fe29bfe8cf806 | src/video/scalers/hq.py | python | genHQLiteOffsetsTable | (pixelExpr) | In the hqlite case, the result color depends on at most one neighbour
color. Therefore, an offset into an interpolated texture is used instead
of explicit weights.
Output is a 64N * 64N texture, where N is the zoom factor. | In the hqlite case, the result color depends on at most one neighbour
color. Therefore, an offset into an interpolated texture is used instead
of explicit weights.
Output is a 64N * 64N texture, where N is the zoom factor. | [
"In",
"the",
"hqlite",
"case",
"the",
"result",
"color",
"depends",
"on",
"at",
"most",
"one",
"neighbour",
"color",
".",
"Therefore",
"an",
"offset",
"into",
"an",
"interpolated",
"texture",
"is",
"used",
"instead",
"of",
"explicit",
"weights",
".",
"Output",
"is",
"a",
"64N",
"*",
"64N",
"texture",
"where",
"N",
"is",
"the",
"zoom",
"factor",
"."
] | def genHQLiteOffsetsTable(pixelExpr):
'''In the hqlite case, the result color depends on at most one neighbour
color. Therefore, an offset into an interpolated texture is used instead
of explicit weights.
Output is a 64N * 64N texture, where N is the zoom factor.
'''
zoom = getZoom(pixelExpr)
for caseMajor in range(0, len(pixelExpr), 64):
for subY in range(zoom):
for caseMinor in range(64):
for subX in range(zoom):
subPixel = zoom * subY + subX
weights = pixelExpr[caseMajor + caseMinor][subPixel]
if weights is None:
neighbour = None
else:
neighbours = computeNeighbours(weights)
assert neighbours[1] is None, neighbours
neighbour = neighbours[0]
factor = sum(weights)
x = int(192.5 - 128 * (0.5 + subX) / zoom)
y = int(192.5 - 128 * (0.5 + subY) / zoom)
if neighbour == 3:
x -= 128 * weights[3] // factor
elif neighbour == 5:
x += 128 * weights[5] // factor
else:
assert neighbour is None, neighbour
assert 0 <= x < 256, x
assert 0 <= y < 256, y
yield x
yield y | [
"def",
"genHQLiteOffsetsTable",
"(",
"pixelExpr",
")",
":",
"zoom",
"=",
"getZoom",
"(",
"pixelExpr",
")",
"for",
"caseMajor",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"pixelExpr",
")",
",",
"64",
")",
":",
"for",
"subY",
"in",
"range",
"(",
"zoom",
")",
":",
"for",
"caseMinor",
"in",
"range",
"(",
"64",
")",
":",
"for",
"subX",
"in",
"range",
"(",
"zoom",
")",
":",
"subPixel",
"=",
"zoom",
"*",
"subY",
"+",
"subX",
"weights",
"=",
"pixelExpr",
"[",
"caseMajor",
"+",
"caseMinor",
"]",
"[",
"subPixel",
"]",
"if",
"weights",
"is",
"None",
":",
"neighbour",
"=",
"None",
"else",
":",
"neighbours",
"=",
"computeNeighbours",
"(",
"weights",
")",
"assert",
"neighbours",
"[",
"1",
"]",
"is",
"None",
",",
"neighbours",
"neighbour",
"=",
"neighbours",
"[",
"0",
"]",
"factor",
"=",
"sum",
"(",
"weights",
")",
"x",
"=",
"int",
"(",
"192.5",
"-",
"128",
"*",
"(",
"0.5",
"+",
"subX",
")",
"/",
"zoom",
")",
"y",
"=",
"int",
"(",
"192.5",
"-",
"128",
"*",
"(",
"0.5",
"+",
"subY",
")",
"/",
"zoom",
")",
"if",
"neighbour",
"==",
"3",
":",
"x",
"-=",
"128",
"*",
"weights",
"[",
"3",
"]",
"//",
"factor",
"elif",
"neighbour",
"==",
"5",
":",
"x",
"+=",
"128",
"*",
"weights",
"[",
"5",
"]",
"//",
"factor",
"else",
":",
"assert",
"neighbour",
"is",
"None",
",",
"neighbour",
"assert",
"0",
"<=",
"x",
"<",
"256",
",",
"x",
"assert",
"0",
"<=",
"y",
"<",
"256",
",",
"y",
"yield",
"x",
"yield",
"y"
] | https://github.com/openMSX/openMSX/blob/c9cfbc0a2a2baaf2c4513c87543fe29bfe8cf806/src/video/scalers/hq.py#L339-L371 |
||
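The offset arithmetic in genHQLiteOffsetsTable is easy to sanity-check by hand. A minimal sketch with a hypothetical weight vector (indices 3 and 5 are the left/right neighbours, as in the function above):

# Illustrative numbers only; the 9-element weight vector is made up.
zoom, subX, subY = 2, 1, 0
weights = (0, 0, 0, 0, 2, 1, 0, 0, 0)        # factor = 3, right neighbour weight 1
factor = sum(weights)
x = int(192.5 - 128 * (0.5 + subX) / zoom)   # 192.5 - 96.0  -> 96
y = int(192.5 - 128 * (0.5 + subY) / zoom)   # 192.5 - 32.0  -> 160
x += 128 * weights[5] // factor              # neighbour 5 shifts x: 96 + 42 = 138
assert 0 <= x < 256 and 0 <= y < 256
print(x, y)   # 138 160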
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/writers/_html_base.py | python | HTMLTranslator.check_simple_list | (self, node) | Check for a simple list that can be rendered compactly. | Check for a simple list that can be rendered compactly. | [
"Check",
"for",
"a",
"simple",
"list",
"that",
"can",
"be",
"rendered",
"compactly",
"."
] | def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return False
else:
return True | [
"def",
"check_simple_list",
"(",
"self",
",",
"node",
")",
":",
"visitor",
"=",
"SimpleListChecker",
"(",
"self",
".",
"document",
")",
"try",
":",
"node",
".",
"walk",
"(",
"visitor",
")",
"except",
"nodes",
".",
"NodeFound",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/writers/_html_base.py#L479-L487 |
||
arangodb/arangodb | 0d658689c7d1b721b314fa3ca27d38303e1570c8 | 3rdParty/V8/gyp/generator/cmake.py | python | SetTargetProperty | (output, target_name, property_name, values, sep='') | Given a target, sets the given property. | Given a target, sets the given property. | [
"Given",
"a",
"target",
"sets",
"the",
"given",
"property",
"."
] | def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n') | [
"def",
"SetTargetProperty",
"(",
"output",
",",
"target_name",
",",
"property_name",
",",
"values",
",",
"sep",
"=",
"''",
")",
":",
"output",
".",
"write",
"(",
"'set_target_properties('",
")",
"output",
".",
"write",
"(",
"target_name",
")",
"output",
".",
"write",
"(",
"' PROPERTIES '",
")",
"output",
".",
"write",
"(",
"property_name",
")",
"output",
".",
"write",
"(",
"' \"'",
")",
"for",
"value",
"in",
"values",
":",
"output",
".",
"write",
"(",
"CMakeStringEscape",
"(",
"value",
")",
")",
"output",
".",
"write",
"(",
"sep",
")",
"output",
".",
"write",
"(",
"'\")\\n'",
")"
] | https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/generator/cmake.py#L172-L182 |
||
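SetTargetProperty only appends to a writable stream, so it can be exercised against an in-memory buffer. A minimal usage sketch; the import path is an assumption, and the module-level CMakeStringEscape helper resolves inside the function:

import io
from gyp.generator.cmake import SetTargetProperty   # hypothetical import path

buf = io.StringIO()
SetTargetProperty(buf, 'mylib', 'COMPILE_FLAGS', ['-Wall', '-Wextra'], sep=' ')
print(buf.getvalue())
# set_target_properties(mylib PROPERTIES COMPILE_FLAGS "-Wall -Wextra ")
# note: sep is appended after every value, including the last one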
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/seq2seq/python/ops/helper.py | python | GreedyEmbeddingHelper.next_inputs | (self, time, outputs, state, sample_ids, name=None) | return (finished, next_inputs, state) | next_inputs_fn for GreedyEmbeddingHelper. | next_inputs_fn for GreedyEmbeddingHelper. | [
"next_inputs_fn",
"for",
"GreedyEmbeddingHelper",
"."
] | def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state) | [
"def",
"next_inputs",
"(",
"self",
",",
"time",
",",
"outputs",
",",
"state",
",",
"sample_ids",
",",
"name",
"=",
"None",
")",
":",
"del",
"time",
",",
"outputs",
"# unused by next_inputs_fn",
"finished",
"=",
"math_ops",
".",
"equal",
"(",
"sample_ids",
",",
"self",
".",
"_end_token",
")",
"all_finished",
"=",
"math_ops",
".",
"reduce_all",
"(",
"finished",
")",
"next_inputs",
"=",
"control_flow_ops",
".",
"cond",
"(",
"all_finished",
",",
"# If we're finished, the next_inputs value doesn't matter",
"lambda",
":",
"self",
".",
"_start_inputs",
",",
"lambda",
":",
"self",
".",
"_embedding_fn",
"(",
"sample_ids",
")",
")",
"return",
"(",
"finished",
",",
"next_inputs",
",",
"state",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/seq2seq/python/ops/helper.py#L503-L513 |
|
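The control flow in next_inputs does not depend on TensorFlow; a framework-free sketch of the same greedy-decoding rule (the helper name and toy embedding table are illustrative):

import numpy as np

def next_inputs_np(sample_ids, end_token, start_inputs, embedding):
    finished = np.equal(sample_ids, end_token)
    if finished.all():               # value no longer matters once all done
        return finished, start_inputs
    return finished, embedding[sample_ids]

embedding = np.eye(4, dtype=np.float32)          # toy embedding table
finished, nxt = next_inputs_np(np.array([1, 3]), end_token=3,
                               start_inputs=embedding[[0, 0]],
                               embedding=embedding)
print(finished)   # [False  True]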
PrincetonUniversity/athena-public-version | 9c266692b9423743d8e23509b3ab266a232a92d2 | tst/style/cpplint.py | python | CheckMakePairUsesDeduction | (filename, clean_lines, linenum, error) | Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Check that make_pair's template arguments are deduced. | [
"Check",
"that",
"make_pair",
"s",
"template",
"arguments",
"are",
"deduced",
"."
] | def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly') | [
"def",
"CheckMakePairUsesDeduction",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"match",
"=",
"_RE_PATTERN_EXPLICIT_MAKEPAIR",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/explicit_make_pair'",
",",
"4",
",",
"# 4 = high confidence",
"'For C++11-compatibility, omit template arguments from make_pair'",
"' OR use pair directly OR if appropriate, construct a pair directly'",
")"
] | https://github.com/PrincetonUniversity/athena-public-version/blob/9c266692b9423743d8e23509b3ab266a232a92d2/tst/style/cpplint.py#L5883-L5901 |
||
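_RE_PATTERN_EXPLICIT_MAKEPAIR is defined elsewhere in cpplint; the regex below is a plausible stand-in that flags explicit template arguments, not the verbatim upstream pattern:

import re

EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')   # assumed pattern
print(bool(EXPLICIT_MAKEPAIR.search('return make_pair<int, int>(1, 2);')))  # True
print(bool(EXPLICIT_MAKEPAIR.search('return make_pair(1, 2);')))            # False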
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/framework/python/framework/checkpoint_utils.py | python | load_variable | (checkpoint_dir, name) | return reader.get_tensor(name) | Returns a Tensor with the contents of the given variable in the checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
name: Name of the tensor to return.
Returns:
`Tensor` object. | Returns a Tensor with the contents of the given variable in the checkpoint. | [
"Returns",
"a",
"Tensor",
"with",
"the",
"contents",
"of",
"the",
"given",
"variable",
"in",
"the",
"checkpoint",
"."
] | def load_variable(checkpoint_dir, name):
"""Returns a Tensor with the contents of the given variable in the checkpoint.
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
name: Name of the tensor to return.
Returns:
`Tensor` object.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(checkpoint_dir)
return reader.get_tensor(name) | [
"def",
"load_variable",
"(",
"checkpoint_dir",
",",
"name",
")",
":",
"# TODO(b/29227106): Fix this in the right place and remove this.",
"if",
"name",
".",
"endswith",
"(",
"\":0\"",
")",
":",
"name",
"=",
"name",
"[",
":",
"-",
"2",
"]",
"reader",
"=",
"load_checkpoint",
"(",
"checkpoint_dir",
")",
"return",
"reader",
".",
"get_tensor",
"(",
"name",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/framework/python/framework/checkpoint_utils.py#L66-L80 |
|
nasa/astrobee | 9241e67e6692810d6e275abb3165b6d02f4ca5ef | scripts/git/cpplint.py | python | CheckForHeaderGuard | (filename, lines, error) | Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found. | Checks that the file contains a header guard. | [
"Checks",
"that",
"the",
"file",
"contains",
"a",
"header",
"guard",
"."
] | def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
for i in lines:
if Search(r"//\s*NOLINT\(build/header_guard\)", i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == "#ifndef":
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == "#define":
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith("#endif"):
endif = line
endif_linenum = linenum
if not ifndef:
error(
filename,
0,
"build/header_guard",
5,
"No #ifndef header guard found, suggested CPP variable is: %s" % cppvar,
)
return
if not define:
error(
filename,
0,
"build/header_guard",
5,
"No #define header guard found, suggested CPP variable is: %s" % cppvar,
)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + "_":
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error)
error(
filename,
ifndef_linenum,
"build/header_guard",
error_level,
"#ifndef header guard has wrong style, please use: %s" % cppvar,
)
if define != ifndef:
error(
filename,
0,
"build/header_guard",
5,
"#ifndef and #define don't match, suggested CPP variable is: %s" % cppvar,
)
return
if endif != ("#endif // %s" % cppvar):
error_level = 0
if endif != ("#endif // %s" % (cppvar + "_")):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error)
error(
filename,
endif_linenum,
"build/header_guard",
error_level,
'#endif line should be "#endif // %s"' % cppvar,
) | [
"def",
"CheckForHeaderGuard",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"# Don't check for header guards if there are error suppression",
"# comments somewhere in this file.",
"#",
"# Because this is silencing a warning for a nonexistent line, we",
"# only support the very specific NOLINT(build/header_guard) syntax,",
"# and not the general NOLINT or NOLINT(*) syntax.",
"for",
"i",
"in",
"lines",
":",
"if",
"Search",
"(",
"r\"//\\s*NOLINT\\(build/header_guard\\)\"",
",",
"i",
")",
":",
"return",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"ifndef",
"=",
"None",
"ifndef_linenum",
"=",
"0",
"define",
"=",
"None",
"endif",
"=",
"None",
"endif_linenum",
"=",
"0",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"linesplit",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"linesplit",
")",
">=",
"2",
":",
"# find the first occurrence of #ifndef and #define, save arg",
"if",
"not",
"ifndef",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"\"#ifndef\"",
":",
"# set ifndef to the header guard presented on the #ifndef line.",
"ifndef",
"=",
"linesplit",
"[",
"1",
"]",
"ifndef_linenum",
"=",
"linenum",
"if",
"not",
"define",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"\"#define\"",
":",
"define",
"=",
"linesplit",
"[",
"1",
"]",
"# find the last occurrence of #endif, save entire line",
"if",
"line",
".",
"startswith",
"(",
"\"#endif\"",
")",
":",
"endif",
"=",
"line",
"endif_linenum",
"=",
"linenum",
"if",
"not",
"ifndef",
":",
"error",
"(",
"filename",
",",
"0",
",",
"\"build/header_guard\"",
",",
"5",
",",
"\"No #ifndef header guard found, suggested CPP variable is: %s\"",
"%",
"cppvar",
",",
")",
"return",
"if",
"not",
"define",
":",
"error",
"(",
"filename",
",",
"0",
",",
"\"build/header_guard\"",
",",
"5",
",",
"\"No #define header guard found, suggested CPP variable is: %s\"",
"%",
"cppvar",
",",
")",
"return",
"# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__",
"# for backward compatibility.",
"if",
"ifndef",
"!=",
"cppvar",
":",
"error_level",
"=",
"0",
"if",
"ifndef",
"!=",
"cppvar",
"+",
"\"_\"",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"lines",
"[",
"ifndef_linenum",
"]",
",",
"ifndef_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"ifndef_linenum",
",",
"\"build/header_guard\"",
",",
"error_level",
",",
"\"#ifndef header guard has wrong style, please use: %s\"",
"%",
"cppvar",
",",
")",
"if",
"define",
"!=",
"ifndef",
":",
"error",
"(",
"filename",
",",
"0",
",",
"\"build/header_guard\"",
",",
"5",
",",
"\"#ifndef and #define don't match, suggested CPP variable is: %s\"",
"%",
"cppvar",
",",
")",
"return",
"if",
"endif",
"!=",
"(",
"\"#endif // %s\"",
"%",
"cppvar",
")",
":",
"error_level",
"=",
"0",
"if",
"endif",
"!=",
"(",
"\"#endif // %s\"",
"%",
"(",
"cppvar",
"+",
"\"_\"",
")",
")",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"lines",
"[",
"endif_linenum",
"]",
",",
"endif_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"\"build/header_guard\"",
",",
"error_level",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
",",
")"
] | https://github.com/nasa/astrobee/blob/9241e67e6692810d6e275abb3165b6d02f4ca5ef/scripts/git/cpplint.py#L1752-L1854 |
||
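For reference, a header that satisfies all three checks in CheckForHeaderGuard has exactly this shape; the guard variable is a hypothetical GetHeaderGuardCPPVariable result (a trailing extra underscore is also tolerated, at a lower error level):

cppvar = 'SCRIPTS_GIT_EXAMPLE_H_'   # hypothetical guard variable
print('\n'.join([
    '#ifndef %s' % cppvar,
    '#define %s' % cppvar,
    '// ... declarations ...',
    '#endif // %s' % cppvar,        # the #endif must echo the guard
]))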
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PropertyGrid.GetCategoryDefaultCell | (*args, **kwargs) | return _propgrid.PropertyGrid_GetCategoryDefaultCell(*args, **kwargs) | GetCategoryDefaultCell(self) -> PGCell | GetCategoryDefaultCell(self) -> PGCell | [
"GetCategoryDefaultCell",
"(",
"self",
")",
"-",
">",
"PGCell"
] | def GetCategoryDefaultCell(*args, **kwargs):
"""GetCategoryDefaultCell(self) -> PGCell"""
return _propgrid.PropertyGrid_GetCategoryDefaultCell(*args, **kwargs) | [
"def",
"GetCategoryDefaultCell",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGrid_GetCategoryDefaultCell",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L2456-L2458 |
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/fx/experimental/unification/multipledispatch/dispatcher.py | python | Dispatcher.get_func_annotations | (cls, func) | get annotations of function positional parameters | get annotations of function positional parameters | [
"get",
"annotations",
"of",
"function",
"positional",
"parameters"
] | def get_func_annotations(cls, func):
""" get annotations of function positional parameters
"""
params = cls.get_func_params(func)
if params:
Parameter = inspect.Parameter
params = (param for param in params
if param.kind in
(Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD))
annotations = tuple(
param.annotation
for param in params)
if all(ann is not Parameter.empty for ann in annotations):
return annotations | [
"def",
"get_func_annotations",
"(",
"cls",
",",
"func",
")",
":",
"params",
"=",
"cls",
".",
"get_func_params",
"(",
"func",
")",
"if",
"params",
":",
"Parameter",
"=",
"inspect",
".",
"Parameter",
"params",
"=",
"(",
"param",
"for",
"param",
"in",
"params",
"if",
"param",
".",
"kind",
"in",
"(",
"Parameter",
".",
"POSITIONAL_ONLY",
",",
"Parameter",
".",
"POSITIONAL_OR_KEYWORD",
")",
")",
"annotations",
"=",
"tuple",
"(",
"param",
".",
"annotation",
"for",
"param",
"in",
"params",
")",
"if",
"all",
"(",
"ann",
"is",
"not",
"Parameter",
".",
"empty",
"for",
"ann",
"in",
"annotations",
")",
":",
"return",
"annotations"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/fx/experimental/unification/multipledispatch/dispatcher.py#L151-L168 |
||
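The same rule can be reproduced standalone with inspect — a tuple of annotations comes back only when every positional parameter carries one (the helper and sample functions below are illustrative):

import inspect

def fully(x: int, y: str) -> bool: ...
def partially(x: int, y): ...

def positional_annotations(func):
    kinds = (inspect.Parameter.POSITIONAL_ONLY,
             inspect.Parameter.POSITIONAL_OR_KEYWORD)
    anns = tuple(p.annotation
                 for p in inspect.signature(func).parameters.values()
                 if p.kind in kinds)
    if anns and all(a is not inspect.Parameter.empty for a in anns):
        return anns
    return None

print(positional_annotations(fully))       # (<class 'int'>, <class 'str'>)
print(positional_annotations(partially))   # None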
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/vision_opencv/image_geometry/src/image_geometry/cameramodels.py | python | StereoCameraModel.getZ | (self, disparity) | return Tx / disparity | :param disparity: disparity, in pixels
:type disparity: float
Returns the depth at which a point is observed with a given disparity.
This is the inverse of :meth:`getDisparity`.
Note that a disparity of zero implies Z is infinite. | :param disparity: disparity, in pixels
:type disparity: float | [
":",
"param",
"disparity",
":",
"disparity",
"in",
"pixels",
":",
"type",
"disparity",
":",
"float"
] | def getZ(self, disparity):
"""
:param disparity: disparity, in pixels
:type disparity: float
Returns the depth at which a point is observed with a given disparity.
This is the inverse of :meth:`getDisparity`.
Note that a disparity of zero implies Z is infinite.
"""
if disparity == 0:
return float('inf')
Tx = -self.right.P[0, 3]
return Tx / disparity | [
"def",
"getZ",
"(",
"self",
",",
"disparity",
")",
":",
"if",
"disparity",
"==",
"0",
":",
"return",
"float",
"(",
"'inf'",
")",
"Tx",
"=",
"-",
"self",
".",
"right",
".",
"P",
"[",
"0",
",",
"3",
"]",
"return",
"Tx",
"/",
"disparity"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/vision_opencv/image_geometry/src/image_geometry/cameramodels.py#L347-L360 |
|
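Worked example of the getZ formula: Tx is the negated fourth column of the right camera's projection matrix (focal length times baseline), so depth falls off as the reciprocal of disparity. The numbers are hypothetical:

Tx = 386.2   # hypothetical fx * baseline term
for disparity in (1.0, 10.0, 100.0):
    print(disparity, Tx / disparity)   # ~386.2, ~38.62, ~3.862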
google/shaka-packager | e1b0c7c45431327fd3ce193514a5407d07b39b22 | packager/third_party/protobuf/python/mox.py | python | Reset | (*args) | Reset mocks.
Args:
# args is any number of mocks to be reset. | Reset mocks. | [
"Reset",
"mocks",
"."
] | def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset() | [
"def",
"Reset",
"(",
"*",
"args",
")",
":",
"for",
"mock",
"in",
"args",
":",
"mock",
".",
"_Reset",
"(",
")"
] | https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/mox.py#L257-L265 |
||
cvxpy/cvxpy | 5165b4fb750dfd237de8659383ef24b4b2e33aaf | cvxpy/interface/matrix_utilities.py | python | from_2D_to_1D | (constant) | Convert 2D Numpy matrices or arrays to 1D. | Convert 2D Numpy matrices or arrays to 1D. | [
"Convert",
"2D",
"Numpy",
"matrices",
"or",
"arrays",
"to",
"1D",
"."
] | def from_2D_to_1D(constant):
"""Convert 2D Numpy matrices or arrays to 1D.
"""
if isinstance(constant, np.ndarray) and constant.ndim == 2:
return np.asarray(constant)[:, 0]
else:
return constant | [
"def",
"from_2D_to_1D",
"(",
"constant",
")",
":",
"if",
"isinstance",
"(",
"constant",
",",
"np",
".",
"ndarray",
")",
"and",
"constant",
".",
"ndim",
"==",
"2",
":",
"return",
"np",
".",
"asarray",
"(",
"constant",
")",
"[",
":",
",",
"0",
"]",
"else",
":",
"return",
"constant"
] | https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/interface/matrix_utilities.py#L152-L158 |
||
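Usage sketch for from_2D_to_1D, importing it from the module path shown in this record; a 2-D input collapses to its first column, anything else passes through unchanged:

import numpy as np
from cvxpy.interface.matrix_utilities import from_2D_to_1D

col = np.arange(3.0).reshape(3, 1)      # 2-D column vector
print(from_2D_to_1D(col))               # [0. 1. 2.]  -- now 1-D
print(from_2D_to_1D(np.arange(3.0)))    # already 1-D: returned as-is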
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | python | _ExtractImportantEnvironment | (output_of_set) | return env | Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command. | Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command. | [
"Extracts",
"environment",
"variables",
"required",
"for",
"the",
"toolchain",
"to",
"run",
"from",
"a",
"textual",
"dump",
"output",
"by",
"the",
"cmd",
".",
"exe",
"set",
"command",
"."
] | def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env | [
"def",
"_ExtractImportantEnvironment",
"(",
"output_of_set",
")",
":",
"envvars_to_save",
"=",
"(",
"'goma_.*'",
",",
"# TODO(scottmg): This is ugly, but needed for goma.",
"'include'",
",",
"'lib'",
",",
"'libpath'",
",",
"'path'",
",",
"'pathext'",
",",
"'systemroot'",
",",
"'temp'",
",",
"'tmp'",
",",
")",
"env",
"=",
"{",
"}",
"for",
"line",
"in",
"output_of_set",
".",
"splitlines",
"(",
")",
":",
"for",
"envvar",
"in",
"envvars_to_save",
":",
"if",
"re",
".",
"match",
"(",
"envvar",
"+",
"'='",
",",
"line",
".",
"lower",
"(",
")",
")",
":",
"var",
",",
"setting",
"=",
"line",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"envvar",
"==",
"'path'",
":",
"# Our own rules (for running gyp-win-tool) and other actions in",
"# Chromium rely on python being in the path. Add the path to this",
"# python here so that if it's not in the path when ninja is run",
"# later, python will still be found.",
"setting",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
"+",
"os",
".",
"pathsep",
"+",
"setting",
"env",
"[",
"var",
".",
"upper",
"(",
")",
"]",
"=",
"setting",
"break",
"for",
"required",
"in",
"(",
"'SYSTEMROOT'",
",",
"'TEMP'",
",",
"'TMP'",
")",
":",
"if",
"required",
"not",
"in",
"env",
":",
"raise",
"Exception",
"(",
"'Environment variable \"%s\" '",
"'required to be set to valid path'",
"%",
"required",
")",
"return",
"env"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py#L962-L993 |
|
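Minimal usage sketch for _ExtractImportantEnvironment with a fabricated 'set' dump; SYSTEMROOT, TEMP and TMP must all be present or the helper raises, and the running interpreter's directory is prepended to PATH:

sample = '\n'.join([
    'PATH=C:\\tools',
    'INCLUDE=C:\\vc\\include',
    'SYSTEMROOT=C:\\Windows',
    'TEMP=C:\\Temp',
    'TMP=C:\\Temp',
    'UNRELATED=ignored',   # not in envvars_to_save, so dropped
])
env = _ExtractImportantEnvironment(sample)
print(sorted(env))   # ['INCLUDE', 'PATH', 'SYSTEMROOT', 'TEMP', 'TMP']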
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/engine/simulation.py | python | Simulation.softexit | (self) | Deals with a soft exit request.
Tries to ensure that a consistent restart checkpoint is
written out. | Deals with a soft exit request. | [
"Deals",
"with",
"a",
"soft",
"exit",
"request",
"."
] | def softexit(self):
"""Deals with a soft exit request.
Tries to ensure that a consistent restart checkpoint is
written out.
"""
if self.step < self.tsteps:
self.step += 1
if not self.rollback:
self.chk.store()
self.chk.write(store=False)
self.forces.stop() | [
"def",
"softexit",
"(",
"self",
")",
":",
"if",
"self",
".",
"step",
"<",
"self",
".",
"tsteps",
":",
"self",
".",
"step",
"+=",
"1",
"if",
"not",
"self",
".",
"rollback",
":",
"self",
".",
"chk",
".",
"store",
"(",
")",
"self",
".",
"chk",
".",
"write",
"(",
"store",
"=",
"False",
")",
"self",
".",
"forces",
".",
"stop",
"(",
")"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/engine/simulation.py#L150-L163 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/setuptools/dist.py | python | Distribution.exclude | (self, **attrs) | Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed. | Remove items from distribution that are named in keyword arguments | [
"Remove",
"items",
"from",
"distribution",
"that",
"are",
"named",
"in",
"keyword",
"arguments"
] | def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v) | [
"def",
"exclude",
"(",
"self",
",",
"*",
"*",
"attrs",
")",
":",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"exclude",
"=",
"getattr",
"(",
"self",
",",
"'_exclude_'",
"+",
"k",
",",
"None",
")",
"if",
"exclude",
":",
"exclude",
"(",
"v",
")",
"else",
":",
"self",
".",
"_exclude_misc",
"(",
"k",
",",
"v",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/setuptools/dist.py#L717-L738 |
||
Caffe-MPI/Caffe-MPI.github.io | df5992af571a2a19981b69635115c393f18d1c76 | python/caffe/coord_map.py | python | crop | (top_from, top_to) | return L.Crop(top_from, top_to,
crop_param=dict(axis=ax + 1, # +1 for first cropping dim.
offset=list(-np.round(b).astype(int)))) | Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop. | Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop. | [
"Define",
"a",
"Crop",
"layer",
"to",
"crop",
"a",
"top",
"(",
"from",
")",
"to",
"another",
"top",
"(",
"to",
")",
"by",
"determining",
"the",
"coordinate",
"mapping",
"between",
"the",
"two",
"and",
"net",
"spec",
"ing",
"the",
"axis",
"and",
"shift",
"parameters",
"of",
"the",
"crop",
"."
] | def crop(top_from, top_to):
"""
Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop.
"""
ax, a, b = coord_map_from_to(top_from, top_to)
assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
'(b = {})'.format(b)
return L.Crop(top_from, top_to,
crop_param=dict(axis=ax + 1, # +1 for first cropping dim.
offset=list(-np.round(b).astype(int)))) | [
"def",
"crop",
"(",
"top_from",
",",
"top_to",
")",
":",
"ax",
",",
"a",
",",
"b",
"=",
"coord_map_from_to",
"(",
"top_from",
",",
"top_to",
")",
"assert",
"(",
"a",
"==",
"1",
")",
".",
"all",
"(",
")",
",",
"'scale mismatch on crop (a = {})'",
".",
"format",
"(",
"a",
")",
"assert",
"(",
"b",
"<=",
"0",
")",
".",
"all",
"(",
")",
",",
"'cannot crop negative offset (b = {})'",
".",
"format",
"(",
"b",
")",
"assert",
"(",
"np",
".",
"round",
"(",
"b",
")",
"==",
"b",
")",
".",
"all",
"(",
")",
",",
"'cannot crop noninteger offset '",
"'(b = {})'",
".",
"format",
"(",
"b",
")",
"return",
"L",
".",
"Crop",
"(",
"top_from",
",",
"top_to",
",",
"crop_param",
"=",
"dict",
"(",
"axis",
"=",
"ax",
"+",
"1",
",",
"# +1 for first cropping dim.",
"offset",
"=",
"list",
"(",
"-",
"np",
".",
"round",
"(",
"b",
")",
".",
"astype",
"(",
"int",
")",
")",
")",
")"
] | https://github.com/Caffe-MPI/Caffe-MPI.github.io/blob/df5992af571a2a19981b69635115c393f18d1c76/python/caffe/coord_map.py#L172-L185 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/signal/fir_filter_design.py | python | remez | (numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16) | return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density) | Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz
firls
firwin
firwin2
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show() | Calculate the minimax optimal filter using the Remez exchange algorithm. | [
"Calculate",
"the",
"minimax",
"optimal",
"filter",
"using",
"the",
"Remez",
"exchange",
"algorithm",
"."
] | def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz
firls
firwin
firwin2
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density) | [
"def",
"remez",
"(",
"numtaps",
",",
"bands",
",",
"desired",
",",
"weight",
"=",
"None",
",",
"Hz",
"=",
"1",
",",
"type",
"=",
"'bandpass'",
",",
"maxiter",
"=",
"25",
",",
"grid_density",
"=",
"16",
")",
":",
"# Convert type",
"try",
":",
"tnum",
"=",
"{",
"'bandpass'",
":",
"1",
",",
"'differentiator'",
":",
"2",
",",
"'hilbert'",
":",
"3",
"}",
"[",
"type",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Type must be 'bandpass', 'differentiator', \"",
"\"or 'hilbert'\"",
")",
"# Convert weight",
"if",
"weight",
"is",
"None",
":",
"weight",
"=",
"[",
"1",
"]",
"*",
"len",
"(",
"desired",
")",
"bands",
"=",
"np",
".",
"asarray",
"(",
"bands",
")",
".",
"copy",
"(",
")",
"return",
"sigtools",
".",
"_remez",
"(",
"numtaps",
",",
"bands",
",",
"desired",
",",
"weight",
",",
"tnum",
",",
"Hz",
",",
"maxiter",
",",
"grid_density",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/fir_filter_design.py#L506-L605 |
|
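The Hz parameter of remez lets the same design be written with physical band edges; this is the docstring's normalized example scaled to an 8 kHz sample rate:

from scipy import signal

bpass = signal.remez(72, [0, 800, 1600, 3200, 3600, 4000], [0, 1, 0],
                     Hz=8000)   # edges in hertz; 4000 = fs/2
print(bpass.shape)   # (72,) -- one coefficient per tap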
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/asyncio/transports.py | python | SubprocessTransport.get_pid | (self) | Get subprocess id. | Get subprocess id. | [
"Get",
"subprocess",
"id",
"."
] | def get_pid(self):
"""Get subprocess id."""
raise NotImplementedError | [
"def",
"get_pid",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/asyncio/transports.py#L183-L185 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/xrc.py | python | XmlResource.GetVersion | (*args, **kwargs) | return _xrc.XmlResource_GetVersion(*args, **kwargs) | GetVersion(self) -> long | GetVersion(self) -> long | [
"GetVersion",
"(",
"self",
")",
"-",
">",
"long"
] | def GetVersion(*args, **kwargs):
"""GetVersion(self) -> long"""
return _xrc.XmlResource_GetVersion(*args, **kwargs) | [
"def",
"GetVersion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_xrc",
".",
"XmlResource_GetVersion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/xrc.py#L192-L194 |
|
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tensor_tracer.py | python | TensorTracer._check_trace_files | (self) | Checks if any requirements for trace files are satisfied. | Checks if any requirements for trace files are satisfied. | [
"Checks",
"if",
"any",
"requirements",
"for",
"trace",
"files",
"are",
"satisfied",
"."
] | def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._parameters.trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
# Output files are handled by tf.summary operations, no need to precreate
# them.
return
if _trace_files_need_precreated(self._parameters.trace_dir):
for replica_id in range(0, self._tt_config.num_replicas):
trace_file_path = os.path.join(
self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._parameters.trace_dir):
gfile.MkDir(self._parameters.trace_dir)
if not gfile.Exists(self._parameters.trace_dir):
raise RuntimeError('Failed to create %s'%self._parameters.trace_dir) | [
"def",
"_check_trace_files",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_parameters",
".",
"trace_dir",
":",
"# traces will be written to stderr. No need to check trace files.",
"return",
"if",
"self",
".",
"_parameters",
".",
"trace_mode",
"==",
"tensor_tracer_flags",
".",
"TRACE_MODE_SUMMARY",
":",
"# Output files are handled by tf.summary operations, no need to precreate",
"# them.",
"return",
"if",
"_trace_files_need_precreated",
"(",
"self",
".",
"_parameters",
".",
"trace_dir",
")",
":",
"for",
"replica_id",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_tt_config",
".",
"num_replicas",
")",
":",
"trace_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_parameters",
".",
"trace_dir",
",",
"_COMPACT_TRACE_FILE_PREFIX",
")",
"+",
"'%d'",
"%",
"replica_id",
"if",
"not",
"gfile",
".",
"Exists",
"(",
"trace_file_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'%s must be pre-created with the '",
"'appropriate properties.'",
"%",
"trace_file_path",
")",
"else",
":",
"if",
"not",
"gfile",
".",
"Exists",
"(",
"self",
".",
"_parameters",
".",
"trace_dir",
")",
":",
"gfile",
".",
"MkDir",
"(",
"self",
".",
"_parameters",
".",
"trace_dir",
")",
"if",
"not",
"gfile",
".",
"Exists",
"(",
"self",
".",
"_parameters",
".",
"trace_dir",
")",
":",
"raise",
"RuntimeError",
"(",
"'Failed to create %s'",
"%",
"self",
".",
"_parameters",
".",
"trace_dir",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tensor_tracer.py#L957-L980 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/plugin.py | python | PluginManager.GetIncompatible | (self) | return self._obsolete | Get the list of loaded plugins that are incompatible with the
current running version of Editra.
return: dict(name=module) | Get the list of loaded plugins that are incompatible with the
current running version of Editra.
return: dict(name=module) | [
"Get",
"the",
"list",
"of",
"loaded",
"plugins",
"that",
"are",
"incompatible",
"with",
"the",
"current",
"running",
"version",
"of",
"Editra",
".",
"return",
":",
"dict",
"(",
"name",
"=",
"module",
")"
] | def GetIncompatible(self):
"""Get the list of loaded plugins that are incompatible with the
current running version of Editra.
return: dict(name=module)
"""
return self._obsolete | [
"def",
"GetIncompatible",
"(",
"self",
")",
":",
"return",
"self",
".",
"_obsolete"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/plugin.py#L623-L629 |
|
Z3Prover/z3 | d745d03afdfdf638d66093e2bfbacaf87187f35b | src/api/python/z3/z3.py | python | is_and | (a) | return is_app_of(a, Z3_OP_AND) | Return `True` if `a` is a Z3 and expression.
>>> p, q = Bools('p q')
>>> is_and(And(p, q))
True
>>> is_and(Or(p, q))
False | Return `True` if `a` is a Z3 and expression. | [
"Return",
"True",
"if",
"a",
"is",
"a",
"Z3",
"and",
"expression",
"."
] | def is_and(a):
"""Return `True` if `a` is a Z3 and expression.
>>> p, q = Bools('p q')
>>> is_and(And(p, q))
True
>>> is_and(Or(p, q))
False
"""
return is_app_of(a, Z3_OP_AND) | [
"def",
"is_and",
"(",
"a",
")",
":",
"return",
"is_app_of",
"(",
"a",
",",
"Z3_OP_AND",
")"
] | https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L1589-L1598 |
|
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/distributed/auto_parallel/reshard.py | python | _compute_partition_index | (process, complete_shape, dims_mapping,
process_shape, process_group) | return partition_index | Compute the partition index in complete tensor. | Compute the partition index in complete tensor. | [
"Compute",
"the",
"partition",
"index",
"in",
"complete",
"tensor",
"."
] | def _compute_partition_index(process, complete_shape, dims_mapping,
process_shape, process_group):
"""Compute the partition index in complete tensor."""
partition_shape = _compute_partition_shape(complete_shape, dims_mapping,
process_shape)
process_index = _compute_process_index(process, process_group,
process_shape)
partition_index = []
for i in range(len(complete_shape)):
if dims_mapping[i] == -1:
partition_index.append([0, partition_shape[i]])
else:
partition_index.append([
process_index[dims_mapping[i]] * partition_shape[i],
(process_index[dims_mapping[i]] + 1) * partition_shape[i]
])
return partition_index | [
"def",
"_compute_partition_index",
"(",
"process",
",",
"complete_shape",
",",
"dims_mapping",
",",
"process_shape",
",",
"process_group",
")",
":",
"partition_shape",
"=",
"_compute_partition_shape",
"(",
"complete_shape",
",",
"dims_mapping",
",",
"process_shape",
")",
"process_index",
"=",
"_compute_process_index",
"(",
"process",
",",
"process_group",
",",
"process_shape",
")",
"partition_index",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"complete_shape",
")",
")",
":",
"if",
"dims_mapping",
"[",
"i",
"]",
"==",
"-",
"1",
":",
"partition_index",
".",
"append",
"(",
"[",
"0",
",",
"partition_shape",
"[",
"i",
"]",
"]",
")",
"else",
":",
"partition_index",
".",
"append",
"(",
"[",
"process_index",
"[",
"dims_mapping",
"[",
"i",
"]",
"]",
"*",
"partition_shape",
"[",
"i",
"]",
",",
"(",
"process_index",
"[",
"dims_mapping",
"[",
"i",
"]",
"]",
"+",
"1",
")",
"*",
"partition_shape",
"[",
"i",
"]",
"]",
")",
"return",
"partition_index"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/auto_parallel/reshard.py#L205-L223 |
|
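_compute_partition_shape and _compute_process_index live elsewhere in the module; the self-contained sketch below re-derives the same answer for the common case of a row-major process mesh (names are illustrative, not the real helpers):

import numpy as np

def partition_index(process, complete_shape, dims_mapping, process_shape,
                    process_group):
    rank = process_group.index(process)
    mesh_index = list(np.unravel_index(rank, process_shape))
    index = []
    for size, mapping in zip(complete_shape, dims_mapping):
        if mapping == -1:                      # dim not sharded: full extent
            index.append([0, size])
        else:
            part = size // process_shape[mapping]
            index.append([mesh_index[mapping] * part,
                          (mesh_index[mapping] + 1) * part])
    return index

# (8, 6) tensor, dim 1 sharded over a 1x2 mesh; process 1 owns columns 3..6
print(partition_index(1, [8, 6], [-1, 1], [1, 2], [0, 1]))   # [[0, 8], [3, 6]]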
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | benchmarks/operator_benchmark/benchmark_pytorch.py | python | PyTorchOperatorTestCase.run_backward | (self, num_runs, print_per_iter=False) | Run the backward path of an op in many iterations | Run the backward path of an op in many iterations | [
"Run",
"the",
"backward",
"path",
"of",
"an",
"op",
"in",
"many",
"iterations"
] | def run_backward(self, num_runs, print_per_iter=False):
""" Run the backward path of an op in many iterations
"""
# TODO: can we use JIT here to reduce python overhead?
for _ in range(num_runs):
self.mean.backward(retain_graph=True) | [
"def",
"run_backward",
"(",
"self",
",",
"num_runs",
",",
"print_per_iter",
"=",
"False",
")",
":",
"# TODO: can we use JIT here to reduce python overhead?",
"for",
"_",
"in",
"range",
"(",
"num_runs",
")",
":",
"self",
".",
"mean",
".",
"backward",
"(",
"retain_graph",
"=",
"True",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/benchmarks/operator_benchmark/benchmark_pytorch.py#L173-L178 |
||
chromiumembedded/cef | 80caf947f3fe2210e5344713c5281d8af9bdc295 | tools/yapf/yapf/yapflib/split_penalty.py | python | ComputeSplitPenalties | (tree) | Compute split penalties on tokens in the given parse tree.
Arguments:
tree: the top-level pytree node to annotate with penalties. | Compute split penalties on tokens in the given parse tree. | [
"Compute",
"split",
"penalties",
"on",
"tokens",
"in",
"the",
"given",
"parse",
"tree",
"."
] | def ComputeSplitPenalties(tree):
"""Compute split penalties on tokens in the given parse tree.
Arguments:
tree: the top-level pytree node to annotate with penalties.
"""
_SplitPenaltyAssigner().Visit(tree) | [
"def",
"ComputeSplitPenalties",
"(",
"tree",
")",
":",
"_SplitPenaltyAssigner",
"(",
")",
".",
"Visit",
"(",
"tree",
")"
] | https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/yapf/yapf/yapflib/split_penalty.py#L49-L55 |
||
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/stubout.py | python | StubOutForTesting.Set | (self, parent, child_name, new_child) | Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent. | Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called. | [
"Replace",
"child_name",
"s",
"old",
"definition",
"with",
"new_child",
"in",
"the",
"context",
"of",
"the",
"given",
"parent",
".",
"The",
"parent",
"could",
"be",
"a",
"module",
"when",
"the",
"child",
"is",
"a",
"function",
"at",
"module",
"scope",
".",
"Or",
"the",
"parent",
"could",
"be",
"a",
"class",
"when",
"a",
"class",
"method",
"is",
"being",
"replaced",
".",
"The",
"named",
"child",
"is",
"set",
"to",
"new_child",
"while",
"the",
"prior",
"definition",
"is",
"saved",
"away",
"for",
"later",
"when",
"UnsetAll",
"()",
"is",
"called",
"."
] | def Set(self, parent, child_name, new_child):
"""Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child) | [
"def",
"Set",
"(",
"self",
",",
"parent",
",",
"child_name",
",",
"new_child",
")",
":",
"old_child",
"=",
"getattr",
"(",
"parent",
",",
"child_name",
")",
"old_attribute",
"=",
"parent",
".",
"__dict__",
".",
"get",
"(",
"child_name",
")",
"if",
"old_attribute",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"old_attribute",
",",
"staticmethod",
")",
":",
"old_child",
"=",
"staticmethod",
"(",
"old_child",
")",
"self",
".",
"cache",
".",
"append",
"(",
"(",
"parent",
",",
"old_child",
",",
"child_name",
")",
")",
"setattr",
"(",
"parent",
",",
"child_name",
",",
"new_child",
")"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/stubout.py#L109-L126 |
||
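Typical test usage of Set, assuming StubOutForTesting is importable from mox's stubout module and that its UnsetAll method restores everything saved in the cache:

import os.path

stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda _path: True)
print(os.path.exists('/definitely/not/there'))   # True while stubbed
stubs.UnsetAll()                                 # real os.path.exists is back
print(os.path.exists('/definitely/not/there'))   # False again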
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Signature/DSS.py | python | new | (key, mode, encoding='binary', randfunc=None) | Create a signature object :class:`DSS_SigScheme` that
can perform (EC)DSA signature or verification.
.. note::
Refer to `NIST SP 800 Part 1 Rev 4`_ (or newer release) for an
overview of the recommended key lengths.
:parameter key:
The key to use for computing the signature (*private* keys only)
or verifying one: it must be either
:class:`Crypto.PublicKey.DSA` or :class:`Crypto.PublicKey.ECC`.
For DSA keys, let ``L`` and ``N`` be the bit lengths of the modulus ``p``
and of ``q``: the pair ``(L,N)`` must appear in the following list,
in compliance with section 4.2 of `FIPS 186-4`_:
- (1024, 160) *legacy only; do not create new signatures with this*
- (2048, 224) *deprecated; do not create new signatures with this*
- (2048, 256)
- (3072, 256)
For ECC, only keys over P-256, P-384, and P-521 are accepted.
:type key:
a key object
:parameter mode:
The parameter can take these values:
- *'fips-186-3'*. The signature generation is randomized and carried out
according to `FIPS 186-3`_: the nonce ``k`` is taken from the RNG.
- *'deterministic-rfc6979'*. The signature generation is not
randomized. See RFC6979_.
:type mode:
string
:parameter encoding:
How the signature is encoded. This value determines the output of
:meth:`sign` and the input to :meth:`verify`.
The following values are accepted:
- *'binary'* (default), the signature is the raw concatenation
of ``r`` and ``s``. It is defined in the IEEE P.1363 standard.
For DSA, the size in bytes of the signature is ``N/4``
(e.g. 64 bytes for ``N=256``).
For ECDSA, the signature is always twice the length of a point
coordinate (e.g. 64 bytes for P-256).
- *'der'*, the signature is an ASN.1 SEQUENCE with two
INTEGERs (``r`` and ``s``) encoded with DER.
The size of the signature is variable.
:type encoding: string
:parameter randfunc:
A function that returns random *byte strings*, of a given length.
If omitted, the internal RNG is used.
Only applicable for the *'fips-186-3'* mode.
:type randfunc: callable
.. _FIPS 186-3: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. _FIPS 186-4: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
.. _NIST SP 800 Part 1 Rev 4: http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-57pt1r4.pdf
.. _RFC6979: http://tools.ietf.org/html/rfc6979 | Create a signature object :class:`DSS_SigScheme` that
can perform (EC)DSA signature or verification. | [
"Create",
"a",
"signature",
"object",
":",
"class",
":",
"DSS_SigScheme",
"that",
"can",
"perform",
"(",
"EC",
")",
"DSA",
"signature",
"or",
"verification",
"."
] | def new(key, mode, encoding='binary', randfunc=None):
"""Create a signature object :class:`DSS_SigScheme` that
can perform (EC)DSA signature or verification.
.. note::
Refer to `NIST SP 800 Part 1 Rev 4`_ (or newer release) for an
overview of the recommended key lengths.
:parameter key:
The key to use for computing the signature (*private* keys only)
or verifying one: it must be either
:class:`Crypto.PublicKey.DSA` or :class:`Crypto.PublicKey.ECC`.
For DSA keys, let ``L`` and ``N`` be the bit lengths of the modulus ``p``
and of ``q``: the pair ``(L,N)`` must appear in the following list,
in compliance with section 4.2 of `FIPS 186-4`_:
- (1024, 160) *legacy only; do not create new signatures with this*
- (2048, 224) *deprecated; do not create new signatures with this*
- (2048, 256)
- (3072, 256)
For ECC, only keys over P-256, P-384, and P-521 are accepted.
:type key:
a key object
:parameter mode:
The parameter can take these values:
- *'fips-186-3'*. The signature generation is randomized and carried out
according to `FIPS 186-3`_: the nonce ``k`` is taken from the RNG.
- *'deterministic-rfc6979'*. The signature generation is not
randomized. See RFC6979_.
:type mode:
string
:parameter encoding:
How the signature is encoded. This value determines the output of
:meth:`sign` and the input to :meth:`verify`.
The following values are accepted:
- *'binary'* (default), the signature is the raw concatenation
of ``r`` and ``s``. It is defined in the IEEE P.1363 standard.
For DSA, the size in bytes of the signature is ``N/4``
(e.g. 64 bytes for ``N=256``).
For ECDSA, the signature is always twice the length of a point
coordinate (e.g. 64 bytes for P-256).
- *'der'*, the signature is an ASN.1 SEQUENCE with two
INTEGERs (``r`` and ``s``) encoded with DER.
The size of the signature is variable.
:type encoding: string
:parameter randfunc:
A function that returns random *byte strings*, of a given length.
If omitted, the internal RNG is used.
Only applicable for the *'fips-186-3'* mode.
:type randfunc: callable
.. _FIPS 186-3: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. _FIPS 186-4: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
.. _NIST SP 800 Part 1 Rev 4: http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-57pt1r4.pdf
.. _RFC6979: http://tools.ietf.org/html/rfc6979
"""
# The goal of the 'mode' parameter is to avoid to
# have the current version of the standard as default.
#
# Over time, such version will be superseded by (for instance)
# FIPS 186-4 and it will be odd to have -3 as default.
if encoding not in ('binary', 'der'):
raise ValueError("Unknown encoding '%s'" % encoding)
if isinstance(key, EccKey):
order = key._curve.order
private_key_attr = 'd'
else:
order = Integer(key.q)
private_key_attr = 'x'
if key.has_private():
private_key = getattr(key, private_key_attr)
else:
private_key = None
if mode == 'deterministic-rfc6979':
return DeterministicDsaSigScheme(key, encoding, order, private_key)
elif mode == 'fips-186-3':
if isinstance(key, EccKey):
return FipsEcDsaSigScheme(key, encoding, order, randfunc)
else:
return FipsDsaSigScheme(key, encoding, order, randfunc)
else:
raise ValueError("Unknown DSS mode '%s'" % mode) | [
"def",
"new",
"(",
"key",
",",
"mode",
",",
"encoding",
"=",
"'binary'",
",",
"randfunc",
"=",
"None",
")",
":",
"# The goal of the 'mode' parameter is to avoid to",
"# have the current version of the standard as default.",
"#",
"# Over time, such version will be superseded by (for instance)",
"# FIPS 186-4 and it will be odd to have -3 as default.",
"if",
"encoding",
"not",
"in",
"(",
"'binary'",
",",
"'der'",
")",
":",
"raise",
"ValueError",
"(",
"\"Unknown encoding '%s'\"",
"%",
"encoding",
")",
"if",
"isinstance",
"(",
"key",
",",
"EccKey",
")",
":",
"order",
"=",
"key",
".",
"_curve",
".",
"order",
"private_key_attr",
"=",
"'d'",
"else",
":",
"order",
"=",
"Integer",
"(",
"key",
".",
"q",
")",
"private_key_attr",
"=",
"'x'",
"if",
"key",
".",
"has_private",
"(",
")",
":",
"private_key",
"=",
"getattr",
"(",
"key",
",",
"private_key_attr",
")",
"else",
":",
"private_key",
"=",
"None",
"if",
"mode",
"==",
"'deterministic-rfc6979'",
":",
"return",
"DeterministicDsaSigScheme",
"(",
"key",
",",
"encoding",
",",
"order",
",",
"private_key",
")",
"elif",
"mode",
"==",
"'fips-186-3'",
":",
"if",
"isinstance",
"(",
"key",
",",
"EccKey",
")",
":",
"return",
"FipsEcDsaSigScheme",
"(",
"key",
",",
"encoding",
",",
"order",
",",
"randfunc",
")",
"else",
":",
"return",
"FipsDsaSigScheme",
"(",
"key",
",",
"encoding",
",",
"order",
",",
"randfunc",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown DSS mode '%s'\"",
"%",
"mode",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Signature/DSS.py#L311-L408 |
||
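A minimal signing round-trip for the `DSS.new` record above, sketched against PyCryptodome's documented API (not part of the original record; the message bytes are placeholders):

```python
from Crypto.Hash import SHA256
from Crypto.PublicKey import ECC
from Crypto.Signature import DSS

key = ECC.generate(curve='P-256')               # private key, usable for ECDSA
signer = DSS.new(key, 'fips-186-3')             # nonce k drawn from the internal RNG
signature = signer.sign(SHA256.new(b'hello'))   # 64 raw bytes for P-256 ('binary' encoding)

verifier = DSS.new(key.public_key(), 'fips-186-3')
verifier.verify(SHA256.new(b'hello'), signature)  # raises ValueError if the signature is invalid
```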
christinaa/LLVM-VideoCore4 | 7773c3c9e5d22b785d4b96ed0acea37c8aa9c183 | utils/lit/lit/ProgressBar.py | python | TerminalController.render | (self, template) | | return re.sub(r'\$\$|\${\w+}', self._render_sub, template) | Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not). | Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not). | [
"Replace",
"each",
"$",
"-",
"substitutions",
"in",
"the",
"given",
"template",
"string",
"with",
"the",
"corresponding",
"terminal",
"control",
"string",
"(",
"if",
"it",
"s",
"defined",
")",
"or",
"(",
"if",
"it",
"s",
"not",
")",
"."
] | def render(self, template):
"""
Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template) | [
"def",
"render",
"(",
"self",
",",
"template",
")",
":",
"return",
"re",
".",
"sub",
"(",
"r'\\$\\$|\\${\\w+}'",
",",
"self",
".",
"_render_sub",
",",
"template",
")"
] | https://github.com/christinaa/LLVM-VideoCore4/blob/7773c3c9e5d22b785d4b96ed0acea37c8aa9c183/utils/lit/lit/ProgressBar.py#L153-L159 |
|
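The `render` record above depends on a `_render_sub` callback and a terminal capability table that are not shown. A self-contained sketch of the same substitution scheme, with a hypothetical hard-coded capability dict standing in for the terminfo queries:

```python
import re

CAPS = {'BOLD': '\x1b[1m', 'NORMAL': '\x1b[0m'}  # stand-in for real capability lookups

def render(template):
    def _render_sub(match):
        s = match.group()
        # '$$' is an escaped literal '$'; '${NAME}' resolves to a capability or ''
        return '$' if s == '$$' else CAPS.get(s[2:-1], '')
    return re.sub(r'\$\$|\${\w+}', _render_sub, template)

print(render('${BOLD}done${NORMAL}, total $$5'))
```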
hpi-xnor/BMXNet | ed0b201da6667887222b8e4b5f997c4f6b61943d | python/mxnet/ndarray/ndarray.py | python | NDArray.split | (self, *args, **kwargs) | return op.split(self, *args, **kwargs) | Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data. | Convenience fluent method for :py:func:`split`. | [
"Convenience",
"fluent",
"method",
"for",
":",
"py",
":",
"func",
":",
"split",
"."
] | def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
return op.split(self, *args, **kwargs) | [
"def",
"split",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"op",
".",
"split",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/ndarray/ndarray.py#L1036-L1042 |
|
yuxng/PoseCNN | 9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04 | lib/datasets/gmu_scene.py | python | gmu_scene.image_path_from_index | (self, index) | return image_path | Construct an image path from the image's "index" identifier. | Construct an image path from the image's "index" identifier. | [
"Construct",
"an",
"image",
"path",
"from",
"the",
"image",
"s",
"index",
"identifier",
"."
] | def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path | [
"def",
"image_path_from_index",
"(",
"self",
",",
"index",
")",
":",
"image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_path",
",",
"index",
"+",
"self",
".",
"_image_ext",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"image_path",
")",
",",
"'Path does not exist: {}'",
".",
"format",
"(",
"image_path",
")",
"return",
"image_path"
] | https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/gmu_scene.py#L42-L50 |
|
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Plugins/pvblot/pvblot.py | python | _PVBlotInterp.do_help | (self, command_name) | The given argument is a string representing a command name. The
string may be empty. Prints documentation for the command if one is given;
otherwise prints a list of available command names. | The given argument is a string representing a command name. The
string may be empty. Prints documentation for the command if one is given;
otherwise prints a list of available command names. | [
"The",
"given",
"argument",
"is",
"a",
"string",
"representing",
"a",
"command",
"name",
".",
"The",
"string",
"may",
"be",
"empty",
".",
"Prints",
"documentation",
"for",
"the",
"command",
"name",
"if",
"given",
"else",
"prints",
"a",
"list",
"of",
"available",
"command",
"names",
"."
] | def do_help(self, command_name):
"""The given argument is a string representing a command name. The
string may be empty. Prints documentation for the command if one is given;
otherwise prints a list of available command names."""
if not command_name:
print _PVBlotInterp.__doc__
print "The following commands are supported:"
print " ",
blotish_commands = self._blotish_commands.keys()
blotish_commands.sort()
for c in blotish_commands:
print c,
print
print
print "For more information on any command, try help <command>."
return
try:
command = self.get_unique_command(command_name)
print command.__doc__
except blotish.BlotishError, err:
blot_common.print_blot_error(err) | [
"def",
"do_help",
"(",
"self",
",",
"command_name",
")",
":",
"if",
"not",
"command_name",
":",
"print",
"_PVBlotInterp",
".",
"__doc__",
"print",
"\"The following commands are supported:\"",
"print",
"\" \"",
",",
"blotish_commands",
"=",
"self",
".",
"_blotish_commands",
".",
"keys",
"(",
")",
"blotish_commands",
".",
"sort",
"(",
")",
"for",
"c",
"in",
"blotish_commands",
":",
"print",
"c",
",",
"print",
"print",
"print",
"\"For more information on any command, try help <command>.\"",
"return",
"try",
":",
"command",
"=",
"self",
".",
"get_unique_command",
"(",
"command_name",
")",
"print",
"command",
".",
"__doc__",
"except",
"blotish",
".",
"BlotishError",
",",
"err",
":",
"blot_common",
".",
"print_blot_error",
"(",
"err",
")"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Plugins/pvblot/pvblot.py#L97-L117 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/aui.py | python | AuiPaneInfo.HasMaximizeButton | (*args, **kwargs) | return _aui.AuiPaneInfo_HasMaximizeButton(*args, **kwargs) | HasMaximizeButton(self) -> bool | HasMaximizeButton(self) -> bool | [
"HasMaximizeButton",
"(",
"self",
")",
"-",
">",
"bool"
] | def HasMaximizeButton(*args, **kwargs):
"""HasMaximizeButton(self) -> bool"""
return _aui.AuiPaneInfo_HasMaximizeButton(*args, **kwargs) | [
"def",
"HasMaximizeButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiPaneInfo_HasMaximizeButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/aui.py#L317-L319 |
|
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/inputs/barostats.py | python | InputBaro.fetch | (self) | return baro | Creates a barostat object.
Returns:
A barostat object of the appropriate type and with the appropriate
thermostat given the attributes of the InputBaro object. | Creates a barostat object. | [
"Creates",
"a",
"barostat",
"object",
"."
] | def fetch(self):
"""Creates a barostat object.
Returns:
A barostat object of the appropriate type and with the appropriate
thermostat given the attributes of the InputBaro object.
"""
super(InputBaro,self).fetch()
if self.mode.fetch() == "isotropic":
baro = BaroBZP(thermostat=self.thermostat.fetch(), tau=self.tau.fetch())
if self.p._explicit: baro.p = self.p.fetch()
elif self.mode.fetch() == "dummy":
baro = Barostat(thermostat=self.thermostat.fetch(), tau=self.tau.fetch())
else:
raise ValueError(self.mode.fetch() + " is not a valid mode of barostat")
return baro | [
"def",
"fetch",
"(",
"self",
")",
":",
"super",
"(",
"InputBaro",
",",
"self",
")",
".",
"fetch",
"(",
")",
"if",
"self",
".",
"mode",
".",
"fetch",
"(",
")",
"==",
"\"isotropic\"",
":",
"baro",
"=",
"BaroBZP",
"(",
"thermostat",
"=",
"self",
".",
"thermostat",
".",
"fetch",
"(",
")",
",",
"tau",
"=",
"self",
".",
"tau",
".",
"fetch",
"(",
")",
")",
"if",
"self",
".",
"p",
".",
"_explicit",
":",
"baro",
".",
"p",
"=",
"self",
".",
"p",
".",
"fetch",
"(",
")",
"elif",
"self",
".",
"mode",
".",
"fetch",
"(",
")",
"==",
"\"dummy\"",
":",
"baro",
"=",
"Barostat",
"(",
"thermostat",
"=",
"self",
".",
"thermostat",
".",
"fetch",
"(",
")",
",",
"tau",
"=",
"self",
".",
"tau",
".",
"fetch",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"self",
".",
"mode",
".",
"fetch",
"(",
")",
"+",
"\" is not a valid mode of barostat\"",
")",
"return",
"baro"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/inputs/barostats.py#L90-L107 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/shortcuteditor.py | python | Shortcut.IsTop | (self) | return self.topMenu | Returns ``True`` if this :class:`Shortcut` is associated with a top-level :class:`Menu`,
(i.e., in the top :class:`MenuBar` level), ``False`` otherwise. | Returns ``True`` if this :class:`Shortcut` is associated with a top-level :class:`Menu`,
(i.e., in the top :class:`MenuBar` level), ``False`` otherwise. | [
"Returns",
"True",
"if",
"this",
":",
"class",
":",
"Shortcut",
"is",
"associated",
"with",
"a",
"top",
"-",
"level",
":",
"class",
":",
"Menu",
"(",
"i",
".",
"e",
".",
"in",
"the",
"top",
":",
"class",
":",
"MenuBar",
"level",
")",
"False",
"otherwise",
"."
] | def IsTop(self):
"""
Returns ``True`` if this :class:`Shortcut` is associated with a top-level :class:`Menu`,
(i.e., in the top :class:`MenuBar` level), ``False`` otherwise.
"""
return self.topMenu | [
"def",
"IsTop",
"(",
"self",
")",
":",
"return",
"self",
".",
"topMenu"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/shortcuteditor.py#L1276-L1282 |
|
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/configobj/validate.py | python | VdtValueTooShortError.__init__ | (self, value) | >>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short. | >>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short. | [
">>>",
"raise",
"VdtValueTooShortError",
"(",
"jed",
")",
"Traceback",
"(",
"most",
"recent",
"call",
"last",
")",
":",
"VdtValueTooShortError",
":",
"the",
"value",
"jed",
"is",
"too",
"short",
"."
] | def __init__(self, value):
"""
>>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short.
"""
ValidateError.__init__(
self,
'the value "%s" is too short.' % (value,)) | [
"def",
"__init__",
"(",
"self",
",",
"value",
")",
":",
"ValidateError",
".",
"__init__",
"(",
"self",
",",
"'the value \"%s\" is too short.'",
"%",
"(",
"value",
",",
")",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/configobj/validate.py#L421-L429 |
||
mldbai/mldb | 69994bd879bf592d03374bf43870457384fbae93 | jml-build/jmlbuild.py | python | Parser.parse_func_nodejs_module | (self, line) | return line | Parses for the nodejs module params and adds the relevant dependencies | Parses for the nodejs module params and adds the relevant dependencies | [
"Parses",
"for",
"the",
"nodejs",
"module",
"params",
"and",
"adds",
"the",
"relevant",
"dependencies"
] | def parse_func_nodejs_module(self, line):
"""
Parses for the nodejs module params and adds the relevant dependencies
"""
print_dbg("\tnodejs_module: " + line)
params, line = self.parse_func_params(line)
assert len(params) > 0
assert len(params[0]) == 1
module = params[0][0] + Ext.NODEJS_MODULE
self.graph.add_edge(self.current_file, module)
self.graph.add_vertex(module)
if len(params) > 1:
assert len(params[1]) == 1
sources = params[1][0]
# Both modules and addons can be specified in the same param. A good
# educated guess is that our dependency is built before our library. And
# by good I mean the laughable notion that a build system would retain
# some kind of sane structure...
if len(params) > 2:
for lib in params[2]:
if lib + Ext.NODEJS_ADDON in self.graph.edges:
self.graph.add_edge(module, lib + Ext.NODEJS_ADDON)
else:
self.graph.add_edge(module, lib + Ext.NODEJS_MODULE)
return line | [
"def",
"parse_func_nodejs_module",
"(",
"self",
",",
"line",
")",
":",
"print_dbg",
"(",
"\"\\tnodejs_module: \"",
"+",
"line",
")",
"params",
",",
"line",
"=",
"self",
".",
"parse_func_params",
"(",
"line",
")",
"assert",
"len",
"(",
"params",
")",
">",
"0",
"assert",
"len",
"(",
"params",
"[",
"0",
"]",
")",
"==",
"1",
"module",
"=",
"params",
"[",
"0",
"]",
"[",
"0",
"]",
"+",
"Ext",
".",
"NODEJS_MODULE",
"self",
".",
"graph",
".",
"add_edge",
"(",
"self",
".",
"current_file",
",",
"module",
")",
"self",
".",
"graph",
".",
"add_vertex",
"(",
"module",
")",
"if",
"len",
"(",
"params",
")",
">",
"1",
":",
"assert",
"len",
"(",
"params",
"[",
"1",
"]",
")",
"==",
"1",
"sources",
"=",
"params",
"[",
"1",
"]",
"[",
"0",
"]",
"# Both modules and addon can be specified in the same one param. A good",
"# educated guess is that our dependency is built before our library. And",
"# by good I mean laughable notion that a build system would retain some",
"# kind of sane structure...",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"for",
"lib",
"in",
"params",
"[",
"2",
"]",
":",
"if",
"lib",
"+",
"Ext",
".",
"NODEJS_ADDON",
"in",
"self",
".",
"graph",
".",
"edges",
":",
"self",
".",
"graph",
".",
"add_edge",
"(",
"module",
",",
"lib",
"+",
"Ext",
".",
"NODEJS_ADDON",
")",
"else",
":",
"self",
".",
"graph",
".",
"add_edge",
"(",
"module",
",",
"lib",
"+",
"Ext",
".",
"NODEJS_MODULE",
")",
"return",
"line"
] | https://github.com/mldbai/mldb/blob/69994bd879bf592d03374bf43870457384fbae93/jml-build/jmlbuild.py#L382-L411 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | python | NeuralNetworkBuilder.inspect_optimizer | (self) | Prints the summary for the optimizer. | Prints the summary for the optimizer. | [
"Prints",
"the",
"summary",
"for",
"the",
"optimizer",
"."
] | def inspect_optimizer(self):
""" Prints the summary for the optimizer.
"""
optimizer = self.nn_spec.updateParams.optimizer
optimizer_type = optimizer.WhichOneof("OptimizerType")
print("Optimizer Type: {}".format(optimizer_type))
if optimizer_type == "sgdOptimizer":
lr = optimizer.sgdOptimizer.learningRate
batch = optimizer.sgdOptimizer.miniBatchSize
momentum = optimizer.sgdOptimizer.momentum
print(
"lr: {}, min: {}, max: {}".format(
lr.defaultValue, lr.range.minValue, lr.range.maxValue
)
)
print(
"batch: {}, allowed_set: {}".format(
batch.defaultValue, batch.set.values
)
)
print(
"momentum: {}, min: {}, max: {}".format(
momentum.defaultValue,
momentum.range.minValue,
momentum.range.maxValue,
)
)
elif optimizer_type == "adamOptimizer":
lr = optimizer.adamOptimizer.learningRate
batch = optimizer.adamOptimizer.miniBatchSize
beta1 = optimizer.adamOptimizer.beta1
beta2 = optimizer.adamOptimizer.beta2
eps = optimizer.adamOptimizer.eps
print(
"lr: {}, min: {}, max: {}".format(
lr.defaultValue, lr.range.minValue, lr.range.maxValue
)
)
print(
"batch: {}, allowed_set: {}".format(
batch.defaultValue, batch.set.values
)
)
print(
"beta1: {}, min: {}, max: {}".format(
beta1.defaultValue, beta1.range.minValue, beta1.range.maxValue
)
)
print(
"beta2: {}, min: {}, max: {}".format(
beta2.defaultValue, beta2.range.minValue, beta2.range.maxValue
)
)
print(
"epsilon: {}, min: {}, max: {}".format(
eps.defaultValue, eps.range.minValue, eps.range.maxValue
)
) | [
"def",
"inspect_optimizer",
"(",
"self",
")",
":",
"optimizer",
"=",
"self",
".",
"nn_spec",
".",
"updateParams",
".",
"optimizer",
"optimizer_type",
"=",
"optimizer",
".",
"WhichOneof",
"(",
"\"OptimizerType\"",
")",
"print",
"(",
"\"Optimizer Type: {}\"",
".",
"format",
"(",
"optimizer_type",
")",
")",
"if",
"optimizer_type",
"==",
"\"sgdOptimizer\"",
":",
"lr",
"=",
"optimizer",
".",
"sgdOptimizer",
".",
"learningRate",
"batch",
"=",
"optimizer",
".",
"sgdOptimizer",
".",
"miniBatchSize",
"momentum",
"=",
"optimizer",
".",
"sgdOptimizer",
".",
"momentum",
"print",
"(",
"\"lr: {}, min: {}, max: {}\"",
".",
"format",
"(",
"lr",
".",
"defaultValue",
",",
"lr",
".",
"range",
".",
"minValue",
",",
"lr",
".",
"range",
".",
"maxValue",
")",
")",
"print",
"(",
"\"batch: {}, allowed_set: {}\"",
".",
"format",
"(",
"batch",
".",
"defaultValue",
",",
"batch",
".",
"set",
".",
"values",
")",
")",
"print",
"(",
"\"momentum: {}, min: {}, max: {}\"",
".",
"format",
"(",
"momentum",
".",
"defaultValue",
",",
"momentum",
".",
"range",
".",
"minValue",
",",
"momentum",
".",
"range",
".",
"maxValue",
",",
")",
")",
"elif",
"optimizer_type",
"==",
"\"adamOptimizer\"",
":",
"lr",
"=",
"optimizer",
".",
"adamOptimizer",
".",
"learningRate",
"batch",
"=",
"optimizer",
".",
"adamOptimizer",
".",
"miniBatchSize",
"beta1",
"=",
"optimizer",
".",
"adamOptimizer",
".",
"beta1",
"beta2",
"=",
"optimizer",
".",
"adamOptimizer",
".",
"beta2",
"eps",
"=",
"optimizer",
".",
"adamOptimizer",
".",
"eps",
"print",
"(",
"\"lr: {}, min: {}, max: {}\"",
".",
"format",
"(",
"lr",
".",
"defaultValue",
",",
"lr",
".",
"range",
".",
"minValue",
",",
"lr",
".",
"range",
".",
"maxValue",
")",
")",
"print",
"(",
"\"batch: {}, allowed_set: {}\"",
".",
"format",
"(",
"batch",
".",
"defaultValue",
",",
"batch",
".",
"set",
".",
"values",
")",
")",
"print",
"(",
"\"beta1: {}, min: {}, max: {}\"",
".",
"format",
"(",
"beta1",
".",
"defaultValue",
",",
"beta1",
".",
"range",
".",
"minValue",
",",
"beta1",
".",
"range",
".",
"maxValue",
")",
")",
"print",
"(",
"\"beta2: {}, min: {}, max: {}\"",
".",
"format",
"(",
"beta2",
".",
"defaultValue",
",",
"beta2",
".",
"range",
".",
"minValue",
",",
"beta2",
".",
"range",
".",
"maxValue",
")",
")",
"print",
"(",
"\"epsilon: {}, min: {}, max: {}\"",
".",
"format",
"(",
"eps",
".",
"defaultValue",
",",
"eps",
".",
"range",
".",
"minValue",
",",
"eps",
".",
"range",
".",
"maxValue",
")",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L1105-L1162 |
||
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libevent-2.0.18-stable/event_rpcgen.py | python | Struct.EntryTagName | (self, entry) | return name.upper() | Creates the name inside an enumeration for distinguishing data
types. | Creates the name inside an enumeration for distinguishing data
types. | [
"Creates",
"the",
"name",
"inside",
"an",
"enumeration",
"for",
"distinguishing",
"data",
"types",
"."
] | def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper() | [
"def",
"EntryTagName",
"(",
"self",
",",
"entry",
")",
":",
"name",
"=",
"\"%s_%s\"",
"%",
"(",
"self",
".",
"_name",
",",
"entry",
".",
"Name",
"(",
")",
")",
"return",
"name",
".",
"upper",
"(",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libevent-2.0.18-stable/event_rpcgen.py#L66-L70 |
|
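A quick illustration of the naming rule in `EntryTagName` above; the struct and entry names are made up, and the entry is simplified to a plain string instead of an object with a `Name()` method:

```python
def entry_tag_name(struct_name, entry_name):
    # Same rule as the method above: join with '_' and upper-case the result.
    return ("%s_%s" % (struct_name, entry_name)).upper()

print(entry_tag_name('kill', 'weapon'))  # -> KILL_WEAPON
```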
nasa/astrobee | 9241e67e6692810d6e275abb3165b6d02f4ca5ef | scripts/git/cpplint.py | python | CheckForMultilineCommentsAndStrings | (filename, clean_lines, linenum, error) | Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Logs an error if we see /* ... */ or "..." that extend past one line. | [
"Logs",
"an",
"error",
"if",
"we",
"see",
"/",
"*",
"...",
"*",
"/",
"or",
"...",
"that",
"extend",
"past",
"one",
"line",
"."
] | def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace("\\\\", "")
if line.count("/*") > line.count("*/"):
error(
filename,
linenum,
"readability/multiline_comment",
5,
"Complex multi-line /*...*/-style comment found. "
"Lint may give bogus warnings. "
"Consider replacing these with //-style comments, "
"with #if 0...#endif, "
"or with more clearly structured multi-line comments.",
)
if (line.count('"') - line.count('\\"')) % 2:
error(
filename,
linenum,
"readability/multiline_string",
5,
'Multi-line string ("...") found. This lint script doesn\'t '
"do well with such strings, and may give bogus warnings. "
"Use C++11 raw strings or concatenation instead.",
) | [
"def",
"CheckForMultilineCommentsAndStrings",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# Remove all \\\\ (escaped backslashes) from the line. They are OK, and the",
"# second (escaped) slash may trigger later \\\" detection erroneously.",
"line",
"=",
"line",
".",
"replace",
"(",
"\"\\\\\\\\\"",
",",
"\"\"",
")",
"if",
"line",
".",
"count",
"(",
"\"/*\"",
")",
">",
"line",
".",
"count",
"(",
"\"*/\"",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"\"readability/multiline_comment\"",
",",
"5",
",",
"\"Complex multi-line /*...*/-style comment found. \"",
"\"Lint may give bogus warnings. \"",
"\"Consider replacing these with //-style comments, \"",
"\"with #if 0...#endif, \"",
"\"or with more clearly structured multi-line comments.\"",
",",
")",
"if",
"(",
"line",
".",
"count",
"(",
"'\"'",
")",
"-",
"line",
".",
"count",
"(",
"'\\\\\"'",
")",
")",
"%",
"2",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"\"readability/multiline_string\"",
",",
"5",
",",
"'Multi-line string (\"...\") found. This lint script doesn\\'t '",
"\"do well with such strings, and may give bogus warnings. \"",
"\"Use C++11 raw strings or concatenation instead.\"",
",",
")"
] | https://github.com/nasa/astrobee/blob/9241e67e6692810d6e275abb3165b6d02f4ca5ef/scripts/git/cpplint.py#L1910-L1955 |
||
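The check above relies on two line-local heuristics: an unbalanced `/*` count and an odd number of unescaped double quotes. A small sketch of the quote-parity test on a hypothetical flagged line:

```python
line = 'const char* s = "this string continues'  # one unescaped quote on the line
line = line.replace('\\\\', '')                  # drop escaped backslashes, as above
odd_quotes = (line.count('"') - line.count('\\"')) % 2
print(odd_quotes)  # 1 -> would trigger the readability/multiline_string warning
```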
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/OpenSSL/SSL.py | python | Context.add_extra_chain_cert | (self, certobj) | Add certificate to chain
:param certobj: The X509 certificate object to add to the chain
:return: None | Add certificate to chain | [
"Add",
"certificate",
"to",
"chain"
] | def add_extra_chain_cert(self, certobj):
"""
Add certificate to chain
:param certobj: The X509 certificate object to add to the chain
:return: None
"""
if not isinstance(certobj, X509):
raise TypeError("certobj must be an X509 instance")
copy = _lib.X509_dup(certobj._x509)
add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)
if not add_result:
# TODO: This is untested.
_lib.X509_free(copy)
_raise_current_error() | [
"def",
"add_extra_chain_cert",
"(",
"self",
",",
"certobj",
")",
":",
"if",
"not",
"isinstance",
"(",
"certobj",
",",
"X509",
")",
":",
"raise",
"TypeError",
"(",
"\"certobj must be an X509 instance\"",
")",
"copy",
"=",
"_lib",
".",
"X509_dup",
"(",
"certobj",
".",
"_x509",
")",
"add_result",
"=",
"_lib",
".",
"SSL_CTX_add_extra_chain_cert",
"(",
"self",
".",
"_context",
",",
"copy",
")",
"if",
"not",
"add_result",
":",
"# TODO: This is untested.",
"_lib",
".",
"X509_free",
"(",
"copy",
")",
"_raise_current_error",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/OpenSSL/SSL.py#L962-L977 |
||
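A hedged pyOpenSSL sketch of `add_extra_chain_cert`, sending an intermediate CA certificate during the handshake; the file names are placeholders:

```python
from OpenSSL import SSL, crypto

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
ctx.use_privatekey_file('server.key')      # hypothetical paths
ctx.use_certificate_file('server.crt')

with open('intermediate.pem', 'rb') as f:
    ca = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
ctx.add_extra_chain_cert(ca)               # served to peers along with the leaf cert
```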
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/array_ops.py | python | transpose | (a, perm=None, name="transpose", conjugate=False) | Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`. | Transposes `a`. | [
"Transposes",
"a",
"."
] | def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if not tensor_util.is_tf_type(a):
a = ops.convert_to_tensor(a, name="a")
if conjugate and a.dtype.is_complex:
transpose_fn = gen_array_ops.conjugate_transpose
else:
transpose_fn = gen_array_ops.transpose
if perm is not None:
return transpose_fn(a, perm, name=name)
rank = a.shape.rank
if rank is None:
perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
else:
perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
return transpose_fn(a, perm, name=name) | [
"def",
"transpose",
"(",
"a",
",",
"perm",
"=",
"None",
",",
"name",
"=",
"\"transpose\"",
",",
"conjugate",
"=",
"False",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"transpose\"",
",",
"[",
"a",
"]",
")",
"as",
"name",
":",
"if",
"not",
"tensor_util",
".",
"is_tf_type",
"(",
"a",
")",
":",
"a",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"a",
",",
"name",
"=",
"\"a\"",
")",
"if",
"conjugate",
"and",
"a",
".",
"dtype",
".",
"is_complex",
":",
"transpose_fn",
"=",
"gen_array_ops",
".",
"conjugate_transpose",
"else",
":",
"transpose_fn",
"=",
"gen_array_ops",
".",
"transpose",
"if",
"perm",
"is",
"not",
"None",
":",
"return",
"transpose_fn",
"(",
"a",
",",
"perm",
",",
"name",
"=",
"name",
")",
"rank",
"=",
"a",
".",
"shape",
".",
"rank",
"if",
"rank",
"is",
"None",
":",
"perm",
"=",
"gen_math_ops",
".",
"_range",
"(",
"gen_array_ops",
".",
"rank",
"(",
"a",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"else",
":",
"perm",
"=",
"np",
".",
"arange",
"(",
"rank",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"return",
"transpose_fn",
"(",
"a",
",",
"perm",
",",
"name",
"=",
"name",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/array_ops.py#L2291-L2374 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py | python | run_script | (dist_spec, script_name) | Locate distribution `dist_spec` and run its `script_name` script | Locate distribution `dist_spec` and run its `script_name` script | [
"Locate",
"distribution",
"dist_spec",
"and",
"run",
"its",
"script_name",
"script"
] | def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns) | [
"def",
"run_script",
"(",
"dist_spec",
",",
"script_name",
")",
":",
"ns",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_globals",
"name",
"=",
"ns",
"[",
"'__name__'",
"]",
"ns",
".",
"clear",
"(",
")",
"ns",
"[",
"'__name__'",
"]",
"=",
"name",
"require",
"(",
"dist_spec",
")",
"[",
"0",
"]",
".",
"run_script",
"(",
"script_name",
",",
"ns",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py#L464-L470 |
||
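Usage sketch for the module-level `run_script` helper above; the distribution and script names are hypothetical:

```python
import pkg_resources

# Locate the 'MyTool' distribution and execute its 'mytool-cli' script in the
# calling module's (cleared) namespace, per the helper above. Equivalent to:
# pkg_resources.require('MyTool==1.0')[0].run_script('mytool-cli', globals())
pkg_resources.run_script('MyTool==1.0', 'mytool-cli')
```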
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBLaunchInfo.GetLaunchEventData | (self) | return _lldb.SBLaunchInfo_GetLaunchEventData(self) | GetLaunchEventData(self) -> str | GetLaunchEventData(self) -> str | [
"GetLaunchEventData",
"(",
"self",
")",
"-",
">",
"str"
] | def GetLaunchEventData(self):
"""GetLaunchEventData(self) -> str"""
return _lldb.SBLaunchInfo_GetLaunchEventData(self) | [
"def",
"GetLaunchEventData",
"(",
"self",
")",
":",
"return",
"_lldb",
".",
"SBLaunchInfo_GetLaunchEventData",
"(",
"self",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L5561-L5563 |
|
google/earthenterprise | 0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9 | earth_enterprise/src/google/protobuf-py/google/protobuf/internal/cpp_message.py | python | NewCMessage | (full_message_name) | return _net_proto2___python.NewCMessage(full_message_name) | Creates a new C++ protocol message by its name. | Creates a new C++ protocol message by its name. | [
"Creates",
"a",
"new",
"C",
"++",
"protocol",
"message",
"by",
"its",
"name",
"."
] | def NewCMessage(full_message_name):
"""Creates a new C++ protocol message by its name."""
return _net_proto2___python.NewCMessage(full_message_name) | [
"def",
"NewCMessage",
"(",
"full_message_name",
")",
":",
"return",
"_net_proto2___python",
".",
"NewCMessage",
"(",
"full_message_name",
")"
] | https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/google/protobuf-py/google/protobuf/internal/cpp_message.py#L70-L72 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py | python | groupby_transform | (iterable, keyfunc=None, valuefunc=None, reducefunc=None) | return ret | An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data.
* *keyfunc* is a function computing a key value for each item in *iterable*
* *valuefunc* is a function that transforms the individual items from
*iterable* after grouping
* *reducefunc* is a function that transforms each group of items
>>> iterable = 'aAAbBBcCC'
>>> keyfunc = lambda k: k.upper()
>>> valuefunc = lambda v: v.lower()
>>> reducefunc = lambda g: ''.join(g)
>>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
[('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
Each optional argument defaults to an identity function if not specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function. | An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data. | [
"An",
"extension",
"of",
":",
"func",
":",
"itertools",
".",
"groupby",
"that",
"can",
"apply",
"transformations",
"to",
"the",
"grouped",
"data",
"."
] | def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
"""An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data.
* *keyfunc* is a function computing a key value for each item in *iterable*
* *valuefunc* is a function that transforms the individual items from
*iterable* after grouping
* *reducefunc* is a function that transforms each group of items
>>> iterable = 'aAAbBBcCC'
>>> keyfunc = lambda k: k.upper()
>>> valuefunc = lambda v: v.lower()
>>> reducefunc = lambda g: ''.join(g)
>>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
[('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
Each optional argument defaults to an identity function if not specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
ret = groupby(iterable, keyfunc)
if valuefunc:
ret = ((k, map(valuefunc, g)) for k, g in ret)
if reducefunc:
ret = ((k, reducefunc(g)) for k, g in ret)
return ret | [
"def",
"groupby_transform",
"(",
"iterable",
",",
"keyfunc",
"=",
"None",
",",
"valuefunc",
"=",
"None",
",",
"reducefunc",
"=",
"None",
")",
":",
"ret",
"=",
"groupby",
"(",
"iterable",
",",
"keyfunc",
")",
"if",
"valuefunc",
":",
"ret",
"=",
"(",
"(",
"k",
",",
"map",
"(",
"valuefunc",
",",
"g",
")",
")",
"for",
"k",
",",
"g",
"in",
"ret",
")",
"if",
"reducefunc",
":",
"ret",
"=",
"(",
"(",
"k",
",",
"reducefunc",
"(",
"g",
")",
")",
"for",
"k",
",",
"g",
"in",
"ret",
")",
"return",
"ret"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py#L1844-L1886 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/_setup/py2/term/_term.py | python | yellow | (fmt, **kwargs) | Write something in yellow on the screen | Write something in yellow on the screen | [
"Write",
"something",
"in",
"yellow",
"on",
"the",
"screen"
] | def yellow(fmt, **kwargs):
""" Write something in yellow on the screen """
announce("%%(BOLD)s%%(YELLOW)s%s%%(NORMAL)s" % fmt, **kwargs) | [
"def",
"yellow",
"(",
"fmt",
",",
"*",
"*",
"kwargs",
")",
":",
"announce",
"(",
"\"%%(BOLD)s%%(YELLOW)s%s%%(NORMAL)s\"",
"%",
"fmt",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/_setup/py2/term/_term.py#L104-L106 |
||
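The `yellow` helper above defers to an `announce` function that is not part of this record; it expands `%(BOLD)s`-style placeholders from a terminal capability map. A standalone approximation with hard-coded ANSI codes:

```python
COLORS = {'BOLD': '\x1b[1m', 'YELLOW': '\x1b[33m', 'NORMAL': '\x1b[0m'}

def yellow(fmt, **kwargs):
    text = "%%(BOLD)s%%(YELLOW)s%s%%(NORMAL)s" % fmt  # same wrapping as above
    print(text % dict(COLORS, **kwargs))              # fill colors and user args

yellow('skipped %(n)d files', n=3)
```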
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py | python | Listbox.nearest | (self, y) | return getint(self.tk.call(
self._w, 'nearest', y)) | Get index of item which is nearest to y coordinate Y. | Get index of item which is nearest to y coordinate Y. | [
"Get",
"index",
"of",
"item",
"which",
"is",
"nearest",
"to",
"y",
"coordinate",
"Y",
"."
] | def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
return getint(self.tk.call(
self._w, 'nearest', y)) | [
"def",
"nearest",
"(",
"self",
",",
"y",
")",
":",
"return",
"getint",
"(",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'nearest'",
",",
"y",
")",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L2581-L2584 |
|
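A small interactive sketch around `nearest`, written for Python 3's `tkinter` (the record itself comes from the Python 2 `Tkinter` module):

```python
import tkinter as tk

root = tk.Tk()
lb = tk.Listbox(root)
for item in ('alpha', 'beta', 'gamma'):
    lb.insert(tk.END, item)
lb.pack()

def on_click(event):
    index = lb.nearest(event.y)            # widget-relative y coordinate
    print('nearest item:', lb.get(index))

lb.bind('<Button-1>', on_click)
root.mainloop()
```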
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/sparse/linalg/_onenormest.py | python | _onenormest_core | (A, AT, t, itmax) | return est, v, w, nmults, nresamples | Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy and time/memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4. | Compute a lower bound of the 1-norm of a sparse matrix. | [
"Compute",
"a",
"lower",
"bound",
"of",
"the",
"1",
"-",
"norm",
"of",
"a",
"sparse",
"matrix",
"."
] | def _onenormest_core(A, AT, t, itmax):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy and time/memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4.
"""
# This function is a more or less direct translation
# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
if itmax < 2:
raise ValueError('at least two iterations are required')
if t < 1:
raise ValueError('at least one column is required')
n = A.shape[0]
if t >= n:
raise ValueError('t should be smaller than the order of A')
# Track the number of big*small matrix multiplications
# and the number of resamplings.
nmults = 0
nresamples = 0
# "We now explain our choice of starting matrix. We take the first
# column of X to be the vector of 1s [...] This has the advantage that
# for a matrix with nonnegative elements the algorithm converges
# with an exact estimate on the second iteration, and such matrices
# arise in applications [...]"
X = np.ones((n, t), dtype=float)
# "The remaining columns are chosen as rand{-1,1},
# with a check for and correction of parallel columns,
# exactly as for S in the body of the algorithm."
if t > 1:
for i in range(1, t):
# These are technically initial samples, not resamples,
# so the resampling count is not incremented.
resample_column(i, X)
for i in range(t):
while column_needs_resampling(i, X):
resample_column(i, X)
nresamples += 1
# "Choose starting matrix X with columns of unit 1-norm."
X /= float(n)
# "indices of used unit vectors e_j"
ind_hist = np.zeros(0, dtype=np.intp)
est_old = 0
S = np.zeros((n, t), dtype=float)
k = 1
ind = None
while True:
Y = np.asarray(A_linear_operator.matmat(X))
nmults += 1
mags = _sum_abs_axis0(Y)
est = np.max(mags)
best_j = np.argmax(mags)
if est > est_old or k == 2:
if k >= 2:
ind_best = ind[best_j]
w = Y[:, best_j]
# (1)
if k >= 2 and est <= est_old:
est = est_old
break
est_old = est
S_old = S
if k > itmax:
break
S = sign_round_up(Y)
del Y
# (2)
if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
break
if t > 1:
# "Ensure that no column of S is parallel to another column of S
# or to a column of S_old by replacing columns of S by rand{-1,1}."
for i in range(t):
while column_needs_resampling(i, S, S_old):
resample_column(i, S)
nresamples += 1
del S_old
# (3)
Z = np.asarray(AT_linear_operator.matmat(S))
nmults += 1
h = _max_abs_axis1(Z)
del Z
# (4)
if k >= 2 and max(h) == h[ind_best]:
break
# "Sort h so that h_first >= ... >= h_last
# and re-order ind correspondingly."
#
# Later on, we will need at most t+len(ind_hist) largest
# entries, so drop the rest
ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
del h
if t > 1:
# (5)
# Break if the most promising t vectors have been visited already.
if np.in1d(ind[:t], ind_hist).all():
break
# Put the most promising unvisited vectors at the front of the list
# and put the visited vectors at the end of the list.
# Preserve the order of the indices induced by the ordering of h.
seen = np.in1d(ind, ind_hist)
ind = np.concatenate((ind[~seen], ind[seen]))
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
ind_hist = np.concatenate((ind_hist, new_ind))
k += 1
v = elementary_vector(n, ind_best)
return est, v, w, nmults, nresamples | [
"def",
"_onenormest_core",
"(",
"A",
",",
"AT",
",",
"t",
",",
"itmax",
")",
":",
"# This function is a more or less direct translation",
"# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.",
"A_linear_operator",
"=",
"aslinearoperator",
"(",
"A",
")",
"AT_linear_operator",
"=",
"aslinearoperator",
"(",
"AT",
")",
"if",
"itmax",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'at least two iterations are required'",
")",
"if",
"t",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'at least one column is required'",
")",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"if",
"t",
">=",
"n",
":",
"raise",
"ValueError",
"(",
"'t should be smaller than the order of A'",
")",
"# Track the number of big*small matrix multiplications",
"# and the number of resamplings.",
"nmults",
"=",
"0",
"nresamples",
"=",
"0",
"# \"We now explain our choice of starting matrix. We take the first",
"# column of X to be the vector of 1s [...] This has the advantage that",
"# for a matrix with nonnegative elements the algorithm converges",
"# with an exact estimate on the second iteration, and such matrices",
"# arise in applications [...]\"",
"X",
"=",
"np",
".",
"ones",
"(",
"(",
"n",
",",
"t",
")",
",",
"dtype",
"=",
"float",
")",
"# \"The remaining columns are chosen as rand{-1,1},",
"# with a check for and correction of parallel columns,",
"# exactly as for S in the body of the algorithm.\"",
"if",
"t",
">",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"t",
")",
":",
"# These are technically initial samples, not resamples,",
"# so the resampling count is not incremented.",
"resample_column",
"(",
"i",
",",
"X",
")",
"for",
"i",
"in",
"range",
"(",
"t",
")",
":",
"while",
"column_needs_resampling",
"(",
"i",
",",
"X",
")",
":",
"resample_column",
"(",
"i",
",",
"X",
")",
"nresamples",
"+=",
"1",
"# \"Choose starting matrix X with columns of unit 1-norm.\"",
"X",
"/=",
"float",
"(",
"n",
")",
"# \"indices of used unit vectors e_j\"",
"ind_hist",
"=",
"np",
".",
"zeros",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"est_old",
"=",
"0",
"S",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"t",
")",
",",
"dtype",
"=",
"float",
")",
"k",
"=",
"1",
"ind",
"=",
"None",
"while",
"True",
":",
"Y",
"=",
"np",
".",
"asarray",
"(",
"A_linear_operator",
".",
"matmat",
"(",
"X",
")",
")",
"nmults",
"+=",
"1",
"mags",
"=",
"_sum_abs_axis0",
"(",
"Y",
")",
"est",
"=",
"np",
".",
"max",
"(",
"mags",
")",
"best_j",
"=",
"np",
".",
"argmax",
"(",
"mags",
")",
"if",
"est",
">",
"est_old",
"or",
"k",
"==",
"2",
":",
"if",
"k",
">=",
"2",
":",
"ind_best",
"=",
"ind",
"[",
"best_j",
"]",
"w",
"=",
"Y",
"[",
":",
",",
"best_j",
"]",
"# (1)",
"if",
"k",
">=",
"2",
"and",
"est",
"<=",
"est_old",
":",
"est",
"=",
"est_old",
"break",
"est_old",
"=",
"est",
"S_old",
"=",
"S",
"if",
"k",
">",
"itmax",
":",
"break",
"S",
"=",
"sign_round_up",
"(",
"Y",
")",
"del",
"Y",
"# (2)",
"if",
"every_col_of_X_is_parallel_to_a_col_of_Y",
"(",
"S",
",",
"S_old",
")",
":",
"break",
"if",
"t",
">",
"1",
":",
"# \"Ensure that no column of S is parallel to another column of S",
"# or to a column of S_old by replacing columns of S by rand{-1,1}.\"",
"for",
"i",
"in",
"range",
"(",
"t",
")",
":",
"while",
"column_needs_resampling",
"(",
"i",
",",
"S",
",",
"S_old",
")",
":",
"resample_column",
"(",
"i",
",",
"S",
")",
"nresamples",
"+=",
"1",
"del",
"S_old",
"# (3)",
"Z",
"=",
"np",
".",
"asarray",
"(",
"AT_linear_operator",
".",
"matmat",
"(",
"S",
")",
")",
"nmults",
"+=",
"1",
"h",
"=",
"_max_abs_axis1",
"(",
"Z",
")",
"del",
"Z",
"# (4)",
"if",
"k",
">=",
"2",
"and",
"max",
"(",
"h",
")",
"==",
"h",
"[",
"ind_best",
"]",
":",
"break",
"# \"Sort h so that h_first >= ... >= h_last",
"# and re-order ind correspondingly.\"",
"#",
"# Later on, we will need at most t+len(ind_hist) largest",
"# entries, so drop the rest",
"ind",
"=",
"np",
".",
"argsort",
"(",
"h",
")",
"[",
":",
":",
"-",
"1",
"]",
"[",
":",
"t",
"+",
"len",
"(",
"ind_hist",
")",
"]",
".",
"copy",
"(",
")",
"del",
"h",
"if",
"t",
">",
"1",
":",
"# (5)",
"# Break if the most promising t vectors have been visited already.",
"if",
"np",
".",
"in1d",
"(",
"ind",
"[",
":",
"t",
"]",
",",
"ind_hist",
")",
".",
"all",
"(",
")",
":",
"break",
"# Put the most promising unvisited vectors at the front of the list",
"# and put the visited vectors at the end of the list.",
"# Preserve the order of the indices induced by the ordering of h.",
"seen",
"=",
"np",
".",
"in1d",
"(",
"ind",
",",
"ind_hist",
")",
"ind",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ind",
"[",
"~",
"seen",
"]",
",",
"ind",
"[",
"seen",
"]",
")",
")",
"for",
"j",
"in",
"range",
"(",
"t",
")",
":",
"X",
"[",
":",
",",
"j",
"]",
"=",
"elementary_vector",
"(",
"n",
",",
"ind",
"[",
"j",
"]",
")",
"new_ind",
"=",
"ind",
"[",
":",
"t",
"]",
"[",
"~",
"np",
".",
"in1d",
"(",
"ind",
"[",
":",
"t",
"]",
",",
"ind_hist",
")",
"]",
"ind_hist",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ind_hist",
",",
"new_ind",
")",
")",
"k",
"+=",
"1",
"v",
"=",
"elementary_vector",
"(",
"n",
",",
"ind_best",
")",
"return",
"est",
",",
"v",
",",
"w",
",",
"nmults",
",",
"nresamples"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/sparse/linalg/_onenormest.py#L325-L468 |
|
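The core routine above is not called directly; it backs the public `scipy.sparse.linalg.onenormest` wrapper. A hedged sketch comparing the estimate with the exact 1-norm (maximum absolute column sum):

```python
from scipy.sparse import rand
from scipy.sparse.linalg import onenormest

A = rand(200, 200, density=0.05, random_state=0, format='csc')
est = onenormest(A, t=2)
exact = abs(A).sum(axis=0).max()   # exact 1-norm of the sparse matrix
print(est, exact)                  # est is a lower bound, and often exact
```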
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/utils/io/io_xml.py | python | write_list | (data, delims="[]") | return rstr | Writes a formatted string from a list.
The format of the output is as for a standard python list,
[list[0], list[1],..., list[n]]. Note the space after the commas, and the
use of square brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "[]".
Returns:
A formatted string. | Writes a formatted string from a list. | [
"Writes",
"a",
"formatted",
"string",
"from",
"a",
"list",
"."
] | def write_list(data, delims="[]"):
"""Writes a formatted string from a list.
The format of the output is as for a standard python list,
[list[0], list[1],..., list[n]]. Note the space after the commas, and the
use of square brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "[]".
Returns:
A formatted string.
"""
rstr = delims[0]
for v in data:
rstr += str(v) + ", "
rstr = rstr.rstrip(", ")
rstr += delims[1]
return rstr | [
"def",
"write_list",
"(",
"data",
",",
"delims",
"=",
"\"[]\"",
")",
":",
"rstr",
"=",
"delims",
"[",
"0",
"]",
"for",
"v",
"in",
"data",
":",
"rstr",
"+=",
"str",
"(",
"v",
")",
"+",
"\", \"",
"rstr",
"=",
"rstr",
".",
"rstrip",
"(",
"\", \"",
")",
"rstr",
"+=",
"delims",
"[",
"1",
"]",
"return",
"rstr"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/utils/io/io_xml.py#L424-L447 |
|
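The `write_list` helper above is self-contained, so it can be exercised directly; a short demonstration:

```python
def write_list(data, delims="[]"):
    rstr = delims[0]
    for v in data:
        rstr += str(v) + ", "
    rstr = rstr.rstrip(", ")
    rstr += delims[1]
    return rstr

print(write_list([1.0, 2.5, 3.0]))          # [1.0, 2.5, 3.0]
print(write_list(("a", "b"), delims="()"))  # (a, b)
```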
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py | python | OverlappedOptimizer.register_fsdp | (self, fsdp: FullyShardedDataParallel) | Registers the overlapped optimizer with FSDP. | Registers the overlapped optimizer with FSDP. | [
"Registers",
"the",
"overlapped",
"optimizer",
"with",
"FSDP",
"."
] | def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
"""Registers the overlapped optimizer with FSDP."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support overlapped FSDP."
) | [
"def",
"register_fsdp",
"(",
"self",
",",
"fsdp",
":",
"FullyShardedDataParallel",
")",
"->",
"None",
":",
"raise",
"NotImplementedError",
"(",
"f\"{self.__class__.__name__} does not support overlapped FSDP.\"",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py#L48-L52 |
||
neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | Dragon/python/dragon/vm/caffe/coord_map.py | python | inverse | (coord_map) | return ax, 1 / a, -b / a | Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient. | Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient. | [
"Invert",
"a",
"coord",
"map",
"by",
"de",
"-",
"scaling",
"and",
"un",
"-",
"shifting",
";",
"this",
"gives",
"the",
"backward",
"mapping",
"for",
"the",
"gradient",
"."
] | def inverse(coord_map):
"""
Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient.
"""
ax, a, b = coord_map
return ax, 1 / a, -b / a | [
"def",
"inverse",
"(",
"coord_map",
")",
":",
"ax",
",",
"a",
",",
"b",
"=",
"coord_map",
"return",
"ax",
",",
"1",
"/",
"a",
",",
"-",
"b",
"/",
"a"
] | https://github.com/neopenx/Dragon/blob/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/vm/caffe/coord_map.py#L106-L112 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py | python | SocketSerial.inWaiting | (self) | return 0 | Return the number of characters currently in the input buffer. | Return the number of characters currently in the input buffer. | [
"Return",
"the",
"number",
"of",
"characters",
"currently",
"in",
"the",
"input",
"buffer",
"."
] | def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
# set this one to debug as the function could be called often...
self.logger.debug('WARNING: inWaiting returns dummy value')
return 0 | [
"def",
"inWaiting",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_isOpen",
":",
"raise",
"portNotOpenError",
"if",
"self",
".",
"logger",
":",
"# set this one to debug as the function could be called often...",
"self",
".",
"logger",
".",
"debug",
"(",
"'WARNING: inWaiting returns dummy value'",
")",
"return",
"0"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py#L126-L132 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | python | MultipleTimesGroup.AddMethod | (self, mock_method) | Add a method to this group.
Args:
mock_method: A mock method to be added to this group. | Add a method to this group. | [
"Add",
"a",
"method",
"to",
"this",
"group",
"."
] | def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method) | [
"def",
"AddMethod",
"(",
"self",
",",
"mock_method",
")",
":",
"self",
".",
"_methods",
".",
"add",
"(",
"mock_method",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1276-L1283 |
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/SANS/isis_reduction_steps.py | python | StripEndNans._isInf | (self, val) | return math.isinf(val) | Check if the value is inf or not
@param val: float to check
@returns true if value is inf | Check if the value is inf or not | [
"Check",
"if",
"the",
"value",
"is",
"inf",
"or",
"not"
] | def _isInf(self, val):
'''
Check if the value is inf or not
@param val: float to check
@returns true if value is inf
'''
return math.isinf(val) | [
"def",
"_isInf",
"(",
"self",
",",
"val",
")",
":",
"return",
"math",
".",
"isinf",
"(",
"val",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/isis_reduction_steps.py#L4114-L4120 |
|
apache/singa | 93fd9da72694e68bfe3fb29d0183a65263d238a1 | setup.py | python | AuditCommand.status | (s) | Prints things in bold. | Prints things in bold. | [
"Prints",
"things",
"in",
"bold",
"."
] | def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s)) | [
"def",
"status",
"(",
"s",
")",
":",
"print",
"(",
"'\\033[1m{0}\\033[0m'",
".",
"format",
"(",
"s",
")",
")"
] | https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/setup.py#L104-L106 |
||
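The string in the record above is plain ANSI styling: '\033[1m' switches bold on and '\033[0m' resets attributes, so the pattern generalizes to any message. A quick sketch (an ANSI-capable terminal is assumed):

    BOLD, RESET = '\033[1m', '\033[0m'

    def status(s):
        # Wrap the message in ANSI bold-on / reset escape codes.
        print('{}{}{}'.format(BOLD, s, RESET))

    status('Building wheel...')  # renders in bold on ANSI terminals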
NVIDIA/TensorRT | 42805f078052daad1a98bc5965974fcffaad0960 | samples/python/efficientdet/onnx_utils.py | python | find_descendant_by_op | (self, node, op, depth=10) | return None | Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an
exhaustive graph search, it will take only the first output of each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing this many nodes.
:return: The first descendant node found that performs that op. | Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an
exhaustive graph search, it will take only the first output of each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing this many nodes.
:return: The first descendant node found that performs that op. | [
"Starting",
"from",
"the",
"given",
"node",
"finds",
"a",
"node",
"lower",
"in",
"the",
"graph",
"matching",
"the",
"given",
"operation",
"name",
".",
"This",
"is",
"not",
"an",
"exhaustive",
"graph",
"search",
"it",
"will",
"take",
"only",
"the",
"first",
"output",
"of",
"each",
"node",
"traversed",
"while",
"searching",
"depth",
"-",
"first",
".",
":",
"param",
"self",
":",
"The",
"gs",
".",
"Graph",
"object",
"being",
"extended",
".",
":",
"param",
"node",
":",
"The",
"node",
"to",
"start",
"searching",
"from",
".",
":",
"param",
"op",
":",
"The",
"operation",
"name",
"to",
"search",
"for",
".",
":",
"param",
"depth",
":",
"Stop",
"searching",
"after",
"traversing",
"these",
"many",
"nodes",
".",
":",
"return",
":",
"The",
"first",
"descendant",
"node",
"matching",
"that",
"performs",
"that",
"op",
"."
] | def find_descendant_by_op(self, node, op, depth=10):
"""
Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an
exhaustive graph search, it will take only the first output of each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing this many nodes.
:return: The first descendant node found that performs that op.
"""
for i in range(depth):
node = node.o()
if node.op == op:
return node
return None | [
"def",
"find_descendant_by_op",
"(",
"self",
",",
"node",
",",
"op",
",",
"depth",
"=",
"10",
")",
":",
"for",
"i",
"in",
"range",
"(",
"depth",
")",
":",
"node",
"=",
"node",
".",
"o",
"(",
")",
"if",
"node",
".",
"op",
"==",
"op",
":",
"return",
"node",
"return",
"None"
] | https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/samples/python/efficientdet/onnx_utils.py#L111-L125 |
|
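The traversal above is deliberately shallow: it follows only the first output of each node (node.o()) for at most depth hops, so it inspects one path rather than the whole graph. A toy sketch of the same walk with a stand-in node class (TinyNode and its o() method are illustrative substitutes for the onnx-graphsurgeon API, not part of it):

    class TinyNode:
        def __init__(self, op, out=None):
            self.op, self._out = op, out
        def o(self):
            # Mimic gs.Node.o(): the node fed by this node's first output.
            return self._out

    def find_descendant_by_op(node, op, depth=10):
        for _ in range(depth):
            node = node.o()
            if node is None:          # guard added for the toy graph
                return None
            if node.op == op:
                return node
        return None

    tail = TinyNode('Softmax')
    head = TinyNode('Conv', TinyNode('Relu', tail))
    assert find_descendant_by_op(head, 'Softmax') is tail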
irods/irods | ed6328646cee87182098d569919004049bf4ce21 | scripts/irods/pyparsing.py | python | ParseResults.itervalues | ( self ) | return (self[k] for k in self.iterkeys()) | Returns all named result values. | Returns all named result values. | [
"Returns",
"all",
"named",
"result",
"values",
"."
] | def itervalues( self ):
"""Returns all named result values."""
return (self[k] for k in self.iterkeys()) | [
"def",
"itervalues",
"(",
"self",
")",
":",
"return",
"(",
"self",
"[",
"k",
"]",
"for",
"k",
"in",
"self",
".",
"iterkeys",
"(",
")",
")"
] | https://github.com/irods/irods/blob/ed6328646cee87182098d569919004049bf4ce21/scripts/irods/pyparsing.py#L381-L383 |
|
google-ar/WebARonTango | e86965d2cbc652156b480e0fcf77c716745578cd | chromium/src/gpu/command_buffer/build_gles2_cmd_buffer.py | python | STRnHandler.WriteGLES2Implementation | (self, func, f) | | Overridden from TypeHandler. | Overridden from TypeHandler. | [
"Overridden",
"from",
"TypeHandler",
"."
] | def WriteGLES2Implementation(self, func, f):
"""Overridden from TypeHandler."""
code_1 = """%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
"""
code_2 = """ GPU_CLIENT_LOG("[" << GetLogPrefix()
<< "] gl%(func_name)s" << "("
<< %(arg0)s << ", "
<< %(arg1)s << ", "
<< static_cast<void*>(%(arg2)s) << ", "
<< static_cast<void*>(%(arg3)s) << ")");
helper_->SetBucketSize(kResultBucketId, 0);
helper_->%(func_name)s(%(id_name)s, kResultBucketId);
std::string str;
GLsizei max_size = 0;
if (GetBucketAsString(kResultBucketId, &str)) {
if (bufsize > 0) {
max_size =
std::min(static_cast<size_t>(%(bufsize_name)s) - 1, str.size());
memcpy(%(dest_name)s, str.c_str(), max_size);
%(dest_name)s[max_size] = '\\0';
GPU_CLIENT_LOG("------\\n" << %(dest_name)s << "\\n------");
}
}
if (%(length_name)s != NULL) {
*%(length_name)s = max_size;
}
CheckGLError();
}
"""
args = func.GetOriginalArgs()
str_args = {
'return_type': func.return_type,
'func_name': func.original_name,
'args': func.MakeTypedOriginalArgString(""),
'id_name': args[0].name,
'bufsize_name': args[1].name,
'length_name': args[2].name,
'dest_name': args[3].name,
'arg0': args[0].name,
'arg1': args[1].name,
'arg2': args[2].name,
'arg3': args[3].name,
}
f.write(code_1 % str_args)
func.WriteDestinationInitalizationValidation(f)
f.write(code_2 % str_args) | [
"def",
"WriteGLES2Implementation",
"(",
"self",
",",
"func",
",",
"f",
")",
":",
"code_1",
"=",
"\"\"\"%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {\n GPU_CLIENT_SINGLE_THREAD_CHECK();\n\"\"\"",
"code_2",
"=",
"\"\"\" GPU_CLIENT_LOG(\"[\" << GetLogPrefix()\n << \"] gl%(func_name)s\" << \"(\"\n << %(arg0)s << \", \"\n << %(arg1)s << \", \"\n << static_cast<void*>(%(arg2)s) << \", \"\n << static_cast<void*>(%(arg3)s) << \")\");\n helper_->SetBucketSize(kResultBucketId, 0);\n helper_->%(func_name)s(%(id_name)s, kResultBucketId);\n std::string str;\n GLsizei max_size = 0;\n if (GetBucketAsString(kResultBucketId, &str)) {\n if (bufsize > 0) {\n max_size =\n std::min(static_cast<size_t>(%(bufsize_name)s) - 1, str.size());\n memcpy(%(dest_name)s, str.c_str(), max_size);\n %(dest_name)s[max_size] = '\\\\0';\n GPU_CLIENT_LOG(\"------\\\\n\" << %(dest_name)s << \"\\\\n------\");\n }\n }\n if (%(length_name)s != NULL) {\n *%(length_name)s = max_size;\n }\n CheckGLError();\n}\n\"\"\"",
"args",
"=",
"func",
".",
"GetOriginalArgs",
"(",
")",
"str_args",
"=",
"{",
"'return_type'",
":",
"func",
".",
"return_type",
",",
"'func_name'",
":",
"func",
".",
"original_name",
",",
"'args'",
":",
"func",
".",
"MakeTypedOriginalArgString",
"(",
"\"\"",
")",
",",
"'id_name'",
":",
"args",
"[",
"0",
"]",
".",
"name",
",",
"'bufsize_name'",
":",
"args",
"[",
"1",
"]",
".",
"name",
",",
"'length_name'",
":",
"args",
"[",
"2",
"]",
".",
"name",
",",
"'dest_name'",
":",
"args",
"[",
"3",
"]",
".",
"name",
",",
"'arg0'",
":",
"args",
"[",
"0",
"]",
".",
"name",
",",
"'arg1'",
":",
"args",
"[",
"1",
"]",
".",
"name",
",",
"'arg2'",
":",
"args",
"[",
"2",
"]",
".",
"name",
",",
"'arg3'",
":",
"args",
"[",
"3",
"]",
".",
"name",
",",
"}",
"f",
".",
"write",
"(",
"code_1",
"%",
"str_args",
")",
"func",
".",
"WriteDestinationInitalizationValidation",
"(",
"f",
")",
"f",
".",
"write",
"(",
"code_2",
"%",
"str_args",
")"
] | https://github.com/google-ar/WebARonTango/blob/e86965d2cbc652156b480e0fcf77c716745578cd/chromium/src/gpu/command_buffer/build_gles2_cmd_buffer.py#L8296-L8342 |
||
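Under the noise, the generator above is ordinary %-style templating: a C++ skeleton with %(name)s placeholders is filled from a dict keyed by the wrapped function's argument names. A reduced sketch of the pattern (the template text and values are invented for illustration):

    template = (
        '%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {\n'
        '  helper_->%(func_name)s(%(arg0)s, kResultBucketId);\n'
        '}\n'
    )
    str_args = {
        'return_type': 'void',
        'func_name': 'GetProgramInfoLog',
        'args': 'GLuint program',
        'arg0': 'program',
    }
    # %-formatting substitutes every %(key)s placeholder from the dict.
    print(template % str_args)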
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/plat-mac/lib-scriptpackages/StdSuites/Standard_Suite.py | python | Standard_Suite_Events.duplicate | (self, _object, _attributes={}, **_arguments) | duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s) | duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s) | [
"duplicate",
":",
"Duplicate",
"one",
"or",
"more",
"objects",
"Required",
"argument",
":",
"the",
"object",
"(",
"s",
")",
"to",
"duplicate",
"Keyword",
"argument",
"to",
":",
"the",
"new",
"location",
"for",
"the",
"object",
"(",
"s",
")",
"Keyword",
"argument",
"with_properties",
":",
"the",
"initial",
"values",
"for",
"properties",
"of",
"the",
"new",
"object",
"that",
"are",
"to",
"be",
"different",
"from",
"the",
"original",
"Keyword",
"argument",
"_attributes",
":",
"AppleEvent",
"attribute",
"dictionary",
"Returns",
":",
"to",
"the",
"duplicated",
"object",
"(",
"s",
")"
] | def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
"""
_code = 'core'
_subcode = 'clon'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----'] | [
"def",
"duplicate",
"(",
"self",
",",
"_object",
",",
"_attributes",
"=",
"{",
"}",
",",
"*",
"*",
"_arguments",
")",
":",
"_code",
"=",
"'core'",
"_subcode",
"=",
"'clon'",
"aetools",
".",
"keysubst",
"(",
"_arguments",
",",
"self",
".",
"_argmap_duplicate",
")",
"_arguments",
"[",
"'----'",
"]",
"=",
"_object",
"_reply",
",",
"_arguments",
",",
"_attributes",
"=",
"self",
".",
"send",
"(",
"_code",
",",
"_subcode",
",",
"_arguments",
",",
"_attributes",
")",
"if",
"_arguments",
".",
"get",
"(",
"'errn'",
",",
"0",
")",
":",
"raise",
"aetools",
".",
"Error",
",",
"aetools",
".",
"decodeerror",
"(",
"_arguments",
")",
"# XXXX Optionally decode result",
"if",
"_arguments",
".",
"has_key",
"(",
"'----'",
")",
":",
"return",
"_arguments",
"[",
"'----'",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/plat-mac/lib-scriptpackages/StdSuites/Standard_Suite.py#L147-L168 |
||
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TStr.GetTrunc | (self) | return _snap.TStr_GetTrunc(self) | GetTrunc(TStr self) -> TStr
Parameters:
self: TStr const * | GetTrunc(TStr self) -> TStr | [
"GetTrunc",
"(",
"TStr",
"self",
")",
"-",
">",
"TStr"
] | def GetTrunc(self):
"""
GetTrunc(TStr self) -> TStr
Parameters:
self: TStr const *
"""
return _snap.TStr_GetTrunc(self) | [
"def",
"GetTrunc",
"(",
"self",
")",
":",
"return",
"_snap",
".",
"TStr_GetTrunc",
"(",
"self",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L9813-L9821 |
|
albertz/openlierox | d316c14a8eb57848ef56e9bfa7b23a56f694a51b | tools/DedicatedServerVideo/gdata/service.py | python | GDataService._SetAuthSubToken | (self, auth_token, scopes=None) | Deprecated, use SetAuthSubToken instead. | Deprecated, use SetAuthSubToken instead. | [
"Deprecated",
"use",
"SetAuthSubToken",
"instead",
"."
] | def _SetAuthSubToken(self, auth_token, scopes=None):
"""Deprecated, use SetAuthSubToken instead."""
self.SetAuthSubToken(auth_token, scopes=scopes) | [
"def",
"_SetAuthSubToken",
"(",
"self",
",",
"auth_token",
",",
"scopes",
"=",
"None",
")",
":",
"self",
".",
"SetAuthSubToken",
"(",
"auth_token",
",",
"scopes",
"=",
"scopes",
")"
] | https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/service.py#L303-L305 |
||
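The record above is the standard deprecation shim: the old private name forwards straight to the new public method. A generic sketch of the pattern with an explicit warning added (the DeprecationWarning is my addition; the gdata original forwards silently):

    import warnings

    class Service:
        def SetAuthSubToken(self, auth_token, scopes=None):
            self._token, self._scopes = auth_token, scopes

        def _SetAuthSubToken(self, auth_token, scopes=None):
            """Deprecated, use SetAuthSubToken instead."""
            # Keep old callers working while steering them to the new name.
            warnings.warn('use SetAuthSubToken instead', DeprecationWarning)
            self.SetAuthSubToken(auth_token, scopes=scopes)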
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/difflib.py | python | SequenceMatcher.get_grouped_opcodes | (self, n=3) | Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]] | Isolate change clusters by eliminating ranges with no changes. | [
"Isolate",
"change",
"clusters",
"by",
"eliminating",
"ranges",
"with",
"no",
"changes",
"."
] | def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with upto n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group | [
"def",
"get_grouped_opcodes",
"(",
"self",
",",
"n",
"=",
"3",
")",
":",
"codes",
"=",
"self",
".",
"get_opcodes",
"(",
")",
"if",
"not",
"codes",
":",
"codes",
"=",
"[",
"(",
"\"equal\"",
",",
"0",
",",
"1",
",",
"0",
",",
"1",
")",
"]",
"# Fixup leading and trailing groups if they show no changes.",
"if",
"codes",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'equal'",
":",
"tag",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
"=",
"codes",
"[",
"0",
"]",
"codes",
"[",
"0",
"]",
"=",
"tag",
",",
"max",
"(",
"i1",
",",
"i2",
"-",
"n",
")",
",",
"i2",
",",
"max",
"(",
"j1",
",",
"j2",
"-",
"n",
")",
",",
"j2",
"if",
"codes",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'equal'",
":",
"tag",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
"=",
"codes",
"[",
"-",
"1",
"]",
"codes",
"[",
"-",
"1",
"]",
"=",
"tag",
",",
"i1",
",",
"min",
"(",
"i2",
",",
"i1",
"+",
"n",
")",
",",
"j1",
",",
"min",
"(",
"j2",
",",
"j1",
"+",
"n",
")",
"nn",
"=",
"n",
"+",
"n",
"group",
"=",
"[",
"]",
"for",
"tag",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
"in",
"codes",
":",
"# End the current group and start a new one whenever",
"# there is a large range with no changes.",
"if",
"tag",
"==",
"'equal'",
"and",
"i2",
"-",
"i1",
">",
"nn",
":",
"group",
".",
"append",
"(",
"(",
"tag",
",",
"i1",
",",
"min",
"(",
"i2",
",",
"i1",
"+",
"n",
")",
",",
"j1",
",",
"min",
"(",
"j2",
",",
"j1",
"+",
"n",
")",
")",
")",
"yield",
"group",
"group",
"=",
"[",
"]",
"i1",
",",
"j1",
"=",
"max",
"(",
"i1",
",",
"i2",
"-",
"n",
")",
",",
"max",
"(",
"j1",
",",
"j2",
"-",
"n",
")",
"group",
".",
"append",
"(",
"(",
"tag",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
")",
")",
"if",
"group",
"and",
"not",
"(",
"len",
"(",
"group",
")",
"==",
"1",
"and",
"group",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'equal'",
")",
":",
"yield",
"group"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/difflib.py#L587-L635 |
||
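This method is what produces unified-diff hunks: any run of equal lines longer than 2n is split so each group keeps at most n lines of context on either side of a change. A runnable call against the standard library:

    from difflib import SequenceMatcher

    a = list(map(str, range(1, 40)))
    b = a[:]
    b[8:8] = ['i']     # insertion
    b[20] += 'x'       # replacement
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n=3):
        # Each group is a list of (tag, i1, i2, j1, j2) opcodes.
        print(group)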
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/rpc.py | python | SocketIO.pollresponse | (self, myseq, wait) | Handle messages received on the socket.
Some messages received may be asynchronous 'call' or 'queue' requests,
and some may be responses for other threads.
'call' requests are passed to self.localcall() with the expectation of
immediate execution, during which time the socket is not serviced.
'queue' requests are used for tasks (which may block or hang) to be
processed in a different thread. These requests are fed into
request_queue by self.localcall(). Responses to queued requests are
taken from response_queue and sent across the link with the associated
sequence numbers. Messages in the queues are (sequence_number,
request/response) tuples and code using this module removing messages
from the request_queue is responsible for returning the correct
sequence number in the response_queue.
pollresponse() will loop until a response message with the myseq
sequence number is received, and will save other responses in
self.responses and notify the owning thread. | Handle messages received on the socket. | [
"Handle",
"messages",
"received",
"on",
"the",
"socket",
"."
] | def pollresponse(self, myseq, wait):
"""Handle messages received on the socket.
Some messages received may be asynchronous 'call' or 'queue' requests,
and some may be responses for other threads.
'call' requests are passed to self.localcall() with the expectation of
immediate execution, during which time the socket is not serviced.
'queue' requests are used for tasks (which may block or hang) to be
processed in a different thread. These requests are fed into
request_queue by self.localcall(). Responses to queued requests are
taken from response_queue and sent across the link with the associated
sequence numbers. Messages in the queues are (sequence_number,
request/response) tuples and code using this module removing messages
from the request_queue is responsible for returning the correct
sequence number in the response_queue.
pollresponse() will loop until a response message with the myseq
sequence number is received, and will save other responses in
self.responses and notify the owning thread.
"""
while 1:
# send queued response if there is one available
try:
qmsg = response_queue.get(0)
except queue.Empty:
pass
else:
seq, response = qmsg
message = (seq, ('OK', response))
self.putmessage(message)
# poll for message on link
try:
message = self.pollmessage(wait)
if message is None: # socket not ready
return None
except EOFError:
self.handle_EOF()
return None
except AttributeError:
return None
seq, resq = message
how = resq[0]
self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
# process or queue a request
if how in ("CALL", "QUEUE"):
self.debug("pollresponse:%d:localcall:call:" % seq)
response = self.localcall(seq, resq)
self.debug("pollresponse:%d:localcall:response:%s"
% (seq, response))
if how == "CALL":
self.putmessage((seq, response))
elif how == "QUEUE":
# don't acknowledge the 'queue' request!
pass
continue
# return if completed message transaction
elif seq == myseq:
return resq
# must be a response for a different thread:
else:
cv = self.cvars.get(seq, None)
# response involving unknown sequence number is discarded,
# probably intended for prior incarnation of server
if cv is not None:
cv.acquire()
self.responses[seq] = resq
cv.notify()
cv.release()
continue | [
"def",
"pollresponse",
"(",
"self",
",",
"myseq",
",",
"wait",
")",
":",
"while",
"1",
":",
"# send queued response if there is one available",
"try",
":",
"qmsg",
"=",
"response_queue",
".",
"get",
"(",
"0",
")",
"except",
"queue",
".",
"Empty",
":",
"pass",
"else",
":",
"seq",
",",
"response",
"=",
"qmsg",
"message",
"=",
"(",
"seq",
",",
"(",
"'OK'",
",",
"response",
")",
")",
"self",
".",
"putmessage",
"(",
"message",
")",
"# poll for message on link",
"try",
":",
"message",
"=",
"self",
".",
"pollmessage",
"(",
"wait",
")",
"if",
"message",
"is",
"None",
":",
"# socket not ready",
"return",
"None",
"except",
"EOFError",
":",
"self",
".",
"handle_EOF",
"(",
")",
"return",
"None",
"except",
"AttributeError",
":",
"return",
"None",
"seq",
",",
"resq",
"=",
"message",
"how",
"=",
"resq",
"[",
"0",
"]",
"self",
".",
"debug",
"(",
"\"pollresponse:%d:myseq:%s\"",
"%",
"(",
"seq",
",",
"myseq",
")",
")",
"# process or queue a request",
"if",
"how",
"in",
"(",
"\"CALL\"",
",",
"\"QUEUE\"",
")",
":",
"self",
".",
"debug",
"(",
"\"pollresponse:%d:localcall:call:\"",
"%",
"seq",
")",
"response",
"=",
"self",
".",
"localcall",
"(",
"seq",
",",
"resq",
")",
"self",
".",
"debug",
"(",
"\"pollresponse:%d:localcall:response:%s\"",
"%",
"(",
"seq",
",",
"response",
")",
")",
"if",
"how",
"==",
"\"CALL\"",
":",
"self",
".",
"putmessage",
"(",
"(",
"seq",
",",
"response",
")",
")",
"elif",
"how",
"==",
"\"QUEUE\"",
":",
"# don't acknowledge the 'queue' request!",
"pass",
"continue",
"# return if completed message transaction",
"elif",
"seq",
"==",
"myseq",
":",
"return",
"resq",
"# must be a response for a different thread:",
"else",
":",
"cv",
"=",
"self",
".",
"cvars",
".",
"get",
"(",
"seq",
",",
"None",
")",
"# response involving unknown sequence number is discarded,",
"# probably intended for prior incarnation of server",
"if",
"cv",
"is",
"not",
"None",
":",
"cv",
".",
"acquire",
"(",
")",
"self",
".",
"responses",
"[",
"seq",
"]",
"=",
"resq",
"cv",
".",
"notify",
"(",
")",
"cv",
".",
"release",
"(",
")",
"continue"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/rpc.py#L398-L469 |
||
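The subtle part of the loop above is response routing: a reply whose sequence number belongs to another thread is parked in self.responses and that thread is woken via its per-sequence condition variable. A minimal sketch of that hand-off with the socket machinery stripped away (names are mine, not the rpc module's):

    import threading

    responses, cvars = {}, {}

    def deliver(seq, response):
        # Mirror the tail of pollresponse(): stash the reply, wake the waiter.
        cv = cvars.get(seq)
        if cv is not None:
            with cv:
                responses[seq] = response
                cv.notify()

    def wait_for(seq):
        cv = cvars.setdefault(seq, threading.Condition())
        with cv:
            while seq not in responses:
                cv.wait()
            return responses.pop(seq)

    cvars[7] = threading.Condition()
    threading.Thread(target=deliver, args=(7, ('OK', 42))).start()
    print(wait_for(7))  # ('OK', 42)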
qboticslabs/mastering_ros | d83e78f30acc45b0f18522c1d5fae3a7f52974b9 | chapter_9_codes/chefbot/chefbot/chefbot_bringup/scripts/bkup_working/GoalsSequencer.py | python | SimpleGoalsFileParser._ExtractValue | (self, variableName, linePart) | return float(nameValueParts[1].strip()) | Takes as input text like this:
x: 0.73444
Checks that the specified variableName matches the name of the variable in the string.
then extracts the float value after the ':' separator | Takes as input text like this:
x: 0.73444
Checks that the specified variableName matches the name of the variable in the string.
then extracts the float value after the ':' separator | [
"Takes",
"as",
"input",
"text",
"like",
"this",
":",
"x",
":",
"0",
".",
"73444",
"Checks",
"that",
"the",
"specified",
"variableName",
"matches",
"the",
"name",
"of",
"the",
"variable",
"in",
"the",
"string",
".",
"then",
"extracts",
"the",
"float",
"value",
"of",
"the",
"=",
"sign"
] | def _ExtractValue(self, variableName, linePart):
'''
Takes as input text like this:
x: 0.73444
Checks that the specified variableName matches the name of the variable in the string.
then extracts the float value after the ':' separator
'''
nameValueParts = linePart.split(':')
if nameValueParts[0].strip() != variableName:
raise NameError('Expected variable name ' + variableName + ' but found ' + nameValueParts[0].strip())
return float(nameValueParts[1].strip()) | [
"def",
"_ExtractValue",
"(",
"self",
",",
"variableName",
",",
"linePart",
")",
":",
"nameValueParts",
"=",
"linePart",
".",
"split",
"(",
"':'",
")",
"if",
"nameValueParts",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"!=",
"variableName",
":",
"raise",
"NameError",
"(",
"'Expected variable name '",
"+",
"variableName",
"+",
"' but found '",
"+",
"nameValueParts",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"return",
"float",
"(",
"nameValueParts",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")"
] | https://github.com/qboticslabs/mastering_ros/blob/d83e78f30acc45b0f18522c1d5fae3a7f52974b9/chapter_9_codes/chefbot/chefbot/chefbot_bringup/scripts/bkup_working/GoalsSequencer.py#L324-L338 |
|
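A quick check of the parser above against its own example input; it splits on ':', validates the variable name, then converts the remainder. A compact restatement using str.partition:

    def extract_value(variable_name, line_part):
        # 'x: 0.73444' -> name 'x', value 0.73444
        name, _, value = line_part.partition(':')
        if name.strip() != variable_name:
            raise NameError('Expected variable name ' + variable_name)
        return float(value)

    assert extract_value('x', 'x: 0.73444') == 0.73444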
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/groupby/groupby.py | python | BaseGroupBy.__iter__ | (self) | return self.grouper.get_iterator(self.obj, axis=self.axis) | Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group | Groupby iterator. | [
"Groupby",
"iterator",
"."
] | def __iter__(self) -> Iterator[tuple[Hashable, FrameOrSeries]]:
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis) | [
"def",
"__iter__",
"(",
"self",
")",
"->",
"Iterator",
"[",
"tuple",
"[",
"Hashable",
",",
"FrameOrSeries",
"]",
"]",
":",
"return",
"self",
".",
"grouper",
".",
"get_iterator",
"(",
"self",
".",
"obj",
",",
"axis",
"=",
"self",
".",
"axis",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/groupby/groupby.py#L759-L768 |
|
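Because __iter__ delegates to the grouper, a GroupBy object unpacks directly in a for loop as (label, sub-frame) pairs. A short usage sketch (assumes pandas is installed):

    import pandas as pd

    df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
    for name, group in df.groupby('key'):
        # name is the group label, group the matching sub-DataFrame.
        print(name, group['val'].sum())   # a 3, then b 3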
epiqc/ScaffCC | 66a79944ee4cd116b27bc1a69137276885461db8 | clang/tools/scan-build-py/libscanbuild/analyze.py | python | create_global_ctu_extdef_map | (extdef_map_lines) | return mangled_ast_pairs | Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples. | Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU. | [
"Takes",
"iterator",
"of",
"individual",
"external",
"definition",
"maps",
"and",
"creates",
"a",
"global",
"map",
"keeping",
"only",
"unique",
"names",
".",
"We",
"leave",
"conflicting",
"names",
"out",
"of",
"CTU",
"."
] | def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs | [
"def",
"create_global_ctu_extdef_map",
"(",
"extdef_map_lines",
")",
":",
"mangled_to_asts",
"=",
"defaultdict",
"(",
"set",
")",
"for",
"line",
"in",
"extdef_map_lines",
":",
"mangled_name",
",",
"ast_file",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"1",
")",
"mangled_to_asts",
"[",
"mangled_name",
"]",
".",
"add",
"(",
"ast_file",
")",
"mangled_ast_pairs",
"=",
"[",
"]",
"for",
"mangled_name",
",",
"ast_files",
"in",
"mangled_to_asts",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"ast_files",
")",
"==",
"1",
":",
"mangled_ast_pairs",
".",
"append",
"(",
"(",
"mangled_name",
",",
"next",
"(",
"iter",
"(",
"ast_files",
")",
")",
")",
")",
"return",
"mangled_ast_pairs"
] | https://github.com/epiqc/ScaffCC/blob/66a79944ee4cd116b27bc1a69137276885461db8/clang/tools/scan-build-py/libscanbuild/analyze.py#L136-L160 |
|
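Fed two map lines that agree and two that collide, the function keeps only the unambiguous name. A self-contained re-run of the same logic, trimmed of comments:

    from collections import defaultdict

    def create_global_ctu_extdef_map(lines):
        mangled_to_asts = defaultdict(set)
        for line in lines:
            mangled_name, ast_file = line.strip().split(' ', 1)
            mangled_to_asts[mangled_name].add(ast_file)
        # Names defined in more than one AST file are ambiguous: drop them.
        return [(m, next(iter(fs)))
                for m, fs in mangled_to_asts.items() if len(fs) == 1]

    lines = ['_Zfoo a.ast', '_Zbar b.ast', '_Zbar c.ast']
    print(create_global_ctu_extdef_map(lines))  # [('_Zfoo', 'a.ast')]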
root-project/root | fcd3583bb14852bf2e8cd2415717cbaac0e75896 | interpreter/llvm/src/tools/clang/utils/check_cfc/obj_diff.py | python | compare_exact | (objfilea, objfileb) | return filecmp.cmp(objfilea, objfileb) | Byte for byte comparison between object files.
Returns True if equal, False otherwise. | Byte for byte comparison between object files.
Returns True if equal, False otherwise. | [
"Byte",
"for",
"byte",
"comparison",
"between",
"object",
"files",
".",
"Returns",
"True",
"if",
"equal",
"False",
"otherwise",
"."
] | def compare_exact(objfilea, objfileb):
"""Byte for byte comparison between object files.
Returns True if equal, False otherwise.
"""
return filecmp.cmp(objfilea, objfileb) | [
"def",
"compare_exact",
"(",
"objfilea",
",",
"objfileb",
")",
":",
"return",
"filecmp",
".",
"cmp",
"(",
"objfilea",
",",
"objfileb",
")"
] | https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/interpreter/llvm/src/tools/clang/utils/check_cfc/obj_diff.py#L86-L90 |
|
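One caveat: filecmp.cmp defaults to shallow=True, which can report two files equal from matching os.stat() signatures alone; pass shallow=False to get the byte-for-byte guarantee the docstring above promises:

    import filecmp, os, tempfile

    a = tempfile.NamedTemporaryFile(delete=False); a.write(b'abc'); a.close()
    b = tempfile.NamedTemporaryFile(delete=False); b.write(b'abc'); b.close()
    # shallow=False forces content comparison instead of stat comparison.
    print(filecmp.cmp(a.name, b.name, shallow=False))  # True
    os.unlink(a.name); os.unlink(b.name)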
hfinkel/llvm-project-cxxjit | 91084ef018240bbb8e24235ff5cd8c355a9c1a1e | clang/bindings/python/clang/cindex.py | python | Config.set_library_path | (path) | Set the path in which to search for libclang | Set the path in which to search for libclang | [
"Set",
"the",
"path",
"in",
"which",
"to",
"search",
"for",
"libclang"
] | def set_library_path(path):
"""Set the path in which to search for libclang"""
if Config.loaded:
raise Exception("library path must be set before before using " \
"any other functionalities in libclang.")
Config.library_path = fspath(path) | [
"def",
"set_library_path",
"(",
"path",
")",
":",
"if",
"Config",
".",
"loaded",
":",
"raise",
"Exception",
"(",
"\"library path must be set before before using \"",
"\"any other functionalities in libclang.\"",
")",
"Config",
".",
"library_path",
"=",
"fspath",
"(",
"path",
")"
] | https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L4098-L4104 |
||
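The guard enforces configure-before-use: the path is only settable while libclang is still unloaded, so it has to be the first cindex call. A usage sketch (the path shown is an example; it requires a matching libclang build on disk):

    from clang.cindex import Config, Index

    # Must run before anything else in clang.cindex loads the library.
    Config.set_library_path('/usr/lib/llvm-14/lib')  # example location
    index = Index.create()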
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | catboost/python-package/catboost/core.py | python | Pool._check_baseline_shape | (self, baseline, samples_count) | Check baseline length and dimension. | Check baseline length and dimension. | [
"Check",
"baseline",
"length",
"and",
"dimension",
"."
] | def _check_baseline_shape(self, baseline, samples_count):
"""
Check baseline length and dimension.
"""
if len(baseline) != samples_count:
raise CatBoostError("Length of baseline={} and length of data={} are different.".format(len(baseline), samples_count))
if not isinstance(baseline[0], Iterable) or isinstance(baseline[0], STRING_TYPES):
raise CatBoostError("Baseline must be 2 dimensional data, 1 column for each class.")
try:
if np.array(baseline).dtype not in (np.dtype('float'), np.dtype('float32'), np.dtype('int')):
raise CatBoostError()
except CatBoostError:
raise CatBoostError("Invalid baseline value type={}: must be float or int.".format(np.array(baseline).dtype)) | [
"def",
"_check_baseline_shape",
"(",
"self",
",",
"baseline",
",",
"samples_count",
")",
":",
"if",
"len",
"(",
"baseline",
")",
"!=",
"samples_count",
":",
"raise",
"CatBoostError",
"(",
"\"Length of baseline={} and length of data={} are different.\"",
".",
"format",
"(",
"len",
"(",
"baseline",
")",
",",
"samples_count",
")",
")",
"if",
"not",
"isinstance",
"(",
"baseline",
"[",
"0",
"]",
",",
"Iterable",
")",
"or",
"isinstance",
"(",
"baseline",
"[",
"0",
"]",
",",
"STRING_TYPES",
")",
":",
"raise",
"CatBoostError",
"(",
"\"Baseline must be 2 dimensional data, 1 column for each class.\"",
")",
"try",
":",
"if",
"np",
".",
"array",
"(",
"baseline",
")",
".",
"dtype",
"not",
"in",
"(",
"np",
".",
"dtype",
"(",
"'float'",
")",
",",
"np",
".",
"dtype",
"(",
"'float32'",
")",
",",
"np",
".",
"dtype",
"(",
"'int'",
")",
")",
":",
"raise",
"CatBoostError",
"(",
")",
"except",
"CatBoostError",
":",
"raise",
"CatBoostError",
"(",
"\"Invalid baseline value type={}: must be float or int.\"",
".",
"format",
"(",
"np",
".",
"array",
"(",
"baseline",
")",
".",
"dtype",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/catboost/python-package/catboost/core.py#L873-L885 |
||
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/peacock/base/TabPluginManager.py | python | TabPluginManager.addObject | (self, widget) | Method for adding a widget to the Manager. (override)
Args:
widget[QWidget]: The widget to add as a new tab. | Method for adding a widget to the Manager. (override) | [
"Method",
"for",
"adding",
"a",
"widget",
"to",
"the",
"Manager",
".",
"(",
"override",
")"
] | def addObject(self, widget):
"""
Method for adding a widget to the Manager. (override)
Args:
widget[QWidget]: The widget to add as a new tab.
"""
index = self.addTab(widget, widget.tabName())
widget.setTabIndex(index, self.currentChanged) | [
"def",
"addObject",
"(",
"self",
",",
"widget",
")",
":",
"index",
"=",
"self",
".",
"addTab",
"(",
"widget",
",",
"widget",
".",
"tabName",
"(",
")",
")",
"widget",
".",
"setTabIndex",
"(",
"index",
",",
"self",
".",
"currentChanged",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/base/TabPluginManager.py#L46-L54 |
||
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/buildscripts/cpplint.py | python | Search | (pattern, s) | return _regexp_compile_cache[pattern].search(s) | Searches the string for the pattern, caching the compiled regexp. | Searches the string for the pattern, caching the compiled regexp. | [
"Searches",
"the",
"string",
"for",
"the",
"pattern",
"caching",
"the",
"compiled",
"regexp",
"."
] | def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s) | [
"def",
"Search",
"(",
"pattern",
",",
"s",
")",
":",
"if",
"pattern",
"not",
"in",
"_regexp_compile_cache",
":",
"_regexp_compile_cache",
"[",
"pattern",
"]",
"=",
"sre_compile",
".",
"compile",
"(",
"pattern",
")",
"return",
"_regexp_compile_cache",
"[",
"pattern",
"]",
".",
"search",
"(",
"s",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/cpplint.py#L580-L584 |
|
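The cache is plain memoization keyed on the pattern string, so each regex is compiled once per process no matter how often lint checks reuse it. The same idea with the public re module (cpplint itself calls sre_compile directly):

    import re

    _regexp_compile_cache = {}

    def search(pattern, s):
        # Compile on first use, then reuse the cached regex object.
        if pattern not in _regexp_compile_cache:
            _regexp_compile_cache[pattern] = re.compile(pattern)
        return _regexp_compile_cache[pattern].search(s)

    print(bool(search(r'\d+', 'line 42')))  # True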
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/files.py | python | FileLocator.relative_filename | (self, filename) | return filename | Return the relative form of `filename`.
The filename will be relative to the current directory when the
`FileLocator` was constructed. | Return the relative form of `filename`. | [
"Return",
"the",
"relative",
"form",
"of",
"filename",
"."
] | def relative_filename(self, filename):
"""Return the relative form of `filename`.
The filename will be relative to the current directory when the
`FileLocator` was constructed.
"""
if filename.startswith(self.relative_dir):
filename = filename.replace(self.relative_dir, "")
return filename | [
"def",
"relative_filename",
"(",
"self",
",",
"filename",
")",
":",
"if",
"filename",
".",
"startswith",
"(",
"self",
".",
"relative_dir",
")",
":",
"filename",
"=",
"filename",
".",
"replace",
"(",
"self",
".",
"relative_dir",
",",
"\"\"",
")",
"return",
"filename"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/files.py#L22-L31 |
|
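Worth noting: str.replace removes the directory string wherever it appears, not only as a leading prefix; slicing after startswith, or os.path.relpath, avoids that. A small comparison (paths are illustrative):

    import os

    relative_dir = '/home/user/project/'
    filename = '/home/user/project/pkg/mod.py'

    if filename.startswith(relative_dir):       # prefix-only stripping
        print(filename[len(relative_dir):])     # pkg/mod.py
    print(os.path.relpath(filename, relative_dir))  # pkg/mod.py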
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | example/nce-loss/text8_data.py | python | _get_subword_units | (token, gram) | return [t[i:i + gram] for i in range(0, len(t) - gram + 1)] | Return subword-units presentation, given a word/token. | Return subword-units presentation, given a word/token. | [
"Return",
"subword",
"-",
"units",
"presentation",
"given",
"a",
"word",
"/",
"token",
"."
] | def _get_subword_units(token, gram):
"""Return subword-units presentation, given a word/token.
"""
if token == '</s>': # special token for padding purpose.
return [token]
t = '#' + token + '#'
return [t[i:i + gram] for i in range(0, len(t) - gram + 1)] | [
"def",
"_get_subword_units",
"(",
"token",
",",
"gram",
")",
":",
"if",
"token",
"==",
"'</s>'",
":",
"# special token for padding purpose.",
"return",
"[",
"token",
"]",
"t",
"=",
"'#'",
"+",
"token",
"+",
"'#'",
"return",
"[",
"t",
"[",
"i",
":",
"i",
"+",
"gram",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"t",
")",
"-",
"gram",
"+",
"1",
")",
"]"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/nce-loss/text8_data.py#L68-L74 |
|
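The extraction above is the fastText-style character n-gram trick: pad the token with '#' boundary markers, then slide a window of width gram across it. A runnable copy:

    def get_subword_units(token, gram):
        if token == '</s>':            # padding token passes through whole
            return [token]
        t = '#' + token + '#'
        # Every contiguous slice of length gram, boundaries included.
        return [t[i:i + gram] for i in range(0, len(t) - gram + 1)]

    print(get_subword_units('cat', 3))  # ['#ca', 'cat', 'at#']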
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | DQM/Integration/scripts/contentValuesLib.py | python | getSummaryValues | (file_name, translate, filters = None) | return (run_number, result) | Method to extract keys from root file and return dict | Method to extract keys from root file and return dict | [
"Method",
"to",
"extract",
"keys",
"from",
"root",
"file",
"and",
"return",
"dict"
] | def getSummaryValues(file_name, translate, filters = None):
""" Method to extract keys from root file and return dict """
ROOT.gROOT.Reset()
run_number = None
result = {}
f = ROOT.TFile(file_name, 'READ')
root = f.GetDirectory("DQMData")
if root == None: return (run_number, result)
run = None
for key in root.GetListOfKeys():
if re.match("^Run [0-9]+$", key.ReadObj().GetName()) and key.IsFolder():
run_number = int(re.sub("^Run ", "", key.ReadObj().GetName()))
run = key.ReadObj()
break
if run == None: return (run_number, result)
for sub in run.GetListOfKeys():
sub_name = sub.ReadObj().GetName()
if sub_name not in SUBSYSTEMS: continue
sub_key = sub_name
if translate:
sub_key = SUBSYSTEMS[sub_name]
if filters != None:
if not re.match(filters[0], sub_key):
continue
if sub_key not in result:
result[sub_key] = {}
evInfo = sub.ReadObj().GetDirectory("Run summary/EventInfo")
if evInfo == None: continue
for folder_name in FOLDERS.keys():
folder = evInfo.GetDirectory(folder_name)
if folder == None: continue
folder_id = folder_name
if translate:
folder_id = FOLDERS[folder_name][0]
if filters != None:
if not re.match(filters[1], folder_id):
continue
if folder_id not in result[sub_key]:
result[sub_key][folder_id] = {}
value_filter = None
if filters != None:
value_filter = filters[2]
writeValues(folder, result[sub_key][folder_id], None, value_filter)
writeValues(evInfo, result[sub_key][folder_id], {FOLDERS[folder_name][1]: 'Summary'}, value_filter)
f.Close()
return (run_number, result) | [
"def",
"getSummaryValues",
"(",
"file_name",
",",
"translate",
",",
"filters",
"=",
"None",
")",
":",
"ROOT",
".",
"gROOT",
".",
"Reset",
"(",
")",
"run_number",
"=",
"None",
"result",
"=",
"{",
"}",
"f",
"=",
"ROOT",
".",
"TFile",
"(",
"file_name",
",",
"'READ'",
")",
"root",
"=",
"f",
".",
"GetDirectory",
"(",
"\"DQMData\"",
")",
"if",
"root",
"==",
"None",
":",
"return",
"(",
"run_number",
",",
"result",
")",
"run",
"=",
"None",
"for",
"key",
"in",
"root",
".",
"GetListOfKeys",
"(",
")",
":",
"if",
"re",
".",
"match",
"(",
"\"^Run [0-9]+$\"",
",",
"key",
".",
"ReadObj",
"(",
")",
".",
"GetName",
"(",
")",
")",
"and",
"key",
".",
"IsFolder",
"(",
")",
":",
"run_number",
"=",
"int",
"(",
"re",
".",
"sub",
"(",
"\"^Run \"",
",",
"\"\"",
",",
"key",
".",
"ReadObj",
"(",
")",
".",
"GetName",
"(",
")",
")",
")",
"run",
"=",
"key",
".",
"ReadObj",
"(",
")",
"break",
"if",
"run",
"==",
"None",
":",
"return",
"(",
"run_number",
",",
"result",
")",
"for",
"sub",
"in",
"run",
".",
"GetListOfKeys",
"(",
")",
":",
"sub_name",
"=",
"sub",
".",
"ReadObj",
"(",
")",
".",
"GetName",
"(",
")",
"if",
"sub_name",
"not",
"in",
"SUBSYSTEMS",
":",
"continue",
"sub_key",
"=",
"sub_name",
"if",
"translate",
":",
"sub_key",
"=",
"SUBSYSTEMS",
"[",
"sub_name",
"]",
"if",
"filters",
"!=",
"None",
":",
"if",
"not",
"re",
".",
"match",
"(",
"filters",
"[",
"0",
"]",
",",
"sub_key",
")",
":",
"continue",
"if",
"sub_key",
"not",
"in",
"result",
":",
"result",
"[",
"sub_key",
"]",
"=",
"{",
"}",
"evInfo",
"=",
"sub",
".",
"ReadObj",
"(",
")",
".",
"GetDirectory",
"(",
"\"Run summary/EventInfo\"",
")",
"if",
"evInfo",
"==",
"None",
":",
"continue",
"for",
"folder_name",
"in",
"FOLDERS",
".",
"keys",
"(",
")",
":",
"folder",
"=",
"evInfo",
".",
"GetDirectory",
"(",
"folder_name",
")",
"if",
"folder",
"==",
"None",
":",
"continue",
"folder_id",
"=",
"folder_name",
"if",
"translate",
":",
"folder_id",
"=",
"FOLDERS",
"[",
"folder_name",
"]",
"[",
"0",
"]",
"if",
"filters",
"!=",
"None",
":",
"if",
"not",
"re",
".",
"match",
"(",
"filters",
"[",
"1",
"]",
",",
"folder_id",
")",
":",
"continue",
"if",
"folder_id",
"not",
"in",
"result",
"[",
"sub_key",
"]",
":",
"result",
"[",
"sub_key",
"]",
"[",
"folder_id",
"]",
"=",
"{",
"}",
"value_filter",
"=",
"None",
"if",
"filters",
"!=",
"None",
":",
"value_filter",
"=",
"filters",
"[",
"2",
"]",
"writeValues",
"(",
"folder",
",",
"result",
"[",
"sub_key",
"]",
"[",
"folder_id",
"]",
",",
"None",
",",
"value_filter",
")",
"writeValues",
"(",
"evInfo",
",",
"result",
"[",
"sub_key",
"]",
"[",
"folder_id",
"]",
",",
"{",
"FOLDERS",
"[",
"folder_name",
"]",
"[",
"1",
"]",
":",
"'Summary'",
"}",
",",
"value_filter",
")",
"f",
".",
"Close",
"(",
")",
"return",
"(",
"run_number",
",",
"result",
")"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/DQM/Integration/scripts/contentValuesLib.py#L41-L106 |
|
fastmachinelearning/hls4ml | 58d761006250deed721d85fefea91201708f2165 | hls4ml/writer/vivado_accelerator_writer.py | python | VivadoAcceleratorWriter.write_axi_wrapper | (self, model) | Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces
Args:
model : The ModelGraph to write the wrapper for | Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces
Args:
model : The ModelGraph to write the wrapper for | [
"Write",
"a",
"top",
"level",
"HLS",
"C",
"++",
"file",
"to",
"wrap",
"the",
"hls4ml",
"project",
"with",
"AXI",
"interfaces",
"Args",
":",
"model",
":",
"The",
"ModelGraph",
"to",
"write",
"the",
"wrapper",
"for"
] | def write_axi_wrapper(self, model):
''' Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces
Args:
model : The ModelGraph to write the wrapper for
'''
inp_axi_t, out_axi_t, inp, out = self.vivado_accelerator_config.get_corrected_types()
indent = ' '
#######################
## myproject_axi.h
#######################
filedir = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(filedir, '../templates/vivado_accelerator/myproject_axi.h'), 'r')
fout = open('{}/firmware/{}_axi.h'.format(model.config.get_output_dir(), model.config.get_project_name()), 'w')
for line in f.readlines():
if 'MYPROJECT' in line:
newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper()))
elif '//hls-fpga-machine-learning insert include' in line:
newline = '#include "{}.h"\n'.format(model.config.get_project_name())
elif 'void myproject(' in line:
newline = 'void {}_axi(\n'.format(model.config.get_project_name())
elif '//hls-fpga-machine-learning insert definitions' in line:
newline = ''
newline += 'static const unsigned N_IN = {};\n'.format(inp.size())
newline += 'static const unsigned N_OUT = {};\n'.format(out.size())
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += 'typedef {} T_in;\n'.format(inp_axi_t)
newline += 'typedef {} T_out;\n'.format(out_axi_t)
newline += 'typedef struct in_struct {\n' + \
indent + 'T_in data;\n' + \
indent + 'ap_uint<1> last;\n' + \
indent + 'in_struct(const T_in& data, const ap_uint<1>& last){this->data = data; this->last = last;};\n' + \
indent + 'in_struct(){this->data = 0; this->last = 0;};\n' + \
indent + 'friend std::ostream& operator<<(std::ostream& stream, const in_struct& in)\n' + \
indent + '{ return stream << "{ data: " << in.data << ", last: " << in.last << " }" << std::endl; }\n' + \
indent + 'operator float() const {return this->data;}\n' + \
indent + 'operator double() const {return this->data;}\n' + \
indent + 'in_struct(float data) {this->data = data; this->last = 0;}\n' + \
indent + 'in_struct(double data) {this->data = data; this->last = 0;}\n' + \
'} input_axi_t;\n'
newline += 'typedef struct out_struct {\n' + \
indent + 'T_out data;\n' + \
indent + 'ap_uint<1> last;\n' + \
indent + 'out_struct(const T_out& data, const ap_uint<1>& last){this->data = data; this->last = last;};\n' + \
indent + 'out_struct(){this->data = 0; this->last = 0;};\n' + \
indent + 'friend std::ostream& operator<<(std::ostream& stream, const out_struct& out)\n' + \
indent + '{ return stream << "{ data: " << out.data << ", last: " << out.last << " }" << std::endl; }\n' + \
indent + 'operator float() const {return this->data;}\n' + \
indent + 'operator double() const {return this->data;}\n' + \
indent + 'out_struct(float data) {this->data = data; this->last = 0;}\n' + \
indent + 'out_struct(double data) {this->data = data; this->last = 0;}\n' + \
'} output_axi_t;\n'
else:
newline += 'typedef {} input_axi_t;\n'.format(inp_axi_t)
newline += 'typedef {} output_axi_t;\n'.format(out_axi_t)
else:
newline = line
fout.write(newline)
f.close()
fout.close()
#######################
## myproject_axi.cpp
#######################
f = open(os.path.join(filedir, '../templates/vivado_accelerator/myproject_axi.cpp'), 'r')
fout = open('{}/firmware/{}_axi.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),
'w')
io_type = model.config.get_config_value("IOType")
for line in f.readlines():
if 'void myproject(' in line:
newline = 'void {}_axi(\n'.format(model.config.get_project_name())
elif '//hls-fpga-machine-learning insert include' in line:
newline = '#include "{}_axi.h"\n'.format(model.config.get_project_name())
elif '//hls-fpga-machine-learning insert local vars' in line:
newline = ''
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += indent + 'bool is_last = false;\n'
if io_type == 'io_parallel':
newline += indent + inp.type.name + ' in_local[N_IN];\n'
newline += indent + out.type.name + ' out_local[N_OUT];\n'
elif io_type == 'io_stream':
newline += indent + 'hls::stream<' + inp.type.name + '> in_local("input_1");\n'
newline += indent + 'hls::stream<' + out.type.name + '> out_local("output_1");\n\n'
newline += indent + '#pragma HLS STREAM variable=in_local depth=N_IN\n'
newline += indent + '#pragma HLS STREAM variable=out_local depth=N_OUT\n'
elif '//hls-fpga-machine-learning insert call' in line:
newline = indent + '{}(in_local, out_local, in_size, out_size);\n'.format(
model.config.get_project_name())
elif '//hls-fpga-machine-learning insert interface' in line:
if self.vivado_accelerator_config.get_interface() == 'axi_lite':
newline = ''
newline += indent + '#pragma HLS INTERFACE ap_ctrl_none port=return\n'
newline += indent + '#pragma HLS INTERFACE s_axilite port=in\n'
newline += indent + '#pragma HLS INTERFACE s_axilite port=out\n'
elif self.vivado_accelerator_config.get_interface() == 'axi_master':
newline = ''
newline += indent + '#pragma HLS INTERFACE s_axilite port=return bundle=CTRL_BUS\n'
newline += indent + '#pragma HLS INTERFACE m_axi depth=N_IN port=in offset=slave bundle=IN_BUS\n'
newline += indent + '#pragma HLS INTERFACE m_axi depth=N_OUT port=out offset=slave bundle=OUT_BUS\n'
elif self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline = ''
newline += indent + '#pragma HLS INTERFACE axis port=in\n'
newline += indent + '#pragma HLS INTERFACE axis port=out\n'
newline += indent + '#pragma HLS INTERFACE ap_ctrl_none port=return\n'
if model.config.get_config_value("IOType") == 'io_stream':
newline += indent + '#pragma HLS DATAFLOW\n'
elif '//hls-fpga-machine-learning insert enqueue' in line:
io_type = model.config.get_config_value("IOType")
if io_type == 'io_parallel':
newline = ''
newline += indent + 'for(unsigned i = 0; i < N_IN; i++){\n'
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += indent + indent + '#pragma HLS PIPELINE\n'
newline += indent + indent + 'in_local[i] = in[i].data; // Read input with cast\n'
newline += indent + indent + 'is_last |= (in[i].last == 1)? true: false;\n'
else:
newline += indent + indent + '#pragma HLS UNROLL\n'
newline += indent + indent + 'in_local[i] = in[i]; // Read input with cast\n'
newline += indent + '}\n'
elif io_type == 'io_stream':
newline = ''
newline += indent + 'for(unsigned i = 0; i < N_IN / {input_t}::size; ++i) {{\n'
# newline += indent + indent + '#pragma HLS PIPELINE\n'
newline += indent + indent + '{input_t} ctype;\n'
newline += indent + indent + '#pragma HLS DATA_PACK variable=ctype\n'
newline += indent + indent + 'for(unsigned j = 0; j < {input_t}::size; j++) {{\n'
# newline += indent + indent + indent + '#pragma HLS UNROLL\n'
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += indent + indent + indent + 'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j].data);\n'
newline += indent + indent + indent + 'is_last |= (in[i * input_t::size + j].last == 1)? true : false;\n'
else:
newline += indent + indent + indent + 'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j]);\n'
newline += indent + indent + '}}\n'
newline += indent + indent + 'in_local.write(ctype);\n'
newline += indent + '}}\n'
newline = newline.format(input_t=inp.type.name)
elif '//hls-fpga-machine-learning insert dequeue' in line:
io_type = model.config.get_config_value("IOType")
if io_type == 'io_parallel':
newline = ''
newline += indent + 'for(unsigned i = 0; i < N_OUT; i++){\n'
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += indent + indent + '#pragma HLS PIPELINE\n'
newline += indent + indent + 'out[i].data = out_local[i]; // Write output with cast\n'
newline += indent + indent + 'out[i].last = (is_last && (i == N_OUT - 1))? true : false;\n'
else:
newline += indent + indent + '#pragma HLS UNROLL\n'
newline += indent + indent + 'out[i] = out_local[i]; // Write output with cast\n'
newline += indent + '}\n'
elif io_type == 'io_stream':
newline = ''
newline += indent + 'for(unsigned i = 0; i < N_OUT / {result_t}::size; ++i) {{\n'
# newline += indent + indent + '#pragma HLS PIPELINE\n'
newline += indent + indent + '{result_t} ctype = out_local.read();\n'
newline += indent + indent + 'for(unsigned j = 0; j < {result_t}::size; j++) {{\n'
# newline += indent + indent + indent + '#pragma HLS UNROLL\n'
if self.vivado_accelerator_config.get_interface() == 'axi_stream':
newline += indent + indent + indent + 'bool last = (is_last && (i * {result_t}::size + j == N_OUT - 1)) ? true : false;\n'
newline += indent + indent + indent + 'out[i * {result_t}::size + j] = output_axi_t(ctype[j], last);\n'
else:
newline += indent + indent + indent + 'out[i * {result_t}::size + j] = output_axi_t(ctype[j]);\n'
newline += indent + indent + '}}\n'
newline += indent + '}}\n'
newline = newline.format(result_t=out.type.name)
else:
newline = line
fout.write(newline)
f.close()
fout.close() | [
"def",
"write_axi_wrapper",
"(",
"self",
",",
"model",
")",
":",
"inp_axi_t",
",",
"out_axi_t",
",",
"inp",
",",
"out",
"=",
"self",
".",
"vivado_accelerator_config",
".",
"get_corrected_types",
"(",
")",
"indent",
"=",
"' '",
"#######################",
"## myproject_axi.h",
"#######################",
"filedir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filedir",
",",
"'../templates/vivado_accelerator/myproject_axi.h'",
")",
",",
"'r'",
")",
"fout",
"=",
"open",
"(",
"'{}/firmware/{}_axi.h'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_output_dir",
"(",
")",
",",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
",",
"'w'",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"if",
"'MYPROJECT'",
"in",
"line",
":",
"newline",
"=",
"line",
".",
"replace",
"(",
"'MYPROJECT'",
",",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
".",
"upper",
"(",
")",
")",
")",
"elif",
"'//hls-fpga-machine-learning insert include'",
"in",
"line",
":",
"newline",
"=",
"'#include \"{}.h\"\\n'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
"elif",
"'void myproject('",
"in",
"line",
":",
"newline",
"=",
"'void {}_axi(\\n'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
"elif",
"'//hls-fpga-machine-learning insert definitions'",
"in",
"line",
":",
"newline",
"=",
"''",
"newline",
"+=",
"'static const unsigned N_IN = {};\\n'",
".",
"format",
"(",
"inp",
".",
"size",
"(",
")",
")",
"newline",
"+=",
"'static const unsigned N_OUT = {};\\n'",
".",
"format",
"(",
"out",
".",
"size",
"(",
")",
")",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"'typedef {} T_in;\\n'",
".",
"format",
"(",
"inp_axi_t",
")",
"newline",
"+=",
"'typedef {} T_out;\\n'",
".",
"format",
"(",
"out_axi_t",
")",
"newline",
"+=",
"'typedef struct in_struct {\\n'",
"+",
"indent",
"+",
"'T_in data;\\n'",
"+",
"indent",
"+",
"'ap_uint<1> last;\\n'",
"+",
"indent",
"+",
"'in_struct(const T_in& data, const ap_uint<1>& last){this->data = data; this->last = last;};\\n'",
"+",
"indent",
"+",
"'in_struct(){this->data = 0; this->last = 0;};\\n'",
"+",
"indent",
"+",
"'friend std::ostream& operator<<(std::ostream& stream, const in_struct& in)\\n'",
"+",
"indent",
"+",
"'{ return stream << \"{ data: \" << in.data << \", last: \" << in.last << \" }\" << std::endl; }\\n'",
"+",
"indent",
"+",
"'operator float() const {return this->data;}\\n'",
"+",
"indent",
"+",
"'operator double() const {return this->data;}\\n'",
"+",
"indent",
"+",
"'in_struct(float data) {this->data = data; this->last = 0;}\\n'",
"+",
"indent",
"+",
"'in_struct(double data) {this->data = data; this->last = 0;}\\n'",
"+",
"'} input_axi_t;\\n'",
"newline",
"+=",
"'typedef struct out_struct {\\n'",
"+",
"indent",
"+",
"'T_out data;\\n'",
"+",
"indent",
"+",
"'ap_uint<1> last;\\n'",
"+",
"indent",
"+",
"'out_struct(const T_out& data, const ap_uint<1>& last){this->data = data; this->last = last;};\\n'",
"+",
"indent",
"+",
"'out_struct(){this->data = 0; this->last = 0;};\\n'",
"+",
"indent",
"+",
"'friend std::ostream& operator<<(std::ostream& stream, const out_struct& out)\\n'",
"+",
"indent",
"+",
"'{ return stream << \"{ data: \" << out.data << \", last: \" << out.last << \" }\" << std::endl; }\\n'",
"+",
"indent",
"+",
"'operator float() const {return this->data;}\\n'",
"+",
"indent",
"+",
"'operator double() const {return this->data;}\\n'",
"+",
"indent",
"+",
"'out_struct(float data) {this->data = data; this->last = 0;}\\n'",
"+",
"indent",
"+",
"'out_struct(double data) {this->data = data; this->last = 0;}\\n'",
"+",
"'} output_axi_t;\\n'",
"else",
":",
"newline",
"+=",
"'typedef {} input_axi_t;\\n'",
".",
"format",
"(",
"inp_axi_t",
")",
"newline",
"+=",
"'typedef {} output_axi_t;\\n'",
".",
"format",
"(",
"out_axi_t",
")",
"else",
":",
"newline",
"=",
"line",
"fout",
".",
"write",
"(",
"newline",
")",
"f",
".",
"close",
"(",
")",
"fout",
".",
"close",
"(",
")",
"#######################",
"## myproject_axi.cpp",
"#######################",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filedir",
",",
"'../templates/vivado_accelerator/myproject_axi.cpp'",
")",
",",
"'r'",
")",
"fout",
"=",
"open",
"(",
"'{}/firmware/{}_axi.cpp'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_output_dir",
"(",
")",
",",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
",",
"'w'",
")",
"io_type",
"=",
"model",
".",
"config",
".",
"get_config_value",
"(",
"\"IOType\"",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"if",
"'void myproject('",
"in",
"line",
":",
"newline",
"=",
"'void {}_axi(\\n'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
"elif",
"'//hls-fpga-machine-learning insert include'",
"in",
"line",
":",
"newline",
"=",
"'#include \"{}_axi.h\"\\n'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
"elif",
"'//hls-fpga-machine-learning insert local vars'",
"in",
"line",
":",
"newline",
"=",
"''",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"indent",
"+",
"'bool is_last = false;\\n'",
"if",
"io_type",
"==",
"'io_parallel'",
":",
"newline",
"+=",
"indent",
"+",
"inp",
".",
"type",
".",
"name",
"+",
"' in_local[N_IN];\\n'",
"newline",
"+=",
"indent",
"+",
"out",
".",
"type",
".",
"name",
"+",
"' out_local[N_OUT];\\n'",
"elif",
"io_type",
"==",
"'io_stream'",
":",
"newline",
"+=",
"indent",
"+",
"'hls::stream<'",
"+",
"inp",
".",
"type",
".",
"name",
"+",
"'> in_local(\"input_1\");\\n'",
"newline",
"+=",
"indent",
"+",
"'hls::stream<'",
"+",
"out",
".",
"type",
".",
"name",
"+",
"'> out_local(\"output_1\");\\n\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS STREAM variable=in_local depth=N_IN\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS STREAM variable=out_local depth=N_OUT\\n'",
"elif",
"'//hls-fpga-machine-learning insert call'",
"in",
"line",
":",
"newline",
"=",
"indent",
"+",
"'{}(in_local, out_local, in_size, out_size);\\n'",
".",
"format",
"(",
"model",
".",
"config",
".",
"get_project_name",
"(",
")",
")",
"elif",
"'//hls-fpga-machine-learning insert interface'",
"in",
"line",
":",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_lite'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE ap_ctrl_none port=return\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE s_axilite port=in\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE s_axilite port=out\\n'",
"elif",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_master'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE s_axilite port=return bundle=CTRL_BUS\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE m_axi depth=N_IN port=in offset=slave bundle=IN_BUS\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE m_axi depth=N_OUT port=out offset=slave bundle=OUT_BUS\\n'",
"elif",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE axis port=in\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE axis port=out\\n'",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS INTERFACE ap_ctrl_none port=return\\n'",
"if",
"model",
".",
"config",
".",
"get_config_value",
"(",
"\"IOType\"",
")",
"==",
"'io_stream'",
":",
"newline",
"+=",
"indent",
"+",
"'#pragma HLS DATAFLOW\\n'",
"elif",
"'//hls-fpga-machine-learning insert enqueue'",
"in",
"line",
":",
"io_type",
"=",
"model",
".",
"config",
".",
"get_config_value",
"(",
"\"IOType\"",
")",
"if",
"io_type",
"==",
"'io_parallel'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'for(unsigned i = 0; i < N_IN; i++){\\n'",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'#pragma HLS PIPELINE\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'in_local[i] = in[i].data; // Read input with cast\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'is_last |= (in[i].last == 1)? true: false;\\n'",
"else",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'#pragma HLS UNROLL\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'in_local[i] = in[i]; // Read input with cast\\n'",
"newline",
"+=",
"indent",
"+",
"'}\\n'",
"elif",
"io_type",
"==",
"'io_stream'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'for(unsigned i = 0; i < N_IN / {input_t}::size; ++i) {{\\n'",
"# newline += indent + indent + '#pragma HLS PIPELINE\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'{input_t} ctype;\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'#pragma HLS DATA_PACK variable=ctype\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'for(unsigned j = 0; j < {input_t}::size; j++) {{\\n'",
"# newline += indent + indent + indent + '#pragma HLS UNROLL\\n'",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j].data);\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'is_last |= (in[i * input_t::size + j].last == 1)? true : false;\\n'",
"else",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j]);\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'}}\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'in_local.write(ctype);\\n'",
"newline",
"+=",
"indent",
"+",
"'}}\\n'",
"newline",
"=",
"newline",
".",
"format",
"(",
"input_t",
"=",
"inp",
".",
"type",
".",
"name",
")",
"elif",
"'//hls-fpga-machine-learning insert dequeue'",
"in",
"line",
":",
"io_type",
"=",
"model",
".",
"config",
".",
"get_config_value",
"(",
"\"IOType\"",
")",
"if",
"io_type",
"==",
"'io_parallel'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'for(unsigned i = 0; i < N_OUT; i++){\\n'",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'#pragma HLS PIPELINE\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'out[i].data = out_local[i]; // Write output with cast\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'out[i].last = (is_last && (i == N_OUT - 1))? true : false;\\n'",
"else",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'#pragma HLS UNROLL\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'out[i] = out_local[i]; // Write output with cast\\n'",
"newline",
"+=",
"indent",
"+",
"'}\\n'",
"elif",
"io_type",
"==",
"'io_stream'",
":",
"newline",
"=",
"''",
"newline",
"+=",
"indent",
"+",
"'for(unsigned i = 0; i < N_OUT / {result_t}::size; ++i) {{\\n'",
"# newline += indent + indent + '#pragma HLS PIPELINE\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'{result_t} ctype = out_local.read();\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'for(unsigned j = 0; j < {result_t}::size; j++) {{\\n'",
"# newline += indent + indent + indent + '#pragma HLS UNROLL\\n'",
"if",
"self",
".",
"vivado_accelerator_config",
".",
"get_interface",
"(",
")",
"==",
"'axi_stream'",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'bool last = (is_last && (i * {result_t}::size + j == N_OUT - 1)) ? true : false;\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'out[i * {result_t}::size + j] = output_axi_t(ctype[j], last);\\n'",
"else",
":",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"indent",
"+",
"'out[i * {result_t}::size + j] = output_axi_t(ctype[j]);\\n'",
"newline",
"+=",
"indent",
"+",
"indent",
"+",
"'}}\\n'",
"newline",
"+=",
"indent",
"+",
"'}}\\n'",
"newline",
"=",
"newline",
".",
"format",
"(",
"result_t",
"=",
"out",
".",
"type",
".",
"name",
")",
"else",
":",
"newline",
"=",
"line",
"fout",
".",
"write",
"(",
"newline",
")",
"f",
".",
"close",
"(",
")",
"fout",
".",
"close",
"(",
")"
] | https://github.com/fastmachinelearning/hls4ml/blob/58d761006250deed721d85fefea91201708f2165/hls4ml/writer/vivado_accelerator_writer.py#L12-L185 |
||
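The hls4ml record above (per its url, `hls4ml/writer/vivado_accelerator_writer.py#L12-L185`) is a template-rewriting routine: it streams a Vivado wrapper template line by line and swaps magic `//hls-fpga-machine-learning insert ...` comment lines for generated HLS code. Below is a minimal, hedged sketch of that marker-substitution pattern only; the paths and the `substitutions` dict are illustrative inventions, not the hls4ml API.

```python
# Illustrative sketch of the marker-substitution loop visible in the
# record's tokens: copy a template file, replacing any line that contains
# a known marker with generated text. Paths and markers are made up.
def rewrite_template(src_path, dst_path, substitutions):
    with open(src_path) as fin, open(dst_path, "w") as fout:
        for line in fin:
            newline = line
            for marker, generated in substitutions.items():
                if marker in line:
                    newline = generated  # replace the whole marker line
                    break
            fout.write(newline)

# Hypothetical usage mirroring the record's myproject_axi.cpp rewrite:
rewrite_template(
    "templates/myproject_axi.cpp",
    "firmware/myproject_axi.cpp",
    {"//hls-fpga-machine-learning insert include": '#include "myproject_axi.h"\n'},
)
```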
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/code_coverage/croc_scan.py | python | PythonScanner.__init__ | (self) | Constructor. | Constructor. | [
"Constructor",
"."
] | def __init__(self):
"""Constructor."""
Scanner.__init__(self)
# TODO: This breaks for strings ending in more than 2 backslashes. Need
# a pattern which counts only an odd number of backslashes, so the last
# one thus escapes the quote.
self.re_token = re.compile(r'(#|\'\'\'|"""|(?<!(?<!\\)\\)["\'])')
self.comment_to_eol = ['#']
self.comment_start = None
self.comment_end = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"Scanner",
".",
"__init__",
"(",
"self",
")",
"# TODO: This breaks for strings ending in more than 2 backslashes. Need",
"# a pattern which counts only an odd number of backslashes, so the last",
"# one thus escapes the quote.",
"self",
".",
"re_token",
"=",
"re",
".",
"compile",
"(",
"r'(#|\\'\\'\\'|\"\"\"|(?<!(?<!\\\\)\\\\)[\"\\'])'",
")",
"self",
".",
"comment_to_eol",
"=",
"[",
"'#'",
"]",
"self",
".",
"comment_start",
"=",
"None",
"self",
".",
"comment_end",
"=",
"None"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/code_coverage/croc_scan.py#L109-L119 |
||
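The `PythonScanner.__init__` record above seeds `re_token` with a pattern that matches comment and string delimiters while skipping singly-escaped quotes (its own TODO notes it still breaks on strings ending in two or more backslashes). A quick standalone check of that exact pattern:

```python
import re

# The exact pattern from the record: matches '#', triple quotes, or an
# unescaped single/double quote (the nested lookbehind rejects a quote
# preceded by a lone backslash, but allows one preceded by \\).
re_token = re.compile(r'(#|\'\'\'|"""|(?<!(?<!\\)\\)["\'])')

sample = 'x = "a\\"b"  # trailing comment'
print(re_token.findall(sample))  # -> ['"', '"', '#']; the escaped quote is skipped
```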
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_controls.py | python | ListCtrl.IsSelected | (self, idx) | return (self.GetItemState(idx, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED) != 0 | return True if the item is selected | return True if the item is selected | [
"return",
"True",
"if",
"the",
"item",
"is",
"selected"
] | def IsSelected(self, idx):
'''return True if the item is selected'''
return (self.GetItemState(idx, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED) != 0 | [
"def",
"IsSelected",
"(",
"self",
",",
"idx",
")",
":",
"return",
"(",
"self",
".",
"GetItemState",
"(",
"idx",
",",
"wx",
".",
"LIST_STATE_SELECTED",
")",
"&",
"wx",
".",
"LIST_STATE_SELECTED",
")",
"!=",
"0"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L4785-L4787 |
|
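The `ListCtrl.IsSelected` record shows the standard wxPython idiom of testing a state bitmask. A hedged usage sketch follows; the helper name is invented and a constructed `wx.ListCtrl` is assumed, but `GetItemCount`/`GetItemState` are the same calls the record relies on.

```python
import wx

# Hypothetical helper: collect the indices of all selected rows in a
# wx.ListCtrl, using the same bitmask test as the record's IsSelected.
def selected_indices(lst):
    return [i for i in range(lst.GetItemCount())
            if lst.GetItemState(i, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED]
```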
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/tornado/tornado-6/tornado/web.py | python | RequestHandler.render_embed_css | (self, css_embed: Iterable[bytes]) | return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>" | Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output. | Default method used to render the final embedded css for the
rendered webpage. | [
"Default",
"method",
"used",
"to",
"render",
"the",
"final",
"embedded",
"css",
"for",
"the",
"rendered",
"webpage",
"."
] | def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>" | [
"def",
"render_embed_css",
"(",
"self",
",",
"css_embed",
":",
"Iterable",
"[",
"bytes",
"]",
")",
"->",
"bytes",
":",
"return",
"b'<style type=\"text/css\">\\n'",
"+",
"b\"\\n\"",
".",
"join",
"(",
"css_embed",
")",
"+",
"b\"\\n</style>\""
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/tornado/tornado-6/tornado/web.py#L980-L986 |
|
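The Tornado record above concatenates embedded CSS chunks into a single `<style>` block. A standalone re-creation of the same joining logic, with invented CSS bytes for illustration:

```python
# The body is copied from the record; only the sample css chunks are made up.
def render_embed_css(css_embed):
    return b'<style type="text/css">\n' + b"\n".join(css_embed) + b"\n</style>"

print(render_embed_css([b"body { margin: 0; }", b"h1 { color: red; }"]).decode())
# <style type="text/css">
# body { margin: 0; }
# h1 { color: red; }
# </style>
```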
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/peacock/Input/ParamsByType.py | python | ParamsByType.save | (self) | Look at the user params in self.block.parameters.
update the type tables
Save type on block | Look at the user params in self.block.parameters.
update the type tables
Save type on block | [
"Look",
"at",
"the",
"user",
"params",
"in",
"self",
".",
"block",
".",
"parameters",
".",
"update",
"the",
"type",
"tables",
"Save",
"type",
"on",
"block"
] | def save(self):
"""
Look at the user params in self.block.parameters.
update the type tables
Save type on block
"""
t = self.getTable()
if t:
t.save()
self.block.setBlockType(self.combo.currentText()) | [
"def",
"save",
"(",
"self",
")",
":",
"t",
"=",
"self",
".",
"getTable",
"(",
")",
"if",
"t",
":",
"t",
".",
"save",
"(",
")",
"self",
".",
"block",
".",
"setBlockType",
"(",
"self",
".",
"combo",
".",
"currentText",
"(",
")",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/Input/ParamsByType.py#L89-L98 |
||
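The `ParamsByType.save` record delegates to whichever per-type parameter table is active, then stamps the chosen type on the block. A hedged sketch of that save-then-record-selection order, with plain-Python stand-ins for the Qt widgets (all class names here are invented):

```python
# Stand-ins for the Qt pieces in the record; only the delegation order
# (save the active table first, then persist the selected type) is real.
class FakeTable:
    def save(self):
        print("table saved")

class FakeParamsByType:
    def __init__(self, table, block, current_type):
        self.table, self.block, self.current_type = table, block, current_type

    def save(self):
        if self.table:            # the active type may have no table yet
            self.table.save()     # persist the user's parameter edits first
        self.block["type"] = self.current_type  # then record the chosen type
```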
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PropertyGrid.AddToSelection | (*args, **kwargs) | return _propgrid.PropertyGrid_AddToSelection(*args, **kwargs) | AddToSelection(self, PGPropArg id) -> bool | AddToSelection(self, PGPropArg id) -> bool | [
"AddToSelection",
"(",
"self",
"PGPropArg",
"id",
")",
"-",
">",
"bool"
] | def AddToSelection(*args, **kwargs):
"""AddToSelection(self, PGPropArg id) -> bool"""
return _propgrid.PropertyGrid_AddToSelection(*args, **kwargs) | [
"def",
"AddToSelection",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGrid_AddToSelection",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L2204-L2206 |
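The final record is a SWIG wrapper whose docstring doubles as its signature: `AddToSelection(self, PGPropArg id) -> bool`. A hedged usage sketch; `pg` and the property name are assumptions, not taken from the source, though `PGPropArg` conventionally accepts either a property object or its name string.

```python
# Hedged usage of the wrapped call from the record; assumes an existing
# wx.propgrid.PropertyGrid named `pg` holding a property 'Cell Colour'.
ok = pg.AddToSelection("Cell Colour")
if not ok:
    print("selection was not extended (the grid refused the request)")
```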