column             type            length range / classes
------             ----            ----------------------
nwo                stringlengths   5 - 86
sha                stringlengths   40 - 40
path               stringlengths   4 - 189
language           stringclasses   1 value
identifier         stringlengths   1 - 94
parameters         stringlengths   2 - 4.03k
argument_list      stringclasses   1 value
return_statement   stringlengths   0 - 11.5k
docstring          stringlengths   1 - 33.2k
docstring_summary  stringlengths   0 - 5.15k
docstring_tokens   sequence
function           stringlengths   34 - 151k
function_tokens    sequence
url                stringlengths   90 - 278
junhyukoh/caffe-lstm
598d45456fa2a1b127a644f4aa38daa8fb9fc722
scripts/cpp_lint.py
python
_CppLintState.IncrementErrorCount
(self, category)
Bumps the module's error statistic.
Bumps the module's error statistic.
[ "Bumps", "the", "module", "s", "error", "statistic", "." ]
def IncrementErrorCount(self, category):
  """Bumps the module's error statistic."""
  self.error_count += 1
  if self.counting in ('toplevel', 'detailed'):
    if self.counting != 'detailed':
      category = category.split('/')[0]
    if category not in self.errors_by_category:
      self.errors_by_category[category] = 0
    self.errors_by_category[category] += 1
[ "def", "IncrementErrorCount", "(", "self", ",", "category", ")", ":", "self", ".", "error_count", "+=", "1", "if", "self", ".", "counting", "in", "(", "'toplevel'", ",", "'detailed'", ")", ":", "if", "self", ".", "counting", "!=", "'detailed'", ":", "category", "=", "category", ".", "split", "(", "'/'", ")", "[", "0", "]", "if", "category", "not", "in", "self", ".", "errors_by_category", ":", "self", ".", "errors_by_category", "[", "category", "]", "=", "0", "self", ".", "errors_by_category", "[", "category", "]", "+=", "1" ]
https://github.com/junhyukoh/caffe-lstm/blob/598d45456fa2a1b127a644f4aa38daa8fb9fc722/scripts/cpp_lint.py#L747-L755
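An editorial aside: the counting logic in this row is easy to exercise on its own. Below is a minimal, hypothetical sketch of the same per-category bookkeeping, using cpplint-style 'family/detail' category strings (the bump helper is an illustration, not part of cpplint):

def bump(stats, category, counting='toplevel'):
    """Mirror cpplint's per-category error counting."""
    if counting != 'detailed':
        category = category.split('/')[0]  # collapse 'family/detail' -> 'family'
    stats[category] = stats.get(category, 0) + 1

stats = {}
for cat in ('build/include', 'build/header_guard', 'whitespace/indent'):
    bump(stats, cat)
assert stats == {'build': 2, 'whitespace': 1}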
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_controls.py
python
GenericDirCtrl.SelectPath
(*args, **kwargs)
return _controls_.GenericDirCtrl_SelectPath(*args, **kwargs)
SelectPath(self, String path, bool select=True)
SelectPath(self, String path, bool select=True)
[ "SelectPath", "(", "self", "String", "path", "bool", "select", "=", "True", ")" ]
def SelectPath(*args, **kwargs):
    """SelectPath(self, String path, bool select=True)"""
    return _controls_.GenericDirCtrl_SelectPath(*args, **kwargs)
[ "def", "SelectPath", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "GenericDirCtrl_SelectPath", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L5712-L5714
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-time.py
python
SConsTimer.logfile_name
(self, invocation)
return os.path.join(self.outdir, name)
Returns the absolute path of a log file for the specified invocation number.
Returns the absolute path of a log file for the specified invocation number.
[ "Returns", "the", "absolute", "path", "of", "a", "log", "file", "for", "the", "specified", "invocation", "number", "." ]
def logfile_name(self, invocation):
    """
    Returns the absolute path of a log file for the specified
    invocation number.
    """
    name = self.prefix_run + '-%d.log' % invocation
    return os.path.join(self.outdir, name)
[ "def", "logfile_name", "(", "self", ",", "invocation", ")", ":", "name", "=", "self", ".", "prefix_run", "+", "'-%d.log'", "%", "invocation", "return", "os", ".", "path", ".", "join", "(", "self", ".", "outdir", ",", "name", ")" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-time.py#L586-L592
FlightGear/flightgear
cf4801e11c5b69b107f87191584eefda3c5a9b26
scripts/python/FlightGear.py
python
FGTelnet.ls
(self,dir=None)
return self._getresp()
Returns a list of properties.
Returns a list of properties.
[ "Returns", "a", "list", "of", "properties", "." ]
def ls(self, dir=None):
    """
    Returns a list of properties.
    """
    if dir is None:
        self._putcmd('ls')
    else:
        self._putcmd('ls %s' % dir)
    return self._getresp()
[ "def", "ls", "(", "self", ",", "dir", "=", "None", ")", ":", "if", "dir", "is", "None", ":", "self", ".", "_putcmd", "(", "'ls'", ")", "else", ":", "self", ".", "_putcmd", "(", "'ls %s'", "%", "dir", ")", "return", "self", ".", "_getresp", "(", ")" ]
https://github.com/FlightGear/flightgear/blob/cf4801e11c5b69b107f87191584eefda3c5a9b26/scripts/python/FlightGear.py#L21-L29
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/combine_reports.py
python
report_exit
(combined_test_report)
return ret
Return report exit code.

The exit code of this script is based on the following:
    0: All tests have status "pass", or only non-dynamic tests have status "silentfail".
    31: At least one test has status "fail" or "timeout".

Note: A test can be considered dynamic if its name contains a ":" character.
Return report exit code.
[ "Return", "report", "exit", "code", "." ]
def report_exit(combined_test_report):
    """Return report exit code.

    The exit code of this script is based on the following:
        0: All tests have status "pass", or only non-dynamic tests have
           status "silentfail".
        31: At least one test has status "fail" or "timeout".

    Note: A test can be considered dynamic if its name contains a ":" character.
    """
    ret = 0
    for test in combined_test_report.test_infos:
        if test.status in ["fail", "timeout"]:
            return 31
    return ret
[ "def", "report_exit", "(", "combined_test_report", ")", ":", "ret", "=", "0", "for", "test", "in", "combined_test_report", ".", "test_infos", ":", "if", "test", ".", "status", "in", "[", "\"fail\"", ",", "\"timeout\"", "]", ":", "return", "31", "return", "ret" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/combine_reports.py#L24-L37
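As an aside, the status-to-exit-code mapping above can be exercised standalone; the sketch below uses hypothetical TestInfo/Report namedtuples as stand-ins for the real combined-report objects:

from collections import namedtuple

# Hypothetical stand-ins for the real combined-report objects.
TestInfo = namedtuple("TestInfo", ["name", "status"])
Report = namedtuple("Report", ["test_infos"])

def report_exit(combined_test_report):
    """Return 31 if any test failed or timed out, else 0."""
    for test in combined_test_report.test_infos:
        if test.status in ("fail", "timeout"):
            return 31
    return 0

report = Report(test_infos=[TestInfo("a", "pass"), TestInfo("b", "timeout")])
assert report_exit(report) == 31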
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/jinja2/runtime.py
python
Context.get_all
(self)
return dict(self.parent, **self.vars)
Return a copy of the complete context as dict including the exported variables.
Return a copy of the complete context as dict including the exported variables.
[ "Return", "a", "copy", "of", "the", "complete", "context", "as", "dict", "including", "the", "exported", "variables", "." ]
def get_all(self):
    """Return a copy of the complete context as dict including the
    exported variables.
    """
    return dict(self.parent, **self.vars)
[ "def", "get_all", "(", "self", ")", ":", "return", "dict", "(", "self", ".", "parent", ",", "*", "*", "self", ".", "vars", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/jinja2/runtime.py#L160-L164
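Worth noting: the one-liner above leans on dict(base, **overrides) merge semantics, where keyword entries win over the base mapping. A quick standalone check:

parent = {"user": "alice", "theme": "light"}
vars_ = {"theme": "dark"}  # locally exported variables shadow the parent

merged = dict(parent, **vars_)
assert merged == {"user": "alice", "theme": "dark"}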
shogun-toolbox/shogun
9b8d856971af5a295dd6ad70623ae45647a6334c
examples/undocumented/python/structure_graphcuts.py
python
define_factor_types
(num_vars, len_feat, edge_table)
return v_factor_types
Define factor types

Args:
    num_vars: number of variables in factor graph
    len_feat: length of the feature vector
    edge_table: edge table defines pair-wise node indices

Returns:
    v_factor_types: list of all unary and pair-wise factor types
Define factor types
[ "Define", "factor", "types" ]
def define_factor_types(num_vars, len_feat, edge_table):
    """
    Define factor types

    Args:
        num_vars: number of variables in factor graph
        len_feat: length of the feature vector
        edge_table: edge table defines pair-wise node indices

    Returns:
        v_factor_types: list of all unary and pair-wise factor types
    """
    n_stats = 2  # for binary status
    v_factor_types = {}
    n_edges = edge_table.shape[0]

    # unary factors
    cards_u = np.array([n_stats], np.int32)
    w_u = np.zeros(n_stats*len_feat)
    for i in range(num_vars):
        v_factor_types[i] = sg.create_factor_type("TableFactorType", type_id=i,
                                                  cards=cards_u, w=w_u)

    # pair-wise factors
    cards_pw = np.array([n_stats, n_stats], np.int32)
    w_pw = np.zeros(n_stats*n_stats)
    for j in range(n_edges):
        v_factor_types[j + num_vars] = sg.create_factor_type("TableFactorType",
                                                             type_id=j + num_vars,
                                                             cards=cards_pw, w=w_pw)

    return v_factor_types
[ "def", "define_factor_types", "(", "num_vars", ",", "len_feat", ",", "edge_table", ")", ":", "n_stats", "=", "2", "# for binary status", "v_factor_types", "=", "{", "}", "n_edges", "=", "edge_table", ".", "shape", "[", "0", "]", "# unary factors", "cards_u", "=", "np", ".", "array", "(", "[", "n_stats", "]", ",", "np", ".", "int32", ")", "w_u", "=", "np", ".", "zeros", "(", "n_stats", "*", "len_feat", ")", "for", "i", "in", "range", "(", "num_vars", ")", ":", "v_factor_types", "[", "i", "]", "=", "sg", ".", "create_factor_type", "(", "\"TableFactorType\"", ",", "type_id", "=", "i", ",", "cards", "=", "cards_u", ",", "w", "=", "w_u", ")", "# pair-wise factors", "cards_pw", "=", "np", ".", "array", "(", "[", "n_stats", ",", "n_stats", "]", ",", "np", ".", "int32", ")", "w_pw", "=", "np", ".", "zeros", "(", "n_stats", "*", "n_stats", ")", "for", "j", "in", "range", "(", "n_edges", ")", ":", "v_factor_types", "[", "j", "+", "num_vars", "]", "=", "sg", ".", "create_factor_type", "(", "\"TableFactorType\"", ",", "type_id", "=", "j", "+", "num_vars", ",", "cards", "=", "cards_pw", ",", "w", "=", "w_pw", ")", "return", "v_factor_types" ]
https://github.com/shogun-toolbox/shogun/blob/9b8d856971af5a295dd6ad70623ae45647a6334c/examples/undocumented/python/structure_graphcuts.py#L43-L71
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/stc.py
python
StyledTextCtrl.IndicatorValueAt
(*args, **kwargs)
return _stc.StyledTextCtrl_IndicatorValueAt(*args, **kwargs)
IndicatorValueAt(self, int indicator, int position) -> int What value does a particular indicator have at a position?
IndicatorValueAt(self, int indicator, int position) -> int
[ "IndicatorValueAt", "(", "self", "int", "indicator", "int", "position", ")", "-", ">", "int" ]
def IndicatorValueAt(*args, **kwargs):
    """
    IndicatorValueAt(self, int indicator, int position) -> int

    What value does a particular indicator have at a position?
    """
    return _stc.StyledTextCtrl_IndicatorValueAt(*args, **kwargs)
[ "def", "IndicatorValueAt", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_IndicatorValueAt", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L5703-L5709
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/waflib/Build.py
python
BuildContext.add_manual_dependency
(self, path, value)
Adds a dependency from a node object to a value::

    def build(bld):
        bld.add_manual_dependency(
            bld.path.find_resource('wscript'),
            bld.root.find_resource('/etc/fstab'))

:param path: file path
:type path: string or :py:class:`waflib.Node.Node`
:param value: value to depend on
:type value: :py:class:`waflib.Node.Node`, string, or function returning a string
Adds a dependency from a node object to a value::
[ "Adds", "a", "dependency", "from", "a", "node", "object", "to", "a", "value", "::" ]
def add_manual_dependency(self, path, value):
    """
    Adds a dependency from a node object to a value::

        def build(bld):
            bld.add_manual_dependency(
                bld.path.find_resource('wscript'),
                bld.root.find_resource('/etc/fstab'))

    :param path: file path
    :type path: string or :py:class:`waflib.Node.Node`
    :param value: value to depend on
    :type value: :py:class:`waflib.Node.Node`, string, or function returning a string
    """
    if path is None:
        raise ValueError('Invalid input')

    if isinstance(path, waflib.Node.Node):
        node = path
    elif os.path.isabs(path):
        node = self.root.find_resource(path)
    else:
        node = self.path.find_resource(path)

    if isinstance(value, list):
        self.deps_man[id(node)].extend(value)
    else:
        self.deps_man[id(node)].append(value)
[ "def", "add_manual_dependency", "(", "self", ",", "path", ",", "value", ")", ":", "if", "path", "is", "None", ":", "raise", "ValueError", "(", "'Invalid input'", ")", "if", "isinstance", "(", "path", ",", "waflib", ".", "Node", ".", "Node", ")", ":", "node", "=", "path", "elif", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "node", "=", "self", ".", "root", ".", "find_resource", "(", "path", ")", "else", ":", "node", "=", "self", ".", "path", ".", "find_resource", "(", "path", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "self", ".", "deps_man", "[", "id", "(", "node", ")", "]", ".", "extend", "(", "value", ")", "else", ":", "self", ".", "deps_man", "[", "id", "(", "node", ")", "]", ".", "append", "(", "value", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Build.py#L414-L441
Z3Prover/z3
d745d03afdfdf638d66093e2bfbacaf87187f35b
scripts/mk_util.py
python
MakeRuleCmd.install_root
(cls)
return "$(DESTDIR)$(PREFIX)/"
Returns a string that will expand to the install location when used in a makefile rule.
Returns a string that will expand to the install location when used in a makefile rule.
[ "Returns", "a", "string", "that", "will", "expand", "to", "the", "install", "location", "when", "used", "in", "a", "makefile", "rule", "." ]
def install_root(cls):
    """
    Returns a string that will expand to the install location when used
    in a makefile rule.
    """
    # Note: DESTDIR is to support staged installs
    return "$(DESTDIR)$(PREFIX)/"
[ "def", "install_root", "(", "cls", ")", ":", "# Note: DESTDIR is to support staged installs", "return", "\"$(DESTDIR)$(PREFIX)/\"" ]
https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/scripts/mk_util.py#L3297-L3303
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/python/turicreate/data_structures/sarray.py
python
SArray.__div__
(self, other)
If other is a scalar value, divides each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise division of the two arrays.
If other is a scalar value, divides each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise division of the two arrays.
[ "If", "other", "is", "a", "scalar", "value", "divides", "each", "element", "of", "the", "current", "array", "by", "the", "value", "returning", "the", "result", ".", "If", "other", "is", "an", "SArray", "performs", "an", "element", "-", "wise", "division", "of", "the", "two", "arrays", "." ]
def __div__(self, other):
    """
    If other is a scalar value, divides each element of the current array
    by the value, returning the result. If other is an SArray, performs
    an element-wise division of the two arrays.
    """
    with cython_context():
        if type(other) is SArray:
            return SArray(
                _proxy=self.__proxy__.vector_operator(other.__proxy__, "/")
            )
        else:
            return SArray(_proxy=self.__proxy__.left_scalar_operator(other, "/"))
[ "def", "__div__", "(", "self", ",", "other", ")", ":", "with", "cython_context", "(", ")", ":", "if", "type", "(", "other", ")", "is", "SArray", ":", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "vector_operator", "(", "other", ".", "__proxy__", ",", "\"/\"", ")", ")", "else", ":", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "left_scalar_operator", "(", "other", ",", "\"/\"", ")", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/data_structures/sarray.py#L1086-L1098
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/collections.py
python
Counter.__add__
(self, other)
return result
Add counts from two counters.

>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
Add counts from two counters.
[ "Add", "counts", "from", "two", "counters", "." ]
def __add__(self, other):
    '''Add counts from two counters.

    >>> Counter('abbb') + Counter('bcc')
    Counter({'b': 4, 'c': 2, 'a': 1})

    '''
    if not isinstance(other, Counter):
        return NotImplemented
    result = Counter()
    for elem, count in self.items():
        newcount = count + other[elem]
        if newcount > 0:
            result[elem] = newcount
    for elem, count in other.items():
        if elem not in self and count > 0:
            result[elem] = count
    return result
[ "def", "__add__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Counter", ")", ":", "return", "NotImplemented", "result", "=", "Counter", "(", ")", "for", "elem", ",", "count", "in", "self", ".", "items", "(", ")", ":", "newcount", "=", "count", "+", "other", "[", "elem", "]", "if", "newcount", ">", "0", ":", "result", "[", "elem", "]", "=", "newcount", "for", "elem", ",", "count", "in", "other", ".", "items", "(", ")", ":", "if", "elem", "not", "in", "self", "and", "count", ">", "0", ":", "result", "[", "elem", "]", "=", "count", "return", "result" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/collections.py#L584-L601
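Since collections.Counter ships with the standard library, the semantics above (counts summed, non-positive results dropped) can be verified directly:

from collections import Counter

a = Counter('abbb')  # {'b': 3, 'a': 1}
b = Counter('bcc')   # {'c': 2, 'b': 1}
assert a + b == Counter({'b': 4, 'c': 2, 'a': 1})

# Non-positive results are dropped from the sum.
assert Counter(x=1) + Counter(x=-2) == Counter()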
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/fancy_getopt.py
python
FancyGetopt._grok_option_table
(self)
Populate the various data structures that keep tabs on the option table. Called by 'getopt()' before it can do anything worthwhile.
Populate the various data structures that keep tabs on the option table. Called by 'getopt()' before it can do anything worthwhile.
[ "Populate", "the", "various", "data", "structures", "that", "keep", "tabs", "on", "the", "option", "table", ".", "Called", "by", "getopt", "()", "before", "it", "can", "do", "anything", "worthwhile", "." ]
def _grok_option_table (self):
    """Populate the various data structures that keep tabs on the
    option table.  Called by 'getopt()' before it can do anything
    worthwhile.
    """
    self.long_opts = []
    self.short_opts = []
    self.short2long.clear()
    self.repeat = {}

    for option in self.option_table:
        if len(option) == 3:
            long, short, help = option
            repeat = 0
        elif len(option) == 4:
            long, short, help, repeat = option
        else:
            # the option table is part of the code, so simply
            # assert that it is correct
            raise ValueError, "invalid option tuple: %r" % (option,)

        # Type- and value-check the option names
        if not isinstance(long, str) or len(long) < 2:
            raise DistutilsGetoptError, \
                  ("invalid long option '%s': "
                   "must be a string of length >= 2") % long

        if (not ((short is None) or
                 (isinstance(short, str) and len(short) == 1))):
            raise DistutilsGetoptError, \
                  ("invalid short option '%s': "
                   "must a single character or None") % short

        self.repeat[long] = repeat
        self.long_opts.append(long)

        if long[-1] == '=':             # option takes an argument?
            if short: short = short + ':'
            long = long[0:-1]
            self.takes_arg[long] = 1
        else:
            # Is option is a "negative alias" for some other option (eg.
            # "quiet" == "!verbose")?
            alias_to = self.negative_alias.get(long)
            if alias_to is not None:
                if self.takes_arg[alias_to]:
                    raise DistutilsGetoptError, \
                          ("invalid negative alias '%s': "
                           "aliased option '%s' takes a value") % \
                          (long, alias_to)

                self.long_opts[-1] = long  # XXX redundant?!
                self.takes_arg[long] = 0
            else:
                self.takes_arg[long] = 0

        # If this is an alias option, make sure its "takes arg" flag is
        # the same as the option it's aliased to.
        alias_to = self.alias.get(long)
        if alias_to is not None:
            if self.takes_arg[long] != self.takes_arg[alias_to]:
                raise DistutilsGetoptError, \
                      ("invalid alias '%s': inconsistent with "
                       "aliased option '%s' (one of them takes a value, "
                       "the other doesn't") % (long, alias_to)

        # Now enforce some bondage on the long option name, so we can
        # later translate it to an attribute name on some object.  Have
        # to do this a bit late to make sure we've removed any trailing
        # '='.
        if not longopt_re.match(long):
            raise DistutilsGetoptError, \
                  ("invalid long option name '%s' " +
                   "(must be letters, numbers, hyphens only") % long

        self.attr_name[long] = self.get_attr_name(long)
        if short:
            self.short_opts.append(short)
            self.short2long[short[0]] = long
[ "def", "_grok_option_table", "(", "self", ")", ":", "self", ".", "long_opts", "=", "[", "]", "self", ".", "short_opts", "=", "[", "]", "self", ".", "short2long", ".", "clear", "(", ")", "self", ".", "repeat", "=", "{", "}", "for", "option", "in", "self", ".", "option_table", ":", "if", "len", "(", "option", ")", "==", "3", ":", "long", ",", "short", ",", "help", "=", "option", "repeat", "=", "0", "elif", "len", "(", "option", ")", "==", "4", ":", "long", ",", "short", ",", "help", ",", "repeat", "=", "option", "else", ":", "# the option table is part of the code, so simply", "# assert that it is correct", "raise", "ValueError", ",", "\"invalid option tuple: %r\"", "%", "(", "option", ",", ")", "# Type- and value-check the option names", "if", "not", "isinstance", "(", "long", ",", "str", ")", "or", "len", "(", "long", ")", "<", "2", ":", "raise", "DistutilsGetoptError", ",", "(", "\"invalid long option '%s': \"", "\"must be a string of length >= 2\"", ")", "%", "long", "if", "(", "not", "(", "(", "short", "is", "None", ")", "or", "(", "isinstance", "(", "short", ",", "str", ")", "and", "len", "(", "short", ")", "==", "1", ")", ")", ")", ":", "raise", "DistutilsGetoptError", ",", "(", "\"invalid short option '%s': \"", "\"must a single character or None\"", ")", "%", "short", "self", ".", "repeat", "[", "long", "]", "=", "repeat", "self", ".", "long_opts", ".", "append", "(", "long", ")", "if", "long", "[", "-", "1", "]", "==", "'='", ":", "# option takes an argument?", "if", "short", ":", "short", "=", "short", "+", "':'", "long", "=", "long", "[", "0", ":", "-", "1", "]", "self", ".", "takes_arg", "[", "long", "]", "=", "1", "else", ":", "# Is option is a \"negative alias\" for some other option (eg.", "# \"quiet\" == \"!verbose\")?", "alias_to", "=", "self", ".", "negative_alias", ".", "get", "(", "long", ")", "if", "alias_to", "is", "not", "None", ":", "if", "self", ".", "takes_arg", "[", "alias_to", "]", ":", "raise", "DistutilsGetoptError", ",", "(", "\"invalid negative alias '%s': \"", "\"aliased option '%s' takes a value\"", ")", "%", "(", "long", ",", "alias_to", ")", "self", ".", "long_opts", "[", "-", "1", "]", "=", "long", "# XXX redundant?!", "self", ".", "takes_arg", "[", "long", "]", "=", "0", "else", ":", "self", ".", "takes_arg", "[", "long", "]", "=", "0", "# If this is an alias option, make sure its \"takes arg\" flag is", "# the same as the option it's aliased to.", "alias_to", "=", "self", ".", "alias", ".", "get", "(", "long", ")", "if", "alias_to", "is", "not", "None", ":", "if", "self", ".", "takes_arg", "[", "long", "]", "!=", "self", ".", "takes_arg", "[", "alias_to", "]", ":", "raise", "DistutilsGetoptError", ",", "(", "\"invalid alias '%s': inconsistent with \"", "\"aliased option '%s' (one of them takes a value, \"", "\"the other doesn't\"", ")", "%", "(", "long", ",", "alias_to", ")", "# Now enforce some bondage on the long option name, so we can", "# later translate it to an attribute name on some object. Have", "# to do this a bit late to make sure we've removed any trailing", "# '='.", "if", "not", "longopt_re", ".", "match", "(", "long", ")", ":", "raise", "DistutilsGetoptError", ",", "(", "\"invalid long option name '%s' \"", "+", "\"(must be letters, numbers, hyphens only\"", ")", "%", "long", "self", ".", "attr_name", "[", "long", "]", "=", "self", ".", "get_attr_name", "(", "long", ")", "if", "short", ":", "self", ".", "short_opts", ".", "append", "(", "short", ")", "self", ".", "short2long", "[", "short", "[", "0", "]", "]", "=", "long" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/fancy_getopt.py#L146-L227
letscontrolit/ESPEasy
acb2c9e695d6f61d8d67adf0fe4037c08d4baedd
lib/IRremoteESP8266/tools/auto_analyse_raw_data.py
python
RawIRMessage.add_data_code
(self, bin_str, name="", footer=True)
return code
Add the common "data" sequence of code to send the bulk of a message.
Add the common "data" sequence of code to send the bulk of a message.
[ "Add", "the", "common", "data", "sequence", "of", "code", "to", "send", "the", "bulk", "of", "a", "message", "." ]
def add_data_code(self, bin_str, name="", footer=True):
    """Add the common "data" sequence of code to send the bulk of a message."""
    # pylint: disable=no-self-use
    code = []
    nbits = len(bin_str)
    code.append(f"  // Data Section #{self.section_count}")
    code.append(f"  // e.g. data = 0x{int(bin_str, 2):X}, nbits = {nbits}")
    code.append(f"  sendData(k{name}BitMark, k{name}OneSpace, k{name}BitMark,"
                f" k{name}ZeroSpace, send_data, {nbits}, true);")
    code.append(f"  send_data >>= {nbits};")
    if footer:
        code.append("  // Footer")
        code.append(f"  mark(k{name}BitMark);")
    return code
[ "def", "add_data_code", "(", "self", ",", "bin_str", ",", "name", "=", "\"\"", ",", "footer", "=", "True", ")", ":", "# pylint: disable=no-self-use", "code", "=", "[", "]", "nbits", "=", "len", "(", "bin_str", ")", "code", ".", "append", "(", "f\" // Data Section #{self.section_count}\"", ")", "code", ".", "append", "(", "f\" // e.g. data = 0x{int(bin_str, 2):X}, nbits = {nbits}\"", ")", "code", ".", "append", "(", "f\" sendData(k{name}BitMark, k{name}OneSpace, k{name}BitMark,\"", "f\" k{name}ZeroSpace, send_data, {nbits}, true);\"", ")", "code", ".", "append", "(", "f\" send_data >>= {nbits};\"", ")", "if", "footer", ":", "code", ".", "append", "(", "\" // Footer\"", ")", "code", ".", "append", "(", "f\" mark(k{name}BitMark);\"", ")", "return", "code" ]
https://github.com/letscontrolit/ESPEasy/blob/acb2c9e695d6f61d8d67adf0fe4037c08d4baedd/lib/IRremoteESP8266/tools/auto_analyse_raw_data.py#L101-L114
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py
python
Menu.add_command
(self, cnf={}, **kw)
Add command menu item.
Add command menu item.
[ "Add", "command", "menu", "item", "." ]
def add_command(self, cnf={}, **kw):
    """Add command menu item."""
    self.add('command', cnf or kw)
[ "def", "add_command", "(", "self", ",", "cnf", "=", "{", "}", ",", "*", "*", "kw", ")", ":", "self", ".", "add", "(", "'command'", ",", "cnf", "or", "kw", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L2681-L2683
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/xrc.py
python
XmlResourceHandler.AddWindowStyles
(*args, **kwargs)
return _xrc.XmlResourceHandler_AddWindowStyles(*args, **kwargs)
AddWindowStyles(self)
AddWindowStyles(self)
[ "AddWindowStyles", "(", "self", ")" ]
def AddWindowStyles(*args, **kwargs):
    """AddWindowStyles(self)"""
    return _xrc.XmlResourceHandler_AddWindowStyles(*args, **kwargs)
[ "def", "AddWindowStyles", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_xrc", ".", "XmlResourceHandler_AddWindowStyles", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/xrc.py#L651-L653
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/values.py
python
MirroredVariable._gather_saveables_for_checkpoint
(self)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
Overrides Trackable method.

This allows both name-based and object-based save and restore of MirroredVariables.

Returns:
  A dictionary mapping attribute names to `SaveableObject` factories.
Overrides Trackable method.
[ "Overrides", "Trackable", "method", "." ]
def _gather_saveables_for_checkpoint(self):
  """Overrides Trackable method.

  This allows both name-based and object-based save and restore of
  MirroredVariables.

  Returns:
    A dictionary mapping attribute names to `SaveableObject` factories.
  """
  def _saveable_factory(name=self._common_name):
    return _MirroredSaveable(self, self.primary, name)

  return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
[ "def", "_gather_saveables_for_checkpoint", "(", "self", ")", ":", "def", "_saveable_factory", "(", "name", "=", "self", ".", "_common_name", ")", ":", "return", "_MirroredSaveable", "(", "self", ",", "self", ".", "primary", ",", "name", ")", "return", "{", "trackable", ".", "VARIABLE_VALUE_KEY", ":", "_saveable_factory", "}" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/values.py#L1055-L1066
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/framework/dtypes.py
python
as_dtype
(type_value)
Converts the given `type_value` to a `DType`.

Args:
  type_value: A value that can be converted to a `tf.DType` object. This may
    currently be a `tf.DType` object, a
    [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
    a string type name, or a `numpy.dtype`.

Returns:
  A `DType` corresponding to `type_value`.

Raises:
  TypeError: If `type_value` cannot be converted to a `DType`.
Converts the given `type_value` to a `DType`.
[ "Converts", "the", "given", "type_value", "to", "a", "DType", "." ]
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.

  Args:
    type_value: A value that can be converted to a `tf.DType` object. This may
      currently be a `tf.DType` object, a
      [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
      a string type name, or a `numpy.dtype`.

  Returns:
    A `DType` corresponding to `type_value`.

  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  if isinstance(type_value, DType):
    return type_value

  try:
    return _INTERN_TABLE[type_value]
  except KeyError:
    pass

  try:
    return _STRING_TO_TF[type_value]
  except KeyError:
    pass

  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We can not compare
    # dtype with a single constant (np.string does not exist) to decide
    # dtype is a "string" type. We need to compare the dtype.type to be
    # sure it's a string type.
    if type_value.type == np.string_ or type_value.type == np.unicode_:
      return string

  for key, val in _NP_TO_TF:
    try:
      if key == type_value:
        return val
    except TypeError as e:
      raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))

  raise TypeError(
      "Cannot convert value %r to a TensorFlow DType." % type_value)
[ "def", "as_dtype", "(", "type_value", ")", ":", "if", "isinstance", "(", "type_value", ",", "DType", ")", ":", "return", "type_value", "try", ":", "return", "_INTERN_TABLE", "[", "type_value", "]", "except", "KeyError", ":", "pass", "try", ":", "return", "_STRING_TO_TF", "[", "type_value", "]", "except", "KeyError", ":", "pass", "if", "isinstance", "(", "type_value", ",", "np", ".", "dtype", ")", ":", "# The numpy dtype for strings is variable length. We can not compare", "# dtype with a single constant (np.string does not exist) to decide", "# dtype is a \"string\" type. We need to compare the dtype.type to be", "# sure it's a string type.", "if", "type_value", ".", "type", "==", "np", ".", "string_", "or", "type_value", ".", "type", "==", "np", ".", "unicode_", ":", "return", "string", "for", "key", ",", "val", "in", "_NP_TO_TF", ":", "try", ":", "if", "key", "==", "type_value", ":", "return", "val", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "\"Cannot convert {} to a dtype. {}\"", ".", "format", "(", "type_value", ",", "e", ")", ")", "raise", "TypeError", "(", "\"Cannot convert value %r to a TensorFlow DType.\"", "%", "type_value", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/framework/dtypes.py#L540-L584
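The function above is a cascade of increasingly general lookups: identity, intern table, string table, numpy dtype, then a linear scan. A minimal pure-Python sketch of that pattern, with hypothetical tables standing in for TensorFlow's internals:

# Hypothetical lookup tables standing in for TensorFlow's internals.
_INTERN_TABLE = {1: "DT_FLOAT", 2: "DT_DOUBLE"}   # enum value -> dtype
_STRING_TO_TF = {"float32": "DT_FLOAT", "float64": "DT_DOUBLE"}

def as_dtype(type_value):
    """Resolve type_value through successively more general lookups."""
    if isinstance(type_value, str) and type_value.startswith("DT_"):
        return type_value                   # already a dtype: identity
    for table in (_INTERN_TABLE, _STRING_TO_TF):
        try:
            return table[type_value]        # hashable-key lookup
        except (KeyError, TypeError):       # TypeError: unhashable key
            pass
    raise TypeError("Cannot convert value %r to a dtype." % (type_value,))

assert as_dtype(1) == "DT_FLOAT"
assert as_dtype("float64") == "DT_DOUBLE"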
zhuli19901106/leetcode-zhuli
0f8fc29ccb8c33ea91149ecb2d4e961024c11db7
explore/hash-table/1179_two-sum-iii-data-structure-design_1_AC.py
python
TwoSum.__init__
(self)
Initialize your data structure here.
Initialize your data structure here.
[ "Initialize", "your", "data", "structure", "here", "." ]
def __init__(self):
    """
    Initialize your data structure here.
    """
    self.mm = {}
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "mm", "=", "{", "}" ]
https://github.com/zhuli19901106/leetcode-zhuli/blob/0f8fc29ccb8c33ea91149ecb2d4e961024c11db7/explore/hash-table/1179_two-sum-iii-data-structure-design_1_AC.py#L4-L8
apache/impala
8ddac48f3428c86f2cbd037ced89cfb903298b12
shell/TSSLSocketWithWildcardSAN.py
python
TSSLSocketWithWildcardSAN._match_hostname
(self, cert, hostname)
Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing.
Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*.
[ "Verify", "that", "*", "cert", "*", "(", "in", "decoded", "format", "as", "returned", "by", "SSLSocket", ".", "getpeercert", "()", ")", "matches", "the", "*", "hostname", "*", ".", "RFC", "2818", "and", "RFC", "6125", "rules", "are", "followed", "but", "IP", "addresses", "are", "not", "accepted", "for", "*", "hostname", "*", "." ]
def _match_hostname(self, cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if self._dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if self._dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
[ "def", "_match_hostname", "(", "self", ",", "cert", ",", "hostname", ")", ":", "dnsnames", "=", "[", "]", "san", "=", "cert", ".", "get", "(", "'subjectAltName'", ",", "(", ")", ")", "for", "key", ",", "value", "in", "san", ":", "if", "key", "==", "'DNS'", ":", "if", "self", ".", "_dnsname_match", "(", "value", ",", "hostname", ")", ":", "return", "dnsnames", ".", "append", "(", "value", ")", "if", "not", "dnsnames", ":", "# The subject is only checked when there is no dNSName entry", "# in subjectAltName", "for", "sub", "in", "cert", ".", "get", "(", "'subject'", ",", "(", ")", ")", ":", "for", "key", ",", "value", "in", "sub", ":", "# XXX according to RFC 2818, the most specific Common Name", "# must be used.", "if", "key", "==", "'commonName'", ":", "if", "self", ".", "_dnsname_match", "(", "value", ",", "hostname", ")", ":", "return", "dnsnames", ".", "append", "(", "value", ")", "if", "len", "(", "dnsnames", ")", ">", "1", ":", "raise", "CertificateError", "(", "\"hostname %r \"", "\"doesn't match either of %s\"", "%", "(", "hostname", ",", "', '", ".", "join", "(", "map", "(", "repr", ",", "dnsnames", ")", ")", ")", ")", "elif", "len", "(", "dnsnames", ")", "==", "1", ":", "raise", "CertificateError", "(", "\"hostname %r \"", "\"doesn't match %r\"", "%", "(", "hostname", ",", "dnsnames", "[", "0", "]", ")", ")", "else", ":", "raise", "CertificateError", "(", "\"no appropriate commonName or \"", "\"subjectAltName fields were found\"", ")" ]
https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/shell/TSSLSocketWithWildcardSAN.py#L78-L114
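The method above checks subjectAltName DNS entries first and falls back to commonName only when none exist. A simplified, self-contained sketch of the SAN scan (wildcard matching, handled by _dnsname_match in the real code, is reduced to plain equality here):

# Pure-Python sketch of the SAN-first matching policy; not wildcard-aware.
def match_hostname(cert, hostname):
    """Return None if hostname matches a DNS SAN entry, else raise."""
    dnsnames = []
    for key, value in cert.get('subjectAltName', ()):
        if key == 'DNS':
            if value == hostname:
                return
            dnsnames.append(value)
    raise ValueError("hostname %r doesn't match %s" % (hostname, dnsnames))

cert = {'subjectAltName': (('DNS', 'example.com'), ('DNS', 'www.example.com'))}
match_hostname(cert, 'www.example.com')  # returns None on success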
junhyukoh/caffe-lstm
598d45456fa2a1b127a644f4aa38daa8fb9fc722
tools/extra/parse_log.py
python
parse_line_for_net_output
(regex_obj, row, row_dict_list, line, iteration, seconds, learning_rate)
return row_dict_list, row
Parse a single line for training or test output

Returns a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
Parse a single line for training or test output
[ "Parse", "a", "single", "line", "for", "training", "or", "test", "output" ]
def parse_line_for_net_output(regex_obj, row, row_dict_list,
                              line, iteration, seconds, learning_rate):
    """Parse a single line for training or test output

    Returns a tuple with (row_dict_list, row)
    row: may be either a new row or an augmented version of the current row
    row_dict_list: may be either the current row_dict_list or an augmented
    version of the current row_dict_list
    """
    output_match = regex_obj.search(line)
    if output_match:
        if not row or row['NumIters'] != iteration:
            # Push the last row and start a new one
            if row:
                # If we're on a new iteration, push the last row
                # This will probably only happen for the first row; otherwise
                # the full row checking logic below will push and clear full
                # rows
                row_dict_list.append(row)

            row = OrderedDict([
                ('NumIters', iteration),
                ('Seconds', seconds),
                ('LearningRate', learning_rate)
            ])

        # output_num is not used; may be used in the future
        # output_num = output_match.group(1)
        output_name = output_match.group(2)
        output_val = output_match.group(3)
        row[output_name] = float(output_val)

    if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
        # The row is full, based on the fact that it has the same number of
        # columns as the first row; append it to the list
        row_dict_list.append(row)
        row = None

    return row_dict_list, row
[ "def", "parse_line_for_net_output", "(", "regex_obj", ",", "row", ",", "row_dict_list", ",", "line", ",", "iteration", ",", "seconds", ",", "learning_rate", ")", ":", "output_match", "=", "regex_obj", ".", "search", "(", "line", ")", "if", "output_match", ":", "if", "not", "row", "or", "row", "[", "'NumIters'", "]", "!=", "iteration", ":", "# Push the last row and start a new one", "if", "row", ":", "# If we're on a new iteration, push the last row", "# This will probably only happen for the first row; otherwise", "# the full row checking logic below will push and clear full", "# rows", "row_dict_list", ".", "append", "(", "row", ")", "row", "=", "OrderedDict", "(", "[", "(", "'NumIters'", ",", "iteration", ")", ",", "(", "'Seconds'", ",", "seconds", ")", ",", "(", "'LearningRate'", ",", "learning_rate", ")", "]", ")", "# output_num is not used; may be used in the future", "# output_num = output_match.group(1)", "output_name", "=", "output_match", ".", "group", "(", "2", ")", "output_val", "=", "output_match", ".", "group", "(", "3", ")", "row", "[", "output_name", "]", "=", "float", "(", "output_val", ")", "if", "row", "and", "len", "(", "row_dict_list", ")", ">=", "1", "and", "len", "(", "row", ")", "==", "len", "(", "row_dict_list", "[", "0", "]", ")", ":", "# The row is full, based on the fact that it has the same number of", "# columns as the first row; append it to the list", "row_dict_list", ".", "append", "(", "row", ")", "row", "=", "None", "return", "row_dict_list", ",", "row" ]
https://github.com/junhyukoh/caffe-lstm/blob/598d45456fa2a1b127a644f4aa38daa8fb9fc722/tools/extra/parse_log.py#L77-L116
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/framework/error_interpolation.py
python
interpolate
(message, graph)
return "\n".join(error_message)
Interpolates an error message.

The error message can contain tags of form `{{node_type node_name}}`
which will be parsed to identify the tf.Graph and op. If the op contains
traceback, the traceback will be attached to the error message.

Args:
  message: A string to interpolate.
  graph: ops.Graph object containing all nodes referenced in the error message.

Returns:
  The error message string with node definition traceback.
Interpolates an error message.
[ "Interpolates", "an", "error", "message", "." ]
def interpolate(message, graph):
  """Interpolates an error message.

  The error message can contain tags of form `{{node_type node_name}}`
  which will be parsed to identify the tf.Graph and op. If the op contains
  traceback, the traceback will be attached to the error message.

  Args:
    message: A string to interpolate.
    graph: ops.Graph object containing all nodes referenced in the error
      message.

  Returns:
    The error message string with node definition traceback.
  """
  parsed_messaged, _, node_tags = parse_message(message)
  error_message = ["Graph execution error:", ""]
  for tag in node_tags:
    try:
      op = graph.get_operation_by_name(tag.name)
    except KeyError:
      continue
    else:
      error_message.append(_build_node_error_message(op))

  error_message.append(parsed_messaged.strip())
  return "\n".join(error_message)
[ "def", "interpolate", "(", "message", ",", "graph", ")", ":", "parsed_messaged", ",", "_", ",", "node_tags", "=", "parse_message", "(", "message", ")", "error_message", "=", "[", "\"Graph execution error:\"", ",", "\"\"", "]", "for", "tag", "in", "node_tags", ":", "try", ":", "op", "=", "graph", ".", "get_operation_by_name", "(", "tag", ".", "name", ")", "except", "KeyError", ":", "continue", "else", ":", "error_message", ".", "append", "(", "_build_node_error_message", "(", "op", ")", ")", "error_message", ".", "append", "(", "parsed_messaged", ".", "strip", "(", ")", ")", "return", "\"\\n\"", ".", "join", "(", "error_message", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/framework/error_interpolation.py#L446-L472
kushview/Element
1cc16380caa2ab79461246ba758b9de1f46db2a5
waflib/Options.py
python
OptionsContext.jobs
(self)
return count
Finds the optimal amount of cpu cores to use for parallel jobs.
At runtime the options can be obtained from :py:const:`waflib.Options.options` ::

    from waflib.Options import options
    njobs = options.jobs

:return: the amount of cpu cores
:rtype: int
Finds the optimal amount of cpu cores to use for parallel jobs. At runtime the options can be obtained from :py:const:`waflib.Options.options` ::
[ "Finds", "the", "optimal", "amount", "of", "cpu", "cores", "to", "use", "for", "parallel", "jobs", ".", "At", "runtime", "the", "options", "can", "be", "obtained", "from", ":", "py", ":", "const", ":", "waflib", ".", "Options", ".", "options", "::" ]
def jobs(self):
    """
    Finds the optimal amount of cpu cores to use for parallel jobs.
    At runtime the options can be obtained from :py:const:`waflib.Options.options` ::

        from waflib.Options import options
        njobs = options.jobs

    :return: the amount of cpu cores
    :rtype: int
    """
    count = int(os.environ.get('JOBS', 0))
    if count < 1:
        if 'NUMBER_OF_PROCESSORS' in os.environ:
            # on Windows, use the NUMBER_OF_PROCESSORS environment variable
            count = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
        else:
            # on everything else, first try the POSIX sysconf values
            if hasattr(os, 'sysconf_names'):
                if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
                    count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
                elif 'SC_NPROCESSORS_CONF' in os.sysconf_names:
                    count = int(os.sysconf('SC_NPROCESSORS_CONF'))
            if not count and os.name not in ('nt', 'java'):
                try:
                    tmp = self.cmd_and_log(['sysctl', '-n', 'hw.ncpu'], quiet=0)
                except Errors.WafError:
                    pass
                else:
                    if re.match('^[0-9]+$', tmp):
                        count = int(tmp)
    if count < 1:
        count = 1
    elif count > 1024:
        count = 1024
    return count
[ "def", "jobs", "(", "self", ")", ":", "count", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'JOBS'", ",", "0", ")", ")", "if", "count", "<", "1", ":", "if", "'NUMBER_OF_PROCESSORS'", "in", "os", ".", "environ", ":", "# on Windows, use the NUMBER_OF_PROCESSORS environment variable", "count", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'NUMBER_OF_PROCESSORS'", ",", "1", ")", ")", "else", ":", "# on everything else, first try the POSIX sysconf values", "if", "hasattr", "(", "os", ",", "'sysconf_names'", ")", ":", "if", "'SC_NPROCESSORS_ONLN'", "in", "os", ".", "sysconf_names", ":", "count", "=", "int", "(", "os", ".", "sysconf", "(", "'SC_NPROCESSORS_ONLN'", ")", ")", "elif", "'SC_NPROCESSORS_CONF'", "in", "os", ".", "sysconf_names", ":", "count", "=", "int", "(", "os", ".", "sysconf", "(", "'SC_NPROCESSORS_CONF'", ")", ")", "if", "not", "count", "and", "os", ".", "name", "not", "in", "(", "'nt'", ",", "'java'", ")", ":", "try", ":", "tmp", "=", "self", ".", "cmd_and_log", "(", "[", "'sysctl'", ",", "'-n'", ",", "'hw.ncpu'", "]", ",", "quiet", "=", "0", ")", "except", "Errors", ".", "WafError", ":", "pass", "else", ":", "if", "re", ".", "match", "(", "'^[0-9]+$'", ",", "tmp", ")", ":", "count", "=", "int", "(", "tmp", ")", "if", "count", "<", "1", ":", "count", "=", "1", "elif", "count", ">", "1024", ":", "count", "=", "1024", "return", "count" ]
https://github.com/kushview/Element/blob/1cc16380caa2ab79461246ba758b9de1f46db2a5/waflib/Options.py#L192-L227
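For comparison, Python 3.4+ exposes the portable core of this probe directly in the standard library; a short sketch of the same clamp-to-[1, 1024] policy, assuming only os.cpu_count:

import os

def jobs(default=1):
    """Pick a parallel job count: JOBS env var, else detected cores, clamped."""
    count = int(os.environ.get("JOBS", 0))
    if count < 1:
        count = os.cpu_count() or default  # cpu_count() is None on exotic platforms
    return max(1, min(count, 1024))

print(jobs())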
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py
python
ParserElement.ignore
( self, other )
return self
Define expression to be ignored (e.g., comments) while doing pattern matching;
may be called repeatedly, to define multiple comment or other ignorable patterns.

Example::

    patt = OneOrMore(Word(alphas))
    patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']

    patt.ignore(cStyleComment)
    patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
[ "Define", "expression", "to", "be", "ignored", "(", "e", ".", "g", ".", "comments", ")", "while", "doing", "pattern", "matching", ";", "may", "be", "called", "repeatedly", "to", "define", "multiple", "comment", "or", "other", "ignorable", "patterns", ".", "Example", "::", "patt", "=", "OneOrMore", "(", "Word", "(", "alphas", "))", "patt", ".", "parseString", "(", "ablaj", "/", "*", "comment", "*", "/", "lskjd", ")", "#", "-", ">", "[", "ablaj", "]", "patt", ".", "ignore", "(", "cStyleComment", ")", "patt", ".", "parseString", "(", "ablaj", "/", "*", "comment", "*", "/", "lskjd", ")", "#", "-", ">", "[", "ablaj", "lskjd", "]" ]
def ignore( self, other ):
    """
    Define expression to be ignored (e.g., comments) while doing pattern matching;
    may be called repeatedly, to define multiple comment or other ignorable patterns.

    Example::

        patt = OneOrMore(Word(alphas))
        patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']

        patt.ignore(cStyleComment)
        patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
    """
    if isinstance(other, basestring):
        other = Suppress(other)

    if isinstance( other, Suppress ):
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other)
    else:
        self.ignoreExprs.append( Suppress( other.copy() ) )
    return self
[ "def", "ignore", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "basestring", ")", ":", "other", "=", "Suppress", "(", "other", ")", "if", "isinstance", "(", "other", ",", "Suppress", ")", ":", "if", "other", "not", "in", "self", ".", "ignoreExprs", ":", "self", ".", "ignoreExprs", ".", "append", "(", "other", ")", "else", ":", "self", ".", "ignoreExprs", ".", "append", "(", "Suppress", "(", "other", ".", "copy", "(", ")", ")", ")", "return", "self" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py#L2080-L2101
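The docstring's example is runnable as-is against the classic pyparsing 2.x API vendored here (the names OneOrMore, Word, alphas, and cStyleComment all come from that API):

from pyparsing import OneOrMore, Word, alphas, cStyleComment

patt = OneOrMore(Word(alphas))
print(patt.parseString('ablaj /* comment */ lskjd'))  # -> ['ablaj']

patt.ignore(cStyleComment)  # skip C-style comments between tokens
print(patt.parseString('ablaj /* comment */ lskjd'))  # -> ['ablaj', 'lskjd']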
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/setuptools/_backport/hashlib/_sha.py
python
new
(arg=None)
return crypto
Return a new sha crypto object. If arg is present, the method call update(arg) is made.
Return a new sha crypto object.
[ "Return", "a", "new", "sha", "crypto", "object", "." ]
def new(arg=None):
    """Return a new sha crypto object.

    If arg is present, the method call update(arg) is made.
    """
    crypto = sha()
    if arg:
        crypto.update(arg)
    return crypto
[ "def", "new", "(", "arg", "=", "None", ")", ":", "crypto", "=", "sha", "(", ")", "if", "arg", ":", "crypto", ".", "update", "(", "arg", ")", "return", "crypto" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/setuptools/_backport/hashlib/_sha.py#L337-L347
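This _sha backport mirrors the constructor convention of the standard hashlib module, where initial data passed to the constructor is equivalent to a separate update call:

import hashlib

# hashlib.sha1(b"abc") is equivalent to sha1() followed by update(b"abc").
h1 = hashlib.sha1(b"abc")
h2 = hashlib.sha1()
h2.update(b"abc")
assert h1.hexdigest() == h2.hexdigest()
print(h1.hexdigest())  # a9993e364706816aba3e25717850c26c9cd0d89d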
arangodb/arangodb
0d658689c7d1b721b314fa3ca27d38303e1570c8
3rdParty/V8/gyp/generator/eclipse.py
python
GenerateOutput
(target_list, target_dicts, data, params)
Generate an XML settings file that can be imported into a CDT project.
Generate an XML settings file that can be imported into a CDT project.
[ "Generate", "an", "XML", "settings", "file", "that", "can", "be", "imported", "into", "a", "CDT", "project", "." ]
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""
  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations']
    for config_name in config_names:
      GenerateOutputForConfig(target_list, target_dicts, data, params,
                              config_name)
[ "def", "GenerateOutput", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ")", ":", "if", "params", "[", "'options'", "]", ".", "generator_output", ":", "raise", "NotImplementedError", "(", "\"--generator_output not implemented for eclipse\"", ")", "user_config", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", ".", "get", "(", "'config'", ",", "None", ")", "if", "user_config", ":", "GenerateOutputForConfig", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ",", "user_config", ")", "else", ":", "config_names", "=", "target_dicts", "[", "target_list", "[", "0", "]", "]", "[", "'configurations'", "]", "for", "config_name", "in", "config_names", ":", "GenerateOutputForConfig", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ",", "config_name", ")" ]
https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/generator/eclipse.py#L391-L405
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
caffe2/python/helpers/dropout.py
python
dropout
(model, blob_in, blob_out, use_cudnn=False, **kwargs)
return model.net.Dropout( blob_in, [blob_out, "_" + blob_out + "_mask"], **kwargs)[0]
dropout
dropout
[ "dropout" ]
def dropout(model, blob_in, blob_out, use_cudnn=False, **kwargs):
    """dropout"""
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
    else:
        kwargs['engine'] = 'DEFAULT'
    assert 'is_test' in kwargs, "Argument 'is_test' is required"
    return model.net.Dropout(
        blob_in, [blob_out, "_" + blob_out + "_mask"], **kwargs)[0]
[ "def", "dropout", "(", "model", ",", "blob_in", ",", "blob_out", ",", "use_cudnn", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "use_cudnn", ":", "kwargs", "[", "'engine'", "]", "=", "'CUDNN'", "else", ":", "kwargs", "[", "'engine'", "]", "=", "'DEFAULT'", "assert", "'is_test'", "in", "kwargs", ",", "\"Argument 'is_test' is required\"", "return", "model", ".", "net", ".", "Dropout", "(", "blob_in", ",", "[", "blob_out", ",", "\"_\"", "+", "blob_out", "+", "\"_mask\"", "]", ",", "*", "*", "kwargs", ")", "[", "0", "]" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/helpers/dropout.py#L9-L17
nyuwireless-unipd/ns3-mmwave
4ff9e87e8079764e04cbeccd8e85bff15ae16fb3
utils/grid.py
python
TimelineDataRange.__search
(self, key)
return - 1
! Search
@param self this object
@param key key
@return index if found or -1 if not found
! Search
[ "!", "Search" ]
def __search(self, key):
    """! Search
    @param self this object
    @param key key
    @return index if found or -1 if not found
    """
    l = 0
    u = len(self.ranges) - 1
    while l <= u:
        i = int((l + u) / 2)
        if key >= self.ranges[i].start and key <= self.ranges[i].end:
            return i
        elif key < self.ranges[i].start:
            u = i - 1
        else:
            # key > self.ranges[i].end
            l = i + 1
    return -1
[ "def", "__search", "(", "self", ",", "key", ")", ":", "l", "=", "0", "u", "=", "len", "(", "self", ".", "ranges", ")", "-", "1", "while", "l", "<=", "u", ":", "i", "=", "int", "(", "(", "l", "+", "u", ")", "/", "2", ")", "if", "key", ">=", "self", ".", "ranges", "[", "i", "]", ".", "start", "and", "key", "<=", "self", ".", "ranges", "[", "i", "]", ".", "end", ":", "return", "i", "elif", "key", "<", "self", ".", "ranges", "[", "i", "]", ".", "start", ":", "u", "=", "i", "-", "1", "else", ":", "# key > self.ranges[i].end", "l", "=", "i", "+", "1", "return", "-", "1" ]
https://github.com/nyuwireless-unipd/ns3-mmwave/blob/4ff9e87e8079764e04cbeccd8e85bff15ae16fb3/utils/grid.py#L105-L122
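The method above is a textbook binary search over sorted, non-overlapping [start, end] ranges. A self-contained version of the same algorithm, with a hypothetical Range namedtuple standing in for the timeline's range objects:

from collections import namedtuple

Range = namedtuple("Range", ["start", "end"])  # hypothetical stand-in

def search_ranges(ranges, key):
    """Binary search sorted, non-overlapping ranges; -1 if key is in none."""
    lo, hi = 0, len(ranges) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if ranges[mid].start <= key <= ranges[mid].end:
            return mid
        elif key < ranges[mid].start:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1

ranges = [Range(0, 4), Range(10, 14), Range(20, 24)]
assert search_ranges(ranges, 12) == 1
assert search_ranges(ranges, 7) == -1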
hpi-xnor/BMXNet
ed0b201da6667887222b8e4b5f997c4f6b61943d
example/fcn-xs/data.py
python
FileIter.provide_data
(self)
return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]
The name and shape of data provided by this iterator
The name and shape of data provided by this iterator
[ "The", "name", "and", "shape", "of", "data", "provided", "by", "this", "iterator" ]
def provide_data(self): """The name and shape of data provided by this iterator""" return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]
[ "def", "provide_data", "(", "self", ")", ":", "return", "[", "(", "k", ",", "tuple", "(", "[", "1", "]", "+", "list", "(", "v", ".", "shape", "[", "1", ":", "]", ")", ")", ")", "for", "k", ",", "v", "in", "self", ".", "data", "]" ]
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/example/fcn-xs/data.py#L108-L110
yue/yue
619d62c191b13c51c01be451dc48917c34a5aefc
building/tools/cpplint.py
python
ParseArguments
(args)
return filenames
Parses the command line arguments.

This may set the output format and verbosity level as side-effects.

Args:
  args: The command line arguments:

Returns:
  The list of filenames to lint.
Parses the command line arguments.
[ "Parses", "the", "command", "line", "arguments", "." ]
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions=',
                                                 'headers=',
                                                 'quiet'])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  quiet = _Quiet()
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--quiet':
      quiet = True
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be comma seperated list.')
    elif opt == '--headers':
      ProcessHppHeadersOption(val)

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetQuiet(quiet)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
[ "def", "ParseArguments", "(", "args", ")", ":", "try", ":", "(", "opts", ",", "filenames", ")", "=", "getopt", ".", "getopt", "(", "args", ",", "''", ",", "[", "'help'", ",", "'output='", ",", "'verbose='", ",", "'counting='", ",", "'filter='", ",", "'root='", ",", "'linelength='", ",", "'extensions='", ",", "'headers='", ",", "'quiet'", "]", ")", "except", "getopt", ".", "GetoptError", ":", "PrintUsage", "(", "'Invalid arguments.'", ")", "verbosity", "=", "_VerboseLevel", "(", ")", "output_format", "=", "_OutputFormat", "(", ")", "filters", "=", "''", "quiet", "=", "_Quiet", "(", ")", "counting_style", "=", "''", "for", "(", "opt", ",", "val", ")", "in", "opts", ":", "if", "opt", "==", "'--help'", ":", "PrintUsage", "(", "None", ")", "elif", "opt", "==", "'--output'", ":", "if", "val", "not", "in", "(", "'emacs'", ",", "'vs7'", ",", "'eclipse'", ")", ":", "PrintUsage", "(", "'The only allowed output formats are emacs, vs7 and eclipse.'", ")", "output_format", "=", "val", "elif", "opt", "==", "'--quiet'", ":", "quiet", "=", "True", "elif", "opt", "==", "'--verbose'", ":", "verbosity", "=", "int", "(", "val", ")", "elif", "opt", "==", "'--filter'", ":", "filters", "=", "val", "if", "not", "filters", ":", "PrintCategories", "(", ")", "elif", "opt", "==", "'--counting'", ":", "if", "val", "not", "in", "(", "'total'", ",", "'toplevel'", ",", "'detailed'", ")", ":", "PrintUsage", "(", "'Valid counting options are total, toplevel, and detailed'", ")", "counting_style", "=", "val", "elif", "opt", "==", "'--root'", ":", "global", "_root", "_root", "=", "val", "elif", "opt", "==", "'--linelength'", ":", "global", "_line_length", "try", ":", "_line_length", "=", "int", "(", "val", ")", "except", "ValueError", ":", "PrintUsage", "(", "'Line length must be digits.'", ")", "elif", "opt", "==", "'--extensions'", ":", "global", "_valid_extensions", "try", ":", "_valid_extensions", "=", "set", "(", "val", ".", "split", "(", "','", ")", ")", "except", "ValueError", ":", "PrintUsage", "(", "'Extensions must be comma seperated list.'", ")", "elif", "opt", "==", "'--headers'", ":", "ProcessHppHeadersOption", "(", "val", ")", "if", "not", "filenames", ":", "PrintUsage", "(", "'No files were specified.'", ")", "_SetOutputFormat", "(", "output_format", ")", "_SetQuiet", "(", "quiet", ")", "_SetVerboseLevel", "(", "verbosity", ")", "_SetFilters", "(", "filters", ")", "_SetCountingStyle", "(", "counting_style", ")", "return", "filenames" ]
https://github.com/yue/yue/blob/619d62c191b13c51c01be451dc48917c34a5aefc/building/tools/cpplint.py#L6129-L6204
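As a quick, hypothetical sketch of how the record above is driven (the flag values and `foo.cc` are placeholders, not part of the dataset):

```python
# Hypothetical invocation; ParseArguments applies its side effects
# (output format, verbosity, filters, counting style) and returns the
# positional file names to lint.
filenames = ParseArguments([
    '--output=vs7',                            # emacs, vs7, or eclipse
    '--counting=toplevel',                     # total, toplevel, detailed
    '--linelength=120',
    '--filter=-whitespace,+whitespace/braces',
    'foo.cc',
])
assert filenames == ['foo.cc']
```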
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/cluster/hierarchy.py
python
is_valid_im
(R, warning=False, throw=False, name=None)
return valid
Return True if the inconsistency matrix passed is valid. It must be an :math:`n` by 4 array of doubles. The standard deviations ``R[:,1]`` must be nonnegative. The link counts ``R[:,2]`` must be positive and no greater than :math:`n-1`. Parameters ---------- R : ndarray The inconsistency matrix to check for validity. warning : bool, optional When True, issues a Python warning if the linkage matrix passed is invalid. throw : bool, optional When True, throws a Python exception if the linkage matrix passed is invalid. name : str, optional This string refers to the variable name of the invalid linkage matrix. Returns ------- b : bool True if the inconsistency matrix is valid. See Also -------- linkage: for a description of what a linkage matrix is. inconsistent: for the creation of an inconsistency matrix. Examples -------- >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im >>> from scipy.spatial.distance import pdist Given a data set ``X``, we can apply a clustering method to obtain a linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can also be used to obtain the inconsistency matrix ``R`` associated to this clustering process: >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = ward(pdist(X)) >>> R = inconsistent(Z) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.29099445, 3. ], [ 5. , 13. , 1.29099445, 3. ], [ 8. , 14. , 1.29099445, 3. ], [11. , 15. , 1.29099445, 3. ], [16. , 17. , 5.77350269, 6. ], [18. , 19. , 5.77350269, 6. ], [20. , 21. , 8.16496581, 12. ]]) >>> R array([[1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [2.78516386, 2.58797734, 3. , 1.15470054], [2.78516386, 2.58797734, 3. , 1.15470054], [6.57065706, 1.38071187, 3. , 1.15470054]]) Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that ``R`` is correct: >>> is_valid_im(R) True However, if ``R`` is wrongly constructed (e.g. one of the standard deviations is set to a negative value) then the check will fail: >>> R[-1,1] = R[-1,1] * -1 >>> is_valid_im(R) False
Return True if the inconsistency matrix passed is valid.
[ "Return", "True", "if", "the", "inconsistency", "matrix", "passed", "is", "valid", "." ]
def is_valid_im(R, warning=False, throw=False, name=None): """Return True if the inconsistency matrix passed is valid. It must be a :math:`n` by 4 array of doubles. The standard deviations ``R[:,1]`` must be nonnegative. The link counts ``R[:,2]`` must be positive and no greater than :math:`n-1`. Parameters ---------- R : ndarray The inconsistency matrix to check for validity. warning : bool, optional When True, issues a Python warning if the linkage matrix passed is invalid. throw : bool, optional When True, throws a Python exception if the linkage matrix passed is invalid. name : str, optional This string refers to the variable name of the invalid linkage matrix. Returns ------- b : bool True if the inconsistency matrix is valid. See Also -------- linkage: for a description of what a linkage matrix is. inconsistent: for the creation of a inconsistency matrix. Examples -------- >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im >>> from scipy.spatial.distance import pdist Given a data set ``X``, we can apply a clustering method to obtain a linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can be also used to obtain the inconsistency matrix ``R`` associated to this clustering process: >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = ward(pdist(X)) >>> R = inconsistent(Z) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.29099445, 3. ], [ 5. , 13. , 1.29099445, 3. ], [ 8. , 14. , 1.29099445, 3. ], [11. , 15. , 1.29099445, 3. ], [16. , 17. , 5.77350269, 6. ], [18. , 19. , 5.77350269, 6. ], [20. , 21. , 8.16496581, 12. ]]) >>> R array([[1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [1.14549722, 0.20576415, 2. , 0.70710678], [2.78516386, 2.58797734, 3. , 1.15470054], [2.78516386, 2.58797734, 3. , 1.15470054], [6.57065706, 1.38071187, 3. , 1.15470054]]) Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that ``R`` is correct: >>> is_valid_im(R) True However, if ``R`` is wrongly constructed (e.g one of the standard deviations is set to a negative value) then the check will fail: >>> R[-1,1] = R[-1,1] * -1 >>> is_valid_im(R) False """ R = np.asarray(R, order='c') valid = True name_str = "%r " % name if name else '' try: if type(R) != np.ndarray: raise TypeError('Variable %spassed as inconsistency matrix is not ' 'a numpy array.' % name_str) if R.dtype != np.double: raise TypeError('Inconsistency matrix %smust contain doubles ' '(double).' % name_str) if len(R.shape) != 2: raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. ' 'be two-dimensional).' % name_str) if R.shape[1] != 4: raise ValueError('Inconsistency matrix %smust have 4 columns.' % name_str) if R.shape[0] < 1: raise ValueError('Inconsistency matrix %smust have at least one ' 'row.' % name_str) if (R[:, 0] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'height means.' % name_str) if (R[:, 1] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'height standard deviations.' % name_str) if (R[:, 2] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'counts.' % name_str) except Exception as e: if throw: raise if warning: _warning(str(e)) valid = False return valid
[ "def", "is_valid_im", "(", "R", ",", "warning", "=", "False", ",", "throw", "=", "False", ",", "name", "=", "None", ")", ":", "R", "=", "np", ".", "asarray", "(", "R", ",", "order", "=", "'c'", ")", "valid", "=", "True", "name_str", "=", "\"%r \"", "%", "name", "if", "name", "else", "''", "try", ":", "if", "type", "(", "R", ")", "!=", "np", ".", "ndarray", ":", "raise", "TypeError", "(", "'Variable %spassed as inconsistency matrix is not '", "'a numpy array.'", "%", "name_str", ")", "if", "R", ".", "dtype", "!=", "np", ".", "double", ":", "raise", "TypeError", "(", "'Inconsistency matrix %smust contain doubles '", "'(double).'", "%", "name_str", ")", "if", "len", "(", "R", ".", "shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Inconsistency matrix %smust have shape=2 (i.e. '", "'be two-dimensional).'", "%", "name_str", ")", "if", "R", ".", "shape", "[", "1", "]", "!=", "4", ":", "raise", "ValueError", "(", "'Inconsistency matrix %smust have 4 columns.'", "%", "name_str", ")", "if", "R", ".", "shape", "[", "0", "]", "<", "1", ":", "raise", "ValueError", "(", "'Inconsistency matrix %smust have at least one '", "'row.'", "%", "name_str", ")", "if", "(", "R", "[", ":", ",", "0", "]", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Inconsistency matrix %scontains negative link '", "'height means.'", "%", "name_str", ")", "if", "(", "R", "[", ":", ",", "1", "]", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Inconsistency matrix %scontains negative link '", "'height standard deviations.'", "%", "name_str", ")", "if", "(", "R", "[", ":", ",", "2", "]", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'Inconsistency matrix %scontains negative link '", "'counts.'", "%", "name_str", ")", "except", "Exception", "as", "e", ":", "if", "throw", ":", "raise", "if", "warning", ":", "_warning", "(", "str", "(", "e", ")", ")", "valid", "=", "False", "return", "valid" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/cluster/hierarchy.py#L2091-L2213
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/pycc/platform.py
python
Toolchain.link_shared
(self, output, objects, libraries=(), library_dirs=(), export_symbols=(), extra_ldflags=None)
Create a shared library *output* linking the given *objects* and *libraries* (all strings).
Create a shared library *output* linking the given *objects* and *libraries* (all strings).
[ "Create", "a", "shared", "library", "*", "output", "*", "linking", "the", "given", "*", "objects", "*", "and", "*", "libraries", "*", "(", "all", "strings", ")", "." ]
def link_shared(self, output, objects, libraries=(), library_dirs=(), export_symbols=(), extra_ldflags=None): """ Create a shared library *output* linking the given *objects* and *libraries* (all strings). """ output_dir, output_filename = os.path.split(output) self._compiler.link(CCompiler.SHARED_OBJECT, objects, output_filename, output_dir, libraries, library_dirs, export_symbols=export_symbols, extra_preargs=extra_ldflags)
[ "def", "link_shared", "(", "self", ",", "output", ",", "objects", ",", "libraries", "=", "(", ")", ",", "library_dirs", "=", "(", ")", ",", "export_symbols", "=", "(", ")", ",", "extra_ldflags", "=", "None", ")", ":", "output_dir", ",", "output_filename", "=", "os", ".", "path", ".", "split", "(", "output", ")", "self", ".", "_compiler", ".", "link", "(", "CCompiler", ".", "SHARED_OBJECT", ",", "objects", ",", "output_filename", ",", "output_dir", ",", "libraries", ",", "library_dirs", ",", "export_symbols", "=", "export_symbols", ",", "extra_preargs", "=", "extra_ldflags", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/pycc/platform.py#L147-L159
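A hedged sketch of calling this wrapper directly; the object files, library names, and the zero-argument `Toolchain()` construction are assumptions for illustration:

```python
# Assumed inputs: object files produced by an earlier compile step.
tc = Toolchain()  # assumes the default constructor is sufficient
tc.link_shared(
    'build/_mymodule.so',                    # shared library to produce
    ['build/mymodule.o', 'build/helpers.o'],
    libraries=['m'],                         # link against libm
    library_dirs=['/usr/lib'],
)
```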
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/_pyio.py
python
TextIOWrapper._rewind_decoded_chars
(self, n)
Rewind the _decoded_chars buffer.
Rewind the _decoded_chars buffer.
[ "Rewind", "the", "_decoded_chars", "buffer", "." ]
def _rewind_decoded_chars(self, n): """Rewind the _decoded_chars buffer.""" if self._decoded_chars_used < n: raise AssertionError("rewind decoded_chars out of bounds") self._decoded_chars_used -= n
[ "def", "_rewind_decoded_chars", "(", "self", ",", "n", ")", ":", "if", "self", ".", "_decoded_chars_used", "<", "n", ":", "raise", "AssertionError", "(", "\"rewind decoded_chars out of bounds\"", ")", "self", ".", "_decoded_chars_used", "-=", "n" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/_pyio.py#L1644-L1648
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/vis/backends/qtbackend.py
python
QtBackend.run
(self)
Starts the main loop
Starts the main loop
[ "Starts", "the", "main", "loop" ]
def run(self): """Starts the main loop""" assert self.window != None, "No windows create()'ed" self.window.show() self.app.exec_()
[ "def", "run", "(", "self", ")", ":", "assert", "self", ".", "window", "!=", "None", ",", "\"No windows create()'ed\"", "self", ".", "window", ".", "show", "(", ")", "self", ".", "app", ".", "exec_", "(", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/vis/backends/qtbackend.py#L453-L457
apple/swift-clang
d7403439fc6641751840b723e7165fb02f52db95
bindings/python/clang/cindex.py
python
CompilationDatabase.getCompileCommands
(self, filename)
return conf.lib.clang_CompilationDatabase_getCompileCommands(self, fspath(filename))
Get an iterable object providing all the CompileCommands available to build filename. Returns None if filename is not found in the database.
Get an iterable object providing all the CompileCommands available to build filename. Returns None if filename is not found in the database.
[ "Get", "an", "iterable", "object", "providing", "all", "the", "CompileCommands", "available", "to", "build", "filename", ".", "Returns", "None", "if", "filename", "is", "not", "found", "in", "the", "database", "." ]
def getCompileCommands(self, filename): """ Get an iterable object providing all the CompileCommands available to build filename. Returns None if filename is not found in the database. """ return conf.lib.clang_CompilationDatabase_getCompileCommands(self, fspath(filename))
[ "def", "getCompileCommands", "(", "self", ",", "filename", ")", ":", "return", "conf", ".", "lib", ".", "clang_CompilationDatabase_getCompileCommands", "(", "self", ",", "fspath", "(", "filename", ")", ")" ]
https://github.com/apple/swift-clang/blob/d7403439fc6641751840b723e7165fb02f52db95/bindings/python/clang/cindex.py#L3255-L3261
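A brief usage sketch for the record above, built on clang.cindex's documented entry points; the build and source paths are placeholders:

```python
from clang.cindex import CompilationDatabase

# Assumes a compile_commands.json exists in the build directory.
db = CompilationDatabase.fromDirectory('/path/to/build')
commands = db.getCompileCommands('/path/to/src/main.cpp')
if commands is not None:
    for cmd in commands:
        # Each CompileCommand carries its working directory and argv.
        print(cmd.directory, list(cmd.arguments))
```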
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
src/third_party/mozjs/extract/js/src/devtools/automation/autospider.py
python
set_vars_from_script
(script, vars)
Run a shell script, then dump out chosen environment variables. The build system uses shell scripts to do some configuration that we need to borrow. On Windows, the script itself must output the variable settings (in the form "export FOO=<value>"), since otherwise there will be problems with mismatched Windows/POSIX formats.
Run a shell script, then dump out chosen environment variables. The build system uses shell scripts to do some configuration that we need to borrow. On Windows, the script itself must output the variable settings (in the form "export FOO=<value>"), since otherwise there will be problems with mismatched Windows/POSIX formats.
[ "Run", "a", "shell", "script", "then", "dump", "out", "chosen", "environment", "variables", ".", "The", "build", "system", "uses", "shell", "scripts", "to", "do", "some", "configuration", "that", "we", "need", "to", "borrow", ".", "On", "Windows", "the", "script", "itself", "must", "output", "the", "variable", "settings", "(", "in", "the", "form", "export", "FOO", "=", "<value", ">", ")", "since", "otherwise", "there", "will", "be", "problems", "with", "mismatched", "Windows", "/", "POSIX", "formats", "." ]
def set_vars_from_script(script, vars): '''Run a shell script, then dump out chosen environment variables. The build system uses shell scripts to do some configuration that we need to borrow. On Windows, the script itself must output the variable settings (in the form "export FOO=<value>"), since otherwise there will be problems with mismatched Windows/POSIX formats. ''' script_text = 'source %s' % script if platform.system() == 'Windows': parse_state = 'parsing exports' else: script_text += '; echo VAR SETTINGS:; ' script_text += '; '.join('echo $' + var for var in vars) parse_state = 'scanning' stdout = subprocess.check_output(['sh', '-x', '-c', script_text]) tograb = vars[:] originals = {} for line in stdout.splitlines(): if parse_state == 'scanning': if line == 'VAR SETTINGS:': parse_state = 'grabbing' elif parse_state == 'grabbing': var = tograb.pop(0) env[var] = line elif parse_state == 'parsing exports': m = re.match(r'export (\w+)=(.*)', line) if m: var, value = m.groups() if var in tograb: env[var] = value print("Setting %s = %s" % (var, value)) if var.startswith("ORIGINAL_"): originals[var[9:]] = value # An added wrinkle: on Windows developer systems, the sourced script will # blow away current settings for eg LIBS, to point to the ones that would # be installed via automation. So we will append the original settings. (On # an automation system, the original settings will be empty or point to # nonexistent stuff.) if platform.system() == 'Windows': for var in vars: if var in originals and len(originals[var]) > 0: env[var] = "%s;%s" % (env[var], originals[var])
[ "def", "set_vars_from_script", "(", "script", ",", "vars", ")", ":", "script_text", "=", "'source %s'", "%", "script", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "parse_state", "=", "'parsing exports'", "else", ":", "script_text", "+=", "'; echo VAR SETTINGS:; '", "script_text", "+=", "'; '", ".", "join", "(", "'echo $'", "+", "var", "for", "var", "in", "vars", ")", "parse_state", "=", "'scanning'", "stdout", "=", "subprocess", ".", "check_output", "(", "[", "'sh'", ",", "'-x'", ",", "'-c'", ",", "script_text", "]", ")", "tograb", "=", "vars", "[", ":", "]", "originals", "=", "{", "}", "for", "line", "in", "stdout", ".", "splitlines", "(", ")", ":", "if", "parse_state", "==", "'scanning'", ":", "if", "line", "==", "'VAR SETTINGS:'", ":", "parse_state", "=", "'grabbing'", "elif", "parse_state", "==", "'grabbing'", ":", "var", "=", "tograb", ".", "pop", "(", "0", ")", "env", "[", "var", "]", "=", "line", "elif", "parse_state", "==", "'parsing exports'", ":", "m", "=", "re", ".", "match", "(", "r'export (\\w+)=(.*)'", ",", "line", ")", "if", "m", ":", "var", ",", "value", "=", "m", ".", "groups", "(", ")", "if", "var", "in", "tograb", ":", "env", "[", "var", "]", "=", "value", "print", "(", "\"Setting %s = %s\"", "%", "(", "var", ",", "value", ")", ")", "if", "var", ".", "startswith", "(", "\"ORIGINAL_\"", ")", ":", "originals", "[", "var", "[", "9", ":", "]", "]", "=", "value", "# An added wrinkle: on Windows developer systems, the sourced script will", "# blow away current settings for eg LIBS, to point to the ones that would", "# be installed via automation. So we will append the original settings. (On", "# an automation system, the original settings will be empty or point to", "# nonexistent stuff.)", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "for", "var", "in", "vars", ":", "if", "var", "in", "originals", "and", "len", "(", "originals", "[", "var", "]", ")", ">", "0", ":", "env", "[", "var", "]", "=", "\"%s;%s\"", "%", "(", "env", "[", "var", "]", ",", "originals", "[", "var", "]", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/mozjs/extract/js/src/devtools/automation/autospider.py#L98-L140
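A hypothetical call sketch; the script path and variable names are placeholders, and note that the function fills the module-level `env` mapping rather than returning anything:

```python
# Placeholder script/vars. On POSIX the script is sourced and the chosen
# variables are echoed back; on Windows the script itself must print
# 'export FOO=...' lines.
set_vars_from_script('build/environment.sh', ['CC', 'CXX', 'LIBS'])
print(env['CC'], env['CXX'])
```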
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/control_flow_state.py
python
_ControlFlowState.ProcessUnusedLoopExits
(self, pending_count, to_ops_set)
return loop_exits
Process all the "unused" loop exits. The "unused" exits of the loops are added to `unused_exits`. An exit is unused if its pending_count is 0. If there is an exit with real gradient, all these deferred exits will enter the backprop loop with zero gradient. Otherwise, they will enter the backprop loop with None. As an example, people often write: ```python v1, _ = tf.while_loop(p, b, [x1, x2]) result = gradients(v1, x1) ``` The exit node for x2 is not included by the betweenness analysis. But we need to backprop x2 if x2 is involved in computing v1. Args: pending_count: The number of backprop inputs for every op. to_ops_set: The set of ops for ys in gradients(ys, xs) Returns: The set of unused loop exits that we know at this point we need to backprop.
Process all the "unused" loop exits.
[ "Process", "all", "the", "unused", "loop", "exits", "." ]
def ProcessUnusedLoopExits(self, pending_count, to_ops_set): """Process all the "unused" loop exits. The "unused" exits of the loops are added to `unused_exits`. An exit is unused if its pending_count is 0. If there is an exit with real gradient, all these deferred exits will enter the backprop loop with zero gradient. Otherwise, they will enter the backprop loop with None. As an example, people often write: ```python v1, _ = tf.while_loop(p, b, [x1, x2]) result = gradients(v1, x1) ``` The exit node for x2 is not included by the betweenness analysis. But we need to backprop x2 if x2 is involved in computing v1. Args: pending_count: The number of backprop inputs for every op. to_ops_set: The set of ops for ys in gradients(ys, xs) Returns: The set of unused loop exits that we know at this point we need to backprop. """ loop_exits = [] for grad_state in self._map.values(): for y in grad_state.forward_loop_exits: if pending_count[y.op] == 0: grad_state.pending_exits_count -= 1 if y.op not in to_ops_set: grad_state.unused_exits.append(y) if grad_state.pending_exits_count == 0: loop_exits.extend(grad_state.unused_exits) # Need to include Enters in backprop for higher-order gradients. for y in grad_state.forward_context.loop_enters: if pending_count[y.op] == 0: pending_count[y.op] = 1 return loop_exits
[ "def", "ProcessUnusedLoopExits", "(", "self", ",", "pending_count", ",", "to_ops_set", ")", ":", "loop_exits", "=", "[", "]", "for", "grad_state", "in", "self", ".", "_map", ".", "values", "(", ")", ":", "for", "y", "in", "grad_state", ".", "forward_loop_exits", ":", "if", "pending_count", "[", "y", ".", "op", "]", "==", "0", ":", "grad_state", ".", "pending_exits_count", "-=", "1", "if", "y", ".", "op", "not", "in", "to_ops_set", ":", "grad_state", ".", "unused_exits", ".", "append", "(", "y", ")", "if", "grad_state", ".", "pending_exits_count", "==", "0", ":", "loop_exits", ".", "extend", "(", "grad_state", ".", "unused_exits", ")", "# Need to include Enters in backprop for higher-order gradients.", "for", "y", "in", "grad_state", ".", "forward_context", ".", "loop_enters", ":", "if", "pending_count", "[", "y", ".", "op", "]", "==", "0", ":", "pending_count", "[", "y", ".", "op", "]", "=", "1", "return", "loop_exits" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/control_flow_state.py#L511-L549
devpack/android-python27
d42dd67565e104cf7b0b50eb473f615db3e69901
python-build-with-qt/sip-4.11.2/sipconfig.py
python
Makefile.clean_build_file_objects
(self, mfile, build)
Generate the clean target. mfile is the file object. build is the dictionary created from the build file.
Generate the clean target.
[ "Generate", "the", "clean", "target", "." ]
def clean_build_file_objects(self, mfile, build): """Generate the clean target. mfile is the file object. build is the dictionary created from the build file. """ mfile.write("\t-%s $(TARGET)\n" % self.rm) for f in build["objects"].split(): mfile.write("\t-%s %s\n" % (self.rm, f)) for f in build["moc_headers"].split(): root, discard = os.path.splitext(f) mfile.write("\t-%s moc_%s.cpp\n" % (self.rm, root))
[ "def", "clean_build_file_objects", "(", "self", ",", "mfile", ",", "build", ")", ":", "mfile", ".", "write", "(", "\"\\t-%s $(TARGET)\\n\"", "%", "self", ".", "rm", ")", "for", "f", "in", "build", "[", "\"objects\"", "]", ".", "split", "(", ")", ":", "mfile", ".", "write", "(", "\"\\t-%s %s\\n\"", "%", "(", "self", ".", "rm", ",", "f", ")", ")", "for", "f", "in", "build", "[", "\"moc_headers\"", "]", ".", "split", "(", ")", ":", "root", ",", "discard", "=", "os", ".", "path", ".", "splitext", "(", "f", ")", "mfile", ".", "write", "(", "\"\\t-%s moc_%s.cpp\\n\"", "%", "(", "self", ".", "rm", ",", "root", ")", ")" ]
https://github.com/devpack/android-python27/blob/d42dd67565e104cf7b0b50eb473f615db3e69901/python-build-with-qt/sip-4.11.2/sipconfig.py#L1079-L1092
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/tools/quantization/quantize_graph.py
python
GraphRewriter.should_merge_with_fake_quant_node
(self)
return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"]
Should the current node merge with self.state.output_node_stack[-1]?
Should the current node merge with self.state.output_node_stack[-1]?
[ "Should", "the", "current", "node", "merge", "with", "self", ".", "state", ".", "output_node_stack", "[", "-", "1", "]", "?" ]
def should_merge_with_fake_quant_node(self): """Should the current node merge with self.state.output_node_stack[-1]?""" if not self.state.output_node_stack: return False top = self.state.output_node_stack[-1] return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"]
[ "def", "should_merge_with_fake_quant_node", "(", "self", ")", ":", "if", "not", "self", ".", "state", ".", "output_node_stack", ":", "return", "False", "top", "=", "self", ".", "state", ".", "output_node_stack", "[", "-", "1", "]", "return", "top", "[", "1", "]", "==", "0", "and", "top", "[", "0", "]", ".", "op", "in", "[", "\"FakeQuantWithMinMaxVars\"", "]" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/tools/quantization/quantize_graph.py#L554-L559
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
plugin/opencv/opencv.py
python
random_size_crop
(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0))
return random_crop(src, size)
Randomly crop src with size. Randomize area and aspect ratio
Randomly crop src with size. Randomize area and aspect ratio
[ "Randomly", "crop", "src", "with", "size", ".", "Randomize", "area", "and", "aspect", "ratio" ]
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)): """Randomly crop src with size. Randomize area and aspect ratio""" h, w, _ = src.shape area = w*h for _ in range(10): new_area = random.uniform(min_area, 1.0) * area new_ratio = random.uniform(*ratio) new_w = int(new_area*new_ratio) new_h = int(new_area/new_ratio) if random.uniform(0., 1.) < 0.5: new_w, new_h = new_h, new_w if new_w > w or new_h > h: continue x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h) return random_crop(src, size)
[ "def", "random_size_crop", "(", "src", ",", "size", ",", "min_area", "=", "0.25", ",", "ratio", "=", "(", "3.0", "/", "4.0", ",", "4.0", "/", "3.0", ")", ")", ":", "h", ",", "w", ",", "_", "=", "src", ".", "shape", "area", "=", "w", "*", "h", "for", "_", "in", "range", "(", "10", ")", ":", "new_area", "=", "random", ".", "uniform", "(", "min_area", ",", "1.0", ")", "*", "area", "new_ratio", "=", "random", ".", "uniform", "(", "*", "ratio", ")", "new_w", "=", "int", "(", "new_area", "*", "new_ratio", ")", "new_h", "=", "int", "(", "new_area", "/", "new_ratio", ")", "if", "random", ".", "uniform", "(", "0.", ",", "1.", ")", "<", "0.5", ":", "new_w", ",", "new_h", "=", "new_h", ",", "new_w", "if", "new_w", ">", "w", "or", "new_h", ">", "h", ":", "continue", "x0", "=", "random", ".", "randint", "(", "0", ",", "w", "-", "new_w", ")", "y0", "=", "random", ".", "randint", "(", "0", ",", "h", "-", "new_h", ")", "out", "=", "fixed_crop", "(", "src", ",", "x0", ",", "y0", ",", "new_w", ",", "new_h", ",", "size", ")", "return", "out", ",", "(", "x0", ",", "y0", ",", "new_w", ",", "new_h", ")", "return", "random_crop", "(", "src", ",", "size", ")" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/plugin/opencv/opencv.py#L131-L153
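A small usage sketch for the record above; it assumes an HxWxC image array compatible with the module's fixed_crop/random_crop helpers:

```python
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image
# Returns the resized crop plus the (x0, y0, w, h) box actually taken;
# after 10 failed attempts it falls back to a plain random_crop.
out, (x0, y0, new_w, new_h) = random_size_crop(img, (224, 224))
```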
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/cudadrv/autotune.py
python
AutoTuner.by_occupancy
(self)
return self._by_occupancy
A list of tuple-2 of (occupancy, thread-per-block) sorted in descending order. The first item has the highest occupancy and the lowest number of thread-per-block.
A list of tuple-2 of (occupancy, thread-per-block) sorted in descending order.
[ "A", "list", "of", "tuple", "-", "2", "of", "(", "occupancy", "thread", "-", "per", "-", "block", ")", "sorted", "in", "descending", "order", "." ]
def by_occupancy(self): """A list of tuple-2 of (occupancy, thread-per-block) sorted in descending. The first item has the highest occupancy and the lowest number of thread-per-block. """ return self._by_occupancy
[ "def", "by_occupancy", "(", "self", ")", ":", "return", "self", ".", "_by_occupancy" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/cudadrv/autotune.py#L66-L73
DaFuCoding/MTCNN_Caffe
09c30c3ff391bd9cb6b249c1910afaf147767ab3
scripts/cpp_lint.py
python
_CppLintState.SetFilters
(self, filters)
Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
Sets the error-message filters.
[ "Sets", "the", "error", "-", "message", "filters", "." ]
def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. self.filters = _DEFAULT_FILTERS[:] for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt)
[ "def", "SetFilters", "(", "self", ",", "filters", ")", ":", "# Default filters always have less priority than the flag ones.", "self", ".", "filters", "=", "_DEFAULT_FILTERS", "[", ":", "]", "for", "filt", "in", "filters", ".", "split", "(", "','", ")", ":", "clean_filt", "=", "filt", ".", "strip", "(", ")", "if", "clean_filt", ":", "self", ".", "filters", ".", "append", "(", "clean_filt", ")", "for", "filt", "in", "self", ".", "filters", ":", "if", "not", "(", "filt", ".", "startswith", "(", "'+'", ")", "or", "filt", ".", "startswith", "(", "'-'", ")", ")", ":", "raise", "ValueError", "(", "'Every filter in --filters must start with + or -'", "' (%s does not)'", "%", "filt", ")" ]
https://github.com/DaFuCoding/MTCNN_Caffe/blob/09c30c3ff391bd9cb6b249c1910afaf147767ab3/scripts/cpp_lint.py#L717-L740
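A short worked sketch of the filter semantics in the record above; `_CppLintState` comes from the same cpplint module:

```python
state = _CppLintState()
# Defaults from _DEFAULT_FILTERS stay first; user filters are appended.
state.SetFilters('-whitespace/indent,+build/include_order')

try:
    state.SetFilters('whitespace/indent')  # missing '+'/'-' prefix
except ValueError as exc:
    print(exc)  # every filter must start with + or -
```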
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/communication/_comm_helper.py
python
_get_rank_helper
(group, backend)
return rank_id
Helper for get_rank_id. Args: group (str): The communication group. backend (str): The backend, like "hccl". Raises: ValueError: If backend is invalid. Returns: Integer. The local rank id of the calling process.
Helper for get_rank_id.
[ "Helper", "for", "get_rank_id", "." ]
def _get_rank_helper(group, backend): """ The Helper to do get_rank_id. Args: group (str): The communication group. backend (str): The backend, like "hccl". Raises: ValueError: If backend is invalid. Returns: Integer. The local rank id of the calling process. """ rank_id = None if _is_role_pserver() or _is_role_sched(): rank_id = 0 return rank_id if backend == Backend.HCCL_MPI: rank_id = mpi.get_rank_id(group) elif backend == Backend.HCCL: if group == HCCL_WORLD_COMM_GROUP: rank_id = hccl.get_rank_id() else: rank_id = hccl.get_rank_id(group) elif backend == Backend.NCCL: rank_id = get_rank_id(group) else: raise ValueError("For '_get_rank_helper', the argument 'backend' {} is not supported, " "please use hccl_mpi, hccl or nccl.".format(backend)) return rank_id
[ "def", "_get_rank_helper", "(", "group", ",", "backend", ")", ":", "rank_id", "=", "None", "if", "_is_role_pserver", "(", ")", "or", "_is_role_sched", "(", ")", ":", "rank_id", "=", "0", "return", "rank_id", "if", "backend", "==", "Backend", ".", "HCCL_MPI", ":", "rank_id", "=", "mpi", ".", "get_rank_id", "(", "group", ")", "elif", "backend", "==", "Backend", ".", "HCCL", ":", "if", "group", "==", "HCCL_WORLD_COMM_GROUP", ":", "rank_id", "=", "hccl", ".", "get_rank_id", "(", ")", "else", ":", "rank_id", "=", "hccl", ".", "get_rank_id", "(", "group", ")", "elif", "backend", "==", "Backend", ".", "NCCL", ":", "rank_id", "=", "get_rank_id", "(", "group", ")", "else", ":", "raise", "ValueError", "(", "\"For '_get_rank_helper', the argument 'backend' {} is not supported, \"", "\"please use hccl_mpi, hccl or nccl.\"", ".", "format", "(", "backend", ")", ")", "return", "rank_id" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/communication/_comm_helper.py#L188-L218
alexgkendall/caffe-posenet
62aafbd7c45df91acdba14f5d1406d8295c2bc6f
scripts/cpp_lint.py
python
CleanseComments
(line)
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed.
Removes //-comments and single-line C-style /* */ comments.
[ "Removes", "//", "-", "comments", "and", "single", "-", "line", "C", "-", "style", "/", "*", "*", "/", "comments", "." ]
def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
[ "def", "CleanseComments", "(", "line", ")", ":", "commentpos", "=", "line", ".", "find", "(", "'//'", ")", "if", "commentpos", "!=", "-", "1", "and", "not", "IsCppString", "(", "line", "[", ":", "commentpos", "]", ")", ":", "line", "=", "line", "[", ":", "commentpos", "]", ".", "rstrip", "(", ")", "# get rid of /* ... */", "return", "_RE_PATTERN_CLEANSE_LINE_C_COMMENTS", ".", "sub", "(", "''", ",", "line", ")" ]
https://github.com/alexgkendall/caffe-posenet/blob/62aafbd7c45df91acdba14f5d1406d8295c2bc6f/scripts/cpp_lint.py#L1167-L1180
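Two worked examples of the behavior described above (the `_RE_PATTERN_CLEANSE_LINE_C_COMMENTS` regex is defined elsewhere in cpplint):

```python
CleanseComments('int a = 0;  // counter')   # -> 'int a = 0;'
# '//' inside a string literal is left alone by the IsCppString guard:
CleanseComments('s = "http://x";')          # -> 's = "http://x";'
# Single-line /* ... */ comments fall to the regex substitution:
CleanseComments('int b /* unused */ = 1;')  # -> 'int b  = 1;'
```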
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/urllib3/poolmanager.py
python
_default_key_normalizer
(key_class, request_context)
return key_class(**context)
Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contains the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey
Create a pool key out of a request context dictionary.
[ "Create", "a", "pool", "key", "out", "of", "a", "request", "context", "dictionary", "." ]
def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context['scheme'] = context['scheme'].lower() context['host'] = context['host'].lower() # These are both dictionaries and need to be transformed into frozensets for key in ('headers', '_proxy_headers', '_socks_options'): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get('socket_options') if socket_opts is not None: context['socket_options'] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context['key_' + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context)
[ "def", "_default_key_normalizer", "(", "key_class", ",", "request_context", ")", ":", "# Since we mutate the dictionary, make a copy first", "context", "=", "request_context", ".", "copy", "(", ")", "context", "[", "'scheme'", "]", "=", "context", "[", "'scheme'", "]", ".", "lower", "(", ")", "context", "[", "'host'", "]", "=", "context", "[", "'host'", "]", ".", "lower", "(", ")", "# These are both dictionaries and need to be transformed into frozensets", "for", "key", "in", "(", "'headers'", ",", "'_proxy_headers'", ",", "'_socks_options'", ")", ":", "if", "key", "in", "context", "and", "context", "[", "key", "]", "is", "not", "None", ":", "context", "[", "key", "]", "=", "frozenset", "(", "context", "[", "key", "]", ".", "items", "(", ")", ")", "# The socket_options key may be a list and needs to be transformed into a", "# tuple.", "socket_opts", "=", "context", ".", "get", "(", "'socket_options'", ")", "if", "socket_opts", "is", "not", "None", ":", "context", "[", "'socket_options'", "]", "=", "tuple", "(", "socket_opts", ")", "# Map the kwargs to the names in the namedtuple - this is necessary since", "# namedtuples can't have fields starting with '_'.", "for", "key", "in", "list", "(", "context", ".", "keys", "(", ")", ")", ":", "context", "[", "'key_'", "+", "key", "]", "=", "context", ".", "pop", "(", "key", ")", "# Default to ``None`` for keys missing from the context", "for", "field", "in", "key_class", ".", "_fields", ":", "if", "field", "not", "in", "context", ":", "context", "[", "field", "]", "=", "None", "return", "key_class", "(", "*", "*", "context", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/urllib3/poolmanager.py#L57-L103
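A usage sketch for the record above; urllib3's real `PoolKey` namedtuple has many more `key_*` fields, so the abbreviated one here is an assumption:

```python
from collections import namedtuple

# Abbreviated stand-in for urllib3's PoolKey (assumption).
PoolKey = namedtuple('PoolKey', ['key_scheme', 'key_host', 'key_port'])

ctx = {'scheme': 'HTTPS', 'host': 'Example.COM', 'port': 443}
key = _default_key_normalizer(PoolKey, ctx)
# Scheme and host are lower-cased so equivalent URLs share one pool key.
assert key == PoolKey('https', 'example.com', 443)
```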
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/presenter/DrillSettingsPresenter.py
python
DrillSettingsPresenter.onApplied
(self)
Triggered when the apply button is pressed. This removes the initial values kept in memory.
Triggered when the apply button is pressed. This removes the initial values kept in memory.
[ "Triggered", "when", "the", "apply", "button", "is", "pressed", ".", "This", "removes", "the", "initial", "values", "kept", "in", "memory", "." ]
def onApplied(self): """ Triggered when the apply button is pressed. This removes the initial values kept in memory. """ self._initialValues = dict()
[ "def", "onApplied", "(", "self", ")", ":", "self", ".", "_initialValues", "=", "dict", "(", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/presenter/DrillSettingsPresenter.py#L84-L89
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Subst.py
python
SpecialAttrWrapper.__init__
(self, lstr, for_signature=None)
The for_signature parameter, if supplied, will be the canonical string we return from for_signature(). Else we will simply return lstr.
The for_signature parameter, if supplied, will be the canonical string we return from for_signature(). Else we will simply return lstr.
[ "The", "for_signature", "parameter", "if", "supplied", "will", "be", "the", "canonical", "string", "we", "return", "from", "for_signature", "()", ".", "Else", "we", "will", "simply", "return", "lstr", "." ]
def __init__(self, lstr, for_signature=None): """The for_signature parameter, if supplied, will be the canonical string we return from for_signature(). Else we will simply return lstr.""" self.lstr = lstr if for_signature: self.forsig = for_signature else: self.forsig = lstr
[ "def", "__init__", "(", "self", ",", "lstr", ",", "for_signature", "=", "None", ")", ":", "self", ".", "lstr", "=", "lstr", "if", "for_signature", ":", "self", ".", "forsig", "=", "for_signature", "else", ":", "self", ".", "forsig", "=", "lstr" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Subst.py#L98-L106
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py
python
PathFinder.invalidate_caches
(cls)
Call the invalidate_caches() method on all path entry finders stored in sys.path_importer_cache (where implemented).
Call the invalidate_caches() method on all path entry finders stored in sys.path_importer_cache (where implemented).
[ "Call", "the", "invalidate_caches", "()", "method", "on", "all", "path", "entry", "finders", "stored", "in", "sys", ".", "path_importer_cache", "(", "where", "implemented", ")", "." ]
def invalidate_caches(cls): """Call the invalidate_caches() method on all path entry finders stored in sys.path_importer_caches (where implemented).""" for name, finder in list(sys.path_importer_cache.items()): if finder is None: del sys.path_importer_cache[name] elif hasattr(finder, 'invalidate_caches'): finder.invalidate_caches()
[ "def", "invalidate_caches", "(", "cls", ")", ":", "for", "name", ",", "finder", "in", "list", "(", "sys", ".", "path_importer_cache", ".", "items", "(", ")", ")", ":", "if", "finder", "is", "None", ":", "del", "sys", ".", "path_importer_cache", "[", "name", "]", "elif", "hasattr", "(", "finder", ",", "'invalidate_caches'", ")", ":", "finder", ".", "invalidate_caches", "(", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py#L1315-L1322
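In practice this classmethod is reached through the top-level helper, as in this short sketch:

```python
import importlib

# Walks sys.meta_path and ends up in PathFinder.invalidate_caches(),
# which drops dead sys.path_importer_cache entries and delegates to
# each finder's own invalidate_caches() where implemented.
importlib.invalidate_caches()  # e.g. after writing a new module file
```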
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/jinja2/filters.py
python
do_slice
(value, slices, fill_with=None)
Slice an iterator and return a list of lists containing those items. Useful if you want to create a div containing three ul tags that represent columns: .. sourcecode:: html+jinja <div class="columwrapper"> {%- for column in items|slice(3) %} <ul class="column-{{ loop.index }}"> {%- for item in column %} <li>{{ item }}</li> {%- endfor %} </ul> {%- endfor %} </div> If you pass it a second argument it's used to fill missing values on the last iteration.
Slice an iterator and return a list of lists containing those items. Useful if you want to create a div containing three ul tags that represent columns:
[ "Slice", "an", "iterator", "and", "return", "a", "list", "of", "lists", "containing", "those", "items", ".", "Useful", "if", "you", "want", "to", "create", "a", "div", "containing", "three", "ul", "tags", "that", "represent", "columns", ":" ]
def do_slice(value, slices, fill_with=None): """Slice an iterator and return a list of lists containing those items. Useful if you want to create a div containing three ul tags that represent columns: .. sourcecode:: html+jinja <div class="columwrapper"> {%- for column in items|slice(3) %} <ul class="column-{{ loop.index }}"> {%- for item in column %} <li>{{ item }}</li> {%- endfor %} </ul> {%- endfor %} </div> If you pass it a second argument it's used to fill missing values on the last iteration. """ seq = list(value) length = len(seq) items_per_slice = length // slices slices_with_extra = length % slices offset = 0 for slice_number in range(slices): start = offset + slice_number * items_per_slice if slice_number < slices_with_extra: offset += 1 end = offset + (slice_number + 1) * items_per_slice tmp = seq[start:end] if fill_with is not None and slice_number >= slices_with_extra: tmp.append(fill_with) yield tmp
[ "def", "do_slice", "(", "value", ",", "slices", ",", "fill_with", "=", "None", ")", ":", "seq", "=", "list", "(", "value", ")", "length", "=", "len", "(", "seq", ")", "items_per_slice", "=", "length", "//", "slices", "slices_with_extra", "=", "length", "%", "slices", "offset", "=", "0", "for", "slice_number", "in", "range", "(", "slices", ")", ":", "start", "=", "offset", "+", "slice_number", "*", "items_per_slice", "if", "slice_number", "<", "slices_with_extra", ":", "offset", "+=", "1", "end", "=", "offset", "+", "(", "slice_number", "+", "1", ")", "*", "items_per_slice", "tmp", "=", "seq", "[", "start", ":", "end", "]", "if", "fill_with", "is", "not", "None", "and", "slice_number", ">=", "slices_with_extra", ":", "tmp", ".", "append", "(", "fill_with", ")", "yield", "tmp" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/jinja2/filters.py#L560-L593
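A worked example of the slicing arithmetic in the record above (the filter is a generator, so it is materialized with `list`):

```python
items = list(range(1, 11))               # 10 items into 3 columns
print(list(do_slice(items, 3)))
# -> [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]

print(list(do_slice(items, 3, fill_with=0)))
# -> [[1, 2, 3, 4], [5, 6, 7, 0], [8, 9, 10, 0]]
```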
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/richtext.py
python
RichTextObject.ExportXML
(*args)
return _richtext.RichTextObject_ExportXML(*args)
ExportXML(self, wxOutputStream stream, int indent, RichTextXMLHandler handler) -> bool ExportXML(self, wxXmlNode parent, RichTextXMLHandler handler) -> bool
ExportXML(self, wxOutputStream stream, int indent, RichTextXMLHandler handler) -> bool ExportXML(self, wxXmlNode parent, RichTextXMLHandler handler) -> bool
[ "ExportXML", "(", "self", "wxOutputStream", "stream", "int", "indent", "RichTextXMLHandler", "handler", ")", "-", ">", "bool", "ExportXML", "(", "self", "wxXmlNode", "parent", "RichTextXMLHandler", "handler", ")", "-", ">", "bool" ]
def ExportXML(*args): """ ExportXML(self, wxOutputStream stream, int indent, RichTextXMLHandler handler) -> bool ExportXML(self, wxXmlNode parent, RichTextXMLHandler handler) -> bool """ return _richtext.RichTextObject_ExportXML(*args)
[ "def", "ExportXML", "(", "*", "args", ")", ":", "return", "_richtext", ".", "RichTextObject_ExportXML", "(", "*", "args", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/richtext.py#L1269-L1274
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/propgrid.py
python
PGMultiButton.GetCount
(*args, **kwargs)
return _propgrid.PGMultiButton_GetCount(*args, **kwargs)
GetCount(self) -> int
GetCount(self) -> int
[ "GetCount", "(", "self", ")", "-", ">", "int" ]
def GetCount(*args, **kwargs): """GetCount(self) -> int""" return _propgrid.PGMultiButton_GetCount(*args, **kwargs)
[ "def", "GetCount", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PGMultiButton_GetCount", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L2835-L2837
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
makepanda/makepanda.py
python
CompileJava
(target, src, opts)
Compiles a .java file into a .class file.
Compiles a .java file into a .class file.
[ "Compiles", "a", ".", "java", "file", "into", "a", ".", "class", "file", "." ]
def CompileJava(target, src, opts): """Compiles a .java file into a .class file.""" if GetHost() == 'android': cmd = "ecj " else: cmd = "javac -bootclasspath " + BracketNameWithQuotes(SDK["ANDROID_JAR"]) + " " optlevel = GetOptimizeOption(opts) if optlevel >= 4: cmd += "-debug:none " cmd += "-cp " + GetOutputDir() + "/classes " cmd += "-d " + GetOutputDir() + "/classes " cmd += BracketNameWithQuotes(src) oscmd(cmd)
[ "def", "CompileJava", "(", "target", ",", "src", ",", "opts", ")", ":", "if", "GetHost", "(", ")", "==", "'android'", ":", "cmd", "=", "\"ecj \"", "else", ":", "cmd", "=", "\"javac -bootclasspath \"", "+", "BracketNameWithQuotes", "(", "SDK", "[", "\"ANDROID_JAR\"", "]", ")", "+", "\" \"", "optlevel", "=", "GetOptimizeOption", "(", "opts", ")", "if", "optlevel", ">=", "4", ":", "cmd", "+=", "\"-debug:none \"", "cmd", "+=", "\"-cp \"", "+", "GetOutputDir", "(", ")", "+", "\"/classes \"", "cmd", "+=", "\"-d \"", "+", "GetOutputDir", "(", ")", "+", "\"/classes \"", "cmd", "+=", "BracketNameWithQuotes", "(", "src", ")", "oscmd", "(", "cmd", ")" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/makepanda/makepanda.py#L2042-L2056
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/botocore/session.py
python
Session.get_available_partitions
(self)
return resolver.get_available_partitions()
Lists the available partitions found on disk :rtype: list :return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
Lists the available partitions found on disk
[ "Lists", "the", "available", "partitions", "found", "on", "disk" ]
def get_available_partitions(self): """Lists the available partitions found on disk :rtype: list :return: Returns a list of partition names (e.g., ["aws", "aws-cn"]) """ resolver = self.get_component('endpoint_resolver') return resolver.get_available_partitions()
[ "def", "get_available_partitions", "(", "self", ")", ":", "resolver", "=", "self", ".", "get_component", "(", "'endpoint_resolver'", ")", "return", "resolver", ".", "get_available_partitions", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/session.py#L871-L878
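A usage sketch through botocore's public session factory:

```python
import botocore.session

session = botocore.session.get_session()
# Delegates to the 'endpoint_resolver' component; typical results
# include 'aws', 'aws-cn', and 'aws-us-gov', depending on the bundled
# endpoint data.
print(session.get_available_partitions())
```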
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/tools/docs/pretty_docs.py
python
_Metadata.build_html
(self)
return '\n'.join(parts)
Return the Metadata block as an Html string.
Return the Metadata block as an Html string.
[ "Return", "the", "Metadata", "block", "as", "an", "Html", "string", "." ]
def build_html(self): """Return the Metadata block as an Html string.""" schema = 'http://developers.google.com/ReferenceObject' parts = ['<div itemscope itemtype="%s">' % schema] parts.append('<meta itemprop="name" content="%s" />' % self.name) for item in self._content: parts.append('<meta itemprop="property" content="%s"/>' % item) parts.extend(['</div>', '', '']) return '\n'.join(parts)
[ "def", "build_html", "(", "self", ")", ":", "schema", "=", "'http://developers.google.com/ReferenceObject'", "parts", "=", "[", "'<div itemscope itemtype=\"%s\">'", "%", "schema", "]", "parts", ".", "append", "(", "'<meta itemprop=\"name\" content=\"%s\" />'", "%", "self", ".", "name", ")", "for", "item", "in", "self", ".", "_content", ":", "parts", ".", "append", "(", "'<meta itemprop=\"property\" content=\"%s\"/>'", "%", "item", ")", "parts", ".", "extend", "(", "[", "'</div>'", ",", "''", ",", "''", "]", ")", "return", "'\\n'", ".", "join", "(", "parts", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/tools/docs/pretty_docs.py#L336-L347
rdiankov/openrave
d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7
3rdparty/flann-1.6.6/src/python/pyflann/index.py
python
FLANN.__init__
(self, **kwargs)
Constructor for the class, returning an object that can bind to the flann libraries. Any keyword arguments passed to __init__ override the global defaults given.
Constructor for the class, returning an object that can bind to the flann libraries. Any keyword arguments passed to __init__ override the global defaults given.
[ "Constructor", "for", "the", "class", ",", "returning", "an", "object", "that", "can", "bind", "to", "the", "flann", "libraries", ".", "Any", "keyword", "arguments", "passed", "to", "__init__", "override", "the", "global", "defaults", "given", "." ]
def __init__(self, **kwargs): """ Constructor for the class and returns a class that can bind to the flann libraries. Any keyword arguments passed to __init__ override the global defaults given. """ self.__rn_gen.seed() self.__curindex = None self.__curindex_data = None self.__curindex_type = None self.__flann_parameters = FLANNParameters() self.__flann_parameters.update(kwargs)
[ "def", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "__rn_gen", ".", "seed", "(", ")", "self", ".", "__curindex", "=", "None", "self", ".", "__curindex_data", "=", "None", "self", ".", "__curindex_type", "=", "None", "self", ".", "__flann_parameters", "=", "FLANNParameters", "(", ")", "self", ".", "__flann_parameters", ".", "update", "(", "kwargs", ")" ]
https://github.com/rdiankov/openrave/blob/d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7/3rdparty/flann-1.6.6/src/python/pyflann/index.py#L65-L79
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/examples/image_retraining/retrain.py
python
cache_bottlenecks
(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)
Ensures all the training, testing, and validation bottlenecks are cached. Because we're likely to read the same image multiple times (if there are no distortions applied during training) it can speed things up a lot if we calculate the bottleneck layer values once for each image during preprocessing, and then just read those cached values repeatedly during training. Here we go through all the images we've found, calculate those values, and save them off. Args: sess: The current active TensorFlow Session. image_lists: Dictionary of training images for each label. image_dir: Root folder string of the subfolders containing the training images. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: Input tensor for jpeg data from file. bottleneck_tensor: The penultimate output layer of the graph. Returns: Nothing.
Ensures all the training, testing, and validation bottlenecks are cached.
[ "Ensures", "all", "the", "training", "testing", "and", "validation", "bottlenecks", "are", "cached", "." ]
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor): """Ensures all the training, testing, and validation bottlenecks are cached. Because we're likely to read the same image multiple times (if there are no distortions applied during training) it can speed things up a lot if we calculate the bottleneck layer values once for each image during preprocessing, and then just read those cached values repeatedly during training. Here we go through all the images we've found, calculate those values, and save them off. Args: sess: The current active TensorFlow Session. image_lists: Dictionary of training images for each label. image_dir: Root folder string of the subfolders containing the training images. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: Input tensor for jpeg data from file. bottleneck_tensor: The penultimate output layer of the graph. Returns: Nothing. """ how_many_bottlenecks = 0 ensure_dir_exists(bottleneck_dir) for label_name, label_lists in image_lists.items(): for category in ['training', 'testing', 'validation']: category_list = label_lists[category] for index, unused_base_name in enumerate(category_list): get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor) how_many_bottlenecks += 1 if how_many_bottlenecks % 100 == 0: print(str(how_many_bottlenecks) + ' bottleneck files created.')
[ "def", "cache_bottlenecks", "(", "sess", ",", "image_lists", ",", "image_dir", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "bottleneck_tensor", ")", ":", "how_many_bottlenecks", "=", "0", "ensure_dir_exists", "(", "bottleneck_dir", ")", "for", "label_name", ",", "label_lists", "in", "image_lists", ".", "items", "(", ")", ":", "for", "category", "in", "[", "'training'", ",", "'testing'", ",", "'validation'", "]", ":", "category_list", "=", "label_lists", "[", "category", "]", "for", "index", ",", "unused_base_name", "in", "enumerate", "(", "category_list", ")", ":", "get_or_create_bottleneck", "(", "sess", ",", "image_lists", ",", "label_name", ",", "index", ",", "image_dir", ",", "category", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "bottleneck_tensor", ")", "how_many_bottlenecks", "+=", "1", "if", "how_many_bottlenecks", "%", "100", "==", "0", ":", "print", "(", "str", "(", "how_many_bottlenecks", ")", "+", "' bottleneck files created.'", ")" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/examples/image_retraining/retrain.py#L425-L459
CleverRaven/Cataclysm-DDA
03e7363df0835ec1b39da973ea29f26f27833b38
tools/json_tools/itemgroups.py
python
get_item_data
(entries, categories=None, ignore=None)
return (item_to_group, item_orphan, item_entry, problems)
Scans the raw data structure from JSON and constructs an item to group map, a dict of orphans, a dict of items, and a list of potential problems
Scans the raw data structure from JSON and constructs an item to group map, a dict of orphans, a dict of items, and a list of potential problems
[ "Scans", "the", "raw", "data", "structure", "from", "JSON", "and", "constructs", "an", "item", "to", "group", "map", "a", "dict", "of", "orphans", "a", "dict", "of", "items", "and", "a", "list", "of", "potential", "problems" ]
def get_item_data(entries, categories=None, ignore=None): """Scans the raw data structure from JSON and constructs an item to group map, a dict of orphans, a dict of items, an a list of potential problems""" ignore_items = ["battery_test"] TYPE_WHITELIST.append("item_group") if ignore: ignore_items = ignore item_categories = DEFAULT_CATEGORIES if categories: item_categories = categories item_group = {} item_entry = {} item_orphan = {} item_to_group = {} problems = [] for entry in entries: copy_from = entry.get("copy-from", "") if copy_from == "fake_item": continue entry_type = entry.get("type") path = entry.pop("original_filename") if not entry_type or entry_type not in TYPE_WHITELIST: continue if entry.get("id") in ignore_items: continue if entry_type == "item_group": igroup_id = entry.get("id") if not igroup_id: problems.append({"type": "missing id", "path": path, "entry": entry}) item_group[igroup_id] = entry continue # if it's not an item_group, it's probably an item item_id = entry.get("id") item_entry[item_id] = entry for igroup in item_group: _recurse_through_igroups(item_group[igroup], item_group, item_to_group) for item in item_entry: itemtype = item_entry[item].get("type") if itemtype not in item_categories: continue if item not in item_to_group: item_orphan[item] = item_entry[item] return (item_to_group, item_orphan, item_entry, problems)
[ "def", "get_item_data", "(", "entries", ",", "categories", "=", "None", ",", "ignore", "=", "None", ")", ":", "ignore_items", "=", "[", "\"battery_test\"", "]", "TYPE_WHITELIST", ".", "append", "(", "\"item_group\"", ")", "if", "ignore", ":", "ignore_items", "=", "ignore", "item_categories", "=", "DEFAULT_CATEGORIES", "if", "categories", ":", "item_categories", "=", "categories", "item_group", "=", "{", "}", "item_entry", "=", "{", "}", "item_orphan", "=", "{", "}", "item_to_group", "=", "{", "}", "problems", "=", "[", "]", "for", "entry", "in", "entries", ":", "copy_from", "=", "entry", ".", "get", "(", "\"copy-from\"", ",", "\"\"", ")", "if", "copy_from", "==", "\"fake_item\"", ":", "continue", "entry_type", "=", "entry", ".", "get", "(", "\"type\"", ")", "path", "=", "entry", ".", "pop", "(", "\"original_filename\"", ")", "if", "not", "entry_type", "or", "entry_type", "not", "in", "TYPE_WHITELIST", ":", "continue", "if", "entry", ".", "get", "(", "\"id\"", ")", "in", "ignore_items", ":", "continue", "if", "entry_type", "==", "\"item_group\"", ":", "igroup_id", "=", "entry", ".", "get", "(", "\"id\"", ")", "if", "not", "igroup_id", ":", "problems", ".", "append", "(", "{", "\"type\"", ":", "\"missing id\"", ",", "\"path\"", ":", "path", ",", "\"entry\"", ":", "entry", "}", ")", "item_group", "[", "igroup_id", "]", "=", "entry", "continue", "# if it's not an item_group, it's probably an item", "item_id", "=", "entry", ".", "get", "(", "\"id\"", ")", "item_entry", "[", "item_id", "]", "=", "entry", "for", "igroup", "in", "item_group", ":", "_recurse_through_igroups", "(", "item_group", "[", "igroup", "]", ",", "item_group", ",", "item_to_group", ")", "for", "item", "in", "item_entry", ":", "itemtype", "=", "item_entry", "[", "item", "]", ".", "get", "(", "\"type\"", ")", "if", "itemtype", "not", "in", "item_categories", ":", "continue", "if", "item", "not", "in", "item_to_group", ":", "item_orphan", "[", "item", "]", "=", "item_entry", "[", "item", "]", "return", "(", "item_to_group", ",", "item_orphan", ",", "item_entry", ",", "problems", ")" ]
https://github.com/CleverRaven/Cataclysm-DDA/blob/03e7363df0835ec1b39da973ea29f26f27833b38/tools/json_tools/itemgroups.py#L137-L182
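The final loop above is a simple orphan check: an item is orphaned when no item group ever reaches it. A toy illustration of that check with hypothetical entries (in the real function, `item_to_group` is built by `_recurse_through_igroups`, which is not shown here):

    item_entry = {"knife": {"type": "TOOL"}, "rock": {"type": "TOOL"}}
    item_to_group = {"knife": ["tools_common"]}  # normally built from the groups

    item_orphan = {item: data for item, data in item_entry.items()
                   if item not in item_to_group}
    print(sorted(item_orphan))  # ['rock'] -- no group ever spawns it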
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/string_ops.py
python
regex_full_match
(input, pattern, name=None)
return gen_string_ops.regex_full_match( input=input, pattern=pattern, name=name)
r"""Match elements of `input` with regex `pattern`. Args: input: string `Tensor`, the source strings to process. pattern: string or scalar string `Tensor`, regular expression to use, see more details at https://github.com/google/re2/wiki/Syntax name: Name of the op. Returns: bool `Tensor` of the same shape as `input` with match results.
r"""Match elements of `input` with regex `pattern`.
[ "r", "Match", "elements", "of", "input", "with", "regex", "pattern", "." ]
def regex_full_match(input, pattern, name=None): r"""Match elements of `input` with regex `pattern`. Args: input: string `Tensor`, the source strings to process. pattern: string or scalar string `Tensor`, regular expression to use, see more details at https://github.com/google/re2/wiki/Syntax name: Name of the op. Returns: bool `Tensor` of the same shape as `input` with match results. """ if isinstance(pattern, util_compat.bytes_or_text_types): # When `pattern` is static through the life of the op we can # use a version which performs the expensive regex compilation once at # creation time. return gen_string_ops.static_regex_full_match( input=input, pattern=pattern, name=name) return gen_string_ops.regex_full_match( input=input, pattern=pattern, name=name)
[ "def", "regex_full_match", "(", "input", ",", "pattern", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "pattern", ",", "util_compat", ".", "bytes_or_text_types", ")", ":", "# When `pattern` is static through the life of the op we can", "# use a version which performs the expensive regex compilation once at", "# creation time.", "return", "gen_string_ops", ".", "static_regex_full_match", "(", "input", "=", "input", ",", "pattern", "=", "pattern", ",", "name", "=", "name", ")", "return", "gen_string_ops", ".", "regex_full_match", "(", "input", "=", "input", ",", "pattern", "=", "pattern", ",", "name", "=", "name", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/string_ops.py#L47-L66
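A short usage sketch; in TensorFlow 2.x this op is exposed as `tf.strings.regex_full_match`, and passing a static Python string pattern takes the precompiled fast path described in the comment above. Note that this is a full match, not a substring search.

    import tensorflow as tf

    inputs = tf.constant(["abc123", "abc", "123"])
    # Element-wise full-match against a static pattern; returns a bool tensor.
    print(tf.strings.regex_full_match(inputs, r"[a-z]+\d+"))
    # -> [ True False False]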
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/utils/extmath.py
python
norm
(x)
return nrm2(x)
Compute the Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
Compute the Euclidean or Frobenius norm of x.
[ "Compute", "the", "Euclidean", "or", "Frobenius", "norm", "of", "x", "." ]
def norm(x): """Compute the Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). More precise than sqrt(squared_norm(x)). """ x = np.asarray(x) nrm2, = linalg.get_blas_funcs(['nrm2'], [x]) return nrm2(x)
[ "def", "norm", "(", "x", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "nrm2", ",", "=", "linalg", ".", "get_blas_funcs", "(", "[", "'nrm2'", "]", ",", "[", "x", "]", ")", "return", "nrm2", "(", "x", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/utils/extmath.py#L31-L39
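For vectors this matches the Euclidean norm and for 2-d arrays the Frobenius norm, so `np.linalg.norm` produces the same values; the BLAS `nrm2` route is chosen for precision, as the docstring notes. A quick check:

    import numpy as np
    from scipy import linalg

    x = np.array([3.0, 4.0])
    nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
    print(nrm2(x))            # 5.0, via BLAS, as in the function above
    print(np.linalg.norm(x))  # 5.0, equivalent result for this input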
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py
python
ParseResults.getName
(self)
r""" Returns the results name for this token expression. Useful when several different expressions might match at a particular location. Example:: integer = Word(nums) ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") house_number_expr = Suppress('#') + Word(nums, alphanums) user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) user_info = OneOrMore(user_data) result = user_info.parseString("22 111-22-3333 #221B") for item in result: print(item.getName(), ':', item[0]) prints:: age : 22 ssn : 111-22-3333 house_number : 221B
r""" Returns the results name for this token expression. Useful when several different expressions might match at a particular location.
[ "r", "Returns", "the", "results", "name", "for", "this", "token", "expression", ".", "Useful", "when", "several", "different", "expressions", "might", "match", "at", "a", "particular", "location", "." ]
def getName(self): r""" Returns the results name for this token expression. Useful when several different expressions might match at a particular location. Example:: integer = Word(nums) ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") house_number_expr = Suppress('#') + Word(nums, alphanums) user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) user_info = OneOrMore(user_data) result = user_info.parseString("22 111-22-3333 #221B") for item in result: print(item.getName(), ':', item[0]) prints:: age : 22 ssn : 111-22-3333 house_number : 221B """ if self.__name: return self.__name elif self.__parent: par = self.__parent() if par: return par.__lookup(self) else: return None elif (len(self) == 1 and len(self.__tokdict) == 1 and next(iter(self.__tokdict.values()))[0][1] in (0,-1)): return next(iter(self.__tokdict.keys())) else: return None
[ "def", "getName", "(", "self", ")", ":", "if", "self", ".", "__name", ":", "return", "self", ".", "__name", "elif", "self", ".", "__parent", ":", "par", "=", "self", ".", "__parent", "(", ")", "if", "par", ":", "return", "par", ".", "__lookup", "(", "self", ")", "else", ":", "return", "None", "elif", "(", "len", "(", "self", ")", "==", "1", "and", "len", "(", "self", ".", "__tokdict", ")", "==", "1", "and", "next", "(", "iter", "(", "self", ".", "__tokdict", ".", "values", "(", ")", ")", ")", "[", "0", "]", "[", "1", "]", "in", "(", "0", ",", "-", "1", ")", ")", ":", "return", "next", "(", "iter", "(", "self", ".", "__tokdict", ".", "keys", "(", ")", ")", ")", "else", ":", "return", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py#L834-L869
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
WriteBuilder.execute
(self)
return True
Create all the non-existent target nodes as empty files.
Create all the non-existent target nodes as empty files.
[ "Create", "all", "the", "non", "-", "existent", "target", "nodes", "as", "empty", "files", "." ]
def execute(self): """Create all the non-existent target nodes as empty files.""" pretty = 'Write' if len(self.__input) else 'Touch' self.output( '%s %s' % (pretty, ', '.join(map(str, self.targets())))) for node in self.targets(): path = str(node.path()) assert isinstance(node, Node) node.path().touch() with WritePermissions(node): with open(path, 'wb', stat.S_IWUSR) as f: f.write(self.__input) if self.__permissions is not None: _OS.chmod( path, _OS.stat(path).st_mode | self.__permissions) return True
[ "def", "execute", "(", "self", ")", ":", "pretty", "=", "'Write'", "if", "len", "(", "self", ".", "__input", ")", "else", "'Touch'", "self", ".", "output", "(", "'%s %s'", "%", "(", "pretty", ",", "', '", ".", "join", "(", "map", "(", "str", ",", "self", ".", "targets", "(", ")", ")", ")", ")", ")", "for", "node", "in", "self", ".", "targets", "(", ")", ":", "path", "=", "str", "(", "node", ".", "path", "(", ")", ")", "assert", "isinstance", "(", "node", ",", "Node", ")", "node", ".", "path", "(", ")", ".", "touch", "(", ")", "with", "WritePermissions", "(", "node", ")", ":", "with", "open", "(", "path", ",", "'wb'", ",", "stat", ".", "S_IWUSR", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "__input", ")", "if", "self", ".", "__permissions", "is", "not", "None", ":", "_OS", ".", "chmod", "(", "path", ",", "_OS", ".", "stat", "(", "path", ")", ".", "st_mode", "|", "self", ".", "__permissions", ")", "return", "True" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L3511-L3527
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/symsrc/pefile.py
python
PE.get_qword_at_rva
(self, rva)
Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset.
Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset.
[ "Return", "the", "quad", "-", "word", "value", "at", "the", "given", "RVA", ".", "Returns", "None", "if", "the", "value", "can", "t", "be", "read", "i", ".", "e", ".", "the", "RVA", "can", "t", "be", "mapped", "to", "a", "file", "offset", "." ]
def get_qword_at_rva(self, rva): """Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_qword_from_data(self.get_data(rva)[:8], 0) except PEFormatError: return None
[ "def", "get_qword_at_rva", "(", "self", ",", "rva", ")", ":", "try", ":", "return", "self", ".", "get_qword_from_data", "(", "self", ".", "get_data", "(", "rva", ")", "[", ":", "8", "]", ",", "0", ")", "except", "PEFormatError", ":", "return", "None" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/symsrc/pefile.py#L3526-L3536
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/m5/ext/pyfdt/pyfdt.py
python
FdtPropertyStrings.__extract_prop_strings
(cls, value)
return [st for st in \ value.decode('ascii').split('\0') if len(st)]
Extract strings from raw_value
Extract strings from raw_value
[ "Extract", "strings", "from", "raw_value" ]
def __extract_prop_strings(cls, value): """Extract strings from raw_value""" return [st for st in \ value.decode('ascii').split('\0') if len(st)]
[ "def", "__extract_prop_strings", "(", "cls", ",", "value", ")", ":", "return", "[", "st", "for", "st", "in", "value", ".", "decode", "(", "'ascii'", ")", ".", "split", "(", "'\\0'", ")", "if", "len", "(", "st", ")", "]" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/m5/ext/pyfdt/pyfdt.py#L163-L166
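The property value in a flattened device tree is a NUL-separated ASCII blob; the comprehension above splits it and drops empty segments. The same logic applied to a hand-made value:

    raw = b"compatible\x00simple-bus\x00\x00"
    strings = [st for st in raw.decode('ascii').split('\0') if len(st)]
    print(strings)  # ['compatible', 'simple-bus']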
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py
python
Listbox.get
(self, first, last=None)
Get list of items from FIRST to LAST (not included).
Get list of items from FIRST to LAST (not included).
[ "Get", "list", "of", "items", "from", "FIRST", "to", "LAST", "(", "not", "included", ")", "." ]
def get(self, first, last=None): """Get list of items from FIRST to LAST (not included).""" if last: return self.tk.splitlist(self.tk.call( self._w, 'get', first, last)) else: return self.tk.call(self._w, 'get', first)
[ "def", "get", "(", "self", ",", "first", ",", "last", "=", "None", ")", ":", "if", "last", ":", "return", "self", ".", "tk", ".", "splitlist", "(", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'get'", ",", "first", ",", "last", ")", ")", "else", ":", "return", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'get'", ",", "first", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L2566-L2572
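A minimal sketch using the Python 2 module name, matching the path above; it needs a display to run. The underlying Tk `get first last` command treats the range as inclusive, so `get(0, END)` returns every item.

    import Tkinter

    root = Tkinter.Tk()
    lb = Tkinter.Listbox(root)
    for item in ("alpha", "beta", "gamma"):
        lb.insert(Tkinter.END, item)

    print(lb.get(0))               # single item: 'alpha'
    print(lb.get(0, Tkinter.END))  # tuple: ('alpha', 'beta', 'gamma')
    root.destroy()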
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/email/__init__.py
python
message_from_string
(s, *args, **kws)
return Parser(*args, **kws).parsestr(s)
Parse a string into a Message object model. Optional _class and strict are passed to the Parser constructor.
Parse a string into a Message object model.
[ "Parse", "a", "string", "into", "a", "Message", "object", "model", "." ]
def message_from_string(s, *args, **kws): """Parse a string into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from email.parser import Parser return Parser(*args, **kws).parsestr(s)
[ "def", "message_from_string", "(", "s", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "from", "email", ".", "parser", "import", "Parser", "return", "Parser", "(", "*", "args", ",", "*", "*", "kws", ")", ".", "parsestr", "(", "s", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/email/__init__.py#L32-L38
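A quick round-trip with a hand-written RFC 2822 string:

    from email import message_from_string

    raw = "From: alice@example.com\nSubject: hello\n\nbody text\n"
    msg = message_from_string(raw)
    print(msg["Subject"])     # 'hello'
    print(msg.get_payload())  # 'body text\n'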
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/nntplib.py
python
NNTP.next
(self)
return self.statcmd('NEXT')
Process a NEXT command. No arguments. Return as for STAT.
Process a NEXT command. No arguments. Return as for STAT.
[ "Process", "a", "NEXT", "command", ".", "No", "arguments", ".", "Return", "as", "for", "STAT", "." ]
def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self.statcmd('NEXT')
[ "def", "next", "(", "self", ")", ":", "return", "self", ".", "statcmd", "(", "'NEXT'", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/nntplib.py#L397-L399
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/devil/devil/utils/find_usb_devices.py
python
parse_options
(argv)
return parser.parse_args(argv[1:])
Parses and checks the command-line options. Returns: A tuple containing the options structure and a list of categories to be traced.
Parses and checks the command-line options.
[ "Parses", "and", "checks", "the", "command", "-", "line", "options", "." ]
def parse_options(argv): """Parses and checks the command-line options. Returns: A tuple containing the options structure and a list of categories to be traced. """ USAGE = '''./find_usb_devices [--help] This script shows the mapping between USB devices and port numbers. Clients are not intended to call this script from the command line. Clients are intended to call the functions in this script directly. For instance, GetAllPhysicalPortToSerialMaps(...) Running this script with --help will display this message. Running this script without --help will display information about devices attached, TTY mapping, and serial number mapping, for testing purposes. See design document for API documentation. ''' parser = argparse.ArgumentParser(usage=USAGE) return parser.parse_args(argv[1:])
[ "def", "parse_options", "(", "argv", ")", ":", "USAGE", "=", "'''./find_usb_devices [--help]\n This script shows the mapping between USB devices and port numbers.\n Clients are not intended to call this script from the command line.\n Clients are intended to call the functions in this script directly.\n For instance, GetAllPhysicalPortToSerialMaps(...)\n Running this script with --help will display this message.\n Running this script without --help will display information about\n devices attached, TTY mapping, and serial number mapping,\n for testing purposes. See design document for API documentation.\n '''", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "USAGE", ")", "return", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/devil/devil/utils/find_usb_devices.py#L507-L525
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/framework/meta_graph.py
python
export_scoped_meta_graph
(filename=None, graph_def=None, graph=None, export_scope=None, as_text=False, unbound_inputs_col_name="unbound_inputs", clear_devices=False, saver_def=None, clear_extraneous_savers=False, strip_default_attrs=False, save_debug_info=False, **kwargs)
return scoped_meta_graph_def, var_list
Returns `MetaGraphDef` proto. Optionally writes it to filename. This function exports the graph, saver, and collection objects into `MetaGraphDef` protocol buffer with the intention of it being imported at a later time or location to restart training, run inference, or be a subgraph. Args: filename: Optional filename including the path for writing the generated `MetaGraphDef` protocol buffer. graph_def: `GraphDef` protocol buffer. graph: The `Graph` to export. If `None`, use the default graph. export_scope: Optional `string`. Name scope under which to extract the subgraph. The scope name will be stripped from the node definitions for easy import later into new name scopes. If `None`, the whole graph is exported. as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto. unbound_inputs_col_name: Optional `string`. If provided, a string collection with the given name will be added to the returned `MetaGraphDef`, containing the names of tensors that must be remapped when importing the `MetaGraphDef`. clear_devices: Boolean which controls whether to clear device information before exporting the graph. saver_def: `SaverDef` protocol buffer. clear_extraneous_savers: Remove any Saver-related information from the graph (both Save/Restore ops and SaverDefs) that are not associated with the provided SaverDef. strip_default_attrs: Set to true if default valued attributes must be removed while exporting the GraphDef. save_debug_info: If `True`, save the GraphDebugInfo to a separate file, which in the same directory of filename and with `_debug` added before the file extension. **kwargs: Optional keyed arguments, including meta_info_def and collection_list. Returns: A `MetaGraphDef` proto and dictionary of `Variables` in the exported name scope. Raises: ValueError: When the `GraphDef` is larger than 2GB. ValueError: When executing in Eager mode and either `graph_def` or `graph` is undefined.
Returns `MetaGraphDef` proto. Optionally writes it to filename.
[ "Returns", "MetaGraphDef", "proto", ".", "Optionally", "writes", "it", "to", "filename", "." ]
def export_scoped_meta_graph(filename=None, graph_def=None, graph=None, export_scope=None, as_text=False, unbound_inputs_col_name="unbound_inputs", clear_devices=False, saver_def=None, clear_extraneous_savers=False, strip_default_attrs=False, save_debug_info=False, **kwargs): """Returns `MetaGraphDef` proto. Optionally writes it to filename. This function exports the graph, saver, and collection objects into `MetaGraphDef` protocol buffer with the intention of it being imported at a later time or location to restart training, run inference, or be a subgraph. Args: filename: Optional filename including the path for writing the generated `MetaGraphDef` protocol buffer. graph_def: `GraphDef` protocol buffer. graph: The `Graph` to export. If `None`, use the default graph. export_scope: Optional `string`. Name scope under which to extract the subgraph. The scope name will be stripped from the node definitions for easy import later into new name scopes. If `None`, the whole graph is exported. as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto. unbound_inputs_col_name: Optional `string`. If provided, a string collection with the given name will be added to the returned `MetaGraphDef`, containing the names of tensors that must be remapped when importing the `MetaGraphDef`. clear_devices: Boolean which controls whether to clear device information before exporting the graph. saver_def: `SaverDef` protocol buffer. clear_extraneous_savers: Remove any Saver-related information from the graph (both Save/Restore ops and SaverDefs) that are not associated with the provided SaverDef. strip_default_attrs: Set to true if default valued attributes must be removed while exporting the GraphDef. save_debug_info: If `True`, save the GraphDebugInfo to a separate file, which in the same directory of filename and with `_debug` added before the file extension. **kwargs: Optional keyed arguments, including meta_info_def and collection_list. Returns: A `MetaGraphDef` proto and dictionary of `Variables` in the exported name scope. Raises: ValueError: When the `GraphDef` is larger than 2GB. ValueError: When executing in Eager mode and either `graph_def` or `graph` is undefined. """ if context.executing_eagerly() and not (graph_def is not None and graph is not None): raise ValueError("Exporting/importing meta graphs is not supported when " "Eager Execution is enabled.") graph = graph or ops.get_default_graph() exclude_nodes = None unbound_inputs = [] if export_scope or clear_extraneous_savers or clear_devices: if graph_def: new_graph_def = graph_pb2.GraphDef() new_graph_def.versions.CopyFrom(graph_def.versions) new_graph_def.library.CopyFrom(graph_def.library) if clear_extraneous_savers: exclude_nodes = _find_extraneous_saver_nodes(graph_def, saver_def) for node_def in graph_def.node: if _should_include_node(node_def.name, export_scope, exclude_nodes): new_node_def = _node_def(node_def, export_scope, unbound_inputs, clear_devices=clear_devices) new_graph_def.node.extend([new_node_def]) graph_def = new_graph_def else: # Only do this complicated work if we want to remove a name scope. graph_def = graph_pb2.GraphDef() # pylint: disable=protected-access graph_def.versions.CopyFrom(graph.graph_def_versions) bytesize = 0 if clear_extraneous_savers: exclude_nodes = _find_extraneous_saver_nodes(graph.as_graph_def(), saver_def) for key in sorted(graph._nodes_by_id): if _should_include_node(graph._nodes_by_id[key].name, export_scope, exclude_nodes): value = graph._nodes_by_id[key] # pylint: enable=protected-access node_def = _node_def(value.node_def, export_scope, unbound_inputs, clear_devices=clear_devices) graph_def.node.extend([node_def]) if value.outputs: assert "_output_shapes" not in graph_def.node[-1].attr graph_def.node[-1].attr["_output_shapes"].list.shape.extend([ output.get_shape().as_proto() for output in value.outputs]) bytesize += value.node_def.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") graph._copy_functions_to_graph_def(graph_def, bytesize) # pylint: disable=protected-access # It's possible that not all the inputs are in the export_scope. # If we would like such information included in the exported meta_graph, # add them to a special unbound_inputs collection. if unbound_inputs_col_name: # Clears the unbound_inputs collections. graph.clear_collection(unbound_inputs_col_name) for k in unbound_inputs: graph.add_to_collection(unbound_inputs_col_name, k) var_list = {} variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope=export_scope) for v in variables: if _should_include_node(v, export_scope, exclude_nodes): var_list[ops.strip_name_scope(v.name, export_scope)] = v scoped_meta_graph_def = create_meta_graph_def( graph_def=graph_def, graph=graph, export_scope=export_scope, exclude_nodes=exclude_nodes, clear_extraneous_savers=clear_extraneous_savers, saver_def=saver_def, strip_default_attrs=strip_default_attrs, **kwargs) if filename: graph_io.write_graph( scoped_meta_graph_def, os.path.dirname(filename), os.path.basename(filename), as_text=as_text) if save_debug_info: name, _ = os.path.splitext(filename) debug_filename = "{name}{ext}".format(name=name, ext=".debug") # Gets the operation from the graph by the name. Exludes variable nodes, # so only the nodes in the frozen models are included. # TODO(liufengdb): fix this for functions. ops_to_export = [] for node in scoped_meta_graph_def.graph_def.node: scoped_op_name = ops.prepend_name_scope(node.name, export_scope) ops_to_export.append(("", graph.get_operation_by_name(scoped_op_name))) graph_debug_info = error_interpolation.create_graph_debug_info_def( ops_to_export) graph_io.write_graph( graph_debug_info, os.path.dirname(debug_filename), os.path.basename(debug_filename), as_text=as_text) return scoped_meta_graph_def, var_list
[ "def", "export_scoped_meta_graph", "(", "filename", "=", "None", ",", "graph_def", "=", "None", ",", "graph", "=", "None", ",", "export_scope", "=", "None", ",", "as_text", "=", "False", ",", "unbound_inputs_col_name", "=", "\"unbound_inputs\"", ",", "clear_devices", "=", "False", ",", "saver_def", "=", "None", ",", "clear_extraneous_savers", "=", "False", ",", "strip_default_attrs", "=", "False", ",", "save_debug_info", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "context", ".", "executing_eagerly", "(", ")", "and", "not", "(", "graph_def", "is", "not", "None", "and", "graph", "is", "not", "None", ")", ":", "raise", "ValueError", "(", "\"Exporting/importing meta graphs is not supported when \"", "\"Eager Execution is enabled.\"", ")", "graph", "=", "graph", "or", "ops", ".", "get_default_graph", "(", ")", "exclude_nodes", "=", "None", "unbound_inputs", "=", "[", "]", "if", "export_scope", "or", "clear_extraneous_savers", "or", "clear_devices", ":", "if", "graph_def", ":", "new_graph_def", "=", "graph_pb2", ".", "GraphDef", "(", ")", "new_graph_def", ".", "versions", ".", "CopyFrom", "(", "graph_def", ".", "versions", ")", "new_graph_def", ".", "library", ".", "CopyFrom", "(", "graph_def", ".", "library", ")", "if", "clear_extraneous_savers", ":", "exclude_nodes", "=", "_find_extraneous_saver_nodes", "(", "graph_def", ",", "saver_def", ")", "for", "node_def", "in", "graph_def", ".", "node", ":", "if", "_should_include_node", "(", "node_def", ".", "name", ",", "export_scope", ",", "exclude_nodes", ")", ":", "new_node_def", "=", "_node_def", "(", "node_def", ",", "export_scope", ",", "unbound_inputs", ",", "clear_devices", "=", "clear_devices", ")", "new_graph_def", ".", "node", ".", "extend", "(", "[", "new_node_def", "]", ")", "graph_def", "=", "new_graph_def", "else", ":", "# Only do this complicated work if we want to remove a name scope.", "graph_def", "=", "graph_pb2", ".", "GraphDef", "(", ")", "# pylint: disable=protected-access", "graph_def", ".", "versions", ".", "CopyFrom", "(", "graph", ".", "graph_def_versions", ")", "bytesize", "=", "0", "if", "clear_extraneous_savers", ":", "exclude_nodes", "=", "_find_extraneous_saver_nodes", "(", "graph", ".", "as_graph_def", "(", ")", ",", "saver_def", ")", "for", "key", "in", "sorted", "(", "graph", ".", "_nodes_by_id", ")", ":", "if", "_should_include_node", "(", "graph", ".", "_nodes_by_id", "[", "key", "]", ".", "name", ",", "export_scope", ",", "exclude_nodes", ")", ":", "value", "=", "graph", ".", "_nodes_by_id", "[", "key", "]", "# pylint: enable=protected-access", "node_def", "=", "_node_def", "(", "value", ".", "node_def", ",", "export_scope", ",", "unbound_inputs", ",", "clear_devices", "=", "clear_devices", ")", "graph_def", ".", "node", ".", "extend", "(", "[", "node_def", "]", ")", "if", "value", ".", "outputs", ":", "assert", "\"_output_shapes\"", "not", "in", "graph_def", ".", "node", "[", "-", "1", "]", ".", "attr", "graph_def", ".", "node", "[", "-", "1", "]", ".", "attr", "[", "\"_output_shapes\"", "]", ".", "list", ".", "shape", ".", "extend", "(", "[", "output", ".", "get_shape", "(", ")", ".", "as_proto", "(", ")", "for", "output", "in", "value", ".", "outputs", "]", ")", "bytesize", "+=", "value", ".", "node_def", ".", "ByteSize", "(", ")", "if", "bytesize", ">=", "(", "1", "<<", "31", ")", "or", "bytesize", "<", "0", ":", "raise", "ValueError", "(", "\"GraphDef cannot be larger than 2GB.\"", ")", "graph", ".", "_copy_functions_to_graph_def", "(", "graph_def", ",", "bytesize", ")", "# pylint: 
disable=protected-access", "# It's possible that not all the inputs are in the export_scope.", "# If we would like such information included in the exported meta_graph,", "# add them to a special unbound_inputs collection.", "if", "unbound_inputs_col_name", ":", "# Clears the unbound_inputs collections.", "graph", ".", "clear_collection", "(", "unbound_inputs_col_name", ")", "for", "k", "in", "unbound_inputs", ":", "graph", ".", "add_to_collection", "(", "unbound_inputs_col_name", ",", "k", ")", "var_list", "=", "{", "}", "variables", "=", "graph", ".", "get_collection", "(", "ops", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", ",", "scope", "=", "export_scope", ")", "for", "v", "in", "variables", ":", "if", "_should_include_node", "(", "v", ",", "export_scope", ",", "exclude_nodes", ")", ":", "var_list", "[", "ops", ".", "strip_name_scope", "(", "v", ".", "name", ",", "export_scope", ")", "]", "=", "v", "scoped_meta_graph_def", "=", "create_meta_graph_def", "(", "graph_def", "=", "graph_def", ",", "graph", "=", "graph", ",", "export_scope", "=", "export_scope", ",", "exclude_nodes", "=", "exclude_nodes", ",", "clear_extraneous_savers", "=", "clear_extraneous_savers", ",", "saver_def", "=", "saver_def", ",", "strip_default_attrs", "=", "strip_default_attrs", ",", "*", "*", "kwargs", ")", "if", "filename", ":", "graph_io", ".", "write_graph", "(", "scoped_meta_graph_def", ",", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "as_text", "=", "as_text", ")", "if", "save_debug_info", ":", "name", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "debug_filename", "=", "\"{name}{ext}\"", ".", "format", "(", "name", "=", "name", ",", "ext", "=", "\".debug\"", ")", "# Gets the operation from the graph by the name. Exludes variable nodes,", "# so only the nodes in the frozen models are included.", "# TODO(liufengdb): fix this for functions.", "ops_to_export", "=", "[", "]", "for", "node", "in", "scoped_meta_graph_def", ".", "graph_def", ".", "node", ":", "scoped_op_name", "=", "ops", ".", "prepend_name_scope", "(", "node", ".", "name", ",", "export_scope", ")", "ops_to_export", ".", "append", "(", "(", "\"\"", ",", "graph", ".", "get_operation_by_name", "(", "scoped_op_name", ")", ")", ")", "graph_debug_info", "=", "error_interpolation", ".", "create_graph_debug_info_def", "(", "ops_to_export", ")", "graph_io", ".", "write_graph", "(", "graph_debug_info", ",", "os", ".", "path", ".", "dirname", "(", "debug_filename", ")", ",", "os", ".", "path", ".", "basename", "(", "debug_filename", ")", ",", "as_text", "=", "as_text", ")", "return", "scoped_meta_graph_def", ",", "var_list" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/framework/meta_graph.py#L913-L1075
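This internal helper backs the public `tf.train.export_meta_graph` API. A minimal TF1-style sketch of exporting a small graph through that wrapper, assuming a TF release that ships the `compat.v1` namespace; the output path is illustrative only:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()  # meta-graph export requires graph mode

    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
        w = tf.Variable(tf.ones([3, 1]), name="w")
        y = tf.matmul(x, w, name="y")

    # Writes a MetaGraphDef to disk and returns it, as described above.
    meta_graph_def = tf.train.export_meta_graph(
        filename="/tmp/demo.meta", graph=g, clear_devices=True)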
yyzybb537/libgo
4af17b7c67643c4d54aa354dcc77963ea07847d0
third_party/boost.context/tools/build/src/build/feature.py
python
Feature.parent
(self)
return self._parent
For subfeatures, return pair of (parent_feature, value). Value may be None if this subfeature is not specific to any value of the parent feature.
For subfeatures, return pair of (parent_feature, value).
[ "For", "subfeatures", "return", "pair", "of", "(", "parent_feature", "value", ")", "." ]
def parent(self): """For subfeatures, return pair of (parent_feature, value). Value may be None if this subfeature is not specific to any value of the parent feature. """ return self._parent
[ "def", "parent", "(", "self", ")", ":", "return", "self", ".", "_parent" ]
https://github.com/yyzybb537/libgo/blob/4af17b7c67643c4d54aa354dcc77963ea07847d0/third_party/boost.context/tools/build/src/build/feature.py#L77-L83
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/stc.py
python
StyledTextCtrl.GetText
(*args, **kwargs)
return _stc.StyledTextCtrl_GetText(*args, **kwargs)
GetText(self) -> String Retrieve all the text in the document.
GetText(self) -> String
[ "GetText", "(", "self", ")", "-", ">", "String" ]
def GetText(*args, **kwargs): """ GetText(self) -> String Retrieve all the text in the document. """ return _stc.StyledTextCtrl_GetText(*args, **kwargs)
[ "def", "GetText", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_GetText", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L3665-L3671
bitconch/bitconch-core
5537f3215b3e3b76f6720d6f908676a6c34bc5db
deploy-nightly.py
python
rmtree_onerror
(self, func, file_path, exc_info)
Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)``
Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)``
[ "Error", "handler", "for", "shutil", ".", "rmtree", ".", "If", "the", "error", "is", "due", "to", "an", "access", "error", "(", "read", "only", "file", ")", "it", "attempts", "to", "add", "write", "permission", "and", "then", "retries", ".", "If", "the", "error", "is", "for", "another", "reason", "it", "re", "-", "raises", "the", "error", ".", "Usage", ":", "shutil", ".", "rmtree", "(", "path", "onerror", "=", "onerror", ")" ]
def rmtree_onerror(self, func, file_path, exc_info): """ Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)`` """ logging.warning(str(exc_info)) logging.warning("rmtree error,check the file exists or try to chmod the file,then retry rmtree action.") os.chmod(file_path, stat.S_IWRITE) #chmod to writeable if os.path.isdir(file_path): #file exists func(file_path) else: #handle whatever raise
[ "def", "rmtree_onerror", "(", "self", ",", "func", ",", "file_path", ",", "exc_info", ")", ":", "logging", ".", "warning", "(", "str", "(", "exc_info", ")", ")", "logging", ".", "warning", "(", "\"rmtree error,check the file exists or try to chmod the file,then retry rmtree action.\"", ")", "os", ".", "chmod", "(", "file_path", ",", "stat", ".", "S_IWRITE", ")", "#chmod to writeable", "if", "os", ".", "path", ".", "isdir", "(", "file_path", ")", ":", "#file exists", "func", "(", "file_path", ")", "else", ":", "#handle whatever", "raise" ]
https://github.com/bitconch/bitconch-core/blob/5537f3215b3e3b76f6720d6f908676a6c34bc5db/deploy-nightly.py#L18-L34
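The standard recipe this method implements: clear the read-only bit and retry the failed call. A self-contained version of that recipe (the directory path is hypothetical); note that the canonical handler simply retries `func(path)` rather than checking `isdir` first:

    import os
    import shutil
    import stat

    def onerror(func, path, exc_info):
        # Make the entry writable, then retry whatever call just failed.
        os.chmod(path, stat.S_IWRITE)
        func(path)

    if os.path.isdir("/tmp/some_build_dir"):  # hypothetical directory
        shutil.rmtree("/tmp/some_build_dir", onerror=onerror)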
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_windows.py
python
StatusBar.PushStatusText
(*args, **kwargs)
return _windows_.StatusBar_PushStatusText(*args, **kwargs)
PushStatusText(self, String text, int number=0)
PushStatusText(self, String text, int number=0)
[ "PushStatusText", "(", "self", "String", "text", "int", "number", "=", "0", ")" ]
def PushStatusText(*args, **kwargs): """PushStatusText(self, String text, int number=0)""" return _windows_.StatusBar_PushStatusText(*args, **kwargs)
[ "def", "PushStatusText", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "StatusBar_PushStatusText", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L1255-L1257
Tencent/Pebble
68315f176d9e328a233ace29b7579a829f89879f
tools/blade/src/blade/target.py
python
Target._clone_env
(self)
Clone target's environment.
Clone target's environment.
[ "Clone", "target", "s", "environment", "." ]
def _clone_env(self): """Clone target's environment. """ self._write_rule('%s = top_env.Clone()' % self._env_name())
[ "def", "_clone_env", "(", "self", ")", ":", "self", ".", "_write_rule", "(", "'%s = top_env.Clone()'", "%", "self", ".", "_env_name", "(", ")", ")" ]
https://github.com/Tencent/Pebble/blob/68315f176d9e328a233ace29b7579a829f89879f/tools/blade/src/blade/target.py#L58-L60
sc0ty/subsync
be5390d00ff475b6543eb0140c7e65b34317d95b
subsync/assets/item.py
python
Asset.remoteVersion
(self)
return utils.parseVersion(self._remote.get('version'))
Get version of asset available on server. Returns ------- tuple of int or None Remote version number or `None` if not available on server.
Get version of asset available on server.
[ "Get", "version", "of", "asset", "available", "on", "server", "." ]
def remoteVersion(self): """Get version of asset available on server. Returns ------- tuple of int or None Remote version number or `None` if not available on server. """ return utils.parseVersion(self._remote.get('version'))
[ "def", "remoteVersion", "(", "self", ")", ":", "return", "utils", ".", "parseVersion", "(", "self", ".", "_remote", ".", "get", "(", "'version'", ")", ")" ]
https://github.com/sc0ty/subsync/blob/be5390d00ff475b6543eb0140c7e65b34317d95b/subsync/assets/item.py#L71-L79
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/combo.py
python
ComboCtrl.SetPopupMinWidth
(*args, **kwargs)
return _combo.ComboCtrl_SetPopupMinWidth(*args, **kwargs)
SetPopupMinWidth(self, int width) Sets minimum width of the popup. If wider than combo control, it will extend to the left. A value of -1 indicates to use the default. The popup implementation may choose to ignore this.
SetPopupMinWidth(self, int width)
[ "SetPopupMinWidth", "(", "self", "int", "width", ")" ]
def SetPopupMinWidth(*args, **kwargs): """ SetPopupMinWidth(self, int width) Sets minimum width of the popup. If wider than combo control, it will extend to the left. A value of -1 indicates to use the default. The popup implementation may choose to ignore this. """ return _combo.ComboCtrl_SetPopupMinWidth(*args, **kwargs)
[ "def", "SetPopupMinWidth", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_combo", ".", "ComboCtrl_SetPopupMinWidth", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/combo.py#L273-L281
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/aui.py
python
AuiManager._GetPaneByWidget
(*args, **kwargs)
return _aui.AuiManager__GetPaneByWidget(*args, **kwargs)
_GetPaneByWidget(self, Window window) -> AuiPaneInfo
_GetPaneByWidget(self, Window window) -> AuiPaneInfo
[ "_GetPaneByWidget", "(", "self", "Window", "window", ")", "-", ">", "AuiPaneInfo" ]
def _GetPaneByWidget(*args, **kwargs): """_GetPaneByWidget(self, Window window) -> AuiPaneInfo""" return _aui.AuiManager__GetPaneByWidget(*args, **kwargs)
[ "def", "_GetPaneByWidget", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_aui", ".", "AuiManager__GetPaneByWidget", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/aui.py#L627-L629
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/layer/pooling.py
python
AvgPool1d.__init__
(self, kernel_size=1, stride=1, pad_mode="valid")
Initialize AvgPool1d.
Initialize AvgPool1d.
[ "Initialize", "AvgPool1d", "." ]
def __init__(self, kernel_size=1, stride=1, pad_mode="valid"): """Initialize AvgPool1d.""" validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name) validator.check_value_type('stride', stride, [int], self.cls_name) self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.cls_name) validator.check_int(kernel_size, 1, Rel.GE, "kernel_size", self.cls_name) validator.check_int(stride, 1, Rel.GE, "stride", self.cls_name) super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode) self.kernel_size = (1, kernel_size) self.stride = (1, stride) self.avg_pool = P.AvgPool(kernel_size=self.kernel_size, strides=self.stride, pad_mode=self.pad_mode) self.shape = F.shape self.reduce_mean = P.ReduceMean(keep_dims=True) self.slice = P.Slice() self.expand = P.ExpandDims() self.squeeze = P.Squeeze(2)
[ "def", "__init__", "(", "self", ",", "kernel_size", "=", "1", ",", "stride", "=", "1", ",", "pad_mode", "=", "\"valid\"", ")", ":", "validator", ".", "check_value_type", "(", "'kernel_size'", ",", "kernel_size", ",", "[", "int", "]", ",", "self", ".", "cls_name", ")", "validator", ".", "check_value_type", "(", "'stride'", ",", "stride", ",", "[", "int", "]", ",", "self", ".", "cls_name", ")", "self", ".", "pad_mode", "=", "validator", ".", "check_string", "(", "pad_mode", ".", "upper", "(", ")", ",", "[", "'VALID'", ",", "'SAME'", "]", ",", "'pad_mode'", ",", "self", ".", "cls_name", ")", "validator", ".", "check_int", "(", "kernel_size", ",", "1", ",", "Rel", ".", "GE", ",", "\"kernel_size\"", ",", "self", ".", "cls_name", ")", "validator", ".", "check_int", "(", "stride", ",", "1", ",", "Rel", ".", "GE", ",", "\"stride\"", ",", "self", ".", "cls_name", ")", "super", "(", "AvgPool1d", ",", "self", ")", ".", "__init__", "(", "kernel_size", ",", "stride", ",", "pad_mode", ")", "self", ".", "kernel_size", "=", "(", "1", ",", "kernel_size", ")", "self", ".", "stride", "=", "(", "1", ",", "stride", ")", "self", ".", "avg_pool", "=", "P", ".", "AvgPool", "(", "kernel_size", "=", "self", ".", "kernel_size", ",", "strides", "=", "self", ".", "stride", ",", "pad_mode", "=", "self", ".", "pad_mode", ")", "self", ".", "shape", "=", "F", ".", "shape", "self", ".", "reduce_mean", "=", "P", ".", "ReduceMean", "(", "keep_dims", "=", "True", ")", "self", ".", "slice", "=", "P", ".", "Slice", "(", ")", "self", ".", "expand", "=", "P", ".", "ExpandDims", "(", ")", "self", ".", "squeeze", "=", "P", ".", "Squeeze", "(", "2", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/layer/pooling.py#L364-L384
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/idlelib/configHandler.py
python
IdleConfParser.GetOptionList
(self, section)
Return a list of options for given section, else [].
Return a list of options for given section, else [].
[ "Return", "a", "list", "of", "options", "for", "given", "section", "else", "[]", "." ]
def GetOptionList(self, section): "Return a list of options for given section, else []." if self.has_section(section): return self.options(section) else: #return a default value return []
[ "def", "GetOptionList", "(", "self", ",", "section", ")", ":", "if", "self", ".", "has_section", "(", "section", ")", ":", "return", "self", ".", "options", "(", "section", ")", "else", ":", "#return a default value", "return", "[", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/idlelib/configHandler.py#L62-L67
root-project/root
fcd3583bb14852bf2e8cd2415717cbaac0e75896
bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/__init__.py
python
get_defined_attributes
(klass, consider_base_classes=False)
return sorted([attr for attr in dir(klass) if is_defined(attr)])
Get all class attributes that are defined in a given class or optionally in any of its base classes (except for `object`).
Get all class attributes that are defined in a given class or optionally in any of its base classes (except for `object`).
[ "Get", "all", "class", "attributes", "that", "are", "defined", "in", "a", "given", "class", "or", "optionally", "in", "any", "of", "its", "base", "classes", "(", "except", "for", "object", ")", "." ]
def get_defined_attributes(klass, consider_base_classes=False): """ Get all class attributes that are defined in a given class or optionally in any of its base classes (except for `object`). """ blacklist = ["__dict__", "__doc__", "__hash__", "__module__", "__weakref__"] if not consider_base_classes: return sorted([attr for attr in klass.__dict__.keys() if attr not in blacklist]) # get a list of this class and all its base classes, excluding `object` method_resolution_order = klass.mro() if object in method_resolution_order: method_resolution_order.remove(object) def is_defined(funcname): if funcname in blacklist: return False in_any_dict = False for mro_class in method_resolution_order: if funcname in mro_class.__dict__: in_any_dict = True return in_any_dict return sorted([attr for attr in dir(klass) if is_defined(attr)])
[ "def", "get_defined_attributes", "(", "klass", ",", "consider_base_classes", "=", "False", ")", ":", "blacklist", "=", "[", "\"__dict__\"", ",", "\"__doc__\"", ",", "\"__hash__\"", ",", "\"__module__\"", ",", "\"__weakref__\"", "]", "if", "not", "consider_base_classes", ":", "return", "sorted", "(", "[", "attr", "for", "attr", "in", "klass", ".", "__dict__", ".", "keys", "(", ")", "if", "attr", "not", "in", "blacklist", "]", ")", "# get a list of this class and all its base classes, excluding `object`", "method_resolution_order", "=", "klass", ".", "mro", "(", ")", "if", "object", "in", "method_resolution_order", ":", "method_resolution_order", ".", "remove", "(", "object", ")", "def", "is_defined", "(", "funcname", ")", ":", "if", "funcname", "in", "blacklist", ":", "return", "False", "in_any_dict", "=", "False", "for", "mro_class", "in", "method_resolution_order", ":", "if", "funcname", "in", "mro_class", ".", "__dict__", ":", "in_any_dict", "=", "True", "return", "in_any_dict", "return", "sorted", "(", "[", "attr", "for", "attr", "in", "dir", "(", "klass", ")", "if", "is_defined", "(", "attr", ")", "]", ")" ]
https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/__init__.py#L119-L148
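A small demonstration of the two modes; dunder names are filtered out here for brevity, much as the blacklist does above:

    class Base(object):
        def shared(self):
            pass

    class Child(Base):
        def own(self):
            pass

    # consider_base_classes=False: only Child's own __dict__ counts.
    print(sorted(a for a in Child.__dict__ if not a.startswith('__')))
    # ['own']

    # consider_base_classes=True walks the MRO minus `object`.
    mro = [c for c in Child.mro() if c is not object]
    defined = {name for c in mro for name in c.__dict__}
    print(sorted(a for a in defined if not a.startswith('__')))
    # ['own', 'shared']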
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/ops/state_ops.py
python
_AssignUpdateShape
(op)
return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]
Shape function for the AssignAdd and AssignSub dense update ops.
Shape function for the AssignAdd and AssignSub dense update ops.
[ "Shape", "function", "for", "the", "AssignAdd", "and", "AssignSub", "dense", "update", "ops", "." ]
def _AssignUpdateShape(op): """Shape function for the AssignAdd and AssignSub dense update ops.""" return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]
[ "def", "_AssignUpdateShape", "(", "op", ")", ":", "return", "[", "op", ".", "inputs", "[", "0", "]", ".", "get_shape", "(", ")", ".", "merge_with", "(", "op", ".", "inputs", "[", "1", "]", ".", "get_shape", "(", ")", ")", "]" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/state_ops.py#L216-L218
Yelp/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
moe/optimal_learning/python/python_version/log_likelihood.py
python
GaussianProcessLogMarginalLikelihood.dim
(self)
return self._historical_data.dim
Return the number of spatial dimensions.
Return the number of spatial dimensions.
[ "Return", "the", "number", "of", "spatial", "dimensions", "." ]
def dim(self): """Return the number of spatial dimensions.""" return self._historical_data.dim
[ "def", "dim", "(", "self", ")", ":", "return", "self", ".", "_historical_data", ".", "dim" ]
https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/optimal_learning/python/python_version/log_likelihood.py#L204-L206
albertz/openlierox
d316c14a8eb57848ef56e9bfa7b23a56f694a51b
tools/DedicatedServerVideo/gdata/finance/service.py
python
FinanceService.AddTransaction
(self, transaction_entry=None, transaction_feed = None, position_entry=None, portfolio_id=None, ticker_id=None)
return self.Post(transaction_entry, uri, converter=gdata.finance.TransactionEntryFromString)
Args: transaction_entry: TransactionEntry (required) transaction_feed: TransactionFeed (optional; see Notes) position_entry: PositionEntry (optional; see Notes) portfolio_id: string (optional; see Notes) This may be obtained from a PortfolioEntry's portfolio_id attribute. ticker_id: string (optional; see Notes) This may be obtained from a PositionEntry's ticker_id attribute. Alternatively it can be constructed using the security's exchange and symbol, e.g. 'NASDAQ:GOOG' Notes: Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND ticker ID) must be provided.
Args: transaction_entry: TransactionEntry (required) transaction_feed: TransactionFeed (optional; see Notes) position_entry: PositionEntry (optional; see Notes) portfolio_id: string (optional; see Notes) This may be obtained from a PortfolioEntry's portfolio_id attribute. ticker_id: string (optional; see Notes) This may be obtained from a PositionEntry's ticker_id attribute. Alternatively it can be constructed using the security's exchange and symbol, e.g. 'NASDAQ:GOOG'
[ "Args", ":", "transaction_entry", ":", "TransactionEntry", "(", "required", ")", "transaction_feed", ":", "TransactionFeed", "(", "optional", ";", "see", "Notes", ")", "position_entry", ":", "PositionEntry", "(", "optional", ";", "see", "Notes", ")", "portfolio_id", ":", "string", "(", "optional", ";", "see", "Notes", ")", "This", "may", "be", "obtained", "from", "a", "PortfolioEntry", "s", "portfolio_id", "attribute", ".", "ticker_id", ":", "string", "(", "optional", ";", "see", "Notes", ")", "This", "may", "be", "obtained", "from", "a", "PositionEntry", "s", "ticker_id", "attribute", ".", "Alternatively", "it", "can", "be", "constructed", "using", "the", "security", "s", "exchange", "and", "symbol", "e", ".", "g", ".", "NASDAQ", ":", "GOOG" ]
def AddTransaction(self, transaction_entry=None, transaction_feed = None, position_entry=None, portfolio_id=None, ticker_id=None): """ Args: transaction_entry: TransactionEntry (required) transaction_feed: TransactionFeed (optional; see Notes) position_entry: PositionEntry (optional; see Notes) portfolio_id: string (optional; see Notes) This may be obtained from a PortfolioEntry's portfolio_id attribute. ticker_id: string (optional; see Notes) This may be obtained from a PositionEntry's ticker_id attribute. Alternatively it can be constructed using the security's exchange and symbol, e.g. 'NASDAQ:GOOG' Notes: Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND ticker ID) must be provided. """ if transaction_feed: uri = transaction_feed.GetPostLink().href elif position_entry: uri = position_entry.GetSelfLink().href + '/transactions' elif portfolio_id and ticker_id: uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \ % (portfolio_id, ticker_id) return self.Post(transaction_entry, uri, converter=gdata.finance.TransactionEntryFromString)
[ "def", "AddTransaction", "(", "self", ",", "transaction_entry", "=", "None", ",", "transaction_feed", "=", "None", ",", "position_entry", "=", "None", ",", "portfolio_id", "=", "None", ",", "ticker_id", "=", "None", ")", ":", "if", "transaction_feed", ":", "uri", "=", "transaction_feed", ".", "GetPostLink", "(", ")", ".", "href", "elif", "position_entry", ":", "uri", "=", "position_entry", ".", "GetSelfLink", "(", ")", ".", "href", "+", "'/transactions'", "elif", "portfolio_id", "and", "ticker_id", ":", "uri", "=", "'/finance/feeds/default/portfolios/%s/positions/%s/transactions'", "%", "(", "portfolio_id", ",", "ticker_id", ")", "return", "self", ".", "Post", "(", "transaction_entry", ",", "uri", ",", "converter", "=", "gdata", ".", "finance", ".", "TransactionEntryFromString", ")" ]
https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/finance/service.py#L208-L234
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/dataview.py
python
DataViewListCtrl.SetValue
(*args, **kwargs)
return _dataview.DataViewListCtrl_SetValue(*args, **kwargs)
SetValue(self, wxVariant value, unsigned int row, unsigned int col)
SetValue(self, wxVariant value, unsigned int row, unsigned int col)
[ "SetValue", "(", "self", "wxVariant", "value", "unsigned", "int", "row", "unsigned", "int", "col", ")" ]
def SetValue(*args, **kwargs): """SetValue(self, wxVariant value, unsigned int row, unsigned int col)""" return _dataview.DataViewListCtrl_SetValue(*args, **kwargs)
[ "def", "SetValue", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "DataViewListCtrl_SetValue", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L2177-L2179
ros-planning/moveit
ee48dc5cedc981d0869352aa3db0b41469c2735c
moveit_commander/src/moveit_commander/planning_scene_interface.py
python
PlanningSceneInterface.__make_existing
(name)
return co
Create an empty Collision Object. Used when the object already exists
Create an empty Collision Object. Used when the object already exists
[ "Create", "an", "empty", "Collision", "Object", ".", "Used", "when", "the", "object", "already", "exists" ]
def __make_existing(name): """ Create an empty Collision Object. Used when the object already exists """ co = CollisionObject() co.id = name return co
[ "def", "__make_existing", "(", "name", ")", ":", "co", "=", "CollisionObject", "(", ")", "co", ".", "id", "=", "name", "return", "co" ]
https://github.com/ros-planning/moveit/blob/ee48dc5cedc981d0869352aa3db0b41469c2735c/moveit_commander/src/moveit_commander/planning_scene_interface.py#L256-L262
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/inspect.py
python
ismethod
(object)
return isinstance(object, types.MethodType)
Return true if the object is an instance method. Instance method objects provide these attributes: __doc__ documentation string __name__ name with which this method was defined __func__ function object containing implementation of method __self__ instance to which this method is bound
Return true if the object is an instance method.
[ "Return", "true", "if", "the", "object", "is", "an", "instance", "method", "." ]
def ismethod(object): """Return true if the object is an instance method. Instance method objects provide these attributes: __doc__ documentation string __name__ name with which this method was defined __func__ function object containing implementation of method __self__ instance to which this method is bound""" return isinstance(object, types.MethodType)
[ "def", "ismethod", "(", "object", ")", ":", "return", "isinstance", "(", "object", ",", "types", ".", "MethodType", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/inspect.py#L80-L88
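A quick illustration of the distinction ismethod() draws, using only the standard library:

import inspect

class Greeter:
    def hello(self):
        return 'hi'

g = Greeter()
print(inspect.ismethod(g.hello))        # True: bound instance method
print(inspect.ismethod(Greeter.hello))  # False: plain function in Python 3
print(inspect.ismethod(len))            # False: builtin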
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/dispatcher.py
python
_DispatcherBase.inspect_llvm
(self, signature=None)
return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
Get the LLVM intermediate representation generated by compilation. Parameters ---------- signature : tuple of numba types, optional Specify a signature for which to obtain the LLVM IR. If None, the IR is returned for all available signatures. Returns ------- llvm : dict[signature, str] or str Either the LLVM IR string for the specified signature, or, if no signature was given, a dictionary mapping signatures to LLVM IR strings.
Get the LLVM intermediate representation generated by compilation.
[ "Get", "the", "LLVM", "intermediate", "representation", "generated", "by", "compilation", "." ]
def inspect_llvm(self, signature=None): """Get the LLVM intermediate representation generated by compilation. Parameters ---------- signature : tuple of numba types, optional Specify a signature for which to obtain the LLVM IR. If None, the IR is returned for all available signatures. Returns ------- llvm : dict[signature, str] or str Either the LLVM IR string for the specified signature, or, if no signature was given, a dictionary mapping signatures to LLVM IR strings. """ if signature is not None: lib = self.overloads[signature].library return lib.get_llvm_str() return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
[ "def", "inspect_llvm", "(", "self", ",", "signature", "=", "None", ")", ":", "if", "signature", "is", "not", "None", ":", "lib", "=", "self", ".", "overloads", "[", "signature", "]", ".", "library", "return", "lib", ".", "get_llvm_str", "(", ")", "return", "dict", "(", "(", "sig", ",", "self", ".", "inspect_llvm", "(", "sig", ")", ")", "for", "sig", "in", "self", ".", "signatures", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/dispatcher.py#L422-L442
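A short usage sketch (requires numba installed; add() is a made-up example function):

from numba import njit

@njit
def add(a, b):
    return a + b

add(1, 2)  # trigger compilation for the (int64, int64) signature
all_ir = add.inspect_llvm()                   # dict mapping signature -> LLVM IR string
one_ir = add.inspect_llvm(add.signatures[0])  # IR string for a single signature
print(one_ir.splitlines()[0])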
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/ceph-volume/ceph_volume/devices/lvm/zap.py
python
Zap.zap_lv
(self, device)
Device examples: vg-name/lv-name, /dev/vg-name/lv-name Requirements: Must be a logical volume (LV)
Device examples: vg-name/lv-name, /dev/vg-name/lv-name Requirements: Must be a logical volume (LV)
[ "Device", "examples", ":", "vg", "-", "name", "/", "lv", "-", "name", "/", "dev", "/", "vg", "-", "name", "/", "lv", "-", "name", "Requirements", ":", "Must", "be", "a", "logical", "volume", "(", "LV", ")" ]
def zap_lv(self, device): """ Device examples: vg-name/lv-name, /dev/vg-name/lv-name Requirements: Must be a logical volume (LV) """ lv = api.get_single_lv(filters={'lv_name': device.lv_name, 'vg_name': device.vg_name}) self.unmount_lv(lv) wipefs(device.abspath) zap_data(device.abspath) if self.args.destroy: lvs = api.get_lvs(filters={'vg_name': device.vg_name}) if lvs == []: mlogger.info('No LVs left, exiting', device.vg_name) return elif len(lvs) <= 1: mlogger.info('Only 1 LV left in VG, will proceed to destroy ' 'volume group %s', device.vg_name) api.remove_vg(device.vg_name) else: mlogger.info('More than 1 LV left in VG, will proceed to ' 'destroy LV only') mlogger.info('Removing LV because --destroy was given: %s', device.abspath) api.remove_lv(device.abspath) elif lv: # just remove all lvm metadata, leaving the LV around lv.clear_tags()
[ "def", "zap_lv", "(", "self", ",", "device", ")", ":", "lv", "=", "api", ".", "get_single_lv", "(", "filters", "=", "{", "'lv_name'", ":", "device", ".", "lv_name", ",", "'vg_name'", ":", "device", ".", "vg_name", "}", ")", "self", ".", "unmount_lv", "(", "lv", ")", "wipefs", "(", "device", ".", "abspath", ")", "zap_data", "(", "device", ".", "abspath", ")", "if", "self", ".", "args", ".", "destroy", ":", "lvs", "=", "api", ".", "get_lvs", "(", "filters", "=", "{", "'vg_name'", ":", "device", ".", "vg_name", "}", ")", "if", "lvs", "==", "[", "]", ":", "mlogger", ".", "info", "(", "'No LVs left, exiting'", ",", "device", ".", "vg_name", ")", "return", "elif", "len", "(", "lvs", ")", "<=", "1", ":", "mlogger", ".", "info", "(", "'Only 1 LV left in VG, will proceed to destroy '", "'volume group %s'", ",", "device", ".", "vg_name", ")", "api", ".", "remove_vg", "(", "device", ".", "vg_name", ")", "else", ":", "mlogger", ".", "info", "(", "'More than 1 LV left in VG, will proceed to '", "'destroy LV only'", ")", "mlogger", ".", "info", "(", "'Removing LV because --destroy was given: %s'", ",", "device", ".", "abspath", ")", "api", ".", "remove_lv", "(", "device", ".", "abspath", ")", "elif", "lv", ":", "# just remove all lvm metadata, leaving the LV around", "lv", ".", "clear_tags", "(", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/ceph-volume/ceph_volume/devices/lvm/zap.py#L164-L193
pybox2d/pybox2d
09643321fd363f0850087d1bde8af3f4afd82163
library/Box2D/examples/backends/pyglet_framework.py
python
PygletFramework.Keyboard
(self, key)
Callback indicating 'key' has been pressed down.
Callback indicating 'key' has been pressed down.
[ "Callback", "indicating", "key", "has", "been", "pressed", "down", "." ]
def Keyboard(self, key): """ Callback indicating 'key' has been pressed down. """ pass
[ "def", "Keyboard", "(", "self", ",", "key", ")", ":", "pass" ]
https://github.com/pybox2d/pybox2d/blob/09643321fd363f0850087d1bde8af3f4afd82163/library/Box2D/examples/backends/pyglet_framework.py#L686-L690
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/inspect.py
python
getblock
(lines)
return lines[:blockfinder.last]
Extract the block of code at the top of the given list of lines.
Extract the block of code at the top of the given list of lines.
[ "Extract", "the", "block", "of", "code", "at", "the", "top", "of", "the", "given", "list", "of", "lines", "." ]
def getblock(lines): """Extract the block of code at the top of the given list of lines.""" blockfinder = BlockFinder() try: tokens = tokenize.generate_tokens(iter(lines).__next__) for _token in tokens: blockfinder.tokeneater(*_token) except (EndOfBlock, IndentationError): pass return lines[:blockfinder.last]
[ "def", "getblock", "(", "lines", ")", ":", "blockfinder", "=", "BlockFinder", "(", ")", "try", ":", "tokens", "=", "tokenize", ".", "generate_tokens", "(", "iter", "(", "lines", ")", ".", "__next__", ")", "for", "_token", "in", "tokens", ":", "blockfinder", ".", "tokeneater", "(", "*", "_token", ")", "except", "(", "EndOfBlock", ",", "IndentationError", ")", ":", "pass", "return", "lines", "[", ":", "blockfinder", ".", "last", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/inspect.py#L935-L944
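Behavior sketch: only the first top-level block of the listing is kept. Note getblock is a module-internal helper, so this relies on an undocumented API:

import inspect

src = [
    "def first():\n",
    "    return 1\n",
    "\n",
    "def second():\n",
    "    return 2\n",
]
print(''.join(inspect.getblock(src)))  # prints the 'first' function only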
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/xrc.py
python
XmlNode.DeleteProperty
(*args, **kwargs)
return _xrc.XmlNode_DeleteProperty(*args, **kwargs)
DeleteProperty(self, String name) -> bool
DeleteProperty(self, String name) -> bool
[ "DeleteProperty", "(", "self", "String", "name", ")", "-", ">", "bool" ]
def DeleteProperty(*args, **kwargs): """DeleteProperty(self, String name) -> bool""" return _xrc.XmlNode_DeleteProperty(*args, **kwargs)
[ "def", "DeleteProperty", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_xrc", ".", "XmlNode_DeleteProperty", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/xrc.py#L382-L384
greatscottgadgets/gr-bluetooth
c2a7d7d810e047f8a18902a4e3d1a152420655bb
docs/doxygen/swig_doc.py
python
make_block_entry
(di, block)
return "\n\n".join(output)
Create class and function docstrings of a gnuradio block for a swig interface file.
Create class and function docstrings of a gnuradio block for a swig interface file.
[ "Create", "class", "and", "function", "docstrings", "of", "a", "gnuradio", "block", "for", "a", "swig", "interface", "file", "." ]
def make_block_entry(di, block): """ Create class and function docstrings of a gnuradio block for a swig interface file. """ descriptions = [] # Get the documentation associated with the class. class_desc = combine_descriptions(block) if class_desc: descriptions.append(class_desc) # Get the documentation associated with the make function make_func = di.get_member(make_name(block.name()), DoxyFunction) make_func_desc = combine_descriptions(make_func) if make_func_desc: descriptions.append(make_func_desc) # Get the documentation associated with the file try: block_file = di.get_member(block.name() + ".h", DoxyFile) file_desc = combine_descriptions(block_file) if file_desc: descriptions.append(file_desc) except base.Base.NoSuchMember: # Don't worry if we can't find a matching file. pass # And join them all together to make a super duper description. super_description = "\n\n".join(descriptions) # Associate the combined description with the class and # the make function. output = [] output.append(make_class_entry(block, description=super_description)) creator = block.get_member(block.name(), DoxyFunction) output.append(make_func_entry(make_func, description=super_description, params=creator.params)) return "\n\n".join(output)
[ "def", "make_block_entry", "(", "di", ",", "block", ")", ":", "descriptions", "=", "[", "]", "# Get the documentation associated with the class.", "class_desc", "=", "combine_descriptions", "(", "block", ")", "if", "class_desc", ":", "descriptions", ".", "append", "(", "class_desc", ")", "# Get the documentation associated with the make function", "make_func", "=", "di", ".", "get_member", "(", "make_name", "(", "block", ".", "name", "(", ")", ")", ",", "DoxyFunction", ")", "make_func_desc", "=", "combine_descriptions", "(", "make_func", ")", "if", "make_func_desc", ":", "descriptions", ".", "append", "(", "make_func_desc", ")", "# Get the documentation associated with the file", "try", ":", "block_file", "=", "di", ".", "get_member", "(", "block", ".", "name", "(", ")", "+", "\".h\"", ",", "DoxyFile", ")", "file_desc", "=", "combine_descriptions", "(", "block_file", ")", "if", "file_desc", ":", "descriptions", ".", "append", "(", "file_desc", ")", "except", "base", ".", "Base", ".", "NoSuchMember", ":", "# Don't worry if we can't find a matching file.", "pass", "# And join them all together to make a super duper description.", "super_description", "=", "\"\\n\\n\"", ".", "join", "(", "descriptions", ")", "# Associate the combined description with the class and", "# the make function.", "output", "=", "[", "]", "output", ".", "append", "(", "make_class_entry", "(", "block", ",", "description", "=", "super_description", ")", ")", "creator", "=", "block", ".", "get_member", "(", "block", ".", "name", "(", ")", ",", "DoxyFunction", ")", "output", ".", "append", "(", "make_func_entry", "(", "make_func", ",", "description", "=", "super_description", ",", "params", "=", "creator", ".", "params", ")", ")", "return", "\"\\n\\n\"", ".", "join", "(", "output", ")" ]
https://github.com/greatscottgadgets/gr-bluetooth/blob/c2a7d7d810e047f8a18902a4e3d1a152420655bb/docs/doxygen/swig_doc.py#L148-L181
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/rsa/rsa/_version200.py
python
are_relatively_prime
(a, b)
return (d == 1)
Returns True if a and b are relatively prime, and False if they are not. >>> are_relatively_prime(2, 3) 1 >>> are_relatively_prime(2, 4) 0
Returns True if a and b are relatively prime, and False if they are not.
[ "Returns", "True", "if", "a", "and", "b", "are", "relatively", "prime", "and", "False", "if", "they", "are", "not", "." ]
def are_relatively_prime(a, b): """Returns True if a and b are relatively prime, and False if they are not. >>> are_relatively_prime(2, 3) 1 >>> are_relatively_prime(2, 4) 0 """ d = gcd(a, b) return (d == 1)
[ "def", "are_relatively_prime", "(", "a", ",", "b", ")", ":", "d", "=", "gcd", "(", "a", ",", "b", ")", "return", "(", "d", "==", "1", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/rsa/rsa/_version200.py#L298-L309
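The same check expressed with the standard library, with the docstring's examples as assertions:

from math import gcd

def are_relatively_prime(a, b):
    return gcd(a, b) == 1

assert are_relatively_prime(2, 3)      # gcd(2, 3) == 1
assert not are_relatively_prime(2, 4)  # gcd(2, 4) == 2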
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py
python
PrecompiledHeader._PchHeader
(self)
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
Get the header that will appear in an #include line for all source files.
Get the header that will appear in an #include line for all source files.
[ "Get", "the", "header", "that", "will", "appear", "in", "an", "#include", "line", "for", "all", "source", "files", "." ]
def _PchHeader(self): """Get the header that will appear in an #include line for all source files.""" return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
[ "def", "_PchHeader", "(", "self", ")", ":", "return", "os", ".", "path", ".", "split", "(", "self", ".", "settings", ".", "msvs_precompiled_header", "[", "self", ".", "config", "]", ")", "[", "1", "]" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py#L901-L904
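For reference, os.path.split()[1] keeps only the basename, which is what ends up in the #include line (the path below is hypothetical):

import os
print(os.path.split('subdir/precompiled.h')[1])  # 'precompiled.h'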
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/backend.py
python
elu
(x, alpha=1.)
Exponential linear unit. Arguments: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. Returns: A tensor.
Exponential linear unit.
[ "Exponential", "linear", "unit", "." ]
def elu(x, alpha=1.): """Exponential linear unit. Arguments: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. Returns: A tensor. """ res = nn.elu(x) if alpha == 1: return res else: return array_ops.where(x > 0, res, alpha * res)
[ "def", "elu", "(", "x", ",", "alpha", "=", "1.", ")", ":", "res", "=", "nn", ".", "elu", "(", "x", ")", "if", "alpha", "==", "1", ":", "return", "res", "else", ":", "return", "array_ops", ".", "where", "(", "x", ">", "0", ",", "res", ",", "alpha", "*", "res", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/backend.py#L4263-L4277
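A standalone NumPy sketch of the same piecewise definition, elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise:

import numpy as np

def elu(x, alpha=1.0):
    # exp(x) - 1 on the negative branch matches alpha * nn.elu(x) above.
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

print(elu(np.array([-2.0, 0.0, 3.0])))  # [-0.86466472  0.  3.]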
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/hyperparser.py
python
HyperParser.get_expression
(self)
return rawtext[last_identifier_pos:self.indexinrawtext]
Return a string with the Python expression which ends at the given index, which is empty if there is no real one.
Return a string with the Python expression which ends at the given index, which is empty if there is no real one.
[ "Return", "a", "string", "with", "the", "Python", "expression", "which", "ends", "at", "the", "given", "index", "which", "is", "empty", "if", "there", "is", "no", "real", "one", "." ]
def get_expression(self): """Return a string with the Python expression which ends at the given index, which is empty if there is no real one. """ if not self.is_in_code(): raise ValueError("get_expression should only be called " "if index is inside a code.") rawtext = self.rawtext bracketing = self.bracketing brck_index = self.indexbracket brck_limit = bracketing[brck_index][0] pos = self.indexinrawtext last_identifier_pos = pos postdot_phase = True while 1: # Eat whitespaces, comments, and if postdot_phase is False - a dot while 1: if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars: # Eat a whitespace pos -= 1 elif (not postdot_phase and pos > brck_limit and rawtext[pos-1] == '.'): # Eat a dot pos -= 1 postdot_phase = True # The next line will fail if we are *inside* a comment, # but we shouldn't be. elif (pos == brck_limit and brck_index > 0 and rawtext[bracketing[brck_index-1][0]] == '#'): # Eat a comment brck_index -= 2 brck_limit = bracketing[brck_index][0] pos = bracketing[brck_index+1][0] else: # If we didn't eat anything, quit. break if not postdot_phase: # We didn't find a dot, so the expression end at the # last identifier pos. break ret = self._eat_identifier(rawtext, brck_limit, pos) if ret: # There is an identifier to eat pos = pos - ret last_identifier_pos = pos # Now, to continue the search, we must find a dot. postdot_phase = False # (the loop continues now) elif pos == brck_limit: # We are at a bracketing limit. If it is a closing # bracket, eat the bracket, otherwise, stop the search. level = bracketing[brck_index][1] while brck_index > 0 and bracketing[brck_index-1][1] > level: brck_index -= 1 if bracketing[brck_index][0] == brck_limit: # We were not at the end of a closing bracket break pos = bracketing[brck_index][0] brck_index -= 1 brck_limit = bracketing[brck_index][0] last_identifier_pos = pos if rawtext[pos] in "([": # [] and () may be used after an identifier, so we # continue. postdot_phase is True, so we don't allow a dot. pass else: # We can't continue after other types of brackets if rawtext[pos] in "'\"": # Scan a string prefix while pos > 0 and rawtext[pos - 1] in "rRbBuU": pos -= 1 last_identifier_pos = pos break else: # We've found an operator or something. break return rawtext[last_identifier_pos:self.indexinrawtext]
[ "def", "get_expression", "(", "self", ")", ":", "if", "not", "self", ".", "is_in_code", "(", ")", ":", "raise", "ValueError", "(", "\"get_expression should only be called \"", "\"if index is inside a code.\"", ")", "rawtext", "=", "self", ".", "rawtext", "bracketing", "=", "self", ".", "bracketing", "brck_index", "=", "self", ".", "indexbracket", "brck_limit", "=", "bracketing", "[", "brck_index", "]", "[", "0", "]", "pos", "=", "self", ".", "indexinrawtext", "last_identifier_pos", "=", "pos", "postdot_phase", "=", "True", "while", "1", ":", "# Eat whitespaces, comments, and if postdot_phase is False - a dot", "while", "1", ":", "if", "pos", ">", "brck_limit", "and", "rawtext", "[", "pos", "-", "1", "]", "in", "self", ".", "_whitespace_chars", ":", "# Eat a whitespace", "pos", "-=", "1", "elif", "(", "not", "postdot_phase", "and", "pos", ">", "brck_limit", "and", "rawtext", "[", "pos", "-", "1", "]", "==", "'.'", ")", ":", "# Eat a dot", "pos", "-=", "1", "postdot_phase", "=", "True", "# The next line will fail if we are *inside* a comment,", "# but we shouldn't be.", "elif", "(", "pos", "==", "brck_limit", "and", "brck_index", ">", "0", "and", "rawtext", "[", "bracketing", "[", "brck_index", "-", "1", "]", "[", "0", "]", "]", "==", "'#'", ")", ":", "# Eat a comment", "brck_index", "-=", "2", "brck_limit", "=", "bracketing", "[", "brck_index", "]", "[", "0", "]", "pos", "=", "bracketing", "[", "brck_index", "+", "1", "]", "[", "0", "]", "else", ":", "# If we didn't eat anything, quit.", "break", "if", "not", "postdot_phase", ":", "# We didn't find a dot, so the expression end at the", "# last identifier pos.", "break", "ret", "=", "self", ".", "_eat_identifier", "(", "rawtext", ",", "brck_limit", ",", "pos", ")", "if", "ret", ":", "# There is an identifier to eat", "pos", "=", "pos", "-", "ret", "last_identifier_pos", "=", "pos", "# Now, to continue the search, we must find a dot.", "postdot_phase", "=", "False", "# (the loop continues now)", "elif", "pos", "==", "brck_limit", ":", "# We are at a bracketing limit. If it is a closing", "# bracket, eat the bracket, otherwise, stop the search.", "level", "=", "bracketing", "[", "brck_index", "]", "[", "1", "]", "while", "brck_index", ">", "0", "and", "bracketing", "[", "brck_index", "-", "1", "]", "[", "1", "]", ">", "level", ":", "brck_index", "-=", "1", "if", "bracketing", "[", "brck_index", "]", "[", "0", "]", "==", "brck_limit", ":", "# We were not at the end of a closing bracket", "break", "pos", "=", "bracketing", "[", "brck_index", "]", "[", "0", "]", "brck_index", "-=", "1", "brck_limit", "=", "bracketing", "[", "brck_index", "]", "[", "0", "]", "last_identifier_pos", "=", "pos", "if", "rawtext", "[", "pos", "]", "in", "\"([\"", ":", "# [] and () may be used after an identifier, so we", "# continue. postdot_phase is True, so we don't allow a dot.", "pass", "else", ":", "# We can't continue after other types of brackets", "if", "rawtext", "[", "pos", "]", "in", "\"'\\\"\"", ":", "# Scan a string prefix", "while", "pos", ">", "0", "and", "rawtext", "[", "pos", "-", "1", "]", "in", "\"rRbBuU\"", ":", "pos", "-=", "1", "last_identifier_pos", "=", "pos", "break", "else", ":", "# We've found an operator or something.", "break", "return", "rawtext", "[", "last_identifier_pos", ":", "self", ".", "indexinrawtext", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/hyperparser.py#L222-L307
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/dist.py
python
Distribution.get_command_class
(self, command)
Pluggable version of get_command_class()
Pluggable version of get_command_class()
[ "Pluggable", "version", "of", "get_command_class", "()" ]
def get_command_class(self, command): """Pluggable version of get_command_class()""" if command in self.cmdclass: return self.cmdclass[command] eps = pkg_resources.iter_entry_points('distutils.commands', command) for ep in eps: ep.require(installer=self.fetch_build_egg) self.cmdclass[command] = cmdclass = ep.load() return cmdclass else: return _Distribution.get_command_class(self, command)
[ "def", "get_command_class", "(", "self", ",", "command", ")", ":", "if", "command", "in", "self", ".", "cmdclass", ":", "return", "self", ".", "cmdclass", "[", "command", "]", "eps", "=", "pkg_resources", ".", "iter_entry_points", "(", "'distutils.commands'", ",", "command", ")", "for", "ep", "in", "eps", ":", "ep", ".", "require", "(", "installer", "=", "self", ".", "fetch_build_egg", ")", "self", ".", "cmdclass", "[", "command", "]", "=", "cmdclass", "=", "ep", ".", "load", "(", ")", "return", "cmdclass", "else", ":", "return", "_Distribution", ".", "get_command_class", "(", "self", ",", "command", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/dist.py#L756-L767
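A sketch of the entry-point lookup the method performs; 'bdist_wheel' is just an illustrative command name and resolves only if the wheel package is installed:

import pkg_resources

for ep in pkg_resources.iter_entry_points('distutils.commands', 'bdist_wheel'):
    cmd_class = ep.load()  # the command class registered by a third-party package
    print(cmd_class)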
CMU-Perceptual-Computing-Lab/caffe_rtpose
a4778bb1c3eb74d7250402016047216f77b4dba6
scripts/cpp_lint.py
python
CheckLanguage
(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error)
Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
Checks rules from the 'C++ language rules' section of cppguide.html.
[ "Checks", "rules", "from", "the", "C", "++", "language", "rules", "section", "of", "cppguide", ".", "html", "." ]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line): include_state.ResetSection() # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # TODO(unknown): figure out if they're using default arguments in fn proto. # Check to see if they're using an conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) if match: matched_new = match.group(1) matched_type = match.group(2) matched_funcptr = match.group(3) # gMock methods are defined using some variant of MOCK_METHODx(name, type) # where type may be float(), int(string), etc. Without context they are # virtually indistinguishable from int(x) casts. Likewise, gMock's # MockCallback takes a template parameter of the form return_type(arg_type), # which looks much like the cast we're trying to detect. # # std::function<> wrapper has a similar problem. # # Return types for function pointers also look like casts if they # don't have an extra space. if (matched_new is None and # If new operator, then this isn't a cast not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or Search(r'\bMockCallback<.*>', line) or Search(r'\bstd::function<.*>', line)) and not (matched_funcptr and Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr))): # Try a bit harder to catch gmock lines: the only place where # something looks like an old-style cast is where we declare the # return type of the mocked method, and the only time when we # are missing context is if MOCK_METHOD was split across # multiple lines. The missing MOCK_METHOD is usually one or two # lines back, so scan back one or two lines. # # It's not possible for gmock macros to appear in the first 2 # lines, since the class head + section name takes up 2 lines. if (linenum < 2 or not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]))): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. match = Search( r'(?:&\(([^)]+)\)[\w(])|' r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) if match and match.group(1) != '*': error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) # Create an extended_line, which is the concatenation of the current and # next lines, for more effective checking of code that may span more than one # line. if linenum + 1 < clean_lines.NumLines(): extended_line = line + clean_lines.elided[linenum + 1] else: extended_line = line # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Make sure it's not a function. # Function template specialization looks like: "string foo<Type>(...". # Class template definitions look like: "string Foo<Type>::Method(...". # # Also ignore things that look like operators. These are matched separately # because operator names cross non-word boundaries. If we change the pattern # above, we would decrease the accuracy of matching identifiers. if (match and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\b', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\b', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1)) # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: # class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). # We constrain the pattern not to pick things like DocidForPrintf(foo). # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) # TODO(sugawarayu): Catch the following case. Need to change the calling # convention of the whole function to process multiple line to handle it. # printf( # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: match = Match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))) if Search(r'\busing namespace\b', line): error(filename, linenum, 'build/namespaces', 5, 'Do not use namespace using-directives. ' 'Use using-declarations instead.') # Detect variable-length arrays. match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. # If any of the resulting tokens are not compile time constants then # report the error. tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) is_const = True skip_next = False for tok in tokens: if skip_next: skip_next = False continue if Search(r'sizeof\(.+\)', tok): continue if Search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue if Match(r'\d+', tok): continue if Match(r'0[xX][0-9a-fA-F]+', tok): continue if Match(r'k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing # in the class declaration. match = Match( (r'\s*' r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' r'\(.*\);$'), line) if match and linenum + 1 < clean_lines.NumLines(): next_line = clean_lines.elided[linenum + 1] # We allow some, but not all, declarations of variables to be present # in the statement that defines the class. The [\w\*,\s]* fragment of # the regular expression below allows users to declare instances of # the class or pointers to instances, but not less common types such # as function pointers or arrays. It's a tradeoff between allowing # reasonable code and avoiding trying to parse more C++ using regexps. if not Search(r'^\s*}[\w\*,\s]*;', next_line): error(filename, linenum, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.')
[ "def", "CheckLanguage", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "file_extension", ",", "include_state", ",", "nesting_state", ",", "error", ")", ":", "# If the line is empty or consists of entirely a comment, no need to", "# check it.", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "not", "line", ":", "return", "match", "=", "_RE_PATTERN_INCLUDE", ".", "search", "(", "line", ")", "if", "match", ":", "CheckIncludeLine", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "include_state", ",", "error", ")", "return", "# Reset include state across preprocessor directives. This is meant", "# to silence warnings for conditional includes.", "if", "Match", "(", "r'^\\s*#\\s*(?:ifdef|elif|else|endif)\\b'", ",", "line", ")", ":", "include_state", ".", "ResetSection", "(", ")", "# Make Windows paths like Unix.", "fullname", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# TODO(unknown): figure out if they're using default arguments in fn proto.", "# Check to see if they're using an conversion function cast.", "# I just try to capture the most common basic types, though there are more.", "# Parameterless conversion functions, such as bool(), are allowed as they are", "# probably a member operator declaration or default constructor.", "match", "=", "Search", "(", "r'(\\bnew\\s+)?\\b'", "# Grab 'new' operator, if it's there", "r'(int|float|double|bool|char|int32|uint32|int64|uint64)'", "r'(\\([^)].*)'", ",", "line", ")", "if", "match", ":", "matched_new", "=", "match", ".", "group", "(", "1", ")", "matched_type", "=", "match", ".", "group", "(", "2", ")", "matched_funcptr", "=", "match", ".", "group", "(", "3", ")", "# gMock methods are defined using some variant of MOCK_METHODx(name, type)", "# where type may be float(), int(string), etc. Without context they are", "# virtually indistinguishable from int(x) casts. Likewise, gMock's", "# MockCallback takes a template parameter of the form return_type(arg_type),", "# which looks much like the cast we're trying to detect.", "#", "# std::function<> wrapper has a similar problem.", "#", "# Return types for function pointers also look like casts if they", "# don't have an extra space.", "if", "(", "matched_new", "is", "None", "and", "# If new operator, then this isn't a cast", "not", "(", "Match", "(", "r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\('", ",", "line", ")", "or", "Search", "(", "r'\\bMockCallback<.*>'", ",", "line", ")", "or", "Search", "(", "r'\\bstd::function<.*>'", ",", "line", ")", ")", "and", "not", "(", "matched_funcptr", "and", "Match", "(", "r'\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\('", ",", "matched_funcptr", ")", ")", ")", ":", "# Try a bit harder to catch gmock lines: the only place where", "# something looks like an old-style cast is where we declare the", "# return type of the mocked method, and the only time when we", "# are missing context is if MOCK_METHOD was split across", "# multiple lines. 
The missing MOCK_METHOD is usually one or two", "# lines back, so scan back one or two lines.", "#", "# It's not possible for gmock macros to appear in the first 2", "# lines, since the class head + section name takes up 2 lines.", "if", "(", "linenum", "<", "2", "or", "not", "(", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", "or", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "2", "]", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/casting'", ",", "4", ",", "'Using deprecated casting style. '", "'Use static_cast<%s>(...) instead'", "%", "matched_type", ")", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'static_cast'", ",", "r'\\((int|float|double|bool|char|u?int(16|32|64))\\)'", ",", "error", ")", "# This doesn't catch all cases. Consider (const char * const)\"hello\".", "#", "# (char *) \"foo\" should always be a const_cast (reinterpret_cast won't", "# compile).", "if", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'const_cast'", ",", "r'\\((char\\s?\\*+\\s?)\\)\\s*\"'", ",", "error", ")", ":", "pass", "else", ":", "# Check pointer casts for other than string constants", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'reinterpret_cast'", ",", "r'\\((\\w+\\s?\\*+\\s?)\\)'", ",", "error", ")", "# In addition, we look for people taking the address of a cast. This", "# is dangerous -- casts can assign to temporaries, so the pointer doesn't", "# point where you think.", "match", "=", "Search", "(", "r'(?:&\\(([^)]+)\\)[\\w(])|'", "r'(?:&(static|dynamic|down|reinterpret)_cast\\b)'", ",", "line", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "!=", "'*'", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/casting'", ",", "4", ",", "(", "'Are you taking an address of a cast? '", "'This is dangerous: could be a temp var. '", "'Take the address before doing the cast, rather than after'", ")", ")", "# Create an extended_line, which is the concatenation of the current and", "# next lines, for more effective checking of code that may span more than one", "# line.", "if", "linenum", "+", "1", "<", "clean_lines", ".", "NumLines", "(", ")", ":", "extended_line", "=", "line", "+", "clean_lines", ".", "elided", "[", "linenum", "+", "1", "]", "else", ":", "extended_line", "=", "line", "# Check for people declaring static/global STL strings at the top level.", "# This is dangerous because the C++ language does not guarantee that", "# globals with constructors are initialized before the first access.", "match", "=", "Match", "(", "r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)'", ",", "line", ")", "# Make sure it's not a function.", "# Function template specialization looks like: \"string foo<Type>(...\".", "# Class template definitions look like: \"string Foo<Type>::Method(...\".", "#", "# Also ignore things that look like operators. These are matched separately", "# because operator names cross non-word boundaries. 
If we change the pattern", "# above, we would decrease the accuracy of matching identifiers.", "if", "(", "match", "and", "not", "Search", "(", "r'\\boperator\\W'", ",", "line", ")", "and", "not", "Match", "(", "r'\\s*(<.*>)?(::[a-zA-Z0-9_]+)?\\s*\\(([^\"]|$)'", ",", "match", ".", "group", "(", "3", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/string'", ",", "4", ",", "'For a static/global string constant, use a C style string instead: '", "'\"%schar %s[]\".'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "if", "Search", "(", "r'\\b([A-Za-z0-9_]*_)\\(\\1\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/init'", ",", "4", ",", "'You seem to be initializing a member variable with itself.'", ")", "if", "file_extension", "==", "'h'", ":", "# TODO(unknown): check that 1-arg constructors are explicit.", "# How to tell it's a constructor?", "# (handled in CheckForNonStandardConstructs for now)", "# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS", "# (level 1 error)", "pass", "# Check if people are using the verboten C basic types. The only exception", "# we regularly allow is \"unsigned short port\" for port.", "if", "Search", "(", "r'\\bshort port\\b'", ",", "line", ")", ":", "if", "not", "Search", "(", "r'\\bunsigned short port\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use \"unsigned short\" for ports, not \"short\"'", ")", "else", ":", "match", "=", "Search", "(", "r'\\b(short|long(?! +double)|long long)\\b'", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use int16/int64/etc, rather than the C type %s'", "%", "match", ".", "group", "(", "1", ")", ")", "# When snprintf is used, the second argument shouldn't be a literal.", "match", "=", "Search", "(", "r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,'", ",", "line", ")", "if", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'0'", ":", "# If 2nd arg is zero, snprintf is used to calculate size.", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "3", ",", "'If you can, use sizeof(%s) instead of %s as the 2nd arg '", "'to snprintf.'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "# Check if some verboten C functions are being used.", "if", "Search", "(", "r'\\bsprintf\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "5", ",", "'Never use sprintf. Use snprintf instead.'", ")", "match", "=", "Search", "(", "r'\\b(strcpy|strcat)\\b'", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Almost always, snprintf is better than %s'", "%", "match", ".", "group", "(", "1", ")", ")", "# Check if some verboten operator overloading is going on", "# TODO(unknown): catch out-of-line unary operator&:", "# class X {};", "# int operator&(const X& x) { return 42; } // unary operator&", "# The trick is it's hard to tell apart from binary operator&:", "# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&", "if", "Search", "(", "r'\\boperator\\s*&\\s*\\(\\s*\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/operator'", ",", "4", ",", "'Unary operator& is dangerous. 
Do not use it.'", ")", "# Check for suspicious usage of \"if\" like", "# } if (a == b) {", "if", "Search", "(", "r'\\}\\s*if\\s*\\('", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/braces'", ",", "4", ",", "'Did you mean \"else if\"? If not, start a new line for \"if\".'", ")", "# Check for potential format string bugs like printf(foo).", "# We constrain the pattern not to pick things like DocidForPrintf(foo).", "# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())", "# TODO(sugawarayu): Catch the following case. Need to change the calling", "# convention of the whole function to process multiple line to handle it.", "# printf(", "# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);", "printf_args", "=", "_GetTextInside", "(", "line", ",", "r'(?i)\\b(string)?printf\\s*\\('", ")", "if", "printf_args", ":", "match", "=", "Match", "(", "r'([\\w.\\->()]+)$'", ",", "printf_args", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "!=", "'__VA_ARGS__'", ":", "function_name", "=", "re", ".", "search", "(", "r'\\b((?:string)?printf)\\s*\\('", ",", "line", ",", "re", ".", "I", ")", ".", "group", "(", "1", ")", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Potential format string bug. Do %s(\"%%s\", %s) instead.'", "%", "(", "function_name", ",", "match", ".", "group", "(", "1", ")", ")", ")", "# Check for potential memset bugs like memset(buf, sizeof(buf), 0).", "match", "=", "Search", "(", "r'memset\\s*\\(([^,]*),\\s*([^,]*),\\s*0\\s*\\)'", ",", "line", ")", "if", "match", "and", "not", "Match", "(", "r\"^''|-?[0-9]+|0x[0-9A-Fa-f]$\"", ",", "match", ".", "group", "(", "2", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/memset'", ",", "4", ",", "'Did you mean \"memset(%s, 0, %s)\"?'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "if", "Search", "(", "r'\\busing namespace\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "5", ",", "'Do not use namespace using-directives. 
'", "'Use using-declarations instead.'", ")", "# Detect variable-length arrays.", "match", "=", "Match", "(", "r'\\s*(.+::)?(\\w+) [a-z]\\w*\\[(.+)];'", ",", "line", ")", "if", "(", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'return'", "and", "match", ".", "group", "(", "2", ")", "!=", "'delete'", "and", "match", ".", "group", "(", "3", ")", ".", "find", "(", "']'", ")", "==", "-", "1", ")", ":", "# Split the size using space and arithmetic operators as delimiters.", "# If any of the resulting tokens are not compile time constants then", "# report the error.", "tokens", "=", "re", ".", "split", "(", "r'\\s|\\+|\\-|\\*|\\/|<<|>>]'", ",", "match", ".", "group", "(", "3", ")", ")", "is_const", "=", "True", "skip_next", "=", "False", "for", "tok", "in", "tokens", ":", "if", "skip_next", ":", "skip_next", "=", "False", "continue", "if", "Search", "(", "r'sizeof\\(.+\\)'", ",", "tok", ")", ":", "continue", "if", "Search", "(", "r'arraysize\\(\\w+\\)'", ",", "tok", ")", ":", "continue", "tok", "=", "tok", ".", "lstrip", "(", "'('", ")", "tok", "=", "tok", ".", "rstrip", "(", "')'", ")", "if", "not", "tok", ":", "continue", "if", "Match", "(", "r'\\d+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'0[xX][0-9a-fA-F]+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?[A-Z][A-Z0-9_]*'", ",", "tok", ")", ":", "continue", "# A catch all for tricky sizeof cases, including 'sizeof expression',", "# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'", "# requires skipping the next token because we split on ' ' and '*'.", "if", "tok", ".", "startswith", "(", "'sizeof'", ")", ":", "skip_next", "=", "True", "continue", "is_const", "=", "False", "break", "if", "not", "is_const", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/arrays'", ",", "1", ",", "'Do not use variable-length arrays. Use an appropriately named '", "\"('k' followed by CamelCase) compile-time constant for the size.\"", ")", "# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or", "# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing", "# in the class declaration.", "match", "=", "Match", "(", "(", "r'\\s*'", "r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'", "r'\\(.*\\);$'", ")", ",", "line", ")", "if", "match", "and", "linenum", "+", "1", "<", "clean_lines", ".", "NumLines", "(", ")", ":", "next_line", "=", "clean_lines", ".", "elided", "[", "linenum", "+", "1", "]", "# We allow some, but not all, declarations of variables to be present", "# in the statement that defines the class. The [\\w\\*,\\s]* fragment of", "# the regular expression below allows users to declare instances of", "# the class or pointers to instances, but not less common types such", "# as function pointers or arrays. It's a tradeoff between allowing", "# reasonable code and avoiding trying to parse more C++ using regexps.", "if", "not", "Search", "(", "r'^\\s*}[\\w\\*,\\s]*;'", ",", "next_line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/constructors'", ",", "3", ",", "match", ".", "group", "(", "1", ")", "+", "' should be the last thing in the class'", ")", "# Check for use of unnamed namespaces in header files. 
Registration", "# macros are typically OK, so we allow use of \"namespace {\" on lines", "# that end with backslashes.", "if", "(", "file_extension", "==", "'h'", "and", "Search", "(", "r'\\bnamespace\\s*{'", ",", "line", ")", "and", "line", "[", "-", "1", "]", "!=", "'\\\\'", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "4", ",", "'Do not use unnamed namespaces in header files. See '", "'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'", "' for more information.'", ")" ]
https://github.com/CMU-Perceptual-Computing-Lab/caffe_rtpose/blob/a4778bb1c3eb74d7250402016047216f77b4dba6/scripts/cpp_lint.py#L3834-L4132
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py
python
_project_log_stochastic_matrix_wrt_kl_divergence
(log_matrix)
return log_matrix
Projects its argument onto the set of log-left-stochastic matrices. Args: log_matrix: 2d square tensor, the element-wise logarithm of the matrix to project. Returns: The 2d square tensor that results from projecting exp(`matrix`) onto the set of left-stochastic matrices w.r.t. the KL-divergence applied column-wise.
Projects its argument onto the set of log-left-stochastic matrices.
[ "Projects", "its", "argument", "onto", "the", "set", "of", "log", "-", "left", "-", "stochastic", "matrices", "." ]
def _project_log_stochastic_matrix_wrt_kl_divergence(log_matrix): """Projects its argument onto the set of log-left-stochastic matrices. Args: log_matrix: 2d square tensor, the element-wise logarithm of the matrix to project. Returns: The 2d square tensor that results from projecting exp(`matrix`) onto the set of left-stochastic matrices w.r.t. the KL-divergence applied column-wise. """ # For numerical reasons, make sure that the largest matrix element is zero # before exponentiating. log_matrix = log_matrix - standard_ops.reduce_max( log_matrix, axis=0, keepdims=True) log_matrix = log_matrix - standard_ops.log( standard_ops.reduce_sum( standard_ops.exp(log_matrix), axis=0, keepdims=True)) return log_matrix
[ "def", "_project_log_stochastic_matrix_wrt_kl_divergence", "(", "log_matrix", ")", ":", "# For numerical reasons, make sure that the largest matrix element is zero", "# before exponentiating.", "log_matrix", "=", "log_matrix", "-", "standard_ops", ".", "reduce_max", "(", "log_matrix", ",", "axis", "=", "0", ",", "keepdims", "=", "True", ")", "log_matrix", "=", "log_matrix", "-", "standard_ops", ".", "log", "(", "standard_ops", ".", "reduce_sum", "(", "standard_ops", ".", "exp", "(", "log_matrix", ")", ",", "axis", "=", "0", ",", "keepdims", "=", "True", ")", ")", "return", "log_matrix" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py#L198-L217
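The projection is a column-wise log-softmax; a NumPy sketch that verifies the columns of exp(result) sum to one:

import numpy as np

def project_log_stochastic(log_matrix):
    # Shift so the largest element per column is zero, then normalize in log space.
    log_matrix = log_matrix - log_matrix.max(axis=0, keepdims=True)
    return log_matrix - np.log(np.exp(log_matrix).sum(axis=0, keepdims=True))

m = np.log(np.array([[1.0, 2.0], [3.0, 6.0]]))
print(np.exp(project_log_stochastic(m)).sum(axis=0))  # [1. 1.]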
generalized-intelligence/GAAS
29ab17d3e8a4ba18edef3a57c36d8db6329fac73
algorithms/src/SystemManagement/json_request_response_lib/src/third_party/nlohmann_json/third_party/cpplint/cpplint.py
python
_DropCommonSuffixes
(filename)
return os.path.splitext(filename)[0]
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
Drops common suffixes like _test.cc or -inl.h from filename.
[ "Drops", "common", "suffixes", "like", "_test", ".", "cc", "or", "-", "inl", ".", "h", "from", "filename", "." ]
def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in itertools.chain( ('%s.%s' % (test_suffix.lstrip('_'), ext) for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())), ('%s.%s' % (suffix, ext) for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
[ "def", "_DropCommonSuffixes", "(", "filename", ")", ":", "for", "suffix", "in", "itertools", ".", "chain", "(", "(", "'%s.%s'", "%", "(", "test_suffix", ".", "lstrip", "(", "'_'", ")", ",", "ext", ")", "for", "test_suffix", ",", "ext", "in", "itertools", ".", "product", "(", "_test_suffixes", ",", "GetNonHeaderExtensions", "(", ")", ")", ")", ",", "(", "'%s.%s'", "%", "(", "suffix", ",", "ext", ")", "for", "suffix", ",", "ext", "in", "itertools", ".", "product", "(", "[", "'inl'", ",", "'imp'", ",", "'internal'", "]", ",", "GetHeaderExtensions", "(", ")", ")", ")", ")", ":", "if", "(", "filename", ".", "endswith", "(", "suffix", ")", "and", "len", "(", "filename", ")", ">", "len", "(", "suffix", ")", "and", "filename", "[", "-", "len", "(", "suffix", ")", "-", "1", "]", "in", "(", "'-'", ",", "'_'", ")", ")", ":", "return", "filename", "[", ":", "-", "len", "(", "suffix", ")", "-", "1", "]", "return", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]" ]
https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/algorithms/src/SystemManagement/json_request_response_lib/src/third_party/nlohmann_json/third_party/cpplint/cpplint.py#L4672-L4699
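A simplified behavior sketch with a reduced, hard-coded suffix list (the real function builds the list from the extension helpers it references):

import os

def drop_common_suffixes(filename):
    for suffix in ('test.cc', 'inl.h', 'imp.h', 'internal.h'):
        # Strip the suffix only when it is preceded by '-' or '_'.
        if (filename.endswith(suffix) and len(filename) > len(suffix)
                and filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]

print(drop_common_suffixes('foo/foo-inl.h'))              # foo/foo
print(drop_common_suffixes('foo/foo_unusualinternal.h'))  # foo/foo_unusualinternal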
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/training/sync_replicas_optimizer.py
python
SyncReplicasOptimizer.get_slot
(self, *args, **kwargs)
return self._opt.get_slot(*args, **kwargs)
Return a slot named "name" created for "var" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The `Variable` for the slot if it was created, `None` otherwise.
Return a slot named "name" created for "var" by the Optimizer.
[ "Return", "a", "slot", "named", "name", "created", "for", "var", "by", "the", "Optimizer", "." ]
def get_slot(self, *args, **kwargs): """Return a slot named "name" created for "var" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The `Variable` for the slot if it was created, `None` otherwise. """ return self._opt.get_slot(*args, **kwargs)
[ "def", "get_slot", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_opt", ".", "get_slot", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/training/sync_replicas_optimizer.py#L363-L375