Each record pairs one Python function mined from a public GitHub repository with its docstring. Schema (string lengths are the min–max observed in the split):

| column | type |
|---|---|
| nwo | string, 5–86 chars (owner/repo) |
| sha | string, 40 chars (commit hash) |
| path | string, 4–189 chars |
| language | 1 class (always `python`) |
| identifier | string, 1–94 chars |
| parameters | string, 2–4.03k chars |
| argument_list | 1 class (empty in the rows below) |
| return_statement | string, 0–11.5k chars |
| docstring | string, 1–33.2k chars |
| docstring_summary | string, 0–5.15k chars |
| docstring_tokens | sequence |
| function | string, 34–151k chars |
| function_tokens | sequence |
| url | string, 90–278 chars |

In the preview records below, the `docstring` text appears inline at the top of each `function` body, and the `docstring_tokens` / `function_tokens` sequences (token-level duplicates of those two columns) are left out for readability.
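For orientation, here is a minimal sketch of streaming records with this schema through the `datasets` library. The dataset path in the snippet is a hypothetical placeholder, not this card's real identifier, and `streaming=True` simply avoids downloading the full corpus:

```python
from datasets import load_dataset

# NOTE: "org/code-docstring-corpus" is a placeholder path (assumption);
# substitute the actual dataset identifier for this card before running.
ds = load_dataset("org/code-docstring-corpus", split="train", streaming=True)

for record in ds.take(2):
    # Scalar columns from the schema above.
    print(record["nwo"], record["path"], record["identifier"])
    print(record["docstring_summary"])
    print(record["url"])
```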
---

- **nwo:** apple/swift
- **sha:** `469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893`
- **path:** `utils/build_swift/build_swift/shell.py`
- **identifier:** `_normalize_args`
- **parameters:** `(args)`
- **return_statement:** `return list(_flatmap(normalize_arg, args))`
- **docstring_summary:** Normalizes a list of arguments containing one or more strings and CommandWrapper instances into a one-dimensional list of strings.
- **url:** https://github.com/apple/swift/blob/469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893/utils/build_swift/build_swift/shell.py#L148-L170

```python
def _normalize_args(args):
    """Normalizes a list of arguments containing one or more strings and
    CommandWrapper instances into a one-dimensional list of strings.
    """
    if isinstance(args, six.string_types):
        return shlex.split(args)

    def normalize_arg(arg):
        arg = _convert_pathlib_path(arg)
        if isinstance(arg, six.string_types):
            return [six.text_type(arg)]
        if isinstance(arg, AbstractWrapper):
            return list(map(_convert_pathlib_path, arg.command))
        raise ValueError('Invalid argument type: {}'.format(
            type(arg).__name__))

    if isinstance(args, AbstractWrapper):
        return normalize_arg(args)

    return list(_flatmap(normalize_arg, args))
```
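The helper above leans on module-private utilities (`_flatmap`, `_convert_pathlib_path`, `AbstractWrapper`) that are not part of this record. A dependency-free sketch of the same normalization contract, with plain tuples standing in for wrapper `.command` lists, looks like this:

```python
import shlex

def normalize_args(args):
    # Whole-string commands are tokenized, shell-style.
    if isinstance(args, str):
        return shlex.split(args)
    out = []
    for arg in args:
        if isinstance(arg, str):
            out.append(arg)
        elif isinstance(arg, (list, tuple)):  # stand-in for wrapper.command
            out.extend(arg)
        else:
            raise ValueError('Invalid argument type: {}'.format(type(arg).__name__))
    return out

print(normalize_args('git commit -m "a message"'))  # ['git', 'commit', '-m', 'a message']
print(normalize_args(['echo', ('env', 'FOO=1')]))   # ['echo', 'env', 'FOO=1']
```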
---

- **nwo:** catboost/catboost
- **sha:** `167f64f237114a4d10b2b4ee42adb4569137debe`
- **path:** `contrib/tools/python3/src/Lib/mailbox.py`
- **identifier:** `_singlefileMailbox.lock`
- **parameters:** `(self)`
- **docstring_summary:** Lock the mailbox.
- **url:** https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/mailbox.py#L640-L644

```python
def lock(self):
    """Lock the mailbox."""
    if not self._locked:
        _lock_file(self._file)
        self._locked = True
```
---

- **nwo:** psi4/psi4
- **sha:** `be533f7f426b6ccc263904e55122899b16663395`
- **path:** `psi4/driver/driver.py`
- **identifier:** `_filter_renamed_methods`
- **parameters:** `(compute, method)`
- **docstring_summary:** Raises UpgradeHelper when a method has been renamed.
- **url:** https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/driver.py#L207-L210

```python
def _filter_renamed_methods(compute, method):
    r"""Raises UpgradeHelper when a method has been renamed."""
    if method == "dcft":
        raise UpgradeHelper(compute + "('dcft')", compute + "('dct')", 1.4, " All instances of 'dcft' should be replaced with 'dct'.")
```
---

- **nwo:** BlzFans/wke
- **sha:** `b0fa21158312e40c5fbd84682d643022b6c34a93`
- **path:** `cygwin/lib/python2.6/mailbox.py`
- **identifier:** `Mailbox._dump_message`
- **parameters:** `(self, message, target, mangle_from_=False)`
- **docstring_summary:** Dump message contents to target file.
- **url:** https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/mailbox.py#L194-L220

```python
def _dump_message(self, message, target, mangle_from_=False):
    # Most files are opened in binary mode to allow predictable seeking.
    # To get native line endings on disk, the user-friendly \n line endings
    # used in strings and by email.Message are translated here.
    """Dump message contents to target file."""
    if isinstance(message, email.message.Message):
        buffer = StringIO.StringIO()
        gen = email.generator.Generator(buffer, mangle_from_, 0)
        gen.flatten(message)
        buffer.seek(0)
        target.write(buffer.read().replace('\n', os.linesep))
    elif isinstance(message, str):
        if mangle_from_:
            message = message.replace('\nFrom ', '\n>From ')
        message = message.replace('\n', os.linesep)
        target.write(message)
    elif hasattr(message, 'read'):
        while True:
            line = message.readline()
            if line == '':
                break
            if mangle_from_ and line.startswith('From '):
                line = '>From ' + line[5:]
            line = line.replace('\n', os.linesep)
            target.write(line)
    else:
        raise TypeError('Invalid message type: %s' % type(message))
```
---

- **nwo:** BlzFans/wke
- **sha:** `b0fa21158312e40c5fbd84682d643022b6c34a93`
- **path:** `cygwin/lib/python2.6/optparse.py`
- **identifier:** `HelpFormatter.format_option_strings`
- **parameters:** `(self, option)`
- **return_statement:** `return ", ".join(opts)`
- **docstring_summary:** Return a comma-separated list of option strings & metavariables.
- **url:** https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/optparse.py#L342-L359

```python
def format_option_strings(self, option):
    """Return a comma-separated list of option strings & metavariables."""
    if option.takes_value():
        metavar = option.metavar or option.dest.upper()
        short_opts = [self._short_opt_fmt % (sopt, metavar)
                      for sopt in option._short_opts]
        long_opts = [self._long_opt_fmt % (lopt, metavar)
                     for lopt in option._long_opts]
    else:
        short_opts = option._short_opts
        long_opts = option._long_opts

    if self.short_first:
        opts = short_opts + long_opts
    else:
        opts = long_opts + short_opts

    return ", ".join(opts)
```
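A quick usage sketch against the stdlib's current copy of the module (the row itself comes from Python 2.6's vendored copy, but the behavior is the same for this method): since the formatter's short format defaults to `"%s %s"` and the long format to `"%s=%s"`, the joined result reads naturally in help text.

```python
import optparse  # deprecated in the stdlib, but still available

parser = optparse.OptionParser()
opt = parser.add_option("-f", "--file", dest="filename", metavar="FILE")

formatter = optparse.IndentedHelpFormatter()
print(formatter.format_option_strings(opt))  # -> "-f FILE, --file=FILE"
```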
---

- **nwo:** LiquidPlayer/LiquidCore
- **sha:** `9405979363f2353ac9a71ad8ab59685dd7f919c9`
- **path:** `deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py`
- **identifier:** `_GetDefines`
- **parameters:** `(config)`
- **return_statement:** `return defines`
- **docstring_summary:** Returns the list of preprocessor definitions for this configuation.
- **url:** https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py#L1324-L1340

```python
def _GetDefines(config):
    """Returns the list of preprocessor definitions for this configuation.

    Arguments:
      config: The dictionary that defines the special processing to be done
          for this configuration.
    Returns:
      The list of preprocessor definitions.
    """
    defines = []
    for d in config.get('defines', []):
        if type(d) == list:
            fd = '='.join([str(dpart) for dpart in d])
        else:
            fd = str(d)
        defines.append(fd)
    return defines
```
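Because the helper is pure and self-contained, it can be exercised standalone; the snippet below copies it verbatim and shows how gyp-style `defines` entries flatten to `NAME` / `NAME=value` strings:

```python
def _GetDefines(config):
    defines = []
    for d in config.get('defines', []):
        if type(d) == list:
            fd = '='.join([str(dpart) for dpart in d])
        else:
            fd = str(d)
        defines.append(fd)
    return defines

print(_GetDefines({'defines': ['NDEBUG', ['VERSION', 3]]}))
# -> ['NDEBUG', 'VERSION=3']
```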
---

- **nwo:** papyrussolution/OpenPapyrus
- **sha:** `bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91`
- **path:** `Src/OSF/protobuf-3.19.1/python/google/protobuf/descriptor_pool.py`
- **identifier:** `DescriptorPool.FindMessageTypeByName`
- **parameters:** `(self, full_name)`
- **return_statement:** `return self._descriptors[full_name]`
- **docstring_summary:** Loads the named descriptor from the pool.
- **url:** https://github.com/papyrussolution/OpenPapyrus/blob/bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91/Src/OSF/protobuf-3.19.1/python/google/protobuf/descriptor_pool.py#L501-L517

```python
def FindMessageTypeByName(self, full_name):
    """Loads the named descriptor from the pool.

    Args:
      full_name (str): The full name of the descriptor to load.

    Returns:
      Descriptor: The descriptor for the named type.

    Raises:
      KeyError: if the message cannot be found in the pool.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._descriptors:
        self._FindFileContainingSymbolInDb(full_name)
    return self._descriptors[full_name]
```
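A hedged usage sketch against the public `protobuf` Python API: importing a generated `_pb2` module registers its types in the default pool, after which the lookup above resolves them (treat the well-known-type field names in the comment as a from-memory assumption):

```python
from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # noqa: F401 -- the import registers the type

pool = descriptor_pool.Default()
desc = pool.FindMessageTypeByName("google.protobuf.Timestamp")
print(desc.full_name)                 # google.protobuf.Timestamp
print([f.name for f in desc.fields])  # expected: ['seconds', 'nanos']
pool.FindMessageTypeByName("no.such.Type")  # raises KeyError
```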
---

- **nwo:** aws/lumberyard
- **sha:** `f85344403c1c2e77ec8c75deb2c116e97b713217`
- **path:** `dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py`
- **identifier:** `ParserElement.setDebugActions`
- **parameters:** `( self, startAction, successAction, exceptionAction )`
- **return_statement:** `return self`
- **docstring:** Enable display of debugging messages while doing pattern matching.
- **url:** https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py#L4203-L4219

```python
def setDebugActions( self, startAction, successAction, exceptionAction ):
    """
    Enable display of debugging messages while doing pattern matching.
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
```
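A sketch with the pre-3.0 camelCase pyparsing API this row is taken from. The callback signatures below, `(instring, loc, expr)` for start, `(instring, startloc, endloc, expr, toks)` for success, and `(instring, loc, expr, exc)` for failure, are recalled from that era of the library and should be double-checked against the installed version:

```python
from pyparsing import Word, nums

def on_start(instring, loc, expr):
    print("trying %r at %d" % (str(expr), loc))

def on_success(instring, startloc, endloc, expr, toks):
    print("matched %s" % toks.asList())

def on_fail(instring, loc, expr, exc):
    print("failed: %s" % exc)

# setDebugActions returns self, so it chains off the element definition.
integer = Word(nums).setDebugActions(on_start, on_success, on_fail)
integer.parseString("42")
```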
---

- **nwo:** apache/arrow
- **sha:** `af33dd1157eb8d7d9bfac25ebf61445b793b7943`
- **path:** `dev/archery/archery/cli.py`
- **identifier:** `linking`
- **parameters:** `(obj)`
- **docstring_summary:** Quick and dirty utilities for checking library linkage.
- **url:** https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/dev/archery/archery/cli.py#L914-L918

```python
def linking(obj):
    """
    Quick and dirty utilities for checking library linkage.
    """
    pass
```
---

- **nwo:** swift/swift
- **sha:** `12d031cf8177fdec0137f9aa7e2912fa23c4416b`
- **path:** `3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py`
- **identifier:** `SubstitutionEnvironment._init_special`
- **parameters:** `(self)`
- **docstring_summary:** Initial the dispatch tables for special handling of special construction variables.
- **url:** https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py#L380-L397

```python
def _init_special(self):
    """Initial the dispatch tables for special handling of
    special construction variables."""
    self._special_del = {}
    self._special_del['SCANNERS'] = _del_SCANNERS

    self._special_set = {}
    for key in reserved_construction_var_names:
        self._special_set[key] = _set_reserved
    for key in future_reserved_construction_var_names:
        self._special_set[key] = _set_future_reserved
    self._special_set['BUILDERS'] = _set_BUILDERS
    self._special_set['SCANNERS'] = _set_SCANNERS

    # Freeze the keys of self._special_set in a list for use by
    # methods that need to check.  (Empirically, list scanning has
    # gotten better than dict.has_key() in Python 2.5.)
    self._special_set_keys = list(self._special_set.keys())
```
---

- **nwo:** hpi-xnor/BMXNet-v2
- **sha:** `af2b1859eafc5c721b1397cef02f946aaf2ce20d`
- **path:** `python/mxnet/io/io.py`
- **identifier:** `DataIter.reset`
- **parameters:** `(self)`
- **docstring_summary:** Reset the iterator to the begin of the data.
- **url:** https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/io/io.py#L206-L208

```python
def reset(self):
    """Reset the iterator to the begin of the data."""
    pass
```
---

- **nwo:** papyrussolution/OpenPapyrus
- **sha:** `bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91`
- **path:** `Src/OSF/xapian/xapian-bindings/python3/doxy2swig.py`
- **identifier:** `Doxy2SWIG.parse`
- **parameters:** `(self, node)`
- **docstring_summary:** Parse a given node. This function in turn calls the `parse_<nodeType>` functions which handle the respective nodes.
- **url:** https://github.com/papyrussolution/OpenPapyrus/blob/bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91/Src/OSF/xapian/xapian-bindings/python3/doxy2swig.py#L87-L94

```python
def parse(self, node):
    """Parse a given node.  This function in turn calls the
    `parse_<nodeType>` functions which handle the respective
    nodes.
    """
    pm = getattr(self, "parse_%s"%node.__class__.__name__)
    pm(node)
```
---

- **nwo:** hfinkel/llvm-project-cxxjit
- **sha:** `91084ef018240bbb8e24235ff5cd8c355a9c1a1e`
- **path:** `clang/bindings/python/clang/cindex.py`
- **identifier:** `TranslationUnit.cursor`
- **parameters:** `(self)`
- **return_statement:** `return conf.lib.clang_getTranslationUnitCursor(self)`
- **docstring_summary:** Retrieve the cursor that represents the given translation unit.
- **url:** https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L2872-L2874

```python
def cursor(self):
    """Retrieve the cursor that represents the given translation unit."""
    return conf.lib.clang_getTranslationUnitCursor(self)
```
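A usage sketch for the libclang Python bindings (requires a libclang shared library on the machine; `clang.cindex.Config.set_library_file` can point at it explicitly if discovery fails):

```python
import clang.cindex

index = clang.cindex.Index.create()
tu = index.parse(
    "demo.c",
    unsaved_files=[("demo.c", "int add(int a, int b) { return a + b; }")],
)
root = tu.cursor  # cursor spanning the whole translation unit
for child in root.get_children():
    print(child.kind, child.spelling)  # e.g. CursorKind.FUNCTION_DECL add
```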
---

- **nwo:** PaddlePaddle/Paddle
- **sha:** `1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c`
- **path:** `python/paddle/fluid/dataset.py`
- **identifier:** `InMemoryDataset.postprocess_instance`
- **parameters:** `(self)`
- **docstring_summary:** Divide pv instance and convey it to input_channel.
- **url:** https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/dataset.py#L617-L634

```python
def postprocess_instance(self):
    """
    Divide pv instance and convey it to input_channel.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
          filelist = ["a.txt", "b.txt"]
          dataset.set_filelist(filelist)
          dataset.load_into_memory()
          dataset.preprocess_instance()
          exe.train_from_dataset(dataset)
          dataset.postprocess_instance()
    """
    self.dataset.postprocess_instance()
```
---

- **nwo:** hszhao/PSPNet
- **sha:** `cf7e5a99ba37e46118026e96be5821a9bc63bde0`
- **path:** `scripts/cpp_lint.py`
- **identifier:** `_NestingState.Update`
- **parameters:** `(self, filename, clean_lines, linenum, error)`
- **docstring_summary:** Update nesting state with current line.
- **url:** https://github.com/hszhao/PSPNet/blob/cf7e5a99ba37e46118026e96be5821a9bc63bde0/scripts/cpp_lint.py#L2004-L2158

```python
def Update(self, filename, clean_lines, linenum, error):
  """Update nesting state with current line.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Update pp_stack first
  self.UpdatePreprocessor(line)

  # Count parentheses.  This is to avoid adding struct arguments to
  # the nesting stack.
  if self.stack:
    inner_block = self.stack[-1]
    depth_change = line.count('(') - line.count(')')
    inner_block.open_parentheses += depth_change

    # Also check if we are starting or ending an inline assembly block.
    if inner_block.inline_asm in (_NO_ASM, _END_ASM):
      if (depth_change != 0 and
          inner_block.open_parentheses == 1 and
          _MATCH_ASM.match(line)):
        # Enter assembly block
        inner_block.inline_asm = _INSIDE_ASM
      else:
        # Not entering assembly block.  If previous line was _END_ASM,
        # we will now shift to _NO_ASM state.
        inner_block.inline_asm = _NO_ASM
    elif (inner_block.inline_asm == _INSIDE_ASM and
          inner_block.open_parentheses == 0):
      # Exit assembly block
      inner_block.inline_asm = _END_ASM

  # Consume namespace declaration at the beginning of the line.  Do
  # this in a loop so that we catch same line declarations like this:
  #   namespace proto2 { namespace bridge { class MessageSet; } }
  while True:
    # Match start of namespace.  The "\b\s*" below catches namespace
    # declarations even if it weren't followed by a whitespace, this
    # is so that we don't confuse our namespace checker.  The
    # missing spaces will be flagged by CheckSpacing.
    namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
    if not namespace_decl_match:
      break

    new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
    self.stack.append(new_namespace)

    line = namespace_decl_match.group(2)
    if line.find('{') != -1:
      new_namespace.seen_open_brace = True
      line = line[line.find('{') + 1:]

  # Look for a class declaration in whatever is left of the line
  # after parsing namespaces.  The regexp accounts for decorated classes
  # such as in:
  #   class LOCKABLE API Object {
  #   };
  #
  # Templates with class arguments may confuse the parser, for example:
  #   template <class T
  #             class Comparator = less<T>,
  #             class Vector = vector<T> >
  #   class HeapQueue {
  #
  # Because this parser has no nesting state about templates, by the
  # time it saw "class Comparator", it may think that it's a new class.
  # Nested templates have a similar problem:
  #   template <
  #       typename ExportedType,
  #       typename TupleType,
  #       template <typename, typename> class ImplTemplate>
  #
  # To avoid these cases, we ignore classes that are followed by '=' or '>'
  class_decl_match = Match(
      r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
      r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
      r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
  if (class_decl_match and
      (not self.stack or self.stack[-1].open_parentheses == 0)):
    self.stack.append(_ClassInfo(
        class_decl_match.group(4), class_decl_match.group(2),
        clean_lines, linenum))
    line = class_decl_match.group(5)

  # If we have not yet seen the opening brace for the innermost block,
  # run checks here.
  if not self.SeenOpenBrace():
    self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

  # Update access control if we are inside a class/struct
  if self.stack and isinstance(self.stack[-1], _ClassInfo):
    classinfo = self.stack[-1]
    access_match = Match(
        r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
        r':(?:[^:]|$)',
        line)
    if access_match:
      classinfo.access = access_match.group(2)

      # Check that access keywords are indented +1 space.  Skip this
      # check if the keywords are not preceded by whitespaces.
      indent = access_match.group(1)
      if (len(indent) != classinfo.class_indent + 1 and
          Match(r'^\s*$', indent)):
        if classinfo.is_struct:
          parent = 'struct ' + classinfo.name
        else:
          parent = 'class ' + classinfo.name
        slots = ''
        if access_match.group(3):
          slots = access_match.group(3)
        error(filename, linenum, 'whitespace/indent', 3,
              '%s%s: should be indented +1 space inside %s' % (
                  access_match.group(2), slots, parent))

  # Consume braces or semicolons from what's left of the line
  while True:
    # Match first brace, semicolon, or closed parenthesis.
    matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
    if not matched:
      break

    token = matched.group(1)
    if token == '{':
      # If namespace or class hasn't seen a opening brace yet, mark
      # namespace/class head as complete.  Push a new block onto the
      # stack otherwise.
      if not self.SeenOpenBrace():
        self.stack[-1].seen_open_brace = True
      else:
        self.stack.append(_BlockInfo(True))
        if _MATCH_ASM.match(line):
          self.stack[-1].inline_asm = _BLOCK_ASM
    elif token == ';' or token == ')':
      # If we haven't seen an opening brace yet, but we already saw
      # a semicolon, this is probably a forward declaration.  Pop
      # the stack for these.
      #
      # Similarly, if we haven't seen an opening brace yet, but we
      # already saw a closing parenthesis, then these are probably
      # function arguments with extra "class" or "struct" keywords.
      # Also pop these stack for these.
      if not self.SeenOpenBrace():
        self.stack.pop()
    else:  # token == '}'
      # Perform end of block checks and pop the stack.
      if self.stack:
        self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
        self.stack.pop()
    line = matched.group(2)
```
---

- **nwo:** Jittor/jittor
- **sha:** `e9aca0444c2bdc8e2389d99122954cd0903eec46`
- **path:** `python/jittor/misc.py`
- **identifier:** `index_add_`
- **parameters:** `(x, dim, index, tensor)`
- **docstring_summary:** Take out each index subscript vector of the dim dimension and add the corresponding tensor variable.
- **url:** https://github.com/Jittor/jittor/blob/e9aca0444c2bdc8e2389d99122954cd0903eec46/python/jittor/misc.py#L16-L35

```python
def index_add_(x, dim, index, tensor):
    """ Take out each index subscript vector of the dim dimension and add the corresponding tensor variable.

    Example:

        x = jt.ones((5,3))
        tensor = jt.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        index = jt.array([0,4,2])
        x.index_add_(0, index, tensor)
        print(x)
        >>> jt.Var([[ 2.,  3.,  4.],
                    [ 1.,  1.,  1.],
                    [ 8.,  9., 10.],
                    [ 1.,  1.,  1.],
                    [ 5.,  6.,  7.]])
    """
    assert len(index.shape) == 1
    assert tensor.shape[0] == index.shape[0]
    x[(slice(None,),)*dim+(index,)] += tensor
```
---

- **nwo:** benoitsteiner/tensorflow-opencl
- **sha:** `cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5`
- **path:** `tensorflow/tools/docs/pretty_docs.py`
- **identifier:** `_build_function_page`
- **parameters:** `(page_info)`
- **return_statement:** `return ''.join(parts)`
- **docstring_summary:** Given a FunctionPageInfo object Return the page as an md string.
- **url:** https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/tools/docs/pretty_docs.py#L59-L81

```python
def _build_function_page(page_info):
  """Given a FunctionPageInfo object Return the page as an md string."""
  parts = [_Metadata(page_info.full_name).build_html()]

  parts.append('# %s\n\n' % page_info.full_name)

  if len(page_info.aliases) > 1:
    parts.append('### Aliases:\n\n')
    parts.extend('* `%s`\n' % name for name in page_info.aliases)
    parts.append('\n')

  if page_info.signature is not None:
    parts.append(_build_signature(page_info))

  if page_info.defined_in:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))

  parts.append(page_info.guides)
  parts.append(page_info.doc.docstring)
  parts.append(_build_function_details(page_info.doc.function_details))
  parts.append(_build_compatibility(page_info.doc.compatibility))

  return ''.join(parts)
```
---

- **nwo:** catboost/catboost
- **sha:** `167f64f237114a4d10b2b4ee42adb4569137debe`
- **path:** `contrib/python/numpy/py2/numpy/distutils/misc_util.py`
- **identifier:** `Configuration.make_config_py`
- **parameters:** `(self,name='__config__')`
- **docstring_summary:** Generate package __config__.py file containing system_info information used during building the package.
- **url:** https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/distutils/misc_util.py#L2055-L2063

```python
def make_config_py(self,name='__config__'):
    """Generate package __config__.py file containing system_info
    information used during building the package.

    This file is installed to the
    package installation directory.
    """
    self.py_modules.append((self.name, name, generate_config_py))
```
---

- **nwo:** catboost/catboost
- **sha:** `167f64f237114a4d10b2b4ee42adb4569137debe`
- **path:** `contrib/python/pandas/py3/pandas/core/frame.py`
- **identifier:** `DataFrame.itertuples`
- **parameters:** `(self, index: bool = True, name: str | None = "Pandas")`
- **return_statement:** `return zip(*arrays)`
- **docstring_summary:** Iterate over DataFrame rows as namedtuples.
- **url:** https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/frame.py#L1266-L1350

```python
def itertuples(
    self, index: bool = True, name: str | None = "Pandas"
) -> Iterable[tuple[Any, ...]]:
    """
    Iterate over DataFrame rows as namedtuples.

    Parameters
    ----------
    index : bool, default True
        If True, return the index as the first element of the tuple.
    name : str or None, default "Pandas"
        The name of the returned namedtuples or None to return regular
        tuples.

    Returns
    -------
    iterator
        An object to iterate over namedtuples for each row in the
        DataFrame with the first field possibly being the index and
        following fields being the column values.

    See Also
    --------
    DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
        pairs.
    DataFrame.items : Iterate over (column name, Series) pairs.

    Notes
    -----
    The column names will be renamed to positional names if they are
    invalid Python identifiers, repeated, or start with an underscore.
    On python versions < 3.7 regular tuples are returned for DataFrames
    with a large number of columns (>254).

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
    ...                   index=['dog', 'hawk'])
    >>> df
          num_legs  num_wings
    dog          4          0
    hawk         2          2
    >>> for row in df.itertuples():
    ...     print(row)
    ...
    Pandas(Index='dog', num_legs=4, num_wings=0)
    Pandas(Index='hawk', num_legs=2, num_wings=2)

    By setting the `index` parameter to False we can remove the index
    as the first element of the tuple:

    >>> for row in df.itertuples(index=False):
    ...     print(row)
    ...
    Pandas(num_legs=4, num_wings=0)
    Pandas(num_legs=2, num_wings=2)

    With the `name` parameter set we set a custom name for the yielded
    namedtuples:

    >>> for row in df.itertuples(name='Animal'):
    ...     print(row)
    ...
    Animal(Index='dog', num_legs=4, num_wings=0)
    Animal(Index='hawk', num_legs=2, num_wings=2)
    """
    arrays = []
    fields = list(self.columns)
    if index:
        arrays.append(self.index)
        fields.insert(0, "Index")

    # use integer indexing because of possible duplicate column names
    arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))

    if name is not None:
        # https://github.com/python/mypy/issues/9046
        # error: namedtuple() expects a string literal as the first argument
        itertuple = collections.namedtuple(  # type: ignore[misc]
            name, fields, rename=True
        )
        return map(itertuple._make, zip(*arrays))

    # fallback to regular tuples
    return zip(*arrays)
```
---

- **nwo:** SoarGroup/Soar
- **sha:** `a1c5e249499137a27da60533c72969eef3b8ab6b`
- **path:** `scons/scons-local-4.1.0/SCons/Tool/tex.py`
- **identifier:** `tex_pdf_emitter`
- **parameters:** `(target, source, env)`
- **return_statement:** `return (target, source)`
- **docstring_summary:** An emitter for TeX and LaTeX sources when executing pdftex or pdflatex. It will accept graphics files of types .pdf, .jpg, .png, .gif, and .tif
- **url:** https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Tool/tex.py#L617-L624

```python
def tex_pdf_emitter(target, source, env):
    """An emitter for TeX and LaTeX sources when
    executing pdftex or pdflatex. It will accept graphics
    files of types .pdf, .jpg, .png, .gif, and .tif
    """
    (target, source) = tex_emitter_core(target, source, env, LatexGraphics)

    return (target, source)
```
---

- **nwo:** aws/lumberyard
- **sha:** `f85344403c1c2e77ec8c75deb2c116e97b713217`
- **path:** `dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/retries/standard.py`
- **identifier:** `ExponentialBackoff.delay_amount`
- **parameters:** `(self, context)`
- **return_statement:** `return min(self._random() * (self._base ** (context.attempt_number - 1)), self._max_backoff)`
- **docstring_summary:** Calculates delay based on exponential backoff.
- **url:** https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/retries/standard.py#L240-L257

```python
def delay_amount(self, context):
    """Calculates delay based on exponential backoff.

    This class implements truncated binary exponential backoff
    with jitter::

        t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)

    where ``i`` is the request attempt (0 based).
    """
    # The context.attempt_number is a 1-based value, but we have
    # to calculate the delay based on i based a 0-based value.  We
    # want the first delay to just be ``rand(0, 1)``.
    return min(
        self._random() * (self._base ** (context.attempt_number - 1)),
        self._max_backoff
    )
```
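A standalone re-run of the truncated-exponential formula, with the base and cap written as explicit assumptions (botocore's standard mode uses base 2; the 20-second cap below is recalled from that module, so verify it before relying on it):

```python
import random

BASE = 2           # assumption: botocore standard mode uses base 2
MAX_BACKOFF = 20   # assumption: standard-mode cap, in seconds

def delay_amount(attempt_number, rng=random.random):
    # attempt_number is 1-based; the exponent is 0-based, so the
    # first retry sleeps rand(0, 1) seconds.
    return min(rng() * (BASE ** (attempt_number - 1)), MAX_BACKOFF)

random.seed(7)
for attempt in range(1, 7):
    print(attempt, round(delay_amount(attempt), 3))
# Upper bounds per attempt: 1, 2, 4, 8, 16, then capped at 20.
```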
---

- **nwo:** yuxng/PoseCNN
- **sha:** `9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04`
- **path:** `lib/datasets/ycb_single.py`
- **identifier:** `ycb_single.depth_path_at`
- **parameters:** `(self, i)`
- **return_statement:** `return self.depth_path_from_index(self.image_index[i])`
- **docstring_summary:** Return the absolute path to depth i in the image sequence.
- **url:** https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/ycb_single.py#L90-L94

```python
def depth_path_at(self, i):
    """
    Return the absolute path to depth i in the image sequence.
    """
    return self.depth_path_from_index(self.image_index[i])
```
---

- **nwo:** catboost/catboost
- **sha:** `167f64f237114a4d10b2b4ee42adb4569137debe`
- **path:** `contrib/python/Jinja2/py3/jinja2/utils.py`
- **identifier:** `Cycler.next`
- **parameters:** `(self)`
- **return_statement:** `return rv`
- **docstring_summary:** Return the current item, then advance :attr:`current` to the next item.
- **url:** https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Jinja2/py3/jinja2/utils.py#L787-L793

```python
def next(self) -> t.Any:
    """Return the current item, then advance :attr:`current` to the
    next item.
    """
    rv = self.current
    self.pos = (self.pos + 1) % len(self.items)
    return rv
```
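Usage sketch for `jinja2.utils.Cycler` (the constructor takes the items to cycle over as positional arguments):

```python
from jinja2.utils import Cycler

row_class = Cycler("odd", "even")
print(row_class.next(), row_class.next(), row_class.next())  # odd even odd
print(row_class.current)  # even -- next() returned "odd" and advanced past it
```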
---

- **nwo:** microsoft/ivy
- **sha:** `9f3c7ecc0b2383129fdd0953e10890d98d09a82d`
- **path:** `ivy/ivy_transrel.py`
- **identifier:** `conjoin`
- **parameters:** `(clauses1,clauses2,annot_op=None)`
- **return_statement:** `return and_clauses(clauses1,rename_distinct(clauses2,clauses1),annot_op=annot_op)`
- **docstring_summary:** Conjoin clause sets, taking into account skolems
- **url:** https://github.com/microsoft/ivy/blob/9f3c7ecc0b2383129fdd0953e10890d98d09a82d/ivy/ivy_transrel.py#L356-L358

```python
def conjoin(clauses1,clauses2,annot_op=None):
    """ Conjoin clause sets, taking into account skolems """
    return and_clauses(clauses1,rename_distinct(clauses2,clauses1),annot_op=annot_op)
```
---

- **nwo:** deepmind/open_spiel
- **sha:** `4ca53bea32bb2875c7385d215424048ae92f78c8`
- **path:** `open_spiel/python/mfg/games/linear_quadratic.py`
- **identifier:** `MFGLinearQuadraticState.rewards`
- **parameters:** `(self)`
- **return_statement:** `return [self._rewards()]`
- **docstring_summary:** Rewards for all players.
- **url:** https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/mfg/games/linear_quadratic.py#L331-L335

```python
def rewards(self) -> List[float]:
    """Rewards for all players."""
    # For now, only single-population (single-player) mean field games
    # are supported.
    return [self._rewards()]
```
---

- **nwo:** devsisters/libquic
- **sha:** `8954789a056d8e7d5fcb6452fd1572ca57eb5c4e`
- **path:** `cpp.py`
- **identifier:** `CPP_to_Python`
- **parameters:** `(s)`
- **return_statement:** `return s`
- **docstring_summary:** Converts a C pre-processor expression into an equivalent Python expression that can be evaluated.
- **url:** https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/cpp.py#L157-L165

```python
def CPP_to_Python(s):
    """
    Converts a C pre-processor expression into an equivalent
    Python expression that can be evaluated.
    """
    s = CPP_to_Python_Ops_Expression.sub(CPP_to_Python_Ops_Sub, s)
    for expr, repl in CPP_to_Python_Eval_List:
        s = expr.sub(repl, s)
    return s
```
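The function drives everything through module-level substitution tables (`CPP_to_Python_Ops_Expression`, `CPP_to_Python_Eval_List`) that are not shown in this row, so here is a simplified, self-contained sketch of the same regex-driven translation idea with made-up mappings:

```python
import re

# Illustrative operator table only -- the real cpp.py tables are larger
# and also rewrite defined(...) checks.
OPS = {"&&": " and ", "||": " or ", "!": " not "}
OPS_RE = re.compile("|".join(re.escape(op) for op in sorted(OPS, key=len, reverse=True)))

def cpp_expr_to_python(s):
    return OPS_RE.sub(lambda m: OPS[m.group(0)], s)

expr = cpp_expr_to_python("FOO && !BAR")
print(" ".join(expr.split()))  # -> FOO and not BAR
```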
---

- **nwo:** mantidproject/mantid
- **sha:** `03deeb89254ec4289edb8771e0188c2090a02f32`
- **path:** `qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/hfctables.py`
- **identifier:** `UBMatrixPeakTable.setup`
- **parameters:** `(self)`
- **return_statement:** `return`
- **docstring_summary:** Init setup
- **url:** https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/hfctables.py#L1829-L1850

```python
def setup(self):
    """
    Init setup
    :return:
    """
    self.init_setup(UBMatrixPeakTable.UB_Peak_Table_Setup)
    self.set_status_column_name('Selected')

    # define all the _colIndex
    self._colIndexScan = self._myColumnNameList.index('Scan')
    self._colIndexSpiceHKL = self._myColumnNameList.index('Spice HKL')
    self._colIndexCalculatedHKL = self._myColumnNameList.index('Calculated HKL')
    self._colIndexQSample = self._myColumnNameList.index('Q-Sample')
    self._colIndexWavelength = self._myColumnNameList.index('Wavelength')
    self._colIndexError = self._myColumnNameList.index('Error')

    # set up the width of some columns
    self.setColumnWidth(self._colIndexSpiceHKL, 240)
    self.setColumnWidth(self._colIndexCalculatedHKL, 240)
    self.setColumnWidth(4, 240)

    return
```
---

- **nwo:** catboost/catboost
- **sha:** `167f64f237114a4d10b2b4ee42adb4569137debe`
- **path:** `contrib/python/protobuf/py3/google/protobuf/internal/well_known_types.py`
- **identifier:** `_StrConvert`
- **parameters:** `(value)`
- **return_statement:** `return value`
- **docstring_summary:** Converts value to str if it is not.
- **url:** https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/internal/well_known_types.py#L654-L661

```python
def _StrConvert(value):
  """Converts value to str if it is not."""
  # This file is imported by c extension and some methods like ClearField
  # requires string for the field name. py2/py3 has different text
  # type and may use unicode.
  if not isinstance(value, str):
    return value.encode('utf-8')
  return value
```
envoyproxy/envoy | 65541accdafe255e72310b4298d646e091da2d80 | contrib/kafka/filters/network/source/serialization/launcher.py | python | main | () | Serialization composite code generator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generates main source code files for composite deserializers.
The files are generated, as they are extremely repetitive (composite deserializer for 0..9
sub-deserializers).
Usage:
launcher.py LOCATION_OF_OUTPUT_FILE
where:
LOCATION_OF_OUTPUT_FILE : location of 'serialization_composite.h'.
Creates 'serialization_composite.h' - header with declarations of
CompositeDeserializerWith???Delegates classes.
Template used: 'serialization_composite_h.j2'. | Serialization composite code generator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generates main source code files for composite deserializers.
The files are generated, as they are extremely repetitive (composite deserializer for 0..9
sub-deserializers). | [
"Serialization",
"composite",
"code",
"generator",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
"Generates",
"main",
"source",
"code",
"files",
"for",
"composite",
"deserializers",
".",
"The",
"files",
"are",
"generated",
"as",
"they",
"are",
"extremely",
"repetitive",
"(",
"composite",
"deserializer",
"for",
"0",
"..",
"9",
"sub",
"-",
"deserializers",
")",
"."
] | def main():
"""
Serialization composite code generator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generates main source code files for composite deserializers.
The files are generated, as they are extremely repetitive (composite deserializer for 0..9
sub-deserializers).
Usage:
launcher.py LOCATION_OF_OUTPUT_FILE
where:
LOCATION_OF_OUTPUT_FILE : location of 'serialization_composite.h'.
Creates 'serialization_composite.h' - header with declarations of
CompositeDeserializerWith???Delegates classes.
Template used: 'serialization_composite_h.j2'.
"""
serialization_composite_h_file = os.path.abspath(sys.argv[1])
generator.generate_main_code(serialization_composite_h_file) | [
"def",
"main",
"(",
")",
":",
"serialization_composite_h_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
")",
"generator",
".",
"generate_main_code",
"(",
"serialization_composite_h_file",
")"
] | https://github.com/envoyproxy/envoy/blob/65541accdafe255e72310b4298d646e091da2d80/contrib/kafka/filters/network/source/serialization/launcher.py#L10-L29 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | TimeSpan_Millisecond | (*args) | return _misc_.TimeSpan_Millisecond(*args) | TimeSpan_Millisecond() -> TimeSpan | TimeSpan_Millisecond() -> TimeSpan | [
"TimeSpan_Millisecond",
"()",
"-",
">",
"TimeSpan"
] | def TimeSpan_Millisecond(*args):
"""TimeSpan_Millisecond() -> TimeSpan"""
return _misc_.TimeSpan_Millisecond(*args) | [
"def",
"TimeSpan_Millisecond",
"(",
"*",
"args",
")",
":",
"return",
"_misc_",
".",
"TimeSpan_Millisecond",
"(",
"*",
"args",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L4560-L4562 |
|
baidu/unit-uskit | ee283f3be42a1dadaef80751d4ed4358ee83c2e8 | conf/us/demo/conf_generator.py | python | generate_rank_conf | () | Generate rank.conf | Generate rank.conf | [
"Generate",
"rank",
".",
"conf"
] | def generate_rank_conf():
"""
Generate rank.conf
"""
print('generating rank conf...')
with open('./conf_templates/rank.conf.template') as fin, open('rank.conf', 'w') as fout:
skill_rank = '\n'.join([' order: "{}"'.format(x) for x in options['skill_rank']])
template = ConfTemplate(fin.read())
print(template.substitute({'skill_rank' : skill_rank}), file=fout) | [
"def",
"generate_rank_conf",
"(",
")",
":",
"print",
"(",
"'generating rank conf...'",
")",
"with",
"open",
"(",
"'./conf_templates/rank.conf.template'",
")",
"as",
"fin",
",",
"open",
"(",
"'rank.conf'",
",",
"'w'",
")",
"as",
"fout",
":",
"skill_rank",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"' order: \"{}\"'",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"options",
"[",
"'skill_rank'",
"]",
"]",
")",
"template",
"=",
"ConfTemplate",
"(",
"fin",
".",
"read",
"(",
")",
")",
"print",
"(",
"template",
".",
"substitute",
"(",
"{",
"'skill_rank'",
":",
"skill_rank",
"}",
")",
",",
"file",
"=",
"fout",
")"
] | https://github.com/baidu/unit-uskit/blob/ee283f3be42a1dadaef80751d4ed4358ee83c2e8/conf/us/demo/conf_generator.py#L107-L115 |
|
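A hedged sketch of the substitution step in `generate_rank_conf` above, assuming `ConfTemplate` behaves like `string.Template`; the option values and template text here are made up.

```python
# Sketch only: ConfTemplate is assumed to act like string.Template.
from string import Template

options = {'skill_rank': ['weather', 'music', 'chat']}   # hypothetical data
template = Template('rank_policy {\n$skill_rank\n}')     # stand-in template

skill_rank = '\n'.join('    order: "{}"'.format(x) for x in options['skill_rank'])
print(template.substitute({'skill_rank': skill_rank}))
```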
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/interpolate/interpolate.py | python | interp2d.__call__ | (self, x, y, dx=0, dy=0, assume_sorted=False) | return array(z) | Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values. | Interpolate the function. | [
"Interpolate",
"the",
"function",
"."
] | def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z) | [
"def",
"__call__",
"(",
"self",
",",
"x",
",",
"y",
",",
"dx",
"=",
"0",
",",
"dy",
"=",
"0",
",",
"assume_sorted",
"=",
"False",
")",
":",
"x",
"=",
"atleast_1d",
"(",
"x",
")",
"y",
"=",
"atleast_1d",
"(",
"y",
")",
"if",
"x",
".",
"ndim",
"!=",
"1",
"or",
"y",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"x and y should both be 1-D arrays\"",
")",
"if",
"not",
"assume_sorted",
":",
"x",
"=",
"np",
".",
"sort",
"(",
"x",
")",
"y",
"=",
"np",
".",
"sort",
"(",
"y",
")",
"if",
"self",
".",
"bounds_error",
"or",
"self",
".",
"fill_value",
"is",
"not",
"None",
":",
"out_of_bounds_x",
"=",
"(",
"x",
"<",
"self",
".",
"x_min",
")",
"|",
"(",
"x",
">",
"self",
".",
"x_max",
")",
"out_of_bounds_y",
"=",
"(",
"y",
"<",
"self",
".",
"y_min",
")",
"|",
"(",
"y",
">",
"self",
".",
"y_max",
")",
"any_out_of_bounds_x",
"=",
"np",
".",
"any",
"(",
"out_of_bounds_x",
")",
"any_out_of_bounds_y",
"=",
"np",
".",
"any",
"(",
"out_of_bounds_y",
")",
"if",
"self",
".",
"bounds_error",
"and",
"(",
"any_out_of_bounds_x",
"or",
"any_out_of_bounds_y",
")",
":",
"raise",
"ValueError",
"(",
"\"Values out of range; x must be in %r, y in %r\"",
"%",
"(",
"(",
"self",
".",
"x_min",
",",
"self",
".",
"x_max",
")",
",",
"(",
"self",
".",
"y_min",
",",
"self",
".",
"y_max",
")",
")",
")",
"z",
"=",
"fitpack",
".",
"bisplev",
"(",
"x",
",",
"y",
",",
"self",
".",
"tck",
",",
"dx",
",",
"dy",
")",
"z",
"=",
"atleast_2d",
"(",
"z",
")",
"z",
"=",
"transpose",
"(",
"z",
")",
"if",
"self",
".",
"fill_value",
"is",
"not",
"None",
":",
"if",
"any_out_of_bounds_x",
":",
"z",
"[",
":",
",",
"out_of_bounds_x",
"]",
"=",
"self",
".",
"fill_value",
"if",
"any_out_of_bounds_y",
":",
"z",
"[",
"out_of_bounds_y",
",",
":",
"]",
"=",
"self",
".",
"fill_value",
"if",
"len",
"(",
"z",
")",
"==",
"1",
":",
"z",
"=",
"z",
"[",
"0",
"]",
"return",
"array",
"(",
"z",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/interpolate/interpolate.py#L260-L319 |
|
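A usage sketch for the `interp2d.__call__` entry above. This assumes a SciPy version where `interp2d` still exists (it was deprecated and later removed upstream).

```python
# Build an interpolant on a small grid, then evaluate it on new coordinates.
import numpy as np
from scipy.interpolate import interp2d

x = np.arange(5)
y = np.arange(4)
z = np.add.outer(y, x)              # shape (len(y), len(x)); z = x + y
f = interp2d(x, y, z, kind='linear')
print(f([0.5, 1.5], [0.5, 2.5]))    # shape (len(y_new), len(x_new)) = (2, 2)
```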
apache/madlib | be297fe6beada0640f93317e8948834032718e32 | src/madpack/upgrade_util.py | python | ChangeHandler._add_to_dict | (cls, src_dict, dest_dict) | Update dictionary with contents of another dictionary
This function performs the same function as dict.update except it adds
to an existing value (instead of replacing it) if the value is an
Iterable. | Update dictionary with contents of another dictionary | [
"Update",
"dictionary",
"with",
"contents",
"of",
"another",
"dictionary"
] | def _add_to_dict(cls, src_dict, dest_dict):
""" Update dictionary with contents of another dictionary
This function performs the same function as dict.update except it adds
to an existing value (instead of replacing it) if the value is an
Iterable.
"""
if src_dict:
for k, v in src_dict.items():
if k in dest_dict:
if (isinstance(dest_dict[k], Iterable) and isinstance(v, Iterable)):
dest_dict[k] += v
elif isinstance(dest_dict[k], Iterable):
dest_dict[k].append(v)
else:
dest_dict[k] = v
else:
dest_dict[k] = v
return dest_dict | [
"def",
"_add_to_dict",
"(",
"cls",
",",
"src_dict",
",",
"dest_dict",
")",
":",
"if",
"src_dict",
":",
"for",
"k",
",",
"v",
"in",
"src_dict",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"dest_dict",
":",
"if",
"(",
"isinstance",
"(",
"dest_dict",
"[",
"k",
"]",
",",
"Iterable",
")",
"and",
"isinstance",
"(",
"v",
",",
"Iterable",
")",
")",
":",
"dest_dict",
"[",
"k",
"]",
"+=",
"v",
"elif",
"isinstance",
"(",
"dest_dict",
"[",
"k",
"]",
",",
"Iterable",
")",
":",
"dest_dict",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"else",
":",
"dest_dict",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"dest_dict",
"[",
"k",
"]",
"=",
"v",
"return",
"dest_dict"
] | https://github.com/apache/madlib/blob/be297fe6beada0640f93317e8948834032718e32/src/madpack/upgrade_util.py#L190-L208 |
|
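A standalone sketch of the merge semantics in `_add_to_dict` above. The original tests against `Iterable` (which would also match strings); this sketch narrows to `list` for clarity, and `merge_into` is an illustrative name.

```python
# Iterable values are concatenated; everything else is replaced or inserted.
def merge_into(src, dest):
    for k, v in src.items():
        if k in dest and isinstance(dest[k], list) and isinstance(v, list):
            dest[k] += v            # both lists -> extend
        elif k in dest and isinstance(dest[k], list):
            dest[k].append(v)       # existing list -> append scalar
        else:
            dest[k] = v             # otherwise replace/insert
    return dest

print(merge_into({'a': [2], 'b': 9}, {'a': [1], 'b': 1}))  # {'a': [1, 2], 'b': 9}
```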
fatih/subvim | 241b6d170597857105da219c9b7d36059e9f11fb | vim/base/YouCompleteMe/third_party/jedi/jedi/refactoring.py | python | Refactoring.__init__ | (self, change_dct) | :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) | :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) | [
":",
"param",
"change_dct",
":",
"dict",
"(",
"old_path",
"=",
"(",
"new_path",
"old_lines",
"new_lines",
"))"
] | def __init__(self, change_dct):
"""
:param change_dct: dict(old_path=(new_path, old_lines, new_lines))
"""
self.change_dct = change_dct | [
"def",
"__init__",
"(",
"self",
",",
"change_dct",
")",
":",
"self",
".",
"change_dct",
"=",
"change_dct"
] | https://github.com/fatih/subvim/blob/241b6d170597857105da219c9b7d36059e9f11fb/vim/base/YouCompleteMe/third_party/jedi/jedi/refactoring.py#L26-L30 |
|
microsoft/clang | 86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5 | bindings/python/clang/cindex.py | python | File.time | (self) | return conf.lib.clang_getFileTime(self) | Return the last modification time of the file. | Return the last modification time of the file. | [
"Return",
"the",
"last",
"modification",
"time",
"of",
"the",
"file",
"."
] | def time(self):
"""Return the last modification time of the file."""
return conf.lib.clang_getFileTime(self) | [
"def",
"time",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getFileTime",
"(",
"self",
")"
] | https://github.com/microsoft/clang/blob/86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5/bindings/python/clang/cindex.py#L3089-L3091 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/mapreduce/mapreduce/model.py | python | TransientShardState.from_request | (cls, request) | return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler) | Create new TransientShardState from webapp request. | Create new TransientShardState from webapp request. | [
"Create",
"new",
"TransientShardState",
"from",
"webapp",
"request",
"."
] | def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = json.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = json.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
json.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler) | [
"def",
"from_request",
"(",
"cls",
",",
"request",
")",
":",
"mapreduce_spec",
"=",
"MapreduceSpec",
".",
"from_json_str",
"(",
"request",
".",
"get",
"(",
"\"mapreduce_spec\"",
")",
")",
"mapper_spec",
"=",
"mapreduce_spec",
".",
"mapper",
"input_reader_spec_dict",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"get",
"(",
"\"input_reader_state\"",
")",
",",
"cls",
"=",
"json_util",
".",
"JsonDecoder",
")",
"input_reader",
"=",
"mapper_spec",
".",
"input_reader_class",
"(",
")",
".",
"from_json",
"(",
"input_reader_spec_dict",
")",
"initial_input_reader_spec_dict",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"get",
"(",
"\"initial_input_reader_state\"",
")",
",",
"cls",
"=",
"json_util",
".",
"JsonDecoder",
")",
"initial_input_reader",
"=",
"mapper_spec",
".",
"input_reader_class",
"(",
")",
".",
"from_json",
"(",
"initial_input_reader_spec_dict",
")",
"output_writer",
"=",
"None",
"if",
"mapper_spec",
".",
"output_writer_class",
"(",
")",
":",
"output_writer",
"=",
"mapper_spec",
".",
"output_writer_class",
"(",
")",
".",
"from_json",
"(",
"json",
".",
"loads",
"(",
"request",
".",
"get",
"(",
"\"output_writer_state\"",
",",
"\"{}\"",
")",
",",
"cls",
"=",
"json_util",
".",
"JsonDecoder",
")",
")",
"assert",
"isinstance",
"(",
"output_writer",
",",
"mapper_spec",
".",
"output_writer_class",
"(",
")",
")",
",",
"(",
"\"%s.from_json returned an instance of wrong class: %s\"",
"%",
"(",
"mapper_spec",
".",
"output_writer_class",
"(",
")",
",",
"output_writer",
".",
"__class__",
")",
")",
"handler",
"=",
"util",
".",
"try_deserialize_handler",
"(",
"request",
".",
"get",
"(",
"\"serialized_handler\"",
")",
")",
"if",
"not",
"handler",
":",
"handler",
"=",
"mapreduce_spec",
".",
"mapper",
".",
"handler",
"return",
"cls",
"(",
"mapreduce_spec",
".",
"params",
"[",
"\"base_path\"",
"]",
",",
"mapreduce_spec",
",",
"str",
"(",
"request",
".",
"get",
"(",
"\"shard_id\"",
")",
")",
",",
"int",
"(",
"request",
".",
"get",
"(",
"\"slice_id\"",
")",
")",
",",
"input_reader",
",",
"initial_input_reader",
",",
"output_writer",
"=",
"output_writer",
",",
"retries",
"=",
"int",
"(",
"request",
".",
"get",
"(",
"\"retries\"",
")",
")",
",",
"handler",
"=",
"handler",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mapreduce/mapreduce/model.py#L820-L855 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/requests/cookies.py | python | RequestsCookieJar.itervalues | (self) | Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems(). | Dict-like itervalues() that returns an iterator of values of cookies
from the jar. | [
"Dict",
"-",
"like",
"itervalues",
"()",
"that",
"returns",
"an",
"iterator",
"of",
"values",
"of",
"cookies",
"from",
"the",
"jar",
"."
] | def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value | [
"def",
"itervalues",
"(",
"self",
")",
":",
"for",
"cookie",
"in",
"iter",
"(",
"self",
")",
":",
"yield",
"cookie",
".",
"value"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/requests/cookies.py#L235-L242 |
|
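A usage sketch for the `itervalues` entry above; it assumes the `requests` package is installed, and the cookie names are made up.

```python
import requests

jar = requests.cookies.RequestsCookieJar()
jar.set('session', 'abc123', domain='example.com', path='/')
jar.set('theme', 'dark', domain='example.com', path='/')

for value in jar.itervalues():      # yields cookie values only
    print(value)
```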
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py3/numpy/distutils/fcompiler/__init__.py | python | FCompiler.customize | (self, dist = None) | Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should always be called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv). | Customize Fortran compiler. | [
"Customize",
"Fortran",
"compiler",
"."
] | def customize(self, dist = None):
"""Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should always be called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv).
"""
log.info('customize %s' % (self.__class__.__name__))
self._is_customised = True
self.distutils_vars.use_distribution(dist)
self.command_vars.use_distribution(dist)
self.flag_vars.use_distribution(dist)
self.update_executables()
# find_executables takes care of setting the compiler commands,
# version_cmd, linker_so, linker_exe, ar, and ranlib
self.find_executables()
noopt = self.distutils_vars.get('noopt', False)
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
f77 = self.command_vars.compiler_f77
f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
freeflags = []
fixflags = []
if f77:
f77 = _shell_utils.NativeParser.split(f77)
f77flags = self.flag_vars.f77
if f90:
f90 = _shell_utils.NativeParser.split(f90)
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
# NOTE: this and similar examples are probably just
# excluding --coverage flag when F90 = gfortran --coverage
# instead of putting that flag somewhere more appropriate
# this and similar examples where a Fortran compiler
# environment variable has been customized by CI or a user
# should perhaps eventually be more thoroughly tested and more
# robustly handled
if fix:
fix = _shell_utils.NativeParser.split(fix)
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
# examine get_flags_<tag>_<compiler> for extra flags
# only add them if the method is different from get_flags_<tag>
def get_flags(tag, flags):
# note that self.flag_vars.<tag> calls self.get_flags_<tag>()
flags.extend(getattr(self.flag_vars, tag))
this_get = getattr(self, 'get_flags_' + tag)
for name, c, flagvar in [('f77', f77, f77flags),
('f90', f90, f90flags),
('f90', fix, fixflags)]:
t = '%s_%s' % (tag, name)
if c and this_get is not getattr(self, 'get_flags_' + t):
flagvar.extend(getattr(self.flag_vars, t))
if not noopt:
get_flags('opt', oflags)
if not noarch:
get_flags('arch', aflags)
if debug:
get_flags('debug', dflags)
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
self.set_commands(compiler_f77=f77+f77flags+fflags)
if f90:
self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
if fix:
self.set_commands(compiler_fix=fix+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
linker_so = self.linker_so
if linker_so:
linker_so_flags = self.flag_vars.linker_so
if sys.platform.startswith('aix'):
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
self.set_commands(linker_so=linker_so+linker_so_flags)
linker_exe = self.linker_exe
if linker_exe:
linker_exe_flags = self.flag_vars.linker_exe
self.set_commands(linker_exe=linker_exe+linker_exe_flags)
ar = self.command_vars.archiver
if ar:
arflags = self.flag_vars.ar
self.set_commands(archiver=[ar]+arflags)
self.set_library_dirs(self.get_library_dirs())
self.set_libraries(self.get_libraries()) | [
"def",
"customize",
"(",
"self",
",",
"dist",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"'customize %s'",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"_is_customised",
"=",
"True",
"self",
".",
"distutils_vars",
".",
"use_distribution",
"(",
"dist",
")",
"self",
".",
"command_vars",
".",
"use_distribution",
"(",
"dist",
")",
"self",
".",
"flag_vars",
".",
"use_distribution",
"(",
"dist",
")",
"self",
".",
"update_executables",
"(",
")",
"# find_executables takes care of setting the compiler commands,",
"# version_cmd, linker_so, linker_exe, ar, and ranlib",
"self",
".",
"find_executables",
"(",
")",
"noopt",
"=",
"self",
".",
"distutils_vars",
".",
"get",
"(",
"'noopt'",
",",
"False",
")",
"noarch",
"=",
"self",
".",
"distutils_vars",
".",
"get",
"(",
"'noarch'",
",",
"noopt",
")",
"debug",
"=",
"self",
".",
"distutils_vars",
".",
"get",
"(",
"'debug'",
",",
"False",
")",
"f77",
"=",
"self",
".",
"command_vars",
".",
"compiler_f77",
"f90",
"=",
"self",
".",
"command_vars",
".",
"compiler_f90",
"f77flags",
"=",
"[",
"]",
"f90flags",
"=",
"[",
"]",
"freeflags",
"=",
"[",
"]",
"fixflags",
"=",
"[",
"]",
"if",
"f77",
":",
"f77",
"=",
"_shell_utils",
".",
"NativeParser",
".",
"split",
"(",
"f77",
")",
"f77flags",
"=",
"self",
".",
"flag_vars",
".",
"f77",
"if",
"f90",
":",
"f90",
"=",
"_shell_utils",
".",
"NativeParser",
".",
"split",
"(",
"f90",
")",
"f90flags",
"=",
"self",
".",
"flag_vars",
".",
"f90",
"freeflags",
"=",
"self",
".",
"flag_vars",
".",
"free",
"# XXX Assuming that free format is default for f90 compiler.",
"fix",
"=",
"self",
".",
"command_vars",
".",
"compiler_fix",
"# NOTE: this and similar examples are probably just",
"# excluding --coverage flag when F90 = gfortran --coverage",
"# instead of putting that flag somewhere more appropriate",
"# this and similar examples where a Fortran compiler",
"# environment variable has been customized by CI or a user",
"# should perhaps eventually be more thoroughly tested and more",
"# robustly handled",
"if",
"fix",
":",
"fix",
"=",
"_shell_utils",
".",
"NativeParser",
".",
"split",
"(",
"fix",
")",
"fixflags",
"=",
"self",
".",
"flag_vars",
".",
"fix",
"+",
"f90flags",
"oflags",
",",
"aflags",
",",
"dflags",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"# examine get_flags_<tag>_<compiler> for extra flags",
"# only add them if the method is different from get_flags_<tag>",
"def",
"get_flags",
"(",
"tag",
",",
"flags",
")",
":",
"# note that self.flag_vars.<tag> calls self.get_flags_<tag>()",
"flags",
".",
"extend",
"(",
"getattr",
"(",
"self",
".",
"flag_vars",
",",
"tag",
")",
")",
"this_get",
"=",
"getattr",
"(",
"self",
",",
"'get_flags_'",
"+",
"tag",
")",
"for",
"name",
",",
"c",
",",
"flagvar",
"in",
"[",
"(",
"'f77'",
",",
"f77",
",",
"f77flags",
")",
",",
"(",
"'f90'",
",",
"f90",
",",
"f90flags",
")",
",",
"(",
"'f90'",
",",
"fix",
",",
"fixflags",
")",
"]",
":",
"t",
"=",
"'%s_%s'",
"%",
"(",
"tag",
",",
"name",
")",
"if",
"c",
"and",
"this_get",
"is",
"not",
"getattr",
"(",
"self",
",",
"'get_flags_'",
"+",
"t",
")",
":",
"flagvar",
".",
"extend",
"(",
"getattr",
"(",
"self",
".",
"flag_vars",
",",
"t",
")",
")",
"if",
"not",
"noopt",
":",
"get_flags",
"(",
"'opt'",
",",
"oflags",
")",
"if",
"not",
"noarch",
":",
"get_flags",
"(",
"'arch'",
",",
"aflags",
")",
"if",
"debug",
":",
"get_flags",
"(",
"'debug'",
",",
"dflags",
")",
"fflags",
"=",
"self",
".",
"flag_vars",
".",
"flags",
"+",
"dflags",
"+",
"oflags",
"+",
"aflags",
"if",
"f77",
":",
"self",
".",
"set_commands",
"(",
"compiler_f77",
"=",
"f77",
"+",
"f77flags",
"+",
"fflags",
")",
"if",
"f90",
":",
"self",
".",
"set_commands",
"(",
"compiler_f90",
"=",
"f90",
"+",
"freeflags",
"+",
"f90flags",
"+",
"fflags",
")",
"if",
"fix",
":",
"self",
".",
"set_commands",
"(",
"compiler_fix",
"=",
"fix",
"+",
"fixflags",
"+",
"fflags",
")",
"#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS",
"linker_so",
"=",
"self",
".",
"linker_so",
"if",
"linker_so",
":",
"linker_so_flags",
"=",
"self",
".",
"flag_vars",
".",
"linker_so",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'aix'",
")",
":",
"python_lib",
"=",
"get_python_lib",
"(",
"standard_lib",
"=",
"1",
")",
"ld_so_aix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"python_lib",
",",
"'config'",
",",
"'ld_so_aix'",
")",
"python_exp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"python_lib",
",",
"'config'",
",",
"'python.exp'",
")",
"linker_so",
"=",
"[",
"ld_so_aix",
"]",
"+",
"linker_so",
"+",
"[",
"'-bI:'",
"+",
"python_exp",
"]",
"self",
".",
"set_commands",
"(",
"linker_so",
"=",
"linker_so",
"+",
"linker_so_flags",
")",
"linker_exe",
"=",
"self",
".",
"linker_exe",
"if",
"linker_exe",
":",
"linker_exe_flags",
"=",
"self",
".",
"flag_vars",
".",
"linker_exe",
"self",
".",
"set_commands",
"(",
"linker_exe",
"=",
"linker_exe",
"+",
"linker_exe_flags",
")",
"ar",
"=",
"self",
".",
"command_vars",
".",
"archiver",
"if",
"ar",
":",
"arflags",
"=",
"self",
".",
"flag_vars",
".",
"ar",
"self",
".",
"set_commands",
"(",
"archiver",
"=",
"[",
"ar",
"]",
"+",
"arflags",
")",
"self",
".",
"set_library_dirs",
"(",
"self",
".",
"get_library_dirs",
"(",
")",
")",
"self",
".",
"set_libraries",
"(",
"self",
".",
"get_libraries",
"(",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/distutils/fcompiler/__init__.py#L434-L543 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/psutil/_pswindows.py | python | convert_dos_path | (s) | return os.path.join(driveletter, remainder) | r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt" | r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt" | [
"r",
"Convert",
"paths",
"using",
"native",
"DOS",
"format",
"like",
":",
"\\",
"Device",
"\\",
"HarddiskVolume1",
"\\",
"Windows",
"\\",
"systemew",
"\\",
"file",
".",
"txt",
"into",
":",
"C",
":",
"\\",
"Windows",
"\\",
"systemew",
"\\",
"file",
".",
"txt"
] | def convert_dos_path(s):
r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt"
"""
rawdrive = '\\'.join(s.split('\\')[:3])
driveletter = cext.win32_QueryDosDevice(rawdrive)
remainder = s[len(rawdrive):]
return os.path.join(driveletter, remainder) | [
"def",
"convert_dos_path",
"(",
"s",
")",
":",
"rawdrive",
"=",
"'\\\\'",
".",
"join",
"(",
"s",
".",
"split",
"(",
"'\\\\'",
")",
"[",
":",
"3",
"]",
")",
"driveletter",
"=",
"cext",
".",
"win32_QueryDosDevice",
"(",
"rawdrive",
")",
"remainder",
"=",
"s",
"[",
"len",
"(",
"rawdrive",
")",
":",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"driveletter",
",",
"remainder",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/psutil/_pswindows.py#L193-L202 |
|
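The Windows-only `QueryDosDevice` call in `convert_dos_path` above cannot run everywhere, but the path-splitting step is plain string work; the sketch below uses a made-up device path.

```python
# Split a native DOS device path into its raw drive and the remainder.
s = r'\Device\HarddiskVolume1\Windows\Temp\file.txt'
rawdrive = '\\'.join(s.split('\\')[:3])     # '\\Device\\HarddiskVolume1'
remainder = s[len(rawdrive):]               # '\\Windows\\Temp\\file.txt'
print(rawdrive)
print(remainder)
```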
apache/arrow | af33dd1157eb8d7d9bfac25ebf61445b793b7943 | python/benchmarks/common.py | python | BuiltinsGenerator.generate_decimal_list | (self, n, none_prob=DEFAULT_NONE_PROB,
use_nan=False) | return data | Generate a list of Python Decimals with *none_prob* probability of
an entry being None (or NaN if *use_nan* is true). | Generate a list of Python Decimals with *none_prob* probability of
an entry being None (or NaN if *use_nan* is true). | [
"Generate",
"a",
"list",
"of",
"Python",
"Decimals",
"with",
"*",
"none_prob",
"*",
"probability",
"of",
"an",
"entry",
"being",
"None",
"(",
"or",
"NaN",
"if",
"*",
"use_nan",
"*",
"is",
"true",
")",
"."
] | def generate_decimal_list(self, n, none_prob=DEFAULT_NONE_PROB,
use_nan=False):
"""
Generate a list of Python Decimals with *none_prob* probability of
an entry being None (or NaN if *use_nan* is true).
"""
data = [decimal.Decimal('%.9f' % f)
for f in self.rnd.uniform(0.0, 1.0, n)]
assert len(data) == n
self.sprinkle(data, none_prob,
value=decimal.Decimal('nan') if use_nan else None)
return data | [
"def",
"generate_decimal_list",
"(",
"self",
",",
"n",
",",
"none_prob",
"=",
"DEFAULT_NONE_PROB",
",",
"use_nan",
"=",
"False",
")",
":",
"data",
"=",
"[",
"decimal",
".",
"Decimal",
"(",
"'%.9f'",
"%",
"f",
")",
"for",
"f",
"in",
"self",
".",
"rnd",
".",
"uniform",
"(",
"0.0",
",",
"1.0",
",",
"n",
")",
"]",
"assert",
"len",
"(",
"data",
")",
"==",
"n",
"self",
".",
"sprinkle",
"(",
"data",
",",
"none_prob",
",",
"value",
"=",
"decimal",
".",
"Decimal",
"(",
"'nan'",
")",
"if",
"use_nan",
"else",
"None",
")",
"return",
"data"
] | https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/python/benchmarks/common.py#L159-L170 |
|
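A stdlib-only sketch of the `generate_decimal_list` entry above; the original's NumPy RNG and `sprinkle` helper are replaced with `random` and an inline loop (an assumption, not the benchmark harness itself).

```python
import decimal
import random

def generate_decimal_list(n, none_prob=0.3, use_nan=False):
    rnd = random.Random(42)
    filler = decimal.Decimal('nan') if use_nan else None
    data = [decimal.Decimal('%.9f' % rnd.uniform(0.0, 1.0)) for _ in range(n)]
    for i in range(n):                      # sprinkle None/NaN entries
        if rnd.random() < none_prob:
            data[i] = filler
    return data

print(generate_decimal_list(5))
```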
OGRECave/ogre-next | 287307980e6de8910f04f3cc0994451b075071fd | Tools/BlenderExport/ogrepkg/gui.py | python | Widget.removeFromParent | (self) | return | Remove this widget from parent widget.
Remove a widget from its parent before deleting it. Overwrite
this to also remove all button actions separately with a call
to <code>self.parent._removeButtonAction()</code>. This is not
done in the destructor as Python's garbage collector does not
guarantee to delete objects. | Remove this widget from parent widget.
Remove a widget from its parent before deleting it. Overwrite
this to also remove all button actions separately with a call
to <code>self.parent._removeButtonAction()</code>. This is not
done in the destructor as Python's garbage collector does not
guarantee to delete objects. | [
"Remove",
"this",
"widget",
"from",
"parent",
"widget",
".",
"Remove",
"a",
"widget",
"from",
"its",
"parent",
"before",
"deleting",
"it",
".",
"Overwrite",
"this",
"to",
"also",
"remove",
"all",
"button",
"actions",
"separately",
"with",
"a",
"call",
"to",
"<code",
">",
"self",
".",
"parent",
".",
"_removeButtonAction",
"()",
"<",
"/",
"code",
">",
".",
"This",
"is",
"not",
"done",
"in",
"the",
"destructor",
"as",
"Python",
"s",
"garbage",
"collector",
"does",
"not",
"guarantee",
"to",
"delete",
"objects",
"."
] | def removeFromParent(self):
"""Remove this widget from parent widget.
Remove a widget from its parent before deleting it. Overwrite
this to also remove all button actions separately with a call
to <code>self.parent._removeButtonAction()</code>. This is not
done in the destructor as Python's garbage collector does not
guarantee to delete objects.
"""
self.parent._removeWidget(self)
return | [
"def",
"removeFromParent",
"(",
"self",
")",
":",
"self",
".",
"parent",
".",
"_removeWidget",
"(",
"self",
")",
"return"
] | https://github.com/OGRECave/ogre-next/blob/287307980e6de8910f04f3cc0994451b075071fd/Tools/BlenderExport/ogrepkg/gui.py#L138-L148 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/wsgiref/handlers.py | python | BaseHandler.send_preamble | (self) | Transmit version/status/date/server, via self._write() | Transmit version/status/date/server, via self._write() | [
"Transmit",
"version",
"/",
"status",
"/",
"date",
"/",
"server",
"via",
"self",
".",
"_write",
"()"
] | def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % format_date_time(time.time())
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status) | [
"def",
"send_preamble",
"(",
"self",
")",
":",
"if",
"self",
".",
"origin_server",
":",
"if",
"self",
".",
"client_is_modern",
"(",
")",
":",
"self",
".",
"_write",
"(",
"'HTTP/%s %s\\r\\n'",
"%",
"(",
"self",
".",
"http_version",
",",
"self",
".",
"status",
")",
")",
"if",
"'Date'",
"not",
"in",
"self",
".",
"headers",
":",
"self",
".",
"_write",
"(",
"'Date: %s\\r\\n'",
"%",
"format_date_time",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"if",
"self",
".",
"server_software",
"and",
"'Server'",
"not",
"in",
"self",
".",
"headers",
":",
"self",
".",
"_write",
"(",
"'Server: %s\\r\\n'",
"%",
"self",
".",
"server_software",
")",
"else",
":",
"self",
".",
"_write",
"(",
"'Status: %s\\r\\n'",
"%",
"self",
".",
"status",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/wsgiref/handlers.py#L187-L199 |
|
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/ops/image_ops.py | python | flip_up_down | (image) | return array_ops.reverse(image, [True, False, False]) | Flip an image vertically (upside down).
Outputs the contents of `image` flipped along the first dimension, which is
`height`.
See also `reverse()`.
Args:
image: A 3-D tensor of shape `[height, width, channels].`
Returns:
A 3-D tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` is not supported. | Flip an image vertically (upside down). | [
"Flip",
"an",
"image",
"horizontally",
"(",
"upside",
"down",
")",
"."
] | def flip_up_down(image):
"""Flip an image horizontally (upside down).
Outputs the contents of `image` flipped along the first dimension, which is
`height`.
See also `reverse()`.
Args:
image: A 3-D tensor of shape `[height, width, channels].`
Returns:
A 3-D tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` is not supported.
"""
image = ops.convert_to_tensor(image, name='image')
_Check3DImage(image, require_static=False)
return array_ops.reverse(image, [True, False, False]) | [
"def",
"flip_up_down",
"(",
"image",
")",
":",
"image",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"image",
",",
"name",
"=",
"'image'",
")",
"_Check3DImage",
"(",
"image",
",",
"require_static",
"=",
"False",
")",
"return",
"array_ops",
".",
"reverse",
"(",
"image",
",",
"[",
"True",
",",
"False",
",",
"False",
"]",
")"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/image_ops.py#L382-L401 |
|
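Reversing the first dimension, as `flip_up_down` above does, is the same as a `[::-1]` slice on the height axis; this NumPy-only sketch checks that equivalence without TensorFlow.

```python
import numpy as np

image = np.arange(2 * 3 * 1).reshape(2, 3, 1)   # (height, width, channels)
flipped = image[::-1, :, :]                     # flip rows: upside down
assert (flipped[0] == image[-1]).all()
print(flipped[..., 0])
```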
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py3/sklearn/linear_model/_logistic.py | python | _logistic_loss | (w, X, y, alpha, sample_weight=None) | return out | Computes the logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss. | Computes the logistic loss. | [
"Computes",
"the",
"logistic",
"loss",
"."
] | def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out | [
"def",
"_logistic_loss",
"(",
"w",
",",
"X",
",",
"y",
",",
"alpha",
",",
"sample_weight",
"=",
"None",
")",
":",
"w",
",",
"c",
",",
"yz",
"=",
"_intercept_dot",
"(",
"w",
",",
"X",
",",
"y",
")",
"if",
"sample_weight",
"is",
"None",
":",
"sample_weight",
"=",
"np",
".",
"ones",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
"# Logistic loss is the negative of the log of the logistic function.",
"out",
"=",
"-",
"np",
".",
"sum",
"(",
"sample_weight",
"*",
"log_logistic",
"(",
"yz",
")",
")",
"+",
".5",
"*",
"alpha",
"*",
"np",
".",
"dot",
"(",
"w",
",",
"w",
")",
"return",
"out"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/linear_model/_logistic.py#L137-L170 |
|
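A NumPy-only sketch of the `_logistic_loss` entry above: the negative log of the logistic function summed over samples, plus an L2 penalty. The stable `log_logistic` below mirrors the sklearn helper of the same name, and the intercept handling of `_intercept_dot` is omitted (an assumption made for brevity).

```python
import numpy as np

def log_logistic(t):
    # log(1 / (1 + exp(-t))), computed stably for large |t|
    out = np.empty_like(t, dtype=float)
    pos = t > 0
    out[pos] = -np.log1p(np.exp(-t[pos]))
    out[~pos] = t[~pos] - np.log1p(np.exp(t[~pos]))
    return out

def logistic_loss(w, X, y, alpha):
    z = X @ w                               # no intercept in this sketch
    return -np.sum(log_logistic(y * z)) + 0.5 * alpha * w @ w

rng = np.random.default_rng(0)
X = rng.normal(size=(8, 3))
y = np.sign(rng.normal(size=8))             # labels in {-1, +1}
print(logistic_loss(np.zeros(3), X, y, alpha=1.0))   # 8 * log(2) at w = 0
```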
albertz/openlierox | d316c14a8eb57848ef56e9bfa7b23a56f694a51b | tools/DedicatedServerVideo/gdata/apps/groups/service.py | python | GroupsService.CreateGroup | (self, group_id, group_name, description, email_permission) | return self._PostProperties(uri, properties) | Create a group.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the create operation. | Create a group. | [
"Create",
"a",
"group",
"."
] | def CreateGroup(self, group_id, group_name, description, email_permission):
"""Create a group.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the create operation.
"""
uri = self._ServiceUrl('group', False, group_id, '', '', '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PostProperties(uri, properties) | [
"def",
"CreateGroup",
"(",
"self",
",",
"group_id",
",",
"group_name",
",",
"description",
",",
"email_permission",
")",
":",
"uri",
"=",
"self",
".",
"_ServiceUrl",
"(",
"'group'",
",",
"False",
",",
"group_id",
",",
"''",
",",
"''",
",",
"''",
",",
"''",
")",
"properties",
"=",
"{",
"}",
"properties",
"[",
"'groupId'",
"]",
"=",
"group_id",
"properties",
"[",
"'groupName'",
"]",
"=",
"group_name",
"properties",
"[",
"'description'",
"]",
"=",
"description",
"properties",
"[",
"'emailPermission'",
"]",
"=",
"email_permission",
"return",
"self",
".",
"_PostProperties",
"(",
"uri",
",",
"properties",
")"
] | https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/apps/groups/service.py#L91-L109 |
|
apache/thrift | 0b29261a4f3c6882ef3b09aae47914f0012b0472 | lib/py/src/server/TNonblockingServer.py | python | Connection.is_readable | (self) | return self.status in (WAIT_LEN, WAIT_MESSAGE) | Return True if connection should be added to read list of select | Return True if connection should be added to read list of select | [
"Return",
"True",
"if",
"connection",
"should",
"be",
"added",
"to",
"read",
"list",
"of",
"select"
] | def is_readable(self):
"""Return True if connection should be added to read list of select"""
return self.status in (WAIT_LEN, WAIT_MESSAGE) | [
"def",
"is_readable",
"(",
"self",
")",
":",
"return",
"self",
".",
"status",
"in",
"(",
"WAIT_LEN",
",",
"WAIT_MESSAGE",
")"
] | https://github.com/apache/thrift/blob/0b29261a4f3c6882ef3b09aae47914f0012b0472/lib/py/src/server/TNonblockingServer.py#L214-L216 |
|
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/dnn.py | python | DNNRegressor._get_predict_ops | (self, features) | return super(DNNRegressor, self)._get_predict_ops(features) | See base class. | See base class. | [
"See",
"base",
"class",
"."
] | def _get_predict_ops(self, features):
"""See base class."""
self._validate_dnn_feature_columns(features)
return super(DNNRegressor, self)._get_predict_ops(features) | [
"def",
"_get_predict_ops",
"(",
"self",
",",
"features",
")",
":",
"self",
".",
"_validate_dnn_feature_columns",
"(",
"features",
")",
"return",
"super",
"(",
"DNNRegressor",
",",
"self",
")",
".",
"_get_predict_ops",
"(",
"features",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/dnn.py#L347-L350 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/Pygments/py3/pygments/lexer.py | python | RegexLexer.get_tokens_unprocessed | (self, text, stack=('root',)) | Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``) | Split ``text`` into (tokentype, text) pairs. | [
"Split",
"text",
"into",
"(",
"tokentype",
"text",
")",
"pairs",
"."
] | def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the inital stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(statestack) > 1:
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop, but keep at least one state on the stack
# (random code leading to unexpected pops should
# not allow exceptions)
if abs(new_state) >= len(statestack):
del statestack[1:]
else:
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
# We are here only if all state tokens have been considered
# and there was not a match on any of them.
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break | [
"def",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
",",
"stack",
"=",
"(",
"'root'",
",",
")",
")",
":",
"pos",
"=",
"0",
"tokendefs",
"=",
"self",
".",
"_tokens",
"statestack",
"=",
"list",
"(",
"stack",
")",
"statetokens",
"=",
"tokendefs",
"[",
"statestack",
"[",
"-",
"1",
"]",
"]",
"while",
"1",
":",
"for",
"rexmatch",
",",
"action",
",",
"new_state",
"in",
"statetokens",
":",
"m",
"=",
"rexmatch",
"(",
"text",
",",
"pos",
")",
"if",
"m",
":",
"if",
"action",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"action",
")",
"is",
"_TokenType",
":",
"yield",
"pos",
",",
"action",
",",
"m",
".",
"group",
"(",
")",
"else",
":",
"yield",
"from",
"action",
"(",
"self",
",",
"m",
")",
"pos",
"=",
"m",
".",
"end",
"(",
")",
"if",
"new_state",
"is",
"not",
"None",
":",
"# state transition",
"if",
"isinstance",
"(",
"new_state",
",",
"tuple",
")",
":",
"for",
"state",
"in",
"new_state",
":",
"if",
"state",
"==",
"'#pop'",
":",
"if",
"len",
"(",
"statestack",
")",
">",
"1",
":",
"statestack",
".",
"pop",
"(",
")",
"elif",
"state",
"==",
"'#push'",
":",
"statestack",
".",
"append",
"(",
"statestack",
"[",
"-",
"1",
"]",
")",
"else",
":",
"statestack",
".",
"append",
"(",
"state",
")",
"elif",
"isinstance",
"(",
"new_state",
",",
"int",
")",
":",
"# pop, but keep at least one state on the stack",
"# (random code leading to unexpected pops should",
"# not allow exceptions)",
"if",
"abs",
"(",
"new_state",
")",
">=",
"len",
"(",
"statestack",
")",
":",
"del",
"statestack",
"[",
"1",
":",
"]",
"else",
":",
"del",
"statestack",
"[",
"new_state",
":",
"]",
"elif",
"new_state",
"==",
"'#push'",
":",
"statestack",
".",
"append",
"(",
"statestack",
"[",
"-",
"1",
"]",
")",
"else",
":",
"assert",
"False",
",",
"\"wrong state def: %r\"",
"%",
"new_state",
"statetokens",
"=",
"tokendefs",
"[",
"statestack",
"[",
"-",
"1",
"]",
"]",
"break",
"else",
":",
"# We are here only if all state tokens have been considered",
"# and there was not a match on any of them.",
"try",
":",
"if",
"text",
"[",
"pos",
"]",
"==",
"'\\n'",
":",
"# at EOL, reset state to \"root\"",
"statestack",
"=",
"[",
"'root'",
"]",
"statetokens",
"=",
"tokendefs",
"[",
"'root'",
"]",
"yield",
"pos",
",",
"Text",
",",
"'\\n'",
"pos",
"+=",
"1",
"continue",
"yield",
"pos",
",",
"Error",
",",
"text",
"[",
"pos",
"]",
"pos",
"+=",
"1",
"except",
"IndexError",
":",
"break"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Pygments/py3/pygments/lexer.py#L617-L676 |
|
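A usage sketch driving the state-machine loop in `get_tokens_unprocessed` above through its public entry point; assumes the `pygments` package is installed.

```python
from pygments.lexers import PythonLexer

for pos, token_type, text in PythonLexer().get_tokens_unprocessed('x = 1\n'):
    print(pos, token_type, repr(text))
```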
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/setuptools/__init__.py | python | Command.ensure_string_list | (self, option) | r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"]. | r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"]. | [
"r",
"Ensure",
"that",
"option",
"is",
"a",
"list",
"of",
"strings",
".",
"If",
"option",
"is",
"currently",
"a",
"string",
"we",
"split",
"it",
"either",
"on",
"/",
"\\",
"s",
"*",
"/",
"or",
"/",
"\\",
"s",
"+",
"/",
"so",
"foo",
"bar",
"baz",
"foo",
"bar",
"baz",
"and",
"foo",
"bar",
"baz",
"all",
"become",
"[",
"foo",
"bar",
"baz",
"]",
"."
] | def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val)) | [
"def",
"ensure_string_list",
"(",
"self",
",",
"option",
")",
":",
"val",
"=",
"getattr",
"(",
"self",
",",
"option",
")",
"if",
"val",
"is",
"None",
":",
"return",
"elif",
"isinstance",
"(",
"val",
",",
"string_types",
")",
":",
"setattr",
"(",
"self",
",",
"option",
",",
"re",
".",
"split",
"(",
"r',\\s*|\\s+'",
",",
"val",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"ok",
"=",
"all",
"(",
"isinstance",
"(",
"v",
",",
"string_types",
")",
"for",
"v",
"in",
"val",
")",
"else",
":",
"ok",
"=",
"False",
"if",
"not",
"ok",
":",
"raise",
"DistutilsOptionError",
"(",
"\"'%s' must be a list of strings (got %r)\"",
"%",
"(",
"option",
",",
"val",
")",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/setuptools/__init__.py#L176-L195 |
|
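The regex in `ensure_string_list` above splits on a comma with optional trailing spaces, or on runs of whitespace; all three spellings below normalize to the same list.

```python
import re

for s in ('foo bar baz', 'foo,bar,baz', 'foo, bar baz'):
    print(re.split(r',\s*|\s+', s))   # -> ['foo', 'bar', 'baz']
```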
bigartm/bigartm | 47e37f982de87aa67bfd475ff1f39da696b181b3 | 3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py | python | _AddReprMethod | (message_descriptor, cls) | Helper for _AddMessageMethods(). | Helper for _AddMessageMethods(). | [
"Helper",
"for",
"_AddMessageMethods",
"()",
"."
] | def _AddReprMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __repr__(self):
return text_format.MessageToString(self)
cls.__repr__ = __repr__ | [
"def",
"_AddReprMethod",
"(",
"message_descriptor",
",",
"cls",
")",
":",
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"text_format",
".",
"MessageToString",
"(",
"self",
")",
"cls",
".",
"__repr__",
"=",
"__repr__"
] | https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py#L988-L992 |
|
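A generic sketch of the pattern in `_AddReprMethod` above: attaching a `__repr__` to a class at runtime. `plain_repr` stands in for `text_format.MessageToString`, and all names here are illustrative.

```python
def add_repr_method(cls):
    def __repr__(self):
        return plain_repr(self)     # resolved at call time
    cls.__repr__ = __repr__

def plain_repr(obj):
    return '%s(%r)' % (type(obj).__name__, vars(obj))

class Message:
    def __init__(self):
        self.value = 42

add_repr_method(Message)
print(repr(Message()))              # Message({'value': 42})
```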
albertz/openlierox | d316c14a8eb57848ef56e9bfa7b23a56f694a51b | tools/DedicatedServerVideo/atom/mock_http_core.py | python | MockHttpClient._dump | (self) | return output | Provides debug information in a string. | Provides debug information in a string. | [
"Provides",
"debug",
"information",
"in",
"a",
"string",
"."
] | def _dump(self):
"""Provides debug information in a string."""
output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % (
self.real_client, self.get_cache_file_name())
output += ' recordings:\n'
i = 0
for recording in self._recordings:
output += ' recording %i is for: %s %s\n' % (
i, recording[0].method, str(recording[0].uri))
i += 1
return output | [
"def",
"_dump",
"(",
"self",
")",
":",
"output",
"=",
"'MockHttpClient\\n real_client: %s\\n cache file name: %s\\n'",
"%",
"(",
"self",
".",
"real_client",
",",
"self",
".",
"get_cache_file_name",
"(",
")",
")",
"output",
"+=",
"' recordings:\\n'",
"i",
"=",
"0",
"for",
"recording",
"in",
"self",
".",
"_recordings",
":",
"output",
"+=",
"' recording %i is for: %s %s\\n'",
"%",
"(",
"i",
",",
"recording",
"[",
"0",
"]",
".",
"method",
",",
"str",
"(",
"recording",
"[",
"0",
"]",
".",
"uri",
")",
")",
"i",
"+=",
"1",
"return",
"output"
] | https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/atom/mock_http_core.py#L165-L175 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/difflib.py | python | SequenceMatcher.quick_ratio | (self) | return _calculate_ratio(matches, len(self.a) + len(self.b)) | Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute. | Return an upper bound on ratio() relatively quickly. | [
"Return",
"an",
"upper",
"bound",
"on",
"ratio",
"()",
"relatively",
"quickly",
"."
] | def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b)) | [
"def",
"quick_ratio",
"(",
"self",
")",
":",
"# viewing a and b as multisets, set matches to the cardinality",
"# of their intersection; this counts the number of matches",
"# without regard to order, so is clearly an upper bound",
"if",
"self",
".",
"fullbcount",
"is",
"None",
":",
"self",
".",
"fullbcount",
"=",
"fullbcount",
"=",
"{",
"}",
"for",
"elt",
"in",
"self",
".",
"b",
":",
"fullbcount",
"[",
"elt",
"]",
"=",
"fullbcount",
".",
"get",
"(",
"elt",
",",
"0",
")",
"+",
"1",
"fullbcount",
"=",
"self",
".",
"fullbcount",
"# avail[x] is the number of times x appears in 'b' less the",
"# number of times we've seen it in 'a' so far ... kinda",
"avail",
"=",
"{",
"}",
"availhas",
",",
"matches",
"=",
"avail",
".",
"__contains__",
",",
"0",
"for",
"elt",
"in",
"self",
".",
"a",
":",
"if",
"availhas",
"(",
"elt",
")",
":",
"numb",
"=",
"avail",
"[",
"elt",
"]",
"else",
":",
"numb",
"=",
"fullbcount",
".",
"get",
"(",
"elt",
",",
"0",
")",
"avail",
"[",
"elt",
"]",
"=",
"numb",
"-",
"1",
"if",
"numb",
">",
"0",
":",
"matches",
"=",
"matches",
"+",
"1",
"return",
"_calculate_ratio",
"(",
"matches",
",",
"len",
"(",
"self",
".",
"a",
")",
"+",
"len",
"(",
"self",
".",
"b",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/difflib.py#L647-L674 |
|
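A usage sketch for the `quick_ratio` entry above: it is a cheap upper bound on `ratio()`, so it can gate the more expensive call (the standard `difflib` idiom).

```python
from difflib import SequenceMatcher

sm = SequenceMatcher(None, 'abcd', 'bcde')
assert sm.quick_ratio() >= sm.ratio()   # upper-bound property
if sm.quick_ratio() >= 0.6:             # cheap gate before the real ratio
    print(sm.ratio())                   # 0.75 for these two strings
```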
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/zoombar.py | python | ZoomBar.EnableButton | (self, index, enable=True) | return True | Enables/disables the button at position `index`.
:param `index`: the index of the button to enable/disable;
:param `enable`: ``True`` to enable the button, ``False`` to disable it. | Enables/disables the button at position `index`. | [
"Enables",
"/",
"disables",
"the",
"button",
"at",
"position",
"index",
"."
] | def EnableButton(self, index, enable=True):
"""
Enables/disables the button at position `index`.
:param `index`: the index of the button to enable/disable;
:param `enable`: ``True`` to enable the button, ``False`` to disable it.
"""
if index < 0 or index >= len(self._buttons):
return False
self._buttons[index].Enable(enable)
self.Refresh()
return True | [
"def",
"EnableButton",
"(",
"self",
",",
"index",
",",
"enable",
"=",
"True",
")",
":",
"if",
"index",
"<",
"0",
"or",
"index",
">=",
"len",
"(",
"self",
".",
"_buttons",
")",
":",
"return",
"False",
"self",
".",
"_buttons",
"[",
"index",
"]",
".",
"Enable",
"(",
"enable",
")",
"self",
".",
"Refresh",
"(",
")",
"return",
"True"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/zoombar.py#L987-L1001 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/py/filling.py | python | FillingTree.setText | (self, text) | Display information about the current selection. | Display information about the current selection. | [
"Display",
"information",
"about",
"the",
"current",
"selection",
"."
] | def setText(self, text):
"""Display information about the current selection."""
# This method will likely be replaced by the enclosing app to
# do something more interesting, like write to a text control.
print text | [
"def",
"setText",
"(",
"self",
",",
"text",
")",
":",
"# This method will likely be replaced by the enclosing app to",
"# do something more interesting, like write to a text control.",
"print",
"text"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/filling.py#L232-L237 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/stats/_multivariate.py | python | special_ortho_group_gen.rvs | (self, dim, size=1, random_state=None) | return H | Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim) | Draw random samples from SO(N). | [
"Draw",
"random",
"samples",
"from",
"SO",
"(",
"N",
")",
"."
] | def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H | [
"def",
"rvs",
"(",
"self",
",",
"dim",
",",
"size",
"=",
"1",
",",
"random_state",
"=",
"None",
")",
":",
"size",
"=",
"int",
"(",
"size",
")",
"if",
"size",
">",
"1",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"rvs",
"(",
"dim",
",",
"size",
"=",
"1",
",",
"random_state",
"=",
"random_state",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
"]",
")",
"dim",
"=",
"self",
".",
"_process_parameters",
"(",
"dim",
")",
"random_state",
"=",
"self",
".",
"_get_random_state",
"(",
"random_state",
")",
"H",
"=",
"np",
".",
"eye",
"(",
"dim",
")",
"D",
"=",
"np",
".",
"ones",
"(",
"(",
"dim",
",",
")",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"dim",
")",
":",
"x",
"=",
"random_state",
".",
"normal",
"(",
"size",
"=",
"(",
"dim",
"-",
"n",
"+",
"1",
",",
")",
")",
"D",
"[",
"n",
"-",
"1",
"]",
"=",
"np",
".",
"sign",
"(",
"x",
"[",
"0",
"]",
")",
"x",
"[",
"0",
"]",
"-=",
"D",
"[",
"n",
"-",
"1",
"]",
"*",
"np",
".",
"sqrt",
"(",
"(",
"x",
"*",
"x",
")",
".",
"sum",
"(",
")",
")",
"# Householder transformation",
"Hx",
"=",
"(",
"np",
".",
"eye",
"(",
"dim",
"-",
"n",
"+",
"1",
")",
"-",
"2.",
"*",
"np",
".",
"outer",
"(",
"x",
",",
"x",
")",
"/",
"(",
"x",
"*",
"x",
")",
".",
"sum",
"(",
")",
")",
"mat",
"=",
"np",
".",
"eye",
"(",
"dim",
")",
"mat",
"[",
"n",
"-",
"1",
":",
",",
"n",
"-",
"1",
":",
"]",
"=",
"Hx",
"H",
"=",
"np",
".",
"dot",
"(",
"H",
",",
"mat",
")",
"# Fix the last sign such that the determinant is 1",
"D",
"[",
"-",
"1",
"]",
"=",
"(",
"-",
"1",
")",
"**",
"(",
"1",
"-",
"(",
"dim",
"%",
"2",
")",
")",
"*",
"D",
".",
"prod",
"(",
")",
"# Equivalent to np.dot(np.diag(D), H) but faster, apparently",
"H",
"=",
"(",
"D",
"*",
"H",
".",
"T",
")",
".",
"T",
"return",
"H"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/stats/_multivariate.py#L2729-L2772 |
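A quick usage check of the rvs method documented in the record above; scipy exposes this generator publicly as scipy.stats.special_ortho_group:

import numpy as np
from scipy.stats import special_ortho_group

R = special_ortho_group.rvs(3, random_state=0)
assert np.allclose(R @ R.T, np.eye(3))    # orthogonal
assert np.isclose(np.linalg.det(R), 1.0)  # proper rotation: det(R) = +1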
|
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | lldb/third_party/Python/module/pexpect-4.6/pexpect/pty_spawn.py | python | spawn.writelines | (self, sequence) | This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value. | This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value. | [
"This",
"calls",
"write",
"()",
"for",
"each",
"element",
"in",
"the",
"sequence",
".",
"The",
"sequence",
"can",
"be",
"any",
"iterable",
"object",
"producing",
"strings",
"typically",
"a",
"list",
"of",
"strings",
".",
"This",
"does",
"not",
"add",
"line",
"separators",
".",
"There",
"is",
"no",
"return",
"value",
"."
] | def writelines(self, sequence):
'''This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
'''
for s in sequence:
self.write(s) | [
"def",
"writelines",
"(",
"self",
",",
"sequence",
")",
":",
"for",
"s",
"in",
"sequence",
":",
"self",
".",
"write",
"(",
"s",
")"
] | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/lldb/third_party/Python/module/pexpect-4.6/pexpect/pty_spawn.py#L495-L502 |
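A minimal sketch of writelines in use, assuming a Unix system with pexpect installed and /bin/cat available:

import pexpect

child = pexpect.spawn('/bin/cat')
child.writelines(['hello\n', 'world\n'])  # no separators are added, so include them
child.expect('world')                     # the echoed output comes back
child.close()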
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/autocomp/pycomp.py | python | Scope.pop | (self, indent) | return outer | Pop the scope until it is at the level of the given
indent.
@param indent: indent level to pop scope to
@return: scope of given indent level | Pop the scope until it is at the level of the given
indent.
@param indent: indent level to pop scope to
@return: scope of given indent level | [
"Pop",
"the",
"scope",
"until",
"it",
"is",
"at",
"the",
"level",
"of",
"the",
"given",
"indent",
".",
"@param",
"indent",
":",
"indent",
"level",
"to",
"pop",
"scope",
"to",
"@return",
":",
"scope",
"of",
"given",
"indent",
"level"
] | def pop(self, indent):
"""Pop the scope until it is at the level of the given
indent.
@param indent: indent level to pop scope to
@return: scope of given indent level
"""
outer = self
while outer.parent != None and outer.indent >= indent:
outer = outer.parent
return outer | [
"def",
"pop",
"(",
"self",
",",
"indent",
")",
":",
"outer",
"=",
"self",
"while",
"outer",
".",
"parent",
"!=",
"None",
"and",
"outer",
".",
"indent",
">=",
"indent",
":",
"outer",
"=",
"outer",
".",
"parent",
"return",
"outer"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/autocomp/pycomp.py#L511-L521 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py | python | _replicate_level_trend_models | (multivariate_configuration,
univariate_configuration) | return adder_part | Helper function to construct a multivariate level/trend component. | Helper function to construct a multivariate level/trend component. | [
"Helper",
"function",
"to",
"construct",
"a",
"multivariate",
"level",
"/",
"trend",
"component",
"."
] | def _replicate_level_trend_models(multivariate_configuration,
univariate_configuration):
"""Helper function to construct a multivariate level/trend component."""
with variable_scope.variable_scope("adder"):
# Construct a level and trend model for each feature, with correlated
# transition noise.
adder_features = []
for feature in range(multivariate_configuration.num_features):
with variable_scope.variable_scope("feature{}".format(feature)):
adder_features.append(level_trend.AdderStateSpaceModel(
configuration=univariate_configuration))
adder_part = state_space_model.StateSpaceCorrelatedFeaturesEnsemble(
ensemble_members=adder_features,
configuration=multivariate_configuration)
return adder_part | [
"def",
"_replicate_level_trend_models",
"(",
"multivariate_configuration",
",",
"univariate_configuration",
")",
":",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"\"adder\"",
")",
":",
"# Construct a level and trend model for each feature, with correlated",
"# transition noise.",
"adder_features",
"=",
"[",
"]",
"for",
"feature",
"in",
"range",
"(",
"multivariate_configuration",
".",
"num_features",
")",
":",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"\"feature{}\"",
".",
"format",
"(",
"feature",
")",
")",
":",
"adder_features",
".",
"append",
"(",
"level_trend",
".",
"AdderStateSpaceModel",
"(",
"configuration",
"=",
"univariate_configuration",
")",
")",
"adder_part",
"=",
"state_space_model",
".",
"StateSpaceCorrelatedFeaturesEnsemble",
"(",
"ensemble_members",
"=",
"adder_features",
",",
"configuration",
"=",
"multivariate_configuration",
")",
"return",
"adder_part"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py#L30-L44 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/lmbrwaflib/artifacts_cache.py | python | uid | (self) | | | Override uid computation, and make it independent of the engine path and the 3rdParty path | Override uid computation, and make it independent of the engine path and the 3rdParty path | [
"Override",
"uid",
"computation",
"and",
"make",
"it",
"to",
"be",
"engine",
"path",
"and",
"3rdParty",
"path",
"independent"
] | def uid(self):
"""
Override uid computation, and make it independent of the engine path and the 3rdParty path
"""
try:
return self.uid_
except AttributeError:
# this is not a real hot zone, but we want to avoid surprises here
m = Utils.md5()
up = m.update
up(self.__class__.__name__.encode())
for k in self.inputs + self.outputs:
s = replace_engine_path_and_tp_root_in_string(self.generator.bld, k.abspath())
up(s)
self.uid_ = m.digest()
return self.uid_ | [
"def",
"uid",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"uid_",
"except",
"AttributeError",
":",
"# this is not a real hot zone, but we want to avoid surprises here",
"m",
"=",
"Utils",
".",
"md5",
"(",
")",
"up",
"=",
"m",
".",
"update",
"up",
"(",
"self",
".",
"__class__",
".",
"__name__",
".",
"encode",
"(",
")",
")",
"for",
"k",
"in",
"self",
".",
"inputs",
"+",
"self",
".",
"outputs",
":",
"s",
"=",
"replace_engine_path_and_tp_root_in_string",
"(",
"self",
".",
"generator",
".",
"bld",
",",
"k",
".",
"abspath",
"(",
")",
")",
"up",
"(",
"s",
")",
"self",
".",
"uid_",
"=",
"m",
".",
"digest",
"(",
")",
"return",
"self",
".",
"uid_"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/artifacts_cache.py#L228-L243 |
||
facebook/fboss | 60063db1df37c2ec0e7dcd0955c54885ea9bf7f0 | build/fbcode_builder/getdeps/manifest.py | python | ManifestParser.update_hash | (self, hasher, ctx) | Compute a hash over the configuration for the given
context. The goal is for the hash to change if the config
for that context changes, but not if a change is made to
the config only for a different platform than that expressed
by ctx. The hash is intended to be used to help invalidate
a future cache for the third party build products.
The hasher argument is a hash object returned from hashlib. | Compute a hash over the configuration for the given
context. The goal is for the hash to change if the config
for that context changes, but not if a change is made to
the config only for a different platform than that expressed
by ctx. The hash is intended to be used to help invalidate
a future cache for the third party build products.
The hasher argument is a hash object returned from hashlib. | [
"Compute",
"a",
"hash",
"over",
"the",
"configuration",
"for",
"the",
"given",
"context",
".",
"The",
"goal",
"is",
"for",
"the",
"hash",
"to",
"change",
"if",
"the",
"config",
"for",
"that",
"context",
"changes",
"but",
"not",
"if",
"a",
"change",
"is",
"made",
"to",
"the",
"config",
"only",
"for",
"a",
"different",
"platform",
"than",
"that",
"expressed",
"by",
"ctx",
".",
"The",
"hash",
"is",
"intended",
"to",
"be",
"used",
"to",
"help",
"invalidate",
"a",
"future",
"cache",
"for",
"the",
"third",
"party",
"build",
"products",
".",
"The",
"hasher",
"argument",
"is",
"a",
"hash",
"object",
"returned",
"from",
"hashlib",
"."
] | def update_hash(self, hasher, ctx):
"""Compute a hash over the configuration for the given
context. The goal is for the hash to change if the config
for that context changes, but not if a change is made to
the config only for a different platform than that expressed
by ctx. The hash is intended to be used to help invalidate
a future cache for the third party build products.
The hasher argument is a hash object returned from hashlib."""
for section in sorted(SCHEMA.keys()):
hasher.update(section.encode("utf-8"))
# Note: at the time of writing, nothing in the implementation
# relies on keys in any config section being ordered.
# In theory we could have conflicting flags in different
# config sections and later flags override earlier flags.
# For the purposes of computing a hash we're not super
# concerned about this: manifest changes should be rare
# enough and we'd rather that this trigger an invalidation
# than strive for a cache hit at this time.
pairs = self.get_section_as_ordered_pairs(section, ctx)
pairs.sort(key=lambda pair: pair[0])
for key, value in pairs:
hasher.update(key.encode("utf-8"))
if value is not None:
hasher.update(value.encode("utf-8")) | [
"def",
"update_hash",
"(",
"self",
",",
"hasher",
",",
"ctx",
")",
":",
"for",
"section",
"in",
"sorted",
"(",
"SCHEMA",
".",
"keys",
"(",
")",
")",
":",
"hasher",
".",
"update",
"(",
"section",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"# Note: at the time of writing, nothing in the implementation",
"# relies on keys in any config section being ordered.",
"# In theory we could have conflicting flags in different",
"# config sections and later flags override earlier flags.",
"# For the purposes of computing a hash we're not super",
"# concerned about this: manifest changes should be rare",
"# enough and we'd rather that this trigger an invalidation",
"# than strive for a cache hit at this time.",
"pairs",
"=",
"self",
".",
"get_section_as_ordered_pairs",
"(",
"section",
",",
"ctx",
")",
"pairs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"pair",
":",
"pair",
"[",
"0",
"]",
")",
"for",
"key",
",",
"value",
"in",
"pairs",
":",
"hasher",
".",
"update",
"(",
"key",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"if",
"value",
"is",
"not",
"None",
":",
"hasher",
".",
"update",
"(",
"value",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
] | https://github.com/facebook/fboss/blob/60063db1df37c2ec0e7dcd0955c54885ea9bf7f0/build/fbcode_builder/getdeps/manifest.py#L328-L352 |
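A hedged sketch of how update_hash could feed a cache key; the manifest and ctx objects are assumed to come from the surrounding getdeps tooling and are not constructed here:

import hashlib

hasher = hashlib.sha256()
manifest.update_hash(hasher, ctx)  # manifest: ManifestParser, ctx: platform context (assumed)
cache_key = hasher.hexdigest()     # changes whenever the config for this context changes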
||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/src/robotsim.py | python | RobotModel.interpolateDeriv | (self, a, b) | return _robotsim.RobotModel_interpolateDeriv(self, a, b) | interpolateDeriv(RobotModel self, doubleVector a, doubleVector b)
Returns the configuration derivative at a as you interpolate toward b at unit
speed. | interpolateDeriv(RobotModel self, doubleVector a, doubleVector b) | [
"interpolateDeriv",
"(",
"RobotModel",
"self",
"doubleVector",
"a",
"doubleVector",
"b",
")"
] | def interpolateDeriv(self, a, b):
"""
interpolateDeriv(RobotModel self, doubleVector a, doubleVector b)
Returns the configuration derivative at a as you interpolate toward b at unit
speed.
"""
return _robotsim.RobotModel_interpolateDeriv(self, a, b) | [
"def",
"interpolateDeriv",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"return",
"_robotsim",
".",
"RobotModel_interpolateDeriv",
"(",
"self",
",",
"a",
",",
"b",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/src/robotsim.py#L5084-L5094 |
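A hedged sketch of interpolateDeriv; the world file path is hypothetical and the module layout is assumed from current Klampt releases:

from klampt import WorldModel

world = WorldModel()
world.readFile("my_world.xml")     # hypothetical world file
robot = world.robot(0)
a = robot.getConfig()
b = [qi + 0.1 for qi in a]         # a nearby configuration
dq = robot.interpolateDeriv(a, b)  # derivative at a, moving toward b at unit speed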
|
tfwu/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | python/build/lib.linux-x86_64-2.7/mxnet/symbol.py | python | Symbol._get_ndarray_inputs | (arg_key, args, arg_names, allow_missing) | return c_array(NDArrayHandle, arg_handles), arg_arrays | Helper function to get ndarray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If type is list of NDArray, the positions are in the same order as arg_names.
If type is dict of str to NDArray, then it maps the name of arguments
to the corresponding NDArray.
arg_names : list of string
List of argument names.
allow_missing : boolean
Whether missing argument is allowed.
When allowed, the missing handle will be set to None(null)
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input. | Helper function to get ndarray lists handles from various inputs. | [
"Helper",
"function",
"to",
"get",
"ndarray",
"lists",
"handles",
"from",
"various",
"inputs",
"."
] | def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):
"""Helper function to get ndarray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If type is list of NDArray, the positions are in the same order as arg_names.
If type is dict of str to NDArray, then it maps the name of arguments
to the corresponding NDArray.
arg_names : list of string
List of argument names.
allow_missing : boolean
Whether missing argument is allowed.
When allowed, the missing handle will be set to None(null)
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input.
"""
# setup args
arg_handles = []
arg_arrays = []
if isinstance(args, list):
if len(args) != len(arg_names):
raise ValueError('Length of %s do not match number of arguments' % arg_key)
for narr in args:
if not isinstance(narr, NDArray):
raise TypeError('Only Accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays = args
elif isinstance(args, dict):
for name in arg_names:
if name in args:
narr = args[name]
if not isinstance(narr, NDArray):
raise TypeError('Only Accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays.append(narr)
else:
if allow_missing:
arg_handles.append(None)
arg_arrays.append(None)
else:
raise ValueError('Must specify all the arguments in %s' % arg_key)
else:
raise TypeError('Only Accept list of NDArrays or dict of str to NDArray')
return c_array(NDArrayHandle, arg_handles), arg_arrays | [
"def",
"_get_ndarray_inputs",
"(",
"arg_key",
",",
"args",
",",
"arg_names",
",",
"allow_missing",
")",
":",
"# setup args",
"arg_handles",
"=",
"[",
"]",
"arg_arrays",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"args",
",",
"list",
")",
":",
"if",
"len",
"(",
"args",
")",
"!=",
"len",
"(",
"arg_names",
")",
":",
"raise",
"ValueError",
"(",
"'Length of %s do not match number of arguments'",
"%",
"arg_key",
")",
"for",
"narr",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"narr",
",",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"'Only Accept list of NDArrays or dict of str to NDArray'",
")",
"arg_handles",
".",
"append",
"(",
"narr",
".",
"handle",
")",
"arg_arrays",
"=",
"args",
"elif",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"for",
"name",
"in",
"arg_names",
":",
"if",
"name",
"in",
"args",
":",
"narr",
"=",
"args",
"[",
"name",
"]",
"if",
"not",
"isinstance",
"(",
"narr",
",",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"'Only Accept list of NDArrays or dict of str to NDArray'",
")",
"arg_handles",
".",
"append",
"(",
"narr",
".",
"handle",
")",
"arg_arrays",
".",
"append",
"(",
"narr",
")",
"else",
":",
"if",
"allow_missing",
":",
"arg_handles",
".",
"append",
"(",
"None",
")",
"arg_arrays",
".",
"append",
"(",
"None",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Must specify all the arguments in %s'",
"%",
"arg_key",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Only Accept list of NDArrays or dict of str to NDArray'",
")",
"return",
"c_array",
"(",
"NDArrayHandle",
",",
"arg_handles",
")",
",",
"arg_arrays"
] | https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/build/lib.linux-x86_64-2.7/mxnet/symbol.py#L535-L588 |
|
htcondor/htcondor | 4829724575176d1d6c936e4693dfd78a728569b0 | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py | python | ISkype.CreateSms | (self, MessageType, *TargetNumbers) | return ISmsMessage(chop(self._DoCommand('CREATE SMS %s %s' % (MessageType, ', '.join(TargetNumbers))), 2)[1], self) | Creates an SMS message.
@param MessageType: Message type.
@type MessageType: L{SMS message type<enums.smsMessageTypeUnknown>}
@param TargetNumbers: One or more target SMS numbers.
@type TargetNumbers: unicode
@return: An SMS message object.
@rtype: L{ISmsMessage} | Creates an SMS message. | [
"Creates",
"an",
"SMS",
"message",
"."
] | def CreateSms(self, MessageType, *TargetNumbers):
'''Creates an SMS message.
@param MessageType: Message type.
@type MessageType: L{SMS message type<enums.smsMessageTypeUnknown>}
@param TargetNumbers: One or more target SMS numbers.
@type TargetNumbers: unicode
@return: An SMS message object.
@rtype: L{ISmsMessage}
'''
return ISmsMessage(chop(self._DoCommand('CREATE SMS %s %s' % (MessageType, ', '.join(TargetNumbers))), 2)[1], self) | [
"def",
"CreateSms",
"(",
"self",
",",
"MessageType",
",",
"*",
"TargetNumbers",
")",
":",
"return",
"ISmsMessage",
"(",
"chop",
"(",
"self",
".",
"_DoCommand",
"(",
"'CREATE SMS %s %s'",
"%",
"(",
"MessageType",
",",
"', '",
".",
"join",
"(",
"TargetNumbers",
")",
")",
")",
",",
"2",
")",
"[",
"1",
"]",
",",
"self",
")"
] | https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py#L636-L646 |
|
hpi-xnor/BMXNet | ed0b201da6667887222b8e4b5f997c4f6b61943d | python/mxnet/operator.py | python | PythonOp.list_arguments | (self) | return ['data'] | Interface for ``list_arguments``. Can override when creating new operators.
Returns
-------
arguments : list
List of argument names; the default implementation
returns ['data']. | Interface for ``list_arguments``. Can override when creating new operators. | [
"Interface",
"for",
"list_arguments",
".",
"Can",
"override",
"when",
"creating",
"new",
"operators",
"."
] | def list_arguments(self):
"""Interface for ``list_arguments``. Can override when creating new operators.
Returns
-------
arguments : list
List of argument names; the default implementation
returns ['data'].
"""
return ['data'] | [
"def",
"list_arguments",
"(",
"self",
")",
":",
"return",
"[",
"'data'",
"]"
] | https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/operator.py#L122-L131 |
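A sketch of overriding list_arguments in a PythonOp subclass; the 'scale' blob is a hypothetical extra parameter, and the import path is assumed from this record's module:

from mxnet.operator import PythonOp

class ScaleOp(PythonOp):
    def list_arguments(self):
        # one data blob plus a hypothetical learnable 'scale' blob
        return ['data', 'scale']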
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/nn/functional.py | python | adaptive_max_pool1d_with_indices | (
input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False
) | | return torch.adaptive_max_pool1d(input, output_size) | Applies a 1D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
return_indices: whether to return pooling indices. Default: ``False`` | Applies a 1D adaptive max pooling over an input signal composed of
several input planes. | [
"r",
"Applies",
"a",
"1D",
"adaptive",
"max",
"pooling",
"over",
"an",
"input",
"signal",
"composed",
"of",
"several",
"input",
"planes",
"."
] | def adaptive_max_pool1d_with_indices(
input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""Applies a 1D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices
)
return torch.adaptive_max_pool1d(input, output_size) | [
"def",
"adaptive_max_pool1d_with_indices",
"(",
"input",
":",
"Tensor",
",",
"output_size",
":",
"BroadcastingList1",
"[",
"int",
"]",
",",
"return_indices",
":",
"bool",
"=",
"False",
")",
"->",
"Tuple",
"[",
"Tensor",
",",
"Tensor",
"]",
":",
"if",
"has_torch_function_unary",
"(",
"input",
")",
":",
"return",
"handle_torch_function",
"(",
"adaptive_max_pool1d_with_indices",
",",
"(",
"input",
",",
")",
",",
"input",
",",
"output_size",
",",
"return_indices",
"=",
"return_indices",
")",
"return",
"torch",
".",
"adaptive_max_pool1d",
"(",
"input",
",",
"output_size",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/nn/functional.py#L1091-L1107 |
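A short usage example for the functional wrapper above; torch.nn.functional exposes it directly:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 16)                            # (batch, channels, length)
out, idx = F.adaptive_max_pool1d_with_indices(x, 8)  # pool each channel down to length 8
print(out.shape, idx.shape)                          # torch.Size([2, 4, 8]) for both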
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/linalg/special_matrices.py | python | hilbert | (n) | return h | Create a Hilbert matrix of order `n`.
Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
Parameters
----------
n : int
The size of the array to create.
Returns
-------
h : (n, n) ndarray
The Hilbert matrix.
See Also
--------
invhilbert : Compute the inverse of a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import hilbert
>>> hilbert(3)
array([[ 1. , 0.5 , 0.33333333],
[ 0.5 , 0.33333333, 0.25 ],
[ 0.33333333, 0.25 , 0.2 ]]) | Create a Hilbert matrix of order `n`. | [
"Create",
"a",
"Hilbert",
"matrix",
"of",
"order",
"n",
"."
] | def hilbert(n):
"""
Create a Hilbert matrix of order `n`.
Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
Parameters
----------
n : int
The size of the array to create.
Returns
-------
h : (n, n) ndarray
The Hilbert matrix.
See Also
--------
invhilbert : Compute the inverse of a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import hilbert
>>> hilbert(3)
array([[ 1. , 0.5 , 0.33333333],
[ 0.5 , 0.33333333, 0.25 ],
[ 0.33333333, 0.25 , 0.2 ]])
"""
values = 1.0 / (1.0 + np.arange(2 * n - 1))
h = hankel(values[:n], r=values[n - 1:])
return h | [
"def",
"hilbert",
"(",
"n",
")",
":",
"values",
"=",
"1.0",
"/",
"(",
"1.0",
"+",
"np",
".",
"arange",
"(",
"2",
"*",
"n",
"-",
"1",
")",
")",
"h",
"=",
"hankel",
"(",
"values",
"[",
":",
"n",
"]",
",",
"r",
"=",
"values",
"[",
"n",
"-",
"1",
":",
"]",
")",
"return",
"h"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/linalg/special_matrices.py#L654-L689 |
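A quick check of the entry formula h[i, j] = 1/(i + j + 1) from the docstring above:

import numpy as np
from scipy.linalg import hilbert

H = hilbert(4)
assert np.isclose(H[1, 2], 1.0 / (1 + 2 + 1))
print(np.linalg.cond(H))  # Hilbert matrices are notoriously ill-conditioned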
|
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/ops/math_grad.py | python | _BatchMatMul | (op, grad) | return grad_x, grad_y | Returns the gradient of x and y given the gradient of x * y. | Returns the gradient of x and y given the gradient of x * y. | [
"Returns",
"the",
"gradient",
"of",
"x",
"and",
"y",
"given",
"the",
"gradient",
"of",
"x",
"*",
"y",
"."
] | def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.batch_matmul(grad, y, False, True)
grad_y = math_ops.batch_matmul(x, grad, True, False)
else:
grad_x = math_ops.batch_matmul(grad, y, False, False)
grad_y = math_ops.batch_matmul(grad, x, True, False)
else:
if not adj_y:
grad_x = math_ops.batch_matmul(y, grad, False, True)
grad_y = math_ops.batch_matmul(x, grad, False, False)
else:
grad_x = math_ops.batch_matmul(y, grad, True, True)
grad_y = math_ops.batch_matmul(grad, x, True, True)
return grad_x, grad_y | [
"def",
"_BatchMatMul",
"(",
"op",
",",
"grad",
")",
":",
"x",
"=",
"op",
".",
"inputs",
"[",
"0",
"]",
"y",
"=",
"op",
".",
"inputs",
"[",
"1",
"]",
"adj_x",
"=",
"op",
".",
"get_attr",
"(",
"\"adj_x\"",
")",
"adj_y",
"=",
"op",
".",
"get_attr",
"(",
"\"adj_y\"",
")",
"if",
"not",
"adj_x",
":",
"if",
"not",
"adj_y",
":",
"grad_x",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"grad",
",",
"y",
",",
"False",
",",
"True",
")",
"grad_y",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"x",
",",
"grad",
",",
"True",
",",
"False",
")",
"else",
":",
"grad_x",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"grad",
",",
"y",
",",
"False",
",",
"False",
")",
"grad_y",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"grad",
",",
"x",
",",
"True",
",",
"False",
")",
"else",
":",
"if",
"not",
"adj_y",
":",
"grad_x",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"y",
",",
"grad",
",",
"False",
",",
"True",
")",
"grad_y",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"x",
",",
"grad",
",",
"False",
",",
"False",
")",
"else",
":",
"grad_x",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"y",
",",
"grad",
",",
"True",
",",
"True",
")",
"grad_y",
"=",
"math_ops",
".",
"batch_matmul",
"(",
"grad",
",",
"x",
",",
"True",
",",
"True",
")",
"return",
"grad_x",
",",
"grad_y"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/math_grad.py#L706-L728 |
|
devsisters/libquic | 8954789a056d8e7d5fcb6452fd1572ca57eb5c4e | boringssl/util/generate-asm-lcov.py | python | is_asm | (l) | return True | Returns whether a line should be considered to be an instruction. | Returns whether a line should be considered to be an instruction. | [
"Returns",
"whether",
"a",
"line",
"should",
"be",
"considered",
"to",
"be",
"an",
"instruction",
"."
] | def is_asm(l):
"""Returns whether a line should be considered to be an instruction."""
l = l.strip()
# Empty lines
if l == '':
return False
# Comments
if l.startswith('#'):
return False
# Assembly Macros
if l.startswith('.'):
return False
# Label
if l.endswith(':'):
return False
return True | [
"def",
"is_asm",
"(",
"l",
")",
":",
"l",
"=",
"l",
".",
"strip",
"(",
")",
"# Empty lines",
"if",
"l",
"==",
"''",
":",
"return",
"False",
"# Comments",
"if",
"l",
".",
"startswith",
"(",
"'#'",
")",
":",
"return",
"False",
"# Assembly Macros",
"if",
"l",
".",
"startswith",
"(",
"'.'",
")",
":",
"return",
"False",
"# Label",
"if",
"l",
".",
"endswith",
"(",
"':'",
")",
":",
"return",
"False",
"return",
"True"
] | https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/boringssl/util/generate-asm-lcov.py#L33-L48 |
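A small demonstration of the classifier above on a snippet of AT&T-style assembly, using the is_asm function exactly as defined in the record:

lines = ['.text', 'main:', '# prologue', '  movq %rax, %rbx', '', '  ret']
print([l for l in lines if is_asm(l)])  # only the two instruction lines survive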
|
macchina-io/macchina.io | ef24ba0e18379c3dd48fb84e6dbf991101cb8db0 | platform/JS/V8/tools/gyp/pylib/gyp/msvs_emulation.py | python | MsvsSettings.HasExplicitAsmRules | (self, spec) | return self._HasExplicitRuleForExtension(spec, 'asm') | Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files. | Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files. | [
"Determine",
"if",
"there",
"s",
"an",
"explicit",
"rule",
"for",
"asm",
"files",
".",
"When",
"there",
"isn",
"t",
"we",
"need",
"to",
"generate",
"implicit",
"rules",
"to",
"assemble",
".",
"asm",
"files",
"."
] | def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm') | [
"def",
"HasExplicitAsmRules",
"(",
"self",
",",
"spec",
")",
":",
"return",
"self",
".",
"_HasExplicitRuleForExtension",
"(",
"spec",
",",
"'asm'",
")"
] | https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/tools/gyp/pylib/gyp/msvs_emulation.py#L847-L850 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/framework/dtypes.py | python | DType.is_floating | (self) | return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
np.floating) | Returns whether this is a (non-quantized, real) floating point type. | Returns whether this is a (non-quantized, real) floating point type. | [
"Returns",
"whether",
"this",
"is",
"a",
"(",
"non",
"-",
"quantized",
"real",
")",
"floating",
"point",
"type",
"."
] | def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
np.floating) | [
"def",
"is_floating",
"(",
"self",
")",
":",
"return",
"self",
".",
"is_numpy_compatible",
"and",
"issubclass",
"(",
"self",
".",
"as_numpy_dtype",
",",
"np",
".",
"floating",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/framework/dtypes.py#L141-L144 |
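The property in action on a few TensorFlow dtypes; current TensorFlow releases expose the same attribute:

import tensorflow as tf

print(tf.float32.is_floating)    # True
print(tf.int32.is_floating)      # False
print(tf.complex64.is_floating)  # False: complex is not a real floating point type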
|
google/earthenterprise | 0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9 | earth_enterprise/src/server/wsgi/serve/snippets/util/proto_reflection.py | python | _IsFieldPathPrimitive | (fieldpath, protobuf) | return (not rest) and (fd.type != fd.TYPE_MESSAGE) | Whether the fieldpath is a (true) leaf in protobuf.
Checks whether the protobuf structurally contains the path and that it ends
at a primitive (i.e. it rejects subtree paths).
Args:
fieldpath: path of the field within the protobuf
protobuf: container of the protobuf
Returns:
whether the fieldpath ends at a true leaf, i.e. a value field. | Whether the fieldpath is a (true) leaf in protobuf. | [
"Whether",
"the",
"fieldpath",
"is",
"a",
"(",
"true",
")",
"leaf",
"in",
"protobuf",
"."
] | def _IsFieldPathPrimitive(fieldpath, protobuf):
"""Whether the fieldpath is a (true) leaf in protobuf.
Checks whether the protobuf structurally contains the path and that it ends
at a primitive (i.e. it rejects subtree paths).
Args:
fieldpath: path of the field within the protobuf
protobuf: container of the protobuf
Returns:
whether the fieldpath ends at a true leaf, i.e. a value field.
"""
toks = path_utils.SplitAbstract(path_utils.AsAbstract(fieldpath))
fname, rest = path_utils.FirstRest(toks)
fd = protobuf.DESCRIPTOR.fields_by_name.get(fname, None)
if fd is None:
return False
while bool(rest) and fd.type == fd.TYPE_MESSAGE:
fname, rest = path_utils.FirstRest(rest)
fd = fd.message_type.fields_by_name.get(fname, None)
if fd is None:
# path contains something not in the protobuf
return False
# Both ended on a primitive.
return (not rest) and (fd.type != fd.TYPE_MESSAGE) | [
"def",
"_IsFieldPathPrimitive",
"(",
"fieldpath",
",",
"protobuf",
")",
":",
"toks",
"=",
"path_utils",
".",
"SplitAbstract",
"(",
"path_utils",
".",
"AsAbstract",
"(",
"fieldpath",
")",
")",
"fname",
",",
"rest",
"=",
"path_utils",
".",
"FirstRest",
"(",
"toks",
")",
"fd",
"=",
"protobuf",
".",
"DESCRIPTOR",
".",
"fields_by_name",
".",
"get",
"(",
"fname",
",",
"None",
")",
"if",
"fd",
"is",
"None",
":",
"return",
"False",
"while",
"bool",
"(",
"rest",
")",
"and",
"fd",
".",
"type",
"==",
"fd",
".",
"TYPE_MESSAGE",
":",
"fname",
",",
"rest",
"=",
"path_utils",
".",
"FirstRest",
"(",
"rest",
")",
"fd",
"=",
"fd",
".",
"message_type",
".",
"fields_by_name",
".",
"get",
"(",
"fname",
",",
"None",
")",
"if",
"fd",
"is",
"None",
":",
"# path contains something not in the protobuf",
"return",
"False",
"# Both ended on a primitive.",
"return",
"(",
"not",
"rest",
")",
"and",
"(",
"fd",
".",
"type",
"!=",
"fd",
".",
"TYPE_MESSAGE",
")"
] | https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/server/wsgi/serve/snippets/util/proto_reflection.py#L320-L344 |
|
polyworld/polyworld | eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26 | scripts/agent/brain.py | python | BrainAnatomy.cxnmatrix | (self) | return cxn | Lazy loading of connection matrix | Lazy loading of connection matrix | [
"Lazy",
"loading",
"of",
"connection",
"matrix"
] | def cxnmatrix(self):
''' Lazy loading of connection matrix '''
# Open brain anatomy, clean lines
f = open(filename)
lines = [ x.strip('\n; ') for x in f.readlines()]
lines = lines[1:]
f.close()
assert len(lines) == self.num_neurons, "#lines != num_neurons"
# import all neurons and build the connection matrix with sanity checks
cells = [ float(cell) for row in lines for cell in row.split(' ') ]
assert len(cells) == self.num_neurons*self.num_neurons,\
"Did not find a square matrix in anatomy file"
cxn = array(cells).reshape(self.num_neurons, self.num_neurons)
assert -self.max_weight <= self.cxn.all() <= self.max_weight,\
"anatomy matrix wasn't within [-max_weight,max_weight]"
return cxn | [
"def",
"cxnmatrix",
"(",
"self",
")",
":",
"# Open brain anatomy, clean lines",
"f",
"=",
"open",
"(",
"filename",
")",
"lines",
"=",
"[",
"x",
".",
"strip",
"(",
"'\\n; '",
")",
"for",
"x",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"lines",
"=",
"lines",
"[",
"1",
":",
"]",
"f",
".",
"close",
"(",
")",
"assert",
"len",
"(",
"lines",
")",
"==",
"self",
".",
"num_neurons",
",",
"\"#lines != num_neurons\"",
"# import all neurons and build the connection matrix with sanity checks",
"cells",
"=",
"[",
"float",
"(",
"cell",
")",
"for",
"row",
"in",
"lines",
"for",
"cell",
"in",
"row",
".",
"split",
"(",
"' '",
")",
"]",
"assert",
"len",
"(",
"cells",
")",
"==",
"self",
".",
"num_neurons",
"*",
"self",
".",
"num_neurons",
",",
"\"Did not find a square matrix in anatomy file\"",
"cxn",
"=",
"array",
"(",
"cells",
")",
".",
"reshape",
"(",
"self",
".",
"num_neurons",
",",
"self",
".",
"num_neurons",
")",
"assert",
"-",
"self",
".",
"max_weight",
"<=",
"self",
".",
"cxn",
".",
"all",
"(",
")",
"<=",
"self",
".",
"max_weight",
",",
"\"anatomy matrix wasn't within [-max_weight,max_weight]\"",
"return",
"cxn"
] | https://github.com/polyworld/polyworld/blob/eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26/scripts/agent/brain.py#L42-L62 |
|
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/msginit.py | python | _optional_no_translator_flag | (env) | Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode. | Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode. | [
"Return",
"--",
"no",
"-",
"translator",
"flag",
"if",
"we",
"run",
"*",
"msginit",
"(",
"1",
")",
"*",
"in",
"non",
"-",
"interactive",
"mode",
"."
] | def _optional_no_translator_flag(env):
""" Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode."""
import SCons.Util
if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT']
else:
autoinit = False
if autoinit:
return [SCons.Util.CLVar('--no-translator')]
else:
return [SCons.Util.CLVar('')] | [
"def",
"_optional_no_translator_flag",
"(",
"env",
")",
":",
"import",
"SCons",
".",
"Util",
"if",
"'POAUTOINIT'",
"in",
"env",
":",
"autoinit",
"=",
"env",
"[",
"'POAUTOINIT'",
"]",
"else",
":",
"autoinit",
"=",
"False",
"if",
"autoinit",
":",
"return",
"[",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'--no-translator'",
")",
"]",
"else",
":",
"return",
"[",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"''",
")",
"]"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/msginit.py#L34-L45 |
||
baidu/bigflow | 449245016c0df7d1252e85581e588bfc60cefad3 | bigflow_python/python/bigflow/transforms.py | python | cogroup | (*pcollections, **options) | | return bigflow.transform_impls.cogroup.cogroup(*pcollections, **options) | Performs a cogroup over all input pcollections.
cogroup requires that every element of each input PCollection is a (k, v) pair.
It uses k as the grouping key, cogroups the input PCollections,
and returns a PTable representing the grouping result.
Each value of the returned PTable is a tuple whose elements are PCollections;
the n-th PCollection holds all the data of the n-th input PCollection under the current key.
If an input PCollection has no data under some key, the corresponding PCollection is empty.
Currently a key_extractor cannot be specified the way group_by allows.
group_by_key can be understood as the special case of cogroup with a single argument.
Args:
*pcollections: input PCollections
**options: configuration options
Returns:
PTable: the grouping result
>>> from bigflow import transforms
>>> _p1 = _pipeline.parallelize([("A", 1), ("A", 2), ("B", 3)])
>>> _p2 = _pipeline.parallelize([("A", 4)])
>>> _p = transforms.cogroup(_p1, _p2)
# _p equals {"A": ([1, 2], [4]), "B": ([3], [])}, but get() is not yet supported when the PTable values are tuples of PCollections because that is hard to implement.
>>> _p.apply_values(lambda x, y: transforms.union(x, y)).get()
{"A": [1, 2, 4], "B": [3]}
>>> def distinct_and_join(p, q): # deduplicate, then join
... return p.cogroup(q) \\
... .apply_values(lambda a, b: (a.distinct(), b.distinct())) \\
... .apply_values(transforms.cartesian) \\
... .flatten()
>>> _p1 = _pipeline.parallelize([("A", 1), ("A", 2), ("A", 1), ("C", 1)])
>>> _p2 = _pipeline.parallelize([("A", 3), ("A", 3), ("B", 2)])
>>> print distinct_and_join(_p1, _p2).get()
[("A", (1, 3)), ("A", (2, 3))]
>>> # In the future bigflow will automatically optimize p.distinct().join(q.distinct()) into the form above (work in progress)
>>> def semi_join(p, q): # emit only one join result per key
... return p.cogroup(q) \\
... .apply_values(lambda a, b: (a.take(1), b.take(1))) \\
... .apply_values(transforms.cartesian) \\
... .flatten()
>>> print semi_join(_p1, _p2).get()
[("A", (1, 3))] | Performs a cogroup over all input pcollections. | [
"Performs",
"a",
"cogroup",
"over",
"all",
"input",
"pcollections",
"."
] | def cogroup(*pcollections, **options):
"""
Performs a cogroup over all input pcollections.
cogroup requires that every element of each input PCollection is a (k, v) pair.
It uses k as the grouping key, cogroups the input PCollections,
and returns a PTable representing the grouping result.
Each value of the returned PTable is a tuple whose elements are PCollections;
the n-th PCollection holds all the data of the n-th input PCollection under the current key.
If an input PCollection has no data under some key, the corresponding PCollection is empty.
Currently a key_extractor cannot be specified the way group_by allows.
group_by_key can be understood as the special case of cogroup with a single argument.
Args:
*pcollections: input PCollections
**options: configuration options
Returns:
PTable: the grouping result
>>> from bigflow import transforms
>>> _p1 = _pipeline.parallelize([("A", 1), ("A", 2), ("B", 3)])
>>> _p2 = _pipeline.parallelize([("A", 4)])
>>> _p = transforms.cogroup(_p1, _p2)
# _p equals {"A": ([1, 2], [4]), "B": ([3], [])}, but get() is not yet supported when the PTable values are tuples of PCollections because that is hard to implement.
>>> _p.apply_values(lambda x, y: transforms.union(x, y)).get()
{"A": [1, 2, 4], "B": [3]}
>>> def distinct_and_join(p, q): # deduplicate, then join
... return p.cogroup(q) \\
... .apply_values(lambda a, b: (a.distinct(), b.distinct())) \\
... .apply_values(transforms.cartesian) \\
... .flatten()
>>> _p1 = _pipeline.parallelize([("A", 1), ("A", 2), ("A", 1), ("C", 1)])
>>> _p2 = _pipeline.parallelize([("A", 3), ("A", 3), ("B", 2)])
>>> print distinct_and_join(_p1, _p2).get()
[("A", (1, 3)), ("A", (2, 3))]
>>> # In the future bigflow will automatically optimize p.distinct().join(q.distinct()) into the form above (work in progress)
>>> def semi_join(p, q): # emit only one join result per key
... return p.cogroup(q) \\
... .apply_values(lambda a, b: (a.take(1), b.take(1))) \\
... .apply_values(transforms.cartesian) \\
... .flatten()
>>> print semi_join(_p1, _p2).get()
[("A", (1, 3))]
"""
import bigflow.transform_impls.cogroup
return bigflow.transform_impls.cogroup.cogroup(*pcollections, **options) | [
"def",
"cogroup",
"(",
"*",
"pcollections",
",",
"*",
"*",
"options",
")",
":",
"import",
"bigflow",
".",
"transform_impls",
".",
"cogroup",
"return",
"bigflow",
".",
"transform_impls",
".",
"cogroup",
".",
"cogroup",
"(",
"*",
"pcollections",
",",
"*",
"*",
"options",
")"
] | https://github.com/baidu/bigflow/blob/449245016c0df7d1252e85581e588bfc60cefad3/bigflow_python/python/bigflow/transforms.py#L146-L198 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/xml/sax/xmlreader.py | python | XMLReader.getEntityResolver | (self) | return self._ent_handler | Returns the current EntityResolver. | Returns the current EntityResolver. | [
"Returns",
"the",
"current",
"EntityResolver",
"."
] | def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler | [
"def",
"getEntityResolver",
"(",
"self",
")",
":",
"return",
"self",
".",
"_ent_handler"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/xml/sax/xmlreader.py#L50-L52 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/asyncio/sslproto.py | python | _SSLProtocolTransport.get_write_buffer_size | (self) | return self._ssl_protocol._transport.get_write_buffer_size() | Return the current size of the write buffer. | Return the current size of the write buffer. | [
"Return",
"the",
"current",
"size",
"of",
"the",
"write",
"buffer",
"."
] | def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
return self._ssl_protocol._transport.get_write_buffer_size() | [
"def",
"get_write_buffer_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"_ssl_protocol",
".",
"_transport",
".",
"get_write_buffer_size",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/asyncio/sslproto.py#L366-L368 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/pydoc.py | python | HTMLDoc.markup | (self, text, escape=None, funcs={}, classes={}, methods={}) | return ''.join(results) | Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names. | Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names. | [
"Mark",
"up",
"some",
"plain",
"text",
"given",
"a",
"context",
"of",
"symbols",
"to",
"look",
"for",
".",
"Each",
"context",
"dictionary",
"maps",
"object",
"names",
"to",
"anchor",
"names",
"."
] | def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results) | [
"def",
"markup",
"(",
"self",
",",
"text",
",",
"escape",
"=",
"None",
",",
"funcs",
"=",
"{",
"}",
",",
"classes",
"=",
"{",
"}",
",",
"methods",
"=",
"{",
"}",
")",
":",
"escape",
"=",
"escape",
"or",
"self",
".",
"escape",
"results",
"=",
"[",
"]",
"here",
"=",
"0",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'\\b((http|ftp)://\\S+[\\w/]|'",
"r'RFC[- ]?(\\d+)|'",
"r'PEP[- ]?(\\d+)|'",
"r'(self\\.)?(\\w+))'",
")",
"while",
"True",
":",
"match",
"=",
"pattern",
".",
"search",
"(",
"text",
",",
"here",
")",
"if",
"not",
"match",
":",
"break",
"start",
",",
"end",
"=",
"match",
".",
"span",
"(",
")",
"results",
".",
"append",
"(",
"escape",
"(",
"text",
"[",
"here",
":",
"start",
"]",
")",
")",
"all",
",",
"scheme",
",",
"rfc",
",",
"pep",
",",
"selfdot",
",",
"name",
"=",
"match",
".",
"groups",
"(",
")",
"if",
"scheme",
":",
"url",
"=",
"escape",
"(",
"all",
")",
".",
"replace",
"(",
"'\"'",
",",
"'"'",
")",
"results",
".",
"append",
"(",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"url",
",",
"url",
")",
")",
"elif",
"rfc",
":",
"url",
"=",
"'http://www.rfc-editor.org/rfc/rfc%d.txt'",
"%",
"int",
"(",
"rfc",
")",
"results",
".",
"append",
"(",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"url",
",",
"escape",
"(",
"all",
")",
")",
")",
"elif",
"pep",
":",
"url",
"=",
"'http://www.python.org/dev/peps/pep-%04d/'",
"%",
"int",
"(",
"pep",
")",
"results",
".",
"append",
"(",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"url",
",",
"escape",
"(",
"all",
")",
")",
")",
"elif",
"selfdot",
":",
"# Create a link for methods like 'self.method(...)'",
"# and use <strong> for attributes like 'self.attr'",
"if",
"text",
"[",
"end",
":",
"end",
"+",
"1",
"]",
"==",
"'('",
":",
"results",
".",
"append",
"(",
"'self.'",
"+",
"self",
".",
"namelink",
"(",
"name",
",",
"methods",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"'self.<strong>%s</strong>'",
"%",
"name",
")",
"elif",
"text",
"[",
"end",
":",
"end",
"+",
"1",
"]",
"==",
"'('",
":",
"results",
".",
"append",
"(",
"self",
".",
"namelink",
"(",
"name",
",",
"methods",
",",
"funcs",
",",
"classes",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"self",
".",
"namelink",
"(",
"name",
",",
"classes",
")",
")",
"here",
"=",
"end",
"results",
".",
"append",
"(",
"escape",
"(",
"text",
"[",
"here",
":",
"]",
")",
")",
"return",
"''",
".",
"join",
"(",
"results",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/pydoc.py#L587-L626 |
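A short example of markup turning plain-text references into HTML links; HTMLDoc ships with the standard library's pydoc:

import pydoc

doc = pydoc.HTMLDoc()
print(doc.markup('See RFC 2822 and https://www.python.org for details.'))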
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/aui/auibook.py | python | AuiNotebook.RemovePage | (self, page_idx) | return True | Removes a page, without deleting the window pointer.
:param integer `page_idx`: the page index to be removed.
:note:
:meth:`RemovePage` removes a tab from the multi-notebook, but does not destroy the window.
:see: :meth:`DeletePage` | Removes a page, without deleting the window pointer. | [
"Removes",
"a",
"page",
"without",
"deleting",
"the",
"window",
"pointer",
"."
] | def RemovePage(self, page_idx):
"""
Removes a page, without deleting the window pointer.
:param integer `page_idx`: the page index to be removed.
:note:
:meth:`RemovePage` removes a tab from the multi-notebook, but does not destroy the window.
:see: :meth:`DeletePage`
"""
# save active window pointer
active_wnd = None
if self._curpage >= 0:
active_wnd = self._tabs.GetWindowFromIdx(self._curpage)
# save pointer of window being deleted
wnd = self._tabs.GetWindowFromIdx(page_idx)
new_active = None
# make sure we found the page
if not wnd:
return False
# find out which onscreen tab ctrl owns this tab
ctrl, ctrl_idx = self.FindTab(wnd)
if not ctrl:
return False
currentPage = ctrl.GetPage(ctrl_idx)
is_curpage = (self._curpage == page_idx)
is_active_in_split = currentPage.active
# remove the tab from main catalog
if not self._tabs.RemovePage(wnd):
return False
# remove the tab from the onscreen tab ctrl
ctrl.RemovePage(wnd)
if is_active_in_split:
ctrl_new_page_count = ctrl.GetPageCount()
if ctrl_idx >= ctrl_new_page_count:
ctrl_idx = ctrl_new_page_count - 1
if ctrl_idx >= 0 and ctrl_idx < ctrl.GetPageCount():
ctrl_idx = self.FindNextActiveTab(ctrl_idx, ctrl)
# set new page as active in the tab split
ctrl.SetActivePage(ctrl_idx)
# if the page deleted was the current page for the
# entire tab control, then record the window
# pointer of the new active page for activation
if is_curpage:
new_active = ctrl.GetWindowFromIdx(ctrl_idx)
else:
# we are not deleting the active page, so keep it the same
new_active = active_wnd
if not new_active:
# we haven't yet found a new page to active,
# so select the next page from the main tab
# catalogue
if 0 <= page_idx < self._tabs.GetPageCount():
new_active = self._tabs.GetPage(page_idx).window
if not new_active and self._tabs.GetPageCount() > 0:
new_active = self._tabs.GetPage(0).window
self.RemoveEmptyTabFrames()
# set new active pane
if new_active:
if not self.IsBeingDeleted():
self._curpage = -1
self.SetSelectionToWindow(new_active)
else:
self._curpage = -1
self._tabs.SetNoneActive()
return True | [
"def",
"RemovePage",
"(",
"self",
",",
"page_idx",
")",
":",
"# save active window pointer",
"active_wnd",
"=",
"None",
"if",
"self",
".",
"_curpage",
">=",
"0",
":",
"active_wnd",
"=",
"self",
".",
"_tabs",
".",
"GetWindowFromIdx",
"(",
"self",
".",
"_curpage",
")",
"# save pointer of window being deleted",
"wnd",
"=",
"self",
".",
"_tabs",
".",
"GetWindowFromIdx",
"(",
"page_idx",
")",
"new_active",
"=",
"None",
"# make sure we found the page",
"if",
"not",
"wnd",
":",
"return",
"False",
"# find out which onscreen tab ctrl owns this tab",
"ctrl",
",",
"ctrl_idx",
"=",
"self",
".",
"FindTab",
"(",
"wnd",
")",
"if",
"not",
"ctrl",
":",
"return",
"False",
"currentPage",
"=",
"ctrl",
".",
"GetPage",
"(",
"ctrl_idx",
")",
"is_curpage",
"=",
"(",
"self",
".",
"_curpage",
"==",
"page_idx",
")",
"is_active_in_split",
"=",
"currentPage",
".",
"active",
"# remove the tab from main catalog",
"if",
"not",
"self",
".",
"_tabs",
".",
"RemovePage",
"(",
"wnd",
")",
":",
"return",
"False",
"# remove the tab from the onscreen tab ctrl",
"ctrl",
".",
"RemovePage",
"(",
"wnd",
")",
"if",
"is_active_in_split",
":",
"ctrl_new_page_count",
"=",
"ctrl",
".",
"GetPageCount",
"(",
")",
"if",
"ctrl_idx",
">=",
"ctrl_new_page_count",
":",
"ctrl_idx",
"=",
"ctrl_new_page_count",
"-",
"1",
"if",
"ctrl_idx",
">=",
"0",
"and",
"ctrl_idx",
"<",
"ctrl",
".",
"GetPageCount",
"(",
")",
":",
"ctrl_idx",
"=",
"self",
".",
"FindNextActiveTab",
"(",
"ctrl_idx",
",",
"ctrl",
")",
"# set new page as active in the tab split",
"ctrl",
".",
"SetActivePage",
"(",
"ctrl_idx",
")",
"# if the page deleted was the current page for the",
"# entire tab control, then record the window",
"# pointer of the new active page for activation",
"if",
"is_curpage",
":",
"new_active",
"=",
"ctrl",
".",
"GetWindowFromIdx",
"(",
"ctrl_idx",
")",
"else",
":",
"# we are not deleting the active page, so keep it the same",
"new_active",
"=",
"active_wnd",
"if",
"not",
"new_active",
":",
"# we haven't yet found a new page to active,",
"# so select the next page from the main tab",
"# catalogue",
"if",
"0",
"<=",
"page_idx",
"<",
"self",
".",
"_tabs",
".",
"GetPageCount",
"(",
")",
":",
"new_active",
"=",
"self",
".",
"_tabs",
".",
"GetPage",
"(",
"page_idx",
")",
".",
"window",
"if",
"not",
"new_active",
"and",
"self",
".",
"_tabs",
".",
"GetPageCount",
"(",
")",
">",
"0",
":",
"new_active",
"=",
"self",
".",
"_tabs",
".",
"GetPage",
"(",
"0",
")",
".",
"window",
"self",
".",
"RemoveEmptyTabFrames",
"(",
")",
"# set new active pane",
"if",
"new_active",
":",
"if",
"not",
"self",
".",
"IsBeingDeleted",
"(",
")",
":",
"self",
".",
"_curpage",
"=",
"-",
"1",
"self",
".",
"SetSelectionToWindow",
"(",
"new_active",
")",
"else",
":",
"self",
".",
"_curpage",
"=",
"-",
"1",
"self",
".",
"_tabs",
".",
"SetNoneActive",
"(",
")",
"return",
"True"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/auibook.py#L3527-L3616 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/xrc.py | python | XmlProperty.__init__ | (self, *args, **kwargs) | __init__(self, String name=EmptyString, String value=EmptyString,
XmlProperty next=None) -> XmlProperty | __init__(self, String name=EmptyString, String value=EmptyString,
XmlProperty next=None) -> XmlProperty | [
"__init__",
"(",
"self",
"String",
"name",
"=",
"EmptyString",
"String",
"value",
"=",
"EmptyString",
"XmlProperty",
"next",
"=",
"None",
")",
"-",
">",
"XmlProperty"
] | def __init__(self, *args, **kwargs):
"""
__init__(self, String name=EmptyString, String value=EmptyString,
XmlProperty next=None) -> XmlProperty
"""
_xrc.XmlProperty_swiginit(self,_xrc.new_XmlProperty(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_xrc",
".",
"XmlProperty_swiginit",
"(",
"self",
",",
"_xrc",
".",
"new_XmlProperty",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/xrc.py#L310-L315 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/decimal.py | python | Context.to_integral_value | (self, a) | return a.to_integral_value(context=self) | Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity') | Rounds to an integer. | [
"Rounds",
"to",
"an",
"integer",
"."
] | def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self) | [
"def",
"to_integral_value",
"(",
"self",
",",
"a",
")",
":",
"a",
"=",
"_convert_other",
"(",
"a",
",",
"raiseit",
"=",
"True",
")",
"return",
"a",
".",
"to_integral_value",
"(",
"context",
"=",
"self",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/decimal.py#L5406-L5433 |
|
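The doctests above pin down the rounding behaviour; the vendored module mirrors the stdlib `decimal`, so the same operation can be checked standalone — note how the result tracks the context's rounding mode:

```python
from decimal import Context, Decimal, ROUND_HALF_EVEN, ROUND_FLOOR

# to_integral_value() rounds to an integer-valued Decimal without
# setting flags; the rounding mode comes from the context.
ctx = Context(rounding=ROUND_HALF_EVEN)
print(ctx.to_integral_value(Decimal("101.5")))    # Decimal('102')
print(ctx.to_integral_value(Decimal("-101.5")))   # Decimal('-102')

ctx.rounding = ROUND_FLOOR
print(ctx.to_integral_value(Decimal("101.5")))    # Decimal('101')

# Non-negative exponents pass through unchanged.
print(ctx.to_integral_value(Decimal("10E+5")))    # Decimal('1.0E+6')
```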
NGSolve/ngsolve | 4a22558b6d5852f3d7e6cd86f1233b1ad716f395 | python/krylovspace.py | python | MinRes | (mat, rhs, pre=None, sol=None, maxsteps = 100, printrates = True, initialize = True, tol = 1e-7) | return MinResSolver(mat=mat, pre=pre, maxiter=maxsteps,
printrates=printrates,
tol=tol).Solve(rhs=rhs, sol=sol,
initialize=initialize) | Minimal Residuum method
Parameters
----------
mat : Matrix
The left hand side of the equation to solve
rhs : Vector
The right hand side of the equation.
pre : Preconditioner
If provided the preconditioner is used.
sol : Vector
Start vector for MinRes method, if initialize is set False. Gets overwritten by the solution vector. If sol = None then a new vector is created.
maxsteps : int
Number of maximal steps for MinRes. If the maximal number is reached before the tolerance is reached MinRes stops.
printrates : bool
If set to True then the error of the iterations is displayed.
initialize : bool
If set to True then the initial guess for the MinRes method is set to zero. Otherwise the values of the vector sol, if provided, are used.
tol : double
Tolerance of the residuum. MinRes stops if tolerance is reached.
Returns
-------
(vector)
Solution vector of the MinRes method. | Minimal Residuum method | [
"Minimal",
"Residuum",
"method"
] | def MinRes(mat, rhs, pre=None, sol=None, maxsteps = 100, printrates = True, initialize = True, tol = 1e-7):
"""Minimal Residuum method
Parameters
----------
mat : Matrix
The left hand side of the equation to solve
rhs : Vector
The right hand side of the equation.
pre : Preconditioner
If provided the preconditioner is used.
sol : Vector
Start vector for MinRes method, if initialize is set False. Gets overwritten by the solution vector. If sol = None then a new vector is created.
maxsteps : int
Number of maximal steps for MinRes. If the maximal number is reached before the tolerance is reached MinRes stops.
printrates : bool
If set to True then the error of the iterations is displayed.
initialize : bool
If set to True then the initial guess for the MinRes method is set to zero. Otherwise the values of the vector sol, if provided, are used.
tol : double
Tolerance of the residuum. MinRes stops if tolerance is reached.
Returns
-------
(vector)
Solution vector of the MinRes method.
"""
return MinResSolver(mat=mat, pre=pre, maxiter=maxsteps,
printrates=printrates,
tol=tol).Solve(rhs=rhs, sol=sol,
initialize=initialize) | [
"def",
"MinRes",
"(",
"mat",
",",
"rhs",
",",
"pre",
"=",
"None",
",",
"sol",
"=",
"None",
",",
"maxsteps",
"=",
"100",
",",
"printrates",
"=",
"True",
",",
"initialize",
"=",
"True",
",",
"tol",
"=",
"1e-7",
")",
":",
"return",
"MinResSolver",
"(",
"mat",
"=",
"mat",
",",
"pre",
"=",
"pre",
",",
"maxiter",
"=",
"maxsteps",
",",
"printrates",
"=",
"printrates",
",",
"tol",
"=",
"tol",
")",
".",
"Solve",
"(",
"rhs",
"=",
"rhs",
",",
"sol",
"=",
"sol",
",",
"initialize",
"=",
"initialize",
")"
] | https://github.com/NGSolve/ngsolve/blob/4a22558b6d5852f3d7e6cd86f1233b1ad716f395/python/krylovspace.py#L558-L599 |
|
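Going by the signature documented above, a call from an NGSolve script looks roughly like the sketch below; `a`, `f`, and `c` are hypothetical stand-ins for an assembled BilinearForm, LinearForm, and Preconditioner, and the import path is inferred from the file's location in the repo:

```python
# Sketch only -- assumes an already assembled NGSolve problem:
#   a : BilinearForm (symmetric), f : LinearForm, c : Preconditioner
from ngsolve.krylovspace import MinRes

sol = MinRes(mat=a.mat,           # left-hand side operator
             rhs=f.vec,           # right-hand side vector
             pre=c.mat,           # optional preconditioner
             maxsteps=200,        # iteration cap
             tol=1e-8,            # residual tolerance
             printrates=False)    # silence per-iteration output
# Pass sol=... together with initialize=False to reuse an existing
# vector as the starting guess; it is overwritten with the solution.
```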
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/fx/experimental/fx2trt/converters/converter_utils.py | python | get_axes_for_reduce_op | (
dim: Union[int, Sequence[int]],
has_implicit_batch_dimension: bool,
) | return axes | TensorRT reduce layer relies on the binary representation of axes to
determine which dims to reduce. For example, if we want to reduce on
dim 1 and 2 then axes should be 6(110).
Args:
dim (Union[int, Sequence[int]]): An integer or a sequence of integers
that will be used to generate axes for TensorRT.
has_implicit_batch_dimension (bool): Whether the TensorRT network is
using implicit batch dimension.
Returns:
An integer which binary form can be used as axes for TensorRT reduce
layer. | TensorRT reduce layer relies on the binary representation of axes to
determine which dims to reduce. For example, if we want to reduce on
dim 1 and 2 then axes should be 6(110). | [
"TensorRT",
"reduce",
"layer",
"relies",
"on",
"the",
"binary",
"representation",
"of",
"axes",
"to",
"determine",
"which",
"dims",
"to",
"reduce",
".",
"For",
"example",
"if",
"we",
"want",
"to",
"reduce",
"on",
"dim",
"1",
"and",
"2",
"then",
"axes",
"should",
"be",
"6",
"(",
"110",
")",
"."
] | def get_axes_for_reduce_op(
dim: Union[int, Sequence[int]],
has_implicit_batch_dimension: bool,
) -> int:
"""
TensorRT reduce layer relies on the binary representation of axes to
determine which dims to reduce. For example, if we want to reduce on
dim 1 and 2 then axes should be 6(110).
Args:
dim (Union[int, Sequence[int]]): An integer or a sequence of integers
that will be used to generate axes for TensorRT.
has_implicit_batch_dimension (bool): Whether the TensorRT network is
using implicit batch dimension.
Returns:
An integer which binary form can be used as axes for TensorRT reduce
layer.
"""
if isinstance(dim, int):
dim = (dim,)
if has_implicit_batch_dimension:
assert 0 not in dim, "Can't reduce over batch dimension when it's implicit."
axes = 0
for d in dim:
axes |= 1 << (d - (1 if has_implicit_batch_dimension else 0))
return axes | [
"def",
"get_axes_for_reduce_op",
"(",
"dim",
":",
"Union",
"[",
"int",
",",
"Sequence",
"[",
"int",
"]",
"]",
",",
"has_implicit_batch_dimension",
":",
"bool",
",",
")",
"->",
"int",
":",
"if",
"isinstance",
"(",
"dim",
",",
"int",
")",
":",
"dim",
"=",
"(",
"dim",
",",
")",
"if",
"has_implicit_batch_dimension",
":",
"assert",
"0",
"not",
"in",
"dim",
",",
"\"Can't reduce over batch dimension when it's implicit.\"",
"axes",
"=",
"0",
"for",
"d",
"in",
"dim",
":",
"axes",
"|=",
"1",
"<<",
"(",
"d",
"-",
"(",
"1",
"if",
"has_implicit_batch_dimension",
"else",
"0",
")",
")",
"return",
"axes"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/fx/experimental/fx2trt/converters/converter_utils.py#L135-L164 |
|
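The bitmask logic is independent of TensorRT, so the axes values can be re-derived with plain integers:

```python
def axes_bits(dims, implicit_batch=False):
    # Mirror of the helper above: set bit d for every dim to reduce,
    # shifting down by one when the batch dimension is implicit.
    if isinstance(dims, int):
        dims = (dims,)
    axes = 0
    for d in dims:
        axes |= 1 << (d - (1 if implicit_batch else 0))
    return axes

print(axes_bits((1, 2)))                       # 6, i.e. binary 110
print(axes_bits((1, 2), implicit_batch=True))  # 3, i.e. binary 011
print(bin(axes_bits(3)))                       # 0b1000
```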
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/compat/_inspect.py | python | strseq | (object, convert, join=joinseq) | Recursively walk a sequence, stringifying each element. | Recursively walk a sequence, stringifying each element. | [
"Recursively",
"walk",
"a",
"sequence",
"stringifying",
"each",
"element",
"."
] | def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
if type(object) in [list, tuple]:
return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object) | [
"def",
"strseq",
"(",
"object",
",",
"convert",
",",
"join",
"=",
"joinseq",
")",
":",
"if",
"type",
"(",
"object",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"return",
"join",
"(",
"[",
"strseq",
"(",
"_o",
",",
"convert",
",",
"join",
")",
"for",
"_o",
"in",
"object",
"]",
")",
"else",
":",
"return",
"convert",
"(",
"object",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/compat/_inspect.py#L152-L157 |
||
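A quick demonstration of the recursive walk, with a plain comma join standing in for the module's `joinseq` default:

```python
def strseq(obj, convert, join=", ".join):
    # Recursively stringify; nested lists/tuples are flattened into
    # one joined string, everything else goes through convert().
    if type(obj) in [list, tuple]:
        return join([strseq(o, convert, join) for o in obj])
    return convert(obj)

print(strseq([1, (2, 3), [4]], str))   # 1, 2, 3, 4
print(strseq(42, repr))                # 42
```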
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/framework/errors.py | python | FailedPreconditionError.__init__ | (self, node_def, op, message) | Creates a `FailedPreconditionError`. | Creates a `FailedPreconditionError`. | [
"Creates",
"a",
"FailedPreconditionError",
"."
] | def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION) | [
"def",
"__init__",
"(",
"self",
",",
"node_def",
",",
"op",
",",
"message",
")",
":",
"super",
"(",
"FailedPreconditionError",
",",
"self",
")",
".",
"__init__",
"(",
"node_def",
",",
"op",
",",
"message",
",",
"FAILED_PRECONDITION",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/framework/errors.py#L310-L313 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/xrc.py | python | XmlResource.InsertHandler | (*args, **kwargs) | return _xrc.XmlResource_InsertHandler(*args, **kwargs) | InsertHandler(self, XmlResourceHandler handler) | InsertHandler(self, XmlResourceHandler handler) | [
"InsertHandler",
"(",
"self",
"XmlResourceHandler",
"handler",
")"
] | def InsertHandler(*args, **kwargs):
"""InsertHandler(self, XmlResourceHandler handler)"""
return _xrc.XmlResource_InsertHandler(*args, **kwargs) | [
"def",
"InsertHandler",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_xrc",
".",
"XmlResource_InsertHandler",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/xrc.py#L106-L108 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.LineScroll | (*args, **kwargs) | return _stc.StyledTextCtrl_LineScroll(*args, **kwargs) | LineScroll(self, int columns, int lines)
Scroll horizontally and vertically. | LineScroll(self, int columns, int lines) | [
"LineScroll",
"(",
"self",
"int",
"columns",
"int",
"lines",
")"
] | def LineScroll(*args, **kwargs):
"""
LineScroll(self, int columns, int lines)
Scroll horizontally and vertically.
"""
return _stc.StyledTextCtrl_LineScroll(*args, **kwargs) | [
"def",
"LineScroll",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_LineScroll",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L3617-L3623 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py | python | LoopbackSerial._reconfigurePort | (self) | Set communication parameters on opened port. for the loop://
protocol all settings are ignored! | Set communication parameters on opened port. for the loop://
protocol all settings are ignored! | [
"Set",
"communication",
"parameters",
"on",
"opened",
"port",
".",
"for",
"the",
"loop",
":",
"//",
"protocol",
"all",
"settings",
"are",
"ignored!"
] | def _reconfigurePort(self):
"""Set communication parameters on opened port. for the loop://
protocol all settings are ignored!"""
# not that it's of any real use, but it helps in the unit tests
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
if self.logger:
self.logger.info('_reconfigurePort()') | [
"def",
"_reconfigurePort",
"(",
"self",
")",
":",
"# not that's it of any real use, but it helps in the unit tests",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_baudrate",
",",
"(",
"int",
",",
"long",
")",
")",
"or",
"not",
"0",
"<",
"self",
".",
"_baudrate",
"<",
"2",
"**",
"32",
":",
"raise",
"ValueError",
"(",
"\"invalid baudrate: %r\"",
"%",
"(",
"self",
".",
"_baudrate",
")",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'_reconfigurePort()'",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py#L65-L72 |
||
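In practice the `loop://` handler is reached through `serial_for_url`, which echoes written bytes back to the read side — which is exactly why `_reconfigurePort` can ignore every setting beyond basic validation. A small round-trip, assuming pyserial is installed:

```python
import serial  # pip install pyserial

port = serial.serial_for_url("loop://", baudrate=9600, timeout=1)
port.write(b"ping")
print(port.read(4))        # b'ping' -- written bytes come straight back

# The one check the handler does perform: baudrate must be sane.
try:
    serial.serial_for_url("loop://", baudrate=-1)
except ValueError as exc:
    print("rejected:", exc)
```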
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/series.py | python | Series.__array_prepare__ | (self, result, context=None) | return result | Gets called prior to a ufunc. | Gets called prior to a ufunc. | [
"Gets",
"called",
"prior",
"to",
"a",
"ufunc",
"."
] | def __array_prepare__(self, result, context=None):
"""
Gets called prior to a ufunc.
"""
# nice error message for non-ufunc types
if (context is not None and
(not isinstance(self._values, (np.ndarray, ExtensionArray))
or isinstance(self._values, Categorical))):
obj = context[1][0]
raise TypeError("{obj} with dtype {dtype} cannot perform "
"the numpy op {op}".format(
obj=type(obj).__name__,
dtype=getattr(obj, 'dtype', None),
op=context[0].__name__))
return result | [
"def",
"__array_prepare__",
"(",
"self",
",",
"result",
",",
"context",
"=",
"None",
")",
":",
"# nice error message for non-ufunc types",
"if",
"(",
"context",
"is",
"not",
"None",
"and",
"(",
"not",
"isinstance",
"(",
"self",
".",
"_values",
",",
"(",
"np",
".",
"ndarray",
",",
"ExtensionArray",
")",
")",
"or",
"isinstance",
"(",
"self",
".",
"_values",
",",
"Categorical",
")",
")",
")",
":",
"obj",
"=",
"context",
"[",
"1",
"]",
"[",
"0",
"]",
"raise",
"TypeError",
"(",
"\"{obj} with dtype {dtype} cannot perform \"",
"\"the numpy op {op}\"",
".",
"format",
"(",
"obj",
"=",
"type",
"(",
"obj",
")",
".",
"__name__",
",",
"dtype",
"=",
"getattr",
"(",
"obj",
",",
"'dtype'",
",",
"None",
")",
",",
"op",
"=",
"context",
"[",
"0",
"]",
".",
"__name__",
")",
")",
"return",
"result"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/series.py#L737-L752 |
|
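The guard makes numpy ufuncs fail loudly on extension-backed Series instead of silently producing garbage. With a pandas of this vintage (0.24-era) the error can be provoked roughly like this — a sketch, since newer pandas routes ufuncs differently:

```python
import numpy as np
import pandas as pd

s = pd.Series(pd.Categorical([1, 2, 3]))
try:
    np.exp(s)   # ufunc on a Categorical-backed Series
except TypeError as exc:
    # e.g. "Series with dtype category cannot perform the numpy op exp"
    print(exc)
```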
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/deps/v8/third_party/jinja2/environment.py | python | Environment.get_template | (self, name, parent=None, globals=None) | return self._load_template(name, self.make_globals(globals)) | Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged. | Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading. | [
"Load",
"a",
"template",
"from",
"the",
"loader",
".",
"If",
"a",
"loader",
"is",
"configured",
"this",
"method",
"asks",
"the",
"loader",
"for",
"the",
"template",
"and",
"returns",
"a",
":",
"class",
":",
"Template",
".",
"If",
"the",
"parent",
"parameter",
"is",
"not",
"None",
":",
"meth",
":",
"join_path",
"is",
"called",
"to",
"get",
"the",
"real",
"template",
"name",
"before",
"loading",
"."
] | def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals)) | [
"def",
"get_template",
"(",
"self",
",",
"name",
",",
"parent",
"=",
"None",
",",
"globals",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"Template",
")",
":",
"return",
"name",
"if",
"parent",
"is",
"not",
"None",
":",
"name",
"=",
"self",
".",
"join_path",
"(",
"name",
",",
"parent",
")",
"return",
"self",
".",
"_load_template",
"(",
"name",
",",
"self",
".",
"make_globals",
"(",
"globals",
")",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/v8/third_party/jinja2/environment.py#L810-L830 |
|
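End to end, the loader/`get_template` round trip documented here amounts to:

```python
from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    "base.html":  "Hello {{ name }}!",
    "child.html": "{% include 'base.html' %} Bye.",
}))

# get_template() asks the loader, caches the compiled Template, and
# raises TemplateNotFound for unknown names.
tmpl = env.get_template("child.html", globals={"name": "world"})
print(tmpl.render())                  # Hello world! Bye.

# A Template object passed as `name` comes back unchanged (2.4+).
assert env.get_template(tmpl) is tmpl
```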
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/robotsim.py | python | Mass.getMass | (self) | return _robotsim.Mass_getMass(self) | r""" | r""" | [
"r"
] | def getMass(self) ->float:
r"""
"""
return _robotsim.Mass_getMass(self) | [
"def",
"getMass",
"(",
"self",
")",
"->",
"float",
":",
"return",
"_robotsim",
".",
"Mass_getMass",
"(",
"self",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/robotsim.py#L3783-L3786 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/train/callback/_summary_collector.py | python | SummaryCollector._save_metadata | (self, step_per_epoch, unit, num_samples, landscape_size, create_landscape) | Save meta data to json file. | Save meta data to json file. | [
"Save",
"meta",
"data",
"to",
"json",
"file",
"."
] | def _save_metadata(self, step_per_epoch, unit, num_samples, landscape_size, create_landscape):
"""Save meta data to json file."""
data = {
"epoch_group": self._epoch_group,
"model_params_file_map": self._model_params_file_map,
"step_per_epoch": step_per_epoch,
"unit": unit,
"num_samples": num_samples,
"landscape_size": landscape_size,
"create_landscape": create_landscape
}
meta_path = os.path.join(self._ckpt_dir, 'train_metadata.json')
try:
with open(meta_path, 'w') as file:
json.dump(data, file)
os.chmod(meta_path, stat.S_IRUSR)
except OSError as e:
logger.error("Write meta data %s failed, detail: %s" % (meta_path, str(e))) | [
"def",
"_save_metadata",
"(",
"self",
",",
"step_per_epoch",
",",
"unit",
",",
"num_samples",
",",
"landscape_size",
",",
"create_landscape",
")",
":",
"data",
"=",
"{",
"\"epoch_group\"",
":",
"self",
".",
"_epoch_group",
",",
"\"model_params_file_map\"",
":",
"self",
".",
"_model_params_file_map",
",",
"\"step_per_epoch\"",
":",
"step_per_epoch",
",",
"\"unit\"",
":",
"unit",
",",
"\"num_samples\"",
":",
"num_samples",
",",
"\"landscape_size\"",
":",
"landscape_size",
",",
"\"create_landscape\"",
":",
"create_landscape",
"}",
"meta_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_ckpt_dir",
",",
"'train_metadata.json'",
")",
"try",
":",
"with",
"open",
"(",
"meta_path",
",",
"'w'",
")",
"as",
"file",
":",
"json",
".",
"dump",
"(",
"data",
",",
"file",
")",
"os",
".",
"chmod",
"(",
"meta_path",
",",
"stat",
".",
"S_IRUSR",
")",
"except",
"OSError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Write meta data %s failed, detail: %s\"",
"%",
"(",
"meta_path",
",",
"str",
"(",
"e",
")",
")",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/train/callback/_summary_collector.py#L575-L592 |
||
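The write pattern — dump JSON, then drop the file to owner-read-only, logging rather than raising on I/O failure — is reusable on its own:

```python
import json
import os
import stat

def save_metadata(path, data):
    # Same shape as the method above, with print() standing in for
    # MindSpore's logger.error().
    try:
        with open(path, "w") as fh:
            json.dump(data, fh)
        os.chmod(path, stat.S_IRUSR)   # owner read-only from now on
    except OSError as exc:
        print("Write meta data %s failed, detail: %s" % (path, exc))

save_metadata("train_metadata.json",
              {"step_per_epoch": 100, "unit": "epoch", "num_samples": 2048})
```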
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_controls.py | python | TreeCtrl.IsVisible | (*args, **kwargs) | return _controls_.TreeCtrl_IsVisible(*args, **kwargs) | IsVisible(self, TreeItemId item) -> bool | IsVisible(self, TreeItemId item) -> bool | [
"IsVisible",
"(",
"self",
"TreeItemId",
"item",
")",
"-",
">",
"bool"
] | def IsVisible(*args, **kwargs):
"""IsVisible(self, TreeItemId item) -> bool"""
return _controls_.TreeCtrl_IsVisible(*args, **kwargs) | [
"def",
"IsVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TreeCtrl_IsVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L5331-L5333 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/ops/operations/nn_ops.py | python | MaxPool3D.__init__ | (self, kernel_size=1, strides=1, pad_mode="VALID", pad_list=0, ceil_mode=None, data_format="NCDHW") | Initialize MaxPool3D. | Initialize MaxPool3D. | [
"Initialize",
"MaxPool3D",
"."
] | def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", pad_list=0, ceil_mode=None, data_format="NCDHW"):
"""Initialize MaxPool3D."""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
validator.check_value_type('pad_mode', pad_mode, [str], self.name)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME', 'PAD'], 'pad_mode', self.name)
if pad_mode.upper() == "PAD":
self.pad_mode = "CALCULATED"
self.add_prim_attr("pad_mode", self.pad_mode)
self.data_format = validator.check_string(data_format, ['NCDHW'], 'data_format', self.name)
self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name,
allow_five=False, ret_five=True)
self.add_prim_attr("kernel_size", self.kernel_size)
self.strides = _check_3d_int_or_tuple("strides", strides, self.name, allow_five=False, ret_five=True)
self.add_prim_attr("strides", self.strides)
if ceil_mode is None:
self.ceil_mode = not self.pad_mode == "CALCULATED"
else:
self.ceil_mode = validator.check_value_type('ceil_mode', ceil_mode, [bool], self.name)
if self.pad_mode != "CALCULATED":
raise ValueError("When the 'pad_mode' is 'same' or 'valid', the 'ceil_mode' only supports 'None'.")
self.add_prim_attr("ceil_mode", int(self.ceil_mode))
validator.check_value_type('pad_list', pad_list, (int, tuple), self.name)
self.pad_list = pad_list
if isinstance(self.pad_list, int):
self.pad_list = (self.pad_list,) * 6
if len(self.pad_list) == 3:
self.pad_list = (pad_list[0], pad_list[0], pad_list[1], pad_list[1], pad_list[2], pad_list[2])
if len(self.pad_list) != 3 and len(self.pad_list) != 6:
raise ValueError(f"For '{self.name}', attr 'pad_list' should be an positive int number or a tuple of "
f"three or six positive int numbers, but got {len(self.pad_list)} numbers.")
if self.pad_mode != 'CALCULATED' and self.pad_list != (0, 0, 0, 0, 0, 0):
raise ValueError(f"For '{self.name}', the 'pad_list' must be zero or (0, 0, 0, 0, 0, 0) when 'pad_mode' "
f"is not \"pad\", but got 'pad_list' is {pad_list} and 'pad_mode' is {pad_mode}.")
if self.pad_mode == 'CALCULATED':
for item in self.pad_list:
validator.check_non_negative_int(item, 'pad_list item', self.name)
self.add_prim_attr("pad_list", self.pad_list) | [
"def",
"__init__",
"(",
"self",
",",
"kernel_size",
"=",
"1",
",",
"strides",
"=",
"1",
",",
"pad_mode",
"=",
"\"VALID\"",
",",
"pad_list",
"=",
"0",
",",
"ceil_mode",
"=",
"None",
",",
"data_format",
"=",
"\"NCDHW\"",
")",
":",
"self",
".",
"init_prim_io_names",
"(",
"inputs",
"=",
"[",
"'x'",
"]",
",",
"outputs",
"=",
"[",
"'output'",
"]",
")",
"validator",
".",
"check_value_type",
"(",
"'kernel_size'",
",",
"kernel_size",
",",
"[",
"int",
",",
"tuple",
"]",
",",
"self",
".",
"name",
")",
"validator",
".",
"check_value_type",
"(",
"'strides'",
",",
"strides",
",",
"[",
"int",
",",
"tuple",
"]",
",",
"self",
".",
"name",
")",
"validator",
".",
"check_value_type",
"(",
"'pad_mode'",
",",
"pad_mode",
",",
"[",
"str",
"]",
",",
"self",
".",
"name",
")",
"self",
".",
"pad_mode",
"=",
"validator",
".",
"check_string",
"(",
"pad_mode",
".",
"upper",
"(",
")",
",",
"[",
"'VALID'",
",",
"'SAME'",
",",
"'PAD'",
"]",
",",
"'pad_mode'",
",",
"self",
".",
"name",
")",
"if",
"pad_mode",
".",
"upper",
"(",
")",
"==",
"\"PAD\"",
":",
"self",
".",
"pad_mode",
"=",
"\"CALCULATED\"",
"self",
".",
"add_prim_attr",
"(",
"\"pad_mode\"",
",",
"self",
".",
"pad_mode",
")",
"self",
".",
"data_format",
"=",
"validator",
".",
"check_string",
"(",
"data_format",
",",
"[",
"'NCDHW'",
"]",
",",
"'data_format'",
",",
"self",
".",
"name",
")",
"self",
".",
"kernel_size",
"=",
"_check_3d_int_or_tuple",
"(",
"\"kernel_size\"",
",",
"kernel_size",
",",
"self",
".",
"name",
",",
"allow_five",
"=",
"False",
",",
"ret_five",
"=",
"True",
")",
"self",
".",
"add_prim_attr",
"(",
"\"kernel_size\"",
",",
"self",
".",
"kernel_size",
")",
"self",
".",
"strides",
"=",
"_check_3d_int_or_tuple",
"(",
"\"strides\"",
",",
"strides",
",",
"self",
".",
"name",
",",
"allow_five",
"=",
"False",
",",
"ret_five",
"=",
"True",
")",
"self",
".",
"add_prim_attr",
"(",
"\"strides\"",
",",
"self",
".",
"strides",
")",
"if",
"ceil_mode",
"is",
"None",
":",
"self",
".",
"ceil_mode",
"=",
"not",
"self",
".",
"pad_mode",
"==",
"\"CALCULATED\"",
"else",
":",
"self",
".",
"ceil_mode",
"=",
"validator",
".",
"check_value_type",
"(",
"'ceil_mode'",
",",
"ceil_mode",
",",
"[",
"bool",
"]",
",",
"self",
".",
"name",
")",
"if",
"self",
".",
"pad_mode",
"!=",
"\"CALCULATED\"",
":",
"raise",
"ValueError",
"(",
"\"When the 'pad_mode' is 'same' or 'valid', the 'ceil_mode' only supports 'None'.\"",
")",
"self",
".",
"add_prim_attr",
"(",
"\"ceil_mode\"",
",",
"int",
"(",
"self",
".",
"ceil_mode",
")",
")",
"validator",
".",
"check_value_type",
"(",
"'pad_list'",
",",
"pad_list",
",",
"(",
"int",
",",
"tuple",
")",
",",
"self",
".",
"name",
")",
"self",
".",
"pad_list",
"=",
"pad_list",
"if",
"isinstance",
"(",
"self",
".",
"pad_list",
",",
"int",
")",
":",
"self",
".",
"pad_list",
"=",
"(",
"self",
".",
"pad_list",
",",
")",
"*",
"6",
"if",
"len",
"(",
"self",
".",
"pad_list",
")",
"==",
"3",
":",
"self",
".",
"pad_list",
"=",
"(",
"pad_list",
"[",
"0",
"]",
",",
"pad_list",
"[",
"0",
"]",
",",
"pad_list",
"[",
"1",
"]",
",",
"pad_list",
"[",
"1",
"]",
",",
"pad_list",
"[",
"2",
"]",
",",
"pad_list",
"[",
"2",
"]",
")",
"if",
"len",
"(",
"self",
".",
"pad_list",
")",
"!=",
"3",
"and",
"len",
"(",
"self",
".",
"pad_list",
")",
"!=",
"6",
":",
"raise",
"ValueError",
"(",
"f\"For '{self.name}', attr 'pad_list' should be an positive int number or a tuple of \"",
"f\"three or six positive int numbers, but got {len(self.pad_list)} numbers.\"",
")",
"if",
"self",
".",
"pad_mode",
"!=",
"'CALCULATED'",
"and",
"self",
".",
"pad_list",
"!=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
":",
"raise",
"ValueError",
"(",
"f\"For '{self.name}', the 'pad_list' must be zero or (0, 0, 0, 0, 0, 0) when 'pad_mode' \"",
"f\"is not \\\"pad\\\", but got 'pad_list' is {pad_list} and 'pad_mode' is {pad_mode}.\"",
")",
"if",
"self",
".",
"pad_mode",
"==",
"'CALCULATED'",
":",
"for",
"item",
"in",
"self",
".",
"pad_list",
":",
"validator",
".",
"check_non_negative_int",
"(",
"item",
",",
"'pad_list item'",
",",
"self",
".",
"name",
")",
"self",
".",
"add_prim_attr",
"(",
"\"pad_list\"",
",",
"self",
".",
"pad_list",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/operations/nn_ops.py#L1815-L1854 |
||
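Most of this constructor is argument normalisation; the `pad_list` rules in particular (int → six copies, 3-tuple → mirrored pairs, 6-tuple kept) reduce to a few lines of plain Python:

```python
def normalize_pad_list(pad_list):
    # int -> identical padding on all six faces (two per D, H, W axis);
    # 3-tuple -> each entry duplicated for both faces of its axis.
    if isinstance(pad_list, int):
        return (pad_list,) * 6
    if len(pad_list) == 3:
        d, h, w = pad_list
        return (d, d, h, h, w, w)
    if len(pad_list) == 6:
        return tuple(pad_list)
    raise ValueError("pad_list must be an int or a 3- or 6-tuple")

print(normalize_pad_list(2))          # (2, 2, 2, 2, 2, 2)
print(normalize_pad_list((1, 2, 3)))  # (1, 1, 2, 2, 3, 3)
```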
smilehao/xlua-framework | a03801538be2b0e92d39332d445b22caca1ef61f | ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/build/lib/google/protobuf/text_format.py | python | _Tokenizer.ConsumeString | (self) | Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed. | Consumes a string value. | [
"Consumes",
"a",
"string",
"value",
"."
] | def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e) | [
"def",
"ConsumeString",
"(",
"self",
")",
":",
"bytes",
"=",
"self",
".",
"ConsumeByteString",
"(",
")",
"try",
":",
"return",
"unicode",
"(",
"bytes",
",",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
",",
"e",
":",
"raise",
"self",
".",
"_StringParseError",
"(",
"e",
")"
] | https://github.com/smilehao/xlua-framework/blob/a03801538be2b0e92d39332d445b22caca1ef61f/ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/build/lib/google/protobuf/text_format.py#L491-L504 |
||
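The decode-and-rewrap step is Python 2 idiom (`unicode(bytes, 'utf-8')`); the Python 3 equivalent, keeping the "turn decode failures into parse errors" behaviour:

```python
def consume_string(raw, parse_error=ValueError):
    # Decode UTF-8, rewrapping decode failures in the caller's
    # parse-error type (a stand-in for _StringParseError).
    try:
        return raw.decode("utf-8")
    except UnicodeDecodeError as exc:
        raise parse_error(str(exc))

print(consume_string(b"caf\xc3\xa9"))   # 'café'
try:
    consume_string(b"\xff\xfe")         # invalid UTF-8 start byte
except ValueError as exc:
    print("parse error:", exc)
```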
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/internal/containers.py | python | MessageMap.__init__ | (self, message_listener, message_descriptor, key_checker) | Args:
message_listener: A MessageListener implementation.
The ScalarMap will call this object's Modified() method when it
is modified.
key_checker: A type_checkers.ValueChecker instance to run on keys
inserted into this container.
value_checker: A type_checkers.ValueChecker instance to run on values
inserted into this container. | Args:
message_listener: A MessageListener implementation.
The ScalarMap will call this object's Modified() method when it
is modified.
key_checker: A type_checkers.ValueChecker instance to run on keys
inserted into this container.
value_checker: A type_checkers.ValueChecker instance to run on values
inserted into this container. | [
"Args",
":",
"message_listener",
":",
"A",
"MessageListener",
"implementation",
".",
"The",
"ScalarMap",
"will",
"call",
"this",
"object",
"s",
"Modified",
"()",
"method",
"when",
"it",
"is",
"modified",
".",
"key_checker",
":",
"A",
"type_checkers",
".",
"ValueChecker",
"instance",
"to",
"run",
"on",
"keys",
"inserted",
"into",
"this",
"container",
".",
"value_checker",
":",
"A",
"type_checkers",
".",
"ValueChecker",
"instance",
"to",
"run",
"on",
"values",
"inserted",
"into",
"this",
"container",
"."
] | def __init__(self, message_listener, message_descriptor, key_checker):
"""
Args:
message_listener: A MessageListener implementation.
The ScalarMap will call this object's Modified() method when it
is modified.
key_checker: A type_checkers.ValueChecker instance to run on keys
inserted into this container.
value_checker: A type_checkers.ValueChecker instance to run on values
inserted into this container.
"""
self._message_listener = message_listener
self._message_descriptor = message_descriptor
self._key_checker = key_checker
self._values = {} | [
"def",
"__init__",
"(",
"self",
",",
"message_listener",
",",
"message_descriptor",
",",
"key_checker",
")",
":",
"self",
".",
"_message_listener",
"=",
"message_listener",
"self",
".",
"_message_descriptor",
"=",
"message_descriptor",
"self",
".",
"_key_checker",
"=",
"key_checker",
"self",
".",
"_values",
"=",
"{",
"}"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/containers.py#L525-L539 |
||
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/resmokelib/core/process.py | python | Process.as_command | (self) | return " ".join(sb) | Return an equivalent command line invocation of the process. | Return an equivalent command line invocation of the process. | [
"Return",
"an",
"equivalent",
"command",
"line",
"invocation",
"of",
"the",
"process",
"."
] | def as_command(self):
"""Return an equivalent command line invocation of the process."""
default_env = os.environ
env_diff = self.env.copy()
# Remove environment variables that appear in both 'os.environ' and 'self.env'.
for env_var in default_env:
if env_var in env_diff and env_diff[env_var] == default_env[env_var]:
del env_diff[env_var]
sb = [] # String builder.
for env_var in env_diff:
sb.append(quote("%s=%s" % (env_var, env_diff[env_var])))
sb.extend(map(quote, self.args))
return " ".join(sb) | [
"def",
"as_command",
"(",
"self",
")",
":",
"default_env",
"=",
"os",
".",
"environ",
"env_diff",
"=",
"self",
".",
"env",
".",
"copy",
"(",
")",
"# Remove environment variables that appear in both 'os.environ' and 'self.env'.",
"for",
"env_var",
"in",
"default_env",
":",
"if",
"env_var",
"in",
"env_diff",
"and",
"env_diff",
"[",
"env_var",
"]",
"==",
"default_env",
"[",
"env_var",
"]",
":",
"del",
"env_diff",
"[",
"env_var",
"]",
"sb",
"=",
"[",
"]",
"# String builder.",
"for",
"env_var",
"in",
"env_diff",
":",
"sb",
".",
"append",
"(",
"quote",
"(",
"\"%s=%s\"",
"%",
"(",
"env_var",
",",
"env_diff",
"[",
"env_var",
"]",
")",
")",
")",
"sb",
".",
"extend",
"(",
"map",
"(",
"quote",
",",
"self",
".",
"args",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"sb",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/core/process.py#L262-L278 |
|
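The env-diff-plus-quoting idea needs nothing beyond the stdlib; a compact re-creation, with `shlex.quote` standing in for resmoke's `quote` helper:

```python
import os
from shlex import quote

def as_command(args, env):
    # Emit only the variables that differ from os.environ, then the
    # quoted argv, as one shell-pasteable line.
    diff = {k: v for k, v in env.items() if os.environ.get(k) != v}
    parts = [quote("%s=%s" % (k, v)) for k, v in sorted(diff.items())]
    parts.extend(quote(a) for a in args)
    return " ".join(parts)

env = dict(os.environ, MY_FLAG="on")
print(as_command(["mongod", "--dbpath", "/tmp/my db"], env))
# MY_FLAG=on mongod --dbpath '/tmp/my db'
```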
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/propgrid.py | python | PropertyGridInterface.SetPropertyValueString | (*args, **kwargs) | return _propgrid.PropertyGridInterface_SetPropertyValueString(*args, **kwargs) | SetPropertyValueString(self, PGPropArg id, String value) | SetPropertyValueString(self, PGPropArg id, String value) | [
"SetPropertyValueString",
"(",
"self",
"PGPropArg",
"id",
"String",
"value",
")"
] | def SetPropertyValueString(*args, **kwargs):
"""SetPropertyValueString(self, PGPropArg id, String value)"""
return _propgrid.PropertyGridInterface_SetPropertyValueString(*args, **kwargs) | [
"def",
"SetPropertyValueString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGridInterface_SetPropertyValueString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L1449-L1451 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/distributions/util.py | python | _smallest_integer_by_dtype | (dt) | return -1 * _largest_integer_by_dtype(dt) | Helper returning the smallest integer exactly representable by dtype. | Helper returning the smallest integer exactly representable by dtype. | [
"Helper",
"returning",
"the",
"smallest",
"integer",
"exactly",
"representable",
"by",
"dtype",
"."
] | def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt) | [
"def",
"_smallest_integer_by_dtype",
"(",
"dt",
")",
":",
"if",
"not",
"_is_known_dtype",
"(",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")",
"if",
"_is_known_unsigned_by_dtype",
"(",
"dt",
")",
":",
"return",
"0",
"return",
"-",
"1",
"*",
"_largest_integer_by_dtype",
"(",
"dt",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/distributions/util.py#L282-L288 |
|
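Note the deliberate symmetry: the smallest value is the negation of the largest, which for signed integer dtypes is one above the dtype's true minimum (-127 rather than -128 for int8) — assuming `_largest_integer_by_dtype` reports the dtype max for signed ints. A toy illustration:

```python
import numpy as np

def smallest_symmetric(dt):
    # Mirrors the helper's shape: unsigned -> 0, otherwise minus the
    # largest value (NOT iinfo.min -- the range stays symmetric).
    info = np.iinfo(dt)
    return 0 if info.min == 0 else -info.max

for dt in (np.uint8, np.int8, np.int32):
    print(dt.__name__, smallest_symmetric(dt), "iinfo.min:", np.iinfo(dt).min)
# uint8 0 iinfo.min: 0
# int8 -127 iinfo.min: -128
# int32 -2147483647 iinfo.min: -2147483648
```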
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py | python | _create_grad_indexed_slices_init | (grad_output_slices, forward_input) | return ops.IndexedSlices(values=values, indices=indices, dense_shape=shape) | Creates an IndexedSlices to pass as input to the while grad function.
Args:
grad_output_slices: IndexedSlices. The corresponding while grad function
output.
forward_input: Tensor. The corresponding input to the forward while op.
Returns:
Zeros IndexedSlices, created in current Graph. | Creates an IndexedSlices to pass as input to the while grad function. | [
"Creates",
"an",
"IndexedSlices",
"to",
"pass",
"as",
"input",
"to",
"the",
"while",
"grad",
"function",
"."
] | def _create_grad_indexed_slices_init(grad_output_slices, forward_input):
"""Creates an IndexedSlices to pass as input to the while grad function.
Args:
grad_output_slices: IndexedSlices. The corresponding while grad function
output.
forward_input: Tensor. The corresponding input to the forward while op.
Returns:
Zeros IndexedSlices, created in current Graph.
"""
assert isinstance(grad_output_slices, ops.IndexedSlices)
assert isinstance(forward_input, ops.Tensor)
values_out = grad_output_slices.values
indices_out = grad_output_slices.indices
# Create the initial values tensor.
if values_out.shape.is_fully_defined():
values_shape = tensor_shape.TensorShape([0] +
values_out.shape.as_list()[1:])
values = array_ops.zeros(values_shape, dtype=values_out.dtype,
name="values_init")
else:
if forward_input.dtype == dtypes.resource:
forward_shape = gen_resource_variable_ops.variable_shape(forward_input)
else:
forward_shape = array_ops.shape(forward_input)
values_shape = array_ops.concat([[0], forward_shape[1:]], 0)
values = array_ops.zeros(values_shape, dtype=values_out.dtype,
name="values_init")
# Create the initial indices tensor.
indices = constant_op.constant([], indices_out.dtype, name="indices_init")
# Create the initial dense_shape tensor. We assume is the same shape as
# forward_input, since captured tensors don't change shape across loop
# iterations.
if forward_input.dtype == dtypes.resource:
shape = gen_resource_variable_ops.variable_shape(forward_input,
name="shape_init")
else:
shape = array_ops.shape(forward_input, name="shape_init")
return ops.IndexedSlices(values=values, indices=indices, dense_shape=shape) | [
"def",
"_create_grad_indexed_slices_init",
"(",
"grad_output_slices",
",",
"forward_input",
")",
":",
"assert",
"isinstance",
"(",
"grad_output_slices",
",",
"ops",
".",
"IndexedSlices",
")",
"assert",
"isinstance",
"(",
"forward_input",
",",
"ops",
".",
"Tensor",
")",
"values_out",
"=",
"grad_output_slices",
".",
"values",
"indices_out",
"=",
"grad_output_slices",
".",
"indices",
"# Create the initial values tensor.",
"if",
"values_out",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"values_shape",
"=",
"tensor_shape",
".",
"TensorShape",
"(",
"[",
"0",
"]",
"+",
"values_out",
".",
"shape",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
")",
"values",
"=",
"array_ops",
".",
"zeros",
"(",
"values_shape",
",",
"dtype",
"=",
"values_out",
".",
"dtype",
",",
"name",
"=",
"\"values_init\"",
")",
"else",
":",
"if",
"forward_input",
".",
"dtype",
"==",
"dtypes",
".",
"resource",
":",
"forward_shape",
"=",
"gen_resource_variable_ops",
".",
"variable_shape",
"(",
"forward_input",
")",
"else",
":",
"forward_shape",
"=",
"array_ops",
".",
"shape",
"(",
"forward_input",
")",
"values_shape",
"=",
"array_ops",
".",
"concat",
"(",
"[",
"[",
"0",
"]",
",",
"forward_shape",
"[",
"1",
":",
"]",
"]",
",",
"0",
")",
"values",
"=",
"array_ops",
".",
"zeros",
"(",
"values_shape",
",",
"dtype",
"=",
"values_out",
".",
"dtype",
",",
"name",
"=",
"\"values_init\"",
")",
"# Create the initial indices tensor.",
"indices",
"=",
"constant_op",
".",
"constant",
"(",
"[",
"]",
",",
"indices_out",
".",
"dtype",
",",
"name",
"=",
"\"indices_init\"",
")",
"# Create the initial dense_shape tensor. We assume is the same shape as",
"# forward_input, since captured tensors don't change shape across loop",
"# iterations.",
"if",
"forward_input",
".",
"dtype",
"==",
"dtypes",
".",
"resource",
":",
"shape",
"=",
"gen_resource_variable_ops",
".",
"variable_shape",
"(",
"forward_input",
",",
"name",
"=",
"\"shape_init\"",
")",
"else",
":",
"shape",
"=",
"array_ops",
".",
"shape",
"(",
"forward_input",
",",
"name",
"=",
"\"shape_init\"",
")",
"return",
"ops",
".",
"IndexedSlices",
"(",
"values",
"=",
"values",
",",
"indices",
"=",
"indices",
",",
"dense_shape",
"=",
"shape",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py#L158-L201 |
|
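Conceptually this builds the IndexedSlices analogue of a zeros-like gradient: zero selected rows, empty values, and a dense_shape matching the forward input. Stripped of the TF graph plumbing, the shapes involved look like this (a numpy sketch, not the TF implementation):

```python
import numpy as np

def zeros_indexed_slices_like(forward_input):
    # values keeps every trailing dim of the input but has zero rows;
    # indices is empty; dense_shape records the full input shape.
    values = np.zeros((0,) + forward_input.shape[1:], forward_input.dtype)
    indices = np.zeros((0,), dtype=np.int64)
    dense_shape = np.asarray(forward_input.shape, dtype=np.int64)
    return values, indices, dense_shape

v, i, s = zeros_indexed_slices_like(np.ones((8, 3), np.float32))
print(v.shape, i.shape, s)   # (0, 3) (0,) [8 3]
```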
chromiumembedded/cef | 80caf947f3fe2210e5344713c5281d8af9bdc295 | tools/yapf/yapf/yapflib/reformatter.py | python | _LineContainsI18n | (uwline) | return False | Return true if there are i18n comments or function calls in the line.
I18n comments and pseudo-function calls are closely related. They cannot
be moved apart without breaking i18n.
Arguments:
uwline: (unwrapped_line.UnwrappedLine) The line currently being formatted.
Returns:
True if the line contains i18n comments or function calls. False otherwise. | Return true if there are i18n comments or function calls in the line. | [
"Return",
"true",
"if",
"there",
"are",
"i18n",
"comments",
"or",
"function",
"calls",
"in",
"the",
"line",
"."
] | def _LineContainsI18n(uwline):
"""Return true if there are i18n comments or function calls in the line.
I18n comments and pseudo-function calls are closely related. They cannot
be moved apart without breaking i18n.
Arguments:
uwline: (unwrapped_line.UnwrappedLine) The line currently being formatted.
Returns:
True if the line contains i18n comments or function calls. False otherwise.
"""
if style.Get('I18N_COMMENT'):
for tok in uwline.tokens:
if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value):
# Contains an i18n comment.
return True
if style.Get('I18N_FUNCTION_CALL'):
length = len(uwline.tokens)
index = 0
while index < length - 1:
if (uwline.tokens[index + 1].value == '(' and
uwline.tokens[index].value in style.Get('I18N_FUNCTION_CALL')):
return True
index += 1
return False | [
"def",
"_LineContainsI18n",
"(",
"uwline",
")",
":",
"if",
"style",
".",
"Get",
"(",
"'I18N_COMMENT'",
")",
":",
"for",
"tok",
"in",
"uwline",
".",
"tokens",
":",
"if",
"tok",
".",
"is_comment",
"and",
"re",
".",
"match",
"(",
"style",
".",
"Get",
"(",
"'I18N_COMMENT'",
")",
",",
"tok",
".",
"value",
")",
":",
"# Contains an i18n comment.",
"return",
"True",
"if",
"style",
".",
"Get",
"(",
"'I18N_FUNCTION_CALL'",
")",
":",
"length",
"=",
"len",
"(",
"uwline",
".",
"tokens",
")",
"index",
"=",
"0",
"while",
"index",
"<",
"length",
"-",
"1",
":",
"if",
"(",
"uwline",
".",
"tokens",
"[",
"index",
"+",
"1",
"]",
".",
"value",
"==",
"'('",
"and",
"uwline",
".",
"tokens",
"[",
"index",
"]",
".",
"value",
"in",
"style",
".",
"Get",
"(",
"'I18N_FUNCTION_CALL'",
")",
")",
":",
"return",
"True",
"index",
"+=",
"1",
"return",
"False"
] | https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/yapf/yapf/yapflib/reformatter.py#L177-L204 |
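The two checks — a comment matching the i18n regex, or an i18n function name immediately followed by '(' — are easy to replay on a toy token stream; the regex and function names below are hypothetical configured values, not yapf defaults:

```python
import re
from collections import namedtuple

Tok = namedtuple("Tok", "value is_comment")
I18N_COMMENT = r"#\..*"            # e.g. "#. translator note"
I18N_FUNCTION_CALL = {"N_", "_"}   # names treated as i18n pseudo-calls

def line_contains_i18n(tokens):
    if any(t.is_comment and re.match(I18N_COMMENT, t.value) for t in tokens):
        return True
    return any(a.value in I18N_FUNCTION_CALL and b.value == "("
               for a, b in zip(tokens, tokens[1:]))

line = [Tok("x", False), Tok("=", False), Tok("_", False),
        Tok("(", False), Tok("'msg'", False), Tok(")", False)]
print(line_contains_i18n(line))   # True
```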