Dataset schema (column name, type, min / max value length):

column              type            min      max
nwo                 stringlengths   5        86
sha                 stringlengths   40       40
path                stringlengths   4        189
language            stringclasses   1 value
identifier          stringlengths   1        94
parameters          stringlengths   2        4.03k
argument_list       stringclasses   1 value
return_statement    stringlengths   0        11.5k
docstring           stringlengths   1        33.2k
docstring_summary   stringlengths   0        5.15k
docstring_tokens    sequence
function            stringlengths   34       151k
function_tokens     sequence
url                 stringlengths   90       278
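The records below each describe one Python function in this CodeSearchNet-style layout. As a rough illustration of how such a dump can be loaded and inspected, here is a minimal sketch using the Hugging Face `datasets` library; the dataset identifier is a made-up placeholder, not the actual source of these rows.

```python
# Hypothetical example: load a CodeSearchNet-style dataset and print a few fields.
from datasets import load_dataset

ds = load_dataset("user/python-functions-with-docstrings", split="train")  # placeholder name
row = ds[0]
for key in ("nwo", "path", "identifier", "docstring_summary", "url"):
    print(key, "=>", row[key])

# docstring_tokens and function_tokens are sequences (lists of strings)
print(len(row["function_tokens"]), "function tokens")
```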
hfinkel/llvm-project-cxxjit
91084ef018240bbb8e24235ff5cd8c355a9c1a1e
clang/bindings/python/clang/cindex.py
python
TranslationUnit.codeComplete
(self, path, line, column, unsaved_files=None, include_macros=False, include_code_patterns=False, include_brief_comments=False)
return None
Code complete in this translation unit. In-memory contents for files can be provided by passing a list of pairs as unsaved_files, the first items should be the filenames to be mapped and the second should be the contents to be substituted for the file. The contents may be passed as strings or file objects.
Code complete in this translation unit.
[ "Code", "complete", "in", "this", "translation", "unit", "." ]
def codeComplete(self, path, line, column, unsaved_files=None, include_macros=False, include_code_patterns=False, include_brief_comments=False): """ Code complete in this translation unit. In-memory contents for files can be provided by passing a list of pairs as unsaved_files, the first items should be the filenames to be mapped and the second should be the contents to be substituted for the file. The contents may be passed as strings or file objects. """ options = 0 if include_macros: options += 1 if include_code_patterns: options += 2 if include_brief_comments: options += 4 if unsaved_files is None: unsaved_files = [] unsaved_files_array = 0 if len(unsaved_files): unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))() for i,(name,contents) in enumerate(unsaved_files): if hasattr(contents, "read"): contents = contents.read() contents = b(contents) unsaved_files_array[i].name = b(fspath(name)) unsaved_files_array[i].contents = contents unsaved_files_array[i].length = len(contents) ptr = conf.lib.clang_codeCompleteAt(self, fspath(path), line, column, unsaved_files_array, len(unsaved_files), options) if ptr: return CodeCompletionResults(ptr) return None
[ "def", "codeComplete", "(", "self", ",", "path", ",", "line", ",", "column", ",", "unsaved_files", "=", "None", ",", "include_macros", "=", "False", ",", "include_code_patterns", "=", "False", ",", "include_brief_comments", "=", "False", ")", ":", "options", "=", "0", "if", "include_macros", ":", "options", "+=", "1", "if", "include_code_patterns", ":", "options", "+=", "2", "if", "include_brief_comments", ":", "options", "+=", "4", "if", "unsaved_files", "is", "None", ":", "unsaved_files", "=", "[", "]", "unsaved_files_array", "=", "0", "if", "len", "(", "unsaved_files", ")", ":", "unsaved_files_array", "=", "(", "_CXUnsavedFile", "*", "len", "(", "unsaved_files", ")", ")", "(", ")", "for", "i", ",", "(", "name", ",", "contents", ")", "in", "enumerate", "(", "unsaved_files", ")", ":", "if", "hasattr", "(", "contents", ",", "\"read\"", ")", ":", "contents", "=", "contents", ".", "read", "(", ")", "contents", "=", "b", "(", "contents", ")", "unsaved_files_array", "[", "i", "]", ".", "name", "=", "b", "(", "fspath", "(", "name", ")", ")", "unsaved_files_array", "[", "i", "]", ".", "contents", "=", "contents", "unsaved_files_array", "[", "i", "]", ".", "length", "=", "len", "(", "contents", ")", "ptr", "=", "conf", ".", "lib", ".", "clang_codeCompleteAt", "(", "self", ",", "fspath", "(", "path", ")", ",", "line", ",", "column", ",", "unsaved_files_array", ",", "len", "(", "unsaved_files", ")", ",", "options", ")", "if", "ptr", ":", "return", "CodeCompletionResults", "(", "ptr", ")", "return", "None" ]
https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L3029-L3068
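As a hedged usage sketch for the record above: code completion via the clang Python bindings on an in-memory file. The snippet, file name, and completion position are invented for illustration, and it only runs where libclang is installed and discoverable.

```python
# Illustrative only: ask for completions just after "s." in a tiny C++ snippet.
import clang.cindex

src = "#include <string>\nvoid f() { std::string s; s. }\n"
index = clang.cindex.Index.create()
tu = index.parse("tmp.cpp", unsaved_files=[("tmp.cpp", src)])

results = tu.codeComplete("tmp.cpp", 2, 29, unsaved_files=[("tmp.cpp", src)])
if results is not None:
    print(len(results.results), "completion results")
```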
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/lookup/lookup_ops.py
python
index_table_from_tensor
(mapping, num_oov_buckets=0, default_value=-1, hasher_spec=FastHashSpec, dtype=dtypes.string, name=None)
return lookup_ops.index_table_from_tensor( vocabulary_list=mapping, num_oov_buckets=num_oov_buckets, default_value=default_value, hasher_spec=hasher_spec, dtype=dtype, name=name)
Returns a lookup table that converts a string tensor into int64 IDs. This operation constructs a lookup table to convert tensor of strings into int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor where each element is a key and corresponding index within the tensor is the value. Any lookup of an out-of-vocabulary token will return a bucket ID based on its hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the `default_value`. The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`. The underlying table must be initialized by calling `tf.tables_initializer.run()` or `table.init.run()` once. Elements in `mapping` cannot have duplicates, otherwise when executing the table initializer op, it will throw a `FailedPreconditionError`. Sample Usages: ```python mapping_strings = tf.constant(["emerson", "lake", "palmer"]) table = tf.contrib.lookup.index_table_from_tensor( mapping=mapping_strings, num_oov_buckets=1, default_value=-1) features = tf.constant(["emerson", "lake", "and", "palmer"]) ids = table.lookup(features) ... tf.tables_initializer().run() ids.eval() ==> [0, 1, 4, 2] ``` Args: mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The type of this object must be castable to `dtype`. num_oov_buckets: The number of out-of-vocabulary buckets. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. hasher_spec: A `HasherSpec` to specify the hash function to use for assignment of out-of-vocabulary buckets. dtype: The type of values passed to `lookup`. Only string and integers are supported. name: A name for this op (optional). Returns: The lookup table to map an input `Tensor` to index `int64` `Tensor`. Raises: ValueError: If `mapping` is invalid. ValueError: If `num_oov_buckets` is negative.
Returns a lookup table that converts a string tensor into int64 IDs.
[ "Returns", "a", "lookup", "table", "that", "converts", "a", "string", "tensor", "into", "int64", "IDs", "." ]
def index_table_from_tensor(mapping, num_oov_buckets=0, default_value=-1, hasher_spec=FastHashSpec, dtype=dtypes.string, name=None): """Returns a lookup table that converts a string tensor into int64 IDs. This operation constructs a lookup table to convert tensor of strings into int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor where each element is a key and corresponding index within the tensor is the value. Any lookup of an out-of-vocabulary token will return a bucket ID based on its hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the `default_value`. The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`. The underlying table must be initialized by calling `tf.tables_initializer.run()` or `table.init.run()` once. Elements in `mapping` cannot have duplicates, otherwise when executing the table initializer op, it will throw a `FailedPreconditionError`. Sample Usages: ```python mapping_strings = tf.constant(["emerson", "lake", "palmer"]) table = tf.contrib.lookup.index_table_from_tensor( mapping=mapping_strings, num_oov_buckets=1, default_value=-1) features = tf.constant(["emerson", "lake", "and", "palmer"]) ids = table.lookup(features) ... tf.tables_initializer().run() ids.eval() ==> [0, 1, 4, 2] ``` Args: mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The type of this object must be castable to `dtype`. num_oov_buckets: The number of out-of-vocabulary buckets. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. hasher_spec: A `HasherSpec` to specify the hash function to use for assignment of out-of-vocabulary buckets. dtype: The type of values passed to `lookup`. Only string and integers are supported. name: A name for this op (optional). Returns: The lookup table to map an input `Tensor` to index `int64` `Tensor`. Raises: ValueError: If `mapping` is invalid. ValueError: If `num_oov_buckets` is negative. """ if mapping is None: raise ValueError("mapping must be specified.") return lookup_ops.index_table_from_tensor( vocabulary_list=mapping, num_oov_buckets=num_oov_buckets, default_value=default_value, hasher_spec=hasher_spec, dtype=dtype, name=name)
[ "def", "index_table_from_tensor", "(", "mapping", ",", "num_oov_buckets", "=", "0", ",", "default_value", "=", "-", "1", ",", "hasher_spec", "=", "FastHashSpec", ",", "dtype", "=", "dtypes", ".", "string", ",", "name", "=", "None", ")", ":", "if", "mapping", "is", "None", ":", "raise", "ValueError", "(", "\"mapping must be specified.\"", ")", "return", "lookup_ops", ".", "index_table_from_tensor", "(", "vocabulary_list", "=", "mapping", ",", "num_oov_buckets", "=", "num_oov_buckets", ",", "default_value", "=", "default_value", ",", "hasher_spec", "=", "hasher_spec", ",", "dtype", "=", "dtype", ",", "name", "=", "name", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/lookup/lookup_ops.py#L73-L138
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Source/ThirdParty/CEF3/cef_source/tools/gn_args.py
python
GetGNEnvArgs
()
return NameValueListToDict(ShlexEnv('GN_DEFINES'))
Return GN args specified via the GN_DEFINES env variable.
Return GN args specified via the GN_DEFINES env variable.
[ "Return", "GN", "args", "specified", "via", "the", "GN_DEFINES", "env", "variable", "." ]
def GetGNEnvArgs(): """ Return GN args specified via the GN_DEFINES env variable. """ return NameValueListToDict(ShlexEnv('GN_DEFINES'))
[ "def", "GetGNEnvArgs", "(", ")", ":", "return", "NameValueListToDict", "(", "ShlexEnv", "(", "'GN_DEFINES'", ")", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/cef_source/tools/gn_args.py#L212-L216
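GetGNEnvArgs above relies on two small helpers from the same gn_args.py (ShlexEnv and NameValueListToDict) that are not shown in this record. The sketch below re-creates the same idea with stand-in helpers; they are illustrative, not the originals.

```python
# Split GN_DEFINES shell-style and turn name=value items into a dict.
import os
import shlex

def shlex_env(var):
    return shlex.split(os.environ.get(var, ""))

def name_value_list_to_dict(items):
    result = {}
    for item in items:
        name, _, value = item.partition("=")
        result[name] = value
    return result

os.environ["GN_DEFINES"] = "is_debug=false use_sysroot=true"
print(name_value_list_to_dict(shlex_env("GN_DEFINES")))  # {'is_debug': 'false', 'use_sysroot': 'true'}
```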
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Environment.py
python
Base.WhereIs
(self, prog, path=None, pathext=None, reject=[])
return None
Find prog in the path.
Find prog in the path.
[ "Find", "prog", "in", "the", "path", "." ]
def WhereIs(self, prog, path=None, pathext=None, reject=[]): """Find prog in the path. """ if path is None: try: path = self['ENV']['PATH'] except KeyError: pass elif is_String(path): path = self.subst(path) if pathext is None: try: pathext = self['ENV']['PATHEXT'] except KeyError: pass elif is_String(pathext): pathext = self.subst(pathext) prog = CLVar(self.subst(prog)) # support "program --with-args" path = WhereIs(prog[0], path, pathext, reject) if path: return path return None
[ "def", "WhereIs", "(", "self", ",", "prog", ",", "path", "=", "None", ",", "pathext", "=", "None", ",", "reject", "=", "[", "]", ")", ":", "if", "path", "is", "None", ":", "try", ":", "path", "=", "self", "[", "'ENV'", "]", "[", "'PATH'", "]", "except", "KeyError", ":", "pass", "elif", "is_String", "(", "path", ")", ":", "path", "=", "self", ".", "subst", "(", "path", ")", "if", "pathext", "is", "None", ":", "try", ":", "pathext", "=", "self", "[", "'ENV'", "]", "[", "'PATHEXT'", "]", "except", "KeyError", ":", "pass", "elif", "is_String", "(", "pathext", ")", ":", "pathext", "=", "self", ".", "subst", "(", "pathext", ")", "prog", "=", "CLVar", "(", "self", ".", "subst", "(", "prog", ")", ")", "# support \"program --with-args\"", "path", "=", "WhereIs", "(", "prog", "[", "0", "]", ",", "path", ",", "pathext", ",", "reject", ")", "if", "path", ":", "return", "path", "return", "None" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Environment.py#L1873-L1893
giuspen/cherrytree
84712f206478fcf9acf30174009ad28c648c6344
pygtk2/modules/core.py
python
CherryTree.cut_as_plain_text
(self, *args)
Copy as Plain Text
Copy as Plain Text
[ "Copy", "as", "Plain", "Text" ]
def cut_as_plain_text(self, *args): """Copy as Plain Text""" self.clipboard_handler.force_plain_text = True anchor = self.codeboxes_handler.codebox_in_use_get_anchor() if anchor is not None: anchor.sourceview.emit("cut-clipboard") else: self.sourceview.emit("cut-clipboard")
[ "def", "cut_as_plain_text", "(", "self", ",", "*", "args", ")", ":", "self", ".", "clipboard_handler", ".", "force_plain_text", "=", "True", "anchor", "=", "self", ".", "codeboxes_handler", ".", "codebox_in_use_get_anchor", "(", ")", "if", "anchor", "is", "not", "None", ":", "anchor", ".", "sourceview", ".", "emit", "(", "\"cut-clipboard\"", ")", "else", ":", "self", ".", "sourceview", ".", "emit", "(", "\"cut-clipboard\"", ")" ]
https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/core.py#L4343-L4348
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
CreateArg
(arg_string)
Creates an Argument.
Creates an Argument.
[ "Creates", "an", "Argument", "." ]
def CreateArg(arg_string): """Creates an Argument.""" arg_parts = arg_string.split() if len(arg_parts) == 1 and arg_parts[0] == 'void': return None # Is this a pointer argument? elif arg_string.find('*') >= 0: if arg_parts[0] == 'NonImmediate': return NonImmediatePointerArgument( arg_parts[-1], " ".join(arg_parts[1:-1])) else: return PointerArgument( arg_parts[-1], " ".join(arg_parts[0:-1])) # Is this a resource argument? Must come after pointer check. elif arg_parts[0].startswith('GLidBind'): return ResourceIdBindArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLidZero'): return ResourceIdZeroArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLid'): return ResourceIdArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLenum') and len(arg_parts[0]) > 6: return EnumArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLboolean') and len(arg_parts[0]) > 9: return ValidatedBoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLboolean'): return BoolArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif arg_parts[0].startswith('GLintUniformLocation'): return UniformLocationArgument(arg_parts[-1]) elif (arg_parts[0].startswith('GLint') and len(arg_parts[0]) > 5 and not arg_parts[0].startswith('GLintptr')): return IntArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) elif (arg_parts[0].startswith('GLsizeiNotNegative') or arg_parts[0].startswith('GLintptrNotNegative')): return SizeNotNegativeArgument(arg_parts[-1], " ".join(arg_parts[0:-1]), arg_parts[0][0:-11]) elif arg_parts[0].startswith('GLsize'): return SizeArgument(arg_parts[-1], " ".join(arg_parts[0:-1])) else: return Argument(arg_parts[-1], " ".join(arg_parts[0:-1]))
[ "def", "CreateArg", "(", "arg_string", ")", ":", "arg_parts", "=", "arg_string", ".", "split", "(", ")", "if", "len", "(", "arg_parts", ")", "==", "1", "and", "arg_parts", "[", "0", "]", "==", "'void'", ":", "return", "None", "# Is this a pointer argument?", "elif", "arg_string", ".", "find", "(", "'*'", ")", ">=", "0", ":", "if", "arg_parts", "[", "0", "]", "==", "'NonImmediate'", ":", "return", "NonImmediatePointerArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "1", ":", "-", "1", "]", ")", ")", "else", ":", "return", "PointerArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "# Is this a resource argument? Must come after pointer check.", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLidBind'", ")", ":", "return", "ResourceIdBindArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLidZero'", ")", ":", "return", "ResourceIdZeroArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLid'", ")", ":", "return", "ResourceIdArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLenum'", ")", "and", "len", "(", "arg_parts", "[", "0", "]", ")", ">", "6", ":", "return", "EnumArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLboolean'", ")", "and", "len", "(", "arg_parts", "[", "0", "]", ")", ">", "9", ":", "return", "ValidatedBoolArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLboolean'", ")", ":", "return", "BoolArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLintUniformLocation'", ")", ":", "return", "UniformLocationArgument", "(", "arg_parts", "[", "-", "1", "]", ")", "elif", "(", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLint'", ")", "and", "len", "(", "arg_parts", "[", "0", "]", ")", ">", "5", "and", "not", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLintptr'", ")", ")", ":", "return", "IntArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", "elif", "(", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLsizeiNotNegative'", ")", "or", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLintptrNotNegative'", ")", ")", ":", "return", "SizeNotNegativeArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ",", "arg_parts", "[", "0", "]", "[", "0", ":", "-", "11", "]", ")", "elif", "arg_parts", "[", "0", "]", ".", "startswith", "(", "'GLsize'", ")", ":", "return", "SizeArgument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")", 
"else", ":", "return", "Argument", "(", "arg_parts", "[", "-", "1", "]", ",", "\" \"", ".", "join", "(", "arg_parts", "[", "0", ":", "-", "1", "]", ")", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L5527-L5568
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/internals/blocks.py
python
Block.replace
( self, to_replace, value, inplace=False, filter=None, regex=False, convert=True )
return blocks
replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility.
replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility.
[ "replace", "the", "to_replace", "value", "with", "value", "possible", "to", "create", "new", "blocks", "here", "this", "is", "just", "a", "call", "to", "putmask", ".", "regex", "is", "not", "used", "here", ".", "It", "is", "used", "in", "ObjectBlocks", ".", "It", "is", "here", "for", "API", "compatibility", "." ]
def replace( self, to_replace, value, inplace=False, filter=None, regex=False, convert=True ): """replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, "inplace") original_to_replace = to_replace # If we cannot replace with own dtype, convert to ObjectBlock and # retry if not self._can_hold_element(to_replace): if not isinstance(to_replace, list): if inplace: return [self] return [self.copy()] to_replace = [x for x in to_replace if self._can_hold_element(x)] if not len(to_replace): # GH#28084 avoid costly checks since we can infer # that there is nothing to replace in this block if inplace: return [self] return [self.copy()] if len(to_replace) == 1: # _can_hold_element checks have reduced this back to the # scalar case and we can avoid a costly object cast return self.replace( to_replace[0], value, inplace=inplace, filter=filter, regex=regex, convert=convert, ) # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise AssertionError # try again with a compatible block block = self.astype(object) return block.replace( to_replace=to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert, ) values = self.values if lib.is_scalar(to_replace) and isinstance(values, np.ndarray): # The only non-DatetimeLike class that also has a non-trivial # try_coerce_args is ObjectBlock, but that overrides replace, # so does not get here. to_replace = convert_scalar(values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False if not mask.any(): if inplace: return [self] return [self.copy()] try: blocks = self.putmask(mask, value, inplace=inplace) # Note: it is _not_ the case that self._can_hold_element(value) # is always true at this point. In particular, that can fail # for: # "2u" with bool-dtype, float-dtype # 0.5 with int64-dtype # np.nan with int64-dtype except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise assert not self._can_hold_element(value), value # try again with a compatible block block = self.astype(object) return block.replace( to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert, ) if convert: blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks] return blocks
[ "def", "replace", "(", "self", ",", "to_replace", ",", "value", ",", "inplace", "=", "False", ",", "filter", "=", "None", ",", "regex", "=", "False", ",", "convert", "=", "True", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "original_to_replace", "=", "to_replace", "# If we cannot replace with own dtype, convert to ObjectBlock and", "# retry", "if", "not", "self", ".", "_can_hold_element", "(", "to_replace", ")", ":", "if", "not", "isinstance", "(", "to_replace", ",", "list", ")", ":", "if", "inplace", ":", "return", "[", "self", "]", "return", "[", "self", ".", "copy", "(", ")", "]", "to_replace", "=", "[", "x", "for", "x", "in", "to_replace", "if", "self", ".", "_can_hold_element", "(", "x", ")", "]", "if", "not", "len", "(", "to_replace", ")", ":", "# GH#28084 avoid costly checks since we can infer", "# that there is nothing to replace in this block", "if", "inplace", ":", "return", "[", "self", "]", "return", "[", "self", ".", "copy", "(", ")", "]", "if", "len", "(", "to_replace", ")", "==", "1", ":", "# _can_hold_element checks have reduced this back to the", "# scalar case and we can avoid a costly object cast", "return", "self", ".", "replace", "(", "to_replace", "[", "0", "]", ",", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ",", "convert", "=", "convert", ",", ")", "# GH 22083, TypeError or ValueError occurred within error handling", "# causes infinite loop. Cast and retry only if not objectblock.", "if", "is_object_dtype", "(", "self", ")", ":", "raise", "AssertionError", "# try again with a compatible block", "block", "=", "self", ".", "astype", "(", "object", ")", "return", "block", ".", "replace", "(", "to_replace", "=", "to_replace", ",", "value", "=", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ",", "convert", "=", "convert", ",", ")", "values", "=", "self", ".", "values", "if", "lib", ".", "is_scalar", "(", "to_replace", ")", "and", "isinstance", "(", "values", ",", "np", ".", "ndarray", ")", ":", "# The only non-DatetimeLike class that also has a non-trivial", "# try_coerce_args is ObjectBlock, but that overrides replace,", "# so does not get here.", "to_replace", "=", "convert_scalar", "(", "values", ",", "to_replace", ")", "mask", "=", "missing", ".", "mask_missing", "(", "values", ",", "to_replace", ")", "if", "filter", "is", "not", "None", ":", "filtered_out", "=", "~", "self", ".", "mgr_locs", ".", "isin", "(", "filter", ")", "mask", "[", "filtered_out", ".", "nonzero", "(", ")", "[", "0", "]", "]", "=", "False", "if", "not", "mask", ".", "any", "(", ")", ":", "if", "inplace", ":", "return", "[", "self", "]", "return", "[", "self", ".", "copy", "(", ")", "]", "try", ":", "blocks", "=", "self", ".", "putmask", "(", "mask", ",", "value", ",", "inplace", "=", "inplace", ")", "# Note: it is _not_ the case that self._can_hold_element(value)", "# is always true at this point. In particular, that can fail", "# for:", "# \"2u\" with bool-dtype, float-dtype", "# 0.5 with int64-dtype", "# np.nan with int64-dtype", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# GH 22083, TypeError or ValueError occurred within error handling", "# causes infinite loop. 
Cast and retry only if not objectblock.", "if", "is_object_dtype", "(", "self", ")", ":", "raise", "assert", "not", "self", ".", "_can_hold_element", "(", "value", ")", ",", "value", "# try again with a compatible block", "block", "=", "self", ".", "astype", "(", "object", ")", "return", "block", ".", "replace", "(", "to_replace", "=", "original_to_replace", ",", "value", "=", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ",", "convert", "=", "convert", ",", ")", "if", "convert", ":", "blocks", "=", "[", "b", ".", "convert", "(", "numeric", "=", "False", ",", "copy", "=", "not", "inplace", ")", "for", "b", "in", "blocks", "]", "return", "blocks" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/internals/blocks.py#L701-L801
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_controls.py
python
ToolBarBase.DeleteToolByPos
(*args, **kwargs)
return _controls_.ToolBarBase_DeleteToolByPos(*args, **kwargs)
DeleteToolByPos(self, size_t pos) -> bool
DeleteToolByPos(self, size_t pos) -> bool
[ "DeleteToolByPos", "(", "self", "size_t", "pos", ")", "-", ">", "bool" ]
def DeleteToolByPos(*args, **kwargs): """DeleteToolByPos(self, size_t pos) -> bool""" return _controls_.ToolBarBase_DeleteToolByPos(*args, **kwargs)
[ "def", "DeleteToolByPos", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "ToolBarBase_DeleteToolByPos", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L3775-L3777
rdkit/rdkit
ede860ae316d12d8568daf5ee800921c3389c84e
rdkit/ML/Descriptors/Parser.py
python
DEV
(strArg, composList, atomDict)
return accum / nSoFar
*Calculator Method* calculates the average deviation of a descriptor across a composition **Arguments** - strArg: the arguments in string form - compos: the composition vector - atomDict: the atomic dictionary **Returns** a float
*Calculator Method*
[ "*", "Calculator", "Method", "*" ]
def DEV(strArg, composList, atomDict): """ *Calculator Method* calculates the average deviation of a descriptor across a composition **Arguments** - strArg: the arguments in string form - compos: the composition vector - atomDict: the atomic dictionary **Returns** a float """ avg = MEAN(strArg, composList, atomDict) accum = 0.0 nSoFar = 0.0 for atom, num in composList: tStr = strArg.replace('DEADBEEF', atom) accum = accum + abs(eval(tStr) - avg) * num nSoFar = nSoFar + num return accum / nSoFar
[ "def", "DEV", "(", "strArg", ",", "composList", ",", "atomDict", ")", ":", "avg", "=", "MEAN", "(", "strArg", ",", "composList", ",", "atomDict", ")", "accum", "=", "0.0", "nSoFar", "=", "0.0", "for", "atom", ",", "num", "in", "composList", ":", "tStr", "=", "strArg", ".", "replace", "(", "'DEADBEEF'", ",", "atom", ")", "accum", "=", "accum", "+", "abs", "(", "eval", "(", "tStr", ")", "-", "avg", ")", "*", "num", "nSoFar", "=", "nSoFar", "+", "num", "return", "accum", "/", "nSoFar" ]
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/ML/Descriptors/Parser.py#L151-L176
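DEV above computes a composition-weighted mean absolute deviation by substituting each atom symbol into a descriptor expression and eval-ing it. A minimal sketch of the same arithmetic without the string/eval machinery, using a plain value function and a made-up property table:

```python
# Weighted mean and weighted mean absolute deviation over [(atom, count), ...].
def weighted_mean(value, compos):
    total = sum(num for _, num in compos)
    return sum(value(atom) * num for atom, num in compos) / total

def weighted_avg_deviation(value, compos):
    avg = weighted_mean(value, compos)
    total = sum(num for _, num in compos)
    return sum(abs(value(atom) - avg) * num for atom, num in compos) / total

mass = {"C": 12.011, "H": 1.008, "O": 15.999}   # illustrative values
compos = [("C", 2), ("H", 6), ("O", 1)]         # roughly ethanol
print(weighted_avg_deviation(mass.get, compos))
```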
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/distributions/python/ops/shape.py
python
_DistributionShape._is_all_constant_helper
(self, *args)
return all(tensor_util.constant_value(x) is not None for x in args)
Helper which returns True if all inputs are constant_value.
Helper which returns True if all inputs are constant_value.
[ "Helper", "which", "returns", "True", "if", "all", "inputs", "are", "constant_value", "." ]
def _is_all_constant_helper(self, *args): """Helper which returns True if all inputs are constant_value.""" return all(tensor_util.constant_value(x) is not None for x in args)
[ "def", "_is_all_constant_helper", "(", "self", ",", "*", "args", ")", ":", "return", "all", "(", "tensor_util", ".", "constant_value", "(", "x", ")", "is", "not", "None", "for", "x", "in", "args", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/distributions/python/ops/shape.py#L461-L463
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/vision/transforms/functional_cv2.py
python
crop
(img, top, left, height, width)
return img[top:top + height, left:left + width, :]
Crops the given image. Args: img (np.array): Image to be cropped. (0,0) denotes the top left corner of the image. top (int): Vertical component of the top left corner of the crop box. left (int): Horizontal component of the top left corner of the crop box. height (int): Height of the crop box. width (int): Width of the crop box. Returns: np.array: Cropped image.
Crops the given image.
[ "Crops", "the", "given", "image", "." ]
def crop(img, top, left, height, width): """Crops the given image. Args: img (np.array): Image to be cropped. (0,0) denotes the top left corner of the image. top (int): Vertical component of the top left corner of the crop box. left (int): Horizontal component of the top left corner of the crop box. height (int): Height of the crop box. width (int): Width of the crop box. Returns: np.array: Cropped image. """ return img[top:top + height, left:left + width, :]
[ "def", "crop", "(", "img", ",", "top", ",", "left", ",", "height", ",", "width", ")", ":", "return", "img", "[", "top", ":", "top", "+", "height", ",", "left", ":", "left", "+", "width", ",", ":", "]" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/vision/transforms/functional_cv2.py#L224-L240
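A quick usage check for the record above, assuming an HWC numpy array as the docstring describes:

```python
import numpy as np

def crop(img, top, left, height, width):
    return img[top:top + height, left:left + width, :]

img = np.arange(4 * 6 * 3).reshape(4, 6, 3)          # 4x6 image with 3 channels
patch = crop(img, top=1, left=2, height=2, width=3)
print(patch.shape)                                    # (2, 3, 3)
```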
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/profiler/internal/flops_registry.py
python
_list_product
(lst)
return result
Computes product of element of the list.
Computes product of element of the list.
[ "Computes", "product", "of", "element", "of", "the", "list", "." ]
def _list_product(lst): """Computes product of element of the list.""" result = 1 for item in lst: result *= item return result
[ "def", "_list_product", "(", "lst", ")", ":", "result", "=", "1", "for", "item", "in", "lst", ":", "result", "*=", "item", "return", "result" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/profiler/internal/flops_registry.py#L48-L53
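For the record above, the running product is the same as a multiply-reduce; a small self-check:

```python
from functools import reduce
import operator

def _list_product(lst):
    result = 1
    for item in lst:
        result *= item
    return result

assert _list_product([2, 3, 4]) == 24
assert _list_product([]) == 1                                         # empty product
assert _list_product([2, 3, 4]) == reduce(operator.mul, [2, 3, 4], 1)
```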
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/graph_editor/match.py
python
op_type
(op_types, op=None)
Check if an op is of the given type. Args: op_types: tuple of strings containing the types to check against. For instance: ("Add", "Const") op: the operation to check (or None). Returns: if op is not None, return True if the op is of the correct type. if op is None, return a lambda function which does the type checking.
Check if an op is of the given type.
[ "Check", "if", "an", "op", "is", "of", "the", "given", "type", "." ]
def op_type(op_types, op=None): """Check if an op is of the given type. Args: op_types: tuple of strings containing the types to check against. For instance: ("Add", "Const") op: the operation to check (or None). Returns: if op is not None, return True if the op is of the correct type. if op is None, return a lambda function which does the type checking. """ if isinstance(op_types, string_types): op_types = (op_types) if op is None: return lambda op: op.node_def.op in op_types else: return op.node_def.op in op_types
[ "def", "op_type", "(", "op_types", ",", "op", "=", "None", ")", ":", "if", "isinstance", "(", "op_types", ",", "string_types", ")", ":", "op_types", "=", "(", "op_types", ")", "if", "op", "is", "None", ":", "return", "lambda", "op", ":", "op", ".", "node_def", ".", "op", "in", "op_types", "else", ":", "return", "op", ".", "node_def", ".", "op", "in", "op_types" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/graph_editor/match.py#L36-L52
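op_type above follows a check-now-or-return-a-checker pattern: with op=None it hands back a predicate for later use. A generic sketch of that pattern without the TensorFlow graph types (all names below are illustrative):

```python
def type_is(type_names, obj=None):
    """Check obj's type name now, or return a predicate if obj is None."""
    if isinstance(type_names, str):
        type_names = (type_names,)   # the trailing comma makes this a 1-tuple
    if obj is None:
        return lambda o: type(o).__name__ in type_names
    return type(obj).__name__ in type_names

is_number = type_is(("int", "float"))
print(is_number(3), is_number("3"))   # True False
print(type_is("str", "hello"))        # True
```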
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/email/quoprimime.py
python
unquote
(s)
return chr(int(s[1:3], 16))
Turn a string in the form =AB to the ASCII character with value 0xab
Turn a string in the form =AB to the ASCII character with value 0xab
[ "Turn", "a", "string", "in", "the", "form", "=", "AB", "to", "the", "ASCII", "character", "with", "value", "0xab" ]
def unquote(s): """Turn a string in the form =AB to the ASCII character with value 0xab""" return chr(int(s[1:3], 16))
[ "def", "unquote", "(", "s", ")", ":", "return", "chr", "(", "int", "(", "s", "[", "1", ":", "3", "]", ",", "16", ")", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/email/quoprimime.py#L104-L106
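A short self-check for the quoted-printable helper above:

```python
def unquote(s):
    """Turn a string in the form =AB into the character with value 0xAB."""
    return chr(int(s[1:3], 16))

assert unquote("=41") == "A"
assert unquote("=3D") == "="
assert unquote("=20") == " "
```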
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/re.py
python
search
(pattern, string, flags=0)
return _compile(pattern, flags).search(string)
Scan through string looking for a match to the pattern, returning a Match object, or None if no match was found.
Scan through string looking for a match to the pattern, returning a Match object, or None if no match was found.
[ "Scan", "through", "string", "looking", "for", "a", "match", "to", "the", "pattern", "returning", "a", "Match", "object", "or", "None", "if", "no", "match", "was", "found", "." ]
def search(pattern, string, flags=0): """Scan through string looking for a match to the pattern, returning a Match object, or None if no match was found.""" return _compile(pattern, flags).search(string)
[ "def", "search", "(", "pattern", ",", "string", ",", "flags", "=", "0", ")", ":", "return", "_compile", "(", "pattern", ",", "flags", ")", ".", "search", "(", "string", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/re.py#L182-L185
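A usage example for the record above; re.search scans the whole string and yields a Match object or None:

```python
import re

m = re.search(r"(\d+)-(\d+)", "pages 42-58 of the report")
if m:
    print(m.group(0), m.group(1), m.group(2))   # 42-58 42 58

print(re.search(r"\d+", "no digits here"))      # None
```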
SpenceKonde/megaTinyCore
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
megaavr/tools/libs/pymcuprog/nvmserialupdi.py
python
NvmAccessProviderSerial.write
(self, memory_info, offset, data, blocksize=0, pagewrite_delay=0)
Write the memory with data :param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class :param offset: relative offset within the memory type :param data: the data to program :return: None
Write the memory with data
[ "Write", "the", "memory", "with", "data" ]
def write(self, memory_info, offset, data, blocksize=0, pagewrite_delay=0): """ Write the memory with data :param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class :param offset: relative offset within the memory type :param data: the data to program :return: None """ # Make sure the data is aligned to a memory page data_aligned, offset_aligned = utils.pagealign(data, offset, memory_info[DeviceMemoryInfoKeys.PAGE_SIZE], memory_info[DeviceMemoryInfoKeys.WRITE_SIZE]) memtype_string = memory_info[DeviceMemoryInfoKeys.NAME] offset_aligned += memory_info[DeviceMemoryInfoKeys.ADDRESS] if memtype_string in (MemoryNames.FLASH, MemoryNames.EEPROM, MemoryNames.FUSES): write_chunk_size = memory_info[DeviceMemoryInfoKeys.PAGE_SIZE] else: write_chunk_size = len(data_aligned) n_chunk = math.ceil(len(data_aligned)/write_chunk_size) bar = progress_bar.ProgressBar(n_chunk, hide=n_chunk == 1) while data_aligned: if len(data_aligned) < write_chunk_size: write_chunk_size = len(data_aligned) chunk = data_aligned[0:write_chunk_size] self.logger.debug("Writing %d bytes to address 0x%06X", write_chunk_size, offset_aligned) if memtype_string == MemoryNames.FUSES: self.avr.nvm.write_fuse(offset_aligned, chunk) elif memtype_string == MemoryNames.EEPROM: self.avr.nvm.write_eeprom(offset_aligned, chunk) else: # Spence Konde, 5/8/2021: # As far as I can tell, this is the only point where, we're writing a hex file, we know both the page size # AND are in the path of blocksize parameter. So - if its 0 or not given we should "do the old behavior", then # blocksize=2. The special value -1 tells us to have it write blocks equal to chunk/page size. Any other number # will be used as blocksize. Negative numbers beyond -1 were replaced with zero way at the beginning, as they would # result in crazy behavior and make everything fall over. # megaTinyCore and DxCore will always pass -1 as blocksize unless we find something where that doesn't work. # # Also, we are now finally in the section of the code specific to serialupdi. Up until we get here, 0 is the default # and if that's what we got, we omit it when making other calls, because there are almost certainly calls elsewhere # that. Now that we are here, the default value is 2 (ie, one word at a time) but that won'ty be something we see often. # # It strikes me that here is *ALSO* where we know whether we are on the first, a middle, or the last page. Say we # kept count of how many pages had been written already - if it was 0 and nChunk > 1, we would pass an argument that says # This is the first page we are writing, do all that stuff we need to do at the start of a bulk write. # if it was nChunk - 1, we would send a different value for that argumennt, saying it was the last one of a bulk write # so it should do the stuff to end the bulk write mode. And otherwise, it gets a third value that gets treated as # a signal to omit all of those. for the streamlined write protocol, which could improve performance by another 22-45% # If you agree, we should do that. # What we currently do is grossly inefficient, because (due to the penalty for small packets) we spend half of our time # for every page: Setting the address pointer (only need to do this at the beginning - when reading second and subsequent pages # the previous writes left the pointer at exactly the location we then set it to.). Setting NVM cmd to FLWR - only needs to be done # at the start of a bulk write, assuming we also stop setting NVM command to NOOP after every page. Setting RSD - if we # do all I'm talking about here, we can set it at start of bulk write. And we can juyst check for for NVM errors before # the first and after the last page, not before and after every page. My models suggest this should improve performance # by 22% at 115200 baud, and 44% and 345600 baud (which is 1.5x 230400 baud - and happens to be about the fastest you can # do a bulk write that is consistent with the datasheet flash write time spec. # # See also my comment below in read() - these two places are where we can achieve the last noticable performance leaps. # -Spence bulk = 1 if n_chunk == 1: #if omly one chunk, it is NOT a bulk write. bulk = 0 elif len(data_aligned) <= write_chunk_size: # We are on the last page of a bulk write bulk = 2 if blocksize == 0: self.avr.nvm.write_flash(offset_aligned, chunk, pagewrite_delay=pagewrite_delay) else: self.avr.nvm.write_flash(offset_aligned, chunk, blocksize=blocksize, bulkwrite = bulk, pagewrite_delay=pagewrite_delay) offset_aligned += write_chunk_size data_aligned = data_aligned[write_chunk_size:] bar.step()
[ "def", "write", "(", "self", ",", "memory_info", ",", "offset", ",", "data", ",", "blocksize", "=", "0", ",", "pagewrite_delay", "=", "0", ")", ":", "# Make sure the data is aligned to a memory page", "data_aligned", ",", "offset_aligned", "=", "utils", ".", "pagealign", "(", "data", ",", "offset", ",", "memory_info", "[", "DeviceMemoryInfoKeys", ".", "PAGE_SIZE", "]", ",", "memory_info", "[", "DeviceMemoryInfoKeys", ".", "WRITE_SIZE", "]", ")", "memtype_string", "=", "memory_info", "[", "DeviceMemoryInfoKeys", ".", "NAME", "]", "offset_aligned", "+=", "memory_info", "[", "DeviceMemoryInfoKeys", ".", "ADDRESS", "]", "if", "memtype_string", "in", "(", "MemoryNames", ".", "FLASH", ",", "MemoryNames", ".", "EEPROM", ",", "MemoryNames", ".", "FUSES", ")", ":", "write_chunk_size", "=", "memory_info", "[", "DeviceMemoryInfoKeys", ".", "PAGE_SIZE", "]", "else", ":", "write_chunk_size", "=", "len", "(", "data_aligned", ")", "n_chunk", "=", "math", ".", "ceil", "(", "len", "(", "data_aligned", ")", "/", "write_chunk_size", ")", "bar", "=", "progress_bar", ".", "ProgressBar", "(", "n_chunk", ",", "hide", "=", "n_chunk", "==", "1", ")", "while", "data_aligned", ":", "if", "len", "(", "data_aligned", ")", "<", "write_chunk_size", ":", "write_chunk_size", "=", "len", "(", "data_aligned", ")", "chunk", "=", "data_aligned", "[", "0", ":", "write_chunk_size", "]", "self", ".", "logger", ".", "debug", "(", "\"Writing %d bytes to address 0x%06X\"", ",", "write_chunk_size", ",", "offset_aligned", ")", "if", "memtype_string", "==", "MemoryNames", ".", "FUSES", ":", "self", ".", "avr", ".", "nvm", ".", "write_fuse", "(", "offset_aligned", ",", "chunk", ")", "elif", "memtype_string", "==", "MemoryNames", ".", "EEPROM", ":", "self", ".", "avr", ".", "nvm", ".", "write_eeprom", "(", "offset_aligned", ",", "chunk", ")", "else", ":", "# Spence Konde, 5/8/2021:", "# As far as I can tell, this is the only point where, we're writing a hex file, we know both the page size", "# AND are in the path of blocksize parameter. So - if its 0 or not given we should \"do the old behavior\", then", "# blocksize=2. The special value -1 tells us to have it write blocks equal to chunk/page size. Any other number", "# will be used as blocksize. Negative numbers beyond -1 were replaced with zero way at the beginning, as they would", "# result in crazy behavior and make everything fall over.", "# megaTinyCore and DxCore will always pass -1 as blocksize unless we find something where that doesn't work.", "#", "# Also, we are now finally in the section of the code specific to serialupdi. Up until we get here, 0 is the default", "# and if that's what we got, we omit it when making other calls, because there are almost certainly calls elsewhere", "# that. Now that we are here, the default value is 2 (ie, one word at a time) but that won'ty be something we see often.", "#", "# It strikes me that here is *ALSO* where we know whether we are on the first, a middle, or the last page. Say we", "# kept count of how many pages had been written already - if it was 0 and nChunk > 1, we would pass an argument that says", "# This is the first page we are writing, do all that stuff we need to do at the start of a bulk write.", "# if it was nChunk - 1, we would send a different value for that argumennt, saying it was the last one of a bulk write", "# so it should do the stuff to end the bulk write mode. And otherwise, it gets a third value that gets treated as", "# a signal to omit all of those. 
for the streamlined write protocol, which could improve performance by another 22-45%", "# If you agree, we should do that.", "# What we currently do is grossly inefficient, because (due to the penalty for small packets) we spend half of our time", "# for every page: Setting the address pointer (only need to do this at the beginning - when reading second and subsequent pages", "# the previous writes left the pointer at exactly the location we then set it to.). Setting NVM cmd to FLWR - only needs to be done", "# at the start of a bulk write, assuming we also stop setting NVM command to NOOP after every page. Setting RSD - if we", "# do all I'm talking about here, we can set it at start of bulk write. And we can juyst check for for NVM errors before", "# the first and after the last page, not before and after every page. My models suggest this should improve performance", "# by 22% at 115200 baud, and 44% and 345600 baud (which is 1.5x 230400 baud - and happens to be about the fastest you can", "# do a bulk write that is consistent with the datasheet flash write time spec.", "#", "# See also my comment below in read() - these two places are where we can achieve the last noticable performance leaps.", "# -Spence", "bulk", "=", "1", "if", "n_chunk", "==", "1", ":", "#if omly one chunk, it is NOT a bulk write.", "bulk", "=", "0", "elif", "len", "(", "data_aligned", ")", "<=", "write_chunk_size", ":", "# We are on the last page of a bulk write", "bulk", "=", "2", "if", "blocksize", "==", "0", ":", "self", ".", "avr", ".", "nvm", ".", "write_flash", "(", "offset_aligned", ",", "chunk", ",", "pagewrite_delay", "=", "pagewrite_delay", ")", "else", ":", "self", ".", "avr", ".", "nvm", ".", "write_flash", "(", "offset_aligned", ",", "chunk", ",", "blocksize", "=", "blocksize", ",", "bulkwrite", "=", "bulk", ",", "pagewrite_delay", "=", "pagewrite_delay", ")", "offset_aligned", "+=", "write_chunk_size", "data_aligned", "=", "data_aligned", "[", "write_chunk_size", ":", "]", "bar", ".", "step", "(", ")" ]
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pymcuprog/nvmserialupdi.py#L100-L178
msitt/blpapi-python
bebcf43668c9e5f5467b1f685f9baebbfc45bc87
src/blpapi/schema.py
python
SchemaTypeDefinition.datatype
(self)
return internals.blpapi_SchemaTypeDefinition_datatype(self.__handle)
Returns: int: The data type of this :class:`SchemaTypeDefinition`. The possible return values are enumerated in :class:`DataType`.
Returns: int: The data type of this :class:`SchemaTypeDefinition`.
[ "Returns", ":", "int", ":", "The", "data", "type", "of", "this", ":", "class", ":", "SchemaTypeDefinition", "." ]
def datatype(self): """ Returns: int: The data type of this :class:`SchemaTypeDefinition`. The possible return values are enumerated in :class:`DataType`. """ return internals.blpapi_SchemaTypeDefinition_datatype(self.__handle)
[ "def", "datatype", "(", "self", ")", ":", "return", "internals", ".", "blpapi_SchemaTypeDefinition_datatype", "(", "self", ".", "__handle", ")" ]
https://github.com/msitt/blpapi-python/blob/bebcf43668c9e5f5467b1f685f9baebbfc45bc87/src/blpapi/schema.py#L245-L253
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/Tools/c_config.py
python
is_defined
(self, key)
return False
:param key: define name :type key: string :return: True if the define is set :rtype: bool
:param key: define name :type key: string :return: True if the define is set :rtype: bool
[ ":", "param", "key", ":", "define", "name", ":", "type", "key", ":", "string", ":", "return", ":", "True", "if", "the", "define", "is", "set", ":", "rtype", ":", "bool" ]
def is_defined(self, key): """ :param key: define name :type key: string :return: True if the define is set :rtype: bool """ assert key and isinstance(key, str) ban = key + '=' for x in self.env['DEFINES']: if x.startswith(ban): return True return False
[ "def", "is_defined", "(", "self", ",", "key", ")", ":", "assert", "key", "and", "isinstance", "(", "key", ",", "str", ")", "ban", "=", "key", "+", "'='", "for", "x", "in", "self", ".", "env", "[", "'DEFINES'", "]", ":", "if", "x", ".", "startswith", "(", "ban", ")", ":", "return", "True", "return", "False" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Tools/c_config.py#L875-L888
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py
python
Wm.wm_deiconify
(self)
return self.tk.call('wm', 'deiconify', self._w)
Deiconify this widget. If it was never mapped it will not be mapped. On Windows it will raise this widget and give it the focus.
Deiconify this widget. If it was never mapped it will not be mapped. On Windows it will raise this widget and give it the focus.
[ "Deiconify", "this", "widget", ".", "If", "it", "was", "never", "mapped", "it", "will", "not", "be", "mapped", ".", "On", "Windows", "it", "will", "raise", "this", "widget", "and", "give", "it", "the", "focus", "." ]
def wm_deiconify(self): """Deiconify this widget. If it was never mapped it will not be mapped. On Windows it will raise this widget and give it the focus.""" return self.tk.call('wm', 'deiconify', self._w)
[ "def", "wm_deiconify", "(", "self", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "'wm'", ",", "'deiconify'", ",", "self", ".", "_w", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py#L1576-L1579
apache/singa
93fd9da72694e68bfe3fb29d0183a65263d238a1
python/singa/model.py
python
Model.save_states
(self, fpath, aux_states={})
Save states. Args: fpath: output file path (without the extension) aux_states(dict): values are standard data types or Tensor, e.g., epoch ID, learning rate, optimizer states
Save states.
[ "Save", "states", "." ]
def save_states(self, fpath, aux_states={}): """Save states. Args: fpath: output file path (without the extension) aux_states(dict): values are standard data types or Tensor, e.g., epoch ID, learning rate, optimizer states """ assert not os.path.isfile(fpath), ( "Failed to save states, %s is already existed." % fpath) states = self.get_states() # save states data and attr tensor_dict = {} states_attr = {} for k, v in states.items(): assert isinstance(v, tensor.Tensor), "Only tensor state is allowed" tensor_dict[k] = tensor.to_numpy(v) states_attr[k] = { 'state_type': self.MODEL_STATE_TYPE, 'shape': v.shape, 'dtype': v.dtype } for k, v in aux_states.items(): assert isinstance(v, tensor.Tensor), "Only tensor aux state is allowed" tensor_dict[k] = tensor.to_numpy(v) states_attr[k] = { 'state_type': self.AUX_STATE_TYPE, 'shape': v.shape, 'dtype': v.dtype } # save to files timestamp = time.time() tmp_dir = '/tmp/singa_save_states_%s' % timestamp os.mkdir(tmp_dir) tensor_dict_fp = tmp_dir + self.TENSOR_DICT_FILENAME states_attr_fp = tmp_dir + self.STATES_ATTR_FILENAME np.savez(tensor_dict_fp, **tensor_dict) with open(states_attr_fp, 'w') as fp: json.dump(states_attr, fp) compression = zipfile.ZIP_DEFLATED with zipfile.ZipFile(fpath, mode="w") as zf: zf.write(tensor_dict_fp, os.path.basename(tensor_dict_fp), compress_type=compression) zf.write(states_attr_fp, os.path.basename(states_attr_fp), compress_type=compression) # clean up tmp files os.remove(tensor_dict_fp) os.remove(states_attr_fp) os.rmdir(tmp_dir)
[ "def", "save_states", "(", "self", ",", "fpath", ",", "aux_states", "=", "{", "}", ")", ":", "assert", "not", "os", ".", "path", ".", "isfile", "(", "fpath", ")", ",", "(", "\"Failed to save states, %s is already existed.\"", "%", "fpath", ")", "states", "=", "self", ".", "get_states", "(", ")", "# save states data and attr", "tensor_dict", "=", "{", "}", "states_attr", "=", "{", "}", "for", "k", ",", "v", "in", "states", ".", "items", "(", ")", ":", "assert", "isinstance", "(", "v", ",", "tensor", ".", "Tensor", ")", ",", "\"Only tensor state is allowed\"", "tensor_dict", "[", "k", "]", "=", "tensor", ".", "to_numpy", "(", "v", ")", "states_attr", "[", "k", "]", "=", "{", "'state_type'", ":", "self", ".", "MODEL_STATE_TYPE", ",", "'shape'", ":", "v", ".", "shape", ",", "'dtype'", ":", "v", ".", "dtype", "}", "for", "k", ",", "v", "in", "aux_states", ".", "items", "(", ")", ":", "assert", "isinstance", "(", "v", ",", "tensor", ".", "Tensor", ")", ",", "\"Only tensor aux state is allowed\"", "tensor_dict", "[", "k", "]", "=", "tensor", ".", "to_numpy", "(", "v", ")", "states_attr", "[", "k", "]", "=", "{", "'state_type'", ":", "self", ".", "AUX_STATE_TYPE", ",", "'shape'", ":", "v", ".", "shape", ",", "'dtype'", ":", "v", ".", "dtype", "}", "# save to files", "timestamp", "=", "time", ".", "time", "(", ")", "tmp_dir", "=", "'/tmp/singa_save_states_%s'", "%", "timestamp", "os", ".", "mkdir", "(", "tmp_dir", ")", "tensor_dict_fp", "=", "tmp_dir", "+", "self", ".", "TENSOR_DICT_FILENAME", "states_attr_fp", "=", "tmp_dir", "+", "self", ".", "STATES_ATTR_FILENAME", "np", ".", "savez", "(", "tensor_dict_fp", ",", "*", "*", "tensor_dict", ")", "with", "open", "(", "states_attr_fp", ",", "'w'", ")", "as", "fp", ":", "json", ".", "dump", "(", "states_attr", ",", "fp", ")", "compression", "=", "zipfile", ".", "ZIP_DEFLATED", "with", "zipfile", ".", "ZipFile", "(", "fpath", ",", "mode", "=", "\"w\"", ")", "as", "zf", ":", "zf", ".", "write", "(", "tensor_dict_fp", ",", "os", ".", "path", ".", "basename", "(", "tensor_dict_fp", ")", ",", "compress_type", "=", "compression", ")", "zf", ".", "write", "(", "states_attr_fp", ",", "os", ".", "path", ".", "basename", "(", "states_attr_fp", ")", ",", "compress_type", "=", "compression", ")", "# clean up tmp files", "os", ".", "remove", "(", "tensor_dict_fp", ")", "os", ".", "remove", "(", "states_attr_fp", ")", "os", ".", "rmdir", "(", "tmp_dir", ")" ]
https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/python/singa/model.py#L244-L303
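save_states above bundles tensor data (an .npz archive) and a JSON attribute file into a single zip. The sketch below strips that pattern down to plain numpy arrays; the helper name and file names are illustrative, not the SINGA API.

```python
import json
import os
import tempfile
import zipfile

import numpy as np

def save_states_sketch(fpath, arrays):
    """Write arrays to an .npz plus a JSON attribute file, then zip both into fpath."""
    attrs = {k: {"shape": list(v.shape), "dtype": str(v.dtype)} for k, v in arrays.items()}
    tmp_dir = tempfile.mkdtemp()
    npz_path = os.path.join(tmp_dir, "tensor_dict.npz")
    json_path = os.path.join(tmp_dir, "states_attr.json")
    np.savez(npz_path, **arrays)
    with open(json_path, "w") as fp:
        json.dump(attrs, fp)
    with zipfile.ZipFile(fpath, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
        zf.write(npz_path, os.path.basename(npz_path))
        zf.write(json_path, os.path.basename(json_path))
    os.remove(npz_path)
    os.remove(json_path)
    os.rmdir(tmp_dir)

save_states_sketch("states.zip", {"w": np.zeros((2, 3), dtype=np.float32)})
```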
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/Node.py
python
Node.nice_path
(self, env=None)
return self.path_from(self.ctx.launch_node())
Return the path seen from the launch directory. Can be used for opening files easily (copy-paste in the console).
Return the path seen from the launch directory. Can be used for opening files easily (copy-paste in the console).
[ "Return", "the", "path", "seen", "from", "the", "launch", "directory", ".", "Can", "be", "used", "for", "opening", "files", "easily", "(", "copy", "-", "paste", "in", "the", "console", ")", "." ]
def nice_path(self, env=None): """ Return the path seen from the launch directory. Can be used for opening files easily (copy-paste in the console). """ return self.path_from(self.ctx.launch_node())
[ "def", "nice_path", "(", "self", ",", "env", "=", "None", ")", ":", "return", "self", ".", "path_from", "(", "self", ".", "ctx", ".", "launch_node", "(", ")", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Node.py#L759-L764
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/protobuf/py3/google/protobuf/service_reflection.py
python
GeneratedServiceStubType.__init__
(cls, name, bases, dictionary)
Creates a message service stub class. Args: name: Name of the class (ignored, here). bases: Base classes of the class being constructed. dictionary: The class dictionary of the class being constructed. dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object describing this protocol service type.
Creates a message service stub class.
[ "Creates", "a", "message", "service", "stub", "class", "." ]
def __init__(cls, name, bases, dictionary): """Creates a message service stub class. Args: name: Name of the class (ignored, here). bases: Base classes of the class being constructed. dictionary: The class dictionary of the class being constructed. dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object describing this protocol service type. """ super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) # Don't do anything if this class doesn't have a descriptor. This happens # when a service stub is subclassed. if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: return descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] service_stub_builder = _ServiceStubBuilder(descriptor) service_stub_builder.BuildServiceStub(cls)
[ "def", "__init__", "(", "cls", ",", "name", ",", "bases", ",", "dictionary", ")", ":", "super", "(", "GeneratedServiceStubType", ",", "cls", ")", ".", "__init__", "(", "name", ",", "bases", ",", "dictionary", ")", "# Don't do anything if this class doesn't have a descriptor. This happens", "# when a service stub is subclassed.", "if", "GeneratedServiceStubType", ".", "_DESCRIPTOR_KEY", "not", "in", "dictionary", ":", "return", "descriptor", "=", "dictionary", "[", "GeneratedServiceStubType", ".", "_DESCRIPTOR_KEY", "]", "service_stub_builder", "=", "_ServiceStubBuilder", "(", "descriptor", ")", "service_stub_builder", ".", "BuildServiceStub", "(", "cls", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/service_reflection.py#L96-L114
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/framework/ops.py
python
_eval_using_default_session
(tensors, feed_dict, graph, session=None)
return session.run(tensors, feed_dict)
Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph.
Uses the default session to evaluate one or more tensors.
[ "Uses", "the", "default", "session", "to", "evaluate", "one", "or", "more", "tensors", "." ]
def _eval_using_default_session(tensors, feed_dict, graph, session=None): """Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict)
[ "def", "_eval_using_default_session", "(", "tensors", ",", "feed_dict", ",", "graph", ",", "session", "=", "None", ")", ":", "if", "session", "is", "None", ":", "session", "=", "get_default_session", "(", ")", "if", "session", "is", "None", ":", "raise", "ValueError", "(", "\"Cannot evaluate tensor using `eval()`: No default \"", "\"session is registered. Use `with \"", "\"sess.as_default()` or pass an explicit session to \"", "\"`eval(session=sess)`\"", ")", "if", "session", ".", "graph", "is", "not", "graph", ":", "raise", "ValueError", "(", "\"Cannot use the default session to evaluate tensor: \"", "\"the tensor's graph is different from the session's \"", "\"graph. Pass an explicit session to \"", "\"`eval(session=sess)`.\"", ")", "else", ":", "if", "session", ".", "graph", "is", "not", "graph", ":", "raise", "ValueError", "(", "\"Cannot use the given session to evaluate tensor: \"", "\"the tensor's graph is different from the session's \"", "\"graph.\"", ")", "return", "session", ".", "run", "(", "tensors", ",", "feed_dict", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/framework/ops.py#L5735-L5772
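A hedged usage sketch of the default-session path that `_eval_using_default_session` backs, assuming TensorFlow 2.x with the v1 compatibility layer enabled:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([1.0, 2.0, 3.0])
y = x * 2.0
with tf.Session() as sess:       # the `with` block registers a default session
    print(y.eval())              # resolved through the default-session lookup above
    print(y.eval(session=sess))  # explicit session; the same graph check applies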
dmlc/xgboost
2775c2a1abd4b5b759ff517617434c8b9aeb4cc0
demo/dask/gpu_training.py
python
using_quantile_device_dmatrix
(client: Client, X, y)
return prediction
`DaskDeviceQuantileDMatrix` is a data type specialized for `gpu_hist`, tree method that reduces memory overhead. When training on GPU pipeline, it's preferred over `DaskDMatrix`. .. versionadded:: 1.2.0
`DaskDeviceQuantileDMatrix` is a data type specialized for `gpu_hist`, tree method that reduces memory overhead. When training on GPU pipeline, it's preferred over `DaskDMatrix`.
[ "DaskDeviceQuantileDMatrix", "is", "a", "data", "type", "specialized", "for", "gpu_hist", "tree", "method", "that", "reduces", "memory", "overhead", ".", "When", "training", "on", "GPU", "pipeline", "it", "s", "preferred", "over", "DaskDMatrix", "." ]
def using_quantile_device_dmatrix(client: Client, X, y): '''`DaskDeviceQuantileDMatrix` is a data type specialized for `gpu_hist`, tree method that reduces memory overhead. When training on GPU pipeline, it's preferred over `DaskDMatrix`. .. versionadded:: 1.2.0 ''' # Input must be on GPU for `DaskDeviceQuantileDMatrix`. X = X.map_blocks(cp.array) y = y.map_blocks(cp.array) # `DaskDeviceQuantileDMatrix` is used instead of `DaskDMatrix`, be careful # that it can not be used for anything else than training. dtrain = dxgb.DaskDeviceQuantileDMatrix(client, X, y) output = xgb.dask.train(client, {'verbosity': 2, 'tree_method': 'gpu_hist'}, dtrain, num_boost_round=4) prediction = xgb.dask.predict(client, output, X) return prediction
[ "def", "using_quantile_device_dmatrix", "(", "client", ":", "Client", ",", "X", ",", "y", ")", ":", "# Input must be on GPU for `DaskDeviceQuantileDMatrix`.", "X", "=", "X", ".", "map_blocks", "(", "cp", ".", "array", ")", "y", "=", "y", ".", "map_blocks", "(", "cp", ".", "array", ")", "# `DaskDeviceQuantileDMatrix` is used instead of `DaskDMatrix`, be careful", "# that it can not be used for anything else than training.", "dtrain", "=", "dxgb", ".", "DaskDeviceQuantileDMatrix", "(", "client", ",", "X", ",", "y", ")", "output", "=", "xgb", ".", "dask", ".", "train", "(", "client", ",", "{", "'verbosity'", ":", "2", ",", "'tree_method'", ":", "'gpu_hist'", "}", ",", "dtrain", ",", "num_boost_round", "=", "4", ")", "prediction", "=", "xgb", ".", "dask", ".", "predict", "(", "client", ",", "output", ",", "X", ")", "return", "prediction" ]
https://github.com/dmlc/xgboost/blob/2775c2a1abd4b5b759ff517617434c8b9aeb4cc0/demo/dask/gpu_training.py#L39-L61
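A hedged driver for the demo above; `LocalCUDACluster` from the dask_cuda package is an assumption (one worker per visible GPU), and the random arrays are synthetic placeholders:

import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster  # assumed dependency, not part of the demo itself

if __name__ == '__main__':
    with LocalCUDACluster() as cluster, Client(cluster) as client:
        X = da.random.random((100_000, 20), chunks=(10_000, 20))
        y = da.random.random((100_000,), chunks=(10_000,))
        prediction = using_quantile_device_dmatrix(client, X, y)
        print(prediction.compute()[:5])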
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/linalg_ops.py
python
_RegularizedGramianCholesky
(matrix, l2_regularizer, first_kind)
return gen_linalg_ops.cholesky(gramian)
r"""Computes Cholesky factorization of regularized gramian matrix. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: `matrix`=\\(A \in \Re^{m \times n}\\), `output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\), `l2_regularizer`=\\(\lambda\\). If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A^H A + \lambda I\\). If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A A^H + \lambda I\\). Args: matrix: `Tensor` of shape `[..., M, N]`. l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`. first_kind: bool. Controls what gramian matrix to factor. Returns: output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2 dimensions contain the Cholesky factors \\(L\\) described above.
r"""Computes Cholesky factorization of regularized gramian matrix.
[ "r", "Computes", "Cholesky", "factorization", "of", "regularized", "gramian", "matrix", "." ]
def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind): r"""Computes Cholesky factorization of regularized gramian matrix. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: `matrix`=\\(A \in \Re^{m \times n}\\), `output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\), `l2_regularizer`=\\(\lambda\\). If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A^H A + \lambda I\\). If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A A^H + \lambda I\\). Args: matrix: `Tensor` of shape `[..., M, N]`. l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`. first_kind: bool. Controls what gramian matrix to factor. Returns: output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2 dimensions contain the Cholesky factors \\(L\\) described above. """ gramian = math_ops.matmul( matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind) if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0: matrix_shape = array_ops.shape(matrix) batch_shape = matrix_shape[:-2] if first_kind: small_dim = matrix_shape[-1] else: small_dim = matrix_shape[-2] identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype) small_dim_static = matrix.shape[-1 if first_kind else -2] identity.set_shape( matrix.shape[:-2].concatenate([small_dim_static, small_dim_static])) gramian += l2_regularizer * identity return gen_linalg_ops.cholesky(gramian)
[ "def", "_RegularizedGramianCholesky", "(", "matrix", ",", "l2_regularizer", ",", "first_kind", ")", ":", "gramian", "=", "math_ops", ".", "matmul", "(", "matrix", ",", "matrix", ",", "adjoint_a", "=", "first_kind", ",", "adjoint_b", "=", "not", "first_kind", ")", "if", "isinstance", "(", "l2_regularizer", ",", "ops", ".", "Tensor", ")", "or", "l2_regularizer", "!=", "0", ":", "matrix_shape", "=", "array_ops", ".", "shape", "(", "matrix", ")", "batch_shape", "=", "matrix_shape", "[", ":", "-", "2", "]", "if", "first_kind", ":", "small_dim", "=", "matrix_shape", "[", "-", "1", "]", "else", ":", "small_dim", "=", "matrix_shape", "[", "-", "2", "]", "identity", "=", "eye", "(", "small_dim", ",", "batch_shape", "=", "batch_shape", ",", "dtype", "=", "matrix", ".", "dtype", ")", "small_dim_static", "=", "matrix", ".", "shape", "[", "-", "1", "if", "first_kind", "else", "-", "2", "]", "identity", ".", "set_shape", "(", "matrix", ".", "shape", "[", ":", "-", "2", "]", ".", "concatenate", "(", "[", "small_dim_static", ",", "small_dim_static", "]", ")", ")", "gramian", "+=", "l2_regularizer", "*", "identity", "return", "gen_linalg_ops", ".", "cholesky", "(", "gramian", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/linalg_ops.py#L39-L77
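A small NumPy check of the factorizations the docstring describes (real-valued case, so the conjugate transpose reduces to the transpose); this is a verification sketch, not the TensorFlow implementation:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 3))
lam = 0.1

# First kind: L L^T = A^T A + lam * I
L = np.linalg.cholesky(A.T @ A + lam * np.eye(3))
assert np.allclose(L @ L.T, A.T @ A + lam * np.eye(3))

# Second kind: L L^T = A A^T + lam * I
L2 = np.linalg.cholesky(A @ A.T + lam * np.eye(6))
assert np.allclose(L2 @ L2.T, A @ A.T + lam * np.eye(6))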
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/pathlib.py
python
PurePath.parent
(self)
return self._from_parsed_parts(drv, root, parts[:-1])
The logical parent of the path.
The logical parent of the path.
[ "The", "logical", "parent", "of", "the", "path", "." ]
def parent(self): """The logical parent of the path.""" drv = self._drv root = self._root parts = self._parts if len(parts) == 1 and (drv or root): return self return self._from_parsed_parts(drv, root, parts[:-1])
[ "def", "parent", "(", "self", ")", ":", "drv", "=", "self", ".", "_drv", "root", "=", "self", ".", "_root", "parts", "=", "self", ".", "_parts", "if", "len", "(", "parts", ")", "==", "1", "and", "(", "drv", "or", "root", ")", ":", "return", "self", "return", "self", ".", "_from_parsed_parts", "(", "drv", ",", "root", ",", "parts", "[", ":", "-", "1", "]", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/pathlib.py#L931-L938
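A few concrete cases of the `parent` behaviour above, including the early return when the path is only an anchor:

from pathlib import PurePosixPath

assert PurePosixPath('/usr/local/bin').parent == PurePosixPath('/usr/local')
assert PurePosixPath('setup.py').parent == PurePosixPath('.')  # relative path
assert PurePosixPath('/').parent == PurePosixPath('/')         # anchor is its own parent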
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/controlflow.py
python
CFGraph.dump
(self, file=None)
Dump extensive debug information.
Dump extensive debug information.
[ "Dump", "extensive", "debug", "information", "." ]
def dump(self, file=None): """ Dump extensive debug information. """ import pprint file = file or sys.stdout if 1: print("CFG adjacency lists:", file=file) self._dump_adj_lists(file) print("CFG dominators:", file=file) pprint.pprint(self._doms, stream=file) print("CFG post-dominators:", file=file) pprint.pprint(self._post_doms, stream=file) print("CFG back edges:", sorted(self._back_edges), file=file) print("CFG loops:", file=file) pprint.pprint(self._loops, stream=file) print("CFG node-to-loops:", file=file) pprint.pprint(self._in_loops, stream=file) print("CFG backbone:", file=file) pprint.pprint(self.backbone(), stream=file)
[ "def", "dump", "(", "self", ",", "file", "=", "None", ")", ":", "import", "pprint", "file", "=", "file", "or", "sys", ".", "stdout", "if", "1", ":", "print", "(", "\"CFG adjacency lists:\"", ",", "file", "=", "file", ")", "self", ".", "_dump_adj_lists", "(", "file", ")", "print", "(", "\"CFG dominators:\"", ",", "file", "=", "file", ")", "pprint", ".", "pprint", "(", "self", ".", "_doms", ",", "stream", "=", "file", ")", "print", "(", "\"CFG post-dominators:\"", ",", "file", "=", "file", ")", "pprint", ".", "pprint", "(", "self", ".", "_post_doms", ",", "stream", "=", "file", ")", "print", "(", "\"CFG back edges:\"", ",", "sorted", "(", "self", ".", "_back_edges", ")", ",", "file", "=", "file", ")", "print", "(", "\"CFG loops:\"", ",", "file", "=", "file", ")", "pprint", ".", "pprint", "(", "self", ".", "_loops", ",", "stream", "=", "file", ")", "print", "(", "\"CFG node-to-loops:\"", ",", "file", "=", "file", ")", "pprint", ".", "pprint", "(", "self", ".", "_in_loops", ",", "stream", "=", "file", ")", "print", "(", "\"CFG backbone:\"", ",", "file", "=", "file", ")", "pprint", ".", "pprint", "(", "self", ".", "backbone", "(", ")", ",", "stream", "=", "file", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/controlflow.py#L258-L277
0ad/0ad
f58db82e0e925016d83f4e3fa7ca599e3866e2af
source/tools/fontbuilder2/Packer.py
python
Point.__cmp__
(self, other)
return self.x - other.x
Compares the starting position of height slices
Compares the starting position of height slices
[ "Compares", "the", "starting", "position", "of", "height", "slices" ]
def __cmp__(self, other): """Compares the starting position of height slices""" return self.x - other.x
[ "def", "__cmp__", "(", "self", ",", "other", ")", ":", "return", "self", ".", "x", "-", "other", ".", "x" ]
https://github.com/0ad/0ad/blob/f58db82e0e925016d83f4e3fa7ca599e3866e2af/source/tools/fontbuilder2/Packer.py#L28-L30
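`__cmp__` is only consulted by Python 2; a Python 3 sketch of the same ordering (an illustrative class, not the repo's `Point`) sorts height slices by their starting x coordinate:

import functools

@functools.total_ordering
class Slice:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __eq__(self, other):
        return self.x == other.x
    def __lt__(self, other):
        return self.x < other.x

assert sorted([Slice(5, 0), Slice(2, 0)])[0].x == 2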
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/buildscripts/idl/idl/generator.py
python
_CppFileWriterBase.gen_namespace_block
(self, namespace)
return writer.NamespaceScopeBlock(self._writer, namespace_list)
Generate a namespace block.
Generate a namespace block.
[ "Generate", "a", "namespace", "block", "." ]
def gen_namespace_block(self, namespace): # type: (unicode) -> writer.NamespaceScopeBlock """Generate a namespace block.""" namespace_list = namespace.split("::") return writer.NamespaceScopeBlock(self._writer, namespace_list)
[ "def", "gen_namespace_block", "(", "self", ",", "namespace", ")", ":", "# type: (unicode) -> writer.NamespaceScopeBlock", "namespace_list", "=", "namespace", ".", "split", "(", "\"::\"", ")", "return", "writer", ".", "NamespaceScopeBlock", "(", "self", ".", "_writer", ",", "namespace_list", ")" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/idl/idl/generator.py#L302-L307
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_vim.py
python
EditraCommander.MoveDown
(self, repeat=1)
Move caret down @keyword repeat: int
Move caret down @keyword repeat: int
[ "Move", "caret", "down", "@keyword", "repeat", ":", "int" ]
def MoveDown(self, repeat=1): """Move caret down @keyword repeat: int """ for i in range(repeat): self.stc.LineDown()
[ "def", "MoveDown", "(", "self", ",", "repeat", "=", "1", ")", ":", "for", "i", "in", "range", "(", "repeat", ")", ":", "self", ".", "stc", ".", "LineDown", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_vim.py#L338-L344
okex/V3-Open-API-SDK
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/distlib/locators.py
python
Locator.get_project
(self, name)
return result
For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top.
For a given project, get a dictionary mapping available versions to Distribution instances.
[ "For", "a", "given", "project", "get", "a", "dictionary", "mapping", "available", "versions", "to", "Distribution", "instances", "." ]
def get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. """ if self._cache is None: # pragma: no cover result = self._get_project(name) elif name in self._cache: result = self._cache[name] else: self.clear_errors() result = self._get_project(name) self._cache[name] = result return result
[ "def", "get_project", "(", "self", ",", "name", ")", ":", "if", "self", ".", "_cache", "is", "None", ":", "# pragma: no cover", "result", "=", "self", ".", "_get_project", "(", "name", ")", "elif", "name", "in", "self", ".", "_cache", ":", "result", "=", "self", ".", "_cache", "[", "name", "]", "else", ":", "self", ".", "clear_errors", "(", ")", "result", "=", "self", ".", "_get_project", "(", "name", ")", "self", ".", "_cache", "[", "name", "]", "=", "result", "return", "result" ]
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/distlib/locators.py#L171-L186
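A hedged usage sketch: distlib's `locate()` convenience function sits on top of the caching `get_project()` shown above (network access required; the package name is just an example):

from distlib.locators import locate

dist = locate('requests')
if dist is not None:
    print(dist.name, dist.version)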
floatlazer/semantic_slam
657814a1ba484de6b7f6f9d07c564566c8121f13
semantic_cloud/train_cnn/train.py
python
train
(args)
# freeze all parameters except for final classification if doing fine tuning if do_finetuning: for param in model.parameters(): param.requires_grad = False for param in model.classification.parameters(): param.requires_grad = True for param in model.cbr_final.parameters(): param.requires_grad = True
# freeze all parameters except for final classification if doing fine tuning if do_finetuning: for param in model.parameters(): param.requires_grad = False for param in model.classification.parameters(): param.requires_grad = True for param in model.cbr_final.parameters(): param.requires_grad = True
[ "#", "freeze", "all", "parameters", "except", "for", "final", "classification", "if", "doing", "fine", "tuning", "if", "do_finetuning", ":", "for", "param", "in", "model", ".", "parameters", "()", ":", "param", ".", "requires_grad", "=", "False", "for", "param", "in", "model", ".", "classification", ".", "parameters", "()", ":", "param", ".", "requires_grad", "=", "True", "for", "param", "in", "model", ".", "cbr_final", ".", "parameters", "()", ":", "param", ".", "requires_grad", "=", "True" ]
def train(args): do_finetuning = True data_parallel = False # whether split a batch on multiple GPUs to accelerate if data_parallel: print('Using data parallel.') device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") # use GPU1 if train on one GPU # Setup Augmentations data_aug= Compose([RandomSized(args.img_rows), RandomHorizontallyFlip(), RandomSizedCrop(args.img_rows)]) # Setup Dataloader data_loader = get_loader(args.dataset) data_path = get_data_path(args.dataset) t_loader = data_loader(data_path, is_transform=True, img_size=(args.img_rows, args.img_cols), augmentations=data_aug, img_norm=False) v_loader = data_loader(data_path, is_transform=True, split='val', img_size=(args.img_rows, args.img_cols), img_norm=False) n_classes = t_loader.n_classes trainloader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=8, shuffle=True, drop_last = True) valloader = data.DataLoader(v_loader, batch_size=args.batch_size, num_workers=8, drop_last = True) # Setup Metrics running_metrics = runningScore(n_classes) # Setup visdom for visualization if args.visdom: vis = visdom.Visdom() # window for training loss loss_window = vis.line(X=torch.ones((1)), Y=torch.zeros((1)), opts=dict(xlabel='epoch', ylabel='Loss', title='Training Loss', legend=['Loss'], width = 400, height = 400)) # window for example training image image_train_window = vis.images(torch.zeros((3, 3, args.img_rows, args.img_cols)), opts=dict(nrow = 3, caption = 'input-prediction-groundtruth', title = 'Training example image')) # window for train and validation accuracy acc_window = vis.line(X=torch.ones((1,2)), Y=torch.zeros((1,2)), opts=dict(xlabel='epoch', ylabel='mean IoU', title='Mean IoU', legend=['train','validation'], width = 400, height = 400)) # window for example validation image image_val_window = vis.images(torch.zeros((3, 3, args.img_rows, args.img_cols)), opts=dict(nrow = 3, caption = 'input-prediction-groundtruth', title = 'Validation example image')) # Setup Model model_name = 'pspnet' model = get_model(model_name, n_classes, version = args.dataset+'_res50') #model = get_model(model_name, n_classes, version = args.dataset+'_res101') if do_finetuning: # pspnet pretrained on ade20k pretrained_model_path = '/home/interns/xuan/models/pspnet_50_ade20k.pth' # pspnet pretrained on pascal VOC #pretrained_model_path = '/home/interns/xuan/models/pspnet_101_pascalvoc.pth' pretrained_state = convert_state_dict(torch.load(pretrained_model_path)['model_state']) # remove 'module' in keys # Load parameters except for last classification layer to fine tuning print('Setting up for fine tuning') # 1. filter out unnecessary keys pretrained_state = {k: v for k, v in pretrained_state.items() if k not in ['classification.weight', 'classification.bias', 'aux_cls.weight', 'aux_cls.bias']} # 2. overwrite entries in the existing state dict model_state_dict = model.state_dict() model_state_dict.update(pretrained_state) # 3. 
load the new state dict model.load_state_dict(model_state_dict) # load checkpoint to continue training if args.resume is not None: if os.path.isfile(args.resume): print("Loading model from checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) model.load_state_dict(checkpoint) print("Loaded checkpoint '{}'" .format(args.resume)) else: print("No checkpoint found at '{}'".format(args.resume)) ''' # freeze all parameters except for final classification if doing fine tuning if do_finetuning: for param in model.parameters(): param.requires_grad = False for param in model.classification.parameters(): param.requires_grad = True for param in model.cbr_final.parameters(): param.requires_grad = True ''' # Set up optimizer opt_dict = {'name': 'SGD', 'learning_rate': args.l_rate, 'momentum': 0.9, 'weight_decay': 1e-3} optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), opt_dict['learning_rate'], opt_dict['momentum'], opt_dict['weight_decay']) #scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda = lambda epoch: 0.9**epoch) # train on multiple GPU if data_parallel: model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count())) # move parameters to GPU model = model.to(device) best_iou = -100.0 statistics = {} best_model_stat = {} # print params print('optimizer', opt_dict) print('batch size', args.batch_size) since = time() # start time # for every epoch.train then validate. Keep the best model in validation for epoch in range(1, args.n_epoch + 1): print('=>Epoch %d / %d' % (epoch, args.n_epoch)) # -------- train -------- model.train() # Freeze BatchNorm2d layers because we have small batch size #print('Freeze BatchNorm2d layers') #model.apply(freeze_batchnorm2d) print(' =>Training') loss_epoch = 0. 
# average loss in an epoch #scheduler.step() for i, (images, labels) in tqdm(enumerate(trainloader), total = len(trainloader)): images = images.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() outputs = model(images) # if use aux loss, loss_fn = multi_scale_cross_entropy2d # if ignore aux loss, loss_fn = cross_entropy2d loss = multi_scale_cross_entropy2d(input=outputs, target=labels, device = device) loss.backward() optimizer.step() # update average loss loss_epoch += loss.item() # update train accuracy (mIoU) pred = outputs[0].data.max(1)[1].cpu().numpy() gt = labels.data.cpu().numpy() running_metrics.update(gt, pred) loss_epoch /= len(trainloader) print('Average training loss: %f' % loss_epoch) # draw train loss every epoch if args.visdom: vis.line( X=torch.Tensor([epoch]), Y=torch.Tensor([loss_epoch]).unsqueeze(0), win=loss_window, update='append') # get train accuracy for this epoch scores_train, class_iou_train = running_metrics.get_scores() running_metrics.reset() print('Training mean IoU: %f' % scores_train['Mean IoU']) # -------- validate -------- model.eval() print(' =>Validation') with torch.no_grad(): for i_val, (images_val, labels_val) in tqdm(enumerate(valloader), total = len(valloader)): images_val = images_val.to(device) labels_val = labels_val.to(device) outputs = model(images_val) pred = outputs.data.max(1)[1].cpu().numpy() gt = labels_val.data.cpu().numpy() running_metrics.update(gt, pred) scores_val, class_iou_val = running_metrics.get_scores() running_metrics.reset() for k, v in scores_val.items(): print(k+': %f' % v) # --------save best model -------- if scores_val['Mean IoU'] >= best_iou: best_iou = scores_val['Mean IoU'] best_model = model.state_dict() if data_parallel: best_model = convert_state_dict(best_model) # remove 'module' in keys to be competible with single GPU torch.save(best_model, "{}_{}_best_model.pth".format(model_name, args.dataset)) print('Best model updated!') print(class_iou_val) best_model_stat = {'epoch': epoch, 'scores_val': scores_val, 'class_iou_val': class_iou_val} # -------- draw -------- if args.visdom: # draw accuracy for training and validation vis.line( X=torch.Tensor([epoch]), Y=torch.Tensor([scores_train['Mean IoU'], scores_val['Mean IoU']]).unsqueeze(0), win=acc_window, update='append') # show example train image with torch.no_grad(): (image_train, label_train) = t_loader[0] gt = t_loader.decode_segmap(label_train.numpy()) image_train = image_train.unsqueeze(0) image_train = image_train.to(device) label_train = label_train.to(device) outputs = model(image_train) pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=0) decoded = t_loader.decode_segmap(pred) vis.images([image_train.data.cpu().squeeze(0), decoded.transpose(2,0,1)*255.0, gt.transpose(2,0,1)*255.0], win = image_train_window) # show example validation image with torch.no_grad(): (image_val, label_val) = v_loader[0] gt = v_loader.decode_segmap(label_val.numpy()) image_val = image_val.unsqueeze(0) image_val = image_val.to(device) label_val = label_val.to(device) outputs = model(image_val) pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=0) decoded = v_loader.decode_segmap(pred) vis.images([image_val.data.cpu().squeeze(0), decoded.transpose(2,0,1)*255.0, gt.transpose(2,0,1)*255.0], win = image_val_window) # -------- save training statistics -------- statistics['epoch %d' % epoch] = {'train_loss': loss_epoch, 'scores_train': scores_train, 'scores_val': scores_val} with open('train_statistics.json', 'w') as outfile: 
json.dump({ 'optimizer': opt_dict, 'batch_size': args.batch_size, 'data_parallel': data_parallel, 'Training hours': (time() - since)/3600.0, 'best_model': best_model_stat, 'statistics': statistics }, outfile)
[ "def", "train", "(", "args", ")", ":", "do_finetuning", "=", "True", "data_parallel", "=", "False", "# whether split a batch on multiple GPUs to accelerate", "if", "data_parallel", ":", "print", "(", "'Using data parallel.'", ")", "device", "=", "torch", ".", "device", "(", "\"cuda:0\"", "if", "torch", ".", "cuda", ".", "is_available", "(", ")", "else", "\"cpu\"", ")", "else", ":", "device", "=", "torch", ".", "device", "(", "\"cuda:1\"", "if", "torch", ".", "cuda", ".", "is_available", "(", ")", "else", "\"cpu\"", ")", "# use GPU1 if train on one GPU", "# Setup Augmentations", "data_aug", "=", "Compose", "(", "[", "RandomSized", "(", "args", ".", "img_rows", ")", ",", "RandomHorizontallyFlip", "(", ")", ",", "RandomSizedCrop", "(", "args", ".", "img_rows", ")", "]", ")", "# Setup Dataloader", "data_loader", "=", "get_loader", "(", "args", ".", "dataset", ")", "data_path", "=", "get_data_path", "(", "args", ".", "dataset", ")", "t_loader", "=", "data_loader", "(", "data_path", ",", "is_transform", "=", "True", ",", "img_size", "=", "(", "args", ".", "img_rows", ",", "args", ".", "img_cols", ")", ",", "augmentations", "=", "data_aug", ",", "img_norm", "=", "False", ")", "v_loader", "=", "data_loader", "(", "data_path", ",", "is_transform", "=", "True", ",", "split", "=", "'val'", ",", "img_size", "=", "(", "args", ".", "img_rows", ",", "args", ".", "img_cols", ")", ",", "img_norm", "=", "False", ")", "n_classes", "=", "t_loader", ".", "n_classes", "trainloader", "=", "data", ".", "DataLoader", "(", "t_loader", ",", "batch_size", "=", "args", ".", "batch_size", ",", "num_workers", "=", "8", ",", "shuffle", "=", "True", ",", "drop_last", "=", "True", ")", "valloader", "=", "data", ".", "DataLoader", "(", "v_loader", ",", "batch_size", "=", "args", ".", "batch_size", ",", "num_workers", "=", "8", ",", "drop_last", "=", "True", ")", "# Setup Metrics", "running_metrics", "=", "runningScore", "(", "n_classes", ")", "# Setup visdom for visualization", "if", "args", ".", "visdom", ":", "vis", "=", "visdom", ".", "Visdom", "(", ")", "# window for training loss", "loss_window", "=", "vis", ".", "line", "(", "X", "=", "torch", ".", "ones", "(", "(", "1", ")", ")", ",", "Y", "=", "torch", ".", "zeros", "(", "(", "1", ")", ")", ",", "opts", "=", "dict", "(", "xlabel", "=", "'epoch'", ",", "ylabel", "=", "'Loss'", ",", "title", "=", "'Training Loss'", ",", "legend", "=", "[", "'Loss'", "]", ",", "width", "=", "400", ",", "height", "=", "400", ")", ")", "# window for example training image", "image_train_window", "=", "vis", ".", "images", "(", "torch", ".", "zeros", "(", "(", "3", ",", "3", ",", "args", ".", "img_rows", ",", "args", ".", "img_cols", ")", ")", ",", "opts", "=", "dict", "(", "nrow", "=", "3", ",", "caption", "=", "'input-prediction-groundtruth'", ",", "title", "=", "'Training example image'", ")", ")", "# window for train and validation accuracy", "acc_window", "=", "vis", ".", "line", "(", "X", "=", "torch", ".", "ones", "(", "(", "1", ",", "2", ")", ")", ",", "Y", "=", "torch", ".", "zeros", "(", "(", "1", ",", "2", ")", ")", ",", "opts", "=", "dict", "(", "xlabel", "=", "'epoch'", ",", "ylabel", "=", "'mean IoU'", ",", "title", "=", "'Mean IoU'", ",", "legend", "=", "[", "'train'", ",", "'validation'", "]", ",", "width", "=", "400", ",", "height", "=", "400", ")", ")", "# window for example validation image", "image_val_window", "=", "vis", ".", "images", "(", "torch", ".", "zeros", "(", "(", "3", ",", "3", ",", "args", ".", "img_rows", ",", "args", ".", "img_cols", ")", ")", ",", 
"opts", "=", "dict", "(", "nrow", "=", "3", ",", "caption", "=", "'input-prediction-groundtruth'", ",", "title", "=", "'Validation example image'", ")", ")", "# Setup Model", "model_name", "=", "'pspnet'", "model", "=", "get_model", "(", "model_name", ",", "n_classes", ",", "version", "=", "args", ".", "dataset", "+", "'_res50'", ")", "#model = get_model(model_name, n_classes, version = args.dataset+'_res101')", "if", "do_finetuning", ":", "# pspnet pretrained on ade20k", "pretrained_model_path", "=", "'/home/interns/xuan/models/pspnet_50_ade20k.pth'", "# pspnet pretrained on pascal VOC", "#pretrained_model_path = '/home/interns/xuan/models/pspnet_101_pascalvoc.pth'", "pretrained_state", "=", "convert_state_dict", "(", "torch", ".", "load", "(", "pretrained_model_path", ")", "[", "'model_state'", "]", ")", "# remove 'module' in keys", "# Load parameters except for last classification layer to fine tuning", "print", "(", "'Setting up for fine tuning'", ")", "# 1. filter out unnecessary keys", "pretrained_state", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "pretrained_state", ".", "items", "(", ")", "if", "k", "not", "in", "[", "'classification.weight'", ",", "'classification.bias'", ",", "'aux_cls.weight'", ",", "'aux_cls.bias'", "]", "}", "# 2. overwrite entries in the existing state dict", "model_state_dict", "=", "model", ".", "state_dict", "(", ")", "model_state_dict", ".", "update", "(", "pretrained_state", ")", "# 3. load the new state dict", "model", ".", "load_state_dict", "(", "model_state_dict", ")", "# load checkpoint to continue training", "if", "args", ".", "resume", "is", "not", "None", ":", "if", "os", ".", "path", ".", "isfile", "(", "args", ".", "resume", ")", ":", "print", "(", "\"Loading model from checkpoint '{}'\"", ".", "format", "(", "args", ".", "resume", ")", ")", "checkpoint", "=", "torch", ".", "load", "(", "args", ".", "resume", ")", "model", ".", "load_state_dict", "(", "checkpoint", ")", "print", "(", "\"Loaded checkpoint '{}'\"", ".", "format", "(", "args", ".", "resume", ")", ")", "else", ":", "print", "(", "\"No checkpoint found at '{}'\"", ".", "format", "(", "args", ".", "resume", ")", ")", "# Set up optimizer", "opt_dict", "=", "{", "'name'", ":", "'SGD'", ",", "'learning_rate'", ":", "args", ".", "l_rate", ",", "'momentum'", ":", "0.9", ",", "'weight_decay'", ":", "1e-3", "}", "optimizer", "=", "torch", ".", "optim", ".", "SGD", "(", "filter", "(", "lambda", "p", ":", "p", ".", "requires_grad", ",", "model", ".", "parameters", "(", ")", ")", ",", "opt_dict", "[", "'learning_rate'", "]", ",", "opt_dict", "[", "'momentum'", "]", ",", "opt_dict", "[", "'weight_decay'", "]", ")", "#scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda = lambda epoch: 0.9**epoch)", "# train on multiple GPU", "if", "data_parallel", ":", "model", "=", "torch", ".", "nn", ".", "DataParallel", "(", "model", ",", "device_ids", "=", "range", "(", "torch", ".", "cuda", ".", "device_count", "(", ")", ")", ")", "# move parameters to GPU", "model", "=", "model", ".", "to", "(", "device", ")", "best_iou", "=", "-", "100.0", "statistics", "=", "{", "}", "best_model_stat", "=", "{", "}", "# print params", "print", "(", "'optimizer'", ",", "opt_dict", ")", "print", "(", "'batch size'", ",", "args", ".", "batch_size", ")", "since", "=", "time", "(", ")", "# start time", "# for every epoch.train then validate. 
Keep the best model in validation", "for", "epoch", "in", "range", "(", "1", ",", "args", ".", "n_epoch", "+", "1", ")", ":", "print", "(", "'=>Epoch %d / %d'", "%", "(", "epoch", ",", "args", ".", "n_epoch", ")", ")", "# -------- train --------", "model", ".", "train", "(", ")", "# Freeze BatchNorm2d layers because we have small batch size", "#print('Freeze BatchNorm2d layers')", "#model.apply(freeze_batchnorm2d)", "print", "(", "' =>Training'", ")", "loss_epoch", "=", "0.", "# average loss in an epoch", "#scheduler.step()", "for", "i", ",", "(", "images", ",", "labels", ")", "in", "tqdm", "(", "enumerate", "(", "trainloader", ")", ",", "total", "=", "len", "(", "trainloader", ")", ")", ":", "images", "=", "images", ".", "to", "(", "device", ")", "labels", "=", "labels", ".", "to", "(", "device", ")", "# zero the parameter gradients", "optimizer", ".", "zero_grad", "(", ")", "outputs", "=", "model", "(", "images", ")", "# if use aux loss, loss_fn = multi_scale_cross_entropy2d", "# if ignore aux loss, loss_fn = cross_entropy2d", "loss", "=", "multi_scale_cross_entropy2d", "(", "input", "=", "outputs", ",", "target", "=", "labels", ",", "device", "=", "device", ")", "loss", ".", "backward", "(", ")", "optimizer", ".", "step", "(", ")", "# update average loss", "loss_epoch", "+=", "loss", ".", "item", "(", ")", "# update train accuracy (mIoU)", "pred", "=", "outputs", "[", "0", "]", ".", "data", ".", "max", "(", "1", ")", "[", "1", "]", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "gt", "=", "labels", ".", "data", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "running_metrics", ".", "update", "(", "gt", ",", "pred", ")", "loss_epoch", "/=", "len", "(", "trainloader", ")", "print", "(", "'Average training loss: %f'", "%", "loss_epoch", ")", "# draw train loss every epoch", "if", "args", ".", "visdom", ":", "vis", ".", "line", "(", "X", "=", "torch", ".", "Tensor", "(", "[", "epoch", "]", ")", ",", "Y", "=", "torch", ".", "Tensor", "(", "[", "loss_epoch", "]", ")", ".", "unsqueeze", "(", "0", ")", ",", "win", "=", "loss_window", ",", "update", "=", "'append'", ")", "# get train accuracy for this epoch", "scores_train", ",", "class_iou_train", "=", "running_metrics", ".", "get_scores", "(", ")", "running_metrics", ".", "reset", "(", ")", "print", "(", "'Training mean IoU: %f'", "%", "scores_train", "[", "'Mean IoU'", "]", ")", "# -------- validate --------", "model", ".", "eval", "(", ")", "print", "(", "' =>Validation'", ")", "with", "torch", ".", "no_grad", "(", ")", ":", "for", "i_val", ",", "(", "images_val", ",", "labels_val", ")", "in", "tqdm", "(", "enumerate", "(", "valloader", ")", ",", "total", "=", "len", "(", "valloader", ")", ")", ":", "images_val", "=", "images_val", ".", "to", "(", "device", ")", "labels_val", "=", "labels_val", ".", "to", "(", "device", ")", "outputs", "=", "model", "(", "images_val", ")", "pred", "=", "outputs", ".", "data", ".", "max", "(", "1", ")", "[", "1", "]", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "gt", "=", "labels_val", ".", "data", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "running_metrics", ".", "update", "(", "gt", ",", "pred", ")", "scores_val", ",", "class_iou_val", "=", "running_metrics", ".", "get_scores", "(", ")", "running_metrics", ".", "reset", "(", ")", "for", "k", ",", "v", "in", "scores_val", ".", "items", "(", ")", ":", "print", "(", "k", "+", "': %f'", "%", "v", ")", "# --------save best model --------", "if", "scores_val", "[", "'Mean IoU'", "]", ">=", "best_iou", ":", "best_iou", "=", "scores_val", "[", "'Mean IoU'", "]", 
"best_model", "=", "model", ".", "state_dict", "(", ")", "if", "data_parallel", ":", "best_model", "=", "convert_state_dict", "(", "best_model", ")", "# remove 'module' in keys to be competible with single GPU", "torch", ".", "save", "(", "best_model", ",", "\"{}_{}_best_model.pth\"", ".", "format", "(", "model_name", ",", "args", ".", "dataset", ")", ")", "print", "(", "'Best model updated!'", ")", "print", "(", "class_iou_val", ")", "best_model_stat", "=", "{", "'epoch'", ":", "epoch", ",", "'scores_val'", ":", "scores_val", ",", "'class_iou_val'", ":", "class_iou_val", "}", "# -------- draw --------", "if", "args", ".", "visdom", ":", "# draw accuracy for training and validation", "vis", ".", "line", "(", "X", "=", "torch", ".", "Tensor", "(", "[", "epoch", "]", ")", ",", "Y", "=", "torch", ".", "Tensor", "(", "[", "scores_train", "[", "'Mean IoU'", "]", ",", "scores_val", "[", "'Mean IoU'", "]", "]", ")", ".", "unsqueeze", "(", "0", ")", ",", "win", "=", "acc_window", ",", "update", "=", "'append'", ")", "# show example train image", "with", "torch", ".", "no_grad", "(", ")", ":", "(", "image_train", ",", "label_train", ")", "=", "t_loader", "[", "0", "]", "gt", "=", "t_loader", ".", "decode_segmap", "(", "label_train", ".", "numpy", "(", ")", ")", "image_train", "=", "image_train", ".", "unsqueeze", "(", "0", ")", "image_train", "=", "image_train", ".", "to", "(", "device", ")", "label_train", "=", "label_train", ".", "to", "(", "device", ")", "outputs", "=", "model", "(", "image_train", ")", "pred", "=", "np", ".", "squeeze", "(", "outputs", ".", "data", ".", "max", "(", "1", ")", "[", "1", "]", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ",", "axis", "=", "0", ")", "decoded", "=", "t_loader", ".", "decode_segmap", "(", "pred", ")", "vis", ".", "images", "(", "[", "image_train", ".", "data", ".", "cpu", "(", ")", ".", "squeeze", "(", "0", ")", ",", "decoded", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "*", "255.0", ",", "gt", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "*", "255.0", "]", ",", "win", "=", "image_train_window", ")", "# show example validation image", "with", "torch", ".", "no_grad", "(", ")", ":", "(", "image_val", ",", "label_val", ")", "=", "v_loader", "[", "0", "]", "gt", "=", "v_loader", ".", "decode_segmap", "(", "label_val", ".", "numpy", "(", ")", ")", "image_val", "=", "image_val", ".", "unsqueeze", "(", "0", ")", "image_val", "=", "image_val", ".", "to", "(", "device", ")", "label_val", "=", "label_val", ".", "to", "(", "device", ")", "outputs", "=", "model", "(", "image_val", ")", "pred", "=", "np", ".", "squeeze", "(", "outputs", ".", "data", ".", "max", "(", "1", ")", "[", "1", "]", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ",", "axis", "=", "0", ")", "decoded", "=", "v_loader", ".", "decode_segmap", "(", "pred", ")", "vis", ".", "images", "(", "[", "image_val", ".", "data", ".", "cpu", "(", ")", ".", "squeeze", "(", "0", ")", ",", "decoded", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "*", "255.0", ",", "gt", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "*", "255.0", "]", ",", "win", "=", "image_val_window", ")", "# -------- save training statistics --------", "statistics", "[", "'epoch %d'", "%", "epoch", "]", "=", "{", "'train_loss'", ":", "loss_epoch", ",", "'scores_train'", ":", "scores_train", ",", "'scores_val'", ":", "scores_val", "}", "with", "open", "(", "'train_statistics.json'", ",", "'w'", ")", "as", "outfile", ":", "json", ".", "dump", "(", "{", "'optimizer'", ":", "opt_dict", ",", "'batch_size'", ":", 
"args", ".", "batch_size", ",", "'data_parallel'", ":", "data_parallel", ",", "'Training hours'", ":", "(", "time", "(", ")", "-", "since", ")", "/", "3600.0", ",", "'best_model'", ":", "best_model_stat", ",", "'statistics'", ":", "statistics", "}", ",", "outfile", ")" ]
https://github.com/floatlazer/semantic_slam/blob/657814a1ba484de6b7f6f9d07c564566c8121f13/semantic_cloud/train_cnn/train.py#L35-L265
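A condensed sketch of the fine-tuning recipe buried in `train` above, using toy layers rather than the repo's PSPNet: drop the pretrained classification keys, merge the rest into the new model's state dict, then freeze everything except the new head:

import torch.nn as nn

pretrained = nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 150))  # e.g. an ADE20K-sized head
model = nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 4))         # new 4-class head

state = {k: v for k, v in pretrained.state_dict().items() if not k.startswith('1.')}
merged = model.state_dict()
merged.update(state)
model.load_state_dict(merged)

for name, param in model.named_parameters():
    param.requires_grad = name.startswith('1.')  # train only the new head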
bundy-dns/bundy
3d41934996b82b0cd2fe22dd74d2abc1daba835d
src/lib/python/bundy/memmgr/datasrc_info.py
python
SegmentInfo.sync_reader
(self, reader_session_id)
return self.__sync_reader_helper()
Synchronize segment info with a reader. memmgr should call it when it receives the "segment_update_ack" message from a reader module. If this method is called in the SYNCHRONIZING state, it moves the given ID from the set of reader modules that are using the "old" version of the segment to the set of reader modules that are using the "current" version of the segment, and if there are no reader modules using the "old" version of the segment, the state is changed to COPYING. If the state has changed to COPYING, it pops the head (oldest) event from the pending events queue and returns it; otherwise it returns None. This method can also be called in other states if the reader newly subscribes and is notified of a readable segment. In this case this method effectively does nothing. But the caller doesn't have to care about the differences based on the internal state.
Synchronize segment info with a reader.
[ "Synchronize", "segment", "info", "with", "a", "reader", "." ]
def sync_reader(self, reader_session_id): """Synchronize segment info with a reader. memmgr should call it when it receives the "segment_update_ack" message from a reader module. If this method is called in the SYNCHRONIZING state, it moves the given ID from the set of reader modules that are using the "old" version of the segment to the set of reader modules that are using the "current" version of the segment, and if there are no reader modules using the "old" version of the segment, the state is changed to COPYING. If the state has changed to COPYING, it pops the head (oldest) event from the pending events queue and returns it; otherwise it returns None. This method can also be called in other states if the reader newly subscribes and is notified of a readable segment. In this case this method effectively does nothing. But the caller doesn't have to care about the differences based on the internal state. """ if self.__state not in (self.V_SYNCHRONIZING, self.SYNCHRONIZING, self.SYNCHRONIZING2): return None if reader_session_id not in self.__old_readers: raise SegmentInfoError('Reader session ID is not in old readers ' + 'set: ' + str(reader_session_id)) if reader_session_id in self.__readers: raise SegmentInfoError('Reader session ID is already in readers ' + 'set: ' + str(reader_session_id)) self.__old_readers.remove(reader_session_id) self.__readers.add(reader_session_id) return self.__sync_reader_helper()
[ "def", "sync_reader", "(", "self", ",", "reader_session_id", ")", ":", "if", "self", ".", "__state", "not", "in", "(", "self", ".", "V_SYNCHRONIZING", ",", "self", ".", "SYNCHRONIZING", ",", "self", ".", "SYNCHRONIZING2", ")", ":", "return", "None", "if", "reader_session_id", "not", "in", "self", ".", "__old_readers", ":", "raise", "SegmentInfoError", "(", "'Reader session ID is not in old readers '", "+", "'set: '", "+", "str", "(", "reader_session_id", ")", ")", "if", "reader_session_id", "in", "self", ".", "__readers", ":", "raise", "SegmentInfoError", "(", "'Reader session ID is already in readers '", "+", "'set: '", "+", "str", "(", "reader_session_id", ")", ")", "self", ".", "__old_readers", ".", "remove", "(", "reader_session_id", ")", "self", ".", "__readers", ".", "add", "(", "reader_session_id", ")", "return", "self", ".", "__sync_reader_helper", "(", ")" ]
https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/memmgr/datasrc_info.py#L365-L400
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/runscript.py
python
ScriptBinding._run_module_event
(self, event, *, customize=False)
return 'break'
Run the module after setting up the environment. First check the syntax. Next get customization. If OK, make sure the shell is active and then transfer the arguments, set the run environment's working directory to the directory of the module being executed and also add that directory to its sys.path if not already included.
Run the module after setting up the environment.
[ "Run", "the", "module", "after", "setting", "up", "the", "environment", "." ]
def _run_module_event(self, event, *, customize=False): """Run the module after setting up the environment. First check the syntax. Next get customization. If OK, make sure the shell is active and then transfer the arguments, set the run environment's working directory to the directory of the module being executed and also add that directory to its sys.path if not already included. """ if isinstance(self.editwin, outwin.OutputWindow): self.editwin.text.bell() return 'break' filename = self.getfilename() if not filename: return 'break' code = self.checksyntax(filename) if not code: return 'break' if not self.tabnanny(filename): return 'break' if customize: title = f"Customize {self.editwin.short_title()} Run" run_args = CustomRun(self.shell.text, title, cli_args=self.cli_args).result if not run_args: # User cancelled. return 'break' self.cli_args, restart = run_args if customize else ([], True) interp = self.shell.interp if pyshell.use_subprocess and restart: interp.restart_subprocess( with_cwd=False, filename=filename) dirname = os.path.dirname(filename) argv = [filename] if self.cli_args: argv += self.cli_args interp.runcommand(f"""if 1: __file__ = {filename!r} import sys as _sys from os.path import basename as _basename argv = {argv!r} if (not _sys.argv or _basename(_sys.argv[0]) != _basename(__file__) or len(argv) > 1): _sys.argv = argv import os as _os _os.chdir({dirname!r}) del _sys, argv, _basename, _os \n""") interp.prepend_syspath(filename) # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still # go to __stderr__. With subprocess, they go to the shell. # Need to change streams in pyshell.ModifiedInterpreter. interp.runcode(code) return 'break'
[ "def", "_run_module_event", "(", "self", ",", "event", ",", "*", ",", "customize", "=", "False", ")", ":", "if", "isinstance", "(", "self", ".", "editwin", ",", "outwin", ".", "OutputWindow", ")", ":", "self", ".", "editwin", ".", "text", ".", "bell", "(", ")", "return", "'break'", "filename", "=", "self", ".", "getfilename", "(", ")", "if", "not", "filename", ":", "return", "'break'", "code", "=", "self", ".", "checksyntax", "(", "filename", ")", "if", "not", "code", ":", "return", "'break'", "if", "not", "self", ".", "tabnanny", "(", "filename", ")", ":", "return", "'break'", "if", "customize", ":", "title", "=", "f\"Customize {self.editwin.short_title()} Run\"", "run_args", "=", "CustomRun", "(", "self", ".", "shell", ".", "text", ",", "title", ",", "cli_args", "=", "self", ".", "cli_args", ")", ".", "result", "if", "not", "run_args", ":", "# User cancelled.", "return", "'break'", "self", ".", "cli_args", ",", "restart", "=", "run_args", "if", "customize", "else", "(", "[", "]", ",", "True", ")", "interp", "=", "self", ".", "shell", ".", "interp", "if", "pyshell", ".", "use_subprocess", "and", "restart", ":", "interp", ".", "restart_subprocess", "(", "with_cwd", "=", "False", ",", "filename", "=", "filename", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "argv", "=", "[", "filename", "]", "if", "self", ".", "cli_args", ":", "argv", "+=", "self", ".", "cli_args", "interp", ".", "runcommand", "(", "f\"\"\"if 1:\n __file__ = {filename!r}\n import sys as _sys\n from os.path import basename as _basename\n argv = {argv!r}\n if (not _sys.argv or\n _basename(_sys.argv[0]) != _basename(__file__) or\n len(argv) > 1):\n _sys.argv = argv\n import os as _os\n _os.chdir({dirname!r})\n del _sys, argv, _basename, _os\n \\n\"\"\"", ")", "interp", ".", "prepend_syspath", "(", "filename", ")", "# XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still", "# go to __stderr__. With subprocess, they go to the shell.", "# Need to change streams in pyshell.ModifiedInterpreter.", "interp", ".", "runcode", "(", "code", ")", "return", "'break'" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/runscript.py#L127-L180
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/stc.py
python
StyledTextCtrl.WordEndPosition
(*args, **kwargs)
return _stc.StyledTextCtrl_WordEndPosition(*args, **kwargs)
WordEndPosition(self, int pos, bool onlyWordCharacters) -> int Get position of end of word.
WordEndPosition(self, int pos, bool onlyWordCharacters) -> int
[ "WordEndPosition", "(", "self", "int", "pos", "bool", "onlyWordCharacters", ")", "-", ">", "int" ]
def WordEndPosition(*args, **kwargs): """ WordEndPosition(self, int pos, bool onlyWordCharacters) -> int Get position of end of word. """ return _stc.StyledTextCtrl_WordEndPosition(*args, **kwargs)
[ "def", "WordEndPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_WordEndPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L4063-L4069
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/roc/hsadrv/driver.py
python
device_pointer
(obj)
return device_ctypes_pointer(obj).value
Get the device pointer as an integer
Get the device pointer as an integer
[ "Get", "the", "device", "pointer", "as", "an", "integer" ]
def device_pointer(obj): "Get the device pointer as an integer" return device_ctypes_pointer(obj).value
[ "def", "device_pointer", "(", "obj", ")", ":", "return", "device_ctypes_pointer", "(", "obj", ")", ".", "value" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/roc/hsadrv/driver.py#L1424-L1426
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/importlib/_bootstrap_external.py
python
_get_supported_file_loaders
()
return [extensions, source, bytecode]
Returns a list of file-based module loaders. Each item is a tuple (loader, suffixes).
Returns a list of file-based module loaders.
[ "Returns", "a", "list", "of", "file", "-", "based", "module", "loaders", "." ]
def _get_supported_file_loaders(): """Returns a list of file-based module loaders. Each item is a tuple (loader, suffixes). """ extensions = ExtensionFileLoader, _imp.extension_suffixes() source = SourceFileLoader, SOURCE_SUFFIXES bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES return [extensions, source, bytecode]
[ "def", "_get_supported_file_loaders", "(", ")", ":", "extensions", "=", "ExtensionFileLoader", ",", "_imp", ".", "extension_suffixes", "(", ")", "source", "=", "SourceFileLoader", ",", "SOURCE_SUFFIXES", "bytecode", "=", "SourcelessFileLoader", ",", "BYTECODE_SUFFIXES", "return", "[", "extensions", ",", "source", ",", "bytecode", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/importlib/_bootstrap_external.py#L1482-L1490
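A peek at what the private helper returns; this pokes a CPython implementation detail, so treat it as illustrative only:

import importlib._bootstrap_external as _bootstrap_external

for loader, suffixes in _bootstrap_external._get_supported_file_loaders():
    print(loader.__name__, suffixes)
# Expected: ExtensionFileLoader, SourceFileLoader and SourcelessFileLoader,
# each paired with its list of file suffixes.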
nvdla/sw
79538ba1b52b040a4a4645f630e457fa01839e90
umd/external/protobuf-2.6/python/google/protobuf/descriptor.py
python
_ParseOptions
(message, string)
return message
Parses serialized options. This helper function is used to parse serialized options in generated proto2 files. It must not be used outside proto2.
Parses serialized options.
[ "Parses", "serialized", "options", "." ]
def _ParseOptions(message, string): """Parses serialized options. This helper function is used to parse serialized options in generated proto2 files. It must not be used outside proto2. """ message.ParseFromString(string) return message
[ "def", "_ParseOptions", "(", "message", ",", "string", ")", ":", "message", ".", "ParseFromString", "(", "string", ")", "return", "message" ]
https://github.com/nvdla/sw/blob/79538ba1b52b040a4a4645f630e457fa01839e90/umd/external/protobuf-2.6/python/google/protobuf/descriptor.py#L747-L754
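The same parse-and-return pattern outside the private module, using `FileOptions` purely for illustration:

from google.protobuf import descriptor_pb2

serialized = descriptor_pb2.FileOptions(java_package='com.example').SerializeToString()
options = descriptor_pb2.FileOptions()
options.ParseFromString(serialized)   # what _ParseOptions does before returning the message
assert options.java_package == 'com.example'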
bulletphysics/bullet3
f0f2a952e146f016096db6f85cf0c44ed75b0b9a
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur.py
python
Minitaur._BuildUrdfIds
(self)
Build the link Ids from its name in the URDF file.
Build the link Ids from its name in the URDF file.
[ "Build", "the", "link", "Ids", "from", "its", "name", "in", "the", "URDF", "file", "." ]
def _BuildUrdfIds(self): """Build the link Ids from its name in the URDF file.""" num_joints = self._pybullet_client.getNumJoints(self.quadruped) self._chassis_link_ids = [-1] # the self._leg_link_ids include both the upper and lower links of the leg. self._leg_link_ids = [] self._motor_link_ids = [] self._foot_link_ids = [] for i in range(num_joints): joint_info = self._pybullet_client.getJointInfo(self.quadruped, i) joint_name = joint_info[1].decode("UTF-8") joint_id = self._joint_name_to_id[joint_name] if _CHASSIS_NAME_PATTERN.match(joint_name): self._chassis_link_ids.append(joint_id) elif _MOTOR_NAME_PATTERN.match(joint_name): self._motor_link_ids.append(joint_id) elif _KNEE_NAME_PATTERN.match(joint_name): self._foot_link_ids.append(joint_id) else: self._leg_link_ids.append(joint_id) self._leg_link_ids.extend(self._foot_link_ids) self._chassis_link_ids.sort() self._motor_link_ids.sort() self._foot_link_ids.sort() self._leg_link_ids.sort()
[ "def", "_BuildUrdfIds", "(", "self", ")", ":", "num_joints", "=", "self", ".", "_pybullet_client", ".", "getNumJoints", "(", "self", ".", "quadruped", ")", "self", ".", "_chassis_link_ids", "=", "[", "-", "1", "]", "# the self._leg_link_ids include both the upper and lower links of the leg.", "self", ".", "_leg_link_ids", "=", "[", "]", "self", ".", "_motor_link_ids", "=", "[", "]", "self", ".", "_foot_link_ids", "=", "[", "]", "for", "i", "in", "range", "(", "num_joints", ")", ":", "joint_info", "=", "self", ".", "_pybullet_client", ".", "getJointInfo", "(", "self", ".", "quadruped", ",", "i", ")", "joint_name", "=", "joint_info", "[", "1", "]", ".", "decode", "(", "\"UTF-8\"", ")", "joint_id", "=", "self", ".", "_joint_name_to_id", "[", "joint_name", "]", "if", "_CHASSIS_NAME_PATTERN", ".", "match", "(", "joint_name", ")", ":", "self", ".", "_chassis_link_ids", ".", "append", "(", "joint_id", ")", "elif", "_MOTOR_NAME_PATTERN", ".", "match", "(", "joint_name", ")", ":", "self", ".", "_motor_link_ids", ".", "append", "(", "joint_id", ")", "elif", "_KNEE_NAME_PATTERN", ".", "match", "(", "joint_name", ")", ":", "self", ".", "_foot_link_ids", ".", "append", "(", "joint_id", ")", "else", ":", "self", ".", "_leg_link_ids", ".", "append", "(", "joint_id", ")", "self", ".", "_leg_link_ids", ".", "extend", "(", "self", ".", "_foot_link_ids", ")", "self", ".", "_chassis_link_ids", ".", "sort", "(", ")", "self", ".", "_motor_link_ids", ".", "sort", "(", ")", "self", ".", "_foot_link_ids", ".", "sort", "(", ")", "self", ".", "_leg_link_ids", ".", "sort", "(", ")" ]
https://github.com/bulletphysics/bullet3/blob/f0f2a952e146f016096db6f85cf0c44ed75b0b9a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur.py#L207-L231
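The method above needs a live pybullet client and a loaded URDF, so a direct call is hard to show; the sketch below only illustrates the name-pattern classification it performs. The regexes are assumptions standing in for the module-level _CHASSIS_NAME_PATTERN, _MOTOR_NAME_PATTERN and _KNEE_NAME_PATTERN constants, and the joint names are invented.

import re

# Assumed stand-ins for the module-level name patterns.
CHASSIS_NAME_PATTERN = re.compile(r"chassis\w*")
MOTOR_NAME_PATTERN = re.compile(r"motor\w*")
KNEE_NAME_PATTERN = re.compile(r"knee\w*")

# Invented joint names; the real method reads them from pybullet's getJointInfo().
joint_names = ["chassis_center", "motor_front_leftL_joint",
               "knee_front_leftL_link", "upper_leg_front_leftR_link"]

chassis_link_ids, motor_link_ids, foot_link_ids, leg_link_ids = [-1], [], [], []
for joint_id, name in enumerate(joint_names):
    if CHASSIS_NAME_PATTERN.match(name):
        chassis_link_ids.append(joint_id)
    elif MOTOR_NAME_PATTERN.match(name):
        motor_link_ids.append(joint_id)
    elif KNEE_NAME_PATTERN.match(name):
        foot_link_ids.append(joint_id)
    else:
        leg_link_ids.append(joint_id)
leg_link_ids.extend(foot_link_ids)   # legs include the knee/foot links, as in the method
print(sorted(motor_link_ids), sorted(foot_link_ids), sorted(leg_link_ids))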
CanalTP/navitia
cb84ce9859070187e708818b058e6a7e0b7f891b
source/navitiacommon/navitiacommon/utils.py
python
type_of_data
(filename)
return None, None
return the type of data contains in a file + the path to load it this type can be one in: - 'gtfs' - 'fusio' - 'fare' - 'osm' - 'geopal' - 'fusio' - 'poi' - 'synonym' - 'shape' if only_one_file is True, so consider only a zip for all pt data else we consider also them for multi files for 'fusio', 'gtfs', 'fares' and 'poi', we return the directory since there are several file to load
return the type of data contains in a file + the path to load it
[ "return", "the", "type", "of", "data", "contains", "in", "a", "file", "+", "the", "path", "to", "load", "it" ]
def type_of_data(filename): """ return the type of data contains in a file + the path to load it this type can be one in: - 'gtfs' - 'fusio' - 'fare' - 'osm' - 'geopal' - 'fusio' - 'poi' - 'synonym' - 'shape' if only_one_file is True, so consider only a zip for all pt data else we consider also them for multi files for 'fusio', 'gtfs', 'fares' and 'poi', we return the directory since there are several file to load """ def files_type(files): # first we try fusio, because it can load fares too if any(f for f in files if f.endswith("contributors.txt")): return 'fusio' if any(f for f in files if f.endswith("fares.csv")): return 'fare' if any(f for f in files if f.endswith("stops.txt")): return 'gtfs' if any(f for f in files if f.endswith("adresse.txt")): return 'geopal' if any(f for f in files if f.endswith("poi.txt")): return 'poi' if any(f for f in files if f.endswith(".pbf")): return 'osm' return None if not isinstance(filename, list): if os.path.isdir(filename): files = glob.glob(filename + "/*") else: files = [filename] else: files = filename # we test if we recognize a ptfile in the list of files t = files_type(files) if t and t in [ 'fusio', 'gtfs', 'fare', 'poi', ]: # the path to load the data is the directory since there are several files return t, os.path.dirname(files[0]) if t and t in ['osm']: return t, files[0] for filename in files: if filename.endswith('.pbf'): return 'osm', filename if filename.endswith('.zip'): try: zipf = zipfile.ZipFile(filename) except Exception as e: logging.exception('Corrupted source file : {} error {}'.format(filename, e)) raise pt_type = files_type(zipf.namelist()) if not pt_type: return None, None return pt_type, filename if filename.endswith('.geopal'): return 'geopal', filename if filename.endswith('.poi'): return 'poi', os.path.dirname(filename) if filename.endswith("synonyms.txt"): return 'synonym', filename if filename.endswith(".poly") or filename.endswith(".wkt"): return 'shape', filename return None, None
[ "def", "type_of_data", "(", "filename", ")", ":", "def", "files_type", "(", "files", ")", ":", "# first we try fusio, because it can load fares too", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\"contributors.txt\"", ")", ")", ":", "return", "'fusio'", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\"fares.csv\"", ")", ")", ":", "return", "'fare'", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\"stops.txt\"", ")", ")", ":", "return", "'gtfs'", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\"adresse.txt\"", ")", ")", ":", "return", "'geopal'", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\"poi.txt\"", ")", ")", ":", "return", "'poi'", "if", "any", "(", "f", "for", "f", "in", "files", "if", "f", ".", "endswith", "(", "\".pbf\"", ")", ")", ":", "return", "'osm'", "return", "None", "if", "not", "isinstance", "(", "filename", ",", "list", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "files", "=", "glob", ".", "glob", "(", "filename", "+", "\"/*\"", ")", "else", ":", "files", "=", "[", "filename", "]", "else", ":", "files", "=", "filename", "# we test if we recognize a ptfile in the list of files", "t", "=", "files_type", "(", "files", ")", "if", "t", "and", "t", "in", "[", "'fusio'", ",", "'gtfs'", ",", "'fare'", ",", "'poi'", ",", "]", ":", "# the path to load the data is the directory since there are several files", "return", "t", ",", "os", ".", "path", ".", "dirname", "(", "files", "[", "0", "]", ")", "if", "t", "and", "t", "in", "[", "'osm'", "]", ":", "return", "t", ",", "files", "[", "0", "]", "for", "filename", "in", "files", ":", "if", "filename", ".", "endswith", "(", "'.pbf'", ")", ":", "return", "'osm'", ",", "filename", "if", "filename", ".", "endswith", "(", "'.zip'", ")", ":", "try", ":", "zipf", "=", "zipfile", ".", "ZipFile", "(", "filename", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "'Corrupted source file : {} error {}'", ".", "format", "(", "filename", ",", "e", ")", ")", "raise", "pt_type", "=", "files_type", "(", "zipf", ".", "namelist", "(", ")", ")", "if", "not", "pt_type", ":", "return", "None", ",", "None", "return", "pt_type", ",", "filename", "if", "filename", ".", "endswith", "(", "'.geopal'", ")", ":", "return", "'geopal'", ",", "filename", "if", "filename", ".", "endswith", "(", "'.poi'", ")", ":", "return", "'poi'", ",", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "filename", ".", "endswith", "(", "\"synonyms.txt\"", ")", ":", "return", "'synonym'", ",", "filename", "if", "filename", ".", "endswith", "(", "\".poly\"", ")", "or", "filename", ".", "endswith", "(", "\".wkt\"", ")", ":", "return", "'shape'", ",", "filename", "return", "None", ",", "None" ]
https://github.com/CanalTP/navitia/blob/cb84ce9859070187e708818b058e6a7e0b7f891b/source/navitiacommon/navitiacommon/utils.py#L90-L170
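A self-contained sketch of the detection priority implemented by the inner files_type() helper; the helper is re-inlined here so the snippet runs without navitiacommon installed, and the file names are invented for illustration.

def files_type(files):
    if any(f.endswith("contributors.txt") for f in files):
        return "fusio"     # checked first because a fusio bundle can also carry fares
    if any(f.endswith("fares.csv") for f in files):
        return "fare"
    if any(f.endswith("stops.txt") for f in files):
        return "gtfs"
    if any(f.endswith("adresse.txt") for f in files):
        return "geopal"
    if any(f.endswith("poi.txt") for f in files):
        return "poi"
    if any(f.endswith(".pbf") for f in files):
        return "osm"
    return None

print(files_type(["data/stops.txt", "data/trips.txt"]))          # gtfs
print(files_type(["data/contributors.txt", "data/fares.csv"]))   # fusio wins over fare
print(files_type(["region.osm.pbf"]))                            # osm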
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/external/coremltools_wrap/coremltools/coremltools/models/nearest_neighbors/builder.py
python
KNearestNeighborsClassifierBuilder.description
(self)
return self.spec.description.metadata.shortDescription
Get the description for the KNearestNeighborsClassifier model :return: the description
Get the description for the KNearestNeighborsClassifier model :return: the description
[ "Get", "the", "description", "for", "the", "KNearestNeighborsClassifier", "model", ":", "return", ":", "the", "description" ]
def description(self): """ Get the description for the KNearestNeighborsClassifier model :return: the description """ return self.spec.description.metadata.shortDescription
[ "def", "description", "(", "self", ")", ":", "return", "self", ".", "spec", ".", "description", ".", "metadata", ".", "shortDescription" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/models/nearest_neighbors/builder.py#L209-L214
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/bitstring/bitstring.py
python
tokenparser
(fmt, keys=None, token_cache={})
return stretchy_token, return_values
Divide the format string into tokens and parse them. Return stretchy token and list of [initialiser, length, value] initialiser is one of: hex, oct, bin, uint, int, se, ue, 0x, 0o, 0b etc. length is None if not known, as is value. If the token is in the keyword dictionary (keys) then it counts as a special case and isn't messed with. tokens must be of the form: [factor*][initialiser][:][length][=value]
Divide the format string into tokens and parse them.
[ "Divide", "the", "format", "string", "into", "tokens", "and", "parse", "them", "." ]
def tokenparser(fmt, keys=None, token_cache={}): """Divide the format string into tokens and parse them. Return stretchy token and list of [initialiser, length, value] initialiser is one of: hex, oct, bin, uint, int, se, ue, 0x, 0o, 0b etc. length is None if not known, as is value. If the token is in the keyword dictionary (keys) then it counts as a special case and isn't messed with. tokens must be of the form: [factor*][initialiser][:][length][=value] """ try: return token_cache[(fmt, keys)] except KeyError: token_key = (fmt, keys) # Very inefficient expanding of brackets. fmt = expand_brackets(fmt) # Split tokens by ',' and remove whitespace # The meta_tokens can either be ordinary single tokens or multiple # struct-format token strings. meta_tokens = (''.join(f.split()) for f in fmt.split(',')) return_values = [] stretchy_token = False for meta_token in meta_tokens: # See if it has a multiplicative factor m = MULTIPLICATIVE_RE.match(meta_token) if not m: factor = 1 else: factor = int(m.group('factor')) meta_token = m.group('token') # See if it's a struct-like format tokens = structparser(meta_token) ret_vals = [] for token in tokens: if keys and token in keys: # Don't bother parsing it, it's a keyword argument ret_vals.append([token, None, None]) continue value = length = None if token == '': continue # Match literal tokens of the form 0x... 0o... and 0b... m = LITERAL_RE.match(token) if m: name = m.group('name') value = m.group('value') ret_vals.append([name, length, value]) continue # Match everything else: m1 = TOKEN_RE.match(token) if not m1: # and if you don't specify a 'name' then the default is 'uint': m2 = DEFAULT_UINT.match(token) if not m2: raise ValueError("Don't understand token '{0}'.".format(token)) if m1: name = m1.group('name') length = m1.group('len') if m1.group('value'): value = m1.group('value') else: assert m2 name = 'uint' length = m2.group('len') if m2.group('value'): value = m2.group('value') if name == 'bool': if length is not None: raise ValueError("You can't specify a length with bool tokens - they are always one bit.") length = 1 if length is None and name not in ('se', 'ue', 'sie', 'uie'): stretchy_token = True if length is not None: # Try converting length to int, otherwise check it's a key. try: length = int(length) if length < 0: raise Error # For the 'bytes' token convert length to bits. if name == 'bytes': length *= 8 except Error: raise ValueError("Can't read a token with a negative length.") except ValueError: if not keys or length not in keys: raise ValueError("Don't understand length '{0}' of token.".format(length)) ret_vals.append([name, length, value]) # This multiplies by the multiplicative factor, but this means that # we can't allow keyword values as multipliers (e.g. n*uint:8). # The only way to do this would be to return the factor in some fashion # (we can't use the key's value here as it would mean that we couldn't # sensibly continue to cache the function's results. (TODO). return_values.extend(ret_vals * factor) return_values = [tuple(x) for x in return_values] if len(token_cache) < CACHE_SIZE: token_cache[token_key] = stretchy_token, return_values return stretchy_token, return_values
[ "def", "tokenparser", "(", "fmt", ",", "keys", "=", "None", ",", "token_cache", "=", "{", "}", ")", ":", "try", ":", "return", "token_cache", "[", "(", "fmt", ",", "keys", ")", "]", "except", "KeyError", ":", "token_key", "=", "(", "fmt", ",", "keys", ")", "# Very inefficient expanding of brackets.", "fmt", "=", "expand_brackets", "(", "fmt", ")", "# Split tokens by ',' and remove whitespace", "# The meta_tokens can either be ordinary single tokens or multiple", "# struct-format token strings.", "meta_tokens", "=", "(", "''", ".", "join", "(", "f", ".", "split", "(", ")", ")", "for", "f", "in", "fmt", ".", "split", "(", "','", ")", ")", "return_values", "=", "[", "]", "stretchy_token", "=", "False", "for", "meta_token", "in", "meta_tokens", ":", "# See if it has a multiplicative factor", "m", "=", "MULTIPLICATIVE_RE", ".", "match", "(", "meta_token", ")", "if", "not", "m", ":", "factor", "=", "1", "else", ":", "factor", "=", "int", "(", "m", ".", "group", "(", "'factor'", ")", ")", "meta_token", "=", "m", ".", "group", "(", "'token'", ")", "# See if it's a struct-like format", "tokens", "=", "structparser", "(", "meta_token", ")", "ret_vals", "=", "[", "]", "for", "token", "in", "tokens", ":", "if", "keys", "and", "token", "in", "keys", ":", "# Don't bother parsing it, it's a keyword argument", "ret_vals", ".", "append", "(", "[", "token", ",", "None", ",", "None", "]", ")", "continue", "value", "=", "length", "=", "None", "if", "token", "==", "''", ":", "continue", "# Match literal tokens of the form 0x... 0o... and 0b...", "m", "=", "LITERAL_RE", ".", "match", "(", "token", ")", "if", "m", ":", "name", "=", "m", ".", "group", "(", "'name'", ")", "value", "=", "m", ".", "group", "(", "'value'", ")", "ret_vals", ".", "append", "(", "[", "name", ",", "length", ",", "value", "]", ")", "continue", "# Match everything else:", "m1", "=", "TOKEN_RE", ".", "match", "(", "token", ")", "if", "not", "m1", ":", "# and if you don't specify a 'name' then the default is 'uint':", "m2", "=", "DEFAULT_UINT", ".", "match", "(", "token", ")", "if", "not", "m2", ":", "raise", "ValueError", "(", "\"Don't understand token '{0}'.\"", ".", "format", "(", "token", ")", ")", "if", "m1", ":", "name", "=", "m1", ".", "group", "(", "'name'", ")", "length", "=", "m1", ".", "group", "(", "'len'", ")", "if", "m1", ".", "group", "(", "'value'", ")", ":", "value", "=", "m1", ".", "group", "(", "'value'", ")", "else", ":", "assert", "m2", "name", "=", "'uint'", "length", "=", "m2", ".", "group", "(", "'len'", ")", "if", "m2", ".", "group", "(", "'value'", ")", ":", "value", "=", "m2", ".", "group", "(", "'value'", ")", "if", "name", "==", "'bool'", ":", "if", "length", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You can't specify a length with bool tokens - they are always one bit.\"", ")", "length", "=", "1", "if", "length", "is", "None", "and", "name", "not", "in", "(", "'se'", ",", "'ue'", ",", "'sie'", ",", "'uie'", ")", ":", "stretchy_token", "=", "True", "if", "length", "is", "not", "None", ":", "# Try converting length to int, otherwise check it's a key.", "try", ":", "length", "=", "int", "(", "length", ")", "if", "length", "<", "0", ":", "raise", "Error", "# For the 'bytes' token convert length to bits.", "if", "name", "==", "'bytes'", ":", "length", "*=", "8", "except", "Error", ":", "raise", "ValueError", "(", "\"Can't read a token with a negative length.\"", ")", "except", "ValueError", ":", "if", "not", "keys", "or", "length", "not", "in", "keys", ":", "raise", "ValueError", "(", "\"Don't 
understand length '{0}' of token.\"", ".", "format", "(", "length", ")", ")", "ret_vals", ".", "append", "(", "[", "name", ",", "length", ",", "value", "]", ")", "# This multiplies by the multiplicative factor, but this means that", "# we can't allow keyword values as multipliers (e.g. n*uint:8).", "# The only way to do this would be to return the factor in some fashion", "# (we can't use the key's value here as it would mean that we couldn't", "# sensibly continue to cache the function's results. (TODO).", "return_values", ".", "extend", "(", "ret_vals", "*", "factor", ")", "return_values", "=", "[", "tuple", "(", "x", ")", "for", "x", "in", "return_values", "]", "if", "len", "(", "token_cache", ")", "<", "CACHE_SIZE", ":", "token_cache", "[", "token_key", "]", "=", "stretchy_token", ",", "return_values", "return", "stretchy_token", ",", "return_values" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/bitstring/bitstring.py#L534-L633
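A hedged usage sketch: it assumes the single-file bitstring module shown above is importable as bitstring, and the commented results are my reading of the code rather than verified output.

import bitstring

stretchy, tokens = bitstring.tokenparser('uint:12, hex:8')
# stretchy should be False: every token carries an explicit length.
# tokens should look like [('uint', 12, None), ('hex', 8, None)].
print(stretchy, tokens)

stretchy, tokens = bitstring.tokenparser('2*bool, bin')
# 'bool' is forced to length 1 and repeated by the 2* factor;
# 'bin' has no length, so stretchy should come back True.
print(stretchy, tokens)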
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/first-unique-number.py
python
FirstUnique.__init__
(self, nums)
:type nums: List[int]
:type nums: List[int]
[ ":", "type", "nums", ":", "List", "[", "int", "]" ]
def __init__(self, nums): """ :type nums: List[int] """ self.__q = collections.OrderedDict() self.__dup = set() for num in nums: self.add(num)
[ "def", "__init__", "(", "self", ",", "nums", ")", ":", "self", ".", "__q", "=", "collections", ".", "OrderedDict", "(", ")", "self", ".", "__dup", "=", "set", "(", ")", "for", "num", "in", "nums", ":", "self", ".", "add", "(", "num", ")" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/first-unique-number.py#L11-L18
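The record only shows __init__; the add() it calls and the usual showFirstUnique() accessor belong to the rest of the file, which is not shown here. The sketch below re-implements the same OrderedDict-plus-duplicate-set technique as a self-contained class; the method names follow the LeetCode 1429 interface and are assumptions, not the file's actual code.

import collections

class FirstUniqueSketch(object):
    def __init__(self, nums):
        self.__q = collections.OrderedDict()   # unique candidates, in insertion order
        self.__dup = set()                      # values seen more than once
        for num in nums:
            self.add(num)

    def add(self, value):
        if value in self.__dup:
            return
        if value not in self.__q:
            self.__q[value] = None
        else:                                   # second sighting: no longer unique
            self.__q.pop(value)
            self.__dup.add(value)

    def showFirstUnique(self):
        return next(iter(self.__q)) if self.__q else -1

q = FirstUniqueSketch([2, 3, 5])
print(q.showFirstUnique())  # 2
q.add(2)
print(q.showFirstUnique())  # 3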
ucbrise/confluo
578883a4f7fbbb4aea78c342d366f5122ef598f7
pyclient/confluo/rpc/rpc_service.py
python
Client.add_aggregate
(self, mid, aggregate_name, filter_id, aggregate_expr)
Parameters: - mid - aggregate_name - filter_id - aggregate_expr
Parameters: - mid - aggregate_name - filter_id - aggregate_expr
[ "Parameters", ":", "-", "mid", "-", "aggregate_name", "-", "filter_id", "-", "aggregate_expr" ]
def add_aggregate(self, mid, aggregate_name, filter_id, aggregate_expr): """ Parameters: - mid - aggregate_name - filter_id - aggregate_expr """ self.send_add_aggregate(mid, aggregate_name, filter_id, aggregate_expr) self.recv_add_aggregate()
[ "def", "add_aggregate", "(", "self", ",", "mid", ",", "aggregate_name", ",", "filter_id", ",", "aggregate_expr", ")", ":", "self", ".", "send_add_aggregate", "(", "mid", ",", "aggregate_name", ",", "filter_id", ",", "aggregate_expr", ")", "self", ".", "recv_add_aggregate", "(", ")" ]
https://github.com/ucbrise/confluo/blob/578883a4f7fbbb4aea78c342d366f5122ef598f7/pyclient/confluo/rpc/rpc_service.py#L615-L625
raspberrypi/tools
13474ee775d0c5ec8a7da4fb0a9fa84187abfc87
arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian/share/gdb/python/gdb/prompt.py
python
_prompt_bs
(attr)
return '\\'
A backslash.
A backslash.
[ "A", "backslash", "." ]
def _prompt_bs(attr): "A backslash." return '\\'
[ "def", "_prompt_bs", "(", "attr", ")", ":", "return", "'\\\\'" ]
https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian/share/gdb/python/gdb/prompt.py#L58-L60
psi4/psi4
be533f7f426b6ccc263904e55122899b16663395
psi4/driver/p4util/fcidump.py
python
fcidump
(wfn, fname='INTDUMP', oe_ints=None)
Save integrals to file in FCIDUMP format as defined in Comp. Phys. Commun. 54 75 (1989) Additional one-electron integrals, including orbital energies, can also be saved. This latter format can be used with the HANDE QMC code but is not standard. :returns: None :raises: ValidationError when SCF wavefunction is not RHF :type wfn: :py:class:`~psi4.core.Wavefunction` :param wfn: set of molecule, basis, orbitals from which to generate cube files :param fname: name of the integrals file, defaults to INTDUMP :param oe_ints: list of additional one-electron integrals to save to file. So far only EIGENVALUES is a valid option. :examples: >>> # [1] Save one- and two-electron integrals to standard FCIDUMP format >>> E, wfn = energy('scf', return_wfn=True) >>> fcidump(wfn) >>> # [2] Save orbital energies, one- and two-electron integrals. >>> E, wfn = energy('scf', return_wfn=True) >>> fcidump(wfn, oe_ints=['EIGENVALUES'])
Save integrals to file in FCIDUMP format as defined in Comp. Phys. Commun. 54 75 (1989) Additional one-electron integrals, including orbital energies, can also be saved. This latter format can be used with the HANDE QMC code but is not standard.
[ "Save", "integrals", "to", "file", "in", "FCIDUMP", "format", "as", "defined", "in", "Comp", ".", "Phys", ".", "Commun", ".", "54", "75", "(", "1989", ")", "Additional", "one", "-", "electron", "integrals", "including", "orbital", "energies", "can", "also", "be", "saved", ".", "This", "latter", "format", "can", "be", "used", "with", "the", "HANDE", "QMC", "code", "but", "is", "not", "standard", "." ]
def fcidump(wfn, fname='INTDUMP', oe_ints=None): """Save integrals to file in FCIDUMP format as defined in Comp. Phys. Commun. 54 75 (1989) Additional one-electron integrals, including orbital energies, can also be saved. This latter format can be used with the HANDE QMC code but is not standard. :returns: None :raises: ValidationError when SCF wavefunction is not RHF :type wfn: :py:class:`~psi4.core.Wavefunction` :param wfn: set of molecule, basis, orbitals from which to generate cube files :param fname: name of the integrals file, defaults to INTDUMP :param oe_ints: list of additional one-electron integrals to save to file. So far only EIGENVALUES is a valid option. :examples: >>> # [1] Save one- and two-electron integrals to standard FCIDUMP format >>> E, wfn = energy('scf', return_wfn=True) >>> fcidump(wfn) >>> # [2] Save orbital energies, one- and two-electron integrals. >>> E, wfn = energy('scf', return_wfn=True) >>> fcidump(wfn, oe_ints=['EIGENVALUES']) """ # Get some options reference = core.get_option('SCF', 'REFERENCE') ints_tolerance = core.get_global_option('INTS_TOLERANCE') # Some sanity checks if reference not in ['RHF', 'UHF']: raise ValidationError('FCIDUMP not implemented for {} references\n'.format(reference)) if oe_ints is None: oe_ints = [] molecule = wfn.molecule() docc = wfn.doccpi() frzcpi = wfn.frzcpi() frzvpi = wfn.frzvpi() active_docc = docc - frzcpi active_socc = wfn.soccpi() active_mopi = wfn.nmopi() - frzcpi - frzvpi nbf = active_mopi.sum() if wfn.same_a_b_orbs() else 2 * active_mopi.sum() nirrep = wfn.nirrep() nelectron = 2 * active_docc.sum() + active_socc.sum() irrep_map = _irrep_map(wfn) wfn_irrep = 0 for h, n_socc in enumerate(active_socc): if n_socc % 2 == 1: wfn_irrep ^= h core.print_out('Writing integrals in FCIDUMP format to ' + fname + '\n') # Generate FCIDUMP header header = '&FCI\n' header += 'NORB={:d},\n'.format(nbf) header += 'NELEC={:d},\n'.format(nelectron) header += 'MS2={:d},\n'.format(wfn.nalpha() - wfn.nbeta()) header += 'UHF=.{}.,\n'.format(not wfn.same_a_b_orbs()).upper() orbsym = '' for h in range(active_mopi.n()): for n in range(frzcpi[h], frzcpi[h] + active_mopi[h]): orbsym += '{:d},'.format(irrep_map[h]) if not wfn.same_a_b_orbs(): orbsym += '{:d},'.format(irrep_map[h]) header += 'ORBSYM={}\n'.format(orbsym) header += 'ISYM={:d},\n'.format(irrep_map[wfn_irrep]) header += '&END\n' with open(fname, 'w') as intdump: intdump.write(header) # Get an IntegralTransform object check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), wfn) spaces = [core.MOSpace.all()] trans_type = core.IntegralTransform.TransformationType.Restricted if not wfn.same_a_b_orbs(): trans_type = core.IntegralTransform.TransformationType.Unrestricted ints = core.IntegralTransform(wfn, spaces, trans_type) ints.transform_tei(core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all()) core.print_out('Integral transformation complete!\n') DPD_info = {'instance_id': ints.get_dpd_id(), 'alpha_MO': ints.DPD_ID('[A>=A]+'), 'beta_MO': 0} if not wfn.same_a_b_orbs(): DPD_info['beta_MO'] = ints.DPD_ID("[a>=a]+") # Write TEI to fname in FCIDUMP format core.fcidump_tei_helper(nirrep, wfn.same_a_b_orbs(), DPD_info, ints_tolerance, fname) # Read-in OEI and write them to fname in FCIDUMP format # Indexing functions to translate from zero-based (C and Python) to # one-based (Fortran) mo_idx = lambda x: x + 1 alpha_mo_idx = lambda x: 2 * x + 1 beta_mo_idx = lambda x: 2 * (x + 1) with open(fname, 'a') as intdump: core.print_out('Writing frozen core operator in 
FCIDUMP format to ' + fname + '\n') if reference == 'RHF': PSIF_MO_FZC = 'MO-basis Frozen-Core Operator' moH = core.Matrix(PSIF_MO_FZC, wfn.nmopi(), wfn.nmopi()) moH.load(core.IO.shared_object(), psif.PSIF_OEI) mo_slice = core.Slice(frzcpi, active_mopi) MO_FZC = moH.get_block(mo_slice, mo_slice) offset = 0 for h, block in enumerate(MO_FZC.nph): il = np.tril_indices(block.shape[0]) for index, x in np.ndenumerate(block[il]): row = mo_idx(il[0][index] + offset) col = mo_idx(il[1][index] + offset) if (abs(x) > ints_tolerance): intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0)) offset += block.shape[0] # Additional one-electron integrals as requested in oe_ints # Orbital energies core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n') if 'EIGENVALUES' in oe_ints: eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), mo_idx) intdump.write(eigs_dump) else: PSIF_MO_A_FZC = 'MO-basis Alpha Frozen-Core Oper' moH_A = core.Matrix(PSIF_MO_A_FZC, wfn.nmopi(), wfn.nmopi()) moH_A.load(core.IO.shared_object(), psif.PSIF_OEI) mo_slice = core.Slice(frzcpi, active_mopi) MO_FZC_A = moH_A.get_block(mo_slice, mo_slice) offset = 0 for h, block in enumerate(MO_FZC_A.nph): il = np.tril_indices(block.shape[0]) for index, x in np.ndenumerate(block[il]): row = alpha_mo_idx(il[0][index] + offset) col = alpha_mo_idx(il[1][index] + offset) if (abs(x) > ints_tolerance): intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0)) offset += block.shape[0] PSIF_MO_B_FZC = 'MO-basis Beta Frozen-Core Oper' moH_B = core.Matrix(PSIF_MO_B_FZC, wfn.nmopi(), wfn.nmopi()) moH_B.load(core.IO.shared_object(), psif.PSIF_OEI) mo_slice = core.Slice(frzcpi, active_mopi) MO_FZC_B = moH_B.get_block(mo_slice, mo_slice) offset = 0 for h, block in enumerate(MO_FZC_B.nph): il = np.tril_indices(block.shape[0]) for index, x in np.ndenumerate(block[il]): row = beta_mo_idx(il[0][index] + offset) col = beta_mo_idx(il[1][index] + offset) if (abs(x) > ints_tolerance): intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0)) offset += block.shape[0] # Additional one-electron integrals as requested in oe_ints # Orbital energies core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n') if 'EIGENVALUES' in oe_ints: alpha_eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), alpha_mo_idx) beta_eigs_dump = write_eigenvalues(wfn.epsilon_b().get_block(mo_slice).to_array(), beta_mo_idx) intdump.write(alpha_eigs_dump + beta_eigs_dump) # Dipole integrals #core.print_out('Writing dipole moment OEI in FCIDUMP format to ' + fname + '\n') # Traceless quadrupole integrals #core.print_out('Writing traceless quadrupole moment OEI in FCIDUMP format to ' + fname + '\n') # Frozen core + nuclear repulsion energy core.print_out('Writing frozen core + nuclear repulsion energy in FCIDUMP format to ' + fname + '\n') e_fzc = ints.get_frozen_core_energy() e_nuc = molecule.nuclear_repulsion_energy(wfn.get_dipole_field_strength()) intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(e_fzc + e_nuc, 0, 0, 0, 0)) core.print_out('Done generating {} with integrals in FCIDUMP format.\n'.format(fname))
[ "def", "fcidump", "(", "wfn", ",", "fname", "=", "'INTDUMP'", ",", "oe_ints", "=", "None", ")", ":", "# Get some options", "reference", "=", "core", ".", "get_option", "(", "'SCF'", ",", "'REFERENCE'", ")", "ints_tolerance", "=", "core", ".", "get_global_option", "(", "'INTS_TOLERANCE'", ")", "# Some sanity checks", "if", "reference", "not", "in", "[", "'RHF'", ",", "'UHF'", "]", ":", "raise", "ValidationError", "(", "'FCIDUMP not implemented for {} references\\n'", ".", "format", "(", "reference", ")", ")", "if", "oe_ints", "is", "None", ":", "oe_ints", "=", "[", "]", "molecule", "=", "wfn", ".", "molecule", "(", ")", "docc", "=", "wfn", ".", "doccpi", "(", ")", "frzcpi", "=", "wfn", ".", "frzcpi", "(", ")", "frzvpi", "=", "wfn", ".", "frzvpi", "(", ")", "active_docc", "=", "docc", "-", "frzcpi", "active_socc", "=", "wfn", ".", "soccpi", "(", ")", "active_mopi", "=", "wfn", ".", "nmopi", "(", ")", "-", "frzcpi", "-", "frzvpi", "nbf", "=", "active_mopi", ".", "sum", "(", ")", "if", "wfn", ".", "same_a_b_orbs", "(", ")", "else", "2", "*", "active_mopi", ".", "sum", "(", ")", "nirrep", "=", "wfn", ".", "nirrep", "(", ")", "nelectron", "=", "2", "*", "active_docc", ".", "sum", "(", ")", "+", "active_socc", ".", "sum", "(", ")", "irrep_map", "=", "_irrep_map", "(", "wfn", ")", "wfn_irrep", "=", "0", "for", "h", ",", "n_socc", "in", "enumerate", "(", "active_socc", ")", ":", "if", "n_socc", "%", "2", "==", "1", ":", "wfn_irrep", "^=", "h", "core", ".", "print_out", "(", "'Writing integrals in FCIDUMP format to '", "+", "fname", "+", "'\\n'", ")", "# Generate FCIDUMP header", "header", "=", "'&FCI\\n'", "header", "+=", "'NORB={:d},\\n'", ".", "format", "(", "nbf", ")", "header", "+=", "'NELEC={:d},\\n'", ".", "format", "(", "nelectron", ")", "header", "+=", "'MS2={:d},\\n'", ".", "format", "(", "wfn", ".", "nalpha", "(", ")", "-", "wfn", ".", "nbeta", "(", ")", ")", "header", "+=", "'UHF=.{}.,\\n'", ".", "format", "(", "not", "wfn", ".", "same_a_b_orbs", "(", ")", ")", ".", "upper", "(", ")", "orbsym", "=", "''", "for", "h", "in", "range", "(", "active_mopi", ".", "n", "(", ")", ")", ":", "for", "n", "in", "range", "(", "frzcpi", "[", "h", "]", ",", "frzcpi", "[", "h", "]", "+", "active_mopi", "[", "h", "]", ")", ":", "orbsym", "+=", "'{:d},'", ".", "format", "(", "irrep_map", "[", "h", "]", ")", "if", "not", "wfn", ".", "same_a_b_orbs", "(", ")", ":", "orbsym", "+=", "'{:d},'", ".", "format", "(", "irrep_map", "[", "h", "]", ")", "header", "+=", "'ORBSYM={}\\n'", ".", "format", "(", "orbsym", ")", "header", "+=", "'ISYM={:d},\\n'", ".", "format", "(", "irrep_map", "[", "wfn_irrep", "]", ")", "header", "+=", "'&END\\n'", "with", "open", "(", "fname", ",", "'w'", ")", "as", "intdump", ":", "intdump", ".", "write", "(", "header", ")", "# Get an IntegralTransform object", "check_iwl_file_from_scf_type", "(", "core", ".", "get_global_option", "(", "'SCF_TYPE'", ")", ",", "wfn", ")", "spaces", "=", "[", "core", ".", "MOSpace", ".", "all", "(", ")", "]", "trans_type", "=", "core", ".", "IntegralTransform", ".", "TransformationType", ".", "Restricted", "if", "not", "wfn", ".", "same_a_b_orbs", "(", ")", ":", "trans_type", "=", "core", ".", "IntegralTransform", ".", "TransformationType", ".", "Unrestricted", "ints", "=", "core", ".", "IntegralTransform", "(", "wfn", ",", "spaces", ",", "trans_type", ")", "ints", ".", "transform_tei", "(", "core", ".", "MOSpace", ".", "all", "(", ")", ",", "core", ".", "MOSpace", ".", "all", "(", ")", ",", "core", ".", "MOSpace", ".", "all", "(", ")", ",", "core", ".", 
"MOSpace", ".", "all", "(", ")", ")", "core", ".", "print_out", "(", "'Integral transformation complete!\\n'", ")", "DPD_info", "=", "{", "'instance_id'", ":", "ints", ".", "get_dpd_id", "(", ")", ",", "'alpha_MO'", ":", "ints", ".", "DPD_ID", "(", "'[A>=A]+'", ")", ",", "'beta_MO'", ":", "0", "}", "if", "not", "wfn", ".", "same_a_b_orbs", "(", ")", ":", "DPD_info", "[", "'beta_MO'", "]", "=", "ints", ".", "DPD_ID", "(", "\"[a>=a]+\"", ")", "# Write TEI to fname in FCIDUMP format", "core", ".", "fcidump_tei_helper", "(", "nirrep", ",", "wfn", ".", "same_a_b_orbs", "(", ")", ",", "DPD_info", ",", "ints_tolerance", ",", "fname", ")", "# Read-in OEI and write them to fname in FCIDUMP format", "# Indexing functions to translate from zero-based (C and Python) to", "# one-based (Fortran)", "mo_idx", "=", "lambda", "x", ":", "x", "+", "1", "alpha_mo_idx", "=", "lambda", "x", ":", "2", "*", "x", "+", "1", "beta_mo_idx", "=", "lambda", "x", ":", "2", "*", "(", "x", "+", "1", ")", "with", "open", "(", "fname", ",", "'a'", ")", "as", "intdump", ":", "core", ".", "print_out", "(", "'Writing frozen core operator in FCIDUMP format to '", "+", "fname", "+", "'\\n'", ")", "if", "reference", "==", "'RHF'", ":", "PSIF_MO_FZC", "=", "'MO-basis Frozen-Core Operator'", "moH", "=", "core", ".", "Matrix", "(", "PSIF_MO_FZC", ",", "wfn", ".", "nmopi", "(", ")", ",", "wfn", ".", "nmopi", "(", ")", ")", "moH", ".", "load", "(", "core", ".", "IO", ".", "shared_object", "(", ")", ",", "psif", ".", "PSIF_OEI", ")", "mo_slice", "=", "core", ".", "Slice", "(", "frzcpi", ",", "active_mopi", ")", "MO_FZC", "=", "moH", ".", "get_block", "(", "mo_slice", ",", "mo_slice", ")", "offset", "=", "0", "for", "h", ",", "block", "in", "enumerate", "(", "MO_FZC", ".", "nph", ")", ":", "il", "=", "np", ".", "tril_indices", "(", "block", ".", "shape", "[", "0", "]", ")", "for", "index", ",", "x", "in", "np", ".", "ndenumerate", "(", "block", "[", "il", "]", ")", ":", "row", "=", "mo_idx", "(", "il", "[", "0", "]", "[", "index", "]", "+", "offset", ")", "col", "=", "mo_idx", "(", "il", "[", "1", "]", "[", "index", "]", "+", "offset", ")", "if", "(", "abs", "(", "x", ")", ">", "ints_tolerance", ")", ":", "intdump", ".", "write", "(", "'{:29.20E}{:4d}{:4d}{:4d}{:4d}\\n'", ".", "format", "(", "x", ",", "row", ",", "col", ",", "0", ",", "0", ")", ")", "offset", "+=", "block", ".", "shape", "[", "0", "]", "# Additional one-electron integrals as requested in oe_ints", "# Orbital energies", "core", ".", "print_out", "(", "'Writing orbital energies in FCIDUMP format to '", "+", "fname", "+", "'\\n'", ")", "if", "'EIGENVALUES'", "in", "oe_ints", ":", "eigs_dump", "=", "write_eigenvalues", "(", "wfn", ".", "epsilon_a", "(", ")", ".", "get_block", "(", "mo_slice", ")", ".", "to_array", "(", ")", ",", "mo_idx", ")", "intdump", ".", "write", "(", "eigs_dump", ")", "else", ":", "PSIF_MO_A_FZC", "=", "'MO-basis Alpha Frozen-Core Oper'", "moH_A", "=", "core", ".", "Matrix", "(", "PSIF_MO_A_FZC", ",", "wfn", ".", "nmopi", "(", ")", ",", "wfn", ".", "nmopi", "(", ")", ")", "moH_A", ".", "load", "(", "core", ".", "IO", ".", "shared_object", "(", ")", ",", "psif", ".", "PSIF_OEI", ")", "mo_slice", "=", "core", ".", "Slice", "(", "frzcpi", ",", "active_mopi", ")", "MO_FZC_A", "=", "moH_A", ".", "get_block", "(", "mo_slice", ",", "mo_slice", ")", "offset", "=", "0", "for", "h", ",", "block", "in", "enumerate", "(", "MO_FZC_A", ".", "nph", ")", ":", "il", "=", "np", ".", "tril_indices", "(", "block", ".", "shape", "[", "0", "]", ")", "for", "index", ",", 
"x", "in", "np", ".", "ndenumerate", "(", "block", "[", "il", "]", ")", ":", "row", "=", "alpha_mo_idx", "(", "il", "[", "0", "]", "[", "index", "]", "+", "offset", ")", "col", "=", "alpha_mo_idx", "(", "il", "[", "1", "]", "[", "index", "]", "+", "offset", ")", "if", "(", "abs", "(", "x", ")", ">", "ints_tolerance", ")", ":", "intdump", ".", "write", "(", "'{:29.20E}{:4d}{:4d}{:4d}{:4d}\\n'", ".", "format", "(", "x", ",", "row", ",", "col", ",", "0", ",", "0", ")", ")", "offset", "+=", "block", ".", "shape", "[", "0", "]", "PSIF_MO_B_FZC", "=", "'MO-basis Beta Frozen-Core Oper'", "moH_B", "=", "core", ".", "Matrix", "(", "PSIF_MO_B_FZC", ",", "wfn", ".", "nmopi", "(", ")", ",", "wfn", ".", "nmopi", "(", ")", ")", "moH_B", ".", "load", "(", "core", ".", "IO", ".", "shared_object", "(", ")", ",", "psif", ".", "PSIF_OEI", ")", "mo_slice", "=", "core", ".", "Slice", "(", "frzcpi", ",", "active_mopi", ")", "MO_FZC_B", "=", "moH_B", ".", "get_block", "(", "mo_slice", ",", "mo_slice", ")", "offset", "=", "0", "for", "h", ",", "block", "in", "enumerate", "(", "MO_FZC_B", ".", "nph", ")", ":", "il", "=", "np", ".", "tril_indices", "(", "block", ".", "shape", "[", "0", "]", ")", "for", "index", ",", "x", "in", "np", ".", "ndenumerate", "(", "block", "[", "il", "]", ")", ":", "row", "=", "beta_mo_idx", "(", "il", "[", "0", "]", "[", "index", "]", "+", "offset", ")", "col", "=", "beta_mo_idx", "(", "il", "[", "1", "]", "[", "index", "]", "+", "offset", ")", "if", "(", "abs", "(", "x", ")", ">", "ints_tolerance", ")", ":", "intdump", ".", "write", "(", "'{:29.20E}{:4d}{:4d}{:4d}{:4d}\\n'", ".", "format", "(", "x", ",", "row", ",", "col", ",", "0", ",", "0", ")", ")", "offset", "+=", "block", ".", "shape", "[", "0", "]", "# Additional one-electron integrals as requested in oe_ints", "# Orbital energies", "core", ".", "print_out", "(", "'Writing orbital energies in FCIDUMP format to '", "+", "fname", "+", "'\\n'", ")", "if", "'EIGENVALUES'", "in", "oe_ints", ":", "alpha_eigs_dump", "=", "write_eigenvalues", "(", "wfn", ".", "epsilon_a", "(", ")", ".", "get_block", "(", "mo_slice", ")", ".", "to_array", "(", ")", ",", "alpha_mo_idx", ")", "beta_eigs_dump", "=", "write_eigenvalues", "(", "wfn", ".", "epsilon_b", "(", ")", ".", "get_block", "(", "mo_slice", ")", ".", "to_array", "(", ")", ",", "beta_mo_idx", ")", "intdump", ".", "write", "(", "alpha_eigs_dump", "+", "beta_eigs_dump", ")", "# Dipole integrals", "#core.print_out('Writing dipole moment OEI in FCIDUMP format to ' + fname + '\\n')", "# Traceless quadrupole integrals", "#core.print_out('Writing traceless quadrupole moment OEI in FCIDUMP format to ' + fname + '\\n')", "# Frozen core + nuclear repulsion energy", "core", ".", "print_out", "(", "'Writing frozen core + nuclear repulsion energy in FCIDUMP format to '", "+", "fname", "+", "'\\n'", ")", "e_fzc", "=", "ints", ".", "get_frozen_core_energy", "(", ")", "e_nuc", "=", "molecule", ".", "nuclear_repulsion_energy", "(", "wfn", ".", "get_dipole_field_strength", "(", ")", ")", "intdump", ".", "write", "(", "'{:29.20E}{:4d}{:4d}{:4d}{:4d}\\n'", ".", "format", "(", "e_fzc", "+", "e_nuc", ",", "0", ",", "0", ",", "0", ",", "0", ")", ")", "core", ".", "print_out", "(", "'Done generating {} with integrals in FCIDUMP format.\\n'", ".", "format", "(", "fname", ")", ")" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/p4util/fcidump.py#L40-L203
PaddlePaddle/PaddleOCR
b756bf5f8c90142e0d89d3db0163965c686b6ffe
ppocr/losses/det_basic_loss.py
python
BalanceLoss.__init__
(self, balance_loss=True, main_loss_type='DiceLoss', negative_ratio=3, return_origin=False, eps=1e-6, **kwargs)
The BalanceLoss for Differentiable Binarization text detection args: balance_loss (bool): whether balance loss or not, default is True main_loss_type (str): can only be one of ['CrossEntropy','DiceLoss', 'Euclidean','BCELoss', 'MaskL1Loss'], default is 'DiceLoss'. negative_ratio (int|float): float, default is 3. return_origin (bool): whether return unbalanced loss or not, default is False. eps (float): default is 1e-6.
The BalanceLoss for Differentiable Binarization text detection args: balance_loss (bool): whether balance loss or not, default is True main_loss_type (str): can only be one of ['CrossEntropy','DiceLoss', 'Euclidean','BCELoss', 'MaskL1Loss'], default is 'DiceLoss'. negative_ratio (int|float): float, default is 3. return_origin (bool): whether return unbalanced loss or not, default is False. eps (float): default is 1e-6.
[ "The", "BalanceLoss", "for", "Differentiable", "Binarization", "text", "detection", "args", ":", "balance_loss", "(", "bool", ")", ":", "whether", "balance", "loss", "or", "not", "default", "is", "True", "main_loss_type", "(", "str", ")", ":", "can", "only", "be", "one", "of", "[", "CrossEntropy", "DiceLoss", "Euclidean", "BCELoss", "MaskL1Loss", "]", "default", "is", "DiceLoss", ".", "negative_ratio", "(", "int|float", ")", ":", "float", "default", "is", "3", ".", "return_origin", "(", "bool", ")", ":", "whether", "return", "unbalanced", "loss", "or", "not", "default", "is", "False", ".", "eps", "(", "float", ")", ":", "default", "is", "1e", "-", "6", "." ]
def __init__(self, balance_loss=True, main_loss_type='DiceLoss', negative_ratio=3, return_origin=False, eps=1e-6, **kwargs): """ The BalanceLoss for Differentiable Binarization text detection args: balance_loss (bool): whether balance loss or not, default is True main_loss_type (str): can only be one of ['CrossEntropy','DiceLoss', 'Euclidean','BCELoss', 'MaskL1Loss'], default is 'DiceLoss'. negative_ratio (int|float): float, default is 3. return_origin (bool): whether return unbalanced loss or not, default is False. eps (float): default is 1e-6. """ super(BalanceLoss, self).__init__() self.balance_loss = balance_loss self.main_loss_type = main_loss_type self.negative_ratio = negative_ratio self.return_origin = return_origin self.eps = eps if self.main_loss_type == "CrossEntropy": self.loss = nn.CrossEntropyLoss() elif self.main_loss_type == "Euclidean": self.loss = nn.MSELoss() elif self.main_loss_type == "DiceLoss": self.loss = DiceLoss(self.eps) elif self.main_loss_type == "BCELoss": self.loss = BCELoss(reduction='none') elif self.main_loss_type == "MaskL1Loss": self.loss = MaskL1Loss(self.eps) else: loss_type = [ 'CrossEntropy', 'DiceLoss', 'Euclidean', 'BCELoss', 'MaskL1Loss' ] raise Exception( "main_loss_type in BalanceLoss() can only be one of {}".format( loss_type))
[ "def", "__init__", "(", "self", ",", "balance_loss", "=", "True", ",", "main_loss_type", "=", "'DiceLoss'", ",", "negative_ratio", "=", "3", ",", "return_origin", "=", "False", ",", "eps", "=", "1e-6", ",", "*", "*", "kwargs", ")", ":", "super", "(", "BalanceLoss", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "balance_loss", "=", "balance_loss", "self", ".", "main_loss_type", "=", "main_loss_type", "self", ".", "negative_ratio", "=", "negative_ratio", "self", ".", "return_origin", "=", "return_origin", "self", ".", "eps", "=", "eps", "if", "self", ".", "main_loss_type", "==", "\"CrossEntropy\"", ":", "self", ".", "loss", "=", "nn", ".", "CrossEntropyLoss", "(", ")", "elif", "self", ".", "main_loss_type", "==", "\"Euclidean\"", ":", "self", ".", "loss", "=", "nn", ".", "MSELoss", "(", ")", "elif", "self", ".", "main_loss_type", "==", "\"DiceLoss\"", ":", "self", ".", "loss", "=", "DiceLoss", "(", "self", ".", "eps", ")", "elif", "self", ".", "main_loss_type", "==", "\"BCELoss\"", ":", "self", ".", "loss", "=", "BCELoss", "(", "reduction", "=", "'none'", ")", "elif", "self", ".", "main_loss_type", "==", "\"MaskL1Loss\"", ":", "self", ".", "loss", "=", "MaskL1Loss", "(", "self", ".", "eps", ")", "else", ":", "loss_type", "=", "[", "'CrossEntropy'", ",", "'DiceLoss'", ",", "'Euclidean'", ",", "'BCELoss'", ",", "'MaskL1Loss'", "]", "raise", "Exception", "(", "\"main_loss_type in BalanceLoss() can only be one of {}\"", ".", "format", "(", "loss_type", ")", ")" ]
https://github.com/PaddlePaddle/PaddleOCR/blob/b756bf5f8c90142e0d89d3db0163965c686b6ffe/ppocr/losses/det_basic_loss.py#L30-L70
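A hedged usage sketch: it assumes PaddlePaddle is installed and the PaddleOCR repository root is on sys.path, so that ppocr.losses.det_basic_loss and its sibling loss classes (DiceLoss, BCELoss, MaskL1Loss) import cleanly.

from ppocr.losses.det_basic_loss import BalanceLoss

loss_fn = BalanceLoss(main_loss_type='DiceLoss', negative_ratio=3)  # wraps a DiceLoss internally
try:
    BalanceLoss(main_loss_type='HingeLoss')   # not one of the five supported names
except Exception as exc:
    print(exc)                                # message lists the allowed main_loss_type values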
gimli-org/gimli
17aa2160de9b15ababd9ef99e89b1bc3277bbb23
pygimli/physics/em/hemmodelling.py
python
FDEM2dFOP.response
(self, model)
return resp
Response as pasted forward responses from all soundings.
Response as pasted forward responses from all soundings.
[ "Response", "as", "pasted", "forward", "responses", "from", "all", "soundings", "." ]
def response(self, model): """Response as pasted forward responses from all soundings.""" modA = np.reshape(model, (self.nx, self.nlay*2-1)) resp = pg.Vector(0) for i, modi in enumerate(modA): resp = pg.cat(resp, self.FOP1d[i].response(modi)) return resp
[ "def", "response", "(", "self", ",", "model", ")", ":", "modA", "=", "np", ".", "reshape", "(", "model", ",", "(", "self", ".", "nx", ",", "self", ".", "nlay", "*", "2", "-", "1", ")", ")", "resp", "=", "pg", ".", "Vector", "(", "0", ")", "for", "i", ",", "modi", "in", "enumerate", "(", "modA", ")", ":", "resp", "=", "pg", ".", "cat", "(", "resp", ",", "self", ".", "FOP1d", "[", "i", "]", ".", "response", "(", "modi", ")", ")", "return", "resp" ]
https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/em/hemmodelling.py#L590-L597
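A NumPy-only sketch of the reshape-and-paste pattern the method uses: the toy forward operator stands in for self.FOP1d[i].response(), and pg.cat is replaced by np.concatenate.

import numpy as np

nx, nlay = 4, 3                         # 4 soundings, 3 layers -> 2*nlay-1 = 5 params each
model = np.arange(nx * (2 * nlay - 1), dtype=float)

def toy_response(model_1d):
    return model_1d[:2] * 10.0          # pretend each sounding yields 2 data values

mod_a = model.reshape(nx, 2 * nlay - 1)
resp = np.concatenate([toy_response(m) for m in mod_a])
print(resp.shape)                       # (8,): nx * 2 values, pasted in sounding order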
p4lang/behavioral-model
81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9
tools/cpplint.py
python
ShouldCheckNamespaceIndentation
(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum)
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace.
This method determines if we should apply our namespace indentation check.
[ "This", "method", "determines", "if", "we", "should", "apply", "our", "namespace", "indentation", "check", "." ]
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum): """This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace. """ is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) if not (is_namespace_indent_item or is_forward_declaration): return False # If we are in a macro, we do not want to check the namespace indentation. if IsMacroDefinition(raw_lines_no_comments, linenum): return False return IsBlockInNameSpace(nesting_state, is_forward_declaration)
[ "def", "ShouldCheckNamespaceIndentation", "(", "nesting_state", ",", "is_namespace_indent_item", ",", "raw_lines_no_comments", ",", "linenum", ")", ":", "is_forward_declaration", "=", "IsForwardClassDeclaration", "(", "raw_lines_no_comments", ",", "linenum", ")", "if", "not", "(", "is_namespace_indent_item", "or", "is_forward_declaration", ")", ":", "return", "False", "# If we are in a macro, we do not want to check the namespace indentation.", "if", "IsMacroDefinition", "(", "raw_lines_no_comments", ",", "linenum", ")", ":", "return", "False", "return", "IsBlockInNameSpace", "(", "nesting_state", ",", "is_forward_declaration", ")" ]
https://github.com/p4lang/behavioral-model/blob/81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9/tools/cpplint.py#L6294-L6321
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/ops/linalg_ops.py
python
self_adjoint_eigvals
(matrix, name=None)
return e
Computes the eigenvalues a self-adjoint matrix. Args: matrix: `Tensor` of shape `[N, N]`. name: string, optional name of the operation. Returns: e: Eigenvalues of `matrix`. Shape is `[N]`.
Computes the eigenvalues a self-adjoint matrix.
[ "Computes", "the", "eigenvalues", "a", "self", "-", "adjoint", "matrix", "." ]
def self_adjoint_eigvals(matrix, name=None): """Computes the eigenvalues a self-adjoint matrix. Args: matrix: `Tensor` of shape `[N, N]`. name: string, optional name of the operation. Returns: e: Eigenvalues of `matrix`. Shape is `[N]`. """ e, _ = gen_linalg_ops.self_adjoint_eig_v2(matrix, compute_v=False, name=name) return e
[ "def", "self_adjoint_eigvals", "(", "matrix", ",", "name", "=", "None", ")", ":", "e", ",", "_", "=", "gen_linalg_ops", ".", "self_adjoint_eig_v2", "(", "matrix", ",", "compute_v", "=", "False", ",", "name", "=", "name", ")", "return", "e" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/linalg_ops.py#L449-L460
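For illustration, the same computation with NumPy rather than the bundled TensorFlow op; np.linalg.eigvalsh likewise assumes a self-adjoint (symmetric/Hermitian) input and returns a shape-[N] vector of eigenvalues.

import numpy as np

matrix = np.array([[2.0, 1.0],
                   [1.0, 2.0]])
print(np.linalg.eigvalsh(matrix))   # [1. 3.]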
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/prepare_binding_Python.py
python
get_python_module_path
(options)
Returns the location where the lldb Python module should be placed. @param options dictionary of options parsed from the command line. @return the directory where the lldb module should be placed.
Returns the location where the lldb Python module should be placed.
[ "Returns", "the", "location", "where", "the", "lldb", "Python", "module", "should", "be", "placed", "." ]
def get_python_module_path(options): """Returns the location where the lldb Python module should be placed. @param options dictionary of options parsed from the command line. @return the directory where the lldb module should be placed. """ if options.framework: # Caller wants to use the OS X framework packaging. # We are packaging in an OS X-style framework bundle. The # module dir will be within the # LLDB.framework/Resources/Python subdirectory. return os.path.join( options.target_dir, "LLDB.framework", "Resources", "Python", "lldb") else: from distutils.sysconfig import get_python_lib if options.prefix is not None: module_path = get_python_lib(True, False, options.prefix) else: module_path = get_python_lib(True, False) return os.path.normcase( os.path.join(module_path, "lldb"))
[ "def", "get_python_module_path", "(", "options", ")", ":", "if", "options", ".", "framework", ":", "# Caller wants to use the OS X framework packaging.", "# We are packaging in an OS X-style framework bundle. The", "# module dir will be within the", "# LLDB.framework/Resources/Python subdirectory.", "return", "os", ".", "path", ".", "join", "(", "options", ".", "target_dir", ",", "\"LLDB.framework\"", ",", "\"Resources\"", ",", "\"Python\"", ",", "\"lldb\"", ")", "else", ":", "from", "distutils", ".", "sysconfig", "import", "get_python_lib", "if", "options", ".", "prefix", "is", "not", "None", ":", "module_path", "=", "get_python_lib", "(", "True", ",", "False", ",", "options", ".", "prefix", ")", "else", ":", "module_path", "=", "get_python_lib", "(", "True", ",", "False", ")", "return", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "join", "(", "module_path", ",", "\"lldb\"", ")", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/prepare_binding_Python.py#L345-L372
plasma-umass/Mesh
ad5947577a2dce68eb79e2d6ec8507ef992d2859
theory/experiment.py
python
filenamer
(meshingList, length, numStrings, attempts, boundList = None, time = False)
return plotname
Generates an appropriate filename for a plot based on experiment parameters.
Generates an appropriate filename for a plot based on experiment parameters.
[ "Generates", "an", "appropriate", "filename", "for", "a", "plot", "based", "on", "experiment", "parameters", "." ]
def filenamer(meshingList, length, numStrings, attempts, boundList = None, time = False): """Generates an appropriate filename for a plot based on experiment parameters.""" plotname = '' ids = [] for bundle in meshingList: identifier = bundle[0] ids.append(identifier) plotname = plotname + '{}, ' for b in boundList: ids.append(b) plotname = plotname + '{}, ' plotname = plotname.format(*ids) plotname = plotname +'comp {}len {}str {}att' if time: plotname = plotname + ' time' plotname += '.png' plotname = plotname.format(length, numStrings, attempts) return plotname
[ "def", "filenamer", "(", "meshingList", ",", "length", ",", "numStrings", ",", "attempts", ",", "boundList", "=", "None", ",", "time", "=", "False", ")", ":", "plotname", "=", "''", "ids", "=", "[", "]", "for", "bundle", "in", "meshingList", ":", "identifier", "=", "bundle", "[", "0", "]", "ids", ".", "append", "(", "identifier", ")", "plotname", "=", "plotname", "+", "'{}, '", "for", "b", "in", "boundList", ":", "ids", ".", "append", "(", "b", ")", "plotname", "=", "plotname", "+", "'{}, '", "plotname", "=", "plotname", ".", "format", "(", "*", "ids", ")", "plotname", "=", "plotname", "+", "'comp {}len {}str {}att'", "if", "time", ":", "plotname", "=", "plotname", "+", "' time'", "plotname", "+=", "'.png'", "plotname", "=", "plotname", ".", "format", "(", "length", ",", "numStrings", ",", "attempts", ")", "return", "plotname" ]
https://github.com/plasma-umass/Mesh/blob/ad5947577a2dce68eb79e2d6ec8507ef992d2859/theory/experiment.py#L69-L88
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/convert/entity_object/conversion/aoc/genie_civ.py
python
GenieCivilizationGroup.add_unique_tech
(self, tech_group)
Adds a unique tech to the civilization.
Adds a unique tech to the civilization.
[ "Adds", "a", "unique", "tech", "to", "the", "civilization", "." ]
def add_unique_tech(self, tech_group): """ Adds a unique tech to the civilization. """ self.unique_techs.update({tech_group.get_id(): tech_group})
[ "def", "add_unique_tech", "(", "self", ",", "tech_group", ")", ":", "self", ".", "unique_techs", ".", "update", "(", "{", "tech_group", ".", "get_id", "(", ")", ":", "tech_group", "}", ")" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/entity_object/conversion/aoc/genie_civ.py#L107-L111
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/BaseHTTPServer.py
python
BaseHTTPRequestHandler.log_error
(self, format, *args)
Log an error. This is called when a request cannot be fulfilled. By default it passes the message on to log_message(). Arguments are the same as for log_message(). XXX This should go to the separate error log.
Log an error.
[ "Log", "an", "error", "." ]
def log_error(self, format, *args): """Log an error. This is called when a request cannot be fulfilled. By default it passes the message on to log_message(). Arguments are the same as for log_message(). XXX This should go to the separate error log. """ self.log_message(format, *args)
[ "def", "log_error", "(", "self", ",", "format", ",", "*", "args", ")", ":", "self", ".", "log_message", "(", "format", ",", "*", "args", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/BaseHTTPServer.py#L424-L436
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/email/_header_value_parser.py
python
get_qp_ctext
(value)
return ptext, value
r"""ctext = <printable ascii except \ ( )> This is not the RFC ctext, since we are handling nested comments in comment and unquoting quoted-pairs here. We allow anything except the '()' characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Since quoted pairs are converted to their unquoted values, what is returned is a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value is ' '.
r"""ctext = <printable ascii except \ ( )>
[ "r", "ctext", "=", "<printable", "ascii", "except", "\\", "(", ")", ">" ]
def get_qp_ctext(value): r"""ctext = <printable ascii except \ ( )> This is not the RFC ctext, since we are handling nested comments in comment and unquoting quoted-pairs here. We allow anything except the '()' characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Since quoted pairs are converted to their unquoted values, what is returned is a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value is ' '. """ ptext, value, _ = _get_ptext_to_endchars(value, '()') ptext = WhiteSpaceTerminal(ptext, 'ptext') _validate_xtext(ptext) return ptext, value
[ "def", "get_qp_ctext", "(", "value", ")", ":", "ptext", ",", "value", ",", "_", "=", "_get_ptext_to_endchars", "(", "value", ",", "'()'", ")", "ptext", "=", "WhiteSpaceTerminal", "(", "ptext", ",", "'ptext'", ")", "_validate_xtext", "(", "ptext", ")", "return", "ptext", ",", "value" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/email/_header_value_parser.py#L1154-L1169
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/plat-mac/gensuitemodule.py
python
ascii
(str)
return rv
Return a string with all non-ascii characters hex-encoded
Return a string with all non-ascii characters hex-encoded
[ "Return", "a", "string", "with", "all", "non", "-", "ascii", "characters", "hex", "-", "encoded" ]
def ascii(str): """Return a string with all non-ascii characters hex-encoded""" if type(str) != type(''): return map(ascii, str) rv = '' for c in str: if c in ('\t', '\n', '\r') or ' ' <= c < chr(0x7f): rv = rv + c else: rv = rv + '\\' + 'x%02.2x' % ord(c) return rv
[ "def", "ascii", "(", "str", ")", ":", "if", "type", "(", "str", ")", "!=", "type", "(", "''", ")", ":", "return", "map", "(", "ascii", ",", "str", ")", "rv", "=", "''", "for", "c", "in", "str", ":", "if", "c", "in", "(", "'\\t'", ",", "'\\n'", ",", "'\\r'", ")", "or", "' '", "<=", "c", "<", "chr", "(", "0x7f", ")", ":", "rv", "=", "rv", "+", "c", "else", ":", "rv", "=", "rv", "+", "'\\\\'", "+", "'x%02.2x'", "%", "ord", "(", "c", ")", "return", "rv" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/plat-mac/gensuitemodule.py#L1177-L1187
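The function above is Python 2 library code (it relies on map() over strings and type comparison); below is a hedged Python 3 rendering of the same hex-escaping idea, with an illustrative helper name.

def ascii_escape(s):
    # Keep tab/newline/CR and printable ASCII, hex-escape everything else.
    out = []
    for c in s:
        if c in ('\t', '\n', '\r') or ' ' <= c < chr(0x7f):
            out.append(c)
        else:
            out.append('\\x%02.2x' % ord(c))
    return ''.join(out)

print(ascii_escape('ok\x01\xff'))   # ok\x01\xff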
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py
python
NNTP.ihave
(self, id, f)
return self.getresp()
Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.
Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.
[ "Process", "an", "IHAVE", "command", ".", "Arguments", ":", "-", "id", ":", "message", "-", "id", "of", "the", "article", "-", "f", ":", "file", "containing", "the", "article", "Returns", ":", "-", "resp", ":", "server", "response", "if", "successful", "Note", "that", "if", "the", "server", "refuses", "the", "article", "an", "exception", "is", "raised", "." ]
def ihave(self, id, f): """Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.""" resp = self.shortcmd('IHAVE ' + id) # Raises error_??? if the server already has it if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp()
[ "def", "ihave", "(", "self", ",", "id", ",", "f", ")", ":", "resp", "=", "self", ".", "shortcmd", "(", "'IHAVE '", "+", "id", ")", "# Raises error_??? if the server already has it", "if", "resp", "[", "0", "]", "!=", "'3'", ":", "raise", "NNTPReplyError", "(", "resp", ")", "while", "1", ":", "line", "=", "f", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "if", "line", "[", "-", "1", "]", "==", "'\\n'", ":", "line", "=", "line", "[", ":", "-", "1", "]", "if", "line", "[", ":", "1", "]", "==", "'.'", ":", "line", "=", "'.'", "+", "line", "self", ".", "putline", "(", "line", ")", "self", ".", "putline", "(", "'.'", ")", "return", "self", ".", "getresp", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py#L571-L593
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/iomenu.py
python
coding_spec
(data)
return name
Return the encoding declaration according to PEP 263. When checking encoded data, only the first two lines should be passed in to avoid a UnicodeDecodeError if the rest of the data is not unicode. The first two lines would contain the encoding specification. Raise a LookupError if the encoding is declared but unknown.
Return the encoding declaration according to PEP 263.
[ "Return", "the", "encoding", "declaration", "according", "to", "PEP", "263", "." ]
def coding_spec(data): """Return the encoding declaration according to PEP 263. When checking encoded data, only the first two lines should be passed in to avoid a UnicodeDecodeError if the rest of the data is not unicode. The first two lines would contain the encoding specification. Raise a LookupError if the encoding is declared but unknown. """ if isinstance(data, bytes): # This encoding might be wrong. However, the coding # spec must be ASCII-only, so any non-ASCII characters # around here will be ignored. Decoding to Latin-1 should # never fail (except for memory outage) lines = data.decode('iso-8859-1') else: lines = data # consider only the first two lines if '\n' in lines: lst = lines.split('\n', 2)[:2] elif '\r' in lines: lst = lines.split('\r', 2)[:2] else: lst = [lines] for line in lst: match = coding_re.match(line) if match is not None: break if not blank_re.match(line): return None else: return None name = match.group(1) try: codecs.lookup(name) except LookupError: # The standard encoding error does not indicate the encoding raise LookupError("Unknown encoding: "+name) return name
[ "def", "coding_spec", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "# This encoding might be wrong. However, the coding", "# spec must be ASCII-only, so any non-ASCII characters", "# around here will be ignored. Decoding to Latin-1 should", "# never fail (except for memory outage)", "lines", "=", "data", ".", "decode", "(", "'iso-8859-1'", ")", "else", ":", "lines", "=", "data", "# consider only the first two lines", "if", "'\\n'", "in", "lines", ":", "lst", "=", "lines", ".", "split", "(", "'\\n'", ",", "2", ")", "[", ":", "2", "]", "elif", "'\\r'", "in", "lines", ":", "lst", "=", "lines", ".", "split", "(", "'\\r'", ",", "2", ")", "[", ":", "2", "]", "else", ":", "lst", "=", "[", "lines", "]", "for", "line", "in", "lst", ":", "match", "=", "coding_re", ".", "match", "(", "line", ")", "if", "match", "is", "not", "None", ":", "break", "if", "not", "blank_re", ".", "match", "(", "line", ")", ":", "return", "None", "else", ":", "return", "None", "name", "=", "match", ".", "group", "(", "1", ")", "try", ":", "codecs", ".", "lookup", "(", "name", ")", "except", "LookupError", ":", "# The standard encoding error does not indicate the encoding", "raise", "LookupError", "(", "\"Unknown encoding: \"", "+", "name", ")", "return", "name" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/iomenu.py#L66-L104
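IDLE's coding_re is private, but the standard library exposes the same PEP 263 lookup through tokenize.detect_encoding; a short runnable sketch of that public route:

import io
import tokenize

source = b"# -*- coding: latin-1 -*-\nx = 1\n"
encoding, lines_read = tokenize.detect_encoding(io.BytesIO(source).readline)
print(encoding)         # iso-8859-1 (latin-1 is normalised, per PEP 263)
print(len(lines_read))  # 1 -- the cookie was found on the first line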
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/tools/gyp/pylib/gyp/generator/cmake.py
python
CMakeStringEscape
(a)
return a.replace("\\", "\\\\").replace(";", "\\;").replace('"', '\\"')
Escapes the string 'a' for use inside a CMake string. This means escaping '\' otherwise it may be seen as modifying the next character '"' otherwise it will end the string ';' otherwise the string becomes a list The following do not need to be escaped '#' when the lexer is in string state, this does not start a comment The following are yet unknown '$' generator variables (like ${obj}) must not be escaped, but text $ should be escaped what is wanted is to know which $ come from generator variables
Escapes the string 'a' for use inside a CMake string.
[ "Escapes", "the", "string", "a", "for", "use", "inside", "a", "CMake", "string", "." ]
def CMakeStringEscape(a): """Escapes the string 'a' for use inside a CMake string. This means escaping '\' otherwise it may be seen as modifying the next character '"' otherwise it will end the string ';' otherwise the string becomes a list The following do not need to be escaped '#' when the lexer is in string state, this does not start a comment The following are yet unknown '$' generator variables (like ${obj}) must not be escaped, but text $ should be escaped what is wanted is to know which $ come from generator variables """ return a.replace("\\", "\\\\").replace(";", "\\;").replace('"', '\\"')
[ "def", "CMakeStringEscape", "(", "a", ")", ":", "return", "a", ".", "replace", "(", "\"\\\\\"", ",", "\"\\\\\\\\\"", ")", ".", "replace", "(", "\";\"", ",", "\"\\\\;\"", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/gyp/pylib/gyp/generator/cmake.py#L125-L141
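The escaping is easy to verify by hand; a standalone copy of the three replace calls applied to a sample string:

def cmake_string_escape(a):
    # Same three substitutions: backslash, semicolon, double quote.
    return a.replace("\\", "\\\\").replace(";", "\\;").replace('"', '\\"')

print(cmake_string_escape(r'C:\build;flags "-O2"'))
# C:\\build\;flags \"-O2\"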
eyllanesc/stackoverflow
3837cc9ff94541bf5a500aac1b6182f53669537d
others/MediaManagerV2/qdarkstyle/__init__.py
python
load_stylesheet
(pyside=True)
Loads the stylesheet. Takes care of importing the rc module. :param pyside: True to load the pyside rc file, False to load the PyQt rc file :return the stylesheet string
Loads the stylesheet. Takes care of importing the rc module.
[ "Loads", "the", "stylesheet", ".", "Takes", "care", "of", "importing", "the", "rc", "module", "." ]
def load_stylesheet(pyside=True): """ Loads the stylesheet. Takes care of importing the rc module. :param pyside: True to load the pyside rc file, False to load the PyQt rc file :return the stylesheet string """ # Smart import of the rc file if pyside: import qdarkstyle.pyside_style_rc else: import qdarkstyle.pyqt_style_rc # Load the stylesheet content from resources if not pyside: from PyQt4.QtCore import QFile, QTextStream else: from PySide.QtCore import QFile, QTextStream f = QFile(":qdarkstyle/style.qss") if not f.exists(): _logger().error("Unable to load stylesheet, file not found in " "resources") return "" else: f.open(QFile.ReadOnly | QFile.Text) ts = QTextStream(f) stylesheet = ts.readAll() if platform.system().lower() == 'darwin': # see issue #12 on github mac_fix = ''' QDockWidget::title { background-color: #31363b; text-align: center; height: 12px; } ''' stylesheet += mac_fix return stylesheet
[ "def", "load_stylesheet", "(", "pyside", "=", "True", ")", ":", "# Smart import of the rc file", "if", "pyside", ":", "import", "qdarkstyle", ".", "pyside_style_rc", "else", ":", "import", "qdarkstyle", ".", "pyqt_style_rc", "# Load the stylesheet content from resources", "if", "not", "pyside", ":", "from", "PyQt4", ".", "QtCore", "import", "QFile", ",", "QTextStream", "else", ":", "from", "PySide", ".", "QtCore", "import", "QFile", ",", "QTextStream", "f", "=", "QFile", "(", "\":qdarkstyle/style.qss\"", ")", "if", "not", "f", ".", "exists", "(", ")", ":", "_logger", "(", ")", ".", "error", "(", "\"Unable to load stylesheet, file not found in \"", "\"resources\"", ")", "return", "\"\"", "else", ":", "f", ".", "open", "(", "QFile", ".", "ReadOnly", "|", "QFile", ".", "Text", ")", "ts", "=", "QTextStream", "(", "f", ")", "stylesheet", "=", "ts", ".", "readAll", "(", ")", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", "==", "'darwin'", ":", "# see issue #12 on github", "mac_fix", "=", "'''\n QDockWidget::title\n {\n background-color: #31363b;\n text-align: center;\n height: 12px;\n }\n '''", "stylesheet", "+=", "mac_fix", "return", "stylesheet" ]
https://github.com/eyllanesc/stackoverflow/blob/3837cc9ff94541bf5a500aac1b6182f53669537d/others/MediaManagerV2/qdarkstyle/__init__.py#L42-L81
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/shape.py
python
_ShapeUtil.get_shape
(self, x, sample=True, batch=True, event=True, name='get_shape')
Returns subset of tensor's shape (size of dimensions). Args: x: `Tensor`. sample: `Boolean`. Include sample shape or not. batch: `Boolean`. Include batch shape or not. event: `Boolean`. Include event shape or not. name: `String`. The name to give this op. Raises: ValueError: if `x.get_shape().ndims` is `None` Returns: List describing event shape if known statically, `Tensor` otherwise.
Returns subset of tensor's shape (size of dimensions).
[ "Returns", "subset", "of", "tensor", "s", "shape", "(", "size", "of", "dimensions", ")", "." ]
def get_shape(self, x, sample=True, batch=True, event=True, name='get_shape'): """Returns subset of tensor's shape (size of dimensions). Args: x: `Tensor`. sample: `Boolean`. Include sample shape or not. batch: `Boolean`. Include batch shape or not. event: `Boolean`. Include event shape or not. name: `String`. The name to give this op. Raises: ValueError: if `x.get_shape().ndims` is `None` Returns: List describing event shape if known statically, `Tensor` otherwise. """ if not sample and not batch and not event: return [] with ops.name_scope(self._name): with ops.op_scope([x], name): x = ops.convert_to_tensor(x) shape = (x.get_shape().as_list() if x.get_shape().is_fully_defined() else array_ops.shape(x)) if sample and batch and event: return shape sample_start = 0 batch_start = self.get_sample_ndims(x) event_start = batch_start + self.batch_ndims sample_shape = shape[sample_start:batch_start] if sample else [] batch_shape = shape[batch_start:event_start] if batch else [] event_shape = shape[event_start:] if event else [] if not batch and not event: return sample_shape if not sample and not event: return batch_shape if not sample and not batch: return event_shape if x.get_shape().is_fully_defined(): return sample_shape + batch_shape + event_shape else: return array_ops.concat(0, [sample_shape, batch_shape, event_shape])
[ "def", "get_shape", "(", "self", ",", "x", ",", "sample", "=", "True", ",", "batch", "=", "True", ",", "event", "=", "True", ",", "name", "=", "'get_shape'", ")", ":", "if", "not", "sample", "and", "not", "batch", "and", "not", "event", ":", "return", "[", "]", "with", "ops", ".", "name_scope", "(", "self", ".", "_name", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "x", "]", ",", "name", ")", ":", "x", "=", "ops", ".", "convert_to_tensor", "(", "x", ")", "shape", "=", "(", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "x", ".", "get_shape", "(", ")", ".", "is_fully_defined", "(", ")", "else", "array_ops", ".", "shape", "(", "x", ")", ")", "if", "sample", "and", "batch", "and", "event", ":", "return", "shape", "sample_start", "=", "0", "batch_start", "=", "self", ".", "get_sample_ndims", "(", "x", ")", "event_start", "=", "batch_start", "+", "self", ".", "batch_ndims", "sample_shape", "=", "shape", "[", "sample_start", ":", "batch_start", "]", "if", "sample", "else", "[", "]", "batch_shape", "=", "shape", "[", "batch_start", ":", "event_start", "]", "if", "batch", "else", "[", "]", "event_shape", "=", "shape", "[", "event_start", ":", "]", "if", "event", "else", "[", "]", "if", "not", "batch", "and", "not", "event", ":", "return", "sample_shape", "if", "not", "sample", "and", "not", "event", ":", "return", "batch_shape", "if", "not", "sample", "and", "not", "batch", ":", "return", "event_shape", "if", "x", ".", "get_shape", "(", ")", ".", "is_fully_defined", "(", ")", ":", "return", "sample_shape", "+", "batch_shape", "+", "event_shape", "else", ":", "return", "array_ops", ".", "concat", "(", "0", ",", "[", "sample_shape", ",", "batch_shape", ",", "event_shape", "]", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/shape.py#L239-L285
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/contributed/sumopy/plugins/mapmatching/wxgui.py
python
WxGui.on_altrouteanalyze
(self, event=None)
Analyze attributes of matched and alternative routes.
Analyze attributes of matched and alternative routes.
[ "Analyze", "attributes", "of", "matched", "and", "alternative", "routes", "." ]
def on_altrouteanalyze(self, event=None): """ Analyze attributes of matched and alternative routes. """ p = mapmatching.AlternativeRoutesanalyzer('altrouteanalyzer', self._mapmatching, self._results, logger=self._mainframe.get_logger()) dlg = ProcessDialog(self._mainframe, p, immediate_apply=True) dlg.CenterOnScreen() # this does not return until the dialog is closed. val = dlg.ShowModal() # print ' val,val == wx.ID_OK',val,wx.ID_OK,wx.ID_CANCEL,val == wx.ID_CANCEL # print ' status =',dlg.get_status() if dlg.get_status() != 'success': # val == wx.ID_CANCEL: # print ">>>>>>>>>Unsuccessful\n" dlg.Destroy() if dlg.get_status() == 'success': # print ">>>>>>>>>successful\n" # apply current widget values to scenario instance dlg.apply() dlg.Destroy() self._mainframe.browse_obj(self._results) self._is_needs_refresh = True self.refresh_widgets()
[ "def", "on_altrouteanalyze", "(", "self", ",", "event", "=", "None", ")", ":", "p", "=", "mapmatching", ".", "AlternativeRoutesanalyzer", "(", "'altrouteanalyzer'", ",", "self", ".", "_mapmatching", ",", "self", ".", "_results", ",", "logger", "=", "self", ".", "_mainframe", ".", "get_logger", "(", ")", ")", "dlg", "=", "ProcessDialog", "(", "self", ".", "_mainframe", ",", "p", ",", "immediate_apply", "=", "True", ")", "dlg", ".", "CenterOnScreen", "(", ")", "# this does not return until the dialog is closed.", "val", "=", "dlg", ".", "ShowModal", "(", ")", "# print ' val,val == wx.ID_OK',val,wx.ID_OK,wx.ID_CANCEL,val == wx.ID_CANCEL", "# print ' status =',dlg.get_status()", "if", "dlg", ".", "get_status", "(", ")", "!=", "'success'", ":", "# val == wx.ID_CANCEL:", "# print \">>>>>>>>>Unsuccessful\\n\"", "dlg", ".", "Destroy", "(", ")", "if", "dlg", ".", "get_status", "(", ")", "==", "'success'", ":", "# print \">>>>>>>>>successful\\n\"", "# apply current widget values to scenario instance", "dlg", ".", "apply", "(", ")", "dlg", ".", "Destroy", "(", ")", "self", ".", "_mainframe", ".", "browse_obj", "(", "self", ".", "_results", ")", "self", ".", "_is_needs_refresh", "=", "True", "self", ".", "refresh_widgets", "(", ")" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/plugins/mapmatching/wxgui.py#L1592-L1620
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/tools/jinja2/ext.py
python
Extension.bind
(self, environment)
return rv
Create a copy of this extension bound to another environment.
Create a copy of this extension bound to another environment.
[ "Create", "a", "copy", "of", "this", "extension", "bound", "to", "another", "environment", "." ]
def bind(self, environment): """Create a copy of this extension bound to another environment.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.environment = environment return rv
[ "def", "bind", "(", "self", ",", "environment", ")", ":", "rv", "=", "object", ".", "__new__", "(", "self", ".", "__class__", ")", "rv", ".", "__dict__", ".", "update", "(", "self", ".", "__dict__", ")", "rv", ".", "environment", "=", "environment", "return", "rv" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/jinja2/ext.py#L75-L80
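The bind pattern -- allocate with object.__new__ so __init__ is skipped, copy __dict__, then swap the environment reference -- works for any plain Python class; a minimal standalone sketch (the Plugin class and its attributes are invented here):

class Plugin:
    def __init__(self, environment):
        self.environment = environment
        self.tags = {"cache"}

    def bind(self, environment):
        rv = object.__new__(self.__class__)  # skip __init__
        rv.__dict__.update(self.__dict__)    # shallow copy of the attributes
        rv.environment = environment         # rebind to the new environment
        return rv

a = Plugin("env-a")
b = a.bind("env-b")
print(a.environment, b.environment, b.tags)  # env-a env-b {'cache'}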
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/xml/dom/expatbuilder.py
python
FragmentBuilder.parseString
(self, string)
return fragment
Parse a document fragment from a string, returning the fragment node.
Parse a document fragment from a string, returning the fragment node.
[ "Parse", "a", "document", "fragment", "from", "a", "string", "returning", "the", "fragment", "node", "." ]
def parseString(self, string): """Parse a document fragment from a string, returning the fragment node.""" self._source = string parser = self.getParser() doctype = self.originalDocument.doctype ident = "" if doctype: subset = doctype.internalSubset or self._getDeclarations() if doctype.publicId: ident = ('PUBLIC "%s" "%s"' % (doctype.publicId, doctype.systemId)) elif doctype.systemId: ident = 'SYSTEM "%s"' % doctype.systemId else: subset = "" nsattrs = self._getNSattrs() # get ns decls from node's ancestors document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs) try: parser.Parse(document, True) except: self.reset() raise fragment = self.fragment self.reset() ## self._parser = None return fragment
[ "def", "parseString", "(", "self", ",", "string", ")", ":", "self", ".", "_source", "=", "string", "parser", "=", "self", ".", "getParser", "(", ")", "doctype", "=", "self", ".", "originalDocument", ".", "doctype", "ident", "=", "\"\"", "if", "doctype", ":", "subset", "=", "doctype", ".", "internalSubset", "or", "self", ".", "_getDeclarations", "(", ")", "if", "doctype", ".", "publicId", ":", "ident", "=", "(", "'PUBLIC \"%s\" \"%s\"'", "%", "(", "doctype", ".", "publicId", ",", "doctype", ".", "systemId", ")", ")", "elif", "doctype", ".", "systemId", ":", "ident", "=", "'SYSTEM \"%s\"'", "%", "doctype", ".", "systemId", "else", ":", "subset", "=", "\"\"", "nsattrs", "=", "self", ".", "_getNSattrs", "(", ")", "# get ns decls from node's ancestors", "document", "=", "_FRAGMENT_BUILDER_TEMPLATE", "%", "(", "ident", ",", "subset", ",", "nsattrs", ")", "try", ":", "parser", ".", "Parse", "(", "document", ",", "True", ")", "except", ":", "self", ".", "reset", "(", ")", "raise", "fragment", "=", "self", ".", "fragment", "self", ".", "reset", "(", ")", "## self._parser = None", "return", "fragment" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/xml/dom/expatbuilder.py#L621-L647
Samsung/veles
95ed733c2e49bc011ad98ccf2416ecec23fbf352
libVeles/cpplint.py
python
CheckInvalidIncrement
(filename, clean_lines, linenum, error)
Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Checks for invalid increment *count++.
[ "Checks", "for", "invalid", "increment", "*", "count", "++", "." ]
def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).')
[ "def", "CheckInvalidIncrement", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "_RE_PATTERN_INVALID_INCREMENT", ".", "match", "(", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/invalid_increment'", ",", "5", ",", "'Changing pointer instead of value (or unused value of operator*).'", ")" ]
https://github.com/Samsung/veles/blob/95ed733c2e49bc011ad98ccf2416ecec23fbf352/libVeles/cpplint.py#L1337-L1356
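cpplint defines _RE_PATTERN_INVALID_INCREMENT elsewhere in the file; the stand-in pattern below is an assumption, but it shows which statements the check is meant to flag:

import re

# Assumed stand-in pattern: a statement of the form `*name++;` or `*name--;`.
invalid_increment = re.compile(r'^\s*\*\w+(\+\+|--);')

for line in ['*count++;', '(*count)++;', '++*count;', '*count += 1;']:
    print(line, '->', bool(invalid_increment.match(line)))
# only '*count++;' is flagged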
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/propgrid.py
python
PGProperty.GetChoiceSelection
(*args, **kwargs)
return _propgrid.PGProperty_GetChoiceSelection(*args, **kwargs)
GetChoiceSelection(self) -> int
GetChoiceSelection(self) -> int
[ "GetChoiceSelection", "(", "self", ")", "-", ">", "int" ]
def GetChoiceSelection(*args, **kwargs): """GetChoiceSelection(self) -> int""" return _propgrid.PGProperty_GetChoiceSelection(*args, **kwargs)
[ "def", "GetChoiceSelection", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PGProperty_GetChoiceSelection", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L409-L411
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/nn/modules/module.py
python
Module.get_extra_state
(self)
Returns any extra state to include in the module's state_dict. Implement this and a corresponding :func:`set_extra_state` for your module if you need to store extra state. This function is called when building the module's `state_dict()`. Note that extra state should be pickleable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes. Returns: object: Any extra state to store in the module's state_dict
Returns any extra state to include in the module's state_dict. Implement this and a corresponding :func:`set_extra_state` for your module if you need to store extra state. This function is called when building the module's `state_dict()`.
[ "Returns", "any", "extra", "state", "to", "include", "in", "the", "module", "s", "state_dict", ".", "Implement", "this", "and", "a", "corresponding", ":", "func", ":", "set_extra_state", "for", "your", "module", "if", "you", "need", "to", "store", "extra", "state", ".", "This", "function", "is", "called", "when", "building", "the", "module", "s", "state_dict", "()", "." ]
def get_extra_state(self) -> Any: """ Returns any extra state to include in the module's state_dict. Implement this and a corresponding :func:`set_extra_state` for your module if you need to store extra state. This function is called when building the module's `state_dict()`. Note that extra state should be pickleable to ensure working serialization of the state_dict. We only provide provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes. Returns: object: Any extra state to store in the module's state_dict """ raise RuntimeError( "Reached a code path in Module.get_extra_state() that should never be called. " "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.md " "to report this bug.")
[ "def", "get_extra_state", "(", "self", ")", "->", "Any", ":", "raise", "RuntimeError", "(", "\"Reached a code path in Module.get_extra_state() that should never be called. \"", "\"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.md \"", "\"to report this bug.\"", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/nn/modules/module.py#L542-L560
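In user code the hook is overridden together with set_extra_state; a hedged sketch (the Counter class is invented here, and it assumes a PyTorch release new enough, roughly 1.10+, to ship the extra-state hooks):

import torch.nn as nn

class Counter(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 2)
        self.calls = 0  # plain attribute, not a Parameter or buffer

    def get_extra_state(self):
        return {"calls": self.calls}  # must be picklable

    def set_extra_state(self, state):
        self.calls = state["calls"]

src = Counter()
src.calls = 7
dst = Counter()
dst.load_state_dict(src.state_dict())  # round-trips through the "_extra_state" entry
print(dst.calls)  # 7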
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_op_impl/tbe/logical_not_ds.py
python
_logical_not_ds_tbe
()
return
LogicalNot TBE register
LogicalNot TBE register
[ "LogicalNot", "TBE", "register" ]
def _logical_not_ds_tbe(): """LogicalNot TBE register""" return
[ "def", "_logical_not_ds_tbe", "(", ")", ":", "return" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/tbe/logical_not_ds.py#L35-L37
deepmind/streetlearn
ccf1d60b9c45154894d45a897748aee85d7eb69b
streetlearn/python/agents/goal_nav_agent.py
python
GoalNavAgent._torso
(self, input_)
return conv_out, action_and_reward, goal
Processing of all the visual and language inputs to the LSTM core.
Processing of all the visual and language inputs to the LSTM core.
[ "Processing", "of", "all", "the", "visual", "and", "language", "inputs", "to", "the", "LSTM", "core", "." ]
def _torso(self, input_): """Processing of all the visual and language inputs to the LSTM core.""" # Extract the inputs last_action, env_output = input_ last_reward, _, _, observation = env_output frame = observation[self._idx_frame] goal = observation[self._idx_goal] goal = tf.to_float(goal) # Convert to image to floats and normalise. frame = tf.to_float(frame) frame = snt.FlattenTrailingDimensions(dim_from=3)(frame) frame /= 255.0 # Feed image through convnet. with tf.variable_scope('convnet'): # Convolutional layers. conv_out = self._convnet(frame) # Fully connected layer. conv_out = snt.BatchFlatten()(conv_out) conv_out = snt.Linear(256)(conv_out) conv_out = tf.nn.relu(conv_out) # Concatenate outputs of the visual and instruction pathways. if self._feed_action_and_reward: # Append clipped last reward and one hot last action. tf.logging.info('Append last reward clipped to: %f', self._max_reward) clipped_last_reward = tf.expand_dims( tf.clip_by_value(last_reward, -self._max_reward, self._max_reward), -1) tf.logging.info('Append last action (one-hot of %d)', self._num_actions) one_hot_last_action = tf.one_hot(last_action, self._num_actions) tf.logging.info('Append goal:') tf.logging.info(goal) action_and_reward = tf.concat([clipped_last_reward, one_hot_last_action], axis=1) else: action_and_reward = tf.constant([0], dtype=tf.float32) return conv_out, action_and_reward, goal
[ "def", "_torso", "(", "self", ",", "input_", ")", ":", "# Extract the inputs", "last_action", ",", "env_output", "=", "input_", "last_reward", ",", "_", ",", "_", ",", "observation", "=", "env_output", "frame", "=", "observation", "[", "self", ".", "_idx_frame", "]", "goal", "=", "observation", "[", "self", ".", "_idx_goal", "]", "goal", "=", "tf", ".", "to_float", "(", "goal", ")", "# Convert to image to floats and normalise.", "frame", "=", "tf", ".", "to_float", "(", "frame", ")", "frame", "=", "snt", ".", "FlattenTrailingDimensions", "(", "dim_from", "=", "3", ")", "(", "frame", ")", "frame", "/=", "255.0", "# Feed image through convnet.", "with", "tf", ".", "variable_scope", "(", "'convnet'", ")", ":", "# Convolutional layers.", "conv_out", "=", "self", ".", "_convnet", "(", "frame", ")", "# Fully connected layer.", "conv_out", "=", "snt", ".", "BatchFlatten", "(", ")", "(", "conv_out", ")", "conv_out", "=", "snt", ".", "Linear", "(", "256", ")", "(", "conv_out", ")", "conv_out", "=", "tf", ".", "nn", ".", "relu", "(", "conv_out", ")", "# Concatenate outputs of the visual and instruction pathways.", "if", "self", ".", "_feed_action_and_reward", ":", "# Append clipped last reward and one hot last action.", "tf", ".", "logging", ".", "info", "(", "'Append last reward clipped to: %f'", ",", "self", ".", "_max_reward", ")", "clipped_last_reward", "=", "tf", ".", "expand_dims", "(", "tf", ".", "clip_by_value", "(", "last_reward", ",", "-", "self", ".", "_max_reward", ",", "self", ".", "_max_reward", ")", ",", "-", "1", ")", "tf", ".", "logging", ".", "info", "(", "'Append last action (one-hot of %d)'", ",", "self", ".", "_num_actions", ")", "one_hot_last_action", "=", "tf", ".", "one_hot", "(", "last_action", ",", "self", ".", "_num_actions", ")", "tf", ".", "logging", ".", "info", "(", "'Append goal:'", ")", "tf", ".", "logging", ".", "info", "(", "goal", ")", "action_and_reward", "=", "tf", ".", "concat", "(", "[", "clipped_last_reward", ",", "one_hot_last_action", "]", ",", "axis", "=", "1", ")", "else", ":", "action_and_reward", "=", "tf", ".", "constant", "(", "[", "0", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "return", "conv_out", ",", "action_and_reward", ",", "goal" ]
https://github.com/deepmind/streetlearn/blob/ccf1d60b9c45154894d45a897748aee85d7eb69b/streetlearn/python/agents/goal_nav_agent.py#L134-L173
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_misc.py
python
Log.Destroy
(*args, **kwargs)
return _misc_.Log_Destroy(*args, **kwargs)
Destroy(self)
Destroy(self)
[ "Destroy", "(", "self", ")" ]
def Destroy(*args, **kwargs): """Destroy(self)""" args[0].this.own(False) return _misc_.Log_Destroy(*args, **kwargs)
[ "def", "Destroy", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "[", "0", "]", ".", "this", ".", "own", "(", "False", ")", "return", "_misc_", ".", "Log_Destroy", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L1617-L1620
PaddlePaddle/PaddleOCR
b756bf5f8c90142e0d89d3db0163965c686b6ffe
ppocr/postprocess/rec_postprocess.py
python
BaseRecLabelDecode.decode
(self, text_index, text_prob=None, is_remove_duplicate=False)
return result_list
convert text-index into text-label.
convert text-index into text-label.
[ "convert", "text", "-", "index", "into", "text", "-", "label", "." ]
def decode(self, text_index, text_prob=None, is_remove_duplicate=False): """ convert text-index into text-label. """ result_list = [] ignored_tokens = self.get_ignored_tokens() batch_size = len(text_index) for batch_idx in range(batch_size): char_list = [] conf_list = [] for idx in range(len(text_index[batch_idx])): if text_index[batch_idx][idx] in ignored_tokens: continue if is_remove_duplicate: # only for predict if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ batch_idx][idx]: continue char_list.append(self.character[int(text_index[batch_idx][ idx])]) if text_prob is not None: conf_list.append(text_prob[batch_idx][idx]) else: conf_list.append(1) text = ''.join(char_list) result_list.append((text, np.mean(conf_list))) return result_list
[ "def", "decode", "(", "self", ",", "text_index", ",", "text_prob", "=", "None", ",", "is_remove_duplicate", "=", "False", ")", ":", "result_list", "=", "[", "]", "ignored_tokens", "=", "self", ".", "get_ignored_tokens", "(", ")", "batch_size", "=", "len", "(", "text_index", ")", "for", "batch_idx", "in", "range", "(", "batch_size", ")", ":", "char_list", "=", "[", "]", "conf_list", "=", "[", "]", "for", "idx", "in", "range", "(", "len", "(", "text_index", "[", "batch_idx", "]", ")", ")", ":", "if", "text_index", "[", "batch_idx", "]", "[", "idx", "]", "in", "ignored_tokens", ":", "continue", "if", "is_remove_duplicate", ":", "# only for predict", "if", "idx", ">", "0", "and", "text_index", "[", "batch_idx", "]", "[", "idx", "-", "1", "]", "==", "text_index", "[", "batch_idx", "]", "[", "idx", "]", ":", "continue", "char_list", ".", "append", "(", "self", ".", "character", "[", "int", "(", "text_index", "[", "batch_idx", "]", "[", "idx", "]", ")", "]", ")", "if", "text_prob", "is", "not", "None", ":", "conf_list", ".", "append", "(", "text_prob", "[", "batch_idx", "]", "[", "idx", "]", ")", "else", ":", "conf_list", ".", "append", "(", "1", ")", "text", "=", "''", ".", "join", "(", "char_list", ")", "result_list", ".", "append", "(", "(", "text", ",", "np", ".", "mean", "(", "conf_list", ")", ")", ")", "return", "result_list" ]
https://github.com/PaddlePaddle/PaddleOCR/blob/b756bf5f8c90142e0d89d3db0163965c686b6ffe/ppocr/postprocess/rec_postprocess.py#L51-L75
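A toy standalone version of the same loop (the vocabulary and inputs are invented) shows how the ignored index and duplicate removal interact:

import numpy as np

characters = ['<blank>', 'a', 'b', 'c']  # index 0 is the ignored token

def toy_decode(text_index, text_prob=None, is_remove_duplicate=False):
    results = []
    for b, indices in enumerate(text_index):
        chars, confs = [], []
        for i, idx in enumerate(indices):
            if idx == 0:
                continue  # skip the ignored token
            if is_remove_duplicate and i > 0 and indices[i - 1] == idx:
                continue  # collapse repeats, as in prediction mode
            chars.append(characters[idx])
            confs.append(text_prob[b][i] if text_prob is not None else 1)
        results.append((''.join(chars), float(np.mean(confs))))
    return results

print(toy_decode([[1, 1, 0, 2, 2, 3]], is_remove_duplicate=True))  # [('abc', 1.0)]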
wenwei202/caffe
f54a74abaf6951d8485cbdcfa1d74a4c37839466
python/caffe/coord_map.py
python
coord_map
(fn)
Define the coordinate mapping by its - axis - scale: output coord[i * scale] <- input_coord[i] - shift: output coord[i] <- output_coord[i + shift] s.t. the identity mapping, as for pointwise layers like ReLu, is defined by (None, 1, 0) since it is independent of axis and does not transform coords.
Define the coordinate mapping by its - axis - scale: output coord[i * scale] <- input_coord[i] - shift: output coord[i] <- output_coord[i + shift] s.t. the identity mapping, as for pointwise layers like ReLu, is defined by (None, 1, 0) since it is independent of axis and does not transform coords.
[ "Define", "the", "coordinate", "mapping", "by", "its", "-", "axis", "-", "scale", ":", "output", "coord", "[", "i", "*", "scale", "]", "<", "-", "input_coord", "[", "i", "]", "-", "shift", ":", "output", "coord", "[", "i", "]", "<", "-", "output_coord", "[", "i", "+", "shift", "]", "s", ".", "t", ".", "the", "identity", "mapping", "as", "for", "pointwise", "layers", "like", "ReLu", "is", "defined", "by", "(", "None", "1", "0", ")", "since", "it", "is", "independent", "of", "axis", "and", "does", "not", "transform", "coords", "." ]
def coord_map(fn): """ Define the coordinate mapping by its - axis - scale: output coord[i * scale] <- input_coord[i] - shift: output coord[i] <- output_coord[i + shift] s.t. the identity mapping, as for pointwise layers like ReLu, is defined by (None, 1, 0) since it is independent of axis and does not transform coords. """ if fn.type_name in ['Convolution', 'Pooling', 'Im2col']: axis, stride, ks, pad = conv_params(fn) return axis, 1 / stride, (pad - (ks - 1) / 2) / stride elif fn.type_name == 'Deconvolution': axis, stride, ks, pad = conv_params(fn) return axis, stride, (ks - 1) / 2 - pad elif fn.type_name in PASS_THROUGH_LAYERS: return None, 1, 0 elif fn.type_name == 'Crop': axis, offset = crop_params(fn) axis -= 1 # -1 for last non-coordinate dim. return axis, 1, - offset else: raise UndefinedMapException
[ "def", "coord_map", "(", "fn", ")", ":", "if", "fn", ".", "type_name", "in", "[", "'Convolution'", ",", "'Pooling'", ",", "'Im2col'", "]", ":", "axis", ",", "stride", ",", "ks", ",", "pad", "=", "conv_params", "(", "fn", ")", "return", "axis", ",", "1", "/", "stride", ",", "(", "pad", "-", "(", "ks", "-", "1", ")", "/", "2", ")", "/", "stride", "elif", "fn", ".", "type_name", "==", "'Deconvolution'", ":", "axis", ",", "stride", ",", "ks", ",", "pad", "=", "conv_params", "(", "fn", ")", "return", "axis", ",", "stride", ",", "(", "ks", "-", "1", ")", "/", "2", "-", "pad", "elif", "fn", ".", "type_name", "in", "PASS_THROUGH_LAYERS", ":", "return", "None", ",", "1", ",", "0", "elif", "fn", ".", "type_name", "==", "'Crop'", ":", "axis", ",", "offset", "=", "crop_params", "(", "fn", ")", "axis", "-=", "1", "# -1 for last non-coordinate dim.", "return", "axis", ",", "1", ",", "-", "offset", "else", ":", "raise", "UndefinedMapException" ]
https://github.com/wenwei202/caffe/blob/f54a74abaf6951d8485cbdcfa1d74a4c37839466/python/caffe/coord_map.py#L57-L79
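Plugging numbers into the two branches makes the (scale, shift) pairs concrete; the sketch below is just the arithmetic above, written with floats to sidestep the Python 2 integer division the original relies on:

def conv_map(stride, ks, pad):
    return 1.0 / stride, (pad - (ks - 1) / 2.0) / stride

def deconv_map(stride, ks, pad):
    return float(stride), (ks - 1) / 2.0 - pad

print(conv_map(stride=2, ks=3, pad=1))    # (0.5, 0.0)  halve the resolution, no offset
print(deconv_map(stride=2, ks=4, pad=1))  # (2.0, 0.5)  double the resolution, half-pixel shift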
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/idlelib/PyShell.py
python
ModifiedInterpreter.unix_terminate
(self)
UNIX: make sure subprocess is terminated and collect status
UNIX: make sure subprocess is terminated and collect status
[ "UNIX", ":", "make", "sure", "subprocess", "is", "terminated", "and", "collect", "status" ]
def unix_terminate(self): "UNIX: make sure subprocess is terminated and collect status" if hasattr(os, 'kill'): try: os.kill(self.rpcpid, SIGTERM) except OSError: # process already terminated: return else: try: os.waitpid(self.rpcpid, 0) except OSError: return
[ "def", "unix_terminate", "(", "self", ")", ":", "if", "hasattr", "(", "os", ",", "'kill'", ")", ":", "try", ":", "os", ".", "kill", "(", "self", ".", "rpcpid", ",", "SIGTERM", ")", "except", "OSError", ":", "# process already terminated:", "return", "else", ":", "try", ":", "os", ".", "waitpid", "(", "self", ".", "rpcpid", ",", "0", ")", "except", "OSError", ":", "return" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/idlelib/PyShell.py#L508-L520
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/xml/sax/handler.py
python
ContentHandler.characters
(self, content)
Receive notification of character data. The Parser will call this method to report each chunk of character data. SAX parsers may return all contiguous character data in a single chunk, or they may split it into several chunks; however, all of the characters in any single event must come from the same external entity so that the Locator provides useful information.
Receive notification of character data.
[ "Receive", "notification", "of", "character", "data", "." ]
def characters(self, content): """Receive notification of character data. The Parser will call this method to report each chunk of character data. SAX parsers may return all contiguous character data in a single chunk, or they may split it into several chunks; however, all of the characters in any single event must come from the same external entity so that the Locator provides useful information."""
[ "def", "characters", "(", "self", ",", "content", ")", ":" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/xml/sax/handler.py#L158-L166
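Because a parser may deliver text in several chunks, handlers usually accumulate and join; a small runnable example with the stdlib SAX parser:

import xml.sax

class TextCollector(xml.sax.ContentHandler):
    def __init__(self):
        super().__init__()
        self.chunks = []

    def characters(self, content):
        self.chunks.append(content)  # may be called several times per text node

handler = TextCollector()
xml.sax.parseString(b"<doc>hello <b>sax</b> world</doc>", handler)
print(''.join(handler.chunks))  # hello sax world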
Project-OSRM/osrm-backend
f2e284623e25b5570dd2a5e6985abcb3790fd348
third_party/flatbuffers/python/flatbuffers/encode.py
python
Write
(packer_type, buf, head, n)
Write encodes `n` at buf[head] using `packer_type`.
Write encodes `n` at buf[head] using `packer_type`.
[ "Write", "encodes", "n", "at", "buf", "[", "head", "]", "using", "packer_type", "." ]
def Write(packer_type, buf, head, n): """ Write encodes `n` at buf[head] using `packer_type`. """ packer_type.pack_into(buf, head, n)
[ "def", "Write", "(", "packer_type", ",", "buf", ",", "head", ",", "n", ")", ":", "packer_type", ".", "pack_into", "(", "buf", ",", "head", ",", "n", ")" ]
https://github.com/Project-OSRM/osrm-backend/blob/f2e284623e25b5570dd2a5e6985abcb3790fd348/third_party/flatbuffers/python/flatbuffers/encode.py#L40-L42
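Any struct.Struct instance exposes the same pack_into(buf, offset, value) shape as the flatbuffers packer objects, so the helper can be exercised standalone:

import struct

def write(packer_type, buf, head, n):
    packer_type.pack_into(buf, head, n)  # same body as the Write helper above

uint32 = struct.Struct('<I')  # little-endian unsigned 32-bit
buf = bytearray(8)
write(uint32, buf, 4, 0xDEADBEEF)
print(buf.hex())  # 00000000efbeadde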
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py
python
_makeTags
(tagStr, xml)
return openTag, closeTag
Internal helper to construct opening and closing tag expressions, given a tag name
Internal helper to construct opening and closing tag expressions, given a tag name
[ "Internal", "helper", "to", "construct", "opening", "and", "closing", "tag", "expressions", "given", "a", "tag", "name" ]
def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas,alphanums+"_-:") if (xml): tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") else: printablesLessRAbrack = "".join(c for c in printables if c not in ">") tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ Optional( Suppress("=") + tagAttrValue ) ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine(_L("</") + tagStr + ">") openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) openTag.tag = resname closeTag.tag = resname return openTag, closeTag
[ "def", "_makeTags", "(", "tagStr", ",", "xml", ")", ":", "if", "isinstance", "(", "tagStr", ",", "basestring", ")", ":", "resname", "=", "tagStr", "tagStr", "=", "Keyword", "(", "tagStr", ",", "caseless", "=", "not", "xml", ")", "else", ":", "resname", "=", "tagStr", ".", "name", "tagAttrName", "=", "Word", "(", "alphas", ",", "alphanums", "+", "\"_-:\"", ")", "if", "(", "xml", ")", ":", "tagAttrValue", "=", "dblQuotedString", ".", "copy", "(", ")", ".", "setParseAction", "(", "removeQuotes", ")", "openTag", "=", "Suppress", "(", "\"<\"", ")", "+", "tagStr", "(", "\"tag\"", ")", "+", "Dict", "(", "ZeroOrMore", "(", "Group", "(", "tagAttrName", "+", "Suppress", "(", "\"=\"", ")", "+", "tagAttrValue", ")", ")", ")", "+", "Optional", "(", "\"/\"", ",", "default", "=", "[", "False", "]", ")", ".", "setResultsName", "(", "\"empty\"", ")", ".", "setParseAction", "(", "lambda", "s", ",", "l", ",", "t", ":", "t", "[", "0", "]", "==", "'/'", ")", "+", "Suppress", "(", "\">\"", ")", "else", ":", "printablesLessRAbrack", "=", "\"\"", ".", "join", "(", "c", "for", "c", "in", "printables", "if", "c", "not", "in", "\">\"", ")", "tagAttrValue", "=", "quotedString", ".", "copy", "(", ")", ".", "setParseAction", "(", "removeQuotes", ")", "|", "Word", "(", "printablesLessRAbrack", ")", "openTag", "=", "Suppress", "(", "\"<\"", ")", "+", "tagStr", "(", "\"tag\"", ")", "+", "Dict", "(", "ZeroOrMore", "(", "Group", "(", "tagAttrName", ".", "setParseAction", "(", "downcaseTokens", ")", "+", "Optional", "(", "Suppress", "(", "\"=\"", ")", "+", "tagAttrValue", ")", ")", ")", ")", "+", "Optional", "(", "\"/\"", ",", "default", "=", "[", "False", "]", ")", ".", "setResultsName", "(", "\"empty\"", ")", ".", "setParseAction", "(", "lambda", "s", ",", "l", ",", "t", ":", "t", "[", "0", "]", "==", "'/'", ")", "+", "Suppress", "(", "\">\"", ")", "closeTag", "=", "Combine", "(", "_L", "(", "\"</\"", ")", "+", "tagStr", "+", "\">\"", ")", "openTag", "=", "openTag", ".", "setResultsName", "(", "\"start\"", "+", "\"\"", ".", "join", "(", "resname", ".", "replace", "(", "\":\"", ",", "\" \"", ")", ".", "title", "(", ")", ".", "split", "(", ")", ")", ")", ".", "setName", "(", "\"<%s>\"", "%", "resname", ")", "closeTag", "=", "closeTag", ".", "setResultsName", "(", "\"end\"", "+", "\"\"", ".", "join", "(", "resname", ".", "replace", "(", "\":\"", ",", "\" \"", ")", ".", "title", "(", ")", ".", "split", "(", ")", ")", ")", ".", "setName", "(", "\"</%s>\"", "%", "resname", ")", "openTag", ".", "tag", "=", "resname", "closeTag", ".", "tag", "=", "resname", "return", "openTag", ",", "closeTag" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py#L4875-L4902
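This private helper backs pyparsing's public makeHTMLTags/makeXMLTags wrappers; a hedged usage sketch of the public form (the URL and link text are invented):

from pyparsing import SkipTo, makeHTMLTags

a_start, a_end = makeHTMLTags("a")
link = a_start + SkipTo(a_end)("link_text") + a_end

html = '<a href="https://example.org">example docs</a>'
for t in link.searchString(html):
    print(t.href, '->', t.link_text)  # https://example.org -> example docs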
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
third_party/Python/module/pexpect-4.6/pexpect/expect.py
python
searcher_re.__str__
(self)
return '\n'.join(ss)
This returns a human-readable string that represents the state of the object.
This returns a human-readable string that represents the state of the object.
[ "This", "returns", "a", "human", "-", "readable", "string", "that", "represents", "the", "state", "of", "the", "object", "." ]
def __str__(self): '''This returns a human-readable string that represents the state of the object.''' #ss = [(n, ' %d: re.compile("%s")' % # (n, repr(s.pattern))) for n, s in self._searches] ss = list() for n, s in self._searches: ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern))) ss.append((-1, 'searcher_re:')) if self.eof_index >= 0: ss.append((self.eof_index, ' %d: EOF' % self.eof_index)) if self.timeout_index >= 0: ss.append((self.timeout_index, ' %d: TIMEOUT' % self.timeout_index)) ss.sort() ss = list(zip(*ss))[1] return '\n'.join(ss)
[ "def", "__str__", "(", "self", ")", ":", "#ss = [(n, ' %d: re.compile(\"%s\")' %", "# (n, repr(s.pattern))) for n, s in self._searches]", "ss", "=", "list", "(", ")", "for", "n", ",", "s", "in", "self", ".", "_searches", ":", "ss", ".", "append", "(", "(", "n", ",", "' %d: re.compile(%r)'", "%", "(", "n", ",", "s", ".", "pattern", ")", ")", ")", "ss", ".", "append", "(", "(", "-", "1", ",", "'searcher_re:'", ")", ")", "if", "self", ".", "eof_index", ">=", "0", ":", "ss", ".", "append", "(", "(", "self", ".", "eof_index", ",", "' %d: EOF'", "%", "self", ".", "eof_index", ")", ")", "if", "self", ".", "timeout_index", ">=", "0", ":", "ss", ".", "append", "(", "(", "self", ".", "timeout_index", ",", "' %d: TIMEOUT'", "%", "self", ".", "timeout_index", ")", ")", "ss", ".", "sort", "(", ")", "ss", "=", "list", "(", "zip", "(", "*", "ss", ")", ")", "[", "1", "]", "return", "'\\n'", ".", "join", "(", "ss", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/third_party/Python/module/pexpect-4.6/pexpect/expect.py#L256-L273
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/io/ros.py
python
to_Pose
(klampt_se3)
return ros_pose
From Klamp't se3 element to ROS Pose
From Klamp't se3 element to ROS Pose
[ "From", "Klamp", "t", "se3", "element", "to", "ROS", "Pose" ]
def to_Pose(klampt_se3): """From Klamp't se3 element to ROS Pose""" ros_pose = Pose() ros_pose.orientation = to_Quaternion(klampt_se3[0]) ros_pose.position = to_Point(klampt_se3[1]) return ros_pose
[ "def", "to_Pose", "(", "klampt_se3", ")", ":", "ros_pose", "=", "Pose", "(", ")", "ros_pose", ".", "orientation", "=", "to_Quaternion", "(", "klampt_se3", "[", "0", "]", ")", "ros_pose", ".", "position", "=", "to_Point", "(", "klampt_se3", "[", "1", "]", ")", "return", "ros_pose" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/io/ros.py#L66-L71
QMCPACK/qmcpack
d0948ab455e38364458740cc8e2239600a14c5cd
utils/afqmctools/afqmctools/hamiltonian/converter.py
python
check_sym
(ikjl, nmo, sym)
Check permutational symmetry of integral Parameters ---------- ikjl : tuple of ints Orbital indices of ERI. nmo : int Number of orbitals sym : int Desired permutational symmetry to check. Returns ------- sym_allowed : bool True if integral is unique from set of equivalent.
Check permutational symmetry of integral
[ "Check", "permutational", "symmetry", "of", "integral" ]
def check_sym(ikjl, nmo, sym): """Check permutational symmetry of integral Parameters ---------- ikjl : tuple of ints Orbital indices of ERI. nmo : int Number of orbitals sym : int Desired permutational symmetry to check. Returns ------- sym_allowed : bool True if integral is unique from set of equivalent. """ if sym == 1: return True else: i, k, j, l = ikjl if sym == 4: kilj = (k,i,l,j) jlik = (j,l,i,k) ljki = (l,j,k,i) if (ikjl > jlik) or (ikjl > kilj) or (ikjl > ljki): return False else: return True else: ik = i + k*nmo jl = j + l*nmo return (i >= k and j >= l) and ik >= jl
[ "def", "check_sym", "(", "ikjl", ",", "nmo", ",", "sym", ")", ":", "if", "sym", "==", "1", ":", "return", "True", "else", ":", "i", ",", "k", ",", "j", ",", "l", "=", "ikjl", "if", "sym", "==", "4", ":", "kilj", "=", "(", "k", ",", "i", ",", "l", ",", "j", ")", "jlik", "=", "(", "j", ",", "l", ",", "i", ",", "k", ")", "ljki", "=", "(", "l", ",", "j", ",", "k", ",", "i", ")", "if", "(", "ikjl", ">", "jlik", ")", "or", "(", "ikjl", ">", "kilj", ")", "or", "(", "ikjl", ">", "ljki", ")", ":", "return", "False", "else", ":", "return", "True", "else", ":", "ik", "=", "i", "+", "k", "*", "nmo", "jl", "=", "j", "+", "l", "*", "nmo", "return", "(", "i", ">=", "k", "and", "j", ">=", "l", ")", "and", "ik", ">=", "jl" ]
https://github.com/QMCPACK/qmcpack/blob/d0948ab455e38364458740cc8e2239600a14c5cd/utils/afqmctools/afqmctools/hamiltonian/converter.py#L279-L311
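For the 8-fold branch (the final else), the ordering rule can be sanity-checked by brute force; a standalone sketch counting unique index quadruples for a tiny basis:

from itertools import product

def is_unique_8fold(i, k, j, l, nmo):
    # Same ordering rule as check_sym's final branch (8-fold symmetry).
    ik = i + k * nmo
    jl = j + l * nmo
    return (i >= k and j >= l) and ik >= jl

nmo = 3
unique = sum(is_unique_8fold(i, k, j, l, nmo)
             for i, k, j, l in product(range(nmo), repeat=4))
npair = nmo * (nmo + 1) // 2
print(unique, npair * (npair + 1) // 2)  # 21 21 -- brute force matches the pair-count formula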
lilypond/lilypond
2a14759372979f5b796ee802b0ee3bc15d28b06b
release/binaries/lib/build.py
python
ConfigurePackage.make_install_args
(self)
return []
Return additional parameters to pass to make install
Return additional parameters to pass to make install
[ "Return", "additional", "parameters", "to", "pass", "to", "make", "install" ]
def make_install_args(self) -> List[str]: """Return additional parameters to pass to make install""" return []
[ "def", "make_install_args", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "[", "]" ]
https://github.com/lilypond/lilypond/blob/2a14759372979f5b796ee802b0ee3bc15d28b06b/release/binaries/lib/build.py#L277-L279
apache/openoffice
97289b2620590d8b431bcc408f87252db6203818
main/toolkit/src2xml/source/srcparser.py
python
MacroExpander.parseValues
(self)
return values, i
Parse tokens to get macro function variable values. Be aware that there are implicit quotes around the text between the open paren, the comma(s), and the close paren. For instance, if a macro is defined as FOO(a, b) and is used as FOO(one two three, and four), then the 'a' must be replaced with 'one two three', and the 'b' replaced with 'and four'. In other words, whitespace does not end a token.
Parse tokens to get macro function variable values.
[ "Parse", "tokens", "to", "get", "macro", "function", "variable", "values", "." ]
def parseValues (self): """Parse tokens to get macro function variable values. Be aware that there is an implicit quotes around the text between the open paren, the comma(s), and the close paren. For instance, if a macro is defined as FOO(a, b) and is used as FOO(one two three, and four), then the 'a' must be replaced with 'one two three', and the 'b' replaced with 'and four'. In other words, whitespace does not end a token. """ values = [] i = 1 scope = 0 value = [] while True: try: tk = self.tokens[self.pos+i] except IndexError: progress ("error parsing values (%d)\n"%i) for j in range(0, i): print(self.tokens[self.pos+j], end=' ') print('') srclexer.dumpTokens(self.tokens) srclexer.dumpTokens(self.newtokens) print("tokens expanded so far:") for tk in self.expandedTokens: print("-"*20) print(tk) srclexer.dumpTokens(self.defines[tk].tokens) sys.exit(1) if tk == '(': value = [] scope += 1 elif tk == ',': values.append(value) value = [] elif tk == ')': scope -= 1 values.append(value) value = [] if scope == 0: break else: raise ParseError ('') else: value.append(tk) i += 1 return values, i
[ "def", "parseValues", "(", "self", ")", ":", "values", "=", "[", "]", "i", "=", "1", "scope", "=", "0", "value", "=", "[", "]", "while", "True", ":", "try", ":", "tk", "=", "self", ".", "tokens", "[", "self", ".", "pos", "+", "i", "]", "except", "IndexError", ":", "progress", "(", "\"error parsing values (%d)\\n\"", "%", "i", ")", "for", "j", "in", "range", "(", "0", ",", "i", ")", ":", "print", "(", "self", ".", "tokens", "[", "self", ".", "pos", "+", "j", "]", ",", "end", "=", "' '", ")", "print", "(", "''", ")", "srclexer", ".", "dumpTokens", "(", "self", ".", "tokens", ")", "srclexer", ".", "dumpTokens", "(", "self", ".", "newtokens", ")", "print", "(", "\"tokens expanded so far:\"", ")", "for", "tk", "in", "self", ".", "expandedTokens", ":", "print", "(", "\"-\"", "*", "20", ")", "print", "(", "tk", ")", "srclexer", ".", "dumpTokens", "(", "self", ".", "defines", "[", "tk", "]", ".", "tokens", ")", "sys", ".", "exit", "(", "1", ")", "if", "tk", "==", "'('", ":", "value", "=", "[", "]", "scope", "+=", "1", "elif", "tk", "==", "','", ":", "values", ".", "append", "(", "value", ")", "value", "=", "[", "]", "elif", "tk", "==", "')'", ":", "scope", "-=", "1", "values", ".", "append", "(", "value", ")", "value", "=", "[", "]", "if", "scope", "==", "0", ":", "break", "else", ":", "raise", "ParseError", "(", "''", ")", "else", ":", "value", ".", "append", "(", "tk", ")", "i", "+=", "1", "return", "values", ",", "i" ]
https://github.com/apache/openoffice/blob/97289b2620590d8b431bcc408f87252db6203818/main/toolkit/src2xml/source/srcparser.py#L139-L187
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_keyh.py
python
ViKeyHandler.InsertText
(self, pos, text)
Insert text and store it in the buffer if we're in insert mode i.e. as if it was typed in
Insert text and store it in the buffer if we're in insert mode i.e. as if it was typed in
[ "Insert", "text", "and", "store", "it", "in", "the", "buffer", "if", "we", "re", "in", "insert", "mode", "i", ".", "e", ".", "as", "if", "it", "was", "typed", "in" ]
def InsertText(self, pos, text): """Insert text and store it in the buffer if we're in insert mode i.e. as if it was typed in """ self.stc.InsertText(pos, text) if self.IsInsertMode(): self.buffer += text
[ "def", "InsertText", "(", "self", ",", "pos", ",", "text", ")", ":", "self", ".", "stc", ".", "InsertText", "(", "pos", ",", "text", ")", "if", "self", ".", "IsInsertMode", "(", ")", ":", "self", ".", "buffer", "+=", "text" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_keyh.py#L279-L286
devsisters/libquic
8954789a056d8e7d5fcb6452fd1572ca57eb5c4e
src/third_party/protobuf/python/google/protobuf/service.py
python
RpcController.Reset
(self)
Resets the RpcController to its initial state. After the RpcController has been reset, it may be reused in a new call. Must not be called while an RPC is in progress.
Resets the RpcController to its initial state.
[ "Resets", "the", "RpcController", "to", "its", "initial", "state", "." ]
def Reset(self): """Resets the RpcController to its initial state. After the RpcController has been reset, it may be reused in a new call. Must not be called while an RPC is in progress. """ raise NotImplementedError
[ "def", "Reset", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/google/protobuf/service.py#L132-L138
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/financial.py
python
ppmt
(rate, per, nper, pv, fv=0, when='end')
return total - ipmt(rate, per, nper, pv, fv, when)
Compute the payment against loan principal. .. deprecated:: 1.18 `ppmt` is deprecated; for details, see NEP 32 [1]_. Use the corresponding function in the numpy-financial library, https://pypi.org/project/numpy-financial. Parameters ---------- rate : array_like Rate of interest (per period) per : array_like, int Amount paid against the loan changes. The `per` is the period of interest. nper : array_like Number of compounding periods pv : array_like Present value fv : array_like, optional Future value when : {{'begin', 1}, {'end', 0}}, {string, int} When payments are due ('begin' (1) or 'end' (0)) See Also -------- pmt, pv, ipmt References ---------- .. [1] NumPy Enhancement Proposal (NEP) 32, https://numpy.org/neps/nep-0032-remove-financial-functions.html
Compute the payment against loan principal.
[ "Compute", "the", "payment", "against", "loan", "principal", "." ]
def ppmt(rate, per, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal. .. deprecated:: 1.18 `ppmt` is deprecated; for details, see NEP 32 [1]_. Use the corresponding function in the numpy-financial library, https://pypi.org/project/numpy-financial. Parameters ---------- rate : array_like Rate of interest (per period) per : array_like, int Amount paid against the loan changes. The `per` is the period of interest. nper : array_like Number of compounding periods pv : array_like Present value fv : array_like, optional Future value when : {{'begin', 1}, {'end', 0}}, {string, int} When payments are due ('begin' (1) or 'end' (0)) See Also -------- pmt, pv, ipmt References ---------- .. [1] NumPy Enhancement Proposal (NEP) 32, https://numpy.org/neps/nep-0032-remove-financial-functions.html """ total = pmt(rate, nper, pv, fv, when) return total - ipmt(rate, per, nper, pv, fv, when)
[ "def", "ppmt", "(", "rate", ",", "per", ",", "nper", ",", "pv", ",", "fv", "=", "0", ",", "when", "=", "'end'", ")", ":", "total", "=", "pmt", "(", "rate", ",", "nper", ",", "pv", ",", "fv", ",", "when", ")", "return", "total", "-", "ipmt", "(", "rate", ",", "per", ",", "nper", ",", "pv", ",", "fv", ",", "when", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/financial.py#L483-L520
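NEP 32 moved these helpers into the numpy-financial package with the same signatures; a hedged usage sketch checking the identity in the return statement (principal plus interest equals the full payment):

import numpy_financial as npf  # pip install numpy-financial

rate = 0.075 / 12   # monthly rate
nper = 12 * 15      # 15-year loan
pv = 200_000        # amount borrowed
per = 1             # look at the first payment

principal = npf.ppmt(rate, per, nper, pv)
interest = npf.ipmt(rate, per, nper, pv)
total = npf.pmt(rate, nper, pv)
print(abs(principal + interest - total) < 1e-6)  # True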
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/util.py
python
Cache.__init__
(self, base)
Initialise an instance. :param base: The base directory where the cache should be located.
[]
def __init__(self, base): """ Initialise an instance. :param base: The base directory where the cache should be located. """ # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if not os.path.isdir(base): # pragma: no cover os.makedirs(base) if (os.stat(base).st_mode & 0o77) != 0: logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base))
[ "def", "__init__", "(", "self", ",", "base", ")", ":", "# we use 'isdir' instead of 'exists', because we want to", "# fail if there's a file with that name", "if", "not", "os", ".", "path", ".", "isdir", "(", "base", ")", ":", "# pragma: no cover", "os", ".", "makedirs", "(", "base", ")", "if", "(", "os", ".", "stat", "(", "base", ")", ".", "st_mode", "&", "0o77", ")", "!=", "0", ":", "logger", ".", "warning", "(", "'Directory \\'%s\\' is not private'", ",", "base", ")", "self", ".", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "normpath", "(", "base", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/util.py#L1895-L1919
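Illustrative note (not part of the dataset): a hedged sketch of using this Cache base class directly, assuming distlib is importable (it is also vendored under pip._vendor.distlib as in the record's path):

import tempfile
from distlib.util import Cache

cache_dir = tempfile.mkdtemp()   # mkdtemp creates a private (0700) directory
cache = Cache(cache_dir)         # creates the directory if missing, warns if not private
print(cache.base)                # normalized absolute path of the cache root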
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/array_analysis.py
python
ArrayAnalysis.get_equiv_set
(self, block_label)
return self.equiv_sets[block_label]
Return the equiv_set object of an block given its label.
Return the equiv_set object of an block given its label.
[ "Return", "the", "equiv_set", "object", "of", "an", "block", "given", "its", "label", "." ]
def get_equiv_set(self, block_label): """Return the equiv_set object of an block given its label. """ return self.equiv_sets[block_label]
[ "def", "get_equiv_set", "(", "self", ",", "block_label", ")", ":", "return", "self", ".", "equiv_sets", "[", "block_label", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/array_analysis.py#L975-L978
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py
python
htmlReadDoc
(cur, URL, encoding, options)
return xmlDoc(_obj=ret)
parse an XML in-memory document and build a tree.
parse an XML in-memory document and build a tree.
[ "parse", "an", "XML", "in", "-", "memory", "document", "and", "build", "a", "tree", "." ]
def htmlReadDoc(cur, URL, encoding, options): """parse an XML in-memory document and build a tree. """ ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options) if ret is None:raise treeError('htmlReadDoc() failed') return xmlDoc(_obj=ret)
[ "def", "htmlReadDoc", "(", "cur", ",", "URL", ",", "encoding", ",", "options", ")", ":", "ret", "=", "libxml2mod", ".", "htmlReadDoc", "(", "cur", ",", "URL", ",", "encoding", ",", "options", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'htmlReadDoc() failed'", ")", "return", "xmlDoc", "(", "_obj", "=", "ret", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L42-L46
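Illustrative note (not part of the dataset): a hedged usage sketch for htmlReadDoc, assuming the libxml2 Python bindings are installed:

import libxml2

html = "<html><body><p>hello</p></body></html>"
doc = libxml2.htmlReadDoc(html, None, "UTF-8", 0)  # 0 = default parser options
try:
    root = doc.getRootElement()
    print(root.name)  # -> "html"
finally:
    doc.freeDoc()     # the tree is not garbage collected automatically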
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/lldb/lldb_printers.py
python
OptionalPrinter.__init__
(self, valobj, *_args)
Store the valobj and get the value of the optional.
Store the valobj and get the value of the optional.
[ "Store", "the", "valobj", "and", "get", "the", "value", "of", "the", "optional", "." ]
def __init__(self, valobj, *_args): """Store the valobj and get the value of the optional.""" self.valobj = valobj self.update()
[ "def", "__init__", "(", "self", ",", "valobj", ",", "*", "_args", ")", ":", "self", ".", "valobj", "=", "valobj", "self", ".", "update", "(", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/lldb/lldb_printers.py#L156-L159
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/api/instrument_api.py
python
InstrumentApi.instrument_get_active
(self, **kwargs)
Get all active instruments and instruments that have expired in <24hrs. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.instrument_get_active(async_req=True) >>> result = thread.get() :param async_req bool :return: list[Instrument] If the method is called asynchronously, returns the request thread.
Get all active instruments and instruments that have expired in <24hrs. # noqa: E501
[ "Get", "all", "active", "instruments", "and", "instruments", "that", "have", "expired", "in", "<24hrs", ".", "#", "noqa", ":", "E501" ]
def instrument_get_active(self, **kwargs): # noqa: E501 """Get all active instruments and instruments that have expired in <24hrs. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.instrument_get_active(async_req=True) >>> result = thread.get() :param async_req bool :return: list[Instrument] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.instrument_get_active_with_http_info(**kwargs) # noqa: E501 else: (data) = self.instrument_get_active_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "instrument_get_active", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "instrument_get_active_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "instrument_get_active_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/api/instrument_api.py#L159-L177
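Illustrative note (not part of the dataset): a hedged sketch of calling the generated BitMEX client above, both synchronously and with async_req=True; configuration details are omitted, and this public endpoint needs no API key:

import swagger_client

api = swagger_client.InstrumentApi(swagger_client.ApiClient())

# Synchronous call: returns list[Instrument] directly.
instruments = api.instrument_get_active()

# Asynchronous call: returns a thread-like handle; .get() blocks for the result.
thread = api.instrument_get_active(async_req=True)
instruments_async = thread.get()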
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/plat-mac/findertools.py
python
select
(file)
return finder.select(file_alias)
select a file in the finder. Specify file by name, fsref or fsspec.
select a file in the finder. Specify file by name, fsref or fsspec.
[ "select", "a", "file", "in", "the", "finder", ".", "Specify", "file", "by", "name", "fsref", "or", "fsspec", "." ]
def select(file): """select a file in the finder. Specify file by name, fsref or fsspec.""" finder = _getfinder() fsr = Carbon.File.FSRef(file) file_alias = fsr.FSNewAliasMinimal() return finder.select(file_alias)
[ "def", "select", "(", "file", ")", ":", "finder", "=", "_getfinder", "(", ")", "fsr", "=", "Carbon", ".", "File", ".", "FSRef", "(", "file", ")", "file_alias", "=", "fsr", ".", "FSNewAliasMinimal", "(", ")", "return", "finder", ".", "select", "(", "file_alias", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/plat-mac/findertools.py#L108-L113
naver/sling
5671cd445a2caae0b4dd0332299e4cfede05062c
webkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
python
Handshaker.__init__
(self, request, dispatcher)
Construct an instance. Args: request: mod_python request. dispatcher: Dispatcher (dispatch.Dispatcher). Handshaker will add attributes such as ws_resource in performing handshake.
Construct an instance.
[ "Construct", "an", "instance", "." ]
def __init__(self, request, dispatcher): """Construct an instance. Args: request: mod_python request. dispatcher: Dispatcher (dispatch.Dispatcher). Handshaker will add attributes such as ws_resource in performing handshake. """ self._logger = util.get_class_logger(self) self._request = request self._dispatcher = dispatcher
[ "def", "__init__", "(", "self", ",", "request", ",", "dispatcher", ")", ":", "self", ".", "_logger", "=", "util", ".", "get_class_logger", "(", "self", ")", "self", ".", "_request", "=", "request", "self", ".", "_dispatcher", "=", "dispatcher" ]
https://github.com/naver/sling/blob/5671cd445a2caae0b4dd0332299e4cfede05062c/webkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py#L123-L137
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/cgi.py
python
escape
(s, quote=None)
return s
Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated.
Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated.
[ "Replace", "special", "characters", "&", "<", "and", ">", "to", "HTML", "-", "safe", "sequences", ".", "If", "the", "optional", "flag", "quote", "is", "true", "the", "quotation", "mark", "character", "(", ")", "is", "also", "translated", "." ]
def escape(s, quote=None): '''Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated.''' s = s.replace("&", "&amp;") # Must be done first! s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") if quote: s = s.replace('"', "&quot;") return s
[ "def", "escape", "(", "s", ",", "quote", "=", "None", ")", ":", "s", "=", "s", ".", "replace", "(", "\"&\"", ",", "\"&amp;\"", ")", "# Must be done first!", "s", "=", "s", ".", "replace", "(", "\"<\"", ",", "\"&lt;\"", ")", "s", "=", "s", ".", "replace", "(", "\">\"", ",", "\"&gt;\"", ")", "if", "quote", ":", "s", "=", "s", ".", "replace", "(", "'\"'", ",", "\"&quot;\"", ")", "return", "s" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/cgi.py#L1031-L1040
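Illustrative note (not part of the dataset): the record is the Python 2.6 stdlib cgi.escape; the same calls still work on Python 3 up to 3.7, and html.escape is the modern replacement:

import cgi

print(cgi.escape('<b>Tom & "Jerry"</b>'))
# -> &lt;b&gt;Tom &amp; "Jerry"&lt;/b&gt;

print(cgi.escape('<b>Tom & "Jerry"</b>', quote=True))
# -> &lt;b&gt;Tom &amp; &quot;Jerry&quot;&lt;/b&gt;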
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/tools/jinja2/nodes.py
python
Node.iter_fields
(self, exclude=None, only=None)
This method iterates over all fields that are defined and yields ``(key, value)`` tuples. Per default all fields are returned, but it's possible to limit that to some fields by providing the `only` parameter or to exclude some using the `exclude` parameter. Both should be sets or tuples of field names.
This method iterates over all fields that are defined and yields ``(key, value)`` tuples. Per default all fields are returned, but it's possible to limit that to some fields by providing the `only` parameter or to exclude some using the `exclude` parameter. Both should be sets or tuples of field names.
[ "This", "method", "iterates", "over", "all", "fields", "that", "are", "defined", "and", "yields", "(", "key", "value", ")", "tuples", ".", "Per", "default", "all", "fields", "are", "returned", "but", "it", "s", "possible", "to", "limit", "that", "to", "some", "fields", "by", "providing", "the", "only", "parameter", "or", "to", "exclude", "some", "using", "the", "exclude", "parameter", ".", "Both", "should", "be", "sets", "or", "tuples", "of", "field", "names", "." ]
def iter_fields(self, exclude=None, only=None): """This method iterates over all fields that are defined and yields ``(key, value)`` tuples. Per default all fields are returned, but it's possible to limit that to some fields by providing the `only` parameter or to exclude some using the `exclude` parameter. Both should be sets or tuples of field names. """ for name in self.fields: if (exclude is only is None) or \ (exclude is not None and name not in exclude) or \ (only is not None and name in only): try: yield name, getattr(self, name) except AttributeError: pass
[ "def", "iter_fields", "(", "self", ",", "exclude", "=", "None", ",", "only", "=", "None", ")", ":", "for", "name", "in", "self", ".", "fields", ":", "if", "(", "exclude", "is", "only", "is", "None", ")", "or", "(", "exclude", "is", "not", "None", "and", "name", "not", "in", "exclude", ")", "or", "(", "only", "is", "not", "None", "and", "name", "in", "only", ")", ":", "try", ":", "yield", "name", ",", "getattr", "(", "self", ",", "name", ")", "except", "AttributeError", ":", "pass" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/jinja2/nodes.py#L148-L162
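Illustrative note (not part of the dataset): a hedged sketch of iter_fields on a Jinja2 AST, assuming a standard Jinja2 installation rather than the vendored copy in the record's path:

from jinja2 import Environment

env = Environment()
tree = env.parse("{{ user.name | upper }}")  # returns a nodes.Template

# iter_fields yields (field_name, value) pairs for the node's declared fields.
for name, value in tree.iter_fields():
    print(name, "->", value)

# Restrict the iteration with only= (or exclude=), both taking sets/tuples of field names.
for name, value in tree.iter_fields(only=("body",)):
    print(name)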
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/buildscripts/idl/idl/generator.py
python
_CppSourceFileWriter._gen_doc_sequence_serializer
(self, struct)
Generate the serialize method portion for fields which can be document sequence.
Generate the serialize method portion for fields which can be document sequence.
[ "Generate", "the", "serialize", "method", "portion", "for", "fields", "which", "can", "be", "document", "sequence", "." ]
def _gen_doc_sequence_serializer(self, struct): # type: (ast.Struct) -> None """Generate the serialize method portion for fields which can be document sequence.""" for field in struct.fields: if not field.supports_doc_sequence: continue member_name = _get_field_member_name(field) optional_block_start = '{' if field.optional: optional_block_start = 'if (%s.is_initialized()) {' % (member_name) with self._block(optional_block_start, '}'): self._writer.write_line('OpMsg::DocumentSequence documentSequence;') self._writer.write_template('documentSequence.name = %s.toString();' % (_get_field_constant_name(field))) with self._block('for (const auto& item : %s) {' % (_access_member(field)), '}'): if not field.struct_type: if field.serializer: self._writer.write_line('documentSequence.objs.push_back(item.%s());' % (writer.get_method_name(field.serializer))) else: self._writer.write_line('documentSequence.objs.push_back(item);') else: self._writer.write_line('BSONObjBuilder builder;') self._writer.write_line('item.serialize(&builder);') self._writer.write_line('documentSequence.objs.push_back(builder.obj());') self._writer.write_template('request.sequences.emplace_back(documentSequence);') # Add a blank line after each block self._writer.write_empty_line()
[ "def", "_gen_doc_sequence_serializer", "(", "self", ",", "struct", ")", ":", "# type: (ast.Struct) -> None", "for", "field", "in", "struct", ".", "fields", ":", "if", "not", "field", ".", "supports_doc_sequence", ":", "continue", "member_name", "=", "_get_field_member_name", "(", "field", ")", "optional_block_start", "=", "'{'", "if", "field", ".", "optional", ":", "optional_block_start", "=", "'if (%s.is_initialized()) {'", "%", "(", "member_name", ")", "with", "self", ".", "_block", "(", "optional_block_start", ",", "'}'", ")", ":", "self", ".", "_writer", ".", "write_line", "(", "'OpMsg::DocumentSequence documentSequence;'", ")", "self", ".", "_writer", ".", "write_template", "(", "'documentSequence.name = %s.toString();'", "%", "(", "_get_field_constant_name", "(", "field", ")", ")", ")", "with", "self", ".", "_block", "(", "'for (const auto& item : %s) {'", "%", "(", "_access_member", "(", "field", ")", ")", ",", "'}'", ")", ":", "if", "not", "field", ".", "struct_type", ":", "if", "field", ".", "serializer", ":", "self", ".", "_writer", ".", "write_line", "(", "'documentSequence.objs.push_back(item.%s());'", "%", "(", "writer", ".", "get_method_name", "(", "field", ".", "serializer", ")", ")", ")", "else", ":", "self", ".", "_writer", ".", "write_line", "(", "'documentSequence.objs.push_back(item);'", ")", "else", ":", "self", ".", "_writer", ".", "write_line", "(", "'BSONObjBuilder builder;'", ")", "self", ".", "_writer", ".", "write_line", "(", "'item.serialize(&builder);'", ")", "self", ".", "_writer", ".", "write_line", "(", "'documentSequence.objs.push_back(builder.obj());'", ")", "self", ".", "_writer", ".", "write_template", "(", "'request.sequences.emplace_back(documentSequence);'", ")", "# Add a blank line after each block", "self", ".", "_writer", ".", "write_empty_line", "(", ")" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/idl/idl/generator.py#L1271-L1306
luohenyueji/OpenCV-Practical-Exercise
8a747b05b5c56d5be4611c5e72c8983f6b96d81d
ITEM 30-39/ITEM37 OpenCV_BRISQUE/libsvm/python/svmutil.py
python
evaluations
(ty, pv)
return (ACC, MSE, SCC)
evaluations(ty, pv) -> (ACC, MSE, SCC) Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv).
evaluations(ty, pv) -> (ACC, MSE, SCC)
[ "evaluations", "(", "ty", "pv", ")", "-", ">", "(", "ACC", "MSE", "SCC", ")" ]
def evaluations(ty, pv): """ evaluations(ty, pv) -> (ACC, MSE, SCC) Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv). """ if len(ty) != len(pv): raise ValueError("len(ty) must equal to len(pv)") total_correct = total_error = 0 sumv = sumy = sumvv = sumyy = sumvy = 0 for v, y in zip(pv, ty): if y == v: total_correct += 1 total_error += (v-y)*(v-y) sumv += v sumy += y sumvv += v*v sumyy += y*y sumvy += v*y l = len(ty) ACC = 100.0*total_correct/l MSE = total_error/l try: SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy)) except: SCC = float('nan') return (ACC, MSE, SCC)
[ "def", "evaluations", "(", "ty", ",", "pv", ")", ":", "if", "len", "(", "ty", ")", "!=", "len", "(", "pv", ")", ":", "raise", "ValueError", "(", "\"len(ty) must equal to len(pv)\"", ")", "total_correct", "=", "total_error", "=", "0", "sumv", "=", "sumy", "=", "sumvv", "=", "sumyy", "=", "sumvy", "=", "0", "for", "v", ",", "y", "in", "zip", "(", "pv", ",", "ty", ")", ":", "if", "y", "==", "v", ":", "total_correct", "+=", "1", "total_error", "+=", "(", "v", "-", "y", ")", "*", "(", "v", "-", "y", ")", "sumv", "+=", "v", "sumy", "+=", "y", "sumvv", "+=", "v", "*", "v", "sumyy", "+=", "y", "*", "y", "sumvy", "+=", "v", "*", "y", "l", "=", "len", "(", "ty", ")", "ACC", "=", "100.0", "*", "total_correct", "/", "l", "MSE", "=", "total_error", "/", "l", "try", ":", "SCC", "=", "(", "(", "l", "*", "sumvy", "-", "sumv", "*", "sumy", ")", "*", "(", "l", "*", "sumvy", "-", "sumv", "*", "sumy", ")", ")", "/", "(", "(", "l", "*", "sumvv", "-", "sumv", "*", "sumv", ")", "*", "(", "l", "*", "sumyy", "-", "sumy", "*", "sumy", ")", ")", "except", ":", "SCC", "=", "float", "(", "'nan'", ")", "return", "(", "ACC", ",", "MSE", ",", "SCC", ")" ]
https://github.com/luohenyueji/OpenCV-Practical-Exercise/blob/8a747b05b5c56d5be4611c5e72c8983f6b96d81d/ITEM 30-39/ITEM37 OpenCV_BRISQUE/libsvm/python/svmutil.py#L57-L84
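Illustrative note (not part of the dataset): a hedged usage sketch for evaluations, assuming the libsvm python/ directory (svmutil.py) is on sys.path:

from svmutil import evaluations

y_true = [1, -1, 1, 1]
y_pred = [1, -1, -1, 1]

ACC, MSE, SCC = evaluations(y_true, y_pred)
print(ACC)  # 75.0  (3 of 4 labels match)
print(MSE)  # 1.0   (one miss of magnitude 2 -> squared error 4, averaged over 4 samples)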
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pkg_resources/__init__.py
python
load_entry_point
(dist, group, name)
return get_distribution(dist).load_entry_point(group, name)
Return `name` entry point of `group` for `dist` or raise ImportError
Return `name` entry point of `group` for `dist` or raise ImportError
[ "Return", "name", "entry", "point", "of", "group", "for", "dist", "or", "raise", "ImportError" ]
def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name)
[ "def", "load_entry_point", "(", "dist", ",", "group", ",", "name", ")", ":", "return", "get_distribution", "(", "dist", ")", ".", "load_entry_point", "(", "group", ",", "name", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pkg_resources/__init__.py#L488-L490
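Illustrative note (not part of the dataset): load_entry_point resolves the callable behind a distribution's entry point; the distribution/group/name triple below is only an example, any installed console script works:

from pkg_resources import load_entry_point

# Roughly what a setuptools-generated "pip" console script does at startup.
pip_main = load_entry_point("pip", "console_scripts", "pip")
print(pip_main)  # -> the function object the pip command invokes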
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/debug/cli/stepper_cli.py
python
NodeStepperCLI.list_sorted_nodes
(self, args, screen_info=None)
return output
List the sorted transitive closure of the stepper's fetches.
List the sorted transitive closure of the stepper's fetches.
[ "List", "the", "sorted", "transitive", "closure", "of", "the", "stepper", "s", "fetches", "." ]
def list_sorted_nodes(self, args, screen_info=None): """List the sorted transitive closure of the stepper's fetches.""" # TODO(cais): Use pattern such as del args, del screen_info python/debug. _ = args _ = screen_info parsed = self.arg_parsers["list_sorted_nodes"].parse_args(args) if parsed.lower_bound != -1 and parsed.upper_bound != -1: index_range = [ max(0, parsed.lower_bound), min(len(self._sorted_nodes), parsed.upper_bound) ] verbose = False else: index_range = [0, len(self._sorted_nodes)] verbose = True handle_node_names = self._node_stepper.handle_node_names() intermediate_tensor_names = self._node_stepper.intermediate_tensor_names() override_names = self._node_stepper.override_names() dirty_variable_names = [ dirty_variable.split(":")[0] for dirty_variable in self._node_stepper.dirty_variables() ] lines = [] if verbose: lines.extend( ["Topologically-sorted transitive input(s) and fetch(es):", ""]) for i, element_name in enumerate(self._sorted_nodes): if i < index_range[0] or i >= index_range[1]: continue # TODO(cais): Use fixed-width text to show node index. if i == self._next: node_prefix = RL(" ") + RL(self.NEXT_NODE_POINTER_STR, "bold") else: node_prefix = RL(" ") node_prefix += "(%d / %d)" % (i + 1, len(self._sorted_nodes)) + " [" node_prefix += self._get_status_labels( element_name, handle_node_names, intermediate_tensor_names, override_names, dirty_variable_names) lines.append(node_prefix + "] " + element_name) output = debugger_cli_common.rich_text_lines_from_rich_line_list(lines) if verbose: output.extend(self._node_status_label_legend()) return output
[ "def", "list_sorted_nodes", "(", "self", ",", "args", ",", "screen_info", "=", "None", ")", ":", "# TODO(cais): Use pattern such as del args, del screen_info python/debug.", "_", "=", "args", "_", "=", "screen_info", "parsed", "=", "self", ".", "arg_parsers", "[", "\"list_sorted_nodes\"", "]", ".", "parse_args", "(", "args", ")", "if", "parsed", ".", "lower_bound", "!=", "-", "1", "and", "parsed", ".", "upper_bound", "!=", "-", "1", ":", "index_range", "=", "[", "max", "(", "0", ",", "parsed", ".", "lower_bound", ")", ",", "min", "(", "len", "(", "self", ".", "_sorted_nodes", ")", ",", "parsed", ".", "upper_bound", ")", "]", "verbose", "=", "False", "else", ":", "index_range", "=", "[", "0", ",", "len", "(", "self", ".", "_sorted_nodes", ")", "]", "verbose", "=", "True", "handle_node_names", "=", "self", ".", "_node_stepper", ".", "handle_node_names", "(", ")", "intermediate_tensor_names", "=", "self", ".", "_node_stepper", ".", "intermediate_tensor_names", "(", ")", "override_names", "=", "self", ".", "_node_stepper", ".", "override_names", "(", ")", "dirty_variable_names", "=", "[", "dirty_variable", ".", "split", "(", "\":\"", ")", "[", "0", "]", "for", "dirty_variable", "in", "self", ".", "_node_stepper", ".", "dirty_variables", "(", ")", "]", "lines", "=", "[", "]", "if", "verbose", ":", "lines", ".", "extend", "(", "[", "\"Topologically-sorted transitive input(s) and fetch(es):\"", ",", "\"\"", "]", ")", "for", "i", ",", "element_name", "in", "enumerate", "(", "self", ".", "_sorted_nodes", ")", ":", "if", "i", "<", "index_range", "[", "0", "]", "or", "i", ">=", "index_range", "[", "1", "]", ":", "continue", "# TODO(cais): Use fixed-width text to show node index.", "if", "i", "==", "self", ".", "_next", ":", "node_prefix", "=", "RL", "(", "\" \"", ")", "+", "RL", "(", "self", ".", "NEXT_NODE_POINTER_STR", ",", "\"bold\"", ")", "else", ":", "node_prefix", "=", "RL", "(", "\" \"", ")", "node_prefix", "+=", "\"(%d / %d)\"", "%", "(", "i", "+", "1", ",", "len", "(", "self", ".", "_sorted_nodes", ")", ")", "+", "\" [\"", "node_prefix", "+=", "self", ".", "_get_status_labels", "(", "element_name", ",", "handle_node_names", ",", "intermediate_tensor_names", ",", "override_names", ",", "dirty_variable_names", ")", "lines", ".", "append", "(", "node_prefix", "+", "\"] \"", "+", "element_name", ")", "output", "=", "debugger_cli_common", ".", "rich_text_lines_from_rich_line_list", "(", "lines", ")", "if", "verbose", ":", "output", ".", "extend", "(", "self", ".", "_node_status_label_legend", "(", ")", ")", "return", "output" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/debug/cli/stepper_cli.py#L222-L279
psnonis/FinBERT
c0c555d833a14e2316a3701e59c0b5156f804b4e
bert/modeling.py
python
BertConfig.from_json_file
(cls, json_file)
return cls.from_dict(json.loads(text))
Constructs a `BertConfig` from a json file of parameters.
Constructs a `BertConfig` from a json file of parameters.
[ "Constructs", "a", "BertConfig", "from", "a", "json", "file", "of", "parameters", "." ]
def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with tf.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text))
[ "def", "from_json_file", "(", "cls", ",", "json_file", ")", ":", "with", "tf", ".", "gfile", ".", "GFile", "(", "json_file", ",", "\"r\"", ")", "as", "reader", ":", "text", "=", "reader", ".", "read", "(", ")", "return", "cls", ".", "from_dict", "(", "json", ".", "loads", "(", "text", ")", ")" ]
https://github.com/psnonis/FinBERT/blob/c0c555d833a14e2316a3701e59c0b5156f804b4e/bert/modeling.py#L91-L95
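Illustrative note (not part of the dataset): a hedged usage sketch, assuming bert/modeling.py from the record is importable as modeling and that "bert_config.json" is a placeholder path to a standard BERT config file:

from modeling import BertConfig

config = BertConfig.from_json_file("bert_config.json")
print(config.hidden_size, config.num_hidden_layers)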
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/special/basic.py
python
pbdn_seq
(n, z)
return cpb[:n1+1], cpd[:n1+1]
Parabolic cylinder functions Dn(z) and derivatives. Parameters ---------- n : int Order of the parabolic cylinder function z : complex Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_i(z), for i=0, ..., i=n. dp : ndarray Derivatives D_i'(z), for i=0, ..., i=n. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. http://jin.ece.illinois.edu/specfunc.html
Parabolic cylinder functions Dn(z) and derivatives.
[ "Parabolic", "cylinder", "functions", "Dn", "(", "z", ")", "and", "derivatives", "." ]
def pbdn_seq(n, z): """Parabolic cylinder functions Dn(z) and derivatives. Parameters ---------- n : int Order of the parabolic cylinder function z : complex Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_i(z), for i=0, ..., i=n. dp : ndarray Derivatives D_i'(z), for i=0, ..., i=n. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") if (floor(n) != n): raise ValueError("n must be an integer.") if (abs(n) <= 1): n1 = 1 else: n1 = n cpb, cpd = specfun.cpbdn(n1, z) return cpb[:n1+1], cpd[:n1+1]
[ "def", "pbdn_seq", "(", "n", ",", "z", ")", ":", "if", "not", "(", "isscalar", "(", "n", ")", "and", "isscalar", "(", "z", ")", ")", ":", "raise", "ValueError", "(", "\"arguments must be scalars.\"", ")", "if", "(", "floor", "(", "n", ")", "!=", "n", ")", ":", "raise", "ValueError", "(", "\"n must be an integer.\"", ")", "if", "(", "abs", "(", "n", ")", "<=", "1", ")", ":", "n1", "=", "1", "else", ":", "n1", "=", "n", "cpb", ",", "cpd", "=", "specfun", ".", "cpbdn", "(", "n1", ",", "z", ")", "return", "cpb", "[", ":", "n1", "+", "1", "]", ",", "cpd", "[", ":", "n1", "+", "1", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/special/basic.py#L1848-L1881
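Illustrative note (not part of the dataset): pbdn_seq is exposed publicly as scipy.special.pbdn_seq, so a hedged usage sketch could be:

from scipy.special import pbdn_seq

dv, dp = pbdn_seq(3, 2.0 + 0j)
# dv[i] = D_i(2.0) and dp[i] = D_i'(2.0) for i = 0..3, so both have length n + 1 = 4.
print(len(dv), len(dp))  # 4 4
print(dv[0])             # D_0(z) = exp(-z**2/4), so roughly exp(-1) ~ 0.368 here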
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/src/robotsim.py
python
Simulator.setSimStep
(self, dt: "double")
return _robotsim.Simulator_setSimStep(self, dt)
r""" setSimStep(Simulator self, double dt) Sets the internal simulation substep. Values < 0.01 are recommended.
r""" setSimStep(Simulator self, double dt)
[ "r", "setSimStep", "(", "Simulator", "self", "double", "dt", ")" ]
def setSimStep(self, dt: "double") -> "void": r""" setSimStep(Simulator self, double dt) Sets the internal simulation substep. Values < 0.01 are recommended. """ return _robotsim.Simulator_setSimStep(self, dt)
[ "def", "setSimStep", "(", "self", ",", "dt", ":", "\"double\"", ")", "->", "\"void\"", ":", "return", "_robotsim", ".", "Simulator_setSimStep", "(", "self", ",", "dt", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L8531-L8539
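Illustrative note (not part of the dataset): a hedged sketch of setSimStep inside a Klampt simulation loop; "my_world.xml" is a placeholder world file and the loop constants are arbitrary:

import klampt

world = klampt.WorldModel()
world.readFile("my_world.xml")

sim = klampt.Simulator(world)
sim.setSimStep(0.005)       # internal substep; the docstring recommends values < 0.01
for _ in range(100):
    sim.simulate(0.01)      # advance the outer simulation clock by 10 ms per iteration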