Dataset schema (one row per extracted function):

| column | type |
|---|---|
| nwo | string (5-86 chars) |
| sha | string (40 chars) |
| path | string (4-189 chars) |
| language | string (1 class) |
| identifier | string (1-94 chars) |
| parameters | string (2-4.03k chars) |
| argument_list | string (1 class) |
| return_statement | string (0-11.5k chars) |
| docstring | string (1-33.2k chars) |
| docstring_summary | string (0-5.15k chars) |
| docstring_tokens | sequence |
| function | string (34-151k chars) |
| function_tokens | sequence |
| url | string (90-278 chars) |

openmm/openmm | cb293447c4fc8b03976dfe11399f107bab70f3d9 | wrappers/python/openmm/unit/baseunit.py | python | BaseUnit.iter_base_dimensions

```python
def iter_base_dimensions(self):
    """
    Returns a dictionary of BaseDimension:exponent pairs, describing the dimension of this unit.
    """
    yield (self.dimension, 1)
```
"def",
"iter_base_dimensions",
"(",
"self",
")",
":",
"yield",
"(",
"self",
".",
"dimension",
",",
"1",
")"
https://github.com/openmm/openmm/blob/cb293447c4fc8b03976dfe11399f107bab70f3d9/wrappers/python/openmm/unit/baseunit.py#L78-L82

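A hedged usage sketch for the generator above; it assumes an OpenMM installation, and the `BaseUnit(BaseDimension(...), name, symbol)` constructor signature is an assumption about the surrounding `baseunit.py` module, not something shown in this row:

```python
# Sketch only: requires openmm; the BaseUnit constructor arguments
# (dimension, name, symbol) are an assumption about this module's API.
from openmm.unit import BaseDimension, BaseUnit

length = BaseDimension("length")
meter = BaseUnit(length, "meter", "m")

for dim, exponent in meter.iter_base_dimensions():
    print(dim, exponent)  # expected: the "length" dimension with exponent 1
```
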
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/waflib/Task.py | python | TaskBase.__hash__

```python
def __hash__(self):
    "Very fast hashing scheme but not persistent (replace/implement in subclasses and see :py:meth:`waflib.Task.Task.uid`)"
    return id(self)
```
"def",
"__hash__",
"(",
"self",
")",
":",
"return",
"id",
"(",
"self",
")"
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Task.py#L193-L195

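A small standard-library sketch of what an `id()`-based hash implies in practice: equal-looking objects hash differently, and the value is only stable within a single process run, which is why waf tells subclasses to implement a persistent `uid()` instead:

```python
class Task(object):
    """Minimal stand-in for waflib's TaskBase with an id()-based hash."""
    def __hash__(self):
        return id(self)

a, b = Task(), Task()
print(hash(a) == hash(b))  # False: identity-based, not content-based
print(hash(a) == id(a))    # True, but only within this process run
# The value changes across runs, so it must never be persisted to disk.
```
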
mbbill/JSC.js | fc2a917e8a83424fdbd008a8c9202383312855c6 | Source/JavaScriptCore/disassembler/udis86/ud_opcode.py | python | UdOpcodeTable.entryAt

```python
def entryAt(self, index):
    """Returns the entry at a given index of the table,
    None if there is none. Raises an exception if the
    index is out of bounds.
    """
    if index < self.size():
        return self._entries.get(index, None)
    raise self.IndexError("index out of bounds: %s" % index)
```
"def",
"entryAt",
"(",
"self",
",",
"index",
")",
":",
"if",
"index",
"<",
"self",
".",
"size",
"(",
")",
":",
"return",
"self",
".",
"_entries",
".",
"get",
"(",
"index",
",",
"None",
")",
"raise",
"self",
".",
"IndexError",
"(",
"\"index out of bounds: %s\"",
"%",
"index",
")"
https://github.com/mbbill/JSC.js/blob/fc2a917e8a83424fdbd008a8c9202383312855c6/Source/JavaScriptCore/disassembler/udis86/ud_opcode.py#L215-L222

mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/resmokelib/plugin.py | python | PluginInterface.add_subcommand

```python
def add_subcommand(self, subparsers):
    """
    Add parser options for this plugin.

    :param subparsers: argparse subparsers
    """
    raise NotImplementedError()
```
"def",
"add_subcommand",
"(",
"self",
",",
"subparsers",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/plugin.py#L17-L23

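The `subparsers` object handed to implementations is standard `argparse` machinery. A minimal stdlib sketch of a concrete plugin wiring in its own subcommand (the `hello` subcommand and its flag are illustrative, not taken from resmoke):

```python
import argparse

class HelloPlugin:
    def add_subcommand(self, subparsers):
        parser = subparsers.add_parser("hello", help="illustrative subcommand")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: print("hello, %s" % args.name))

top = argparse.ArgumentParser(prog="resmoke-like")
subparsers = top.add_subparsers(dest="command")
HelloPlugin().add_subcommand(subparsers)

args = top.parse_args(["hello", "--name", "resmoke"])
args.func(args)  # prints: hello, resmoke
```
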
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/cudf/cudf/core/reshape.py | python | _get_unique

```python
def _get_unique(column, dummy_na):
    """
    Returns unique values in a column, if
    dummy_na is False, nan's are also dropped.
    """
    if isinstance(column, cudf.core.column.CategoricalColumn):
        unique = column.categories
    else:
        unique = column.unique()
    if not dummy_na:
        if np.issubdtype(unique.dtype, np.floating):
            unique = unique.nans_to_nulls()
        unique = unique.dropna()
    return unique
```
"def",
"_get_unique",
"(",
"column",
",",
"dummy_na",
")",
":",
"if",
"isinstance",
"(",
"column",
",",
"cudf",
".",
"core",
".",
"column",
".",
"CategoricalColumn",
")",
":",
"unique",
"=",
"column",
".",
"categories",
"else",
":",
"unique",
"=",
"column",
".",
"unique",
"(",
")",
"if",
"not",
"dummy_na",
":",
"if",
"np",
".",
"issubdtype",
"(",
"unique",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"unique",
"=",
"unique",
".",
"nans_to_nulls",
"(",
")",
"unique",
"=",
"unique",
".",
"dropna",
"(",
")",
"return",
"unique"
https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/reshape.py#L1074-L1087

intel/llvm | e6d0547e9d99b5a56430c4749f6c7e328bf221ab | lldb/third_party/Python/module/pexpect-4.6/pexpect/expect.py | python | searcher_re.__str__

```python
def __str__(self):
    '''This returns a human-readable string that represents the state of
    the object.'''
    #ss = [(n, ' %d: re.compile("%s")' %
    #    (n, repr(s.pattern))) for n, s in self._searches]
    ss = list()
    for n, s in self._searches:
        ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
    ss.append((-1, 'searcher_re:'))
    if self.eof_index >= 0:
        ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
    if self.timeout_index >= 0:
        ss.append((self.timeout_index, ' %d: TIMEOUT' %
                   self.timeout_index))
    ss.sort()
    ss = list(zip(*ss))[1]
    return '\n'.join(ss)
```
"def",
"__str__",
"(",
"self",
")",
":",
"#ss = [(n, ' %d: re.compile(\"%s\")' %",
"# (n, repr(s.pattern))) for n, s in self._searches]",
"ss",
"=",
"list",
"(",
")",
"for",
"n",
",",
"s",
"in",
"self",
".",
"_searches",
":",
"ss",
".",
"append",
"(",
"(",
"n",
",",
"' %d: re.compile(%r)'",
"%",
"(",
"n",
",",
"s",
".",
"pattern",
")",
")",
")",
"ss",
".",
"append",
"(",
"(",
"-",
"1",
",",
"'searcher_re:'",
")",
")",
"if",
"self",
".",
"eof_index",
">=",
"0",
":",
"ss",
".",
"append",
"(",
"(",
"self",
".",
"eof_index",
",",
"' %d: EOF'",
"%",
"self",
".",
"eof_index",
")",
")",
"if",
"self",
".",
"timeout_index",
">=",
"0",
":",
"ss",
".",
"append",
"(",
"(",
"self",
".",
"timeout_index",
",",
"' %d: TIMEOUT'",
"%",
"self",
".",
"timeout_index",
")",
")",
"ss",
".",
"sort",
"(",
")",
"ss",
"=",
"list",
"(",
"zip",
"(",
"*",
"ss",
")",
")",
"[",
"1",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"ss",
")"
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/third_party/Python/module/pexpect-4.6/pexpect/expect.py#L256-L273

SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | xmlNode.xpointerNewLocationSetNodes

```python
def xpointerNewLocationSetNodes(self, end):
    """Create a new xmlXPathObjectPtr of type LocationSet and
       initialize it with the single range made of the two nodes
       @start and @end """
    if end is None: end__o = None
    else: end__o = end._o
    ret = libxml2mod.xmlXPtrNewLocationSetNodes(self._o, end__o)
    if ret is None: raise treeError('xmlXPtrNewLocationSetNodes() failed')
    return xpathObjectRet(ret)
```
"def",
"xpointerNewLocationSetNodes",
"(",
"self",
",",
"end",
")",
":",
"if",
"end",
"is",
"None",
":",
"end__o",
"=",
"None",
"else",
":",
"end__o",
"=",
"end",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlXPtrNewLocationSetNodes",
"(",
"self",
".",
"_o",
",",
"end__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlXPtrNewLocationSetNodes() failed'",
")",
"return",
"xpathObjectRet",
"(",
"ret",
")"
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L3876-L3884

hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/android/loading/cloud/backend/multiprocessing_helper.py | python | _MultiprocessingWrapper

```python
def _MultiprocessingWrapper(queue, memory_share, function, args):
  """Helper function that sets a memory limit on the current process, then
  calls |function| on |args| and writes the results to |queue|.

  Args:
    queue: (multiprocessing.Queue) Queue where the results of the wrapped
      function are written.
    memory_share: (float) Share coefficient of the total physical memory that
      the process can use.
    function: The wrapped function.
    args: (list) Arguments for the wrapped function.
  """
  try:
    if memory_share:
      _LimitMemory(memory_share)
    queue.put(function(*args))
  except Exception:
    queue.put(None)
```
"def",
"_MultiprocessingWrapper",
"(",
"queue",
",",
"memory_share",
",",
"function",
",",
"args",
")",
":",
"try",
":",
"if",
"memory_share",
":",
"_LimitMemory",
"(",
"memory_share",
")",
"queue",
".",
"put",
"(",
"function",
"(",
"*",
"args",
")",
")",
"except",
"Exception",
":",
"queue",
".",
"put",
"(",
"None",
")"
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/android/loading/cloud/backend/multiprocessing_helper.py#L26-L44

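The caller (not shown in this row) runs the wrapper in a child process so that an out-of-memory kill cannot take down the parent. A standard-library sketch of the same pattern; the `_LimitMemory` equivalent here uses `resource.setrlimit`, which is Unix-only, and the byte limit is illustrative:

```python
import multiprocessing
import resource  # Unix-only

def _limit_memory(max_bytes):
    # Cap the address space of the current (child) process.
    resource.setrlimit(resource.RLIMIT_AS, (max_bytes, max_bytes))

def _wrapper(queue, max_bytes, function, args):
    try:
        if max_bytes:
            _limit_memory(max_bytes)
        queue.put(function(*args))
    except Exception:
        queue.put(None)  # Any failure, including MemoryError, reports None.

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(
        target=_wrapper, args=(queue, 512 * 1024 * 1024, sum, ([1, 2, 3],)))
    proc.start()
    print(queue.get())  # 6, or None if the child exceeded its limit
    proc.join()
```
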
OpenChemistry/tomviz | 0a903679318f191cb7dd3eb5ff5bc3a7d3320d9a | acquisition/tomviz/acquisition/vendors/passive/__init__.py | python | PassiveWatchSource.acquisition_params

```python
def acquisition_params(self, **params):
    """
    Update and fetch the acquisition parameters.

    :param params: The acquisition parameters.
    :type params: dict
    :returns: The current acquisition parameters
    """
    pass
```
"def",
"acquisition_params",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"pass"
https://github.com/OpenChemistry/tomviz/blob/0a903679318f191cb7dd3eb5ff5bc3a7d3320d9a/acquisition/tomviz/acquisition/vendors/passive/__init__.py#L214-L221

SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | uCSIsMongolian

```python
def uCSIsMongolian(code):
    """Check whether the character is part of Mongolian UCS Block """
    ret = libxml2mod.xmlUCSIsMongolian(code)
    return ret
```
"def",
"uCSIsMongolian",
"(",
"code",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlUCSIsMongolian",
"(",
"code",
")",
"return",
"ret"
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L2720-L2723

xiaolonw/caffe-video_triplet | c39ea1ad6e937ccf7deba4510b7e555165abf05f | scripts/cpp_lint.py | python | ProcessFile

```python
def ProcessFile(filename, vlevel, extra_check_functions=[]):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report. Every error of confidence
      >= verbose_level will be reported. 0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  _SetVerboseLevel(vlevel)
  try:
    # Support the UNIX convention of using "-" for stdin. Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below. If it is not expected to be present (i.e. os.linesep !=
    # '\r\n' as in Windows), a warning is issued below if this file
    # is processed.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    carriage_return_found = False
    # Remove trailing '\r'.
    for linenum in range(len(lines)):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        carriage_return_found = True
  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
      # several lines.
      Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found;'
            'better to use only a \\n')

  sys.stderr.write('Done processing %s\n' % filename)
```
"def",
"ProcessFile",
"(",
"filename",
",",
"vlevel",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"_SetVerboseLevel",
"(",
"vlevel",
")",
"try",
":",
"# Support the UNIX convention of using \"-\" for stdin. Note that",
"# we are not opening the file with universal newline support",
"# (which codecs doesn't support anyway), so the resulting lines do",
"# contain trailing '\\r' characters if we are reading a file that",
"# has CRLF endings.",
"# If after the split a trailing '\\r' is present, it is removed",
"# below. If it is not expected to be present (i.e. os.linesep !=",
"# '\\r\\n' as in Windows), a warning is issued below if this file",
"# is processed.",
"if",
"filename",
"==",
"'-'",
":",
"lines",
"=",
"codecs",
".",
"StreamReaderWriter",
"(",
"sys",
".",
"stdin",
",",
"codecs",
".",
"getreader",
"(",
"'utf8'",
")",
",",
"codecs",
".",
"getwriter",
"(",
"'utf8'",
")",
",",
"'replace'",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"else",
":",
"lines",
"=",
"codecs",
".",
"open",
"(",
"filename",
",",
"'r'",
",",
"'utf8'",
",",
"'replace'",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"carriage_return_found",
"=",
"False",
"# Remove trailing '\\r'.",
"for",
"linenum",
"in",
"range",
"(",
"len",
"(",
"lines",
")",
")",
":",
"if",
"lines",
"[",
"linenum",
"]",
".",
"endswith",
"(",
"'\\r'",
")",
":",
"lines",
"[",
"linenum",
"]",
"=",
"lines",
"[",
"linenum",
"]",
".",
"rstrip",
"(",
"'\\r'",
")",
"carriage_return_found",
"=",
"True",
"except",
"IOError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Skipping input '%s': Can't open for reading\\n\"",
"%",
"filename",
")",
"return",
"# Note, if no dot is found, this will give the entire filename as the ext.",
"file_extension",
"=",
"filename",
"[",
"filename",
".",
"rfind",
"(",
"'.'",
")",
"+",
"1",
":",
"]",
"# When reading from stdin, the extension is unknown, so no cpplint tests",
"# should rely on the extension.",
"if",
"filename",
"!=",
"'-'",
"and",
"file_extension",
"not",
"in",
"_valid_extensions",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Ignoring %s; not a valid file name '",
"'(%s)\\n'",
"%",
"(",
"filename",
",",
"', '",
".",
"join",
"(",
"_valid_extensions",
")",
")",
")",
"else",
":",
"ProcessFileData",
"(",
"filename",
",",
"file_extension",
",",
"lines",
",",
"Error",
",",
"extra_check_functions",
")",
"if",
"carriage_return_found",
"and",
"os",
".",
"linesep",
"!=",
"'\\r\\n'",
":",
"# Use 0 for linenum since outputting only one error for potentially",
"# several lines.",
"Error",
"(",
"filename",
",",
"0",
",",
"'whitespace/newline'",
",",
"1",
",",
"'One or more unexpected \\\\r (^M) found;'",
"'better to use only a \\\\n'",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Done processing %s\\n'",
"%",
"filename",
")"
https://github.com/xiaolonw/caffe-video_triplet/blob/c39ea1ad6e937ccf7deba4510b7e555165abf05f/scripts/cpp_lint.py#L4689-L4754

oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/deps/v8/tools/clusterfuzz/v8_foozzie.py | python | content_bailout

```python
def content_bailout(content, ignore_fun):
  """Print failure state and return if ignore_fun matches content."""
  bug = (ignore_fun(content) or '').strip()
  if bug:
    raise FailException(FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression=bug))
```
"def",
"content_bailout",
"(",
"content",
",",
"ignore_fun",
")",
":",
"bug",
"=",
"(",
"ignore_fun",
"(",
"content",
")",
"or",
"''",
")",
".",
"strip",
"(",
")",
"if",
"bug",
":",
"raise",
"FailException",
"(",
"FAILURE_HEADER_TEMPLATE",
"%",
"dict",
"(",
"configs",
"=",
"''",
",",
"source_key",
"=",
"''",
",",
"suppression",
"=",
"bug",
")",
")"
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/v8/tools/clusterfuzz/v8_foozzie.py#L316-L321

FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/draftguitools/gui_mirror.py | python | Mirror.numericInput

```python
def numericInput(self, numx, numy, numz):
    """Validate the entry fields in the user interface.

    This function is called by the toolbar or taskpanel interface
    when valid x, y, and z have been entered in the input fields.
    """
    self.point = App.Vector(numx, numy, numz)
    if not self.node:
        self.node.append(self.point)
        if self.ghost:
            self.ghost.on()
        _msg(translate("draft", "Pick end point of mirror line"))
    else:
        last = self.node[-1]
        if self.ui.isCopy.isChecked():
            self.mirror(last, self.point, True)
        else:
            self.mirror(last, self.point)
        self.finish()
```
"def",
"numericInput",
"(",
"self",
",",
"numx",
",",
"numy",
",",
"numz",
")",
":",
"self",
".",
"point",
"=",
"App",
".",
"Vector",
"(",
"numx",
",",
"numy",
",",
"numz",
")",
"if",
"not",
"self",
".",
"node",
":",
"self",
".",
"node",
".",
"append",
"(",
"self",
".",
"point",
")",
"if",
"self",
".",
"ghost",
":",
"self",
".",
"ghost",
".",
"on",
"(",
")",
"_msg",
"(",
"translate",
"(",
"\"draft\"",
",",
"\"Pick end point of mirror line\"",
")",
")",
"else",
":",
"last",
"=",
"self",
".",
"node",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"ui",
".",
"isCopy",
".",
"isChecked",
"(",
")",
":",
"self",
".",
"mirror",
"(",
"last",
",",
"self",
".",
"point",
",",
"True",
")",
"else",
":",
"self",
".",
"mirror",
"(",
"last",
",",
"self",
".",
"point",
")",
"self",
".",
"finish",
"(",
")"
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_mirror.py#L192-L210

blakesmith/embedded | 61d02c8feed34eff75fe64f29cc8997f1c480867 | nome/hardware/KiBoM/KiBOM/netlist_reader.py | python | netlist.endDocument

```python
def endDocument(self):
    """Called when the netlist document has been fully parsed"""
    # When the document is complete, the library parts must be linked to
    # the components as they are separate in the tree so as not to
    # duplicate library part information for every component
    for c in self.components:
        for p in self.libparts:
            if p.getLibName() == c.getLibName():
                if p.getPartName() == c.getPartName():
                    c.setLibPart(p)
                    break
                else:
                    aliases = p.getAliases()
                    if aliases and self.aliasMatch(c.getPartName(), aliases):
                        c.setLibPart(p)
                        break

        if not c.getLibPart():
            print('missing libpart for ref:', c.getRef(), c.getPartName(), c.getLibName())
```
"def",
"endDocument",
"(",
"self",
")",
":",
"# When the document is complete, the library parts must be linked to",
"# the components as they are seperate in the tree so as not to",
"# duplicate library part information for every component",
"for",
"c",
"in",
"self",
".",
"components",
":",
"for",
"p",
"in",
"self",
".",
"libparts",
":",
"if",
"p",
".",
"getLibName",
"(",
")",
"==",
"c",
".",
"getLibName",
"(",
")",
":",
"if",
"p",
".",
"getPartName",
"(",
")",
"==",
"c",
".",
"getPartName",
"(",
")",
":",
"c",
".",
"setLibPart",
"(",
"p",
")",
"break",
"else",
":",
"aliases",
"=",
"p",
".",
"getAliases",
"(",
")",
"if",
"aliases",
"and",
"self",
".",
"aliasMatch",
"(",
"c",
".",
"getPartName",
"(",
")",
",",
"aliases",
")",
":",
"c",
".",
"setLibPart",
"(",
"p",
")",
"break",
"if",
"not",
"c",
".",
"getLibPart",
"(",
")",
":",
"print",
"(",
"'missing libpart for ref:'",
",",
"c",
".",
"getRef",
"(",
")",
",",
"c",
".",
"getPartName",
"(",
")",
",",
"c",
".",
"getLibName",
"(",
")",
")"
https://github.com/blakesmith/embedded/blob/61d02c8feed34eff75fe64f29cc8997f1c480867/nome/hardware/KiBoM/KiBOM/netlist_reader.py#L330-L348

kitao/pyxel | f58bd6fe84153219a1e5edc506ae9606614883dc | pyxel/examples/07_snake.py | python | Snake.update

```python
def update(self):
    """Update logic of game.
    Updates the snake and checks for scoring/win condition."""
    if not self.death:
        self.update_direction()
        self.update_snake()
        self.check_death()
        self.check_apple()

    if pyxel.btn(pyxel.KEY_Q):
        pyxel.quit()

    if pyxel.btnp(pyxel.KEY_R):
        self.reset()
```
"def",
"update",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"death",
":",
"self",
".",
"update_direction",
"(",
")",
"self",
".",
"update_snake",
"(",
")",
"self",
".",
"check_death",
"(",
")",
"self",
".",
"check_apple",
"(",
")",
"if",
"pyxel",
".",
"btn",
"(",
"pyxel",
".",
"KEY_Q",
")",
":",
"pyxel",
".",
"quit",
"(",
")",
"if",
"pyxel",
".",
"btnp",
"(",
"pyxel",
".",
"KEY_R",
")",
":",
"self",
".",
"reset",
"(",
")"
https://github.com/kitao/pyxel/blob/f58bd6fe84153219a1e5edc506ae9606614883dc/pyxel/examples/07_snake.py#L84-L98

catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py3/pkg_resources/__init__.py | python | find_on_path

```python
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if _is_unpacked_egg(path_item):
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return

    entries = (
        os.path.join(path_item, child)
        for child in safe_listdir(path_item)
    )

    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )

    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist
```
"def",
"find_on_path",
"(",
"importer",
",",
"path_item",
",",
"only",
"=",
"False",
")",
":",
"path_item",
"=",
"_normalize_cached",
"(",
"path_item",
")",
"if",
"_is_unpacked_egg",
"(",
"path_item",
")",
":",
"yield",
"Distribution",
".",
"from_filename",
"(",
"path_item",
",",
"metadata",
"=",
"PathMetadata",
"(",
"path_item",
",",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"'EGG-INFO'",
")",
")",
")",
"return",
"entries",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"child",
")",
"for",
"child",
"in",
"safe_listdir",
"(",
"path_item",
")",
")",
"# for performance, before sorting by version,",
"# screen entries for only those that will yield",
"# distributions",
"filtered",
"=",
"(",
"entry",
"for",
"entry",
"in",
"entries",
"if",
"dist_factory",
"(",
"path_item",
",",
"entry",
",",
"only",
")",
")",
"# scan for .egg and .egg-info in directory",
"path_item_entries",
"=",
"_by_version_descending",
"(",
"filtered",
")",
"for",
"entry",
"in",
"path_item_entries",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"entry",
")",
"factory",
"=",
"dist_factory",
"(",
"path_item",
",",
"entry",
",",
"only",
")",
"for",
"dist",
"in",
"factory",
"(",
"fullpath",
")",
":",
"yield",
"dist"
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/pkg_resources/__init__.py#L2046-L2078

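`find_on_path` is the workhorse behind the public `pkg_resources.find_distributions()` entry point, which dispatches on the path type. A short usage sketch (assumes setuptools is installed; the output depends on what happens to be on `sys.path`):

```python
import sys
import pkg_resources

# For ordinary directories, find_distributions() delegates to find_on_path.
for path_item in sys.path:
    for dist in pkg_resources.find_distributions(path_item):
        print(dist.project_name, dist.version)
```
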
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/msvc.py | python | EnvironmentInfo.return_env

```python
def return_env(self, exists=True):
    """
    Return environment dict.

    Parameters
    ----------
    exists: bool
        If True, only return existing paths.

    Return
    ------
    dict
        environment
    """
    env = dict(
        include=self._build_paths('include',
                                  [self.VCIncludes,
                                   self.OSIncludes,
                                   self.UCRTIncludes,
                                   self.NetFxSDKIncludes],
                                  exists),
        lib=self._build_paths('lib',
                              [self.VCLibraries,
                               self.OSLibraries,
                               self.FxTools,
                               self.UCRTLibraries,
                               self.NetFxSDKLibraries],
                              exists),
        libpath=self._build_paths('libpath',
                                  [self.VCLibraries,
                                   self.FxTools,
                                   self.VCStoreRefs,
                                   self.OSLibpath],
                                  exists),
        path=self._build_paths('path',
                               [self.VCTools,
                                self.VSTools,
                                self.VsTDb,
                                self.SdkTools,
                                self.SdkSetup,
                                self.FxTools,
                                self.MSBuild,
                                self.HTMLHelpWorkshop,
                                self.FSharp],
                               exists),
    )
    if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist):
        env['py_vcruntime_redist'] = self.VCRuntimeRedist
    return env
```
"def",
"return_env",
"(",
"self",
",",
"exists",
"=",
"True",
")",
":",
"env",
"=",
"dict",
"(",
"include",
"=",
"self",
".",
"_build_paths",
"(",
"'include'",
",",
"[",
"self",
".",
"VCIncludes",
",",
"self",
".",
"OSIncludes",
",",
"self",
".",
"UCRTIncludes",
",",
"self",
".",
"NetFxSDKIncludes",
"]",
",",
"exists",
")",
",",
"lib",
"=",
"self",
".",
"_build_paths",
"(",
"'lib'",
",",
"[",
"self",
".",
"VCLibraries",
",",
"self",
".",
"OSLibraries",
",",
"self",
".",
"FxTools",
",",
"self",
".",
"UCRTLibraries",
",",
"self",
".",
"NetFxSDKLibraries",
"]",
",",
"exists",
")",
",",
"libpath",
"=",
"self",
".",
"_build_paths",
"(",
"'libpath'",
",",
"[",
"self",
".",
"VCLibraries",
",",
"self",
".",
"FxTools",
",",
"self",
".",
"VCStoreRefs",
",",
"self",
".",
"OSLibpath",
"]",
",",
"exists",
")",
",",
"path",
"=",
"self",
".",
"_build_paths",
"(",
"'path'",
",",
"[",
"self",
".",
"VCTools",
",",
"self",
".",
"VSTools",
",",
"self",
".",
"VsTDb",
",",
"self",
".",
"SdkTools",
",",
"self",
".",
"SdkSetup",
",",
"self",
".",
"FxTools",
",",
"self",
".",
"MSBuild",
",",
"self",
".",
"HTMLHelpWorkshop",
",",
"self",
".",
"FSharp",
"]",
",",
"exists",
")",
",",
")",
"if",
"self",
".",
"vs_ver",
">=",
"14",
"and",
"isfile",
"(",
"self",
".",
"VCRuntimeRedist",
")",
":",
"env",
"[",
"'py_vcruntime_redist'",
"]",
"=",
"self",
".",
"VCRuntimeRedist",
"return",
"env"
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/msvc.py#L1720-L1768

wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_gdi.py | python | DC.SetBackgroundMode

```python
def SetBackgroundMode(*args, **kwargs):
    """
    SetBackgroundMode(self, int mode)

    *mode* may be one of ``wx.SOLID`` and ``wx.TRANSPARENT``. This setting
    determines whether text will be drawn with a background colour or
    not.
    """
    return _gdi_.DC_SetBackgroundMode(*args, **kwargs)
```
"def",
"SetBackgroundMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"DC_SetBackgroundMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L4046-L4054

hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/framework/tensor_shape.py | python | Dimension.__mod__

```python
def __mod__(self, other):
    """Returns `self` modulo `other`.

    Dimension moduli are computed as follows:

      Dimension(m)    % Dimension(n)    == Dimension(m % n)
      Dimension(m)    % Dimension(None) == Dimension(None)
      Dimension(None) % Dimension(n)    == Dimension(None)
      Dimension(None) % Dimension(None) == Dimension(None)

    Args:
      other: Another Dimension.

    Returns:
      A Dimension whose value is `self` modulo `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value % other.value)
```
"def",
"__mod__",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"as_dimension",
"(",
"other",
")",
"if",
"self",
".",
"_value",
"is",
"None",
"or",
"other",
".",
"value",
"is",
"None",
":",
"return",
"Dimension",
"(",
"None",
")",
"else",
":",
"return",
"Dimension",
"(",
"self",
".",
"_value",
"%",
"other",
".",
"value",
")"
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/tensor_shape.py#L247-L267

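A short worked example of the truth table in the docstring, hedged on the import path: it assumes a TensorFlow build that exposes `Dimension` at this 2016-era location, as this fork does:

```python
from tensorflow.python.framework.tensor_shape import Dimension

print(repr(Dimension(10) % Dimension(3)))     # Dimension(1)
print(repr(Dimension(10) % Dimension(None)))  # Dimension(None): unknown stays unknown
print(repr(Dimension(None) % Dimension(3)))   # Dimension(None)
print(repr(Dimension(10) % 3))                # Dimension(1): ints coerced via as_dimension
```
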
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/compiler/pyassem.py | python | FlowGraph.getRoot

```python
def getRoot(self):
    """Return nodes appropriate for use with dominator"""
    return self.entry
```
"def",
"getRoot",
"(",
"self",
")",
":",
"return",
"self",
".",
"entry"
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/compiler/pyassem.py#L87-L89

klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/build/fuchsia/boot_data.py | python | ProvisionSSH

```python
def ProvisionSSH():
  """Generates a key pair and config file for SSH using the GN SDK."""
  returncode, out, err = common.RunGnSdkFunction('fuchsia-common.sh',
                                                 'check-fuchsia-ssh-config')
  if returncode != 0:
    logging.error('Command exited with error code %d' % (returncode))
    logging.error('Stdout: %s' % out)
    logging.error('Stderr: %s' % err)
    raise Exception('Failed to provision ssh keys')
```
"def",
"ProvisionSSH",
"(",
")",
":",
"returncode",
",",
"out",
",",
"err",
"=",
"common",
".",
"RunGnSdkFunction",
"(",
"'fuchsia-common.sh'",
",",
"'check-fuchsia-ssh-config'",
")",
"if",
"returncode",
"!=",
"0",
":",
"logging",
".",
"error",
"(",
"'Command exited with error code %d'",
"%",
"(",
"returncode",
")",
")",
"logging",
".",
"error",
"(",
"'Stdout: %s'",
"%",
"out",
")",
"logging",
".",
"error",
"(",
"'Stderr: %s'",
"%",
"err",
")",
"raise",
"Exception",
"(",
"'Failed to provision ssh keys'",
")"
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/fuchsia/boot_data.py#L50-L59

KhronosGroup/SPIR | f33c27876d9f3d5810162b60fa89cc13d2b55725 | tools/scan-view/ScanView.py | python | ScanViewRequestHandler.do_POST

```python
def do_POST(self):
    """Serve a POST request."""
    try:
        length = self.headers.getheader('content-length') or "0"
        try:
            length = int(length)
        except:
            length = 0
        content = self.rfile.read(length)
        fields = parse_query(content)
        f = self.send_head(fields)
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    except Exception,e:
        self.handle_exception(e)
```
"def",
"do_POST",
"(",
"self",
")",
":",
"try",
":",
"length",
"=",
"self",
".",
"headers",
".",
"getheader",
"(",
"'content-length'",
")",
"or",
"\"0\"",
"try",
":",
"length",
"=",
"int",
"(",
"length",
")",
"except",
":",
"length",
"=",
"0",
"content",
"=",
"self",
".",
"rfile",
".",
"read",
"(",
"length",
")",
"fields",
"=",
"parse_query",
"(",
"content",
")",
"f",
"=",
"self",
".",
"send_head",
"(",
"fields",
")",
"if",
"f",
":",
"self",
".",
"copyfile",
"(",
"f",
",",
"self",
".",
"wfile",
")",
"f",
".",
"close",
"(",
")",
"except",
"Exception",
",",
"e",
":",
"self",
".",
"handle_exception",
"(",
"e",
")"
https://github.com/KhronosGroup/SPIR/blob/f33c27876d9f3d5810162b60fa89cc13d2b55725/tools/scan-view/ScanView.py#L219-L234

emlid/Navio | 14b96c83ad57a10580655e3af49c9be1f5fc0ad2 | Python/navio/adafruit_ads1x15.py | python | ADS1x15.readADCDifferential

```python
def readADCDifferential(self, chP=0, chN=1, pga=6144, sps=250):
    "Gets a differential ADC reading from channels chP and chN in mV. \
    The sample rate for this mode (single-shot) can be used to lower the noise \
    (low sps) or to lower the power consumption (high sps) by duty cycling, \
    see data sheet page 14 for more info. \
    The pga must be given in mV, see page 13 for the supported values."
    # Disable comparator, Non-latching, Alert/Rdy active low
    # traditional comparator, single-shot mode
    config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
             self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
             self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
             self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
             self.__ADS1015_REG_CONFIG_MODE_SINGLE

    # Set channels
    if ( (chP == 0) & (chN == 1) ):
        config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
    elif ( (chP == 0) & (chN == 3) ):
        config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
    elif ( (chP == 2) & (chN == 3) ):
        config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
    elif ( (chP == 1) & (chN == 3) ):
        config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
    else:
        if (self.debug):
            print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
        return -1

    # Set samples per second, defaults to 250sps
    # If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
    if (self.ic == self.__IC_ADS1015):
        config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
    else:
        if ( (sps not in self.spsADS1115) & self.debug):
            print "ADS1x15: Invalid sps specified: %d, using 250sps" % sps
        config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)

    # Set PGA/voltage range, defaults to +-6.144V
    if ( (pga not in self.pgaADS1x15) & self.debug):
        print "ADS1x15: Invalid pga specified: %d, using 6144mV" % pga
    config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
    self.pga = pga

    # Set 'start single-conversion' bit
    config |= self.__ADS1015_REG_CONFIG_OS_SINGLE

    # Write config register to the ADC
    bytes = [(config >> 8) & 0xFF, config & 0xFF]
    self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)

    # Wait for the ADC conversion to complete
    # The minimum delay depends on the sps: delay >= 1/sps
    # We add 0.1ms to be sure
    delay = 1.0/sps + 0.0001
    time.sleep(delay)

    # Read the conversion results
    result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
    if (self.ic == self.__IC_ADS1015):
        # Shift right 4 bits for the 12-bit ADS1015 and convert to mV
        return ( ((result[0] << 8) | (result[1] & 0xFF)) >> 4 )*pga/2048.0
    else:
        # Return a mV value for the ADS1115
        # (Take signed values into account as well)
        val = (result[0] << 8) | (result[1])
        if val > 0x7FFF:
            return (val - 0xFFFF)*pga/32768.0
        else:
            return ( (result[0] << 8) | (result[1]) )*pga/32768.0
```
"def",
"readADCDifferential",
"(",
"self",
",",
"chP",
"=",
"0",
",",
"chN",
"=",
"1",
",",
"pga",
"=",
"6144",
",",
"sps",
"=",
"250",
")",
":",
"# Disable comparator, Non-latching, Alert/Rdy active low",
"# traditional comparator, single-shot mode",
"config",
"=",
"self",
".",
"__ADS1015_REG_CONFIG_CQUE_NONE",
"|",
"self",
".",
"__ADS1015_REG_CONFIG_CLAT_NONLAT",
"|",
"self",
".",
"__ADS1015_REG_CONFIG_CPOL_ACTVLOW",
"|",
"self",
".",
"__ADS1015_REG_CONFIG_CMODE_TRAD",
"|",
"self",
".",
"__ADS1015_REG_CONFIG_MODE_SINGLE",
"# Set channels",
"if",
"(",
"(",
"chP",
"==",
"0",
")",
"&",
"(",
"chN",
"==",
"1",
")",
")",
":",
"config",
"|=",
"self",
".",
"__ADS1015_REG_CONFIG_MUX_DIFF_0_1",
"elif",
"(",
"(",
"chP",
"==",
"0",
")",
"&",
"(",
"chN",
"==",
"3",
")",
")",
":",
"config",
"|=",
"self",
".",
"__ADS1015_REG_CONFIG_MUX_DIFF_0_3",
"elif",
"(",
"(",
"chP",
"==",
"2",
")",
"&",
"(",
"chN",
"==",
"3",
")",
")",
":",
"config",
"|=",
"self",
".",
"__ADS1015_REG_CONFIG_MUX_DIFF_2_3",
"elif",
"(",
"(",
"chP",
"==",
"1",
")",
"&",
"(",
"chN",
"==",
"3",
")",
")",
":",
"config",
"|=",
"self",
".",
"__ADS1015_REG_CONFIG_MUX_DIFF_1_3",
"else",
":",
"if",
"(",
"self",
".",
"debug",
")",
":",
"print",
"\"ADS1x15: Invalid channels specified: %d, %d\"",
"%",
"(",
"chP",
",",
"chN",
")",
"return",
"-",
"1",
"# Set sample per seconds, defaults to 250sps",
"# If sps is in the dictionary (defined in init()) it returns the value of the constant",
"# othewise it returns the value for 250sps. This saves a lot of if/elif/else code!",
"if",
"(",
"self",
".",
"ic",
"==",
"self",
".",
"__IC_ADS1015",
")",
":",
"config",
"|=",
"self",
".",
"spsADS1015",
".",
"setdefault",
"(",
"sps",
",",
"self",
".",
"__ADS1015_REG_CONFIG_DR_1600SPS",
")",
"else",
":",
"if",
"(",
"(",
"sps",
"not",
"in",
"self",
".",
"spsADS1115",
")",
"&",
"self",
".",
"debug",
")",
":",
"print",
"\"ADS1x15: Invalid pga specified: %d, using 6144mV\"",
"%",
"sps",
"config",
"|=",
"self",
".",
"spsADS1115",
".",
"setdefault",
"(",
"sps",
",",
"self",
".",
"__ADS1115_REG_CONFIG_DR_250SPS",
")",
"# Set PGA/voltage range, defaults to +-6.144V",
"if",
"(",
"(",
"pga",
"not",
"in",
"self",
".",
"pgaADS1x15",
")",
"&",
"self",
".",
"debug",
")",
":",
"print",
"\"ADS1x15: Invalid pga specified: %d, using 6144mV\"",
"%",
"sps",
"config",
"|=",
"self",
".",
"pgaADS1x15",
".",
"setdefault",
"(",
"pga",
",",
"self",
".",
"__ADS1015_REG_CONFIG_PGA_6_144V",
")",
"self",
".",
"pga",
"=",
"pga",
"# Set 'start single-conversion' bit",
"config",
"|=",
"self",
".",
"__ADS1015_REG_CONFIG_OS_SINGLE",
"# Write config register to the ADC",
"bytes",
"=",
"[",
"(",
"config",
">>",
"8",
")",
"&",
"0xFF",
",",
"config",
"&",
"0xFF",
"]",
"self",
".",
"i2c",
".",
"writeList",
"(",
"self",
".",
"__ADS1015_REG_POINTER_CONFIG",
",",
"bytes",
")",
"# Wait for the ADC conversion to complete",
"# The minimum delay depends on the sps: delay >= 1/sps",
"# We add 0.1ms to be sure",
"delay",
"=",
"1.0",
"/",
"sps",
"+",
"0.0001",
"time",
".",
"sleep",
"(",
"delay",
")",
"# Read the conversion results",
"result",
"=",
"self",
".",
"i2c",
".",
"readList",
"(",
"self",
".",
"__ADS1015_REG_POINTER_CONVERT",
",",
"2",
")",
"if",
"(",
"self",
".",
"ic",
"==",
"self",
".",
"__IC_ADS1015",
")",
":",
"# Shift right 4 bits for the 12-bit ADS1015 and convert to mV",
"return",
"(",
"(",
"(",
"result",
"[",
"0",
"]",
"<<",
"8",
")",
"|",
"(",
"result",
"[",
"1",
"]",
"&",
"0xFF",
")",
")",
">>",
"4",
")",
"*",
"pga",
"/",
"2048.0",
"else",
":",
"# Return a mV value for the ADS1115",
"# (Take signed values into account as well)",
"val",
"=",
"(",
"result",
"[",
"0",
"]",
"<<",
"8",
")",
"|",
"(",
"result",
"[",
"1",
"]",
")",
"if",
"val",
">",
"0x7FFF",
":",
"return",
"(",
"val",
"-",
"0xFFFF",
")",
"*",
"pga",
"/",
"32768.0",
"else",
":",
"return",
"(",
"(",
"result",
"[",
"0",
"]",
"<<",
"8",
")",
"|",
"(",
"result",
"[",
"1",
"]",
")",
")",
"*",
"pga",
"/",
"32768.0"
https://github.com/emlid/Navio/blob/14b96c83ad57a10580655e3af49c9be1f5fc0ad2/Python/navio/adafruit_ads1x15.py#L230-L300

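A hedged usage sketch for the driver method above. It needs real hardware (an ADS1115 on the I2C bus, as on a Navio board), and the constructor arguments (`address=0x48`, `ic=0x01` for the ADS1115) follow the common Adafruit convention this file derives from; treat them as assumptions, not quoted API:

```python
# Assumes the adafruit_ads1x15 module above is importable and the chip
# is wired up; address/ic values are conventional assumptions.
from adafruit_ads1x15 import ADS1x15

ADS1115 = 0x01  # ic identifier used by this driver family
adc = ADS1x15(address=0x48, ic=ADS1115)

# Differential reading between AIN0 and AIN1, +/-4.096 V range, 250 sps.
millivolts = adc.readADCDifferential(chP=0, chN=1, pga=4096, sps=250)
print("differential reading: %.3f mV" % millivolts)
```
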
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/mapreduce/mapreduce/input_readers.py | python | _ReducerReader.decode_data

```python
def decode_data(data):
  """Decodes data encoded with the encode_data function."""
  return pickle.loads(base64.b64decode(data))
```
"def",
"decode_data",
"(",
"data",
")",
":",
"return",
"pickle",
".",
"loads",
"(",
"base64",
".",
"b64decode",
"(",
"data",
")",
")"
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mapreduce/mapreduce/input_readers.py#L2689-L2691

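The matching `encode_data` is not part of this row; from the decoder it can only be pickle-then-base64, so here is a standard-library round-trip sketch in which the `encode_data` body is inferred rather than quoted:

```python
import base64
import pickle

def encode_data(data):
    # Inferred inverse of decode_data: pickle, then base64 for safe transport.
    return base64.b64encode(pickle.dumps(data))

def decode_data(data):
    """Decodes data encoded with the encode_data function."""
    return pickle.loads(base64.b64decode(data))

payload = {"key": "k1", "values": [1, 2, 3]}
assert decode_data(encode_data(payload)) == payload
```
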
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/linalg/decomp.py | python | eigvalsh_tridiagonal

```python
def eigvalsh_tridiagonal(d, e, select='a', select_range=None,
                         check_finite=True, tol=0., lapack_driver='auto'):
    """
    Solve eigenvalue problem for a real symmetric tridiagonal matrix.

    Find eigenvalues `w` of ``a``::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    For a real symmetric matrix ``a`` with diagonal elements `d` and
    off-diagonal elements `e`.

    Parameters
    ----------
    d : ndarray, shape (ndim,)
        The diagonal elements of the array.
    e : ndarray, shape (ndim-1,)
        The off-diagonal elements of the array.
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    tol : float
        The absolute tolerance to which each eigenvalue is required
        (only used when ``lapack_driver='stebz'``).
        An eigenvalue (or cluster) is considered to have converged if it
        lies in an interval of this width. If <= 0. (default),
        the value ``eps*|a|`` is used where eps is the machine precision,
        and ``|a|`` is the 1-norm of the matrix ``a``.
    lapack_driver : str
        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
        and 'stebz' otherwise. 'sterf' and 'stev' can only be used when
        ``select='a'``.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigh_tridiagonal : eigenvalues and right eigenvectors for
        symmetric/Hermitian tridiagonal matrices

    Examples
    --------
    >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh
    >>> d = 3*np.ones(4)
    >>> e = -1*np.ones(3)
    >>> w = eigvalsh_tridiagonal(d, e)
    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
    >>> w2 = eigvalsh(A)  # Verify with other eigenvalue routines
    >>> np.allclose(w - w2, np.zeros(4))
    True
    """
    return eigh_tridiagonal(
        d, e, eigvals_only=True, select=select, select_range=select_range,
        check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)
```
"def",
"eigvalsh_tridiagonal",
"(",
"d",
",",
"e",
",",
"select",
"=",
"'a'",
",",
"select_range",
"=",
"None",
",",
"check_finite",
"=",
"True",
",",
"tol",
"=",
"0.",
",",
"lapack_driver",
"=",
"'auto'",
")",
":",
"return",
"eigh_tridiagonal",
"(",
"d",
",",
"e",
",",
"eigvals_only",
"=",
"True",
",",
"select",
"=",
"select",
",",
"select_range",
"=",
"select_range",
",",
"check_finite",
"=",
"check_finite",
",",
"tol",
"=",
"tol",
",",
"lapack_driver",
"=",
"lapack_driver",
")"
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/linalg/decomp.py#L956-L1033

windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/wsgiref/util.py | python | is_hop_by_hop

```python
def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return _hoppish(header_name.lower())
```
"def",
"is_hop_by_hop",
"(",
"header_name",
")",
":",
"return",
"_hoppish",
"(",
"header_name",
".",
"lower",
"(",
")",
")"
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/wsgiref/util.py#L163-L165

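This helper ships in the standard library's `wsgiref`, so it can be exercised directly. Hop-by-hop headers (RFC 2616, section 13.5.1) are consumed by each intermediary and must not be forwarded, which is what a compliant WSGI gateway uses this check for:

```python
from wsgiref.util import is_hop_by_hop

print(is_hop_by_hop("Connection"))         # True: hop-by-hop
print(is_hop_by_hop("Transfer-Encoding"))  # True: hop-by-hop
print(is_hop_by_hop("Content-Type"))       # False: end-to-end, forwarded
```
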
facebook/ThreatExchange | 31914a51820c73c8a0daffe62ccca29a6e3d359e | hasher-matcher-actioner/hmalib/scripts/cli/soak.py | python | SoakShell.do_latency

```python
def do_latency(self, arg):
    "Get the latency of submissions: latency"
    if self.listener:
        if data := self.listener.get_submission_latencies():
            _, _, latencies = list(zip(*data))
            latencies = np.array(latencies[-10:])
            if latencies.size:
                print(
                    "Rough delay between submit to action request received (10 most recent)"
                )
                print(f"avg: {latencies.mean()} seconds")
                return
        print("No requests received yet.")
        return
    print("No listener found.")
```
"def",
"do_latency",
"(",
"self",
",",
"arg",
")",
":",
"if",
"self",
".",
"listener",
":",
"if",
"data",
":=",
"self",
".",
"listener",
".",
"get_submission_latencies",
"(",
")",
":",
"_",
",",
"_",
",",
"latencies",
"=",
"list",
"(",
"zip",
"(",
"*",
"data",
")",
")",
"latencies",
"=",
"np",
".",
"array",
"(",
"latencies",
"[",
"-",
"10",
":",
"]",
")",
"if",
"latencies",
".",
"size",
":",
"print",
"(",
"\"Rough delay between submit to action request received (10 most recent)\"",
")",
"print",
"(",
"f\"avg: {latencies.mean()} seconds\"",
")",
"return",
"print",
"(",
"\"No requests received yet.\"",
")",
"return",
"print",
"(",
"\"No listener found.\"",
")"
https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/hasher-matcher-actioner/hmalib/scripts/cli/soak.py#L208-L222

ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/lib/_datasource.py | python | Repository._fullpath

```python
def _fullpath(self, path):
    """Return complete path for path. Prepends baseurl if necessary."""
    splitpath = path.split(self._baseurl, 2)
    if len(splitpath) == 1:
        result = os.path.join(self._baseurl, path)
    else:
        result = path  # path contains baseurl already
    return result
```
"def",
"_fullpath",
"(",
"self",
",",
"path",
")",
":",
"splitpath",
"=",
"path",
".",
"split",
"(",
"self",
".",
"_baseurl",
",",
"2",
")",
"if",
"len",
"(",
"splitpath",
")",
"==",
"1",
":",
"result",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_baseurl",
",",
"path",
")",
"else",
":",
"result",
"=",
"path",
"# path contains baseurl already",
"return",
"result"
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/lib/_datasource.py#L545-L552

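A standard-library sketch of the prepend-if-missing behaviour, with the method's logic lifted into a free function so it can be run without this vendored NumPy:

```python
import os

def _fullpath(baseurl, path):
    # Same logic as Repository._fullpath above, as a free function.
    splitpath = path.split(baseurl, 2)
    if len(splitpath) == 1:
        return os.path.join(baseurl, path)
    return path  # path contains baseurl already

print(_fullpath('/data/archive', 'run1/results.txt'))
# -> /data/archive/run1/results.txt
print(_fullpath('/data/archive', '/data/archive/run1/results.txt'))
# -> /data/archive/run1/results.txt (unchanged)
```
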
htcondor/htcondor | 4829724575176d1d6c936e4693dfd78a728569b0 | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py | python | ISkypeEvents.SmsMessageStatusChanged

```python
def SmsMessageStatusChanged(self, Message, Status):
    '''This event is caused by a change in the SMS message status.

    @param Message: SMS message object.
    @type Message: L{ISmsMessage}
    @param Status: New status of the SMS message.
    @type Status: L{SMS message status<enums.smsMessageStatusUnknown>}
    '''
```
"def",
"SmsMessageStatusChanged",
"(",
"self",
",",
"Message",
",",
"Status",
")",
":"
] | https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py#L1639-L1646 |
||
redpony/cdec | f7c4899b174d86bc70b40b1cae68dcad364615cb | realtime/rt/rt.py | python | RealtimeTranslator.grammar | (self, sentence, ctx_name=None) | return grammar_file | Extract a sentence-level grammar on demand (or return cached)
Threadsafe wrt extractor but NOT decoder. Acquire ctx_name lock
before calling. | Extract a sentence-level grammar on demand (or return cached)
Threadsafe wrt extractor but NOT decoder. Acquire ctx_name lock
before calling. | [
"Extract",
"a",
"sentence",
"-",
"level",
"grammar",
"on",
"demand",
"(",
"or",
"return",
"cached",
")",
"Threadsafe",
"wrt",
"extractor",
"but",
"NOT",
"decoder",
".",
"Acquire",
"ctx_name",
"lock",
"before",
"calling",
"."
] | def grammar(self, sentence, ctx_name=None):
'''Extract a sentence-level grammar on demand (or return cached)
Threadsafe wrt extractor but NOT decoder. Acquire ctx_name lock
before calling.'''
self.extractor_lock.acquire()
self.lazy_ctx(ctx_name)
grammar_dict = self.grammar_dict[ctx_name]
grammar_file = grammar_dict.get(sentence, None)
# Cache hit
if grammar_file:
logger.info('({}) Grammar cache hit: {}'.format(ctx_name, grammar_file))
self.extractor_lock.release()
return grammar_file
# Extract and cache
(fid, grammar_file) = tempfile.mkstemp(dir=self.decoders[ctx_name].tmp, prefix='grammar.', suffix='.gz')
os.close(fid)
self.extractor.grammar(sentence, grammar_file, ctx_name)
grammar_files = self.grammar_files[ctx_name]
if len(grammar_files) == self.cache_size:
rm_sent = grammar_files.popleft()
# If not already removed by learn method
if rm_sent in grammar_dict:
rm_grammar = grammar_dict.pop(rm_sent)
os.remove(rm_grammar)
grammar_files.append(sentence)
grammar_dict[sentence] = grammar_file
self.extractor_lock.release()
return grammar_file | [
"def",
"grammar",
"(",
"self",
",",
"sentence",
",",
"ctx_name",
"=",
"None",
")",
":",
"self",
".",
"extractor_lock",
".",
"acquire",
"(",
")",
"self",
".",
"lazy_ctx",
"(",
"ctx_name",
")",
"grammar_dict",
"=",
"self",
".",
"grammar_dict",
"[",
"ctx_name",
"]",
"grammar_file",
"=",
"grammar_dict",
".",
"get",
"(",
"sentence",
",",
"None",
")",
"# Cache hit",
"if",
"grammar_file",
":",
"logger",
".",
"info",
"(",
"'({}) Grammar cache hit: {}'",
".",
"format",
"(",
"ctx_name",
",",
"grammar_file",
")",
")",
"self",
".",
"extractor_lock",
".",
"release",
"(",
")",
"return",
"grammar_file",
"# Extract and cache",
"(",
"fid",
",",
"grammar_file",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"self",
".",
"decoders",
"[",
"ctx_name",
"]",
".",
"tmp",
",",
"prefix",
"=",
"'grammar.'",
",",
"suffix",
"=",
"'.gz'",
")",
"os",
".",
"close",
"(",
"fid",
")",
"self",
".",
"extractor",
".",
"grammar",
"(",
"sentence",
",",
"grammar_file",
",",
"ctx_name",
")",
"grammar_files",
"=",
"self",
".",
"grammar_files",
"[",
"ctx_name",
"]",
"if",
"len",
"(",
"grammar_files",
")",
"==",
"self",
".",
"cache_size",
":",
"rm_sent",
"=",
"grammar_files",
".",
"popleft",
"(",
")",
"# If not already removed by learn method",
"if",
"rm_sent",
"in",
"grammar_dict",
":",
"rm_grammar",
"=",
"grammar_dict",
".",
"pop",
"(",
"rm_sent",
")",
"os",
".",
"remove",
"(",
"rm_grammar",
")",
"grammar_files",
".",
"append",
"(",
"sentence",
")",
"grammar_dict",
"[",
"sentence",
"]",
"=",
"grammar_file",
"self",
".",
"extractor_lock",
".",
"release",
"(",
")",
"return",
"grammar_file"
] | https://github.com/redpony/cdec/blob/f7c4899b174d86bc70b40b1cae68dcad364615cb/realtime/rt/rt.py#L254-L281 |
|
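The caching in `grammar` pairs a dict with a bounded deque that records insertion order and evicts the oldest entry once `cache_size` grammars exist. A minimal sketch of that pattern, detached from the extractor and its locking; every name here is illustrative:

```python
from collections import deque

class BoundedCache:
    """Dict-backed cache evicting its oldest key once 'size' is reached."""

    def __init__(self, size):
        self.size = size
        self.order = deque()  # insertion order, oldest on the left
        self.data = {}

    def put(self, key, value):
        if key in self.data:
            return  # cache hit, nothing to evict
        if len(self.order) == self.size:
            oldest = self.order.popleft()
            self.data.pop(oldest, None)  # may already be gone, as in rt.py
        self.order.append(key)
        self.data[key] = value

cache = BoundedCache(2)
for k, v in [("s1", "g1"), ("s2", "g2"), ("s3", "g3")]:
    cache.put(k, v)
print(sorted(cache.data))  # ['s2', 's3'] -- 's1' was evicted
```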
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/buildscripts/generate_compile_expansions.py | python | generate_expansions | () | Entry point for the script.
This calls functions to generate version and scons cache expansions and
writes them to a file. | Entry point for the script. | [
"Entry",
"point",
"for",
"the",
"script",
"."
] | def generate_expansions():
"""Entry point for the script.
This calls functions to generate version and scons cache expansions and
writes them to a file.
"""
args = parse_args()
expansions = {}
expansions.update(generate_version_expansions(args))
expansions.update(generate_scons_cache_expansions())
with open(args.out, "w") as out:
print("saving compile expansions to {0}: ({1})".format(args.out, expansions))
yaml.safe_dump(expansions, out, default_flow_style=False) | [
"def",
"generate_expansions",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"expansions",
"=",
"{",
"}",
"expansions",
".",
"update",
"(",
"generate_version_expansions",
"(",
"args",
")",
")",
"expansions",
".",
"update",
"(",
"generate_scons_cache_expansions",
"(",
")",
")",
"with",
"open",
"(",
"args",
".",
"out",
",",
"\"w\"",
")",
"as",
"out",
":",
"print",
"(",
"\"saving compile expansions to {0}: ({1})\"",
".",
"format",
"(",
"args",
".",
"out",
",",
"expansions",
")",
")",
"yaml",
".",
"safe_dump",
"(",
"expansions",
",",
"out",
",",
"default_flow_style",
"=",
"False",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/generate_compile_expansions.py#L20-L33 |
||
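The script's last step is just merging two expansion dicts and serializing them with PyYAML. A sketch of that step; the keys are stand-ins, not Evergreen's real expansion names:

```python
import yaml  # PyYAML

expansions = {}
expansions.update({"version": "4.0.0"})              # stand-in version expansions
expansions.update({"scons_cache_mode": "nolinked"})  # stand-in cache expansions

with open("expansions.yml", "w") as out:
    print("saving compile expansions to expansions.yml: ({0})".format(expansions))
    yaml.safe_dump(expansions, out, default_flow_style=False)
```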
ArduPilot/ardupilot | 6e684b3496122b8158ac412b609d00004b7ac306 | Tools/scripts/build_binaries.py | python | build_binaries.skip_board_waf | (self, board) | return True | check if we should skip this build because we do not support the
board in this release | check if we should skip this build because we do not support the
board in this release | [
"check",
"if",
"we",
"should",
"skip",
"this",
"build",
"because",
"we",
"do",
"not",
"support",
"the",
"board",
"in",
"this",
"release"
] | def skip_board_waf(self, board):
'''check if we should skip this build because we do not support the
board in this release
'''
try:
out = self.run_program('waf', ['./waf', 'configure', '--board=BOARDTEST'], False)
lines = out.split('\n')
needles = ["BOARDTEST' (choose from", "BOARDTEST': choices are"]
for line in lines:
for needle in needles:
idx = line.find(needle)
if idx != -1:
break
if idx != -1:
line = line[idx+len(needle):-1]
line = line.replace("'", "")
line = line.replace(" ", "")
boards = line.split(",")
return board not in boards
except IOError as e:
if e.errno != 2:
raise
self.progress("Skipping unsupported board %s" % (board,))
return True | [
"def",
"skip_board_waf",
"(",
"self",
",",
"board",
")",
":",
"try",
":",
"out",
"=",
"self",
".",
"run_program",
"(",
"'waf'",
",",
"[",
"'./waf'",
",",
"'configure'",
",",
"'--board=BOARDTEST'",
"]",
",",
"False",
")",
"lines",
"=",
"out",
".",
"split",
"(",
"'\\n'",
")",
"needles",
"=",
"[",
"\"BOARDTEST' (choose from\"",
",",
"\"BOARDTEST': choices are\"",
"]",
"for",
"line",
"in",
"lines",
":",
"for",
"needle",
"in",
"needles",
":",
"idx",
"=",
"line",
".",
"find",
"(",
"needle",
")",
"if",
"idx",
"!=",
"-",
"1",
":",
"break",
"if",
"idx",
"!=",
"-",
"1",
":",
"line",
"=",
"line",
"[",
"idx",
"+",
"len",
"(",
"needle",
")",
":",
"-",
"1",
"]",
"line",
"=",
"line",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
"boards",
"=",
"line",
".",
"split",
"(",
"\",\"",
")",
"return",
"board",
"not",
"in",
"boards",
"except",
"IOError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"2",
":",
"raise",
"self",
".",
"progress",
"(",
"\"Skipping unsupported board %s\"",
"%",
"(",
"board",
",",
")",
")",
"return",
"True"
] | https://github.com/ArduPilot/ardupilot/blob/6e684b3496122b8158ac412b609d00004b7ac306/Tools/scripts/build_binaries.py#L209-L234 |
|
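The probe above configures waf with the bogus board name `BOARDTEST` and scrapes the list of valid choices out of the error text. The core parsing step, isolated on an invented sample line:

```python
line = "error: argument --board: invalid choice: 'BOARDTEST' (choose from 'sitl', 'CubeOrange')"
needle = "BOARDTEST' (choose from"
idx = line.find(needle)
if idx != -1:
    # Strip the trailing ')' plus all quotes and spaces, then split on commas.
    boards = line[idx + len(needle):-1].replace("'", "").replace(" ", "").split(",")
    print(boards)  # ['sitl', 'CubeOrange']
```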
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/ndimage/filters.py | python | sobel | (input, axis=-1, output=None, mode="reflect", cval=0.0) | return return_value | Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result) | Calculate a Sobel filter. | [
"Calculate",
"a",
"Sobel",
"filter",
"."
] | def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value | [
"def",
"sobel",
"(",
"input",
",",
"axis",
"=",
"-",
"1",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
")",
":",
"input",
"=",
"numpy",
".",
"asarray",
"(",
"input",
")",
"axis",
"=",
"_ni_support",
".",
"_check_axis",
"(",
"axis",
",",
"input",
".",
"ndim",
")",
"output",
",",
"return_value",
"=",
"_ni_support",
".",
"_get_output",
"(",
"output",
",",
"input",
")",
"correlate1d",
"(",
"input",
",",
"[",
"-",
"1",
",",
"0",
",",
"1",
"]",
",",
"axis",
",",
"output",
",",
"mode",
",",
"cval",
",",
"0",
")",
"axes",
"=",
"[",
"ii",
"for",
"ii",
"in",
"range",
"(",
"input",
".",
"ndim",
")",
"if",
"ii",
"!=",
"axis",
"]",
"for",
"ii",
"in",
"axes",
":",
"correlate1d",
"(",
"output",
",",
"[",
"1",
",",
"2",
",",
"1",
"]",
",",
"ii",
",",
"output",
",",
"mode",
",",
"cval",
",",
"0",
")",
"return",
"return_value"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/ndimage/filters.py#L361-L388 |
|
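`scipy.ndimage.sobel` is public SciPy API, so the docstring's example can be made concrete on a tiny step-edge image:

```python
import numpy as np
from scipy import ndimage

img = np.zeros((5, 5))
img[:, 2:] = 1.0                    # vertical step edge between columns 1 and 2
edges = ndimage.sobel(img, axis=1)  # derivative across columns
print(edges[2])                     # [0. 4. 4. 0. 0.] -- response straddles the edge
```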
protocolbuffers/protobuf | b5ab0b7a18b7336c60130f4ddb2d97c51792f896 | python/google/protobuf/descriptor_pool.py | python | DescriptorPool._ConvertEnumDescriptor | (self, enum_proto, package=None, file_desc=None,
containing_type=None, scope=None, top_level=False) | return desc | Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
Args:
enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the enum descriptor.
containing_type: The type containing this enum.
scope: Scope containing available types.
top_level: If True, the enum is a top level symbol. If False, the enum
is defined inside a message.
Returns:
The added descriptor | Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. | [
"Make",
"a",
"protobuf",
"EnumDescriptor",
"given",
"an",
"EnumDescriptorProto",
"protobuf",
"."
] | def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
containing_type=None, scope=None, top_level=False):
"""Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
Args:
enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the enum descriptor.
containing_type: The type containing this enum.
scope: Scope containing available types.
top_level: If True, the enum is a top level symbol. If False, the enum
is defined inside a message.
Returns:
The added descriptor
"""
if package:
enum_name = '.'.join((package, enum_proto.name))
else:
enum_name = enum_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
values = [self._MakeEnumValueDescriptor(value, index)
for index, value in enumerate(enum_proto.value)]
desc = descriptor.EnumDescriptor(name=enum_proto.name,
full_name=enum_name,
filename=file_name,
file=file_desc,
values=values,
containing_type=containing_type,
options=_OptionsOrNone(enum_proto),
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
scope['.%s' % enum_name] = desc
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._enum_descriptors[enum_name] = desc
# Add top level enum values.
if top_level:
for value in values:
full_name = _NormalizeFullyQualifiedName(
'.'.join((package, value.name)))
self._CheckConflictRegister(value, full_name, file_name)
self._top_enum_values[full_name] = value
return desc | [
"def",
"_ConvertEnumDescriptor",
"(",
"self",
",",
"enum_proto",
",",
"package",
"=",
"None",
",",
"file_desc",
"=",
"None",
",",
"containing_type",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"top_level",
"=",
"False",
")",
":",
"if",
"package",
":",
"enum_name",
"=",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"enum_proto",
".",
"name",
")",
")",
"else",
":",
"enum_name",
"=",
"enum_proto",
".",
"name",
"if",
"file_desc",
"is",
"None",
":",
"file_name",
"=",
"None",
"else",
":",
"file_name",
"=",
"file_desc",
".",
"name",
"values",
"=",
"[",
"self",
".",
"_MakeEnumValueDescriptor",
"(",
"value",
",",
"index",
")",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"enum_proto",
".",
"value",
")",
"]",
"desc",
"=",
"descriptor",
".",
"EnumDescriptor",
"(",
"name",
"=",
"enum_proto",
".",
"name",
",",
"full_name",
"=",
"enum_name",
",",
"filename",
"=",
"file_name",
",",
"file",
"=",
"file_desc",
",",
"values",
"=",
"values",
",",
"containing_type",
"=",
"containing_type",
",",
"options",
"=",
"_OptionsOrNone",
"(",
"enum_proto",
")",
",",
"# pylint: disable=protected-access",
"create_key",
"=",
"descriptor",
".",
"_internal_create_key",
")",
"scope",
"[",
"'.%s'",
"%",
"enum_name",
"]",
"=",
"desc",
"self",
".",
"_CheckConflictRegister",
"(",
"desc",
",",
"desc",
".",
"full_name",
",",
"desc",
".",
"file",
".",
"name",
")",
"self",
".",
"_enum_descriptors",
"[",
"enum_name",
"]",
"=",
"desc",
"# Add top level enum values.",
"if",
"top_level",
":",
"for",
"value",
"in",
"values",
":",
"full_name",
"=",
"_NormalizeFullyQualifiedName",
"(",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"value",
".",
"name",
")",
")",
")",
"self",
".",
"_CheckConflictRegister",
"(",
"value",
",",
"full_name",
",",
"file_name",
")",
"self",
".",
"_top_enum_values",
"[",
"full_name",
"]",
"=",
"value",
"return",
"desc"
] | https://github.com/protocolbuffers/protobuf/blob/b5ab0b7a18b7336c60130f4ddb2d97c51792f896/python/google/protobuf/descriptor_pool.py#L922-L972 |
|
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/draftguitools/gui_snapper.py | python | Snapper.setAngle | (self, delta=None) | Keep the current angle. | Keep the current angle. | [
"Keep",
"the",
"current",
"angle",
"."
] | def setAngle(self, delta=None):
"""Keep the current angle."""
if delta:
self.mask = delta
elif isinstance(self.mask, App.Vector):
self.mask = None
elif self.trackLine:
if self.trackLine.Visible:
self.mask = self.trackLine.p2().sub(self.trackLine.p1()) | [
"def",
"setAngle",
"(",
"self",
",",
"delta",
"=",
"None",
")",
":",
"if",
"delta",
":",
"self",
".",
"mask",
"=",
"delta",
"elif",
"isinstance",
"(",
"self",
".",
"mask",
",",
"App",
".",
"Vector",
")",
":",
"self",
".",
"mask",
"=",
"None",
"elif",
"self",
".",
"trackLine",
":",
"if",
"self",
".",
"trackLine",
".",
"Visible",
":",
"self",
".",
"mask",
"=",
"self",
".",
"trackLine",
".",
"p2",
"(",
")",
".",
"sub",
"(",
"self",
".",
"trackLine",
".",
"p1",
"(",
")",
")"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_snapper.py#L1252-L1260 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/syntax/_inno.py | python | SyntaxData.GetKeywords | (self) | return [SECTION_KW, KEYWORDS, PARAM_KW, PREPROC_KW, PASCAL_KW] | Returns Specified Keywords List | Returns Specified Keywords List | [
"Returns",
"Specified",
"Keywords",
"List"
] | def GetKeywords(self):
"""Returns Specified Keywords List """
return [SECTION_KW, KEYWORDS, PARAM_KW, PREPROC_KW, PASCAL_KW] | [
"def",
"GetKeywords",
"(",
"self",
")",
":",
"return",
"[",
"SECTION_KW",
",",
"KEYWORDS",
",",
"PARAM_KW",
",",
"PREPROC_KW",
",",
"PASCAL_KW",
"]"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/syntax/_inno.py#L129-L131 |
|
msftguy/ssh-rd | a5f3a79daeac5844edebf01916c9613563f1c390 | _3rd/boost_1_48_0/tools/build/v2/util/utility.py | python | on_windows | () | return False | Returns true if running on windows, whether in cygwin or not. | Returns true if running on windows, whether in cygwin or not. | [
"Returns",
"true",
"if",
"running",
"on",
"windows",
"whether",
"in",
"cygwin",
"or",
"not",
"."
] | def on_windows ():
""" Returns true if running on windows, whether in cygwin or not.
"""
if bjam.variable("NT"):
return True
elif bjam.variable("UNIX"):
uname = bjam.variable("JAMUNAME")
if uname and uname[0].startswith("CYGWIN"):
return True
return False | [
"def",
"on_windows",
"(",
")",
":",
"if",
"bjam",
".",
"variable",
"(",
"\"NT\"",
")",
":",
"return",
"True",
"elif",
"bjam",
".",
"variable",
"(",
"\"UNIX\"",
")",
":",
"uname",
"=",
"bjam",
".",
"variable",
"(",
"\"JAMUNAME\"",
")",
"if",
"uname",
"and",
"uname",
"[",
"0",
"]",
".",
"startswith",
"(",
"\"CYGWIN\"",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/msftguy/ssh-rd/blob/a5f3a79daeac5844edebf01916c9613563f1c390/_3rd/boost_1_48_0/tools/build/v2/util/utility.py#L143-L155 |
|
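`on_windows` queries Boost.Build's bjam variables (`NT`, `UNIX`, `JAMUNAME`). Outside bjam, an analogous check in plain Python, offered as an analogy rather than Boost.Build API, might be:

```python
import platform
import sys

def on_windows():
    # Native Windows or a Cygwin-hosted interpreter both count.
    return sys.platform in ("win32", "cygwin") or \
        platform.uname()[0].startswith("CYGWIN")
```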
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/protobuf/python/google/protobuf/internal/decoder.py | python | _RaiseInvalidWireType | (buffer, pos, end) | Skip function for unknown wire types. Raises an exception. | Skip function for unknown wire types. Raises an exception. | [
"Skip",
"function",
"for",
"unknown",
"wire",
"types",
".",
"Raises",
"an",
"exception",
"."
] | def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.') | [
"def",
"_RaiseInvalidWireType",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
":",
"raise",
"_DecodeError",
"(",
"'Tag had invalid wire type.'",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/protobuf/python/google/protobuf/internal/decoder.py#L676-L679 |
||
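In the decoder this function fills the dispatch slots for wire-type values the protobuf format never defines, so a corrupt tag fails loudly instead of being skipped as garbage. A self-contained sketch of that dispatch pattern; the varint skipper is deliberately simplified:

```python
class DecodeError(Exception):
    pass

def raise_invalid_wire_type(buffer, pos, end):
    raise DecodeError("Tag had invalid wire type.")

def skip_varint(buffer, pos, end):
    # Step past continuation bytes (high bit set), then the final byte.
    while buffer[pos] & 0x80:
        pos += 1
        if pos >= end:
            raise DecodeError("Truncated message.")
    return pos + 1

# Wire types 0-5 are defined by the wire format; 6 and 7 are not.
skippers = {0: skip_varint,
            6: raise_invalid_wire_type,
            7: raise_invalid_wire_type}
print(skippers[0](b"\x96\x01", 0, 2))  # 2 -- the varint occupies two bytes
```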
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/js2c.py | python | PrepareSources | (source_files, native_type, emit_js) | return result | Read, prepare and assemble the list of source files.
Args:
source_files: List of JavaScript-ish source files. A file named macros.py
will be treated as a list of macros.
native_type: String corresponding to a NativeType enum value, allowing us
to treat different types of sources differently.
emit_js: True if we should skip the byte conversion and just leave the
sources as JS strings.
Returns:
An instance of Sources. | Read, prepare and assemble the list of source files. | [
"Read",
"prepare",
"and",
"assemble",
"the",
"list",
"of",
"source",
"files",
"."
] | def PrepareSources(source_files, native_type, emit_js):
"""Read, prepare and assemble the list of source files.
Args:
source_files: List of JavaScript-ish source files. A file named macros.py
will be treated as a list of macros.
native_type: String corresponding to a NativeType enum value, allowing us
to treat different types of sources differently.
emit_js: True if we should skip the byte conversion and just leave the
sources as JS strings.
Returns:
An instance of Sources.
"""
macro_file = None
macro_files = filter(IsMacroFile, source_files)
assert len(macro_files) in [0, 1]
if macro_files:
source_files.remove(macro_files[0])
macro_file = macro_files[0]
message_template_file = None
message_template_files = filter(IsMessageTemplateFile, source_files)
assert len(message_template_files) in [0, 1]
if message_template_files:
source_files.remove(message_template_files[0])
message_template_file = message_template_files[0]
filters = None
if native_type in ("EXTRAS", "EXPERIMENTAL_EXTRAS"):
filters = BuildExtraFilterChain()
else:
filters = BuildFilterChain(macro_file, message_template_file)
# Sort 'debugger' sources first.
source_files = sorted(source_files,
lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
# Have a single not-quite-empty source file if there are none present;
# otherwise you get errors trying to compile an empty C++ array.
# It cannot be empty (or whitespace, which gets trimmed to empty), as
# the deserialization code assumes each file is nonempty.
if not source_files_and_contents:
source_files_and_contents = [("dummy.js", "(function() {})")]
result = Sources()
for (source, contents) in source_files_and_contents:
try:
lines = filters(contents)
except Error as e:
raise Error("In file %s:\n%s" % (source, str(e)))
result.modules.append(lines)
is_debugger = IsDebuggerFile(source)
result.is_debugger_id.append(is_debugger)
name = os.path.basename(source)[:-3]
result.names.append(name)
return result | [
"def",
"PrepareSources",
"(",
"source_files",
",",
"native_type",
",",
"emit_js",
")",
":",
"macro_file",
"=",
"None",
"macro_files",
"=",
"filter",
"(",
"IsMacroFile",
",",
"source_files",
")",
"assert",
"len",
"(",
"macro_files",
")",
"in",
"[",
"0",
",",
"1",
"]",
"if",
"macro_files",
":",
"source_files",
".",
"remove",
"(",
"macro_files",
"[",
"0",
"]",
")",
"macro_file",
"=",
"macro_files",
"[",
"0",
"]",
"message_template_file",
"=",
"None",
"message_template_files",
"=",
"filter",
"(",
"IsMessageTemplateFile",
",",
"source_files",
")",
"assert",
"len",
"(",
"message_template_files",
")",
"in",
"[",
"0",
",",
"1",
"]",
"if",
"message_template_files",
":",
"source_files",
".",
"remove",
"(",
"message_template_files",
"[",
"0",
"]",
")",
"message_template_file",
"=",
"message_template_files",
"[",
"0",
"]",
"filters",
"=",
"None",
"if",
"native_type",
"in",
"(",
"\"EXTRAS\"",
",",
"\"EXPERIMENTAL_EXTRAS\"",
")",
":",
"filters",
"=",
"BuildExtraFilterChain",
"(",
")",
"else",
":",
"filters",
"=",
"BuildFilterChain",
"(",
"macro_file",
",",
"message_template_file",
")",
"# Sort 'debugger' sources first.",
"source_files",
"=",
"sorted",
"(",
"source_files",
",",
"lambda",
"l",
",",
"r",
":",
"IsDebuggerFile",
"(",
"r",
")",
"-",
"IsDebuggerFile",
"(",
"l",
")",
")",
"source_files_and_contents",
"=",
"[",
"(",
"f",
",",
"ReadFile",
"(",
"f",
")",
")",
"for",
"f",
"in",
"source_files",
"]",
"# Have a single not-quite-empty source file if there are none present;",
"# otherwise you get errors trying to compile an empty C++ array.",
"# It cannot be empty (or whitespace, which gets trimmed to empty), as",
"# the deserialization code assumes each file is nonempty.",
"if",
"not",
"source_files_and_contents",
":",
"source_files_and_contents",
"=",
"[",
"(",
"\"dummy.js\"",
",",
"\"(function() {})\"",
")",
"]",
"result",
"=",
"Sources",
"(",
")",
"for",
"(",
"source",
",",
"contents",
")",
"in",
"source_files_and_contents",
":",
"try",
":",
"lines",
"=",
"filters",
"(",
"contents",
")",
"except",
"Error",
"as",
"e",
":",
"raise",
"Error",
"(",
"\"In file %s:\\n%s\"",
"%",
"(",
"source",
",",
"str",
"(",
"e",
")",
")",
")",
"result",
".",
"modules",
".",
"append",
"(",
"lines",
")",
"is_debugger",
"=",
"IsDebuggerFile",
"(",
"source",
")",
"result",
".",
"is_debugger_id",
".",
"append",
"(",
"is_debugger",
")",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
"[",
":",
"-",
"3",
"]",
"result",
".",
"names",
".",
"append",
"(",
"name",
")",
"return",
"result"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/js2c.py#L391-L454 |
|
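Note that `sorted(source_files, lambda l, r: ...)` uses Python 2's positional cmp argument. Under Python 3 the same "debugger sources first" ordering needs a key function; a standalone sketch with a stand-in predicate:

```python
def is_debugger_file(name):  # stand-in for js2c's IsDebuggerFile
    return "debug" in name

files = ["a.js", "debug-x.js", "b.js"]
# Python 2 cmp style: sorted(files, lambda l, r: is_debugger_file(r) - is_debugger_file(l))
# Python 3 key style: False sorts before True, so negate to put debugger files first.
print(sorted(files, key=lambda f: not is_debugger_file(f)))
# ['debug-x.js', 'a.js', 'b.js'] -- the sort is stable, other files keep their order
```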
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/idl/idl/generator.py | python | _CppSourceFileWriter.gen_field_list_entry_lookup_methods | (self, field_list) | Generate the definitions for generic argument or reply field lookup methods. | Generate the definitions for generic argument or reply field lookup methods. | [
"Generate",
"the",
"definitions",
"for",
"generic",
"argument",
"or",
"reply",
"field",
"lookup",
"methods",
"."
] | def gen_field_list_entry_lookup_methods(self, field_list):
# type: (ast.FieldListBase) -> None
"""Generate the definitions for generic argument or reply field lookup methods."""
field_list_info = generic_field_list_types.get_field_list_info(field_list)
defn = field_list_info.get_has_field_method().get_definition()
with self._block('%s {' % (defn, ), '}'):
self._writer.write_line(
'return _genericFields.find(fieldName.toString()) != _genericFields.end();')
self._writer.write_empty_line()
defn = field_list_info.get_should_forward_method().get_definition()
with self._block('%s {' % (defn, ), '}'):
self._writer.write_line('auto it = _genericFields.find(fieldName.toString());')
self._writer.write_line('return (it == _genericFields.end() || it->second);')
self._writer.write_empty_line() | [
"def",
"gen_field_list_entry_lookup_methods",
"(",
"self",
",",
"field_list",
")",
":",
"# type: (ast.FieldListBase) -> None",
"field_list_info",
"=",
"generic_field_list_types",
".",
"get_field_list_info",
"(",
"field_list",
")",
"defn",
"=",
"field_list_info",
".",
"get_has_field_method",
"(",
")",
".",
"get_definition",
"(",
")",
"with",
"self",
".",
"_block",
"(",
"'%s {'",
"%",
"(",
"defn",
",",
")",
",",
"'}'",
")",
":",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'return _genericFields.find(fieldName.toString()) != _genericFields.end();'",
")",
"self",
".",
"_writer",
".",
"write_empty_line",
"(",
")",
"defn",
"=",
"field_list_info",
".",
"get_should_forward_method",
"(",
")",
".",
"get_definition",
"(",
")",
"with",
"self",
".",
"_block",
"(",
"'%s {'",
"%",
"(",
"defn",
",",
")",
",",
"'}'",
")",
":",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'auto it = _genericFields.find(fieldName.toString());'",
")",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'return (it == _genericFields.end() || it->second);'",
")",
"self",
".",
"_writer",
".",
"write_empty_line",
"(",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/generator.py#L1555-L1571 |
||
infinit/elle | a8154593c42743f45b9df09daf62b44630c24a02 | drake/src/drake/go/__init__.py | python | Config.add_ldflags | (self, flags) | | return self | Add ldflags to the end of the current ldflags.
:param flags: A list of flags.
:type flags: list of str
:return: self. | Add ldflags to the end of the current ldflags. | [
"Add",
"ldflags",
"to",
"the",
"end",
"of",
"the",
"current",
"ldflags",
"."
] | def add_ldflags(self, flags):
"""
Add ldflags to the end of the current ldflags.
:param flags: A list of flags.
:type flags: list of str
:return: self.
"""
collections.deque(map(self.__ldflags.add, flags))
return self | [
"def",
"add_ldflags",
"(",
"self",
",",
"flags",
")",
":",
"collections",
".",
"deque",
"(",
"map",
"(",
"self",
".",
"__ldflags",
".",
"add",
",",
"flags",
")",
")",
"return",
"self"
] | https://github.com/infinit/elle/blob/a8154593c42743f45b9df09daf62b44630c24a02/drake/src/drake/go/__init__.py#L82-L92 |
|
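`collections.deque(map(self.__ldflags.add, flags))` builds a throwaway deque purely to force the lazy `map` to call `add` on every flag. A plain loop states the same side effect directly; this sketch assumes `__ldflags` is an ordered-set-like container exposing `add`:

```python
def add_ldflags(self, flags):
    # Equivalent to collections.deque(map(self.__ldflags.add, flags)),
    # minus the intermediate deque that exists only to drain the iterator.
    for flag in flags:
        self.__ldflags.add(flag)
    return self
```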
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | contrib/gizmos/gtk/gizmos.py | python | TreeListCtrl.SetColumnText | (*args, **kwargs) | return _gizmos.TreeListCtrl_SetColumnText(*args, **kwargs) | SetColumnText(self, int column, String text) | SetColumnText(self, int column, String text) | [
"SetColumnText",
"(",
"self",
"int",
"column",
"String",
"text",
")"
] | def SetColumnText(*args, **kwargs):
"""SetColumnText(self, int column, String text)"""
return _gizmos.TreeListCtrl_SetColumnText(*args, **kwargs) | [
"def",
"SetColumnText",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gizmos",
".",
"TreeListCtrl_SetColumnText",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/gtk/gizmos.py#L602-L604 |
|
RegrowthStudios/SoACode-Public | c3ddd69355b534d5e70e2e6d0c489b4e93ab1ffe | utils/git-hooks/cpplint/cpplint.py | python | _IsTestFilename | (filename) | Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise. | Determines if the given filename has a suffix that identifies it as a test. | [
"Determines",
"if",
"the",
"given",
"filename",
"has",
"a",
"suffix",
"that",
"identifies",
"it",
"as",
"a",
"test",
"."
] | def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False | [
"def",
"_IsTestFilename",
"(",
"filename",
")",
":",
"if",
"(",
"filename",
".",
"endswith",
"(",
"'_test.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_unittest.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_regtest.cc'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | https://github.com/RegrowthStudios/SoACode-Public/blob/c3ddd69355b534d5e70e2e6d0c489b4e93ab1ffe/utils/git-hooks/cpplint/cpplint.py#L2321-L2335 |
||
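Since `str.endswith` accepts a tuple of suffixes, the or-chain above collapses to a single call:

```python
def is_test_filename(filename):
    # endswith() with a tuple replaces the three chained comparisons.
    return filename.endswith(("_test.cc", "_unittest.cc", "_regtest.cc"))

assert is_test_filename("foo_unittest.cc")
assert not is_test_filename("foo.cc")
```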
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/draftguitools/gui_snapper.py | python | Snapper.snapToExtensions | (self, point, last, constrain, eline) | return point,eline | Return a point snapped to extension or parallel line.
The parallel line of the last object, if any. | Return a point snapped to extension or parallel line. | [
"Return",
"a",
"point",
"snapped",
"to",
"extension",
"or",
"parallel",
"line",
"."
] | def snapToExtensions(self, point, last, constrain, eline):
"""Return a point snapped to extension or parallel line.
The parallel line of the last object, if any.
"""
tsnap = self.snapToHold(point)
if tsnap:
if self.tracker and not self.selectMode:
self.tracker.setCoords(tsnap[2])
self.tracker.setMarker(self.mk[tsnap[1]])
self.tracker.on()
if self.extLine:
self.extLine.p1(tsnap[0])
self.extLine.p2(tsnap[2])
self.extLine.color.rgb = Gui.draftToolBar.getDefaultColor("line")
self.extLine.on()
self.setCursor(tsnap[1])
return tsnap[2], eline
if self.isEnabled("Extension"):
tsnap = self.snapToExtOrtho(last, constrain, eline)
if tsnap:
if (tsnap[0].sub(point)).Length < self.radius:
if self.tracker and not self.selectMode:
self.tracker.setCoords(tsnap[2])
self.tracker.setMarker(self.mk[tsnap[1]])
self.tracker.on()
if self.extLine:
self.extLine.p2(tsnap[2])
self.extLine.color.rgb = Gui.draftToolBar.getDefaultColor("line")
self.extLine.on()
self.setCursor(tsnap[1])
return tsnap[2], eline
else:
tsnap = self.snapToExtPerpendicular(last)
if tsnap:
if (tsnap[0].sub(point)).Length < self.radius:
if self.tracker and not self.selectMode:
self.tracker.setCoords(tsnap[2])
self.tracker.setMarker(self.mk[tsnap[1]])
self.tracker.on()
if self.extLine:
self.extLine.p2(tsnap[2])
self.extLine.color.rgb = Gui.draftToolBar.getDefaultColor("line")
self.extLine.on()
self.setCursor(tsnap[1])
return tsnap[2], eline
for o in self.lastObj:
if (self.isEnabled('Extension')
or self.isEnabled('Parallel')):
ob = App.ActiveDocument.getObject(o)
if not ob:
continue
if not ob.isDerivedFrom("Part::Feature"):
continue
edges = ob.Shape.Edges
if Draft.getType(ob) == "Wall":
for so in [ob]+ob.Additions:
if Draft.getType(so) == "Wall":
if so.Base:
edges.extend(so.Base.Shape.Edges)
edges.reverse()
if (not self.maxEdges) or (len(edges) <= self.maxEdges):
for e in edges:
if DraftGeomUtils.geomType(e) != "Line":
continue
np = self.getPerpendicular(e,point)
if (np.sub(point)).Length < self.radius:
if self.isEnabled('Extension'):
if DraftGeomUtils.isPtOnEdge(np,e):
continue
if np != e.Vertexes[0].Point:
p0 = e.Vertexes[0].Point
if self.tracker and not self.selectMode:
self.tracker.setCoords(np)
self.tracker.setMarker(self.mk['extension'])
self.tracker.on()
if self.extLine:
if self.snapStyle:
dv = np.sub(p0)
self.extLine.p1(p0.add(dv.multiply(0.5)))
else:
self.extLine.p1(p0)
self.extLine.p2(np)
self.extLine.color.rgb = Gui.draftToolBar.getDefaultColor("line")
self.extLine.on()
self.setCursor('extension')
ne = Part.LineSegment(p0,np).toShape()
# storing extension line for intersection calculations later
if len(self.lastExtensions) == 0:
self.lastExtensions.append(ne)
elif len(self.lastExtensions) == 1:
if not DraftGeomUtils.areColinear(ne,self.lastExtensions[0]):
self.lastExtensions.append(self.lastExtensions[0])
self.lastExtensions[0] = ne
else:
if (not DraftGeomUtils.areColinear(ne,self.lastExtensions[0])) and \
(not DraftGeomUtils.areColinear(ne,self.lastExtensions[1])):
self.lastExtensions[1] = self.lastExtensions[0]
self.lastExtensions[0] = ne
return np,ne
elif self.isEnabled('Parallel'):
if last:
ve = DraftGeomUtils.vec(e)
if not DraftVecUtils.isNull(ve):
de = Part.LineSegment(last,last.add(ve)).toShape()
np = self.getPerpendicular(de,point)
if (np.sub(point)).Length < self.radius:
if self.tracker and not self.selectMode:
self.tracker.setCoords(np)
self.tracker.setMarker(self.mk['parallel'])
self.tracker.on()
self.setCursor('parallel')
return np,de
return point,eline | [
"def",
"snapToExtensions",
"(",
"self",
",",
"point",
",",
"last",
",",
"constrain",
",",
"eline",
")",
":",
"tsnap",
"=",
"self",
".",
"snapToHold",
"(",
"point",
")",
"if",
"tsnap",
":",
"if",
"self",
".",
"tracker",
"and",
"not",
"self",
".",
"selectMode",
":",
"self",
".",
"tracker",
".",
"setCoords",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"tracker",
".",
"setMarker",
"(",
"self",
".",
"mk",
"[",
"tsnap",
"[",
"1",
"]",
"]",
")",
"self",
".",
"tracker",
".",
"on",
"(",
")",
"if",
"self",
".",
"extLine",
":",
"self",
".",
"extLine",
".",
"p1",
"(",
"tsnap",
"[",
"0",
"]",
")",
"self",
".",
"extLine",
".",
"p2",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"extLine",
".",
"color",
".",
"rgb",
"=",
"Gui",
".",
"draftToolBar",
".",
"getDefaultColor",
"(",
"\"line\"",
")",
"self",
".",
"extLine",
".",
"on",
"(",
")",
"self",
".",
"setCursor",
"(",
"tsnap",
"[",
"1",
"]",
")",
"return",
"tsnap",
"[",
"2",
"]",
",",
"eline",
"if",
"self",
".",
"isEnabled",
"(",
"\"Extension\"",
")",
":",
"tsnap",
"=",
"self",
".",
"snapToExtOrtho",
"(",
"last",
",",
"constrain",
",",
"eline",
")",
"if",
"tsnap",
":",
"if",
"(",
"tsnap",
"[",
"0",
"]",
".",
"sub",
"(",
"point",
")",
")",
".",
"Length",
"<",
"self",
".",
"radius",
":",
"if",
"self",
".",
"tracker",
"and",
"not",
"self",
".",
"selectMode",
":",
"self",
".",
"tracker",
".",
"setCoords",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"tracker",
".",
"setMarker",
"(",
"self",
".",
"mk",
"[",
"tsnap",
"[",
"1",
"]",
"]",
")",
"self",
".",
"tracker",
".",
"on",
"(",
")",
"if",
"self",
".",
"extLine",
":",
"self",
".",
"extLine",
".",
"p2",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"extLine",
".",
"color",
".",
"rgb",
"=",
"Gui",
".",
"draftToolBar",
".",
"getDefaultColor",
"(",
"\"line\"",
")",
"self",
".",
"extLine",
".",
"on",
"(",
")",
"self",
".",
"setCursor",
"(",
"tsnap",
"[",
"1",
"]",
")",
"return",
"tsnap",
"[",
"2",
"]",
",",
"eline",
"else",
":",
"tsnap",
"=",
"self",
".",
"snapToExtPerpendicular",
"(",
"last",
")",
"if",
"tsnap",
":",
"if",
"(",
"tsnap",
"[",
"0",
"]",
".",
"sub",
"(",
"point",
")",
")",
".",
"Length",
"<",
"self",
".",
"radius",
":",
"if",
"self",
".",
"tracker",
"and",
"not",
"self",
".",
"selectMode",
":",
"self",
".",
"tracker",
".",
"setCoords",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"tracker",
".",
"setMarker",
"(",
"self",
".",
"mk",
"[",
"tsnap",
"[",
"1",
"]",
"]",
")",
"self",
".",
"tracker",
".",
"on",
"(",
")",
"if",
"self",
".",
"extLine",
":",
"self",
".",
"extLine",
".",
"p2",
"(",
"tsnap",
"[",
"2",
"]",
")",
"self",
".",
"extLine",
".",
"color",
".",
"rgb",
"=",
"Gui",
".",
"draftToolBar",
".",
"getDefaultColor",
"(",
"\"line\"",
")",
"self",
".",
"extLine",
".",
"on",
"(",
")",
"self",
".",
"setCursor",
"(",
"tsnap",
"[",
"1",
"]",
")",
"return",
"tsnap",
"[",
"2",
"]",
",",
"eline",
"for",
"o",
"in",
"self",
".",
"lastObj",
":",
"if",
"(",
"self",
".",
"isEnabled",
"(",
"'Extension'",
")",
"or",
"self",
".",
"isEnabled",
"(",
"'Parallel'",
")",
")",
":",
"ob",
"=",
"App",
".",
"ActiveDocument",
".",
"getObject",
"(",
"o",
")",
"if",
"not",
"ob",
":",
"continue",
"if",
"not",
"ob",
".",
"isDerivedFrom",
"(",
"\"Part::Feature\"",
")",
":",
"continue",
"edges",
"=",
"ob",
".",
"Shape",
".",
"Edges",
"if",
"Draft",
".",
"getType",
"(",
"ob",
")",
"==",
"\"Wall\"",
":",
"for",
"so",
"in",
"[",
"ob",
"]",
"+",
"ob",
".",
"Additions",
":",
"if",
"Draft",
".",
"getType",
"(",
"so",
")",
"==",
"\"Wall\"",
":",
"if",
"so",
".",
"Base",
":",
"edges",
".",
"extend",
"(",
"so",
".",
"Base",
".",
"Shape",
".",
"Edges",
")",
"edges",
".",
"reverse",
"(",
")",
"if",
"(",
"not",
"self",
".",
"maxEdges",
")",
"or",
"(",
"len",
"(",
"edges",
")",
"<=",
"self",
".",
"maxEdges",
")",
":",
"for",
"e",
"in",
"edges",
":",
"if",
"DraftGeomUtils",
".",
"geomType",
"(",
"e",
")",
"!=",
"\"Line\"",
":",
"continue",
"np",
"=",
"self",
".",
"getPerpendicular",
"(",
"e",
",",
"point",
")",
"if",
"(",
"np",
".",
"sub",
"(",
"point",
")",
")",
".",
"Length",
"<",
"self",
".",
"radius",
":",
"if",
"self",
".",
"isEnabled",
"(",
"'Extension'",
")",
":",
"if",
"DraftGeomUtils",
".",
"isPtOnEdge",
"(",
"np",
",",
"e",
")",
":",
"continue",
"if",
"np",
"!=",
"e",
".",
"Vertexes",
"[",
"0",
"]",
".",
"Point",
":",
"p0",
"=",
"e",
".",
"Vertexes",
"[",
"0",
"]",
".",
"Point",
"if",
"self",
".",
"tracker",
"and",
"not",
"self",
".",
"selectMode",
":",
"self",
".",
"tracker",
".",
"setCoords",
"(",
"np",
")",
"self",
".",
"tracker",
".",
"setMarker",
"(",
"self",
".",
"mk",
"[",
"'extension'",
"]",
")",
"self",
".",
"tracker",
".",
"on",
"(",
")",
"if",
"self",
".",
"extLine",
":",
"if",
"self",
".",
"snapStyle",
":",
"dv",
"=",
"np",
".",
"sub",
"(",
"p0",
")",
"self",
".",
"extLine",
".",
"p1",
"(",
"p0",
".",
"add",
"(",
"dv",
".",
"multiply",
"(",
"0.5",
")",
")",
")",
"else",
":",
"self",
".",
"extLine",
".",
"p1",
"(",
"p0",
")",
"self",
".",
"extLine",
".",
"p2",
"(",
"np",
")",
"self",
".",
"extLine",
".",
"color",
".",
"rgb",
"=",
"Gui",
".",
"draftToolBar",
".",
"getDefaultColor",
"(",
"\"line\"",
")",
"self",
".",
"extLine",
".",
"on",
"(",
")",
"self",
".",
"setCursor",
"(",
"'extension'",
")",
"ne",
"=",
"Part",
".",
"LineSegment",
"(",
"p0",
",",
"np",
")",
".",
"toShape",
"(",
")",
"# storing extension line for intersection calculations later",
"if",
"len",
"(",
"self",
".",
"lastExtensions",
")",
"==",
"0",
":",
"self",
".",
"lastExtensions",
".",
"append",
"(",
"ne",
")",
"elif",
"len",
"(",
"self",
".",
"lastExtensions",
")",
"==",
"1",
":",
"if",
"not",
"DraftGeomUtils",
".",
"areColinear",
"(",
"ne",
",",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
")",
":",
"self",
".",
"lastExtensions",
".",
"append",
"(",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
")",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
"=",
"ne",
"else",
":",
"if",
"(",
"not",
"DraftGeomUtils",
".",
"areColinear",
"(",
"ne",
",",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
")",
")",
"and",
"(",
"not",
"DraftGeomUtils",
".",
"areColinear",
"(",
"ne",
",",
"self",
".",
"lastExtensions",
"[",
"1",
"]",
")",
")",
":",
"self",
".",
"lastExtensions",
"[",
"1",
"]",
"=",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
"self",
".",
"lastExtensions",
"[",
"0",
"]",
"=",
"ne",
"return",
"np",
",",
"ne",
"elif",
"self",
".",
"isEnabled",
"(",
"'Parallel'",
")",
":",
"if",
"last",
":",
"ve",
"=",
"DraftGeomUtils",
".",
"vec",
"(",
"e",
")",
"if",
"not",
"DraftVecUtils",
".",
"isNull",
"(",
"ve",
")",
":",
"de",
"=",
"Part",
".",
"LineSegment",
"(",
"last",
",",
"last",
".",
"add",
"(",
"ve",
")",
")",
".",
"toShape",
"(",
")",
"np",
"=",
"self",
".",
"getPerpendicular",
"(",
"de",
",",
"point",
")",
"if",
"(",
"np",
".",
"sub",
"(",
"point",
")",
")",
".",
"Length",
"<",
"self",
".",
"radius",
":",
"if",
"self",
".",
"tracker",
"and",
"not",
"self",
".",
"selectMode",
":",
"self",
".",
"tracker",
".",
"setCoords",
"(",
"np",
")",
"self",
".",
"tracker",
".",
"setMarker",
"(",
"self",
".",
"mk",
"[",
"'parallel'",
"]",
")",
"self",
".",
"tracker",
".",
"on",
"(",
")",
"self",
".",
"setCursor",
"(",
"'parallel'",
")",
"return",
"np",
",",
"de",
"return",
"point",
",",
"eline"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_snapper.py#L568-L682 |
|
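The snapping loop above repeatedly calls `getPerpendicular` to project the cursor point onto a candidate line and accepts the result when it lies within the pick radius. The projection itself, as a standalone 2D sketch rather than FreeCAD API:

```python
def perpendicular_foot(p, a, b):
    # Foot of the perpendicular from point p onto the line through a and b.
    ax, ay = a
    bx, by = b
    px, py = p
    dx, dy = bx - ax, by - ay
    t = ((px - ax) * dx + (py - ay) * dy) / (dx * dx + dy * dy)
    return (ax + t * dx, ay + t * dy)

foot = perpendicular_foot((2.0, 3.0), (0.0, 0.0), (4.0, 0.0))
print(foot)  # (2.0, 0.0) -- snap here if it is within the pick radius
```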
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/concurrent/futures/_base.py | python | as_completed | (fs, timeout=None) | An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout. | An iterator over the given futures that yields each as it completes. | [
"An",
"iterator",
"over",
"the",
"given",
"futures",
"that",
"yields",
"each",
"as",
"it",
"completes",
"."
] | def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.monotonic()
fs = set(fs)
total_futures = len(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
finished = list(finished)
try:
yield from _yield_finished_futures(finished, waiter,
ref_collect=(fs,))
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.monotonic()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), total_futures))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
# reverse to keep finishing order
finished.reverse()
yield from _yield_finished_futures(finished, waiter,
ref_collect=(fs, pending))
finally:
# Remove waiter from unfinished futures
for f in fs:
with f._condition:
f._waiters.remove(waiter) | [
"def",
"as_completed",
"(",
"fs",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"end_time",
"=",
"timeout",
"+",
"time",
".",
"monotonic",
"(",
")",
"fs",
"=",
"set",
"(",
"fs",
")",
"total_futures",
"=",
"len",
"(",
"fs",
")",
"with",
"_AcquireFutures",
"(",
"fs",
")",
":",
"finished",
"=",
"set",
"(",
"f",
"for",
"f",
"in",
"fs",
"if",
"f",
".",
"_state",
"in",
"[",
"CANCELLED_AND_NOTIFIED",
",",
"FINISHED",
"]",
")",
"pending",
"=",
"fs",
"-",
"finished",
"waiter",
"=",
"_create_and_install_waiters",
"(",
"fs",
",",
"_AS_COMPLETED",
")",
"finished",
"=",
"list",
"(",
"finished",
")",
"try",
":",
"yield",
"from",
"_yield_finished_futures",
"(",
"finished",
",",
"waiter",
",",
"ref_collect",
"=",
"(",
"fs",
",",
")",
")",
"while",
"pending",
":",
"if",
"timeout",
"is",
"None",
":",
"wait_timeout",
"=",
"None",
"else",
":",
"wait_timeout",
"=",
"end_time",
"-",
"time",
".",
"monotonic",
"(",
")",
"if",
"wait_timeout",
"<",
"0",
":",
"raise",
"TimeoutError",
"(",
"'%d (of %d) futures unfinished'",
"%",
"(",
"len",
"(",
"pending",
")",
",",
"total_futures",
")",
")",
"waiter",
".",
"event",
".",
"wait",
"(",
"wait_timeout",
")",
"with",
"waiter",
".",
"lock",
":",
"finished",
"=",
"waiter",
".",
"finished_futures",
"waiter",
".",
"finished_futures",
"=",
"[",
"]",
"waiter",
".",
"event",
".",
"clear",
"(",
")",
"# reverse to keep finishing order",
"finished",
".",
"reverse",
"(",
")",
"yield",
"from",
"_yield_finished_futures",
"(",
"finished",
",",
"waiter",
",",
"ref_collect",
"=",
"(",
"fs",
",",
"pending",
")",
")",
"finally",
":",
"# Remove waiter from unfinished futures",
"for",
"f",
"in",
"fs",
":",
"with",
"f",
".",
"_condition",
":",
"f",
".",
"_waiters",
".",
"remove",
"(",
"waiter",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/concurrent/futures/_base.py#L196-L256 |
||
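`as_completed` is the standard-library function documented above; the usual pattern pairs it with an executor and a future-to-key mapping so results can be attributed as they arrive:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
import time

def work(n):
    time.sleep(n / 10.0)
    return n * n

with ThreadPoolExecutor(max_workers=3) as pool:
    futures = {pool.submit(work, n): n for n in (3, 1, 2)}
    for fut in as_completed(futures, timeout=5):
        print(futures[fut], "->", fut.result())  # yielded in completion order
```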
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/metrics_impl.py | python | true_positives | (labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None) | Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple. | Sum the weights of true_positives. | [
"Sum",
"the",
"weights",
"of",
"true_positives",
"."
] | def true_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'true_positives', (predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_positive = math_ops.logical_and(math_ops.equal(labels, True),
math_ops.equal(predictions, True))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections) | [
"def",
"true_positives",
"(",
"labels",
",",
"predictions",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"name",
",",
"'true_positives'",
",",
"(",
"predictions",
",",
"labels",
",",
"weights",
")",
")",
":",
"predictions",
",",
"labels",
",",
"weights",
"=",
"_remove_squeezable_dimensions",
"(",
"predictions",
"=",
"math_ops",
".",
"cast",
"(",
"predictions",
",",
"dtype",
"=",
"dtypes",
".",
"bool",
")",
",",
"labels",
"=",
"math_ops",
".",
"cast",
"(",
"labels",
",",
"dtype",
"=",
"dtypes",
".",
"bool",
")",
",",
"weights",
"=",
"weights",
")",
"is_true_positive",
"=",
"math_ops",
".",
"logical_and",
"(",
"math_ops",
".",
"equal",
"(",
"labels",
",",
"True",
")",
",",
"math_ops",
".",
"equal",
"(",
"predictions",
",",
"True",
")",
")",
"return",
"_count_condition",
"(",
"is_true_positive",
",",
"weights",
",",
"metrics_collections",
",",
"updates_collections",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/metrics_impl.py#L1505-L1547 |
||
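In TensorFlow 1.x this metric is exposed as `tf.metrics.true_positives` and returns the `(value_tensor, update_op)` pair described above, backed by local variables. A small session sketch, assuming a TF 1.x runtime:

```python
import tensorflow as tf  # TensorFlow 1.x

labels = tf.constant([True, True, False, False])
predictions = tf.constant([True, False, True, True])

tp, update_op = tf.metrics.true_positives(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric counters are local vars
    sess.run(update_op)
    print(sess.run(tp))  # 1.0 -- only the first position is a true positive
```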
xiaolonw/caffe-video_triplet | c39ea1ad6e937ccf7deba4510b7e555165abf05f | scripts/cpp_lint.py | python | CleansedLines._CollapseStrings | (elided) | return elided | Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings. | Collapses strings and chars on a line to simple "" or '' blocks. | [
"Collapses",
"strings",
"and",
"chars",
"on",
"a",
"line",
"to",
"simple",
"or",
"blocks",
"."
] | def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided | [
"def",
"_CollapseStrings",
"(",
"elided",
")",
":",
"if",
"not",
"_RE_PATTERN_INCLUDE",
".",
"match",
"(",
"elided",
")",
":",
"# Remove escaped characters first to make quote/single quote collapsing",
"# basic. Things that look like escaped characters shouldn't occur",
"# outside of strings and chars.",
"elided",
"=",
"_RE_PATTERN_CLEANSE_LINE_ESCAPES",
".",
"sub",
"(",
"''",
",",
"elided",
")",
"elided",
"=",
"_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES",
".",
"sub",
"(",
"\"''\"",
",",
"elided",
")",
"elided",
"=",
"_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES",
".",
"sub",
"(",
"'\"\"'",
",",
"elided",
")",
"return",
"elided"
] | https://github.com/xiaolonw/caffe-video_triplet/blob/c39ea1ad6e937ccf7deba4510b7e555165abf05f/scripts/cpp_lint.py#L1209-L1227 |
|
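cpplint runs this collapse so later regexes never see the contents of string or char literals. A standalone sketch of the same idea with deliberately simplified patterns (the real `_RE_PATTERN_CLEANSE_*` expressions are stricter):

```python
import re

ESCAPES = re.compile(r'\\[^\n]')        # drop escaped characters first
SINGLE_QUOTES = re.compile(r"'[^']*'")  # then collapse char literals
DOUBLE_QUOTES = re.compile(r'"[^"]*"')  # and string literals

def collapse_strings(line):
    line = ESCAPES.sub('', line)
    line = SINGLE_QUOTES.sub("''", line)
    line = DOUBLE_QUOTES.sub('""', line)
    return line

print(collapse_strings('call("http://x", \'c\')'))  # call("", '')
```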
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sets.py | python | Set.__isub__ | (self, other) | return self | Remove all elements of another set from this set. | Remove all elements of another set from this set. | [
"Remove",
"all",
"elements",
"of",
"another",
"set",
"from",
"this",
"set",
"."
] | def __isub__(self, other):
"""Remove all elements of another set from this set."""
self._binary_sanity_check(other)
self.difference_update(other)
return self | [
"def",
"__isub__",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_binary_sanity_check",
"(",
"other",
")",
"self",
".",
"difference_update",
"(",
"other",
")",
"return",
"self"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sets.py#L471-L475 |
|
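`sets.Set` is the Python 2 module that was deprecated in 2.6 and removed in Python 3; the built-in `set` provides the same in-place difference through `-=`:

```python
a = {1, 2, 3, 4}
b = {2, 4}
a -= b    # in-place difference, the built-in analogue of Set.__isub__
print(a)  # {1, 3}
```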
vnpy/vnpy | f50f2535ed39dd33272e0985ed40c7078e4c19f6 | vnpy/chart/widget.py | python | ChartWidget._on_key_right | (self) | Move chart to right. | Move chart to right. | [
"Move",
"chart",
"to",
"right",
"."
] | def _on_key_right(self) -> None:
"""
Move chart to right.
"""
self._right_ix += 1
self._right_ix = min(self._right_ix, self._manager.get_count())
self._update_x_range()
self._cursor.move_right()
self._cursor.update_info() | [
"def",
"_on_key_right",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"_right_ix",
"+=",
"1",
"self",
".",
"_right_ix",
"=",
"min",
"(",
"self",
".",
"_right_ix",
",",
"self",
".",
"_manager",
".",
"get_count",
"(",
")",
")",
"self",
".",
"_update_x_range",
"(",
")",
"self",
".",
"_cursor",
".",
"move_right",
"(",
")",
"self",
".",
"_cursor",
".",
"update_info",
"(",
")"
] | https://github.com/vnpy/vnpy/blob/f50f2535ed39dd33272e0985ed40c7078e4c19f6/vnpy/chart/widget.py#L266-L275 |
||
ValveSoftware/source-sdk-2013 | 0d8dceea4310fde5706b3ce1c70609d72a38efdf | sp/src/thirdparty/protobuf-2.3.0/python/google/protobuf/descriptor.py | python | EnumValueDescriptor.__init__ | (self, name, index, number, type=None, options=None) | Arguments are as described in the attribute description above. | Arguments are as described in the attribute description above. | [
"Arguments",
"are",
"as",
"described",
"in",
"the",
"attribute",
"description",
"above",
"."
] | def __init__(self, name, index, number, type=None, options=None):
"""Arguments are as described in the attribute description above."""
super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type | [
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"index",
",",
"number",
",",
"type",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"super",
"(",
"EnumValueDescriptor",
",",
"self",
")",
".",
"__init__",
"(",
"options",
",",
"'EnumValueOptions'",
")",
"self",
".",
"name",
"=",
"name",
"self",
".",
"index",
"=",
"index",
"self",
".",
"number",
"=",
"number",
"self",
".",
"type",
"=",
"type"
] | https://github.com/ValveSoftware/source-sdk-2013/blob/0d8dceea4310fde5706b3ce1c70609d72a38efdf/sp/src/thirdparty/protobuf-2.3.0/python/google/protobuf/descriptor.py#L473-L479 |
||
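In generated protobuf Python code these value descriptors are reachable through an enum's `DESCRIPTOR.values`. A hedged sketch assuming a hypothetical compiled module `my_pb2` that defines `enum Color { RED = 0; GREEN = 1; }`:

```python
import my_pb2  # hypothetical module produced by protoc

for value in my_pb2.Color.DESCRIPTOR.values:
    # Each entry is an EnumValueDescriptor with the fields set in __init__ above.
    print(value.index, value.name, value.number)
```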
vgteam/vg | cf4d516a5e9ee5163c783e4437ddf16b18a4b561 | vgci/vgci.py | python | VGCITest._test_bakeoff | (self, region, graph, skip_indexing, mapper='map', tag_ext='', misc_opts=None,
genotype=False) | Run bakeoff F1 test for NA12878 | Run bakeoff F1 test for NA12878 | [
"Run",
"bakeoff",
"F1",
"test",
"for",
"NA12878"
] | def _test_bakeoff(self, region, graph, skip_indexing, mapper='map', tag_ext='', misc_opts=None,
genotype=False):
""" Run bakeoff F1 test for NA12878 """
assert not tag_ext or tag_ext.startswith('-')
tag = '{}-{}{}'.format(region, graph, tag_ext)
chrom, offset = self._bakeoff_coords(region)
if skip_indexing:
xg_path = None
gcsa_path = self._input('{}-{}.gcsa'.format(graph, region))
else:
xg_path = None
gcsa_path = None
extra_opts = '--vcf_offsets {}'.format(offset)
if misc_opts:
extra_opts += ' {}'.format(misc_opts)
# these are the options these tests were trained on. specify here instead of relying
# on them being baked into toil-vg
extra_opts += ' --min_mapq 15 --filter_opts \' -r 0.9 -fu -m 1 -q 15 -D 999\''
self._toil_vg_run('NA12878', chrom,
self._input('{}-{}.vg'.format(graph, region)),
xg_path, gcsa_path,
self._input('platinum_NA12878_{}.fq.gz'.format(region)),
self._input('platinum_NA12878_{}.vcf.gz'.format(region)),
self._input('chr{}.fa.gz'.format(chrom)), True, mapper,
extra_opts, genotype, tag)
if self.verify:
self._verify_f1('NA12878', tag) | [
"def",
"_test_bakeoff",
"(",
"self",
",",
"region",
",",
"graph",
",",
"skip_indexing",
",",
"mapper",
"=",
"'map'",
",",
"tag_ext",
"=",
"''",
",",
"misc_opts",
"=",
"None",
",",
"genotype",
"=",
"False",
")",
":",
"assert",
"not",
"tag_ext",
"or",
"tag_ext",
".",
"startswith",
"(",
"'-'",
")",
"tag",
"=",
"'{}-{}{}'",
".",
"format",
"(",
"region",
",",
"graph",
",",
"tag_ext",
")",
"chrom",
",",
"offset",
"=",
"self",
".",
"_bakeoff_coords",
"(",
"region",
")",
"if",
"skip_indexing",
":",
"xg_path",
"=",
"None",
"gcsa_path",
"=",
"self",
".",
"_input",
"(",
"'{}-{}.gcsa'",
".",
"format",
"(",
"graph",
",",
"region",
")",
")",
"else",
":",
"xg_path",
"=",
"None",
"gcsa_path",
"=",
"None",
"extra_opts",
"=",
"'--vcf_offsets {}'",
".",
"format",
"(",
"offset",
")",
"if",
"misc_opts",
":",
"extra_opts",
"+=",
"' {}'",
".",
"format",
"(",
"misc_opts",
")",
"# these are the options these tests were trained on. specify here instead of relying",
"# on them being baked into toil-vg",
"extra_opts",
"+=",
"' --min_mapq 15 --filter_opts \\' -r 0.9 -fu -m 1 -q 15 -D 999\\''",
"self",
".",
"_toil_vg_run",
"(",
"'NA12878'",
",",
"chrom",
",",
"self",
".",
"_input",
"(",
"'{}-{}.vg'",
".",
"format",
"(",
"graph",
",",
"region",
")",
")",
",",
"xg_path",
",",
"gcsa_path",
",",
"self",
".",
"_input",
"(",
"'platinum_NA12878_{}.fq.gz'",
".",
"format",
"(",
"region",
")",
")",
",",
"self",
".",
"_input",
"(",
"'platinum_NA12878_{}.vcf.gz'",
".",
"format",
"(",
"region",
")",
")",
",",
"self",
".",
"_input",
"(",
"'chr{}.fa.gz'",
".",
"format",
"(",
"chrom",
")",
")",
",",
"True",
",",
"mapper",
",",
"extra_opts",
",",
"genotype",
",",
"tag",
")",
"if",
"self",
".",
"verify",
":",
"self",
".",
"_verify_f1",
"(",
"'NA12878'",
",",
"tag",
")"
] | https://github.com/vgteam/vg/blob/cf4d516a5e9ee5163c783e4437ddf16b18a4b561/vgci/vgci.py#L507-L535 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/docs/pretty_docs.py | python | _build_function_page | (page_info) | return ''.join(parts) | Given a FunctionPageInfo object, return the page as an md string. | Given a FunctionPageInfo object, return the page as an md string. | [
"Given",
"a",
"FunctionPageInfo",
"object",
"Return",
"the",
"page",
"as",
"an",
"md",
"string",
"."
] | def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
parts = ['# %s\n\n' % page_info.full_name]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.signature is not None:
parts.append(_build_signature(page_info))
if page_info.defined_in:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
return ''.join(parts) | [
"def",
"_build_function_page",
"(",
"page_info",
")",
":",
"parts",
"=",
"[",
"'# %s\\n\\n'",
"%",
"page_info",
".",
"full_name",
"]",
"if",
"len",
"(",
"page_info",
".",
"aliases",
")",
">",
"1",
":",
"parts",
".",
"append",
"(",
"'### Aliases:\\n\\n'",
")",
"parts",
".",
"extend",
"(",
"'* `%s`\\n'",
"%",
"name",
"for",
"name",
"in",
"page_info",
".",
"aliases",
")",
"parts",
".",
"append",
"(",
"'\\n'",
")",
"if",
"page_info",
".",
"signature",
"is",
"not",
"None",
":",
"parts",
".",
"append",
"(",
"_build_signature",
"(",
"page_info",
")",
")",
"if",
"page_info",
".",
"defined_in",
":",
"parts",
".",
"append",
"(",
"'\\n\\n'",
")",
"parts",
".",
"append",
"(",
"str",
"(",
"page_info",
".",
"defined_in",
")",
")",
"parts",
".",
"append",
"(",
"page_info",
".",
"guides",
")",
"parts",
".",
"append",
"(",
"page_info",
".",
"doc",
".",
"docstring",
")",
"parts",
".",
"append",
"(",
"_build_function_details",
"(",
"page_info",
".",
"doc",
".",
"function_details",
")",
")",
"parts",
".",
"append",
"(",
"_build_compatibility",
"(",
"page_info",
".",
"doc",
".",
"compatibility",
")",
")",
"return",
"''",
".",
"join",
"(",
"parts",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/docs/pretty_docs.py#L58-L79 |
|
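The record above shows the accumulate-then-join pattern: page fragments are collected in a list and concatenated once at the end, which avoids repeated string reallocation. A minimal runnable sketch of that pattern follows; `StubPageInfo` is a hypothetical stand-in for TensorFlow's real `FunctionPageInfo` object and carries only the two attributes the sketch needs.

```python
# Minimal sketch of the parts-list pattern used by _build_function_page:
# accumulate fragments in a list and join once at the end. StubPageInfo
# is a hypothetical stand-in for the real FunctionPageInfo object.
class StubPageInfo:
    full_name = "tf.example.fn"
    aliases = ["tf.example.fn", "tf.example.fn_v1"]

def build_page(page_info):
    parts = ["# %s\n\n" % page_info.full_name]
    if len(page_info.aliases) > 1:
        parts.append("### Aliases:\n\n")
        parts.extend("* `%s`\n" % name for name in page_info.aliases)
        parts.append("\n")
    return "".join(parts)

print(build_page(StubPageInfo()))
```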
mickem/nscp | 79f89fdbb6da63f91bc9dedb7aea202fe938f237 | scripts/python/lib/google/protobuf/internal/containers.py | python | RepeatedCompositeFieldContainer.__delitem__ | (self, key) | Deletes the item at the specified position. | Deletes the item at the specified position. | [
"Deletes",
"the",
"item",
"at",
"the",
"specified",
"position",
"."
] | def __delitem__(self, key):
"""Deletes the item at the specified position."""
del self._values[key]
self._message_listener.Modified() | [
"def",
"__delitem__",
"(",
"self",
",",
"key",
")",
":",
"del",
"self",
".",
"_values",
"[",
"key",
"]",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | https://github.com/mickem/nscp/blob/79f89fdbb6da63f91bc9dedb7aea202fe938f237/scripts/python/lib/google/protobuf/internal/containers.py#L242-L245 |
||
vtraag/leidenalg | b53366829360e10922a2dbf57eb405a516c23bc9 | src/leidenalg/Optimiser.py | python | Optimiser.consider_comms | (self) | return _c_leiden._Optimiser_get_consider_comms(self._optimiser) | Determine how alternative communities are considered for moving
a node for *optimising* a partition.
Nodes will only move to alternative communities that improve the given
quality function.
Notes
-------
This attribute should be set to one of the following values
* :attr:`leidenalg.ALL_NEIGH_COMMS`
Consider all neighbouring communities for moving.
* :attr:`leidenalg.ALL_COMMS`
Consider all communities for moving. This is especially useful in the
case of negative links, in which case it may be better to move a node to
a non-neighbouring community.
* :attr:`leidenalg.RAND_NEIGH_COMM`
Consider a random neighbour community for moving. The probability to
choose a community is proportional to the number of neighbours a node has
in that community.
* :attr:`leidenalg.RAND_COMM`
Consider a random community for moving. The probability to choose a
community is proportional to the number of nodes in that community. | Determine how alternative communities are considered for moving
a node for *optimising* a partition. | [
"Determine",
"how",
"alternative",
"communities",
"are",
"considered",
"for",
"moving",
"a",
"node",
"for",
"*",
"optimising",
"*",
"a",
"partition",
"."
] | def consider_comms(self):
""" Determine how alternative communities are considered for moving
a node for *optimising* a partition.
Nodes will only move to alternative communities that improve the given
quality function.
Notes
-------
This attribute should be set to one of the following values
* :attr:`leidenalg.ALL_NEIGH_COMMS`
Consider all neighbouring communities for moving.
* :attr:`leidenalg.ALL_COMMS`
Consider all communities for moving. This is especially useful in the
case of negative links, in which case it may be better to move a node to
a non-neighbouring community.
* :attr:`leidenalg.RAND_NEIGH_COMM`
Consider a random neighbour community for moving. The probability to
choose a community is proportional to the number of neighbours a node has
in that community.
* :attr:`leidenalg.RAND_COMM`
Consider a random community for moving. The probability to choose a
community is proportional to the number of nodes in that community.
"""
return _c_leiden._Optimiser_get_consider_comms(self._optimiser) | [
"def",
"consider_comms",
"(",
"self",
")",
":",
"return",
"_c_leiden",
".",
"_Optimiser_get_consider_comms",
"(",
"self",
".",
"_optimiser",
")"
] | https://github.com/vtraag/leidenalg/blob/b53366829360e10922a2dbf57eb405a516c23bc9/src/leidenalg/Optimiser.py#L78-L106 |
|
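A hedged usage sketch for the `consider_comms` attribute documented in the record above, assuming the `leidenalg` and `python-igraph` packages are installed; the Zachary karate-club graph is just a convenient built-in test graph.

```python
# Hedged usage sketch for Optimiser.consider_comms, assuming the
# leidenalg and python-igraph packages are installed.
import igraph as ig
import leidenalg as la

graph = ig.Graph.Famous("Zachary")
partition = la.ModularityVertexPartition(graph)

optimiser = la.Optimiser()
# Consider every community as a move target, not just neighbouring ones;
# the docstring above recommends this when the graph has negative links.
optimiser.consider_comms = la.ALL_COMMS
optimiser.optimise_partition(partition)
print(partition.summary())
```

Setting `ALL_COMMS` trades speed for the ability to escape neighbour-only moves, which the docstring above suggests for graphs with negative links.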
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/propgrid.py | python | PropertyGridPopulator.Add | (*args, **kwargs) | return _propgrid.PropertyGridPopulator_Add(*args, **kwargs) | Add(self, String propClass, String propLabel, String propName,
String propValue, PGChoices pChoices=None) -> PGProperty | Add(self, String propClass, String propLabel, String propName,
String propValue, PGChoices pChoices=None) -> PGProperty | [
"Add",
"(",
"self",
"String",
"propClass",
"String",
"propLabel",
"String",
"propName",
"String",
"propValue",
"PGChoices",
"pChoices",
"=",
"None",
")",
"-",
">",
"PGProperty"
] | def Add(*args, **kwargs):
"""
Add(self, String propClass, String propLabel, String propName,
String propValue, PGChoices pChoices=None) -> PGProperty
"""
return _propgrid.PropertyGridPopulator_Add(*args, **kwargs) | [
"def",
"Add",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGridPopulator_Add",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L2585-L2590 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | ppapi/native_client/src/tools/srpcgen.py | python | CountName | (name) | return '%s_bytes' % name | Returns the name of the auxiliary count member used for array typed. | Returns the name of the auxiliary count member used for array typed. | [
"Returns",
"the",
"name",
"of",
"the",
"auxiliary",
"count",
"member",
"used",
"for",
"array",
"typed",
"."
] | def CountName(name):
"""Returns the name of the auxiliary count member used for array typed."""
return '%s_bytes' % name | [
"def",
"CountName",
"(",
"name",
")",
":",
"return",
"'%s_bytes'",
"%",
"name"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/ppapi/native_client/src/tools/srpcgen.py#L127-L129 |
|
bulletphysics/bullet3 | f0f2a952e146f016096db6f85cf0c44ed75b0b9a | examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_example.py | python | _run_example | (max_time=_MAX_TIME_SECONDS,
run_on_robot=False,
use_keyboard=False) | Runs the locomotion controller example. | Runs the locomotion controller example. | [
"Runs",
"the",
"locomotion",
"controller",
"example",
"."
] | def _run_example(max_time=_MAX_TIME_SECONDS,
run_on_robot=False,
use_keyboard=False):
"""Runs the locomotion controller example."""
if use_keyboard:
kb = keyboard_utils.KeyboardInput()
env = env_loader.load()
env.reset()
# To mitigate jittering from the python
gc.collect()
# Wait for the robot to be placed properly.
if run_on_robot:
input("Press Enter to continue when robot is ready.")
lin_speed = np.array([0.0, 0.0, 0.0])
ang_speed = 0.0
controller = locomotion_controller_setup.setup_controller(
env.robot, FLAGS.gait, run_on_robot, FLAGS.use_ground_truth_velocity)
controller.reset()
loop_start_time = env.get_time_since_reset()
loop_elapsed_time = 0
robot_log = {
"timestamps": [],
"motor_angles": [],
"motor_velocities": [],
"base_velocities": [],
"foot_positions": [],
"base_rollpitchyaw": [],
"base_angular_velocities": [],
"actions": []
}
try:
while loop_elapsed_time < max_time:
#if use_keyboard:
# lin_speed, ang_speed = _update_speed_from_kb(kb, lin_speed, ang_speed)
#else:
lin_speed, ang_speed = _generate_example_linear_angular_speed(
loop_elapsed_time)
# Needed before every call to get_action().
_update_controller_params(controller, lin_speed, ang_speed)
controller.update()
hybrid_action = controller.get_action()
# Log the robot data.
robot_log["timestamps"].append(env.robot.GetTimeSinceReset())
robot_log["motor_angles"].append(env.robot.motor_angles)
robot_log["motor_velocities"].append(env.robot.motor_velocities)
robot_log["base_velocities"].append(
controller.state_estimator.com_velocity_body_yaw_aligned_frame)
robot_log["foot_positions"].append(env.robot.foot_positions())
robot_log["base_rollpitchyaw"].append(env.robot.base_roll_pitch_yaw)
robot_log["base_angular_velocities"].append(
env.robot.base_roll_pitch_yaw_rate)
robot_log["actions"].append(hybrid_action)
env.step(hybrid_action)
loop_elapsed_time = env.get_time_since_reset() - loop_start_time
finally:
if FLAGS.run_on_robot:
# Apply zero torques to the robot.
env.robot.apply_action(
[0] * env.robot.num_motors,
motor_control_mode=robot_config.MotorControlMode.TORQUE)
if FLAGS.log_path:
pickle.dump(robot_log, gfile.Open(FLAGS.log_path + "/robot.log", "wb")) | [
"def",
"_run_example",
"(",
"max_time",
"=",
"_MAX_TIME_SECONDS",
",",
"run_on_robot",
"=",
"False",
",",
"use_keyboard",
"=",
"False",
")",
":",
"if",
"use_keyboard",
":",
"kb",
"=",
"keyboard_utils",
".",
"KeyboardInput",
"(",
")",
"env",
"=",
"env_loader",
".",
"load",
"(",
")",
"env",
".",
"reset",
"(",
")",
"# To mitigate jittering from the python",
"gc",
".",
"collect",
"(",
")",
"# Wait for the robot to be placed properly.",
"if",
"run_on_robot",
":",
"input",
"(",
"\"Press Enter to continue when robot is ready.\"",
")",
"lin_speed",
"=",
"np",
".",
"array",
"(",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
"ang_speed",
"=",
"0.0",
"controller",
"=",
"locomotion_controller_setup",
".",
"setup_controller",
"(",
"env",
".",
"robot",
",",
"FLAGS",
".",
"gait",
",",
"run_on_robot",
",",
"FLAGS",
".",
"use_ground_truth_velocity",
")",
"controller",
".",
"reset",
"(",
")",
"loop_start_time",
"=",
"env",
".",
"get_time_since_reset",
"(",
")",
"loop_elapsed_time",
"=",
"0",
"robot_log",
"=",
"{",
"\"timestamps\"",
":",
"[",
"]",
",",
"\"motor_angles\"",
":",
"[",
"]",
",",
"\"motor_velocities\"",
":",
"[",
"]",
",",
"\"base_velocities\"",
":",
"[",
"]",
",",
"\"foot_positions\"",
":",
"[",
"]",
",",
"\"base_rollpitchyaw\"",
":",
"[",
"]",
",",
"\"base_angular_velocities\"",
":",
"[",
"]",
",",
"\"actions\"",
":",
"[",
"]",
"}",
"try",
":",
"while",
"loop_elapsed_time",
"<",
"max_time",
":",
"#if use_keyboard:",
"# lin_speed, ang_speed = _update_speed_from_kb(kb, lin_speed, ang_speed)",
"#else:",
"lin_speed",
",",
"ang_speed",
"=",
"_generate_example_linear_angular_speed",
"(",
"loop_elapsed_time",
")",
"# Needed before every call to get_action().",
"_update_controller_params",
"(",
"controller",
",",
"lin_speed",
",",
"ang_speed",
")",
"controller",
".",
"update",
"(",
")",
"hybrid_action",
"=",
"controller",
".",
"get_action",
"(",
")",
"# Log the robot data.",
"robot_log",
"[",
"\"timestamps\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"GetTimeSinceReset",
"(",
")",
")",
"robot_log",
"[",
"\"motor_angles\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"motor_angles",
")",
"robot_log",
"[",
"\"motor_velocities\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"motor_velocities",
")",
"robot_log",
"[",
"\"base_velocities\"",
"]",
".",
"append",
"(",
"controller",
".",
"state_estimator",
".",
"com_velocity_body_yaw_aligned_frame",
")",
"robot_log",
"[",
"\"foot_positions\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"foot_positions",
"(",
")",
")",
"robot_log",
"[",
"\"base_rollpitchyaw\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"base_roll_pitch_yaw",
")",
"robot_log",
"[",
"\"base_angular_velocities\"",
"]",
".",
"append",
"(",
"env",
".",
"robot",
".",
"base_roll_pitch_yaw_rate",
")",
"robot_log",
"[",
"\"actions\"",
"]",
".",
"append",
"(",
"hybrid_action",
")",
"env",
".",
"step",
"(",
"hybrid_action",
")",
"loop_elapsed_time",
"=",
"env",
".",
"get_time_since_reset",
"(",
")",
"-",
"loop_start_time",
"finally",
":",
"if",
"FLAGS",
".",
"run_on_robot",
":",
"# Apply zero torques to the robot.",
"env",
".",
"robot",
".",
"apply_action",
"(",
"[",
"0",
"]",
"*",
"env",
".",
"robot",
".",
"num_motors",
",",
"motor_control_mode",
"=",
"robot_config",
".",
"MotorControlMode",
".",
"TORQUE",
")",
"if",
"FLAGS",
".",
"log_path",
":",
"pickle",
".",
"dump",
"(",
"robot_log",
",",
"gfile",
".",
"Open",
"(",
"FLAGS",
".",
"log_path",
"+",
"\"/robot.log\"",
",",
"\"wb\"",
")",
")"
] | https://github.com/bulletphysics/bullet3/blob/f0f2a952e146f016096db6f85cf0c44ed75b0b9a/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_example.py#L108-L179 |
||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/model/create/pile.py | python | xy_jiggle | (world,objects,fixed_objects,bmin,bmax,iters,randomize=True,
verbose=0) | return removed | Jiggles the objects' x-y positions within the range bmin - bmax, and randomizes orientation about the z
axis until the objects are collision free. A list of fixed objects (fixed_objects) may be given as well.
Objects for which collision-free resolutions are not found are returned. | Jiggles the objects' x-y positions within the range bmin - bmax, and randomizes orientation about the z
axis until the objects are collision free. A list of fixed objects (fixed_objects) may be given as well. | [
"Jiggles",
"the",
"objects",
"x",
"-",
"y",
"positions",
"within",
"the",
"range",
"bmin",
"-",
"bmax",
"and",
"randomizes",
"orientation",
"about",
"the",
"z",
"axis",
"until",
"the",
"objects",
"are",
"collision",
"free",
".",
"A",
"list",
"of",
"fixed",
"objects",
"(",
"fixed_objects",
")",
"may",
"be",
"given",
"as",
"well",
"."
] | def xy_jiggle(world,objects,fixed_objects,bmin,bmax,iters,randomize=True,
verbose=0):
"""Jiggles the objects' x-y positions within the range bmin - bmax, and randomizes orientation about the z
axis until the objects are collision free. A list of fixed objects (fixed_objects) may be given as well.
Objects for which collision-free resolutions are not found are returned.
"""
if randomize:
for obj in objects:
xy_randomize(obj,bmin,bmax)
inner_iters = 10
while iters > 0:
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter([o.geometry() for o in objects]):
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter([o.geometry() for o in objects],[o.geometry() for o in fixed_objects]):
numConflicts[i] += 1
amax = max((c,i) for (i,c) in enumerate(numConflicts))[1]
cmax = numConflicts[amax]
if cmax == 0:
#conflict free
return
if verbose:
print cmax,"conflicts with object",objects[amax].getName()
other_geoms = [o.geometry() for o in objects[:amax]+objects[amax+1:]+fixed_objects]
for it in xrange(inner_iters):
xy_randomize(objects[amax],bmin,bmax)
nc = sum([1 for p in collide.group_collision_iter([objects[amax].geometry()],other_geoms)])
if nc < cmax:
break
iters-=1
if verbose:
print "Now",nc,"conflicts with object",objects[amax].getName()
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter([o.geometry() for o in objects]):
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter([o.geometry() for o in objects],[o.geometry() for o in fixed_objects]):
numConflicts[i] += 1
removed = []
while max(numConflicts) > 0:
amax = max((c,i) for (i,c) in enumerate(numConflicts))[1]
cmax = numConflicts[amax]
if verbose:
print "Unable to find conflict-free configuration for object",objects[amax].getName(),"with",cmax,"conflicts"
removed.append(amax)
#revise # of conflicts -- this could be faster, but whatever...
numConflicts = [0]*len(objects)
for (i,j) in collide.self_collision_iter([o.geometry() for o in objects]):
if i in removed or j in removed:
continue
numConflicts[i] += 1
numConflicts[j] += 1
for (i,j) in collide.group_collision_iter([o.geometry() for o in objects],[o.geometry() for o in fixed_objects]):
if i in removed:
continue
numConflicts[i] += 1
return removed | [
"def",
"xy_jiggle",
"(",
"world",
",",
"objects",
",",
"fixed_objects",
",",
"bmin",
",",
"bmax",
",",
"iters",
",",
"randomize",
"=",
"True",
",",
"verbose",
"=",
"0",
")",
":",
"if",
"randomize",
":",
"for",
"obj",
"in",
"objects",
":",
"xy_randomize",
"(",
"obj",
",",
"bmin",
",",
"bmax",
")",
"inner_iters",
"=",
"10",
"while",
"iters",
">",
"0",
":",
"numConflicts",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"objects",
")",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"self_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
")",
":",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"numConflicts",
"[",
"j",
"]",
"+=",
"1",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"group_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
",",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"fixed_objects",
"]",
")",
":",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"amax",
"=",
"max",
"(",
"(",
"c",
",",
"i",
")",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"numConflicts",
")",
")",
"[",
"1",
"]",
"cmax",
"=",
"numConflicts",
"[",
"amax",
"]",
"if",
"cmax",
"==",
"0",
":",
"#conflict free",
"return",
"if",
"verbose",
":",
"print",
"cmax",
",",
"\"conflicts with object\"",
",",
"objects",
"[",
"amax",
"]",
".",
"getName",
"(",
")",
"other_geoms",
"=",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"[",
":",
"amax",
"]",
"+",
"objects",
"[",
"amax",
"+",
"1",
":",
"]",
"+",
"fixed_objects",
"]",
"for",
"it",
"in",
"xrange",
"(",
"inner_iters",
")",
":",
"xy_randomize",
"(",
"objects",
"[",
"amax",
"]",
",",
"bmin",
",",
"bmax",
")",
"nc",
"=",
"sum",
"(",
"[",
"1",
"for",
"p",
"in",
"collide",
".",
"group_collision_iter",
"(",
"[",
"objects",
"[",
"amax",
"]",
".",
"geometry",
"(",
")",
"]",
",",
"other_geoms",
")",
"]",
")",
"if",
"nc",
"<",
"cmax",
":",
"break",
"iters",
"-=",
"1",
"if",
"verbose",
":",
"print",
"\"Now\"",
",",
"nc",
",",
"\"conflicts with object\"",
",",
"objects",
"[",
"amax",
"]",
".",
"getName",
"(",
")",
"numConflicts",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"objects",
")",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"self_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
")",
":",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"numConflicts",
"[",
"j",
"]",
"+=",
"1",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"group_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
",",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"fixed_objects",
"]",
")",
":",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"removed",
"=",
"[",
"]",
"while",
"max",
"(",
"numConflicts",
")",
">",
"0",
":",
"amax",
"=",
"max",
"(",
"(",
"c",
",",
"i",
")",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"numConflicts",
")",
")",
"[",
"1",
"]",
"cmax",
"=",
"numConflicts",
"[",
"amax",
"]",
"if",
"verbose",
":",
"print",
"\"Unable to find conflict-free configuration for object\"",
",",
"objects",
"[",
"amax",
"]",
".",
"getName",
"(",
")",
",",
"\"with\"",
",",
"cmax",
",",
"\"conflicts\"",
"removed",
".",
"append",
"(",
"amax",
")",
"#revise # of conflicts -- this could be faster, but whatever...",
"numConflicts",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"objects",
")",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"self_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
")",
":",
"if",
"i",
"in",
"removed",
"or",
"j",
"in",
"removed",
":",
"continue",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"numConflicts",
"[",
"j",
"]",
"+=",
"1",
"for",
"(",
"i",
",",
"j",
")",
"in",
"collide",
".",
"group_collision_iter",
"(",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"objects",
"]",
",",
"[",
"o",
".",
"geometry",
"(",
")",
"for",
"o",
"in",
"fixed_objects",
"]",
")",
":",
"if",
"i",
"in",
"removed",
":",
"continue",
"numConflicts",
"[",
"i",
"]",
"+=",
"1",
"return",
"removed"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/model/create/pile.py#L36-L97 |
|
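The function above is a rejection-sampling placement loop: find the most-conflicted object, re-randomize it within the bounds, and repeat until the layout is collision free or the iteration budget runs out. A self-contained sketch of that idea follows, with circles standing in for Klampt geometry; the real code also randomizes orientation about the z axis and handles a separate list of fixed objects.

```python
# Self-contained sketch of the rejection-sampling idea behind xy_jiggle:
# re-randomize the most-conflicted object until the layout is collision
# free or the iteration budget runs out. Circles (x, y, radius) stand in
# for Klampt geometry.
import random

def overlaps(a, b):
    (ax, ay, ar), (bx, by, br) = a, b
    return (ax - bx) ** 2 + (ay - by) ** 2 < (ar + br) ** 2

def jiggle(circles, bmin, bmax, iters=100):
    for _ in range(iters):
        conflicts = [sum(overlaps(c, d) for j, d in enumerate(circles) if i != j)
                     for i, c in enumerate(circles)]
        worst = max(range(len(circles)), key=conflicts.__getitem__)
        if conflicts[worst] == 0:
            return circles  # conflict free
        x = random.uniform(bmin[0], bmax[0])
        y = random.uniform(bmin[1], bmax[1])
        circles[worst] = (x, y, circles[worst][2])
    return circles  # some conflicts may remain, as in the original

print(jiggle([(0, 0, 1.0), (0.5, 0.5, 1.0), (4, 4, 1.0)], (0, 0), (10, 10)))
```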
nasa/fprime | 595cf3682d8365943d86c1a6fe7c78f0a116acf0 | Autocoders/Python/src/fprime_ac/parsers/XmlTopologyParser.py | python | XmlTopologyParser.get_connections | (self) | return self.__connections | Returns a topology object. | Returns a topology object. | [
"Returns",
"a",
"topology",
"object",
"."
] | def get_connections(self):
"""
Returns a topology object.
"""
return self.__connections | [
"def",
"get_connections",
"(",
"self",
")",
":",
"return",
"self",
".",
"__connections"
] | https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/parsers/XmlTopologyParser.py#L346-L350 |
|
rbgirshick/caffe-fast-rcnn | 28a579eaf0668850705598b3075b8969f22226d9 | python/caffe/pycaffe.py | python | _Net_forward | (self, blobs=None, start=None, end=None, **kwargs) | return {out: self.blobs[out].data for out in outputs} | Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict. | Forward pass: prepare inputs and run the net forward. | [
"Forward",
"pass",
":",
"prepare",
"inputs",
"and",
"run",
"the",
"net",
"forward",
"."
] | def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in kwargs.iteritems():
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs} | [
"def",
"_Net_forward",
"(",
"self",
",",
"blobs",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"blobs",
"is",
"None",
":",
"blobs",
"=",
"[",
"]",
"if",
"start",
"is",
"not",
"None",
":",
"start_ind",
"=",
"list",
"(",
"self",
".",
"_layer_names",
")",
".",
"index",
"(",
"start",
")",
"else",
":",
"start_ind",
"=",
"0",
"if",
"end",
"is",
"not",
"None",
":",
"end_ind",
"=",
"list",
"(",
"self",
".",
"_layer_names",
")",
".",
"index",
"(",
"end",
")",
"outputs",
"=",
"set",
"(",
"[",
"end",
"]",
"+",
"blobs",
")",
"else",
":",
"end_ind",
"=",
"len",
"(",
"self",
".",
"layers",
")",
"-",
"1",
"outputs",
"=",
"set",
"(",
"self",
".",
"outputs",
"+",
"blobs",
")",
"if",
"kwargs",
":",
"if",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"!=",
"set",
"(",
"self",
".",
"inputs",
")",
":",
"raise",
"Exception",
"(",
"'Input blob arguments do not match net inputs.'",
")",
"# Set input according to defined shapes and make arrays single and",
"# C-contiguous as Caffe expects.",
"for",
"in_",
",",
"blob",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"blob",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"blobs",
"[",
"in_",
"]",
".",
"num",
":",
"raise",
"Exception",
"(",
"'Input is not batch sized'",
")",
"self",
".",
"blobs",
"[",
"in_",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"blob",
"self",
".",
"_forward",
"(",
"start_ind",
",",
"end_ind",
")",
"# Unpack blobs to extract",
"return",
"{",
"out",
":",
"self",
".",
"blobs",
"[",
"out",
"]",
".",
"data",
"for",
"out",
"in",
"outputs",
"}"
] | https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/python/caffe/pycaffe.py#L52-L98 |
|
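A hedged usage sketch for `Net.forward` as documented above, assuming pycaffe is importable and a trained model exists; `deploy.prototxt` and `weights.caffemodel` are placeholder file names, and the blob names `data` and `prob` are assumptions about the model definition.

```python
# Hedged usage sketch for Net.forward; file names and blob names below
# are placeholders, not part of the record above.
import numpy as np
import caffe

net = caffe.Net("deploy.prototxt", "weights.caffemodel", caffe.TEST)

# Input must match the batch size of the "data" blob (see the check in
# the implementation above) and be float32/C-contiguous for Caffe.
batch = np.ascontiguousarray(
    np.zeros(net.blobs["data"].data.shape, dtype=np.float32))

out = net.forward(data=batch)      # kwargs keys must equal net.inputs
print(out["prob"].shape)           # output blobs come back in a dict
```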
apache/impala | 8ddac48f3428c86f2cbd037ced89cfb903298b12 | shell/impala_client.py | python | ImpalaClient.get_column_names | (self, last_query_handle) | Get a list of column names for the query. The query must have a result set. | Get a list of column names for the query. The query must have a result set. | [
"Get",
"a",
"list",
"of",
"column",
"names",
"for",
"the",
"query",
".",
"The",
"query",
"must",
"have",
"a",
"result",
"set",
"."
] | def get_column_names(self, last_query_handle):
"""Get a list of column names for the query. The query must have a result set."""
raise NotImplementedError() | [
"def",
"get_column_names",
"(",
"self",
",",
"last_query_handle",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/shell/impala_client.py#L279-L281 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/email/_policybase.py | python | Policy.register_defect | (self, obj, defect) | Record 'defect' on 'obj'.
Called by handle_defect if raise_on_defect is False. This method is
part of the Policy API so that Policy subclasses can implement custom
defect handling. The default implementation calls the append method of
the defects attribute of obj. The objects used by the email package by
default that get passed to this method will always have a defects
attribute with an append method. | Record 'defect' on 'obj'. | [
"Record",
"defect",
"on",
"obj",
"."
] | def register_defect(self, obj, defect):
"""Record 'defect' on 'obj'.
Called by handle_defect if raise_on_defect is False. This method is
part of the Policy API so that Policy subclasses can implement custom
defect handling. The default implementation calls the append method of
the defects attribute of obj. The objects used by the email package by
default that get passed to this method will always have a defects
attribute with an append method.
"""
obj.defects.append(defect) | [
"def",
"register_defect",
"(",
"self",
",",
"obj",
",",
"defect",
")",
":",
"obj",
".",
"defects",
".",
"append",
"(",
"defect",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/email/_policybase.py#L188-L199 |
||
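The docstring above presents `register_defect` as a Policy API hook, so a natural sketch of the intended customization is a subclass that intercepts defects before delegating to the default list-append behaviour. This uses the public `email.policy` module from Python 3; the malformed header below is just one way to trigger a defect, and the exact defect class depends on the Python version.

```python
# Sketch of the customization hook described above: a Policy subclass
# that logs each defect and then falls back to the default behaviour.
import logging
from email import message_from_string
from email.policy import EmailPolicy

class LoggingPolicy(EmailPolicy):
    def register_defect(self, obj, defect):
        logging.warning("defect on %r: %s", obj, defect)
        super().register_defect(obj, defect)

# A header line without a colon makes the parser register a defect.
msg = message_from_string("Subject: hi\nBad-header-without-colon\n\nbody\n",
                          policy=LoggingPolicy())
print(msg.defects)
```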
hpi-xnor/BMXNet | ed0b201da6667887222b8e4b5f997c4f6b61943d | example/rcnn/rcnn/pycocotools/coco.py | python | COCO.loadImgs | (self, ids=[]) | | Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects | Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects | [
"Load",
"anns",
"with",
"the",
"specified",
"ids",
".",
":",
"param",
"ids",
"(",
"int",
"array",
")",
":",
"integer",
"ids",
"specifying",
"img",
":",
"return",
":",
"imgs",
"(",
"object",
"array",
")",
":",
"loaded",
"img",
"objects"
] | def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]] | [
"def",
"loadImgs",
"(",
"self",
",",
"ids",
"=",
"[",
"]",
")",
":",
"if",
"type",
"(",
"ids",
")",
"==",
"list",
":",
"return",
"[",
"self",
".",
"imgs",
"[",
"id",
"]",
"for",
"id",
"in",
"ids",
"]",
"elif",
"type",
"(",
"ids",
")",
"==",
"int",
":",
"return",
"[",
"self",
".",
"imgs",
"[",
"ids",
"]",
"]"
] | https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/example/rcnn/rcnn/pycocotools/coco.py#L234-L243 |
||
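A hedged usage sketch for `COCO.loadImgs`, assuming the `pycocotools` package and a COCO-format annotation file at the placeholder path below; `id`, `file_name`, `height`, and `width` are the standard COCO image-dict fields.

```python
# Hedged usage sketch for COCO.loadImgs; the annotation path below is a
# placeholder for a real COCO-format JSON file.
from pycocotools.coco import COCO

coco = COCO("annotations/instances_val2017.json")
img_ids = coco.getImgIds()[:3]          # first three image ids
for img in coco.loadImgs(img_ids):      # returns a list of img dicts
    print(img["id"], img["file_name"], img["height"], img["width"])
```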
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/mozbuild/mozpack/copier.py | python | FileRegistry.required_directories | (self) | return set(k for k, v in self._required_directories.items() if v > 0) | Return the set of directories required by the paths in the container,
in no particular order. The returned directories are relative to an
unspecified (virtual) root directory (and do not include said root
directory). | Return the set of directories required by the paths in the container,
in no particular order. The returned directories are relative to an
unspecified (virtual) root directory (and do not include said root
directory). | [
"Return",
"the",
"set",
"of",
"directories",
"required",
"by",
"the",
"paths",
"in",
"the",
"container",
"in",
"no",
"particular",
"order",
".",
"The",
"returned",
"directories",
"are",
"relative",
"to",
"an",
"unspecified",
"(",
"virtual",
")",
"root",
"directory",
"(",
"and",
"do",
"not",
"include",
"said",
"root",
"directory",
")",
"."
] | def required_directories(self):
'''
Return the set of directories required by the paths in the container,
in no particular order. The returned directories are relative to an
unspecified (virtual) root directory (and do not include said root
directory).
'''
return set(k for k, v in self._required_directories.items() if v > 0) | [
"def",
"required_directories",
"(",
"self",
")",
":",
"return",
"set",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_required_directories",
".",
"items",
"(",
")",
"if",
"v",
">",
"0",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/mozbuild/mozpack/copier.py#L137-L144 |
|
Tencent/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/xgettext.py | python | exists | (env) | | Check whether the tool exists | Check whether the tool exists | [
"Check",
"whether",
"the",
"tool",
"exists"
] | def exists(env):
""" Check, whether the tool exists """
from SCons.Tool.GettextCommon import _xgettext_exists
try:
return _xgettext_exists(env)
except:
return False | [
"def",
"exists",
"(",
"env",
")",
":",
"from",
"SCons",
".",
"Tool",
".",
"GettextCommon",
"import",
"_xgettext_exists",
"try",
":",
"return",
"_xgettext_exists",
"(",
"env",
")",
"except",
":",
"return",
"False"
] | https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/xgettext.py#L326-L332 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib2to3/pytree.py | python | NegatedPattern.__init__ | (self, content=None) | Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches. | Initializer. | [
"Initializer",
"."
] | def __init__(self, content=None):
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content | [
"def",
"__init__",
"(",
"self",
",",
"content",
"=",
"None",
")",
":",
"if",
"content",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"content",
",",
"BasePattern",
")",
",",
"repr",
"(",
"content",
")",
"self",
".",
"content",
"=",
"content"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib2to3/pytree.py#L829-L840 |
||
LLNL/lbann | 26083e6c86050302ce33148aea70f62e61cacb92 | applications/physics/cosmology/cosmoflow/cosmoflow.py | python | create_cosmoflow_data_reader | (
train_path, val_path, test_path, num_responses) | return lbann.reader_pb2.DataReader(reader=readers) | Create a data reader for CosmoFlow.
Args:
{train, val, test}_path (str): Path to the corresponding dataset.
num_responses (int): The number of parameters to predict. | Create a data reader for CosmoFlow. | [
"Create",
"a",
"data",
"reader",
"for",
"CosmoFlow",
"."
] | def create_cosmoflow_data_reader(
train_path, val_path, test_path, num_responses):
"""Create a data reader for CosmoFlow.
Args:
{train, val, test}_path (str): Path to the corresponding dataset.
num_responses (int): The number of parameters to predict.
"""
reader_args = [
{"role": "train", "data_filename": train_path},
{"role": "validate", "data_filename": val_path},
{"role": "test", "data_filename": test_path},
]
for reader_arg in reader_args:
reader_arg["data_file_pattern"] = "{}/*.hdf5".format(
reader_arg["data_filename"])
reader_arg["hdf5_key_data"] = "full"
reader_arg["hdf5_key_responses"] = "unitPar"
reader_arg["num_responses"] = num_responses
reader_arg.pop("data_filename")
readers = []
for reader_arg in reader_args:
reader = lbann.reader_pb2.Reader(
name="hdf5",
shuffle=(reader_arg["role"] != "test"),
validation_percent=0,
absolute_sample_count=0,
percent_of_data_to_use=1.0,
disable_labels=True,
disable_responses=False,
scaling_factor_int16=1.0,
**reader_arg)
readers.append(reader)
return lbann.reader_pb2.DataReader(reader=readers) | [
"def",
"create_cosmoflow_data_reader",
"(",
"train_path",
",",
"val_path",
",",
"test_path",
",",
"num_responses",
")",
":",
"reader_args",
"=",
"[",
"{",
"\"role\"",
":",
"\"train\"",
",",
"\"data_filename\"",
":",
"train_path",
"}",
",",
"{",
"\"role\"",
":",
"\"validate\"",
",",
"\"data_filename\"",
":",
"val_path",
"}",
",",
"{",
"\"role\"",
":",
"\"test\"",
",",
"\"data_filename\"",
":",
"test_path",
"}",
",",
"]",
"for",
"reader_arg",
"in",
"reader_args",
":",
"reader_arg",
"[",
"\"data_file_pattern\"",
"]",
"=",
"\"{}/*.hdf5\"",
".",
"format",
"(",
"reader_arg",
"[",
"\"data_filename\"",
"]",
")",
"reader_arg",
"[",
"\"hdf5_key_data\"",
"]",
"=",
"\"full\"",
"reader_arg",
"[",
"\"hdf5_key_responses\"",
"]",
"=",
"\"unitPar\"",
"reader_arg",
"[",
"\"num_responses\"",
"]",
"=",
"num_responses",
"reader_arg",
".",
"pop",
"(",
"\"data_filename\"",
")",
"readers",
"=",
"[",
"]",
"for",
"reader_arg",
"in",
"reader_args",
":",
"reader",
"=",
"lbann",
".",
"reader_pb2",
".",
"Reader",
"(",
"name",
"=",
"\"hdf5\"",
",",
"shuffle",
"=",
"(",
"reader_arg",
"[",
"\"role\"",
"]",
"!=",
"\"test\"",
")",
",",
"validation_percent",
"=",
"0",
",",
"absolute_sample_count",
"=",
"0",
",",
"percent_of_data_to_use",
"=",
"1.0",
",",
"disable_labels",
"=",
"True",
",",
"disable_responses",
"=",
"False",
",",
"scaling_factor_int16",
"=",
"1.0",
",",
"*",
"*",
"reader_arg",
")",
"readers",
".",
"append",
"(",
"reader",
")",
"return",
"lbann",
".",
"reader_pb2",
".",
"DataReader",
"(",
"reader",
"=",
"readers",
")"
] | https://github.com/LLNL/lbann/blob/26083e6c86050302ce33148aea70f62e61cacb92/applications/physics/cosmology/cosmoflow/cosmoflow.py#L229-L267 |
|
H-uru/Plasma | c2140ea046e82e9c199e257a7f2e7edb42602871 | Scripts/Python/nb01RPSGame.py | python | nb01RPSGame._OnRPSChoice | (self, state, which, events) | We picked a thingo! | We picked a thingo! | [
"We",
"picked",
"a",
"thingo!"
] | def _OnRPSChoice(self, state, which, events):
"""We picked a thingo!"""
if PtWasLocallyNotified(self.key):
self._round_played = True
if self.sceneobject.isLocallyOwned():
avatar = PtFindAvatar(events)
seat = self.players.index(PtGetClientIDFromAvatarKey(avatar.getKey()))
self.SDL.setIndex(SDL_CUR_SELECTION, seat, which + 1)
# Waiting on moves? You just got one. COUNTDOWN!
if self.game_state == GAME_AWAIT_MOVES:
self.game_state = GAME_MOVE_COUNTDOWN | [
"def",
"_OnRPSChoice",
"(",
"self",
",",
"state",
",",
"which",
",",
"events",
")",
":",
"if",
"PtWasLocallyNotified",
"(",
"self",
".",
"key",
")",
":",
"self",
".",
"_round_played",
"=",
"True",
"if",
"self",
".",
"sceneobject",
".",
"isLocallyOwned",
"(",
")",
":",
"avatar",
"=",
"PtFindAvatar",
"(",
"events",
")",
"seat",
"=",
"self",
".",
"players",
".",
"index",
"(",
"PtGetClientIDFromAvatarKey",
"(",
"avatar",
".",
"getKey",
"(",
")",
")",
")",
"self",
".",
"SDL",
".",
"setIndex",
"(",
"SDL_CUR_SELECTION",
",",
"seat",
",",
"which",
"+",
"1",
")",
"# Waiting on moves? You just got one. COUNTDOWN!",
"if",
"self",
".",
"game_state",
"==",
"GAME_AWAIT_MOVES",
":",
"self",
".",
"game_state",
"=",
"GAME_MOVE_COUNTDOWN"
] | https://github.com/H-uru/Plasma/blob/c2140ea046e82e9c199e257a7f2e7edb42602871/Scripts/Python/nb01RPSGame.py#L453-L463 |
||
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | python | XCConfigurationList.AppendBuildSetting | (self, key, value) | Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects. | Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects. | [
"Appends",
"value",
"to",
"the",
"build",
"setting",
"for",
"key",
"which",
"is",
"treated",
"as",
"a",
"list",
"in",
"all",
"child",
"XCBuildConfiguration",
"objects",
"."
] | def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value) | [
"def",
"AppendBuildSetting",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"for",
"configuration",
"in",
"self",
".",
"_properties",
"[",
"'buildConfigurations'",
"]",
":",
"configuration",
".",
"AppendBuildSetting",
"(",
"key",
",",
"value",
")"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py#L1680-L1686 |
||
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TIntV.AddBackSorted | (self, *args) | return _snap.TIntV_AddBackSorted(self, *args) | AddBackSorted(TIntV self, TInt Val, bool const & Asc) -> int
Parameters:
Val: TInt const &
Asc: bool const & | AddBackSorted(TIntV self, TInt Val, bool const & Asc) -> int | [
"AddBackSorted",
"(",
"TIntV",
"self",
"TInt",
"Val",
"bool",
"const",
"&",
"Asc",
")",
"-",
">",
"int"
] | def AddBackSorted(self, *args):
"""
AddBackSorted(TIntV self, TInt Val, bool const & Asc) -> int
Parameters:
Val: TInt const &
Asc: bool const &
"""
return _snap.TIntV_AddBackSorted(self, *args) | [
"def",
"AddBackSorted",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_snap",
".",
"TIntV_AddBackSorted",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L15716-L15725 |
|
wujian16/Cornell-MOE | df299d1be882d2af9796d7a68b3f9505cac7a53e | moe/optimal_learning/python/repeated_domain.py | python | RepeatedDomain.get_constraint_list | (self) | return constraints | Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.
Calls ``self._domain.get_constraint_list()`` for each repeat, writing the results sequentially.
So output[0:2*dim] is from the first repeated domain, output[2*dim:4*dim] is from the second, etc.
:return: a list of lambda functions corresponding to constraints
:rtype: array of lambda functions with shape (num_repeats * dim * 2) | Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA. | [
"Return",
"a",
"list",
"of",
"lambda",
"functions",
"expressing",
"the",
"domain",
"bounds",
"as",
"linear",
"constraints",
".",
"Used",
"by",
"COBYLA",
"."
] | def get_constraint_list(self):
"""Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.
Calls ``self._domain.get_constraint_list()`` for each repeat, writing the results sequentially.
So output[0:2*dim] is from the first repeated domain, output[2*dim:4*dim] is from the second, etc.
:return: a list of lambda functions corresponding to constraints
:rtype: array of lambda functions with shape (num_repeats * dim * 2)
"""
constraints = []
for i in range(self.num_repeats):
# Using start_index, start each domain at the correct index when flattening out points in COBYLA.
constraints.extend(self._domain.get_constraint_list(start_index=self.dim * i))
return constraints | [
"def",
"get_constraint_list",
"(",
"self",
")",
":",
"constraints",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_repeats",
")",
":",
"# Using start_index, start each domain at the correct index when flattening out points in COBYLA.",
"constraints",
".",
"extend",
"(",
"self",
".",
"_domain",
".",
"get_constraint_list",
"(",
"start_index",
"=",
"self",
".",
"dim",
"*",
"i",
")",
")",
"return",
"constraints"
] | https://github.com/wujian16/Cornell-MOE/blob/df299d1be882d2af9796d7a68b3f9505cac7a53e/moe/optimal_learning/python/repeated_domain.py#L83-L97 |
|
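The docstring above explains that repeat i's constraints are written at offset `dim * i`, so COBYLA can evaluate all of them against one flattened point. A standalone sketch of that offsetting follows; note the default-argument capture (`j=start_index + i`), which is what keeps each lambda bound to its own index instead of the loop's final value.

```python
# Sketch of the start_index offsetting described above: each repeat of a
# dim-dimensional box domain contributes 2*dim lambdas that read its own
# slice of one flattened point, the layout COBYLA expects.
def box_constraints(bounds, start_index=0):
    cons = []
    for i, (lo, hi) in enumerate(bounds):
        cons.append(lambda x, j=start_index + i, lo=lo: x[j] - lo)  # x[j] >= lo
        cons.append(lambda x, j=start_index + i, hi=hi: hi - x[j])  # x[j] <= hi
    return cons

def repeated_constraints(bounds, num_repeats):
    dim = len(bounds)
    cons = []
    for r in range(num_repeats):
        cons.extend(box_constraints(bounds, start_index=dim * r))
    return cons

flat_point = [0.5, 0.5, 1.5, 0.2]             # 2 repeats of a 2D domain
cons = repeated_constraints([(0.0, 1.0), (0.0, 1.0)], num_repeats=2)
print([c(flat_point) >= 0 for c in cons])     # third coord violates its hi bound
```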
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/filelist.py | python | FileList.debug_print | (self, msg) | Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true. | Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true. | [
"Print",
"msg",
"to",
"stdout",
"if",
"the",
"global",
"DEBUG",
"(",
"taken",
"from",
"the",
"DISTUTILS_DEBUG",
"environment",
"variable",
")",
"flag",
"is",
"true",
"."
] | def debug_print(self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print(msg) | [
"def",
"debug_print",
"(",
"self",
",",
"msg",
")",
":",
"from",
"distutils",
".",
"debug",
"import",
"DEBUG",
"if",
"DEBUG",
":",
"print",
"(",
"msg",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/filelist.py#L41-L47 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py3/numpy/lib/format.py | python | write_array_header_2_0 | (fp, d) | Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file. | Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays. | [
"Write",
"the",
"header",
"for",
"an",
"array",
"using",
"the",
"2",
".",
"0",
"format",
".",
"The",
"2",
".",
"0",
"format",
"allows",
"storing",
"very",
"large",
"structured",
"arrays",
"."
] | def write_array_header_2_0(fp, d):
""" Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0)) | [
"def",
"write_array_header_2_0",
"(",
"fp",
",",
"d",
")",
":",
"_write_array_header",
"(",
"fp",
",",
"d",
",",
"(",
"2",
",",
"0",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/lib/format.py#L455-L468 |
||
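A hedged sketch of driving the 2.0 header writer by hand with the helpers in `numpy.lib.format`; these are private APIs, and ordinary code should rely on `np.save`, which switches to format 2.0 automatically when a header outgrows the 16-bit length field of format 1.0.

```python
# Hedged sketch using private numpy.lib.format helpers; prefer np.save
# in real code.
import io
import numpy as np
from numpy.lib import format as npfmt

arr = np.zeros((3, 4), dtype=np.float64)
d = npfmt.header_data_from_array_1_0(arr)   # {'descr', 'fortran_order', 'shape'}

buf = io.BytesIO()
npfmt.write_array_header_2_0(buf, d)
print(buf.getvalue()[:10])                  # magic string plus version (2, 0)
```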
carla-simulator/carla | 8854804f4d7748e14d937ec763a2912823a7e5f5 | Co-Simulation/Sumo/sumo_integration/bridge_helper.py | python | BridgeHelper.get_carla_traffic_light_state | (sumo_tl_state) | Returns carla traffic light state based on sumo traffic light state. | Returns carla traffic light state based on sumo traffic light state. | [
"Returns",
"carla",
"traffic",
"light",
"state",
"based",
"on",
"sumo",
"traffic",
"light",
"state",
"."
] | def get_carla_traffic_light_state(sumo_tl_state):
"""
Returns carla traffic light state based on sumo traffic light state.
"""
if sumo_tl_state == SumoSignalState.RED or sumo_tl_state == SumoSignalState.RED_YELLOW:
return carla.TrafficLightState.Red
elif sumo_tl_state == SumoSignalState.YELLOW:
return carla.TrafficLightState.Yellow
elif sumo_tl_state == SumoSignalState.GREEN or \
sumo_tl_state == SumoSignalState.GREEN_WITHOUT_PRIORITY:
return carla.TrafficLightState.Green
elif sumo_tl_state == SumoSignalState.OFF:
return carla.TrafficLightState.Off
else: # SumoSignalState.GREEN_RIGHT_TURN and SumoSignalState.OFF_BLINKING
return carla.TrafficLightState.Unknown | [
"def",
"get_carla_traffic_light_state",
"(",
"sumo_tl_state",
")",
":",
"if",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"RED",
"or",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"RED_YELLOW",
":",
"return",
"carla",
".",
"TrafficLightState",
".",
"Red",
"elif",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"YELLOW",
":",
"return",
"carla",
".",
"TrafficLightState",
".",
"Yellow",
"elif",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"GREEN",
"or",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"GREEN_WITHOUT_PRIORITY",
":",
"return",
"carla",
".",
"TrafficLightState",
".",
"Green",
"elif",
"sumo_tl_state",
"==",
"SumoSignalState",
".",
"OFF",
":",
"return",
"carla",
".",
"TrafficLightState",
".",
"Off",
"else",
":",
"# SumoSignalState.GREEN_RIGHT_TURN and SumoSignalState.OFF_BLINKING",
"return",
"carla",
".",
"TrafficLightState",
".",
"Unknown"
] | https://github.com/carla-simulator/carla/blob/8854804f4d7748e14d937ec763a2912823a7e5f5/Co-Simulation/Sumo/sumo_integration/bridge_helper.py#L334-L352 |
||
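The chain of branches above is a pure state mapping, so it can be sketched as a lookup table with a default. Neither `carla` nor SUMO is imported here; the single-character keys mirror SUMO's conventional signal letters ('r', 'u', 'y', 'g', 'G', 'O'), but both the letters and the string results are assumptions in this standalone form.

```python
# Standalone sketch of the signal-state mapping above as a lookup table
# with a default; the keys and values are assumptions, since neither
# carla nor sumo is imported here.
_SUMO_TO_CARLA = {
    "r": "Red",          # SumoSignalState.RED
    "u": "Red",          # SumoSignalState.RED_YELLOW
    "y": "Yellow",       # SumoSignalState.YELLOW
    "G": "Green",        # SumoSignalState.GREEN
    "g": "Green",        # SumoSignalState.GREEN_WITHOUT_PRIORITY
    "O": "Off",          # SumoSignalState.OFF
}

def sumo_to_carla_state(sumo_tl_state):
    # GREEN_RIGHT_TURN and OFF_BLINKING fall through to Unknown,
    # matching the else branch of the real function.
    return _SUMO_TO_CARLA.get(sumo_tl_state, "Unknown")

print(sumo_to_carla_state("y"), sumo_to_carla_state("s"))
```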
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/Diffraction/isis_powder/routines/common.py | python | generate_run_numbers | (run_number_string) | return run_list | Generates a list of run numbers as a list from the input. This input can be either a string or int type
and uses the same syntax that Mantid supports i.e. 1-10 generates 1,2,3...9,10 inclusive and commas can specify
breaks between runs
:param run_number_string: The string or int to convert into a list of run numbers
:return: A list of run numbers generated from the string | Generates a list of run numbers as a list from the input. This input can be either a string or int type
and uses the same syntax that Mantid supports i.e. 1-10 generates 1,2,3...9,10 inclusive and commas can specify
breaks between runs
:param run_number_string: The string or int to convert into a list of run numbers
:return: A list of run numbers generated from the string | [
"Generates",
"a",
"list",
"of",
"run",
"numbers",
"as",
"a",
"list",
"from",
"the",
"input",
".",
"This",
"input",
"can",
"be",
"either",
"a",
"string",
"or",
"int",
"type",
"and",
"uses",
"the",
"same",
"syntax",
"that",
"Mantid",
"supports",
"i",
".",
"e",
".",
"1",
"-",
"10",
"generates",
"1",
"2",
"3",
"...",
"9",
"10",
"inclusive",
"and",
"commas",
"can",
"specify",
"breaks",
"between",
"runs",
":",
"param",
"run_number_string",
":",
"The",
"string",
"or",
"int",
"to",
"convert",
"into",
"a",
"list",
"of",
"run",
"numbers",
":",
"return",
":",
"A",
"list",
"of",
"run",
"numbers",
"generated",
"from",
"the",
"string"
] | def generate_run_numbers(run_number_string):
"""
Generates a list of run numbers as a list from the input. This input can be either a string or int type
and uses the same syntax that Mantid supports i.e. 1-10 generates 1,2,3...9,10 inclusive and commas can specify
breaks between runs
:param run_number_string: The string or int to convert into a list of run numbers
:return: A list of run numbers generated from the string
"""
# Check it's not a single run
if isinstance(run_number_string, int):
# Cast into a list and return
return [run_number_string]
elif isinstance(run_number_string, str) and run_number_string.isdigit():
# We can let Python handle the conversion in this case
return [int(run_number_string)]
# If its a string we must parse it
run_number_string = run_number_string.strip()
run_boundaries = run_number_string.replace('_', '-') # Accept either _ or - delimiters
run_list = _run_number_generator(processed_string=run_boundaries)
return run_list | [
"def",
"generate_run_numbers",
"(",
"run_number_string",
")",
":",
"# Check its not a single run",
"if",
"isinstance",
"(",
"run_number_string",
",",
"int",
")",
":",
"# Cast into a list and return",
"return",
"[",
"run_number_string",
"]",
"elif",
"isinstance",
"(",
"run_number_string",
",",
"str",
")",
"and",
"run_number_string",
".",
"isdigit",
"(",
")",
":",
"# We can let Python handle the conversion in this case",
"return",
"[",
"int",
"(",
"run_number_string",
")",
"]",
"# If its a string we must parse it",
"run_number_string",
"=",
"run_number_string",
".",
"strip",
"(",
")",
"run_boundaries",
"=",
"run_number_string",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"# Accept either _ or - delimiters",
"run_list",
"=",
"_run_number_generator",
"(",
"processed_string",
"=",
"run_boundaries",
")",
"return",
"run_list"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/Diffraction/isis_powder/routines/common.py#L182-L202 |
|
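A hedged usage sketch for `generate_run_numbers`, assuming the `isis_powder` scripts are importable from a Mantid installation (the import path is inferred from the record's file path); the expected outputs in the comments follow the syntax rules stated in the docstring, with inclusive ranges and `_` accepted as a `-` delimiter.

```python
# Hedged usage sketch; the import path is an assumption based on the
# record's file path, and expected outputs follow the docstring above.
from isis_powder.routines.common import generate_run_numbers

print(generate_run_numbers(42))          # [42] - an int passes straight through
print(generate_run_numbers("100"))       # [100] - digit-only string
print(generate_run_numbers("100-103"))   # [100, 101, 102, 103] - inclusive range
print(generate_run_numbers("1_3"))       # likely [1, 2, 3]; '_' is accepted as '-'
```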
OPAE/opae-sdk | 221124343c8275243a249eb72d69e0ea2d568d1b | binaries/qpafilter/qpafilter.py | python | temp_verifier.verify_units | (self, units) | return units in [DEGREES_C, DEGREES_c] | Temperature units must be degrees C. | Temperature units must be degrees C. | [
"Temperature",
"units",
"must",
"be",
"degrees",
"C",
"."
] | def verify_units(self, units):
"""Temperature units must be degrees C."""
return units in [DEGREES_C, DEGREES_c] | [
"def",
"verify_units",
"(",
"self",
",",
"units",
")",
":",
"return",
"units",
"in",
"[",
"DEGREES_C",
",",
"DEGREES_c",
"]"
] | https://github.com/OPAE/opae-sdk/blob/221124343c8275243a249eb72d69e0ea2d568d1b/binaries/qpafilter/qpafilter.py#L232-L234 |
|
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/minimum-time-visiting-all-points.py | python | Solution.minTimeToVisitAllPoints | (self, points) | return sum(max(abs(points[i+1][0] - points[i][0]),
abs(points[i+1][1] - points[i][1]))
for i in xrange(len(points)-1)) | :type points: List[List[int]]
:rtype: int | :type points: List[List[int]]
:rtype: int | [
":",
"type",
"points",
":",
"List",
"[",
"List",
"[",
"int",
"]]",
":",
"rtype",
":",
"int"
] | def minTimeToVisitAllPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
return sum(max(abs(points[i+1][0] - points[i][0]),
abs(points[i+1][1] - points[i][1]))
for i in xrange(len(points)-1)) | [
"def",
"minTimeToVisitAllPoints",
"(",
"self",
",",
"points",
")",
":",
"return",
"sum",
"(",
"max",
"(",
"abs",
"(",
"points",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"-",
"points",
"[",
"i",
"]",
"[",
"0",
"]",
")",
",",
"abs",
"(",
"points",
"[",
"i",
"+",
"1",
"]",
"[",
"1",
"]",
"-",
"points",
"[",
"i",
"]",
"[",
"1",
"]",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"points",
")",
"-",
"1",
")",
")"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/minimum-time-visiting-all-points.py#L5-L12 |
|
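The one-liner above works because diagonal moves advance both coordinates at once, so the travel time between consecutive points is the Chebyshev distance `max(|dx|, |dy|)`, and the answer is the sum over consecutive pairs. A Python 3 rewrite (`zip` over pairs instead of `xrange` indexing) with the problem's sample inputs:

```python
# Python 3 rewrite of the solution above: each leg costs the Chebyshev
# distance max(|dx|, |dy|) because a diagonal step covers one unit on
# both axes per second.
def min_time_to_visit_all_points(points):
    return sum(max(abs(b[0] - a[0]), abs(b[1] - a[1]))
               for a, b in zip(points, points[1:]))

print(min_time_to_visit_all_points([[1, 1], [3, 4], [-1, 0]]))  # 7
print(min_time_to_visit_all_points([[3, 2], [-2, 2]]))          # 5
```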
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/dataview.py | python | DataViewCtrl.PrependIconTextColumn | (*args, **kwargs) | return _dataview.DataViewCtrl_PrependIconTextColumn(*args, **kwargs) | PrependIconTextColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=-1,
int align=ALIGN_NOT, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn | PrependIconTextColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=-1,
int align=ALIGN_NOT, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn | [
"PrependIconTextColumn",
"(",
"self",
"PyObject",
"label_or_bitmap",
"unsigned",
"int",
"model_column",
"int",
"mode",
"=",
"DATAVIEW_CELL_INERT",
"int",
"width",
"=",
"-",
"1",
"int",
"align",
"=",
"ALIGN_NOT",
"int",
"flags",
"=",
"DATAVIEW_COL_RESIZABLE",
")",
"-",
">",
"DataViewColumn"
] | def PrependIconTextColumn(*args, **kwargs):
"""
PrependIconTextColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=-1,
int align=ALIGN_NOT, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn
"""
return _dataview.DataViewCtrl_PrependIconTextColumn(*args, **kwargs) | [
"def",
"PrependIconTextColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewCtrl_PrependIconTextColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L1597-L1603 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/mimetypes.py | python | MimeTypes.readfp | (self, fp, strict=True) | Read a single mime.types-format file.
If strict is true, information will be added to
the list of standard types, else to the list of non-standard
types. | Read a single mime.types-format file. | [
"Read",
"a",
"single",
"mime",
".",
"types",
"-",
"format",
"file",
"."
] | def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
the list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict) | [
"def",
"readfp",
"(",
"self",
",",
"fp",
",",
"strict",
"=",
"True",
")",
":",
"while",
"1",
":",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"words",
"=",
"line",
".",
"split",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"words",
")",
")",
":",
"if",
"words",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"'#'",
":",
"del",
"words",
"[",
"i",
":",
"]",
"break",
"if",
"not",
"words",
":",
"continue",
"type",
",",
"suffixes",
"=",
"words",
"[",
"0",
"]",
",",
"words",
"[",
"1",
":",
"]",
"for",
"suff",
"in",
"suffixes",
":",
"self",
".",
"add_type",
"(",
"type",
",",
"'.'",
"+",
"suff",
",",
"strict",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/mimetypes.py#L205-L226 |
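The same parsing loop can be exercised through the modern stdlib mimetypes module, which exposes an equivalent MimeTypes.readfp(); the sample types below are made up for illustration:

```python
import io
import mimetypes

sample = io.StringIO(
    "text/x-demo demo dmo  # made-up type for illustration\n"
    "# full-line comment\n"
    "application/x-sketch sk\n"
)
db = mimetypes.MimeTypes()
db.readfp(sample, strict=True)          # same format: "<type> <suffix>..."
assert db.guess_type('notes.demo')[0] == 'text/x-demo'
assert db.guess_type('figure.sk')[0] == 'application/x-sketch'
```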
||
reverbrain/elliptics | 4b4f9b8094d7616c1ec50eb8605edb059b9f228e | recovery/elliptics_recovery/dc_server_send.py | python | ServerSendRecovery._on_server_send_timeout | (self, keys, key_infos_map, group_id) | @keys - is a list of keys that were not recovered due to timeout.
Moves keys to next bucket, if appropriate bucket meta is identical to current meta,
because less relevant replica (e.g. with older timestamp) should not be used for
recovery due to temporary unavailability of relevant replica during recovery process. | [] | def _on_server_send_timeout(self, keys, key_infos_map, group_id):
'''
@keys - is a list of keys that were not recovered due to timeout.
Moves keys to next bucket, if appropriate bucket meta is identical to current meta,
because less relevant replica (e.g. with older timestamp) should not be used for
recovery due to temporary unavailability of relevant replica during recovery process.
'''
same_meta = lambda lhs, rhs: (lhs.timestamp, lhs.size, lhs.user_flags) == (rhs.timestamp, rhs.size, rhs.user_flags)
num_failed_keys = 0
for key in keys:
key_infos = key_infos_map[str(key)]
filtered_key_infos = self._get_unprocessed_key_infos(key_infos, group_id)
if len(filtered_key_infos) > 1:
current_meta = filtered_key_infos[0]
next_meta = filtered_key_infos[1]
if same_meta(current_meta, next_meta):
self.buckets.on_server_send_fail(key, key_infos, next_meta.group_id)
continue
num_failed_keys += 1
if num_failed_keys > 0:
self.result = False
self._update_timeouted_keys_stats(num_failed_keys) | [
"def",
"_on_server_send_timeout",
"(",
"self",
",",
"keys",
",",
"key_infos_map",
",",
"group_id",
")",
":",
"same_meta",
"=",
"lambda",
"lhs",
",",
"rhs",
":",
"(",
"lhs",
".",
"timestamp",
",",
"lhs",
".",
"size",
",",
"lhs",
".",
"user_flags",
")",
"==",
"(",
"rhs",
".",
"timestamp",
",",
"rhs",
".",
"size",
",",
"rhs",
".",
"user_flags",
")",
"num_failed_keys",
"=",
"0",
"for",
"key",
"in",
"keys",
":",
"key_infos",
"=",
"key_infos_map",
"[",
"str",
"(",
"key",
")",
"]",
"filtered_key_infos",
"=",
"self",
".",
"_get_unprocessed_key_infos",
"(",
"key_infos",
",",
"group_id",
")",
"if",
"len",
"(",
"filtered_key_infos",
")",
">",
"1",
":",
"current_meta",
"=",
"filtered_key_infos",
"[",
"0",
"]",
"next_meta",
"=",
"filtered_key_infos",
"[",
"1",
"]",
"if",
"same_meta",
"(",
"current_meta",
",",
"next_meta",
")",
":",
"self",
".",
"buckets",
".",
"on_server_send_fail",
"(",
"key",
",",
"key_infos",
",",
"next_meta",
".",
"group_id",
")",
"continue",
"num_failed_keys",
"+=",
"1",
"if",
"num_failed_keys",
">",
"0",
":",
"self",
".",
"result",
"=",
"False",
"self",
".",
"_update_timeouted_keys_stats",
"(",
"num_failed_keys",
")"
] | https://github.com/reverbrain/elliptics/blob/4b4f9b8094d7616c1ec50eb8605edb059b9f228e/recovery/elliptics_recovery/dc_server_send.py#L231-L253 |
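A sketch of the core decision above with hypothetical stand-in records: a timed-out key is only retried against the next replica when that replica's metadata matches the current one, otherwise it counts as failed:

```python
from collections import namedtuple

Meta = namedtuple('Meta', 'timestamp size user_flags group_id')

def same_meta(lhs, rhs):
    return (lhs.timestamp, lhs.size, lhs.user_flags) == \
           (rhs.timestamp, rhs.size, rhs.user_flags)

current = Meta(100, 4096, 0, group_id=1)
nxt = Meta(100, 4096, 0, group_id=2)      # identical replica in group 2
stale = Meta(90, 4096, 0, group_id=3)     # older timestamp

assert same_meta(current, nxt)            # safe to retry in the next group
assert not same_meta(current, stale)      # would count as a failed key
```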
|||
pyne/pyne | 0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3 | pyne/mcnp.py | python | SurfSrc.read_header | (self) | Read in the header block data. This block comprises 4 fortran
records which we refer to as: header, table1, table2, summary. | Read in the header block data. This block comprises 4 fortran
records which we refer to as: header, table1, table2, summary. | [
"Read",
"in",
"the",
"header",
"block",
"data",
".",
"This",
"block",
"comprises",
"4",
"fortran",
"records",
"which",
"we",
"refer",
"to",
"as",
":",
"header",
"table1",
"table2",
"summary",
"."
] | def read_header(self):
"""Read in the header block data. This block comprises 4 fortran
records which we refer to as: header, table1, table2, summary.
"""
# read header record
header = self.get_fortran_record()
# interpret header
self.kod = header.get_string(8)[0] # code identifier
if 'SF_00001' not in self.kod:
self.ver = header.get_string(5)[0] # code version identifier
if '2.6.0' in self.ver:
self.loddat = header.get_string(28)[0] # code version date
elif '5 ' in self.ver:
self.loddat = header.get_string(8)[0] # code version date
else:
raise NotImplementedError("MCNP5/X Version:" +
self.ver.rstrip() + " not supported")
self.idtm = header.get_string(19)[0] # current date and time
self.probid = header.get_string(19)[0] # problem id string
self.aid = header.get_string(80)[0] # title card of initial run
self.knod = header.get_int()[0] # dump number
# read table 1 record; various counts and sizes
tablelengths = self.get_fortran_record()
# interpret table lengths
if '2.6.0' in self.ver:
self.np1 = tablelengths.get_int()[0] # hist used to gen. src
self.nrss = tablelengths.get_int()[0] # #tracks to surf src
else:
self.np1 = tablelengths.get_long()[0] # hist used to gen. src
self.nrss = tablelengths.get_long()[0] # #tracks to surf src
# values in surf src record
# 6 for a spherical source
# 11 otherwise
self.ncrd = tablelengths.get_int()[0]
self.njsw = tablelengths.get_int()[0] # number of surfaces
self.niss = tablelengths.get_int()[0] # #histories to surf src
self.table1extra = list()
while tablelengths.num_bytes > tablelengths.pos:
self.table1extra += tablelengths.get_int()
elif 'SF_00001' in self.kod:
header = self.get_fortran_record()
self.ver = header.get_string(12)[0] # code version identifier
self.loddat = header.get_string(9)[0] # code version date
self.idtm = header.get_string(19)[0] # current date and time
self.probid = header.get_string(19)[0] # problem id string
self.aid = header.get_string(80)[0] # title card of initial run
self.knod = header.get_int()[0] # dump number
# read table 1 record; various counts and sizes
tablelengths = self.get_fortran_record()
# interpret table lengths
self.np1 = tablelengths.get_int()[0] # hist used to gen.source
self.notsure0 = tablelengths.get_int()[0] # vals in surf src rec.
self.nrss = tablelengths.get_int()[0] # tracks writ. to surf.src
self.notsure1 = tablelengths.get_int()[0] # number of surfaces
self.ncrd = tablelengths.get_int()[0] # histories to surf.src
self.njsw = tablelengths.get_int()[0] # number of surfaces
self.niss = tablelengths.get_int()[0] # histories to surf.src
self.table1extra = list()
while tablelengths.num_bytes > tablelengths.pos:
self.table1extra += tablelengths.get_int()
if self.np1 < 0:
# read table 2 record; more size info
tablelengths = self.get_fortran_record()
self.niwr = tablelengths.get_int()[0] # #cells in surf.src card
self.mipts = tablelengths.get_int()[0] # source particle type
self.kjaq = tablelengths.get_int()[0] # macrobody facet flag
self.table2extra = list()
while tablelengths.num_bytes > tablelengths.pos:
self.table2extra += tablelengths.get_int()
else:
pass
# Since np1 can be negative, preserve the actual np1 value while
# taking the absolute value so that np1 can be used mathematically
self.orignp1 = self.np1
self.np1 = abs(self.np1)
# get info for each surface
self.surflist = list()
for j in range(self.njsw):
# read next surface info record
self.surfaceinfo = self.get_fortran_record()
surfinfo = SourceSurf()
surfinfo.id = self.surfaceinfo.get_int() # surface ID
if self.kjaq == 1:
surfinfo.facet_id = self.surfaceinfo.get_int() # facet ID
else:
surfinfo.facet_id = -1 # dummy facet ID
surfinfo.type = self.surfaceinfo.get_int() # surface type
surfinfo.num_params = self.surfaceinfo.get_int()[0] # #surface prm
surfinfo.surf_params = \
self.surfaceinfo.get_double(surfinfo.num_params)
self.surflist.append(surfinfo)
# we read any extra records as determined by njsw+niwr...
# no known case of their actual utility is known currently
for j in range(self.njsw, self.njsw+self.niwr):
self.get_fortran_record()
warn("Extra info in header not handled: {0}".format(j),
RuntimeWarning)
# read summary table record
summary_info = self.get_fortran_record()
self.summary_table = summary_info.get_int(
(2+4*self.mipts)*(self.njsw+self.niwr)+1)
self.summary_extra = list()
while summary_info.num_bytes > summary_info.pos:
self.summary_extra += summary_info.get_int() | [
"def",
"read_header",
"(",
"self",
")",
":",
"# read header record",
"header",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"# interpret header",
"self",
".",
"kod",
"=",
"header",
".",
"get_string",
"(",
"8",
")",
"[",
"0",
"]",
"# code identifier",
"if",
"'SF_00001'",
"not",
"in",
"self",
".",
"kod",
":",
"self",
".",
"ver",
"=",
"header",
".",
"get_string",
"(",
"5",
")",
"[",
"0",
"]",
"# code version identifier",
"if",
"'2.6.0'",
"in",
"self",
".",
"ver",
":",
"self",
".",
"loddat",
"=",
"header",
".",
"get_string",
"(",
"28",
")",
"[",
"0",
"]",
"# code version date",
"elif",
"'5 '",
"in",
"self",
".",
"ver",
":",
"self",
".",
"loddat",
"=",
"header",
".",
"get_string",
"(",
"8",
")",
"[",
"0",
"]",
"# code version date",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"MCNP5/X Version:\"",
"+",
"self",
".",
"ver",
".",
"rstrip",
"(",
")",
"+",
"\" not supported\"",
")",
"self",
".",
"idtm",
"=",
"header",
".",
"get_string",
"(",
"19",
")",
"[",
"0",
"]",
"# current date and time",
"self",
".",
"probid",
"=",
"header",
".",
"get_string",
"(",
"19",
")",
"[",
"0",
"]",
"# problem id string",
"self",
".",
"aid",
"=",
"header",
".",
"get_string",
"(",
"80",
")",
"[",
"0",
"]",
"# title card of initial run",
"self",
".",
"knod",
"=",
"header",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# dump number",
"# read table 1 record; various counts and sizes",
"tablelengths",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"# interpret table lengths",
"if",
"'2.6.0'",
"in",
"self",
".",
"ver",
":",
"self",
".",
"np1",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# hist used to gen. src",
"self",
".",
"nrss",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# #tracks to surf src",
"else",
":",
"self",
".",
"np1",
"=",
"tablelengths",
".",
"get_long",
"(",
")",
"[",
"0",
"]",
"# hist used to gen. src",
"self",
".",
"nrss",
"=",
"tablelengths",
".",
"get_long",
"(",
")",
"[",
"0",
"]",
"# #tracks to surf src",
"# values in surf src record",
"# 6 for a spherical source",
"# 11 otherwise",
"self",
".",
"ncrd",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"self",
".",
"njsw",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# number of surfaces",
"self",
".",
"niss",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# #histories to surf src",
"self",
".",
"table1extra",
"=",
"list",
"(",
")",
"while",
"tablelengths",
".",
"num_bytes",
">",
"tablelengths",
".",
"pos",
":",
"self",
".",
"table1extra",
"+=",
"tablelengths",
".",
"get_int",
"(",
")",
"elif",
"'SF_00001'",
"in",
"self",
".",
"kod",
":",
"header",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"self",
".",
"ver",
"=",
"header",
".",
"get_string",
"(",
"12",
")",
"[",
"0",
"]",
"# code version identifier",
"self",
".",
"loddat",
"=",
"header",
".",
"get_string",
"(",
"9",
")",
"[",
"0",
"]",
"# code version date",
"self",
".",
"idtm",
"=",
"header",
".",
"get_string",
"(",
"19",
")",
"[",
"0",
"]",
"# current date and time",
"self",
".",
"probid",
"=",
"header",
".",
"get_string",
"(",
"19",
")",
"[",
"0",
"]",
"# problem id string",
"self",
".",
"aid",
"=",
"header",
".",
"get_string",
"(",
"80",
")",
"[",
"0",
"]",
"# title card of initial run",
"self",
".",
"knod",
"=",
"header",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# dump number",
"# read table 1 record; various counts and sizes",
"tablelengths",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"# interpret table lengths",
"self",
".",
"np1",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# hist used to gen.source",
"self",
".",
"notsure0",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# vals in surf src rec.",
"self",
".",
"nrss",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# tracks writ. to surf.src",
"self",
".",
"notsure1",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# number of surfaces",
"self",
".",
"ncrd",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# histories to surf.src",
"self",
".",
"njsw",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# number of surfaces",
"self",
".",
"niss",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# histories to surf.src",
"self",
".",
"table1extra",
"=",
"list",
"(",
")",
"while",
"tablelengths",
".",
"num_bytes",
">",
"tablelengths",
".",
"pos",
":",
"self",
".",
"table1extra",
"+=",
"tablelengths",
".",
"get_int",
"(",
")",
"if",
"self",
".",
"np1",
"<",
"0",
":",
"# read table 2 record; more size info",
"tablelengths",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"self",
".",
"niwr",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# #cells in surf.src card",
"self",
".",
"mipts",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# source particle type",
"self",
".",
"kjaq",
"=",
"tablelengths",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# macrobody facet flag",
"self",
".",
"table2extra",
"=",
"list",
"(",
")",
"while",
"tablelengths",
".",
"num_bytes",
">",
"tablelengths",
".",
"pos",
":",
"self",
".",
"table2extra",
"+=",
"tablelengths",
".",
"get_int",
"(",
")",
"else",
":",
"pass",
"# Since np1 can be negative, preserve the actual np1 value while",
"# taking the absolute value so that np1 can be used mathematically",
"self",
".",
"orignp1",
"=",
"self",
".",
"np1",
"self",
".",
"np1",
"=",
"abs",
"(",
"self",
".",
"np1",
")",
"# get info for each surface",
"self",
".",
"surflist",
"=",
"list",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"njsw",
")",
":",
"# read next surface info record",
"self",
".",
"surfaceinfo",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"surfinfo",
"=",
"SourceSurf",
"(",
")",
"surfinfo",
".",
"id",
"=",
"self",
".",
"surfaceinfo",
".",
"get_int",
"(",
")",
"# surface ID",
"if",
"self",
".",
"kjaq",
"==",
"1",
":",
"surfinfo",
".",
"facet_id",
"=",
"self",
".",
"surfaceinfo",
".",
"get_int",
"(",
")",
"# facet ID",
"else",
":",
"surfinfo",
".",
"facet_id",
"=",
"-",
"1",
"# dummy facet ID",
"surfinfo",
".",
"type",
"=",
"self",
".",
"surfaceinfo",
".",
"get_int",
"(",
")",
"# surface type",
"surfinfo",
".",
"num_params",
"=",
"self",
".",
"surfaceinfo",
".",
"get_int",
"(",
")",
"[",
"0",
"]",
"# #surface prm",
"surfinfo",
".",
"surf_params",
"=",
"self",
".",
"surfaceinfo",
".",
"get_double",
"(",
"surfinfo",
".",
"num_params",
")",
"self",
".",
"surflist",
".",
"append",
"(",
"surfinfo",
")",
"# we read any extra records as determined by njsw+niwr...",
"# no known case of their actual utility is known currently",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"njsw",
",",
"self",
".",
"njsw",
"+",
"self",
".",
"niwr",
")",
":",
"self",
".",
"get_fortran_record",
"(",
")",
"warn",
"(",
"\"Extra info in header not handled: {0}\"",
".",
"format",
"(",
"j",
")",
",",
"RuntimeWarning",
")",
"# read summary table record",
"summary_info",
"=",
"self",
".",
"get_fortran_record",
"(",
")",
"self",
".",
"summary_table",
"=",
"summary_info",
".",
"get_int",
"(",
"(",
"2",
"+",
"4",
"*",
"self",
".",
"mipts",
")",
"*",
"(",
"self",
".",
"njsw",
"+",
"self",
".",
"niwr",
")",
"+",
"1",
")",
"self",
".",
"summary_extra",
"=",
"list",
"(",
")",
"while",
"summary_info",
".",
"num_bytes",
">",
"summary_info",
".",
"pos",
":",
"self",
".",
"summary_extra",
"+=",
"summary_info",
".",
"get_int",
"(",
")"
] | https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/mcnp.py#L325-L449 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem.py | python | FakeFilesystem.Exists | (self, file_path) | return True | True if a path points to an existing file system object.
Args:
file_path: path to examine
Returns:
bool, True if the object exists, False otherwise
Raises:
TypeError: if file_path is None | True if a path points to an existing file system object. | [
"True",
"if",
"a",
"path",
"points",
"to",
"an",
"existing",
"file",
"system",
"object",
"."
] | def Exists(self, file_path):
"""True if a path points to an existing file system object.
Args:
file_path: path to examine
Returns:
bool, True if the object exists, False otherwise
Raises:
TypeError: if file_path is None
"""
if file_path is None:
raise TypeError
if not file_path:
return False
try:
file_path = self.ResolvePath(file_path)
except IOError:
return False
if file_path == self.root.name:
return True
path_components = self.GetPathComponents(file_path)
current_dir = self.root
for component in path_components:
if component not in current_dir.contents:
return False
current_dir = current_dir.contents[component]
return True | [
"def",
"Exists",
"(",
"self",
",",
"file_path",
")",
":",
"if",
"file_path",
"is",
"None",
":",
"raise",
"TypeError",
"if",
"not",
"file_path",
":",
"return",
"False",
"try",
":",
"file_path",
"=",
"self",
".",
"ResolvePath",
"(",
"file_path",
")",
"except",
"IOError",
":",
"return",
"False",
"if",
"file_path",
"==",
"self",
".",
"root",
".",
"name",
":",
"return",
"True",
"path_components",
"=",
"self",
".",
"GetPathComponents",
"(",
"file_path",
")",
"current_dir",
"=",
"self",
".",
"root",
"for",
"component",
"in",
"path_components",
":",
"if",
"component",
"not",
"in",
"current_dir",
".",
"contents",
":",
"return",
"False",
"current_dir",
"=",
"current_dir",
".",
"contents",
"[",
"component",
"]",
"return",
"True"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem.py#L587-L615 |
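A minimal sketch of the final lookup loop, with a plain nested dict standing in for the FakeDirectory objects the real class walks:

```python
def exists_in(root_contents, path_components):
    current = root_contents
    for component in path_components:
        if component not in current:
            return False          # fail fast on a missing component
        current = current[component]
    return True

tree = {'tmp': {'log': {'a.txt': {}}}}
assert exists_in(tree, ['tmp', 'log', 'a.txt'])
assert not exists_in(tree, ['tmp', 'cache'])
```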
|
openvinotoolkit/openvino | dedcbeafa8b84cccdc55ca64b8da516682b381c7 | cmake/developer_package/cpplint/cpplint.py | python | _CppLintState.SetOutputFormat | (self, output_format) | Sets the output format for errors. | Sets the output format for errors. | [
"Sets",
"the",
"output",
"format",
"for",
"errors",
"."
] | def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format | [
"def",
"SetOutputFormat",
"(",
"self",
",",
"output_format",
")",
":",
"self",
".",
"output_format",
"=",
"output_format"
] | https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/cmake/developer_package/cpplint/cpplint.py#L1030-L1032 |
||
tensorflow/minigo | 6d89c202cdceaf449aefc3149ab2110d44f1a6a4 | bigtable_input.py | python | GameQueue.count_moves_in_game_range | (self, game_begin, game_end) | return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows]) | Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary. | Count the total moves in a game range. | [
"Count",
"the",
"total",
"moves",
"in",
"a",
"game",
"range",
"."
] | def count_moves_in_game_range(self, game_begin, game_end):
"""Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary.
"""
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows]) | [
"def",
"count_moves_in_game_range",
"(",
"self",
",",
"game_begin",
",",
"game_end",
")",
":",
"rows",
"=",
"self",
".",
"bt_table",
".",
"read_rows",
"(",
"ROWCOUNT_PREFIX",
".",
"format",
"(",
"game_begin",
")",
",",
"ROWCOUNT_PREFIX",
".",
"format",
"(",
"game_end",
")",
",",
"filter_",
"=",
"bigtable_row_filters",
".",
"ColumnRangeFilter",
"(",
"METADATA",
",",
"MOVE_COUNT",
",",
"MOVE_COUNT",
")",
")",
"return",
"sum",
"(",
"[",
"int",
"(",
"r",
".",
"cell_value",
"(",
"METADATA",
",",
"MOVE_COUNT",
")",
")",
"for",
"r",
"in",
"rows",
"]",
")"
] | https://github.com/tensorflow/minigo/blob/6d89c202cdceaf449aefc3149ab2110d44f1a6a4/bigtable_input.py#L467-L481 |
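The Bigtable read itself needs live credentials, but the aggregation step is just a sum over per-game move-count cells; a stub sketch of that step (FakeRow and its counts are assumptions):

```python
class FakeRow:
    """Stand-in for a Bigtable row; real cells come back as bytes."""
    def __init__(self, count):
        self._count = count
    def cell_value(self, family, column):
        return str(self._count).encode()

rows = [FakeRow(180), FakeRow(212), FakeRow(95)]
total = sum(int(r.cell_value('metadata', b'move_count')) for r in rows)
assert total == 487
```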
|
mhammond/pywin32 | 44afd86ba8485194df93234639243252deeb40d5 | adodbapi/adodbapi.py | python | Cursor.get_returned_parameters | (self) | return retLst | with some providers, returned parameters and the .return_value are not available until
after the last recordset has been read. In that case, you must call nextset() until it
returns None, then call this method to get your returned information. | with some providers, returned parameters and the .return_value are not available until
after the last recordset has been read. In that case, you must call nextset() until it
returns None, then call this method to get your returned information. | [
"with",
"some",
"providers",
"returned",
"parameters",
"and",
"the",
".",
"return_value",
"are",
"not",
"available",
"until",
"after",
"the",
"last",
"recordset",
"has",
"been",
"read",
".",
"In",
"that",
"case",
"you",
"must",
"coll",
"nextset",
"()",
"until",
"it",
"returns",
"None",
"then",
"call",
"this",
"method",
"to",
"get",
"your",
"returned",
"information",
"."
] | def get_returned_parameters(self):
"""with some providers, returned parameters and the .return_value are not available until
after the last recordset has been read. In that case, you must call nextset() until it
returns None, then call this method to get your returned information."""
retLst = (
[]
) # stored procedures may return altered parameters, including an added "return value" item
for p in tuple(self.cmd.Parameters):
if verbose > 2:
print(
'Returned=Name: %s, Dir.: %s, Type: %s, Size: %s, Value: "%s",'
" Precision: %s, NumericScale: %s"
% (
p.Name,
adc.directions[p.Direction],
adc.adTypeNames.get(p.Type, str(p.Type) + " (unknown type)"),
p.Size,
p.Value,
p.Precision,
p.NumericScale,
)
)
pyObject = api.convert_to_python(p.Value, api.variantConversions[p.Type])
if p.Direction == adc.adParamReturnValue:
self.returnValue = (
pyObject # also load the undocumented attribute (Vernon's Error!)
)
self.return_value = pyObject
else:
retLst.append(pyObject)
return retLst | [
"def",
"get_returned_parameters",
"(",
"self",
")",
":",
"retLst",
"=",
"(",
"[",
"]",
")",
"# store procedures may return altered parameters, including an added \"return value\" item",
"for",
"p",
"in",
"tuple",
"(",
"self",
".",
"cmd",
".",
"Parameters",
")",
":",
"if",
"verbose",
">",
"2",
":",
"print",
"(",
"'Returned=Name: %s, Dir.: %s, Type: %s, Size: %s, Value: \"%s\",'",
"\" Precision: %s, NumericScale: %s\"",
"%",
"(",
"p",
".",
"Name",
",",
"adc",
".",
"directions",
"[",
"p",
".",
"Direction",
"]",
",",
"adc",
".",
"adTypeNames",
".",
"get",
"(",
"p",
".",
"Type",
",",
"str",
"(",
"p",
".",
"Type",
")",
"+",
"\" (unknown type)\"",
")",
",",
"p",
".",
"Size",
",",
"p",
".",
"Value",
",",
"p",
".",
"Precision",
",",
"p",
".",
"NumericScale",
",",
")",
")",
"pyObject",
"=",
"api",
".",
"convert_to_python",
"(",
"p",
".",
"Value",
",",
"api",
".",
"variantConversions",
"[",
"p",
".",
"Type",
"]",
")",
"if",
"p",
".",
"Direction",
"==",
"adc",
".",
"adParamReturnValue",
":",
"self",
".",
"returnValue",
"=",
"(",
"pyObject",
"# also load the undocumented attribute (Vernon's Error!)",
")",
"self",
".",
"return_value",
"=",
"pyObject",
"else",
":",
"retLst",
".",
"append",
"(",
"pyObject",
")",
"return",
"retLst"
] | https://github.com/mhammond/pywin32/blob/44afd86ba8485194df93234639243252deeb40d5/adodbapi/adodbapi.py#L817-L848 |
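An untested usage sketch of the flow the docstring describes (Windows plus an ADO provider required; the connection and procedure name are assumptions):

```python
# Untested sketch; 'conn' and the procedure name are assumptions.
cur = conn.cursor()
cur.callproc('my_stored_proc', ['in_value', None])
while cur.nextset() is not None:
    cur.fetchall()                 # drain each intermediate recordset
out_params = cur.get_returned_parameters()
print(out_params, cur.return_value)
```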
|
apache/incubator-weex | 5c25f0b59f7ac90703c363e7261f60bd06356dbe | weex_core/tools/cpplint.py | python | NestingState.Update | (self, filename, clean_lines, linenum, error) | Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Update nesting state with current line. | [
"Update",
"nesting",
"state",
"with",
"current",
"line",
"."
] | def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if it weren't followed by a whitespace, this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2) | [
"def",
"Update",
"(",
"self",
",",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# Remember top of the previous nesting stack.",
"#",
"# The stack is always pushed/popped and not modified in place, so",
"# we can just do a shallow copy instead of copy.deepcopy. Using",
"# deepcopy would slow down cpplint by ~28%.",
"if",
"self",
".",
"stack",
":",
"self",
".",
"previous_stack_top",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"else",
":",
"self",
".",
"previous_stack_top",
"=",
"None",
"# Update pp_stack",
"self",
".",
"UpdatePreprocessor",
"(",
"line",
")",
"# Count parentheses. This is to avoid adding struct arguments to",
"# the nesting stack.",
"if",
"self",
".",
"stack",
":",
"inner_block",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"depth_change",
"=",
"line",
".",
"count",
"(",
"'('",
")",
"-",
"line",
".",
"count",
"(",
"')'",
")",
"inner_block",
".",
"open_parentheses",
"+=",
"depth_change",
"# Also check if we are starting or ending an inline assembly block.",
"if",
"inner_block",
".",
"inline_asm",
"in",
"(",
"_NO_ASM",
",",
"_END_ASM",
")",
":",
"if",
"(",
"depth_change",
"!=",
"0",
"and",
"inner_block",
".",
"open_parentheses",
"==",
"1",
"and",
"_MATCH_ASM",
".",
"match",
"(",
"line",
")",
")",
":",
"# Enter assembly block",
"inner_block",
".",
"inline_asm",
"=",
"_INSIDE_ASM",
"else",
":",
"# Not entering assembly block. If previous line was _END_ASM,",
"# we will now shift to _NO_ASM state.",
"inner_block",
".",
"inline_asm",
"=",
"_NO_ASM",
"elif",
"(",
"inner_block",
".",
"inline_asm",
"==",
"_INSIDE_ASM",
"and",
"inner_block",
".",
"open_parentheses",
"==",
"0",
")",
":",
"# Exit assembly block",
"inner_block",
".",
"inline_asm",
"=",
"_END_ASM",
"# Consume namespace declaration at the beginning of the line. Do",
"# this in a loop so that we catch same line declarations like this:",
"# namespace proto2 { namespace bridge { class MessageSet; } }",
"while",
"True",
":",
"# Match start of namespace. The \"\\b\\s*\" below catches namespace",
"# declarations even if it weren't followed by a whitespace, this",
"# is so that we don't confuse our namespace checker. The",
"# missing spaces will be flagged by CheckSpacing.",
"namespace_decl_match",
"=",
"Match",
"(",
"r'^\\s*namespace\\b\\s*([:\\w]+)?(.*)$'",
",",
"line",
")",
"if",
"not",
"namespace_decl_match",
":",
"break",
"new_namespace",
"=",
"_NamespaceInfo",
"(",
"namespace_decl_match",
".",
"group",
"(",
"1",
")",
",",
"linenum",
")",
"self",
".",
"stack",
".",
"append",
"(",
"new_namespace",
")",
"line",
"=",
"namespace_decl_match",
".",
"group",
"(",
"2",
")",
"if",
"line",
".",
"find",
"(",
"'{'",
")",
"!=",
"-",
"1",
":",
"new_namespace",
".",
"seen_open_brace",
"=",
"True",
"line",
"=",
"line",
"[",
"line",
".",
"find",
"(",
"'{'",
")",
"+",
"1",
":",
"]",
"# Look for a class declaration in whatever is left of the line",
"# after parsing namespaces. The regexp accounts for decorated classes",
"# such as in:",
"# class LOCKABLE API Object {",
"# };",
"class_decl_match",
"=",
"Match",
"(",
"r'^(\\s*(?:template\\s*<[\\w\\s<>,:]*>\\s*)?'",
"r'(class|struct)\\s+(?:[A-Z_]+\\s+)*(\\w+(?:::\\w+)*))'",
"r'(.*)$'",
",",
"line",
")",
"if",
"(",
"class_decl_match",
"and",
"(",
"not",
"self",
".",
"stack",
"or",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"open_parentheses",
"==",
"0",
")",
")",
":",
"# We do not want to accept classes that are actually template arguments:",
"# template <class Ignore1,",
"# class Ignore2 = Default<Args>,",
"# template <Args> class Ignore3>",
"# void Function() {};",
"#",
"# To avoid template argument cases, we scan forward and look for",
"# an unmatched '>'. If we see one, assume we are inside a",
"# template argument list.",
"end_declaration",
"=",
"len",
"(",
"class_decl_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"not",
"self",
".",
"InTemplateArgumentList",
"(",
"clean_lines",
",",
"linenum",
",",
"end_declaration",
")",
":",
"self",
".",
"stack",
".",
"append",
"(",
"_ClassInfo",
"(",
"class_decl_match",
".",
"group",
"(",
"3",
")",
",",
"class_decl_match",
".",
"group",
"(",
"2",
")",
",",
"clean_lines",
",",
"linenum",
")",
")",
"line",
"=",
"class_decl_match",
".",
"group",
"(",
"4",
")",
"# If we have not yet seen the opening brace for the innermost block,",
"# run checks here.",
"if",
"not",
"self",
".",
"SeenOpenBrace",
"(",
")",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"CheckBegin",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"# Update access control if we are inside a class/struct",
"if",
"self",
".",
"stack",
"and",
"isinstance",
"(",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
",",
"_ClassInfo",
")",
":",
"classinfo",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"access_match",
"=",
"Match",
"(",
"r'^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?'",
"r':(?:[^:]|$)'",
",",
"line",
")",
"if",
"access_match",
":",
"classinfo",
".",
"access",
"=",
"access_match",
".",
"group",
"(",
"2",
")",
"# Check that access keywords are indented +1 space. Skip this",
"# check if the keywords are not preceded by whitespaces.",
"indent",
"=",
"access_match",
".",
"group",
"(",
"1",
")",
"if",
"(",
"len",
"(",
"indent",
")",
"!=",
"classinfo",
".",
"class_indent",
"+",
"1",
"and",
"Match",
"(",
"r'^\\s*$'",
",",
"indent",
")",
")",
":",
"if",
"classinfo",
".",
"is_struct",
":",
"parent",
"=",
"'struct '",
"+",
"classinfo",
".",
"name",
"else",
":",
"parent",
"=",
"'class '",
"+",
"classinfo",
".",
"name",
"slots",
"=",
"''",
"if",
"access_match",
".",
"group",
"(",
"3",
")",
":",
"slots",
"=",
"access_match",
".",
"group",
"(",
"3",
")",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/indent'",
",",
"3",
",",
"'%s%s: should be indented +1 space inside %s'",
"%",
"(",
"access_match",
".",
"group",
"(",
"2",
")",
",",
"slots",
",",
"parent",
")",
")",
"# Consume braces or semicolons from what's left of the line",
"while",
"True",
":",
"# Match first brace, semicolon, or closed parenthesis.",
"matched",
"=",
"Match",
"(",
"r'^[^{;)}]*([{;)}])(.*)$'",
",",
"line",
")",
"if",
"not",
"matched",
":",
"break",
"token",
"=",
"matched",
".",
"group",
"(",
"1",
")",
"if",
"token",
"==",
"'{'",
":",
"# If namespace or class hasn't seen a opening brace yet, mark",
"# namespace/class head as complete. Push a new block onto the",
"# stack otherwise.",
"if",
"not",
"self",
".",
"SeenOpenBrace",
"(",
")",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"seen_open_brace",
"=",
"True",
"elif",
"Match",
"(",
"r'^extern\\s*\"[^\"]*\"\\s*\\{'",
",",
"line",
")",
":",
"self",
".",
"stack",
".",
"append",
"(",
"_ExternCInfo",
"(",
"linenum",
")",
")",
"else",
":",
"self",
".",
"stack",
".",
"append",
"(",
"_BlockInfo",
"(",
"linenum",
",",
"True",
")",
")",
"if",
"_MATCH_ASM",
".",
"match",
"(",
"line",
")",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"inline_asm",
"=",
"_BLOCK_ASM",
"elif",
"token",
"==",
"';'",
"or",
"token",
"==",
"')'",
":",
"# If we haven't seen an opening brace yet, but we already saw",
"# a semicolon, this is probably a forward declaration. Pop",
"# the stack for these.",
"#",
"# Similarly, if we haven't seen an opening brace yet, but we",
"# already saw a closing parenthesis, then these are probably",
"# function arguments with extra \"class\" or \"struct\" keywords.",
"# Also pop these stack for these.",
"if",
"not",
"self",
".",
"SeenOpenBrace",
"(",
")",
":",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"else",
":",
"# token == '}'",
"# Perform end of block checks and pop the stack.",
"if",
"self",
".",
"stack",
":",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"CheckEnd",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"line",
"=",
"matched",
".",
"group",
"(",
"2",
")"
] | https://github.com/apache/incubator-weex/blob/5c25f0b59f7ac90703c363e7261f60bd06356dbe/weex_core/tools/cpplint.py#L2582-L2744 |
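A standalone check of the namespace-consuming regex used above; one declaration is peeled per iteration, which is how same-line nestings like "namespace a { namespace b { ... } }" are handled:

```python
import re

line = 'namespace proto2 { namespace bridge { class MessageSet; } }'
names = []
while True:
    m = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
    if not m:
        break
    names.append(m.group(1))
    line = m.group(2)
    if '{' in line:
        line = line[line.find('{') + 1:]   # consume the opening brace
assert names == ['proto2', 'bridge']
```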
||
yun-liu/RCF | 91bfb054ad04187dbbe21e539e165ad9bd3ff00b | scripts/cpp_lint.py | python | IsBlankLine | (line) | return not line or line.isspace() | Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank. | Returns true if the given line is blank. | [
"Returns",
"true",
"if",
"the",
"given",
"line",
"is",
"blank",
"."
] | def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace() | [
"def",
"IsBlankLine",
"(",
"line",
")",
":",
"return",
"not",
"line",
"or",
"line",
".",
"isspace",
"(",
")"
] | https://github.com/yun-liu/RCF/blob/91bfb054ad04187dbbe21e539e165ad9bd3ff00b/scripts/cpp_lint.py#L2369-L2381 |
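A self-contained restatement of the predicate with its boundary cases:

```python
def is_blank_line(line):
    """True if the line is empty or whitespace-only."""
    return not line or line.isspace()

assert is_blank_line('') and is_blank_line('  \t')
assert not is_blank_line('  int x;')
```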
|
baidu/bigflow | 449245016c0df7d1252e85581e588bfc60cefad3 | bigflow_python/python/bigflow/rpc/requests.py | python | write_record | (file_path, records, objector) | return None, res.status | write records to file path with objector | write records to file path with objector | [
"write",
"records",
"to",
"file",
"path",
"with",
"objector"
] | def write_record(file_path, records, objector):
"""
write records to file path with objector
"""
request = service_pb2.WriteLocalSeqFileRequest()
request.file_path = file_path
for record in records:
request.key.append("")
request.value.append(objector.serialize(record))
response = _service.request(request, "write_local_seqfile")
import google.protobuf.json_format as json_format
res = json_format.Parse(response, service_pb2.VoidResponse())
return None, res.status | [
"def",
"write_record",
"(",
"file_path",
",",
"records",
",",
"objector",
")",
":",
"request",
"=",
"service_pb2",
".",
"WriteLocalSeqFileRequest",
"(",
")",
"request",
".",
"file_path",
"=",
"file_path",
"for",
"record",
"in",
"records",
":",
"request",
".",
"key",
".",
"append",
"(",
"\"\"",
")",
"request",
".",
"value",
".",
"append",
"(",
"objector",
".",
"serialize",
"(",
"record",
")",
")",
"response",
"=",
"_service",
".",
"request",
"(",
"request",
",",
"\"write_local_seqfile\"",
")",
"import",
"google",
".",
"protobuf",
".",
"json_format",
"as",
"json_format",
"res",
"=",
"json_format",
".",
"Parse",
"(",
"response",
",",
"service_pb2",
".",
"VoidResponse",
"(",
")",
")",
"return",
"None",
",",
"res",
".",
"status"
] | https://github.com/baidu/bigflow/blob/449245016c0df7d1252e85581e588bfc60cefad3/bigflow_python/python/bigflow/rpc/requests.py#L206-L221 |
|
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py | python | _DNNLinearCombinedBaseEstimator._get_target_column | (self) | return self._target_column | Returns the target column of this Estimator. | Returns the target column of this Estimator. | [
"Returns",
"the",
"target",
"column",
"of",
"this",
"Estimator",
"."
] | def _get_target_column(self):
"""Returns the target column of this Estimator."""
return self._target_column | [
"def",
"_get_target_column",
"(",
"self",
")",
":",
"return",
"self",
".",
"_target_column"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py#L197-L199 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/importlib/_bootstrap_external.py | python | SourceLoader._cache_bytecode | (self, source_path, cache_path, data) | return self.set_data(cache_path, data) | Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions | Optional method which writes data (bytes) to a file path (a str). | [
"Optional",
"method",
"which",
"writes",
"data",
"(",
"bytes",
")",
"to",
"a",
"file",
"path",
"(",
"a",
"str",
")",
"."
] | def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data) | [
"def",
"_cache_bytecode",
"(",
"self",
",",
"source_path",
",",
"cache_path",
",",
"data",
")",
":",
"# For backwards compatibility, we delegate to set_data()",
"return",
"self",
".",
"set_data",
"(",
"cache_path",
",",
"data",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/importlib/_bootstrap_external.py#L758-L766 |
|
apache/parquet-cpp | 642da055adf009652689b20e68a198cffb857651 | build-support/cpplint.py | python | CheckPrintf | (filename, clean_lines, linenum, error) | Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Check for printf related issues. | [
"Check",
"for",
"printf",
"related",
"issues",
"."
] | def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1)) | [
"def",
"CheckPrintf",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# When snprintf is used, the second argument shouldn't be a literal.",
"match",
"=",
"Search",
"(",
"r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,'",
",",
"line",
")",
"if",
"match",
"and",
"match",
".",
"group",
"(",
"2",
")",
"!=",
"'0'",
":",
"# If 2nd arg is zero, snprintf is used to calculate size.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"3",
",",
"'If you can, use sizeof(%s) instead of %s as the 2nd arg '",
"'to snprintf.'",
"%",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"# Check if some verboten C functions are being used.",
"if",
"Search",
"(",
"r'\\bsprintf\\s*\\('",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"5",
",",
"'Never use sprintf. Use snprintf instead.'",
")",
"match",
"=",
"Search",
"(",
"r'\\b(strcpy|strcat)\\s*\\('",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"4",
",",
"'Almost always, snprintf is better than %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")"
] | https://github.com/apache/parquet-cpp/blob/642da055adf009652689b20e68a198cffb857651/build-support/cpplint.py#L4971-L4997 |
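A standalone check of the snprintf size-argument regex: only a literal second argument matches the pattern, and only a non-zero literal triggers the warning:

```python
import re

pat = r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,'
flagged = re.search(pat, 'snprintf(buf, 10, "%s", s);')
calc = re.search(pat, 'snprintf(NULL, 0, "%s", s);')
safe = re.search(pat, 'snprintf(buf, sizeof(buf), "%s", s);')
assert flagged.group(2) == '10'   # literal size: triggers the warning
assert calc.group(2) == '0'       # size-calculation idiom: exempted
assert safe is None               # sizeof(...) never matches the pattern
```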
||
raspberrypi/tools | 13474ee775d0c5ec8a7da4fb0a9fa84187abfc87 | arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/frame_filters.py | python | DisableFrameFilter.complete | (self, text, word) | Completion function for both frame filter dictionary, and
frame filter name. | Completion function for both frame filter dictionary, and
frame filter name. | [
"Completion",
"function",
"for",
"both",
"frame",
"filter",
"dictionary",
"and",
"frame",
"filter",
"name",
"."
] | def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list) | [
"def",
"complete",
"(",
"self",
",",
"text",
",",
"word",
")",
":",
"if",
"text",
".",
"count",
"(",
"\" \"",
")",
"==",
"0",
":",
"return",
"_complete_frame_filter_list",
"(",
"text",
",",
"word",
",",
"True",
")",
"else",
":",
"printer_list",
"=",
"gdb",
".",
"frames",
".",
"return_list",
"(",
"text",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"rstrip",
"(",
")",
")",
"return",
"_complete_frame_filter_name",
"(",
"word",
",",
"printer_list",
")"
] | https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/frame_filters.py#L270-L277 |
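A sketch of the two-stage dispatch outside gdb: before the first space, complete dictionary names; afterwards, complete filter names within the chosen dictionary (the sample data is hypothetical):

```python
def complete(text, word, dicts):
    if text.count(' ') == 0:
        return [d for d in dicts if d.startswith(word)]
    chosen = text.split()[0].rstrip()
    return [f for f in dicts.get(chosen, []) if f.startswith(word)]

dicts = {'global': ['Reverse', 'Elider'], 'progspace': ['Inliner']}
assert complete('glo', 'glo', dicts) == ['global']
assert complete('global Rev', 'Rev', dicts) == ['Reverse']
```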
||
jubatus/jubatus | 1251ce551bac980488a6313728e72b3fe0b79a9f | tools/codestyle/cpplint/cpplint.py | python | _Filters | () | return _cpplint_state.filters | Returns the module's list of output filters, as a list. | Returns the module's list of output filters, as a list. | [
"Returns",
"the",
"module",
"s",
"list",
"of",
"output",
"filters",
"as",
"a",
"list",
"."
] | def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters | [
"def",
"_Filters",
"(",
")",
":",
"return",
"_cpplint_state",
".",
"filters"
] | https://github.com/jubatus/jubatus/blob/1251ce551bac980488a6313728e72b3fe0b79a9f/tools/codestyle/cpplint/cpplint.py#L611-L613 |
|
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | PhysicsTools/HeppyCore/python/utils/das.py | python | get_data | (host, query, idx, limit, debug) | return data | Contact DAS server and retrieve data for given DAS query | Contact DAS server and retrieve data for given DAS query | [
"Contact",
"DAS",
"server",
"and",
"retrieve",
"data",
"for",
"given",
"DAS",
"query"
] | def get_data(host, query, idx, limit, debug):
"""Contact DAS server and retrieve data for given DAS query"""
params = {'input':query, 'idx':idx, 'limit':limit}
path = '/das/cache'
pat = re.compile('http[s]{0,1}://')
if not pat.match(host):
msg = 'Invalid hostname: %s' % host
raise Exception(msg)
url = host + path
headers = {"Accept": "application/json"}
encoded_data = urllib.urlencode(params, doseq=True)
url += '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
if debug:
hdlr = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(hdlr)
else:
opener = urllib2.build_opener()
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
pat = re.compile(r'^[a-z0-9]{32}')
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
count = 5 # initial waiting time in seconds
timeout = 30 # final waiting time in seconds
while pid:
params.update({'pid':data})
encoded_data = urllib.urlencode(params, doseq=True)
url = host + path + '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
try:
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
except urllib2.HTTPError as err:
print(err)
return ""
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
time.sleep(count)
if count < timeout:
count *= 2
else:
count = timeout
return data | [
"def",
"get_data",
"(",
"host",
",",
"query",
",",
"idx",
",",
"limit",
",",
"debug",
")",
":",
"params",
"=",
"{",
"'input'",
":",
"query",
",",
"'idx'",
":",
"idx",
",",
"'limit'",
":",
"limit",
"}",
"path",
"=",
"'/das/cache'",
"pat",
"=",
"re",
".",
"compile",
"(",
"'http[s]{0,1}://'",
")",
"if",
"not",
"pat",
".",
"match",
"(",
"host",
")",
":",
"msg",
"=",
"'Invalid hostname: %s'",
"%",
"host",
"raise",
"Exception",
"(",
"msg",
")",
"url",
"=",
"host",
"+",
"path",
"headers",
"=",
"{",
"\"Accept\"",
":",
"\"application/json\"",
"}",
"encoded_data",
"=",
"urllib",
".",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"True",
")",
"url",
"+=",
"'?%s'",
"%",
"encoded_data",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"debug",
":",
"hdlr",
"=",
"urllib2",
".",
"HTTPHandler",
"(",
"debuglevel",
"=",
"1",
")",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
"hdlr",
")",
"else",
":",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
")",
"fdesc",
"=",
"opener",
".",
"open",
"(",
"req",
")",
"data",
"=",
"fdesc",
".",
"read",
"(",
")",
"fdesc",
".",
"close",
"(",
")",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'^[a-z0-9]{32}'",
")",
"if",
"data",
"and",
"isinstance",
"(",
"data",
",",
"str",
")",
"and",
"pat",
".",
"match",
"(",
"data",
")",
"and",
"len",
"(",
"data",
")",
"==",
"32",
":",
"pid",
"=",
"data",
"else",
":",
"pid",
"=",
"None",
"count",
"=",
"5",
"# initial waiting time in seconds",
"timeout",
"=",
"30",
"# final waiting time in seconds",
"while",
"pid",
":",
"params",
".",
"update",
"(",
"{",
"'pid'",
":",
"data",
"}",
")",
"encoded_data",
"=",
"urllib",
".",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"True",
")",
"url",
"=",
"host",
"+",
"path",
"+",
"'?%s'",
"%",
"encoded_data",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
")",
"try",
":",
"fdesc",
"=",
"opener",
".",
"open",
"(",
"req",
")",
"data",
"=",
"fdesc",
".",
"read",
"(",
")",
"fdesc",
".",
"close",
"(",
")",
"except",
"urllib2",
".",
"HTTPError",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"\"\"",
"if",
"data",
"and",
"isinstance",
"(",
"data",
",",
"str",
")",
"and",
"pat",
".",
"match",
"(",
"data",
")",
"and",
"len",
"(",
"data",
")",
"==",
"32",
":",
"pid",
"=",
"data",
"else",
":",
"pid",
"=",
"None",
"time",
".",
"sleep",
"(",
"count",
")",
"if",
"count",
"<",
"timeout",
":",
"count",
"*=",
"2",
"else",
":",
"count",
"=",
"timeout",
"return",
"data"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/PhysicsTools/HeppyCore/python/utils/das.py#L71-L121 |
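The polling loop above doubles its wait between retries; note that it doubles before capping, so one 40-second sleep slips in between 20 seconds and the 30-second ceiling. A sketch of the schedule with min() applied at each step, which avoids that overshoot:

```python
def backoff_schedule(retries, initial=5, timeout=30):
    waits, count = [], initial
    for _ in range(retries):
        waits.append(count)
        count = min(count * 2, timeout)   # double, capped at the ceiling
    return waits

assert backoff_schedule(5) == [5, 10, 20, 30, 30]
```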
|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/codecs.py | python | StreamReader.read | (self, size=-1, chars=-1, firstline=False) | return result | Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input, only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too. | Decodes data from the stream self.stream and returns the
resulting object. | [
"Decodes",
"data",
"from",
"the",
"stream",
"self",
".",
"stream",
"and",
"returns",
"the",
"resulting",
"object",
"."
] | def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input, only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = "".join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request can be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError, exc:
if firstline:
newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = ""
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"-",
"1",
",",
"chars",
"=",
"-",
"1",
",",
"firstline",
"=",
"False",
")",
":",
"# If we have lines cached, first merge them back into characters",
"if",
"self",
".",
"linebuffer",
":",
"self",
".",
"charbuffer",
"=",
"\"\"",
".",
"join",
"(",
"self",
".",
"linebuffer",
")",
"self",
".",
"linebuffer",
"=",
"None",
"# read until we get the required number of characters (if available)",
"while",
"True",
":",
"# can the request can be satisfied from the character buffer?",
"if",
"chars",
"<",
"0",
":",
"if",
"size",
"<",
"0",
":",
"if",
"self",
".",
"charbuffer",
":",
"break",
"elif",
"len",
"(",
"self",
".",
"charbuffer",
")",
">=",
"size",
":",
"break",
"else",
":",
"if",
"len",
"(",
"self",
".",
"charbuffer",
")",
">=",
"chars",
":",
"break",
"# we need more data",
"if",
"size",
"<",
"0",
":",
"newdata",
"=",
"self",
".",
"stream",
".",
"read",
"(",
")",
"else",
":",
"newdata",
"=",
"self",
".",
"stream",
".",
"read",
"(",
"size",
")",
"# decode bytes (those remaining from the last call included)",
"data",
"=",
"self",
".",
"bytebuffer",
"+",
"newdata",
"try",
":",
"newchars",
",",
"decodedbytes",
"=",
"self",
".",
"decode",
"(",
"data",
",",
"self",
".",
"errors",
")",
"except",
"UnicodeDecodeError",
",",
"exc",
":",
"if",
"firstline",
":",
"newchars",
",",
"decodedbytes",
"=",
"self",
".",
"decode",
"(",
"data",
"[",
":",
"exc",
".",
"start",
"]",
",",
"self",
".",
"errors",
")",
"lines",
"=",
"newchars",
".",
"splitlines",
"(",
"True",
")",
"if",
"len",
"(",
"lines",
")",
"<=",
"1",
":",
"raise",
"else",
":",
"raise",
"# keep undecoded bytes until the next call",
"self",
".",
"bytebuffer",
"=",
"data",
"[",
"decodedbytes",
":",
"]",
"# put new characters in the character buffer",
"self",
".",
"charbuffer",
"+=",
"newchars",
"# there was no data available",
"if",
"not",
"newdata",
":",
"break",
"if",
"chars",
"<",
"0",
":",
"# Return everything we've got",
"result",
"=",
"self",
".",
"charbuffer",
"self",
".",
"charbuffer",
"=",
"\"\"",
"else",
":",
"# Return the first chars characters",
"result",
"=",
"self",
".",
"charbuffer",
"[",
":",
"chars",
"]",
"self",
".",
"charbuffer",
"=",
"self",
".",
"charbuffer",
"[",
"chars",
":",
"]",
"return",
"result"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/codecs.py#L424-L501 |
||
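For reference, a minimal usage sketch of the StreamReader.read record above. The record's source is Python 2.7, but the same API exists in Python 3, which is used here; the sample bytes and the UTF-8 encoding are illustrative assumptions, not taken from the record.

```python
# Hedged sketch: driving StreamReader.read() through codecs.getreader.
import codecs
import io

raw = io.BytesIO("héllo wörld".encode("utf-8"))  # illustrative byte stream
reader = codecs.getreader("utf-8")(raw)          # a codecs.StreamReader

print(reader.read(chars=5))  # 'héllo' -- never more than 5 characters
print(reader.read())         # ' wörld' -- read and decode the rest
```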
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/physics/petro/modelling.py | python | JointPetroInversion.setData | (self, data) | TODO. | TODO. | [
"TODO",
"."
] | def setData(self, data):
"""TODO."""
if isinstance(data, list):
if len(data) == len(self.managers):
self.tD.clear()
self.dataVals.clear()
self.dataErrs.clear()
self.fop.setData(data)
for i, mgr in enumerate(self.managers):
t = mgr.tD
self.tD.add(t, data[i].size())
self.dataVals = pg.cat(self.dataVals,
data[i](mgr.dataToken()))
if mgr.errIsAbsolute:
self.dataErrs = pg.cat(self.dataErrs,
data[i]('err') / data[i](mgr.dataToken()))
else:
self.dataErrs = pg.cat(self.dataErrs, data[i]('err'))
self.data = data
self.inv.setTransData(self.tD)
self.inv.setTransModel(self.tM)
else:
raise BaseException("Too few data containers given") | [
"def",
"setData",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"len",
"(",
"self",
".",
"managers",
")",
":",
"self",
".",
"tD",
".",
"clear",
"(",
")",
"self",
".",
"dataVals",
".",
"clear",
"(",
")",
"self",
".",
"dataErrs",
".",
"clear",
"(",
")",
"self",
".",
"fop",
".",
"setData",
"(",
"data",
")",
"for",
"i",
",",
"mgr",
"in",
"enumerate",
"(",
"self",
".",
"managers",
")",
":",
"t",
"=",
"mgr",
".",
"tD",
"self",
".",
"tD",
".",
"add",
"(",
"t",
",",
"data",
"[",
"i",
"]",
".",
"size",
"(",
")",
")",
"self",
".",
"dataVals",
"=",
"pg",
".",
"cat",
"(",
"self",
".",
"dataVals",
",",
"data",
"[",
"i",
"]",
"(",
"mgr",
".",
"dataToken",
"(",
")",
")",
")",
"if",
"mgr",
".",
"errIsAbsolute",
":",
"self",
".",
"dataErrs",
"=",
"pg",
".",
"cat",
"(",
"self",
".",
"dataErrs",
",",
"data",
"[",
"i",
"]",
"(",
"'err'",
")",
"/",
"data",
"[",
"i",
"]",
"(",
"mgr",
".",
"dataToken",
"(",
")",
")",
")",
"else",
":",
"self",
".",
"dataErrs",
"=",
"pg",
".",
"cat",
"(",
"self",
".",
"dataErrs",
",",
"data",
"[",
"i",
"]",
"(",
"'err'",
")",
")",
"self",
".",
"data",
"=",
"data",
"self",
".",
"inv",
".",
"setTransData",
"(",
"self",
".",
"tD",
")",
"self",
".",
"inv",
".",
"setTransModel",
"(",
"self",
".",
"tM",
")",
"else",
":",
"raise",
"BaseException",
"(",
"\"To few datacontainer given\"",
")"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/petro/modelling.py#L166-L194 |
||
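A hedged call-pattern sketch for the JointPetroInversion.setData record above; the manager and container names are assumptions, and the calls are left as comments because concrete objects require pygimli survey data.

```python
# Hedged sketch (names assumed, not from the record): setData expects exactly
# one DataContainer per method manager, in registration order.
#
#   joint = JointPetroInversion([ertManager, ttManager])
#   joint.setData([ertData, ttData])   # len(data) must equal len(managers)
#
# Passing fewer containers than managers raises
# BaseException("Too few data containers given").
```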
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | PhysicsTools/PythonAnalysis/python/rootplot/root2matplotlib.py | python | Hist.errorbar | (self, xerr=False, yerr=False, label_rotation=0,
label_alignment='center', **kwargs) | return errorbar | Generate a matplotlib errorbar figure.
All additional keyword arguments will be passed to
:func:`matplotlib.pyplot.errorbar`. | Generate a matplotlib errorbar figure. | [
"Generate",
"a",
"matplotlib",
"errorbar",
"figure",
"."
] | def errorbar(self, xerr=False, yerr=False, label_rotation=0,
label_alignment='center', **kwargs):
"""
Generate a matplotlib errorbar figure.
All additional keyword arguments will be passed to
:func:`matplotlib.pyplot.errorbar`.
"""
if xerr:
kwargs['xerr'] = self.xerr
if yerr:
kwargs['yerr'] = self.yerr
replacements = kwargs.get('replacements', None) or self.replacements
errorbar = plt.errorbar(self.x, self.y,
label=replace(self.label, replacements),
**kwargs)
self._prepare_xaxis(label_rotation, label_alignment)
return errorbar | [
"def",
"errorbar",
"(",
"self",
",",
"xerr",
"=",
"False",
",",
"yerr",
"=",
"False",
",",
"label_rotation",
"=",
"0",
",",
"label_alignment",
"=",
"'center'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"xerr",
":",
"kwargs",
"[",
"'xerr'",
"]",
"=",
"self",
".",
"xerr",
"if",
"yerr",
":",
"kwargs",
"[",
"'yerr'",
"]",
"=",
"self",
".",
"yerr",
"replacements",
"=",
"kwargs",
".",
"get",
"(",
"'replacements'",
",",
"None",
")",
"or",
"self",
".",
"replacements",
"errorbar",
"=",
"plt",
".",
"errorbar",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"label",
"=",
"replace",
"(",
"self",
".",
"label",
",",
"replacements",
")",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_prepare_xaxis",
"(",
"label_rotation",
",",
"label_alignment",
")",
"return",
"errorbar"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/PhysicsTools/PythonAnalysis/python/rootplot/root2matplotlib.py#L154-L171 |
|
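Since Hist.errorbar above forwards self.x, self.y, and any extra keyword arguments to matplotlib.pyplot.errorbar, here is a minimal standalone sketch of the underlying call; the data points are made up.

```python
# Hedged sketch of the call Hist.errorbar delegates to (toy data).
import matplotlib.pyplot as plt

x = [1.0, 2.0, 3.0]
y = [2.0, 3.5, 1.2]
plt.errorbar(x, y, yerr=[0.3, 0.2, 0.4], fmt="o", label="toy data")
plt.legend()
plt.show()
```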
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/pipeline/sync/skip/skippable.py | python | Skippable.dispatch | (
self,
input,
handle_stash: Callable[[str, Optional[Tensor]], None],
handle_pop: Callable[[str], Optional[Tensor]],
) | Dispatches :class:`stash` or :class:`pop` commands generated by the
module's ``forward()``. | Dispatches :class:`stash` or :class:`pop` commands generated by the
module's ``forward()``. | [
"Dispatches",
":",
"class",
":",
"stash",
"or",
":",
"class",
":",
"pop",
"commands",
"generated",
"by",
"the",
"module",
"s",
"forward",
"()",
"."
] | def dispatch(
self,
input,
handle_stash: Callable[[str, Optional[Tensor]], None],
handle_pop: Callable[[str], Optional[Tensor]],
):
"""Dispatches :class:`stash` or :class:`pop` commands generated by the
module's ``forward()``.
"""
generator = self.module(input)
if not isinstance(generator, Generator):
# The underlying module returned output without any yield.
output = generator
return output
try:
op = next(generator)
while True:
if isinstance(op, stash):
handle_stash(op.name, op.tensor)
op = next(generator)
continue
if isinstance(op, pop):
tensor = handle_pop(op.name)
op = generator.send(tensor)
continue
raise TypeError("%r is not a command from @skippable" % op)
except StopIteration as stop:
output = stop.args[0]
return output | [
"def",
"dispatch",
"(",
"self",
",",
"input",
",",
"handle_stash",
":",
"Callable",
"[",
"[",
"str",
",",
"Optional",
"[",
"Tensor",
"]",
"]",
",",
"None",
"]",
",",
"handle_pop",
":",
"Callable",
"[",
"[",
"str",
"]",
",",
"Optional",
"[",
"Tensor",
"]",
"]",
",",
")",
":",
"generator",
"=",
"self",
".",
"module",
"(",
"input",
")",
"if",
"not",
"isinstance",
"(",
"generator",
",",
"Generator",
")",
":",
"# The underlying module returned output without any yield.",
"output",
"=",
"generator",
"return",
"output",
"try",
":",
"op",
"=",
"next",
"(",
"generator",
")",
"while",
"True",
":",
"if",
"isinstance",
"(",
"op",
",",
"stash",
")",
":",
"handle_stash",
"(",
"op",
".",
"name",
",",
"op",
".",
"tensor",
")",
"op",
"=",
"next",
"(",
"generator",
")",
"continue",
"if",
"isinstance",
"(",
"op",
",",
"pop",
")",
":",
"tensor",
"=",
"handle_pop",
"(",
"op",
".",
"name",
")",
"op",
"=",
"generator",
".",
"send",
"(",
"tensor",
")",
"continue",
"raise",
"TypeError",
"(",
"\"%r is not a command from @skippable\"",
"%",
"op",
")",
"except",
"StopIteration",
"as",
"stop",
":",
"output",
"=",
"stop",
".",
"args",
"[",
"0",
"]",
"return",
"output"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/pipeline/sync/skip/skippable.py#L146-L180 |
||
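A sketch of the generator protocol that Skippable.dispatch above drives, following the documented @skippable usage from the same package: dispatch() turns each yield stash(...) into a handle_stash call, each yield pop(...) into a handle_pop call, and recovers the final return value from StopIteration.

```python
# forward() generators of the kind dispatch() consumes.
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash

@skippable(stash=['skip'])
class StashLayer(nn.Module):
    def forward(self, input):
        yield stash('skip', input)  # routed to handle_stash(name, tensor)
        return input                # surfaces as StopIteration.args[0]

@skippable(pop=['skip'])
class PopLayer(nn.Module):
    def forward(self, input):
        skip = yield pop('skip')    # routed to handle_pop(name)
        return input + skip
```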
dogecoin/dogecoin | 31afd133119dd2e15862d46530cb99424cf564b0 | contrib/devtools/symbol-check.py | python | read_symbols | (executable, imports=True) | return syms | Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols. | Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols. | [
"Parse",
"an",
"ELF",
"executable",
"and",
"return",
"a",
"list",
"of",
"(",
"symbol",
"version",
")",
"tuples",
"for",
"dynamic",
"imported",
"symbols",
"."
] | def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms | [
"def",
"read_symbols",
"(",
"executable",
",",
"imports",
"=",
"True",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"READELF_CMD",
",",
"'--dyn-syms'",
",",
"'-W'",
",",
"executable",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
")",
"(",
"stdout",
",",
"stderr",
")",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"p",
".",
"returncode",
":",
"raise",
"IOError",
"(",
"'Could not read symbols for %s: %s'",
"%",
"(",
"executable",
",",
"stderr",
".",
"strip",
"(",
")",
")",
")",
"syms",
"=",
"[",
"]",
"for",
"line",
"in",
"stdout",
".",
"split",
"(",
"b'\\n'",
")",
":",
"line",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"line",
")",
">",
"7",
"and",
"re",
".",
"match",
"(",
"b'[0-9]+:$'",
",",
"line",
"[",
"0",
"]",
")",
":",
"(",
"sym",
",",
"_",
",",
"version",
")",
"=",
"line",
"[",
"7",
"]",
".",
"partition",
"(",
"b'@'",
")",
"is_import",
"=",
"line",
"[",
"6",
"]",
"==",
"b'UND'",
"if",
"version",
".",
"startswith",
"(",
"b'@'",
")",
":",
"version",
"=",
"version",
"[",
"1",
":",
"]",
"if",
"is_import",
"==",
"imports",
":",
"syms",
".",
"append",
"(",
"(",
"sym",
",",
"version",
")",
")",
"return",
"syms"
] | https://github.com/dogecoin/dogecoin/blob/31afd133119dd2e15862d46530cb99424cf564b0/contrib/devtools/symbol-check.py#L97-L116 |
||
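A hedged driver for the read_symbols record above; '/bin/ls' is only a stand-in binary, and running this assumes readelf is installed and READELF_CMD is defined as elsewhere in the same script.

```python
# Hedged sketch: list imported symbols and their version tags.
if __name__ == '__main__':
    for sym, version in read_symbols('/bin/ls', imports=True):
        print('%s@%s' % (sym.decode('ascii'),
                         version.decode('ascii') or 'unversioned'))
```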
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/debug/cli/debugger_cli_common.py | python | TabCompletionRegistry.remove_comp_items | (self, context_word, comp_items) | Remove a list of completion items from a completion context.
Args:
context_word: A single completion word as a string. The removal will
also apply to all other context words of the same context.
comp_items: Completion items to remove.
Raises:
KeyError: if the context word has not been registered. | Remove a list of completion items from a completion context. | [
"Remove",
"a",
"list",
"of",
"completion",
"items",
"from",
"a",
"completion",
"context",
"."
] | def remove_comp_items(self, context_word, comp_items):
"""Remove a list of completion items from a completion context.
Args:
context_word: A single completion word as a string. The removal will
also apply to all other context words of the same context.
comp_items: Completion items to remove.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
for item in comp_items:
self._comp_dict[context_word].remove(item) | [
"def",
"remove_comp_items",
"(",
"self",
",",
"context_word",
",",
"comp_items",
")",
":",
"if",
"context_word",
"not",
"in",
"self",
".",
"_comp_dict",
":",
"raise",
"KeyError",
"(",
"\"Context word \\\"%s\\\" has not been registered\"",
"%",
"context_word",
")",
"for",
"item",
"in",
"comp_items",
":",
"self",
".",
"_comp_dict",
"[",
"context_word",
"]",
".",
"remove",
"(",
"item",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/debug/cli/debugger_cli_common.py#L932-L949 |
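A hedged usage sketch for the remove_comp_items record above; the context words and completion items are made up, and registration goes through the registry's register_tab_comp_context method from the same module.

```python
# Hedged sketch: removal applies to every context word of the same context.
from tensorflow.python.debug.cli import debugger_cli_common

registry = debugger_cli_common.TabCompletionRegistry()
registry.register_tab_comp_context(['print_tensor', 'pt'],
                                   ['node_a:0', 'node_b:0'])
registry.remove_comp_items('pt', ['node_b:0'])  # gone for 'print_tensor' too
# registry.remove_comp_items('unknown', [])     # would raise KeyError
```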