nwo (stringlengths 5–86) | sha (stringlengths 40) | path (stringlengths 4–189) | language (stringclasses: 1 value) | identifier (stringlengths 1–94) | parameters (stringlengths 2–4.03k) | argument_list (stringclasses: 1 value) | return_statement (stringlengths 0–11.5k) | docstring (stringlengths 1–33.2k) | docstring_summary (stringlengths 0–5.15k) | docstring_tokens (list) | function (stringlengths 34–151k) | function_tokens (list) | url (stringlengths 90–278) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
QMCPACK/qmcpack
|
d0948ab455e38364458740cc8e2239600a14c5cd
|
utils/afqmctools/bin/kp_to_sparse.py
|
python
|
parse_args
|
(args)
|
return options
|
Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
options : :class:`argparse.Namespace`
Command line arguments.
|
Parse command-line arguments.
|
[
"Parse",
"command",
"-",
"line",
"arguments",
"."
] |
def parse_args(args):
"""Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
options : :class:`argparse.Namespace`
Command line arguments.
"""
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('-i', '--input', dest='input_file', type=str,
default=None, help='Input kpoint factorized file.')
parser.add_argument('-o', '--output', dest='output_file',
type=str, default='sparse.h5',
help='Output file for sparse hamiltonian.')
parser.add_argument('-r', '--real-chol', dest='real_chol',
action='store_true', default=False,
help='Dump real integrals.')
parser.add_argument('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='Verbose output.')
options = parser.parse_args(args)
if not options.input_file:
parser.print_help()
sys.exit(1)
return options
|
[
"def",
"parse_args",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--input'",
",",
"dest",
"=",
"'input_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Input kpoint factorized file.'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"dest",
"=",
"'output_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'sparse.h5'",
",",
"help",
"=",
"'Output file for sparse hamiltonian.'",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--real-chol'",
",",
"dest",
"=",
"'real_chol'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Dump real integrals.'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"dest",
"=",
"'verbose'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Verbose output.'",
")",
"options",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"if",
"not",
"options",
".",
"input_file",
":",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"options"
] |
https://github.com/QMCPACK/qmcpack/blob/d0948ab455e38364458740cc8e2239600a14c5cd/utils/afqmctools/bin/kp_to_sparse.py#L11-L44
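A note on the row above: the same argparse pattern, reduced to a self-contained runnable sketch (the wrapper name and flag value here are illustrative, not from the QMCPACK script):

import argparse
import sys

def demo_parse(argv):
    parser = argparse.ArgumentParser(description='demo')
    parser.add_argument('-i', '--input', dest='input_file', type=str,
                        default=None, help='Input kpoint factorized file.')
    options = parser.parse_args(argv)
    if not options.input_file:
        parser.print_help()
        sys.exit(1)  # bail out exactly as parse_args above does
    return options

print(demo_parse(['-i', 'chol.h5']).input_file)  # -> chol.h5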
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fastparquet/writer.py
|
python
|
encode_dict
|
(data, se)
|
return o.so_far().tostring() + data.values.tostring()
|
The data part of dictionary encoding is always int8, with RLE/bitpack
|
The data part of dictionary encoding is always int8, with RLE/bitpack
|
[
"The",
"data",
"part",
"of",
"dictionary",
"encoding",
"is",
"always",
"int8",
"with",
"RLE",
"/",
"bitpack"
] |
def encode_dict(data, se):
""" The data part of dictionary encoding is always int8, with RLE/bitpack
"""
width = data.values.dtype.itemsize * 8
o = encoding.Numpy8(np.empty(10, dtype=np.uint8))
o.write_byte(width)
bit_packed_count = (len(data) + 7) // 8
encode_unsigned_varint(bit_packed_count << 1 | 1, o) # write run header
return o.so_far().tostring() + data.values.tostring()
|
[
"def",
"encode_dict",
"(",
"data",
",",
"se",
")",
":",
"width",
"=",
"data",
".",
"values",
".",
"dtype",
".",
"itemsize",
"*",
"8",
"o",
"=",
"encoding",
".",
"Numpy8",
"(",
"np",
".",
"empty",
"(",
"10",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
")",
"o",
".",
"write_byte",
"(",
"width",
")",
"bit_packed_count",
"=",
"(",
"len",
"(",
"data",
")",
"+",
"7",
")",
"//",
"8",
"encode_unsigned_varint",
"(",
"bit_packed_count",
"<<",
"1",
"|",
"1",
",",
"o",
")",
"# write run header",
"return",
"o",
".",
"so_far",
"(",
")",
".",
"tostring",
"(",
")",
"+",
"data",
".",
"values",
".",
"tostring",
"(",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fastparquet/writer.py#L368-L376
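The run header written by encode_dict follows the Parquet RLE/bit-packing hybrid convention: the count of 8-value groups is shifted left and the low bit is set to mark a bit-packed (rather than RLE) run. A worked check of that arithmetic, with an illustrative value count:

num_values = 20
bit_packed_count = (num_values + 7) // 8   # ceil(20 / 8) = 3 groups of 8 values
header = bit_packed_count << 1 | 1         # low bit 1 marks a bit-packed run
assert (header >> 1, header & 1) == (3, 1)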
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
contrib/gizmos/gtk/gizmos.py
|
python
|
TreeListCtrl.GetSelections
|
(*args, **kwargs)
|
return _gizmos.TreeListCtrl_GetSelections(*args, **kwargs)
|
GetSelections(self) -> PyObject
|
GetSelections(self) -> PyObject
|
[
"GetSelections",
"(",
"self",
")",
"-",
">",
"PyObject"
] |
def GetSelections(*args, **kwargs):
"""GetSelections(self) -> PyObject"""
return _gizmos.TreeListCtrl_GetSelections(*args, **kwargs)
|
[
"def",
"GetSelections",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gizmos",
".",
"TreeListCtrl_GetSelections",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/gtk/gizmos.py#L754-L756
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/traitlets/py2/traitlets/config/configurable.py
|
python
|
Configurable.class_get_help
|
(cls, inst=None)
|
return '\n'.join(final_help)
|
Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
|
Get the help string for this class in ReST format.
|
[
"Get",
"the",
"help",
"string",
"for",
"this",
"class",
"in",
"ReST",
"format",
"."
] |
def class_get_help(cls, inst=None):
"""Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
final_help = []
final_help.append(u'%s options' % cls.__name__)
final_help.append(len(final_help[0])*u'-')
for k, v in sorted(cls.class_traits(config=True).items()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return '\n'.join(final_help)
|
[
"def",
"class_get_help",
"(",
"cls",
",",
"inst",
"=",
"None",
")",
":",
"assert",
"inst",
"is",
"None",
"or",
"isinstance",
"(",
"inst",
",",
"cls",
")",
"final_help",
"=",
"[",
"]",
"final_help",
".",
"append",
"(",
"u'%s options'",
"%",
"cls",
".",
"__name__",
")",
"final_help",
".",
"append",
"(",
"len",
"(",
"final_help",
"[",
"0",
"]",
")",
"*",
"u'-'",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"cls",
".",
"class_traits",
"(",
"config",
"=",
"True",
")",
".",
"items",
"(",
")",
")",
":",
"help",
"=",
"cls",
".",
"class_get_trait_help",
"(",
"v",
",",
"inst",
")",
"final_help",
".",
"append",
"(",
"help",
")",
"return",
"'\\n'",
".",
"join",
"(",
"final_help",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/config/configurable.py#L205-L218
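The two append calls in class_get_help build a ReST section header by underlining the title with one dash per character; in miniature:

title = u'MyApp options'
print(u'\n'.join([title, len(title) * u'-']))
# MyApp options
# -------------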
|
|
ApolloAuto/apollo
|
463fb82f9e979d02dcb25044e60931293ab2dba0
|
modules/tools/common/proto_utils.py
|
python
|
write_pb_to_text_file
|
(topic_pb, file_path)
|
write pb message to file
|
write pb message to file
|
[
"write",
"pb",
"message",
"to",
"file"
] |
def write_pb_to_text_file(topic_pb, file_path):
"""write pb message to file"""
with open(file_path, 'w') as f:
f.write(str(topic_pb))
|
[
"def",
"write_pb_to_text_file",
"(",
"topic_pb",
",",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"topic_pb",
")",
")"
] |
https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/modules/tools/common/proto_utils.py#L22-L25
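The helper relies only on str() producing a text rendering, which is what protobuf messages provide. A runnable sketch that substitutes a plain dict for the message, so the demo needs no protobuf install:

def write_pb_to_text_file(topic_pb, file_path):
    with open(file_path, 'w') as f:
        f.write(str(topic_pb))   # for a protobuf message, str() is its text format

write_pb_to_text_file({'x': 1.0}, 'demo.txt')   # any object with str() works here
print(open('demo.txt').read())                  # {'x': 1.0}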
|
||
BlzFans/wke
|
b0fa21158312e40c5fbd84682d643022b6c34a93
|
cygwin/lib/python2.6/encodings/hex_codec.py
|
python
|
hex_decode
|
(input,errors='strict')
|
return (output, len(input))
|
Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
|
Decodes the object input and returns a tuple (output
object, length consumed).
|
[
"Decodes",
"the",
"object",
"input",
"and",
"returns",
"a",
"tuple",
"(",
"output",
"object",
"length",
"consumed",
")",
"."
] |
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
|
[
"def",
"hex_decode",
"(",
"input",
",",
"errors",
"=",
"'strict'",
")",
":",
"assert",
"errors",
"==",
"'strict'",
"output",
"=",
"binascii",
".",
"a2b_hex",
"(",
"input",
")",
"return",
"(",
"output",
",",
"len",
"(",
"input",
")",
")"
] |
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/encodings/hex_codec.py#L27-L43
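hex_decode is a thin wrapper over binascii; the Python 3 equivalent of its return pair looks like this (the row above is Python 2.6 stdlib code, where a2b_hex returned a str):

import binascii

data = '706932'
output = binascii.a2b_hex(data)   # b'pi2'
print((output, len(data)))        # (b'pi2', 6): (output object, length consumed)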
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/calendar.py
|
python
|
HTMLCalendar.formatmonthname
|
(self, theyear, themonth, withyear=True)
|
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
Return a month name as a table row.
|
Return a month name as a table row.
|
[
"Return",
"a",
"month",
"name",
"as",
"a",
"table",
"row",
"."
] |
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Return a month name as a table row.
"""
if withyear:
s = '%s %s' % (month_name[themonth], theyear)
else:
s = '%s' % month_name[themonth]
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
[
"def",
"formatmonthname",
"(",
"self",
",",
"theyear",
",",
"themonth",
",",
"withyear",
"=",
"True",
")",
":",
"if",
"withyear",
":",
"s",
"=",
"'%s %s'",
"%",
"(",
"month_name",
"[",
"themonth",
"]",
",",
"theyear",
")",
"else",
":",
"s",
"=",
"'%s'",
"%",
"month_name",
"[",
"themonth",
"]",
"return",
"'<tr><th colspan=\"7\" class=\"month\">%s</th></tr>'",
"%",
"s"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/calendar.py#L413-L421
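This method still exists unchanged on the modern stdlib HTMLCalendar, so the row above can be exercised directly:

import calendar

cal = calendar.HTMLCalendar()
print(cal.formatmonthname(2024, 3))
# <tr><th colspan="7" class="month">March 2024</th></tr>
print(cal.formatmonthname(2024, 3, withyear=False))
# <tr><th colspan="7" class="month">March</th></tr>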
|
|
happynear/caffe-windows
|
967eedf25009e334b7f6f933bb5e17aaaff5bef6
|
scripts/cpp_lint.py
|
python
|
ReplaceAll
|
(pattern, rep, s)
|
return _regexp_compile_cache[pattern].sub(rep, s)
|
Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
|
Replaces instances of pattern in a string with a replacement.
|
[
"Replaces",
"instances",
"of",
"pattern",
"in",
"a",
"string",
"with",
"a",
"replacement",
"."
] |
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
|
[
"def",
"ReplaceAll",
"(",
"pattern",
",",
"rep",
",",
"s",
")",
":",
"if",
"pattern",
"not",
"in",
"_regexp_compile_cache",
":",
"_regexp_compile_cache",
"[",
"pattern",
"]",
"=",
"sre_compile",
".",
"compile",
"(",
"pattern",
")",
"return",
"_regexp_compile_cache",
"[",
"pattern",
"]",
".",
"sub",
"(",
"rep",
",",
"s",
")"
] |
https://github.com/happynear/caffe-windows/blob/967eedf25009e334b7f6f933bb5e17aaaff5bef6/scripts/cpp_lint.py#L529-L544
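The same memoized-compile pattern written against the public re module instead of the internal sre_compile (behavior is the same for this use):

import re

_regexp_compile_cache = {}

def replace_all(pattern, rep, s):
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].sub(rep, s)

assert replace_all(r'\s+', ' ', 'a   b\tc') == 'a b c'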
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_core.py
|
python
|
StdDialogButtonSizer.SetCancelButton
|
(*args, **kwargs)
|
return _core_.StdDialogButtonSizer_SetCancelButton(*args, **kwargs)
|
SetCancelButton(self, wxButton button)
|
SetCancelButton(self, wxButton button)
|
[
"SetCancelButton",
"(",
"self",
"wxButton",
"button",
")"
] |
def SetCancelButton(*args, **kwargs):
"""SetCancelButton(self, wxButton button)"""
return _core_.StdDialogButtonSizer_SetCancelButton(*args, **kwargs)
|
[
"def",
"SetCancelButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"StdDialogButtonSizer_SetCancelButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L15516-L15518
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/futures.py
|
python
|
TransferCoordinator.done
|
(self)
|
return self.status in ['failed', 'cancelled', 'success']
|
Determines if a TransferFuture has completed
:returns: True if status is equal to 'failed', 'cancelled', or
'success'. False, otherwise
|
Determines if a TransferFuture has completed
|
[
"Determines",
"if",
"a",
"TransferFuture",
"has",
"completed"
] |
def done(self):
"""Determines if a TransferFuture has completed
:returns: True if status is equal to 'failed', 'cancelled', or
'success'. False, otherwise
"""
return self.status in ['failed', 'cancelled', 'success']
|
[
"def",
"done",
"(",
"self",
")",
":",
"return",
"self",
".",
"status",
"in",
"[",
"'failed'",
",",
"'cancelled'",
",",
"'success'",
"]"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/futures.py#L328-L334
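A stand-in class showing the membership test the method boils down to (the real TransferCoordinator carries locks and callbacks around this):

class Coordinator:
    def __init__(self, status):
        self.status = status

    def done(self):
        # True only once the transfer reached a terminal status.
        return self.status in ['failed', 'cancelled', 'success']

assert Coordinator('success').done()
assert not Coordinator('running').done()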
|
|
miyosuda/TensorFlowAndroidMNIST
|
7b5a4603d2780a8a2834575706e9001977524007
|
jni-build/jni/include/tensorflow/python/client/timeline.py
|
python
|
_ChromeTraceFormatter.emit_obj_create
|
(self, category, name, timestamp, pid, tid, object_id)
|
Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
|
Adds an object creation event to the trace.
|
[
"Adds",
"an",
"object",
"creation",
"event",
"to",
"the",
"trace",
"."
] |
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
|
[
"def",
"emit_obj_create",
"(",
"self",
",",
"category",
",",
"name",
",",
"timestamp",
",",
"pid",
",",
"tid",
",",
"object_id",
")",
":",
"event",
"=",
"self",
".",
"_create_event",
"(",
"'N'",
",",
"category",
",",
"name",
",",
"pid",
",",
"tid",
",",
"timestamp",
")",
"event",
"[",
"'id'",
"]",
"=",
"object_id",
"self",
".",
"_events",
".",
"append",
"(",
"event",
")"
] |
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/client/timeline.py#L138-L151
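Assuming _create_event fills the standard Chrome trace keys (ph/cat/name/pid/tid/ts), the record appended by emit_obj_create looks roughly like this illustrative dict:

event = {
    'ph': 'N',           # phase 'N': object created
    'cat': 'Tensor',     # illustrative category
    'name': 'MatMul:0',  # illustrative name
    'pid': 1,
    'tid': 0,
    'ts': 1469892,       # timestamp (microseconds)
    'id': 7,             # object_id added by emit_obj_create
}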
|
||
google/clif
|
cab24d6a105609a65c95a36a1712ae3c20c7b5df
|
clif/pyclif.py
|
python
|
GenerateFrom
|
(ast)
|
Traverse ast and generate output files.
|
Traverse ast and generate output files.
|
[
"Traverse",
"ast",
"and",
"generate",
"output",
"files",
"."
] |
def GenerateFrom(ast):
"""Traverse ast and generate output files."""
inc_headers = list(ast.usertype_includes)
api_header = _GetHeaders(ast)
modname = FLAGS.modname or StripExt(os.path.basename(ast.source
)).replace('-', '_')
m = pyext.Module(
modname,
ast.typemaps,
ast.namemaps,
indent=FLAGS.indent)
inc_headers.append(os.path.basename(FLAGS.header_out))
# Order of generators is important.
with open(FLAGS.ccdeps_out, 'w') as cout:
gen.WriteTo(cout, m.GenerateBase(ast, inc_headers))
with open(FLAGS.ccinit_out, 'w') as iout:
gen.WriteTo(iout, m.GenerateInit(ast.source))
with open(FLAGS.header_out, 'w') as hout:
gen.WriteTo(
hout, m.GenerateHeader(
ast.source, api_header, ast.macros,
ast.options.get('is_extended_from_python', 'False') == 'True'))
|
[
"def",
"GenerateFrom",
"(",
"ast",
")",
":",
"inc_headers",
"=",
"list",
"(",
"ast",
".",
"usertype_includes",
")",
"api_header",
"=",
"_GetHeaders",
"(",
"ast",
")",
"modname",
"=",
"FLAGS",
".",
"modname",
"or",
"StripExt",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"ast",
".",
"source",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"m",
"=",
"pyext",
".",
"Module",
"(",
"modname",
",",
"ast",
".",
"typemaps",
",",
"ast",
".",
"namemaps",
",",
"indent",
"=",
"FLAGS",
".",
"indent",
")",
"inc_headers",
".",
"append",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"FLAGS",
".",
"header_out",
")",
")",
"# Order of generators is important.",
"with",
"open",
"(",
"FLAGS",
".",
"ccdeps_out",
",",
"'w'",
")",
"as",
"cout",
":",
"gen",
".",
"WriteTo",
"(",
"cout",
",",
"m",
".",
"GenerateBase",
"(",
"ast",
",",
"inc_headers",
")",
")",
"with",
"open",
"(",
"FLAGS",
".",
"ccinit_out",
",",
"'w'",
")",
"as",
"iout",
":",
"gen",
".",
"WriteTo",
"(",
"iout",
",",
"m",
".",
"GenerateInit",
"(",
"ast",
".",
"source",
")",
")",
"with",
"open",
"(",
"FLAGS",
".",
"header_out",
",",
"'w'",
")",
"as",
"hout",
":",
"gen",
".",
"WriteTo",
"(",
"hout",
",",
"m",
".",
"GenerateHeader",
"(",
"ast",
".",
"source",
",",
"api_header",
",",
"ast",
".",
"macros",
",",
"ast",
".",
"options",
".",
"get",
"(",
"'is_extended_from_python'",
",",
"'False'",
")",
"==",
"'True'",
")",
")"
] |
https://github.com/google/clif/blob/cab24d6a105609a65c95a36a1712ae3c20c7b5df/clif/pyclif.py#L98-L119
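The modname fallback strips the extension from the source basename and rewrites dashes, since dashes are illegal in Python module names. Assuming StripExt behaves like os.path.splitext(...)[0], the derivation is:

import os

source = 'path/to/my-wrapper.clif'   # illustrative filename
modname = os.path.splitext(os.path.basename(source))[0].replace('-', '_')
assert modname == 'my_wrapper'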
|
||
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/ccompiler.py
|
python
|
CCompiler.runtime_library_dir_option
|
(self, dir)
|
Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
|
Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
|
[
"Return",
"the",
"compiler",
"option",
"to",
"add",
"dir",
"to",
"the",
"list",
"of",
"directories",
"searched",
"for",
"runtime",
"libraries",
"."
] |
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
|
[
"def",
"runtime_library_dir_option",
"(",
"self",
",",
"dir",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/ccompiler.py#L714-L718
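Concrete compiler classes override this hook; a hypothetical subclass might return an rpath-style flag (the exact option is toolchain-specific, so the value below is only an assumption for illustration):

class DemoCompiler:
    def runtime_library_dir_option(self, dir):
        raise NotImplementedError

class DemoUnixCompiler(DemoCompiler):
    def runtime_library_dir_option(self, dir):
        return '-Wl,-rpath,' + dir   # illustrative; real compilers differ

assert DemoUnixCompiler().runtime_library_dir_option('/opt/lib') == '-Wl,-rpath,/opt/lib'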
|
||
google/nucleus
|
68d3947fafba1337f294c0668a6e1c7f3f1273e3
|
nucleus/io/vcf.py
|
python
|
VcfHeaderCache.format_field_get_fn
|
(self, field_name)
|
return self._format_get_cache[field_name]
|
Returns a callable that gets the given FORMAT field based on its type.
|
Returns a callable that gets the given FORMAT field based on its type.
|
[
"Returns",
"a",
"callable",
"that",
"gets",
"the",
"given",
"FORMAT",
"field",
"based",
"on",
"its",
"type",
"."
] |
def format_field_get_fn(self, field_name):
"""Returns a callable that gets the given FORMAT field based on its type."""
return self._format_get_cache[field_name]
|
[
"def",
"format_field_get_fn",
"(",
"self",
",",
"field_name",
")",
":",
"return",
"self",
".",
"_format_get_cache",
"[",
"field_name",
"]"
] |
https://github.com/google/nucleus/blob/68d3947fafba1337f294c0668a6e1c7f3f1273e3/nucleus/io/vcf.py#L130-L132
|
|
htcondor/htcondor
|
4829724575176d1d6c936e4693dfd78a728569b0
|
bindings/python/htcondor/htchirp/htchirp.py
|
python
|
HTChirp._check_response
|
(self, response)
|
Check the response from the Chirp server for validity
:raises ChirpError: Many different subclasses of ChirpError
|
Check the response from the Chirp server for validity
|
[
"Check",
"the",
"response",
"from",
"the",
"Chirp",
"server",
"for",
"validity"
] |
def _check_response(self, response):
"""Check the response from the Chirp server for validity
:raises ChirpError: Many different subclasses of ChirpError
"""
chirp_errors = {
-1: self.NotAuthenticated("The client has not authenticated its identity."),
-2: self.NotAuthorized(
"The client is not authorized to perform that action."
),
-3: self.DoesntExist("There is no object by that name."),
-4: self.AlreadyExists("There is already an object by that name."),
-5: self.TooBig("That request is too big to execute."),
-6: self.NoSpace("There is not enough space to store that."),
-7: self.NoMemory("The server is out of memory."),
-8: self.InvalidRequest("The form of the request is invalid."),
-9: self.TooManyOpen("There are too many resources in use."),
-10: self.Busy("That object is in use by someone else."),
-11: self.TryAgain("A temporary condition prevented the request."),
-12: self.BadFD("The file descriptor requested is invalid."),
-13: self.IsDir("A file-only operation was attempted on a directory."),
-14: self.NotDir("A directory operation was attempted on a file."),
-15: self.NotEmpty(
"A directory cannot be removed because it is not empty."
),
-16: self.CrossDeviceLink("A hard link was attempted across devices."),
-17: self.Offline("The requested resource is temporarily not available."),
-127: self.UnknownError("An unknown error (-127) occurred."),
}
if response in chirp_errors:
raise chirp_errors[response]
elif response < 0:
raise self.UnknownError("An unknown error ({0}) occurred.".format(response))
|
[
"def",
"_check_response",
"(",
"self",
",",
"response",
")",
":",
"chirp_errors",
"=",
"{",
"-",
"1",
":",
"self",
".",
"NotAuthenticated",
"(",
"\"The client has not authenticated its identity.\"",
")",
",",
"-",
"2",
":",
"self",
".",
"NotAuthorized",
"(",
"\"The client is not authorized to perform that action.\"",
")",
",",
"-",
"3",
":",
"self",
".",
"DoesntExist",
"(",
"\"There is no object by that name.\"",
")",
",",
"-",
"4",
":",
"self",
".",
"AlreadyExists",
"(",
"\"There is already an object by that name.\"",
")",
",",
"-",
"5",
":",
"self",
".",
"TooBig",
"(",
"\"That request is too big to execute.\"",
")",
",",
"-",
"6",
":",
"self",
".",
"NoSpace",
"(",
"\"There is not enough space to store that.\"",
")",
",",
"-",
"7",
":",
"self",
".",
"NoMemory",
"(",
"\"The server is out of memory.\"",
")",
",",
"-",
"8",
":",
"self",
".",
"InvalidRequest",
"(",
"\"The form of the request is invalid.\"",
")",
",",
"-",
"9",
":",
"self",
".",
"TooManyOpen",
"(",
"\"There are too many resources in use.\"",
")",
",",
"-",
"10",
":",
"self",
".",
"Busy",
"(",
"\"That object is in use by someone else.\"",
")",
",",
"-",
"11",
":",
"self",
".",
"TryAgain",
"(",
"\"A temporary condition prevented the request.\"",
")",
",",
"-",
"12",
":",
"self",
".",
"BadFD",
"(",
"\"The file descriptor requested is invalid.\"",
")",
",",
"-",
"13",
":",
"self",
".",
"IsDir",
"(",
"\"A file-only operation was attempted on a directory.\"",
")",
",",
"-",
"14",
":",
"self",
".",
"NotDir",
"(",
"\"A directory operation was attempted on a file.\"",
")",
",",
"-",
"15",
":",
"self",
".",
"NotEmpty",
"(",
"\"A directory cannot be removed because it is not empty.\"",
")",
",",
"-",
"16",
":",
"self",
".",
"CrossDeviceLink",
"(",
"\"A hard link was attempted across devices.\"",
")",
",",
"-",
"17",
":",
"self",
".",
"Offline",
"(",
"\"The requested resource is temporarily not available.\"",
")",
",",
"-",
"127",
":",
"self",
".",
"UnknownError",
"(",
"\"An unknown error (-127) occured.\"",
")",
",",
"}",
"if",
"response",
"in",
"chirp_errors",
":",
"raise",
"chirp_errors",
"[",
"response",
"]",
"elif",
"response",
"<",
"0",
":",
"raise",
"self",
".",
"UnknownError",
"(",
"\"An unknown error ({0}) occured.\"",
".",
"format",
"(",
"response",
")",
")"
] |
https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/bindings/python/htcondor/htchirp/htchirp.py#L260-L295
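The dispatch pattern in miniature: exception instances pre-built and keyed by negative server codes, with a catch-all for any other negative response (stdlib exceptions stand in for the Chirp error classes):

class UnknownError(Exception):
    pass

errors = {-3: FileNotFoundError("There is no object by that name.")}

def check_response(code):
    if code in errors:
        raise errors[code]
    if code < 0:
        raise UnknownError("An unknown error ({0}) occurred.".format(code))
    return code   # non-negative responses pass through untouched

assert check_response(12) == 12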
|
||
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/mailbox.py
|
python
|
Babyl.get_file
|
(self, key)
|
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
|
Return a file-like representation or raise a KeyError.
|
Return a file-like representation or raise a KeyError.
|
[
"Return",
"a",
"file",
"-",
"like",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] |
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
|
[
"def",
"get_file",
"(",
"self",
",",
"key",
")",
":",
"return",
"StringIO",
".",
"StringIO",
"(",
"self",
".",
"get_string",
"(",
"key",
")",
".",
"replace",
"(",
"'\\n'",
",",
"os",
".",
"linesep",
")",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/mailbox.py#L1292-L1295
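On Python 3 the same idea reads as io.StringIO; the replace swaps the mailbox's stored newlines for the platform line separator before handing back a file-like object:

import io
import os

text = 'Subject: hi\nbody\n'
f = io.StringIO(text.replace('\n', os.linesep))
print(repr(f.readline()))   # 'Subject: hi\n' (or with '\r\n' on Windows)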
|
|
jiaxiang-wu/quantized-cnn
|
4d020e17026df90e40111d219e3eb74e0afb1588
|
cpplint.py
|
python
|
IsRValueType
|
(typenames, clean_lines, nesting_state, linenum, column)
|
return False
|
Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
typenames: set of type names from template-argument-list.
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
|
Check if the token ending on (linenum, column) is a type.
|
[
"Check",
"if",
"the",
"token",
"ending",
"on",
"(",
"linenum",
"column",
")",
"is",
"a",
"type",
"."
] |
def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
typenames: set of type names from template-argument-list.
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
# most likely an rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for known types and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if (match.group(2) in typenames or
match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']):
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
# Found closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
# first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
before_text = match_symbol.group(1)
if linenum > 1:
before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(typenames, clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
|
[
"def",
"IsRValueType",
"(",
"typenames",
",",
"clean_lines",
",",
"nesting_state",
",",
"linenum",
",",
"column",
")",
":",
"prefix",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"[",
"0",
":",
"column",
"]",
"# Get one word to the left. If we failed to do so, this is most",
"# likely not a type, since it's unlikely that the type name and \"&&\"",
"# would be split across multiple lines.",
"match",
"=",
"Match",
"(",
"r'^(.*)(\\b\\w+|[>*)&])\\s*$'",
",",
"prefix",
")",
"if",
"not",
"match",
":",
"return",
"False",
"# Check text following the token. If it's \"&&>\" or \"&&,\" or \"&&...\", it's",
"# most likely a rvalue reference used inside a template.",
"suffix",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"[",
"column",
":",
"]",
"if",
"Match",
"(",
"r'&&\\s*(?:[>,]|\\.\\.\\.)'",
",",
"suffix",
")",
":",
"return",
"True",
"# Check for known types and end of templates:",
"# int&& variable",
"# vector<int>&& variable",
"#",
"# Because this function is called recursively, we also need to",
"# recognize pointer and reference types:",
"# int* Function()",
"# int& Function()",
"if",
"(",
"match",
".",
"group",
"(",
"2",
")",
"in",
"typenames",
"or",
"match",
".",
"group",
"(",
"2",
")",
"in",
"[",
"'char'",
",",
"'char16_t'",
",",
"'char32_t'",
",",
"'wchar_t'",
",",
"'bool'",
",",
"'short'",
",",
"'int'",
",",
"'long'",
",",
"'signed'",
",",
"'unsigned'",
",",
"'float'",
",",
"'double'",
",",
"'void'",
",",
"'auto'",
",",
"'>'",
",",
"'*'",
",",
"'&'",
"]",
")",
":",
"return",
"True",
"# If we see a close parenthesis, look for decltype on the other side.",
"# decltype would unambiguously identify a type, anything else is",
"# probably a parenthesized expression and not a type.",
"if",
"match",
".",
"group",
"(",
"2",
")",
"==",
"')'",
":",
"return",
"IsDecltype",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"+",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"-",
"1",
")",
"# Check for casts and cv-qualifiers.",
"# match.group(1) remainder",
"# -------------- ---------",
"# const_cast< type&&",
"# const type&&",
"# type const&&",
"if",
"Search",
"(",
"r'\\b(?:const_cast\\s*<|static_cast\\s*<|dynamic_cast\\s*<|'",
"r'reinterpret_cast\\s*<|\\w+\\s)\\s*$'",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
":",
"return",
"True",
"# Look for a preceding symbol that might help differentiate the context.",
"# These are the cases that would be ambiguous:",
"# match.group(1) remainder",
"# -------------- ---------",
"# Call ( expression &&",
"# Declaration ( type&&",
"# sizeof ( type&&",
"# if ( expression &&",
"# while ( expression &&",
"# for ( type&&",
"# for( ; expression &&",
"# statement ; type&&",
"# block { type&&",
"# constructor { expression &&",
"start",
"=",
"linenum",
"line",
"=",
"match",
".",
"group",
"(",
"1",
")",
"match_symbol",
"=",
"None",
"while",
"start",
">=",
"0",
":",
"# We want to skip over identifiers and commas to get to a symbol.",
"# Commas are skipped so that we can find the opening parenthesis",
"# for function parameter lists.",
"match_symbol",
"=",
"Match",
"(",
"r'^(.*)([^\\w\\s,])[\\w\\s,]*$'",
",",
"line",
")",
"if",
"match_symbol",
":",
"break",
"start",
"-=",
"1",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"start",
"]",
"if",
"not",
"match_symbol",
":",
"# Probably the first statement in the file is an rvalue reference",
"return",
"True",
"if",
"match_symbol",
".",
"group",
"(",
"2",
")",
"==",
"'}'",
":",
"# Found closing brace, probably an indicate of this:",
"# block{} type&&",
"return",
"True",
"if",
"match_symbol",
".",
"group",
"(",
"2",
")",
"==",
"';'",
":",
"# Found semicolon, probably one of these:",
"# for(; expression &&",
"# statement; type&&",
"# Look for the previous 'for(' in the previous lines.",
"before_text",
"=",
"match_symbol",
".",
"group",
"(",
"1",
")",
"for",
"i",
"in",
"xrange",
"(",
"start",
"-",
"1",
",",
"max",
"(",
"start",
"-",
"6",
",",
"0",
")",
",",
"-",
"1",
")",
":",
"before_text",
"=",
"clean_lines",
".",
"elided",
"[",
"i",
"]",
"+",
"before_text",
"if",
"Search",
"(",
"r'for\\s*\\([^{};]*$'",
",",
"before_text",
")",
":",
"# This is the condition inside a for-loop",
"return",
"False",
"# Did not find a for-init-statement before this semicolon, so this",
"# is probably a new statement and not a condition.",
"return",
"True",
"if",
"match_symbol",
".",
"group",
"(",
"2",
")",
"==",
"'{'",
":",
"# Found opening brace, probably one of these:",
"# block{ type&& = ... ; }",
"# constructor{ expression && expression }",
"# Look for a closing brace or a semicolon. If we see a semicolon",
"# first, this is probably a rvalue reference.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"start",
"]",
"[",
"0",
":",
"len",
"(",
"match_symbol",
".",
"group",
"(",
"1",
")",
")",
"+",
"1",
"]",
"end",
"=",
"start",
"depth",
"=",
"1",
"while",
"True",
":",
"for",
"ch",
"in",
"line",
":",
"if",
"ch",
"==",
"';'",
":",
"return",
"True",
"elif",
"ch",
"==",
"'{'",
":",
"depth",
"+=",
"1",
"elif",
"ch",
"==",
"'}'",
":",
"depth",
"-=",
"1",
"if",
"depth",
"==",
"0",
":",
"return",
"False",
"end",
"+=",
"1",
"if",
"end",
">=",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"break",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"end",
"]",
"# Incomplete program?",
"return",
"False",
"if",
"match_symbol",
".",
"group",
"(",
"2",
")",
"==",
"'('",
":",
"# Opening parenthesis. Need to check what's to the left of the",
"# parenthesis. Look back one extra line for additional context.",
"before_text",
"=",
"match_symbol",
".",
"group",
"(",
"1",
")",
"if",
"linenum",
">",
"1",
":",
"before_text",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"-",
"1",
"]",
"+",
"before_text",
"before_text",
"=",
"match_symbol",
".",
"group",
"(",
"1",
")",
"# Patterns that are likely to be types:",
"# [](type&&",
"# for (type&&",
"# sizeof(type&&",
"# operator=(type&&",
"#",
"if",
"Search",
"(",
"r'(?:\\]|\\bfor|\\bsizeof|\\boperator\\s*\\S+\\s*)\\s*$'",
",",
"before_text",
")",
":",
"return",
"True",
"# Patterns that are likely to be expressions:",
"# if (expression &&",
"# while (expression &&",
"# : initializer(expression &&",
"# , initializer(expression &&",
"# ( FunctionCall(expression &&",
"# + FunctionCall(expression &&",
"# + (expression &&",
"#",
"# The last '+' represents operators such as '+' and '-'.",
"if",
"Search",
"(",
"r'(?:\\bif|\\bwhile|[-+=%^(<!?:,&*]\\s*)$'",
",",
"before_text",
")",
":",
"return",
"False",
"# Something else. Check that tokens to the left look like",
"# return_type function_name",
"match_func",
"=",
"Match",
"(",
"r'^(.*\\S.*)\\s+\\w(?:\\w|::)*(?:<[^<>]*>)?\\s*$'",
",",
"match_symbol",
".",
"group",
"(",
"1",
")",
")",
"if",
"match_func",
":",
"# Check for constructors, which don't have return types.",
"if",
"Search",
"(",
"r'\\b(?:explicit|inline)$'",
",",
"match_func",
".",
"group",
"(",
"1",
")",
")",
":",
"return",
"True",
"implicit_constructor",
"=",
"Match",
"(",
"r'\\s*(\\w+)\\((?:const\\s+)?(\\w+)'",
",",
"prefix",
")",
"if",
"(",
"implicit_constructor",
"and",
"implicit_constructor",
".",
"group",
"(",
"1",
")",
"==",
"implicit_constructor",
".",
"group",
"(",
"2",
")",
")",
":",
"return",
"True",
"return",
"IsRValueType",
"(",
"typenames",
",",
"clean_lines",
",",
"nesting_state",
",",
"linenum",
",",
"len",
"(",
"match_func",
".",
"group",
"(",
"1",
")",
")",
")",
"# Nothing before the function name. If this is inside a block scope,",
"# this is probably a function call.",
"return",
"not",
"(",
"nesting_state",
".",
"previous_stack_top",
"and",
"nesting_state",
".",
"previous_stack_top",
".",
"IsBlockInfo",
"(",
")",
")",
"if",
"match_symbol",
".",
"group",
"(",
"2",
")",
"==",
"'>'",
":",
"# Possibly a closing bracket, check that what's on the other side",
"# looks like the start of a template.",
"return",
"IsTemplateParameterList",
"(",
"clean_lines",
",",
"start",
",",
"len",
"(",
"match_symbol",
".",
"group",
"(",
"1",
")",
")",
")",
"# Some other symbol, usually something like \"a=b&&c\". This is most",
"# likely not a type.",
"return",
"False"
] |
https://github.com/jiaxiang-wu/quantized-cnn/blob/4d020e17026df90e40111d219e3eb74e0afb1588/cpplint.py#L3431-L3632
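cpplint's Match and Search are anchored and unanchored wrappers over re; a sketch of the first probe the function runs on the text left of the token, showing the one-word-or-bracket capture in group 2:

import re

prefix = 'void F(std::vector<int>'
m = re.match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
assert m and m.group(2) == '>'   # a closing template bracket counts as the "word"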
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/multiprocessing/context.py
|
python
|
BaseContext.SimpleQueue
|
(self)
|
return SimpleQueue(ctx=self.get_context())
|
Returns a queue object
|
Returns a queue object
|
[
"Returns",
"a",
"queue",
"object"
] |
def SimpleQueue(self):
'''Returns a queue object'''
from .queues import SimpleQueue
return SimpleQueue(ctx=self.get_context())
|
[
"def",
"SimpleQueue",
"(",
"self",
")",
":",
"from",
".",
"queues",
"import",
"SimpleQueue",
"return",
"SimpleQueue",
"(",
"ctx",
"=",
"self",
".",
"get_context",
"(",
")",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/multiprocessing/context.py#L109-L112
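Reached through a context object in practice:

import multiprocessing as mp

ctx = mp.get_context('spawn')
q = ctx.SimpleQueue()   # BaseContext.SimpleQueue binds the queue to this context
q.put(42)
assert q.get() == 42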
|
|
gimli-org/gimli
|
17aa2160de9b15ababd9ef99e89b1bc3277bbb23
|
pygimli/solver/solverFiniteVolume.py
|
python
|
cellToFaceArithmetic
|
(boundary, AMM)
|
TODO Documentme.
|
TODO Documentme.
|
[
"TODO",
"Documentme",
"."
] |
def cellToFaceArithmetic(boundary, AMM):
"""TODO Documentme."""
leftCell = boundary.leftCell()
rightCell = boundary.rightCell()
df1 = 0.
df2 = 0.
harmonic = False
if leftCell:
df1 = boundary.center().distance(leftCell.center())
if rightCell:
df2 = boundary.center().distance(rightCell.center())
d12 = (df1 + df2)
if leftCell and rightCell:
if harmonic:
pass
# harmonic mean
# uFace = (u1 * u2) / ((u2-u1)*df2/d12 + u1)
else:
# arithmetic mean
# check left vs. right
AMM.addVal(boundary.id(), leftCell.id(), df2 / d12)
AMM.addVal(boundary.id(), rightCell.id(), -df2 / d12 + 1.0)
# uFace = (u1 - u2) * df2/d12 + u2
elif leftCell:
AMM.addVal(boundary.id(), leftCell.id(), 1.0)
elif rightCell:
AMM.addVal(boundary.id(), rightCell.id(), 1.0)
|
[
"def",
"cellToFaceArithmetic",
"(",
"boundary",
",",
"AMM",
")",
":",
"leftCell",
"=",
"boundary",
".",
"leftCell",
"(",
")",
"rightCell",
"=",
"boundary",
".",
"rightCell",
"(",
")",
"df1",
"=",
"0.",
"df2",
"=",
"0.",
"harmonic",
"=",
"False",
"if",
"leftCell",
":",
"df1",
"=",
"boundary",
".",
"center",
"(",
")",
".",
"distance",
"(",
"leftCell",
".",
"center",
"(",
")",
")",
"if",
"rightCell",
":",
"df2",
"=",
"boundary",
".",
"center",
"(",
")",
".",
"distance",
"(",
"rightCell",
".",
"center",
"(",
")",
")",
"d12",
"=",
"(",
"df1",
"+",
"df2",
")",
"if",
"leftCell",
"and",
"rightCell",
":",
"if",
"harmonic",
":",
"pass",
"# harmonic mean",
"# uFace = (u1 * u2) / ((u2-u1)*df2/d12 + u1)",
"else",
":",
"# arithmetic mean",
"# check left vs. right",
"AMM",
".",
"addVal",
"(",
"boundary",
".",
"id",
"(",
")",
",",
"leftCell",
".",
"id",
"(",
")",
",",
"df2",
"/",
"d12",
")",
"AMM",
".",
"addVal",
"(",
"boundary",
".",
"id",
"(",
")",
",",
"rightCell",
".",
"id",
"(",
")",
",",
"-",
"df2",
"/",
"d12",
"+",
"1.0",
")",
"# uFace = (u1 - u2) * df2/d12 + u2",
"elif",
"leftCell",
":",
"AMM",
".",
"addVal",
"(",
"boundary",
".",
"id",
"(",
")",
",",
"leftCell",
".",
"id",
"(",
")",
",",
"1.0",
")",
"elif",
"rightCell",
":",
"AMM",
".",
"addVal",
"(",
"boundary",
".",
"id",
"(",
")",
",",
"rightCell",
".",
"id",
"(",
")",
",",
"1.0",
")"
] |
https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/solver/solverFiniteVolume.py#L121-L149
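The arithmetic-mean branch is inverse-distance weighting between the two cell centers, so the nearer cell gets the larger weight; a worked check of the two coefficients written into AMM:

df1, df2 = 1.0, 3.0          # distances: face to left center, face to right center
d12 = df1 + df2
w_left = df2 / d12           # coefficient stored for the left cell
w_right = -df2 / d12 + 1.0   # coefficient stored for the right cell
u_left, u_right = 10.0, 20.0
u_face = w_left * u_left + w_right * u_right
assert (w_left + w_right, u_face) == (1.0, 12.5)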
|
||
soui3/soui
|
c588024b2f4f6d3fadb53c1bfed5ccf00d0b7046
|
third-part/jsoncpp/makerelease.py
|
python
|
fix_sources_eol
|
(dist_dir)
|
Set file EOL for tarball distribution.
|
Set file EOL for tarball distribution.
|
[
"Set",
"file",
"EOL",
"for",
"tarball",
"distribution",
"."
] |
def fix_sources_eol(dist_dir):
"""Set file EOL for tarball distribution.
"""
print('Preparing exported source file EOL for distribution...')
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob(dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs)
unix_sources = antglob.glob(dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs)
for path in win_sources:
fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\r\n')
for path in unix_sources:
fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\n')
|
[
"def",
"fix_sources_eol",
"(",
"dist_dir",
")",
":",
"print",
"(",
"'Preparing exported source file EOL for distribution...'",
")",
"prune_dirs",
"=",
"antglob",
".",
"prune_dirs",
"+",
"'scons-local* ./build* ./libs ./dist'",
"win_sources",
"=",
"antglob",
".",
"glob",
"(",
"dist_dir",
",",
"includes",
"=",
"'**/*.sln **/*.vcproj'",
",",
"prune_dirs",
"=",
"prune_dirs",
")",
"unix_sources",
"=",
"antglob",
".",
"glob",
"(",
"dist_dir",
",",
"includes",
"=",
"'''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in\n sconscript *.json *.expected AUTHORS LICENSE'''",
",",
"excludes",
"=",
"antglob",
".",
"default_excludes",
"+",
"'scons.py sconsign.py scons-*'",
",",
"prune_dirs",
"=",
"prune_dirs",
")",
"for",
"path",
"in",
"win_sources",
":",
"fixeol",
".",
"fix_source_eol",
"(",
"path",
",",
"is_dry_run",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"eol",
"=",
"'\\r\\n'",
")",
"for",
"path",
"in",
"unix_sources",
":",
"fixeol",
".",
"fix_source_eol",
"(",
"path",
",",
"is_dry_run",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"eol",
"=",
"'\\n'",
")"
] |
https://github.com/soui3/soui/blob/c588024b2f4f6d3fadb53c1bfed5ccf00d0b7046/third-part/jsoncpp/makerelease.py#L124-L140
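fixeol.fix_source_eol is a project-local helper, so its exact behavior is an assumption here; per file it presumably amounts to normalizing whatever line endings exist and re-emitting the requested one:

def normalize_eol(text, eol):
    # Collapse CRLF and lone CR to LF first, then re-emit the requested ending.
    return text.replace('\r\n', '\n').replace('\r', '\n').replace('\n', eol)

assert normalize_eol('a\r\nb\rc\n', '\r\n') == 'a\r\nb\r\nc\r\n'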
|
||
mindspore-ai/mindspore
|
fb8fd3338605bb34fa5cea054e535a8b1d753fab
|
mindspore/python/mindspore/dataset/audio/validators.py
|
python
|
check_fade
|
(method)
|
return new_method
|
Wrapper method to check the parameters of Fade.
|
Wrapper method to check the parameters of Fade.
|
[
"Wrapper",
"method",
"to",
"check",
"the",
"parameters",
"of",
"Fade",
"."
] |
def check_fade(method):
"""Wrapper method to check the parameters of Fade."""
@wraps(method)
def new_method(self, *args, **kwargs):
[fade_in_len, fade_out_len, fade_shape], _ = parse_user_args(method, *args, **kwargs)
type_check(fade_in_len, (int,), "fade_in_len")
check_non_negative_int32(fade_in_len, "fade_in_len")
type_check(fade_out_len, (int,), "fade_out_len")
check_non_negative_int32(fade_out_len, "fade_out_len")
type_check(fade_shape, (FadeShape,), "fade_shape")
return method(self, *args, **kwargs)
return new_method
|
[
"def",
"check_fade",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"new_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"[",
"fade_in_len",
",",
"fade_out_len",
",",
"fade_shape",
"]",
",",
"_",
"=",
"parse_user_args",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"type_check",
"(",
"fade_in_len",
",",
"(",
"int",
",",
")",
",",
"\"fade_in_len\"",
")",
"check_non_negative_int32",
"(",
"fade_in_len",
",",
"\"fade_in_len\"",
")",
"type_check",
"(",
"fade_out_len",
",",
"(",
"int",
",",
")",
",",
"\"fade_out_len\"",
")",
"check_non_negative_int32",
"(",
"fade_out_len",
",",
"\"fade_out_len\"",
")",
"type_check",
"(",
"fade_shape",
",",
"(",
"FadeShape",
",",
")",
",",
"\"fade_shape\"",
")",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_method"
] |
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/audio/validators.py#L534-L547
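The same wraps-based validation pattern, reduced to one checked argument; parse_user_args, type_check, and check_non_negative_int32 are MindSpore helpers, so plain stdlib checks stand in:

from functools import wraps

def check_fade_len(method):
    @wraps(method)
    def new_method(self, fade_in_len, *args, **kwargs):
        if not isinstance(fade_in_len, int) or fade_in_len < 0:
            raise ValueError("fade_in_len must be a non-negative int")
        return method(self, fade_in_len, *args, **kwargs)
    return new_method

class Fade:
    @check_fade_len
    def apply(self, fade_in_len):
        return fade_in_len

assert Fade().apply(3) == 3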
|
|
eclipse/omr
|
056e7c9ce9d503649190bc5bd9931fac30b4e4bc
|
jitbuilder/apigen/genutils.py
|
python
|
APIClass.base
|
(self)
|
return self.api.get_class_by_name(self.api.base_of(self.name())) if self.has_parent() else self
|
Returns the base class of the current class. If the class does not
extend another class, the current class is returned.
|
Returns the base class of the current class. If the class does not
extend another class, the current class is returned.
|
[
"Returns",
"the",
"base",
"class",
"of",
"the",
"current",
"class",
".",
"If",
"the",
"class",
"does",
"not",
"extend",
"another",
"class",
"the",
"current",
"class",
"is",
"returned",
"."
] |
def base(self):
"""
Returns the base class of the current class. If the class does not
extend another class, the current class is returned.
"""
return self.api.get_class_by_name(self.api.base_of(self.name())) if self.has_parent() else self
|
[
"def",
"base",
"(",
"self",
")",
":",
"return",
"self",
".",
"api",
".",
"get_class_by_name",
"(",
"self",
".",
"api",
".",
"base_of",
"(",
"self",
".",
"name",
"(",
")",
")",
")",
"if",
"self",
".",
"has_parent",
"(",
")",
"else",
"self"
] |
https://github.com/eclipse/omr/blob/056e7c9ce9d503649190bc5bd9931fac30b4e4bc/jitbuilder/apigen/genutils.py#L339-L344
|
|
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
deps/src/libxml2-2.9.1/python/libxml2class.py
|
python
|
xmlTextReader.Close
|
(self)
|
return ret
|
This method releases any resources allocated by the current
instance, changes the state to Closed, and closes any
underlying input.
|
This method releases any resources allocated by the current
instance, changes the state to Closed, and closes any
underlying input.
|
[
"This",
"method",
"releases",
"any",
"resources",
"allocated",
"by",
"the",
"current",
"instance",
"changes",
"the",
"state",
"to",
"Closed",
"and",
"close",
"any",
"underlying",
"input",
"."
] |
def Close(self):
"""This method releases any resources allocated by the current
instance, changes the state to Closed, and closes any
underlying input. """
ret = libxml2mod.xmlTextReaderClose(self._o)
return ret
|
[
"def",
"Close",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlTextReaderClose",
"(",
"self",
".",
"_o",
")",
"return",
"ret"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L5762-L5767
|
|
ideawu/ssdb-rocks
|
a3cbb322cafb2f493252829c608e2239df98c9ac
|
deps/cpy/antlr3/tokens.py
|
python
|
Token.setLine
|
(self, line)
|
@brief Set the line number on which this token was matched
Using setter/getter methods is deprecated. Use o.line instead.
|
@brief Set the line number on which this token was matched
|
[
"@brief",
"Set",
"the",
"line",
"number",
"on",
"which",
"this",
"token",
"was",
"matched"
] |
def setLine(self, line):
"""@brief Set the line number on which this token was matched
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
|
[
"def",
"setLine",
"(",
"self",
",",
"line",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/ideawu/ssdb-rocks/blob/a3cbb322cafb2f493252829c608e2239df98c9ac/deps/cpy/antlr3/tokens.py#L83-L88
|
||
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/platform.py
|
python
|
uname
|
()
|
return _uname_cache
|
Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
|
Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
|
[
"Fairly",
"portable",
"uname",
"interface",
".",
"Returns",
"a",
"tuple",
"of",
"strings",
"(",
"system",
"node",
"release",
"version",
"machine",
"processor",
")",
"identifying",
"the",
"underlying",
"platform",
"."
] |
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not filter(None, (system, node, release, version, machine)):
# Hmm, no there is either no uname or uname has returned
#'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = string.join(vminfo,', ')
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = system,node,release,version,machine,processor
return _uname_cache
|
[
"def",
"uname",
"(",
")",
":",
"global",
"_uname_cache",
"no_os_uname",
"=",
"0",
"if",
"_uname_cache",
"is",
"not",
"None",
":",
"return",
"_uname_cache",
"processor",
"=",
"''",
"# Get some infos from the builtin os.uname API...",
"try",
":",
"system",
",",
"node",
",",
"release",
",",
"version",
",",
"machine",
"=",
"os",
".",
"uname",
"(",
")",
"except",
"AttributeError",
":",
"no_os_uname",
"=",
"1",
"if",
"no_os_uname",
"or",
"not",
"filter",
"(",
"None",
",",
"(",
"system",
",",
"node",
",",
"release",
",",
"version",
",",
"machine",
")",
")",
":",
"# Hmm, no there is either no uname or uname has returned",
"#'unknowns'... we'll have to poke around the system then.",
"if",
"no_os_uname",
":",
"system",
"=",
"sys",
".",
"platform",
"release",
"=",
"''",
"version",
"=",
"''",
"node",
"=",
"_node",
"(",
")",
"machine",
"=",
"''",
"use_syscmd_ver",
"=",
"1",
"# Try win32_ver() on win32 platforms",
"if",
"system",
"==",
"'win32'",
":",
"release",
",",
"version",
",",
"csd",
",",
"ptype",
"=",
"win32_ver",
"(",
")",
"if",
"release",
"and",
"version",
":",
"use_syscmd_ver",
"=",
"0",
"# Try to use the PROCESSOR_* environment variables",
"# available on Win XP and later; see",
"# http://support.microsoft.com/kb/888731 and",
"# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM",
"if",
"not",
"machine",
":",
"# WOW64 processes mask the native architecture",
"if",
"\"PROCESSOR_ARCHITEW6432\"",
"in",
"os",
".",
"environ",
":",
"machine",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PROCESSOR_ARCHITEW6432\"",
",",
"''",
")",
"else",
":",
"machine",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PROCESSOR_ARCHITECTURE'",
",",
"''",
")",
"if",
"not",
"processor",
":",
"processor",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PROCESSOR_IDENTIFIER'",
",",
"machine",
")",
"# Try the 'ver' system command available on some",
"# platforms",
"if",
"use_syscmd_ver",
":",
"system",
",",
"release",
",",
"version",
"=",
"_syscmd_ver",
"(",
"system",
")",
"# Normalize system to what win32_ver() normally returns",
"# (_syscmd_ver() tends to return the vendor name as well)",
"if",
"system",
"==",
"'Microsoft Windows'",
":",
"system",
"=",
"'Windows'",
"elif",
"system",
"==",
"'Microsoft'",
"and",
"release",
"==",
"'Windows'",
":",
"# Under Windows Vista and Windows Server 2008,",
"# Microsoft changed the output of the ver command. The",
"# release is no longer printed. This causes the",
"# system and release to be misidentified.",
"system",
"=",
"'Windows'",
"if",
"'6.0'",
"==",
"version",
"[",
":",
"3",
"]",
":",
"release",
"=",
"'Vista'",
"else",
":",
"release",
"=",
"''",
"# In case we still don't know anything useful, we'll try to",
"# help ourselves",
"if",
"system",
"in",
"(",
"'win32'",
",",
"'win16'",
")",
":",
"if",
"not",
"version",
":",
"if",
"system",
"==",
"'win32'",
":",
"version",
"=",
"'32bit'",
"else",
":",
"version",
"=",
"'16bit'",
"system",
"=",
"'Windows'",
"elif",
"system",
"[",
":",
"4",
"]",
"==",
"'java'",
":",
"release",
",",
"vendor",
",",
"vminfo",
",",
"osinfo",
"=",
"java_ver",
"(",
")",
"system",
"=",
"'Java'",
"version",
"=",
"string",
".",
"join",
"(",
"vminfo",
",",
"', '",
")",
"if",
"not",
"version",
":",
"version",
"=",
"vendor",
"# System specific extensions",
"if",
"system",
"==",
"'OpenVMS'",
":",
"# OpenVMS seems to have release and version mixed up",
"if",
"not",
"release",
"or",
"release",
"==",
"'0'",
":",
"release",
"=",
"version",
"version",
"=",
"''",
"# Get processor information",
"try",
":",
"import",
"vms_lib",
"except",
"ImportError",
":",
"pass",
"else",
":",
"csid",
",",
"cpu_number",
"=",
"vms_lib",
".",
"getsyi",
"(",
"'SYI$_CPU'",
",",
"0",
")",
"if",
"(",
"cpu_number",
">=",
"128",
")",
":",
"processor",
"=",
"'Alpha'",
"else",
":",
"processor",
"=",
"'VAX'",
"if",
"not",
"processor",
":",
"# Get processor information from the uname system command",
"processor",
"=",
"_syscmd_uname",
"(",
"'-p'",
",",
"''",
")",
"#If any unknowns still exist, replace them with ''s, which are more portable",
"if",
"system",
"==",
"'unknown'",
":",
"system",
"=",
"''",
"if",
"node",
"==",
"'unknown'",
":",
"node",
"=",
"''",
"if",
"release",
"==",
"'unknown'",
":",
"release",
"=",
"''",
"if",
"version",
"==",
"'unknown'",
":",
"version",
"=",
"''",
"if",
"machine",
"==",
"'unknown'",
":",
"machine",
"=",
"''",
"if",
"processor",
"==",
"'unknown'",
":",
"processor",
"=",
"''",
"# normalize name",
"if",
"system",
"==",
"'Microsoft'",
"and",
"release",
"==",
"'Windows'",
":",
"system",
"=",
"'Windows'",
"release",
"=",
"'Vista'",
"_uname_cache",
"=",
"system",
",",
"node",
",",
"release",
",",
"version",
",",
"machine",
",",
"processor",
"return",
"_uname_cache"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/platform.py#L1166-L1299
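A quick way to see what this probing produces on a given host (a minimal sketch; under Python 2 the result is a plain 6-tuple, while modern Python returns a named tuple with the same fields, and the concrete values below are illustrative):

import platform

system, node, release, version, machine, processor = platform.uname()
print(system, release, machine)   # e.g. 'Linux', '5.15.0', 'x86_64'

Repeated calls are cheap: the result is memoized in _uname_cache, so the system is only probed once per process.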
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/numpy/py2/numpy/lib/arrayterator.py
|
python
|
Arrayterator.__getitem__
|
(self, index)
|
return out
|
Return a new arrayterator.
|
Return a new arrayterator.
|
[
"Return",
"a",
"new",
"arrayterator",
"."
] |
def __getitem__(self, index):
"""
Return a new arrayterator.
"""
# Fix index, handling ellipsis and incomplete slices.
if not isinstance(index, tuple):
index = (index,)
fixed = []
length, dims = len(index), self.ndim
for slice_ in index:
if slice_ is Ellipsis:
fixed.extend([slice(None)] * (dims-length+1))
length = len(fixed)
elif isinstance(slice_, (int, long)):
fixed.append(slice(slice_, slice_+1, 1))
else:
fixed.append(slice_)
index = tuple(fixed)
if len(index) < dims:
index += (slice(None),) * (dims-len(index))
# Return a new arrayterator object.
out = self.__class__(self.var, self.buf_size)
for i, (start, stop, step, slice_) in enumerate(
zip(self.start, self.stop, self.step, index)):
out.start[i] = start + (slice_.start or 0)
out.step[i] = step * (slice_.step or 1)
out.stop[i] = start + (slice_.stop or stop-start)
out.stop[i] = min(stop, out.stop[i])
return out
|
[
"def",
"__getitem__",
"(",
"self",
",",
"index",
")",
":",
"# Fix index, handling ellipsis and incomplete slices.",
"if",
"not",
"isinstance",
"(",
"index",
",",
"tuple",
")",
":",
"index",
"=",
"(",
"index",
",",
")",
"fixed",
"=",
"[",
"]",
"length",
",",
"dims",
"=",
"len",
"(",
"index",
")",
",",
"self",
".",
"ndim",
"for",
"slice_",
"in",
"index",
":",
"if",
"slice_",
"is",
"Ellipsis",
":",
"fixed",
".",
"extend",
"(",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"(",
"dims",
"-",
"length",
"+",
"1",
")",
")",
"length",
"=",
"len",
"(",
"fixed",
")",
"elif",
"isinstance",
"(",
"slice_",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"fixed",
".",
"append",
"(",
"slice",
"(",
"slice_",
",",
"slice_",
"+",
"1",
",",
"1",
")",
")",
"else",
":",
"fixed",
".",
"append",
"(",
"slice_",
")",
"index",
"=",
"tuple",
"(",
"fixed",
")",
"if",
"len",
"(",
"index",
")",
"<",
"dims",
":",
"index",
"+=",
"(",
"slice",
"(",
"None",
")",
",",
")",
"*",
"(",
"dims",
"-",
"len",
"(",
"index",
")",
")",
"# Return a new arrayterator object.",
"out",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"var",
",",
"self",
".",
"buf_size",
")",
"for",
"i",
",",
"(",
"start",
",",
"stop",
",",
"step",
",",
"slice_",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"start",
",",
"self",
".",
"stop",
",",
"self",
".",
"step",
",",
"index",
")",
")",
":",
"out",
".",
"start",
"[",
"i",
"]",
"=",
"start",
"+",
"(",
"slice_",
".",
"start",
"or",
"0",
")",
"out",
".",
"step",
"[",
"i",
"]",
"=",
"step",
"*",
"(",
"slice_",
".",
"step",
"or",
"1",
")",
"out",
".",
"stop",
"[",
"i",
"]",
"=",
"start",
"+",
"(",
"slice_",
".",
"stop",
"or",
"stop",
"-",
"start",
")",
"out",
".",
"stop",
"[",
"i",
"]",
"=",
"min",
"(",
"stop",
",",
"out",
".",
"stop",
"[",
"i",
"]",
")",
"return",
"out"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/lib/arrayterator.py#L100-L130
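Indexing an Arrayterator therefore yields another Arrayterator rather than an eager array, so a large dataset can be windowed before any data is read. A small sketch (the array shape and buf_size are illustrative):

import numpy as np
from numpy.lib import Arrayterator

a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
it = Arrayterator(a, buf_size=10)
sub = it[0]                # new Arrayterator limited to the first hyperslab
for block in sub:          # iteration reads at most buf_size elements at a time
    print(block.shape)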
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/scipy/spatial/kdtree.py
|
python
|
KDTree.query_pairs
|
(self, r, p=2., eps=0)
|
return results
|
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
|
Find all pairs of points within a distance.
|
[
"Find",
"all",
"pairs",
"of",
"points",
"within",
"a",
"distance",
"."
] |
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
|
[
"def",
"query_pairs",
"(",
"self",
",",
"r",
",",
"p",
"=",
"2.",
",",
"eps",
"=",
"0",
")",
":",
"results",
"=",
"set",
"(",
")",
"def",
"traverse_checking",
"(",
"node1",
",",
"rect1",
",",
"node2",
",",
"rect2",
")",
":",
"if",
"rect1",
".",
"min_distance_rectangle",
"(",
"rect2",
",",
"p",
")",
">",
"r",
"/",
"(",
"1.",
"+",
"eps",
")",
":",
"return",
"elif",
"rect1",
".",
"max_distance_rectangle",
"(",
"rect2",
",",
"p",
")",
"<",
"r",
"*",
"(",
"1.",
"+",
"eps",
")",
":",
"traverse_no_checking",
"(",
"node1",
",",
"node2",
")",
"elif",
"isinstance",
"(",
"node1",
",",
"KDTree",
".",
"leafnode",
")",
":",
"if",
"isinstance",
"(",
"node2",
",",
"KDTree",
".",
"leafnode",
")",
":",
"# Special care to avoid duplicate pairs",
"if",
"id",
"(",
"node1",
")",
"==",
"id",
"(",
"node2",
")",
":",
"d",
"=",
"self",
".",
"data",
"[",
"node2",
".",
"idx",
"]",
"for",
"i",
"in",
"node1",
".",
"idx",
":",
"for",
"j",
"in",
"node2",
".",
"idx",
"[",
"minkowski_distance",
"(",
"d",
",",
"self",
".",
"data",
"[",
"i",
"]",
",",
"p",
")",
"<=",
"r",
"]",
":",
"if",
"i",
"<",
"j",
":",
"results",
".",
"add",
"(",
"(",
"i",
",",
"j",
")",
")",
"else",
":",
"d",
"=",
"self",
".",
"data",
"[",
"node2",
".",
"idx",
"]",
"for",
"i",
"in",
"node1",
".",
"idx",
":",
"for",
"j",
"in",
"node2",
".",
"idx",
"[",
"minkowski_distance",
"(",
"d",
",",
"self",
".",
"data",
"[",
"i",
"]",
",",
"p",
")",
"<=",
"r",
"]",
":",
"if",
"i",
"<",
"j",
":",
"results",
".",
"add",
"(",
"(",
"i",
",",
"j",
")",
")",
"elif",
"j",
"<",
"i",
":",
"results",
".",
"add",
"(",
"(",
"j",
",",
"i",
")",
")",
"else",
":",
"less",
",",
"greater",
"=",
"rect2",
".",
"split",
"(",
"node2",
".",
"split_dim",
",",
"node2",
".",
"split",
")",
"traverse_checking",
"(",
"node1",
",",
"rect1",
",",
"node2",
".",
"less",
",",
"less",
")",
"traverse_checking",
"(",
"node1",
",",
"rect1",
",",
"node2",
".",
"greater",
",",
"greater",
")",
"elif",
"isinstance",
"(",
"node2",
",",
"KDTree",
".",
"leafnode",
")",
":",
"less",
",",
"greater",
"=",
"rect1",
".",
"split",
"(",
"node1",
".",
"split_dim",
",",
"node1",
".",
"split",
")",
"traverse_checking",
"(",
"node1",
".",
"less",
",",
"less",
",",
"node2",
",",
"rect2",
")",
"traverse_checking",
"(",
"node1",
".",
"greater",
",",
"greater",
",",
"node2",
",",
"rect2",
")",
"else",
":",
"less1",
",",
"greater1",
"=",
"rect1",
".",
"split",
"(",
"node1",
".",
"split_dim",
",",
"node1",
".",
"split",
")",
"less2",
",",
"greater2",
"=",
"rect2",
".",
"split",
"(",
"node2",
".",
"split_dim",
",",
"node2",
".",
"split",
")",
"traverse_checking",
"(",
"node1",
".",
"less",
",",
"less1",
",",
"node2",
".",
"less",
",",
"less2",
")",
"traverse_checking",
"(",
"node1",
".",
"less",
",",
"less1",
",",
"node2",
".",
"greater",
",",
"greater2",
")",
"# Avoid traversing (node1.less, node2.greater) and",
"# (node1.greater, node2.less) (it's the same node pair twice",
"# over, which is the source of the complication in the",
"# original KDTree.query_pairs)",
"if",
"id",
"(",
"node1",
")",
"!=",
"id",
"(",
"node2",
")",
":",
"traverse_checking",
"(",
"node1",
".",
"greater",
",",
"greater1",
",",
"node2",
".",
"less",
",",
"less2",
")",
"traverse_checking",
"(",
"node1",
".",
"greater",
",",
"greater1",
",",
"node2",
".",
"greater",
",",
"greater2",
")",
"def",
"traverse_no_checking",
"(",
"node1",
",",
"node2",
")",
":",
"if",
"isinstance",
"(",
"node1",
",",
"KDTree",
".",
"leafnode",
")",
":",
"if",
"isinstance",
"(",
"node2",
",",
"KDTree",
".",
"leafnode",
")",
":",
"# Special care to avoid duplicate pairs",
"if",
"id",
"(",
"node1",
")",
"==",
"id",
"(",
"node2",
")",
":",
"for",
"i",
"in",
"node1",
".",
"idx",
":",
"for",
"j",
"in",
"node2",
".",
"idx",
":",
"if",
"i",
"<",
"j",
":",
"results",
".",
"add",
"(",
"(",
"i",
",",
"j",
")",
")",
"else",
":",
"for",
"i",
"in",
"node1",
".",
"idx",
":",
"for",
"j",
"in",
"node2",
".",
"idx",
":",
"if",
"i",
"<",
"j",
":",
"results",
".",
"add",
"(",
"(",
"i",
",",
"j",
")",
")",
"elif",
"j",
"<",
"i",
":",
"results",
".",
"add",
"(",
"(",
"j",
",",
"i",
")",
")",
"else",
":",
"traverse_no_checking",
"(",
"node1",
",",
"node2",
".",
"less",
")",
"traverse_no_checking",
"(",
"node1",
",",
"node2",
".",
"greater",
")",
"else",
":",
"# Avoid traversing (node1.less, node2.greater) and",
"# (node1.greater, node2.less) (it's the same node pair twice",
"# over, which is the source of the complication in the",
"# original KDTree.query_pairs)",
"if",
"id",
"(",
"node1",
")",
"==",
"id",
"(",
"node2",
")",
":",
"traverse_no_checking",
"(",
"node1",
".",
"less",
",",
"node2",
".",
"less",
")",
"traverse_no_checking",
"(",
"node1",
".",
"less",
",",
"node2",
".",
"greater",
")",
"traverse_no_checking",
"(",
"node1",
".",
"greater",
",",
"node2",
".",
"greater",
")",
"else",
":",
"traverse_no_checking",
"(",
"node1",
".",
"less",
",",
"node2",
")",
"traverse_no_checking",
"(",
"node1",
".",
"greater",
",",
"node2",
")",
"traverse_checking",
"(",
"self",
".",
"tree",
",",
"Rectangle",
"(",
"self",
".",
"maxes",
",",
"self",
".",
"mins",
")",
",",
"self",
".",
"tree",
",",
"Rectangle",
"(",
"self",
".",
"maxes",
",",
"self",
".",
"mins",
")",
")",
"return",
"results"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/spatial/kdtree.py#L698-L803
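From the caller's side, the dual-tree traversal above is invisible; the method simply returns a set of index pairs. A small example with random points (point count and radius are illustrative):

import numpy as np
from scipy.spatial import KDTree

pts = np.random.rand(50, 2)        # 50 points in the unit square
tree = KDTree(pts)
pairs = tree.query_pairs(r=0.1)    # {(i, j), ...} with i < j
assert all(i < j for i, j in pairs)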
|
|
tensorflow/tensorflow
|
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
|
tensorflow/python/ops/metrics_impl.py
|
python
|
root_mean_squared_error
|
(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None)
|
return rmse, update_rmse_op
|
Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
|
Computes the root mean squared error between the labels and predictions.
|
[
"Computes",
"the",
"root",
"mean",
"squared",
"error",
"between",
"the",
"labels",
"and",
"predictions",
"."
] |
def root_mean_squared_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.root_mean_squared_error is not '
'supported when eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,
None, name or
'root_mean_squared_error')
once_across_replicas = lambda _, mse: math_ops.sqrt(mse)
rmse = _aggregate_across_replicas(
metrics_collections, once_across_replicas, mse)
update_rmse_op = math_ops.sqrt(update_mse_op)
if updates_collections:
ops.add_to_collections(updates_collections, update_rmse_op)
return rmse, update_rmse_op
|
[
"def",
"root_mean_squared_error",
"(",
"labels",
",",
"predictions",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'tf.metrics.root_mean_squared_error is not '",
"'supported when eager execution is enabled.'",
")",
"predictions",
",",
"labels",
",",
"weights",
"=",
"_remove_squeezable_dimensions",
"(",
"predictions",
"=",
"predictions",
",",
"labels",
"=",
"labels",
",",
"weights",
"=",
"weights",
")",
"mse",
",",
"update_mse_op",
"=",
"mean_squared_error",
"(",
"labels",
",",
"predictions",
",",
"weights",
",",
"None",
",",
"None",
",",
"name",
"or",
"'root_mean_squared_error'",
")",
"once_across_replicas",
"=",
"lambda",
"_",
",",
"mse",
":",
"math_ops",
".",
"sqrt",
"(",
"mse",
")",
"rmse",
"=",
"_aggregate_across_replicas",
"(",
"metrics_collections",
",",
"once_across_replicas",
",",
"mse",
")",
"update_rmse_op",
"=",
"math_ops",
".",
"sqrt",
"(",
"update_mse_op",
")",
"if",
"updates_collections",
":",
"ops",
".",
"add_to_collections",
"(",
"updates_collections",
",",
"update_rmse_op",
")",
"return",
"rmse",
",",
"update_rmse_op"
] |
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/metrics_impl.py#L2916-L2983
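Because this is a streaming TF1 metric, the usual pattern is to run update_op once per batch and read the metric tensor afterwards; the underlying total/count live in local variables, so they need tf.local_variables_initializer(). A sketch using the v1 compatibility API (tensor values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()       # the metric raises under eager execution

labels = tf.constant([1.0, 2.0, 3.0])
preds = tf.constant([1.5, 2.0, 2.5])
rmse, update_op = tf.metrics.root_mean_squared_error(labels, preds)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)            # accumulate one 'batch'
    print(sess.run(rmse))          # sqrt of the running mean squared error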
|
|
apache/qpid-proton
|
6bcdfebb55ea3554bc29b1901422532db331a591
|
python/proton/_data.py
|
python
|
SymbolList.__add__
|
(self, t: Iterable[Any])
|
return SymbolList(super(SymbolList, self).__add__(self._check_list(t)), raise_on_error=self.raise_on_error)
|
Handles list1 + list2
|
Handles list1 + list2
|
[
"Handles",
"list1",
"+",
"list2"
] |
def __add__(self, t: Iterable[Any]) -> 'SymbolList':
""" Handles list1 + list2 """
return SymbolList(super(SymbolList, self).__add__(self._check_list(t)), raise_on_error=self.raise_on_error)
|
[
"def",
"__add__",
"(",
"self",
",",
"t",
":",
"Iterable",
"[",
"Any",
"]",
")",
"->",
"'SymbolList'",
":",
"return",
"SymbolList",
"(",
"super",
"(",
"SymbolList",
",",
"self",
")",
".",
"__add__",
"(",
"self",
".",
"_check_list",
"(",
"t",
")",
")",
",",
"raise_on_error",
"=",
"self",
".",
"raise_on_error",
")"
] |
https://github.com/apache/qpid-proton/blob/6bcdfebb55ea3554bc29b1901422532db331a591/python/proton/_data.py#L536-L538
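The re-wrapping matters because list.__add__ returns a plain list, which would silently drop the subclass's checking behaviour. The same pattern reduced to a self-contained sketch (CheckedList is hypothetical):

class CheckedList(list):
    def __add__(self, other):
        # Re-wrap so concatenation keeps the subclass type instead of
        # decaying to a plain list.
        return CheckedList(super().__add__(list(other)))

print(type(CheckedList([1]) + [2]).__name__)   # CheckedList, not list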
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/managers.py
|
python
|
all_methods
|
(obj)
|
return temp
|
Return a list of names of methods of `obj`
|
Return a list of names of methods of `obj`
|
[
"Return",
"a",
"list",
"of",
"names",
"of",
"methods",
"of",
"obj"
] |
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
|
[
"def",
"all_methods",
"(",
"obj",
")",
":",
"temp",
"=",
"[",
"]",
"for",
"name",
"in",
"dir",
"(",
"obj",
")",
":",
"func",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"if",
"callable",
"(",
"func",
")",
":",
"temp",
".",
"append",
"(",
"name",
")",
"return",
"temp"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/managers.py#L107-L116
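Note that dir() plus callable() picks up dunder methods and any other callable attribute, not only methods defined on the class body. A quick check, assuming the all_methods above is in scope:

class Point:
    def __init__(self, x):
        self.x = x
    def norm(self):
        return abs(self.x)

names = all_methods(Point(3))
assert 'norm' in names and '__init__' in names   # dunders are included too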
|
|
mongodb/mongo
|
d8ff665343ad29cf286ee2cf4a1960d29371937b
|
buildscripts/idl/idl/generator.py
|
python
|
_CppHeaderFileWriter.gen_derived_class_declaration_block
|
(self, class_name)
|
return writer.IndentedScopedBlock(
self._writer, 'class %s : public TypedCommand<Derived> {' % class_name, '};')
|
Generate a command's base class declaration block.
|
Generate a command's base class declaration block.
|
[
"Generate",
"a",
"command",
"s",
"base",
"class",
"declaration",
"block",
"."
] |
def gen_derived_class_declaration_block(self, class_name):
# type: (str) -> writer.IndentedScopedBlock
"""Generate a command's base class declaration block."""
return writer.IndentedScopedBlock(
self._writer, 'class %s : public TypedCommand<Derived> {' % class_name, '};')
|
[
"def",
"gen_derived_class_declaration_block",
"(",
"self",
",",
"class_name",
")",
":",
"# type: (str) -> writer.IndentedScopedBlock",
"return",
"writer",
".",
"IndentedScopedBlock",
"(",
"self",
".",
"_writer",
",",
"'class %s : public TypedCommand<Derived> {'",
"%",
"class_name",
",",
"'};'",
")"
] |
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/generator.py#L858-L862
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/closure_linter/closure_linter/tokenutil.py
|
python
|
DeleteToken
|
(token)
|
Deletes the given token from the linked list.
Args:
token: The token to delete
|
Deletes the given token from the linked list.
|
[
"Deletes",
"the",
"given",
"token",
"from",
"the",
"linked",
"list",
"."
] |
def DeleteToken(token):
"""Deletes the given token from the linked list.
Args:
token: The token to delete
"""
# When deleting a token, we do not update the deleted token itself to make
# sure the previous and next pointers are still pointing to tokens which are
# not deleted. Also it is very hard to keep track of all previously deleted
# tokens to update them when their pointers become invalid. So we add this
# flag that any token linked list iteration logic can skip deleted node safely
# when its current token is deleted.
token.is_deleted = True
if token.previous:
token.previous.next = token.next
if token.next:
token.next.previous = token.previous
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
|
[
"def",
"DeleteToken",
"(",
"token",
")",
":",
"# When deleting a token, we do not update the deleted token itself to make",
"# sure the previous and next pointers are still pointing to tokens which are",
"# not deleted. Also it is very hard to keep track of all previously deleted",
"# tokens to update them when their pointers become invalid. So we add this",
"# flag that any token linked list iteration logic can skip deleted node safely",
"# when its current token is deleted.",
"token",
".",
"is_deleted",
"=",
"True",
"if",
"token",
".",
"previous",
":",
"token",
".",
"previous",
".",
"next",
"=",
"token",
".",
"next",
"if",
"token",
".",
"next",
":",
"token",
".",
"next",
".",
"previous",
"=",
"token",
".",
"previous",
"following_token",
"=",
"token",
".",
"next",
"while",
"following_token",
"and",
"following_token",
".",
"metadata",
".",
"last_code",
"==",
"token",
":",
"following_token",
".",
"metadata",
".",
"last_code",
"=",
"token",
".",
"metadata",
".",
"last_code",
"following_token",
"=",
"following_token",
".",
"next"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/tokenutil.py#L212-L234
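The pointer rewiring here is the standard doubly-linked-list splice, with the is_deleted flag and the last_code fix-up layered on top. The core move, reduced to a self-contained sketch (the Node class is hypothetical):

class Node:
    def __init__(self, value):
        self.value = value
        self.previous = None
        self.next = None
        self.is_deleted = False

def splice_out(node):
    # Mark first, then route the neighbours around the node; the node
    # keeps its own pointers so in-flight iterators can still step past it.
    node.is_deleted = True
    if node.previous:
        node.previous.next = node.next
    if node.next:
        node.next.previous = node.previous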
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/calendar.py
|
python
|
CalendarDateAttr.HasBorderColour
|
(*args, **kwargs)
|
return _calendar.CalendarDateAttr_HasBorderColour(*args, **kwargs)
|
HasBorderColour(self) -> bool
|
HasBorderColour(self) -> bool
|
[
"HasBorderColour",
"(",
"self",
")",
"-",
">",
"bool"
] |
def HasBorderColour(*args, **kwargs):
"""HasBorderColour(self) -> bool"""
return _calendar.CalendarDateAttr_HasBorderColour(*args, **kwargs)
|
[
"def",
"HasBorderColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_calendar",
".",
"CalendarDateAttr_HasBorderColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/calendar.py#L130-L132
|
|
sdhash/sdhash
|
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
|
sdhash-ui/serverui/sdhashsrv/sdhashsrv.py
|
python
|
Client.getHashsetName
|
(self, num1)
|
return self.recv_getHashsetName()
|
Parameters:
- num1
|
Parameters:
- num1
|
[
"Parameters",
":",
"-",
"num1"
] |
def getHashsetName(self, num1):
"""
Parameters:
- num1
"""
self.send_getHashsetName(num1)
return self.recv_getHashsetName()
|
[
"def",
"getHashsetName",
"(",
"self",
",",
"num1",
")",
":",
"self",
".",
"send_getHashsetName",
"(",
"num1",
")",
"return",
"self",
".",
"recv_getHashsetName",
"(",
")"
] |
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/serverui/sdhashsrv/sdhashsrv.py#L470-L476
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/grid.py
|
python
|
GridTableBase.IsEmptyCell
|
(*args, **kwargs)
|
return _grid.GridTableBase_IsEmptyCell(*args, **kwargs)
|
IsEmptyCell(self, int row, int col) -> bool
|
IsEmptyCell(self, int row, int col) -> bool
|
[
"IsEmptyCell",
"(",
"self",
"int",
"row",
"int",
"col",
")",
"-",
">",
"bool"
] |
def IsEmptyCell(*args, **kwargs):
"""IsEmptyCell(self, int row, int col) -> bool"""
return _grid.GridTableBase_IsEmptyCell(*args, **kwargs)
|
[
"def",
"IsEmptyCell",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"GridTableBase_IsEmptyCell",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L810-L812
|
|
bundy-dns/bundy
|
3d41934996b82b0cd2fe22dd74d2abc1daba835d
|
src/lib/python/bundy/datasrc/sqlite3_ds.py
|
python
|
get_zoneid
|
(zonename, cur)
|
Get the zone_id for a given zone name.
Arguments:
zonename - the zone's origin name.
cur - sqlite3 cursor.
Return zone id for the given zone name, or an empty string if the
zone is not found.
|
Get the zone_id for a given zone name.
|
[
"Get",
"the",
"zone_id",
"for",
"a",
"given",
"zone",
"name",
"."
] |
def get_zoneid(zonename, cur):
""" Get the zone_id for a given zone name.
Arguments:
zonename - the zone's origin name.
cur - sqlite3 cursor.
Return zone id for the given zone name, or an empty string if the
zone is not found.
"""
cur.execute("SELECT id FROM zones WHERE name = ?", [zonename])
row = cur.fetchone()
if row:
return row[0]
else:
return ''
|
[
"def",
"get_zoneid",
"(",
"zonename",
",",
"cur",
")",
":",
"cur",
".",
"execute",
"(",
"\"SELECT id FROM zones WHERE name = ?\"",
",",
"[",
"zonename",
"]",
")",
"row",
"=",
"cur",
".",
"fetchone",
"(",
")",
"if",
"row",
":",
"return",
"row",
"[",
"0",
"]",
"else",
":",
"return",
"''"
] |
https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/datasrc/sqlite3_ds.py#L215-L230
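The parameterized query keeps the zone name out of the SQL text, so unusual names cannot inject SQL. A self-contained sketch against an in-memory database, assuming a minimal zones schema and get_zoneid above in scope:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("CREATE TABLE zones (id INTEGER PRIMARY KEY, name TEXT)")
cur.execute("INSERT INTO zones (name) VALUES (?)", ["example.com."])

print(get_zoneid("example.com.", cur))   # 1
print(get_zoneid("missing.", cur))       # '' (zone not found)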
|
||
trilinos/Trilinos
|
6168be6dd51e35e1cd681e9c4b24433e709df140
|
packages/seacas/scripts/exodus2.in.py
|
python
|
exodus.set_side_set_variable_number
|
(self, number)
|
return True
|
status = exo.set_side_set_variable_number(num_ssvars)
-> update the number of side set variables in the model
input value(s):
<int> num_ssvars
return value(s):
<bool> status True = successful execution
|
status = exo.set_side_set_variable_number(num_ssvars)
|
[
"status",
"=",
"exo",
".",
"set_side_set_variable_number",
"(",
"num_ssvars",
")"
] |
def set_side_set_variable_number(self, number):
"""
status = exo.set_side_set_variable_number(num_ssvars)
-> update the number of side set variables in the model
input value(s):
<int> num_ssvars
return value(s):
<bool> status True = successful execution
"""
ssType = ex_entity_type("EX_SIDE_SET")
self.__ex_put_variable_param(ssType, number)
return True
|
[
"def",
"set_side_set_variable_number",
"(",
"self",
",",
"number",
")",
":",
"ssType",
"=",
"ex_entity_type",
"(",
"\"EX_SIDE_SET\"",
")",
"self",
".",
"__ex_put_variable_param",
"(",
"ssType",
",",
"number",
")",
"return",
"True"
] |
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/scripts/exodus2.in.py#L2893-L2907
|
|
trilinos/Trilinos
|
6168be6dd51e35e1cd681e9c4b24433e709df140
|
packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/PhactoriDriver.py
|
python
|
PhactoriIntersectNodeNormalsWithSurface.CreateParaViewFilter
|
(self, inInputFilter)
|
return newParaViewFilter
|
create the filter for ParaView
|
create the filter for ParaView
|
[
"create",
"the",
"filter",
"for",
"ParaView"
] |
def CreateParaViewFilter(self, inInputFilter):
"create the filter for ParaView"
if PhactoriDbg(100):
myDebugPrint3("PhactoriIntersectNodeNormalsWithSurface." \
"CreateParaViewFilter entered\n", 100)
#info in block class should already be parsed and checked
savedActiveSource = GetActiveSource()
if self.mCreateIntersectionSegmentGroup == True:
self.mPointRayLineSource = PVTrivialProducer()
self.mPointRayLineSource.GetClientSideObject().SetOutput(
self.SegmentsFromPointRays.mVtkPolyData)
self.mTriangleRayLineSource = PVTrivialProducer()
self.mTriangleRayLineSource.GetClientSideObject().SetOutput(
self.SegmentsFromTriangleRays.mVtkPolyData)
#newParaViewFilter = GroupDatasets(Input = [])
#self.mGroupLineSource = newParaViewFilter
self.mGroupLineSource = GroupDatasets(
Input = [self.mPointRayLineSource, self.mTriangleRayLineSource])
self.mTubeFilter = Tube(Input = self.mGroupLineSource)
#self.mTubeFilter.NumberofSides = 8
self.mTubeFilter.Radius = 0.01
#self.mTubeFilter.VaryRadius = 'By Scalar'
newParaViewFilter = self.mTubeFilter
else:
newParaViewFilter = PVTrivialProducer()
passthru = inInputFilter.GetClientSideObject().GetOutputDataObject(0)
numpts = passthru.GetNumberOfPoints()
newvar = vtk.vtkDoubleArray()
newvar.SetNumberOfComponents(1)
newvar.SetName("PointNormalRayIntersectDistance")
for ii in range(0, numpts):
newvar.InsertNextValue(float(-1.0))
passthru.GetPointData().AddArray(newvar)
self.mPointNormalRayIntersectDistanceArray = \
passthru.GetPointData().GetArray("PointNormalRayIntersectDistance")
numcells = passthru.GetNumberOfCells()
newvar2 = vtk.vtkDoubleArray()
newvar2.SetNumberOfComponents(1)
newvar2.SetName("CellNormalRayIntersectDistance")
for ii in range(0, numcells):
newvar2.InsertNextValue(float(-1.0))
passthru.GetCellData().AddArray(newvar2)
self.mCellNormalRayIntersectDistanceArray = \
passthru.GetCellData().GetArray("CellNormalRayIntersectDistance")
if PhactoriDbg(100):
numpts = passthru.GetNumberOfPoints()
myDebugPrint3("numpts: " + str(numpts) + "\n")
numptarrays = passthru.GetPointData().GetNumberOfArrays()
myDebugPrint3("numptarrays: " + str(numptarrays) + "\n")
numcells = passthru.GetNumberOfCells()
myDebugPrint3("numcells: " + str(numcells) + "\n")
numcellarrays = passthru.GetCellData().GetNumberOfArrays()
myDebugPrint3("numcellarrays: " + str(numcellarrays) + "\n")
newParaViewFilter.GetClientSideObject().SetOutput(passthru)
SetActiveSource(newParaViewFilter)
SetActiveSource(savedActiveSource)
if PhactoriDbg(100):
myDebugPrint3(str(self.mGroupLineSource))
myDebugPrint3("PhactoriIntersectNodeNormalsWithSurface." \
"CreateParaViewFilter returning\n", 100)
return newParaViewFilter
|
[
"def",
"CreateParaViewFilter",
"(",
"self",
",",
"inInputFilter",
")",
":",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"PhactoriIntersectNodeNormalsWithSurface.\"",
"\"CreateParaViewFilter entered\\n\"",
",",
"100",
")",
"#info in block class should already be parsed and checked",
"savedActiveSource",
"=",
"GetActiveSource",
"(",
")",
"if",
"self",
".",
"mCreateIntersectionSegmentGroup",
"==",
"True",
":",
"self",
".",
"mPointRayLineSource",
"=",
"PVTrivialProducer",
"(",
")",
"self",
".",
"mPointRayLineSource",
".",
"GetClientSideObject",
"(",
")",
".",
"SetOutput",
"(",
"self",
".",
"SegmentsFromPointRays",
".",
"mVtkPolyData",
")",
"self",
".",
"mTriangleRayLineSource",
"=",
"PVTrivialProducer",
"(",
")",
"self",
".",
"mTriangleRayLineSource",
".",
"GetClientSideObject",
"(",
")",
".",
"SetOutput",
"(",
"self",
".",
"SegmentsFromTriangleRays",
".",
"mVtkPolyData",
")",
"#newParaViewFilter = GroupDatasets(Input = [])",
"#self.mGroupLineSource = newParaViewFilter",
"self",
".",
"mGroupLineSource",
"=",
"GroupDatasets",
"(",
"Input",
"=",
"[",
"self",
".",
"mPointRayLineSource",
",",
"self",
".",
"mTriangleRayLineSource",
"]",
")",
"self",
".",
"mTubeFilter",
"=",
"Tube",
"(",
"Input",
"=",
"self",
".",
"mGroupLineSource",
")",
"#self.mTubeFilter.NumberofSides = 8",
"self",
".",
"mTubeFilter",
".",
"Radius",
"=",
"0.01",
"#self.mTubeFilter.VaryRadius = 'By Scalar'",
"newParaViewFilter",
"=",
"self",
".",
"mTubeFilter",
"else",
":",
"newParaViewFilter",
"=",
"PVTrivialProducer",
"(",
")",
"passthru",
"=",
"inInputFilter",
".",
"GetClientSideObject",
"(",
")",
".",
"GetOutputDataObject",
"(",
"0",
")",
"numpts",
"=",
"passthru",
".",
"GetNumberOfPoints",
"(",
")",
"newvar",
"=",
"vtk",
".",
"vtkDoubleArray",
"(",
")",
"newvar",
".",
"SetNumberOfComponents",
"(",
"1",
")",
"newvar",
".",
"SetName",
"(",
"\"PointNormalRayIntersectDistance\"",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"numpts",
")",
":",
"newvar",
".",
"InsertNextValue",
"(",
"float",
"(",
"-",
"1.0",
")",
")",
"passthru",
".",
"GetPointData",
"(",
")",
".",
"AddArray",
"(",
"newvar",
")",
"self",
".",
"mPointNormalRayIntersectDistanceArray",
"=",
"passthru",
".",
"GetPointData",
"(",
")",
".",
"GetArray",
"(",
"\"PointNormalRayIntersectDistance\"",
")",
"numcells",
"=",
"passthru",
".",
"GetNumberOfCells",
"(",
")",
"newvar2",
"=",
"vtk",
".",
"vtkDoubleArray",
"(",
")",
"newvar2",
".",
"SetNumberOfComponents",
"(",
"1",
")",
"newvar2",
".",
"SetName",
"(",
"\"CellNormalRayIntersectDistance\"",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"numcells",
")",
":",
"newvar2",
".",
"InsertNextValue",
"(",
"float",
"(",
"-",
"1.0",
")",
")",
"passthru",
".",
"GetCellData",
"(",
")",
".",
"AddArray",
"(",
"newvar2",
")",
"self",
".",
"mCellNormalRayIntersectDistanceArray",
"=",
"passthru",
".",
"GetCellData",
"(",
")",
".",
"GetArray",
"(",
"\"CellNormalRayIntersectDistance\"",
")",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"numpts",
"=",
"passthru",
".",
"GetNumberOfPoints",
"(",
")",
"myDebugPrint3",
"(",
"\"numpts: \"",
"+",
"str",
"(",
"numpts",
")",
"+",
"\"\\n\"",
")",
"numptarrays",
"=",
"passthru",
".",
"GetPointData",
"(",
")",
".",
"GetNumberOfArrays",
"(",
")",
"myDebugPrint3",
"(",
"\"numptarrays: \"",
"+",
"str",
"(",
"numptarrays",
")",
"+",
"\"\\n\"",
")",
"numcells",
"=",
"passthru",
".",
"GetNumberOfCells",
"(",
")",
"myDebugPrint3",
"(",
"\"numcells: \"",
"+",
"str",
"(",
"numcells",
")",
"+",
"\"\\n\"",
")",
"numcellarrays",
"=",
"passthru",
".",
"GetCellData",
"(",
")",
".",
"GetNumberOfArrays",
"(",
")",
"myDebugPrint3",
"(",
"\"numcellarrays: \"",
"+",
"str",
"(",
"numcellarrays",
")",
"+",
"\"\\n\"",
")",
"newParaViewFilter",
".",
"GetClientSideObject",
"(",
")",
".",
"SetOutput",
"(",
"passthru",
")",
"SetActiveSource",
"(",
"newParaViewFilter",
")",
"SetActiveSource",
"(",
"savedActiveSource",
")",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"str",
"(",
"self",
".",
"mGroupLineSource",
")",
")",
"myDebugPrint3",
"(",
"\"PhactoriIntersectNodeNormalsWithSurface.\"",
"\"CreateParaViewFilter returning\\n\"",
",",
"100",
")",
"return",
"newParaViewFilter"
] |
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/PhactoriDriver.py#L9086-L9161
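Much of the non-ParaView work above is plain VTK: allocate a named double array, fill it with a -1.0 sentinel, and attach it to point or cell data. That sub-pattern in isolation (array name and length are illustrative):

import vtk

arr = vtk.vtkDoubleArray()
arr.SetNumberOfComponents(1)
arr.SetName("PointNormalRayIntersectDistance")
for _ in range(10):                  # one slot per point, sentinel -1.0
    arr.InsertNextValue(-1.0)
# polydata.GetPointData().AddArray(arr)   # then attach to an existing dataset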
|
|
tiny-dnn/tiny-dnn
|
c0f576f5cb7b35893f62127cb7aec18f77a3bcc5
|
third_party/cpplint.py
|
python
|
ParseArguments
|
(args)
|
return filenames
|
Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
|
Parses the command line arguments.
|
[
"Parses",
"the",
"command",
"line",
"arguments",
"."
] |
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'repository=',
'linelength=',
'extensions=',
'exclude=',
'headers=',
'quiet',
'recursive'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
recursive = False
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
'and junit.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--repository':
global _repository
_repository = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--exclude':
global _excludes
if not _excludes:
_excludes = set()
_excludes.update(glob.glob(val))
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be comma separated list.')
elif opt == '--headers':
global _header_extensions
try:
_header_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be comma separated list.')
elif opt == '--recursive':
recursive = True
elif opt == '--quiet':
global _quiet
_quiet = True
if not filenames:
PrintUsage('No files were specified.')
if recursive:
filenames = _ExpandDirectories(filenames)
if _excludes:
filenames = _FilterExcludedFiles(filenames)
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
|
[
"def",
"ParseArguments",
"(",
"args",
")",
":",
"try",
":",
"(",
"opts",
",",
"filenames",
")",
"=",
"getopt",
".",
"getopt",
"(",
"args",
",",
"''",
",",
"[",
"'help'",
",",
"'output='",
",",
"'verbose='",
",",
"'counting='",
",",
"'filter='",
",",
"'root='",
",",
"'repository='",
",",
"'linelength='",
",",
"'extensions='",
",",
"'exclude='",
",",
"'headers='",
",",
"'quiet'",
",",
"'recursive'",
"]",
")",
"except",
"getopt",
".",
"GetoptError",
":",
"PrintUsage",
"(",
"'Invalid arguments.'",
")",
"verbosity",
"=",
"_VerboseLevel",
"(",
")",
"output_format",
"=",
"_OutputFormat",
"(",
")",
"filters",
"=",
"''",
"counting_style",
"=",
"''",
"recursive",
"=",
"False",
"for",
"(",
"opt",
",",
"val",
")",
"in",
"opts",
":",
"if",
"opt",
"==",
"'--help'",
":",
"PrintUsage",
"(",
"None",
")",
"elif",
"opt",
"==",
"'--output'",
":",
"if",
"val",
"not",
"in",
"(",
"'emacs'",
",",
"'vs7'",
",",
"'eclipse'",
",",
"'junit'",
")",
":",
"PrintUsage",
"(",
"'The only allowed output formats are emacs, vs7, eclipse '",
"'and junit.'",
")",
"output_format",
"=",
"val",
"elif",
"opt",
"==",
"'--verbose'",
":",
"verbosity",
"=",
"int",
"(",
"val",
")",
"elif",
"opt",
"==",
"'--filter'",
":",
"filters",
"=",
"val",
"if",
"not",
"filters",
":",
"PrintCategories",
"(",
")",
"elif",
"opt",
"==",
"'--counting'",
":",
"if",
"val",
"not",
"in",
"(",
"'total'",
",",
"'toplevel'",
",",
"'detailed'",
")",
":",
"PrintUsage",
"(",
"'Valid counting options are total, toplevel, and detailed'",
")",
"counting_style",
"=",
"val",
"elif",
"opt",
"==",
"'--root'",
":",
"global",
"_root",
"_root",
"=",
"val",
"elif",
"opt",
"==",
"'--repository'",
":",
"global",
"_repository",
"_repository",
"=",
"val",
"elif",
"opt",
"==",
"'--linelength'",
":",
"global",
"_line_length",
"try",
":",
"_line_length",
"=",
"int",
"(",
"val",
")",
"except",
"ValueError",
":",
"PrintUsage",
"(",
"'Line length must be digits.'",
")",
"elif",
"opt",
"==",
"'--exclude'",
":",
"global",
"_excludes",
"if",
"not",
"_excludes",
":",
"_excludes",
"=",
"set",
"(",
")",
"_excludes",
".",
"update",
"(",
"glob",
".",
"glob",
"(",
"val",
")",
")",
"elif",
"opt",
"==",
"'--extensions'",
":",
"global",
"_valid_extensions",
"try",
":",
"_valid_extensions",
"=",
"set",
"(",
"val",
".",
"split",
"(",
"','",
")",
")",
"except",
"ValueError",
":",
"PrintUsage",
"(",
"'Extensions must be comma seperated list.'",
")",
"elif",
"opt",
"==",
"'--headers'",
":",
"global",
"_header_extensions",
"try",
":",
"_header_extensions",
"=",
"set",
"(",
"val",
".",
"split",
"(",
"','",
")",
")",
"except",
"ValueError",
":",
"PrintUsage",
"(",
"'Extensions must be comma seperated list.'",
")",
"elif",
"opt",
"==",
"'--recursive'",
":",
"recursive",
"=",
"True",
"elif",
"opt",
"==",
"'--quiet'",
":",
"global",
"_quiet",
"_quiet",
"=",
"True",
"if",
"not",
"filenames",
":",
"PrintUsage",
"(",
"'No files were specified.'",
")",
"if",
"recursive",
":",
"filenames",
"=",
"_ExpandDirectories",
"(",
"filenames",
")",
"if",
"_excludes",
":",
"filenames",
"=",
"_FilterExcludedFiles",
"(",
"filenames",
")",
"_SetOutputFormat",
"(",
"output_format",
")",
"_SetVerboseLevel",
"(",
"verbosity",
")",
"_SetFilters",
"(",
"filters",
")",
"_SetCountingStyle",
"(",
"counting_style",
")",
"return",
"filenames"
] |
https://github.com/tiny-dnn/tiny-dnn/blob/c0f576f5cb7b35893f62127cb7aec18f77a3bcc5/third_party/cpplint.py#L6312-L6411
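Since the function mutates module-level settings as a side effect and returns only the file list, calling it twice in one process carries state over. A hedged invocation, assuming cpplint is imported as a module (the flags and file name are illustrative):

filenames = cpplint.ParseArguments([
    '--linelength=100',
    '--filter=-build/include',
    'foo.cc',
])
# filenames == ['foo.cc']; verbosity, filters, etc. are now set globally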
|
|
goldeneye-source/ges-code
|
2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d
|
thirdparty/protobuf-2.3.0/python/google/protobuf/internal/encoder.py
|
python
|
_SignedVarintSize
|
(value)
|
return 10
|
Compute the size of a signed varint value.
|
Compute the size of a signed varint value.
|
[
"Compute",
"the",
"size",
"of",
"a",
"signed",
"varint",
"value",
"."
] |
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
|
[
"def",
"_SignedVarintSize",
"(",
"value",
")",
":",
"if",
"value",
"<",
"0",
":",
"return",
"10",
"if",
"value",
"<=",
"0x7f",
":",
"return",
"1",
"if",
"value",
"<=",
"0x3fff",
":",
"return",
"2",
"if",
"value",
"<=",
"0x1fffff",
":",
"return",
"3",
"if",
"value",
"<=",
"0xfffffff",
":",
"return",
"4",
"if",
"value",
"<=",
"0x7ffffffff",
":",
"return",
"5",
"if",
"value",
"<=",
"0x3ffffffffff",
":",
"return",
"6",
"if",
"value",
"<=",
"0x1ffffffffffff",
":",
"return",
"7",
"if",
"value",
"<=",
"0xffffffffffffff",
":",
"return",
"8",
"if",
"value",
"<=",
"0x7fffffffffffffff",
":",
"return",
"9",
"return",
"10"
] |
https://github.com/goldeneye-source/ges-code/blob/2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d/thirdparty/protobuf-2.3.0/python/google/protobuf/internal/encoder.py#L87-L99
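The comparison ladder is just counting how many 7-bit groups the value needs; negatives are sign-extended to 64 bits and always take ten bytes. A reference computation that should agree with the function, assuming _SignedVarintSize above is in scope:

def signed_varint_size_check(value):
    if value < 0:
        return 10                    # sign extension fills all ten groups
    bits = value.bit_length() or 1   # zero still needs one byte
    return (bits + 6) // 7           # ceil(bits / 7)

for v in (0, 1, 127, 128, 16383, 16384, 0x7fffffffffffffff, -1):
    assert _SignedVarintSize(v) == signed_varint_size_check(v)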
|
|
facebook/proxygen
|
a9ca025af207787815cb01eee1971cd572c7a81e
|
build/fbcode_builder/getdeps/cache.py
|
python
|
ArtifactCache.upload_from_file
|
(self, name, source_file_name)
|
Causes `name` to be populated in the cache by uploading
the contents of `source_file_name` to the storage system.
If a transient issue was encountered a TransientFailure shall
be raised.
If the upload failed for some other reason, an appropriate
exception shall be raised.
|
Causes `name` to be populated in the cache by uploading
the contents of `source_file_name` to the storage system.
If a transient issue was encountered a TransientFailure shall
be raised.
If the upload failed for some other reason, an appropriate
exception shall be raised.
|
[
"Causes",
"name",
"to",
"be",
"populated",
"in",
"the",
"cache",
"by",
"uploading",
"the",
"contents",
"of",
"source_file_name",
"to",
"the",
"storage",
"system",
".",
"If",
"a",
"transient",
"issue",
"was",
"encountered",
"a",
"TransientFailure",
"shall",
"be",
"raised",
".",
"If",
"the",
"upload",
"failed",
"for",
"some",
"other",
"reason",
"an",
"appropriate",
"exception",
"shall",
"be",
"raised",
"."
] |
def upload_from_file(self, name, source_file_name):
"""Causes `name` to be populated in the cache by uploading
the contents of `source_file_name` to the storage system.
If a transient issue was encountered a TransientFailure shall
be raised.
If the upload failed for some other reason, an appropriate
exception shall be raised."""
pass
|
[
"def",
"upload_from_file",
"(",
"self",
",",
"name",
",",
"source_file_name",
")",
":",
"pass"
] |
https://github.com/facebook/proxygen/blob/a9ca025af207787815cb01eee1971cd572c7a81e/build/fbcode_builder/getdeps/cache.py#L24-L31
|
||
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/third_party/boto/boto/rds/__init__.py
|
python
|
RDSConnection.delete_option_group
|
(self, name)
|
return self.get_status('DeleteOptionGroup', params)
|
Delete an OptionGroup from your account.
:type name: string
:param name: The name of the OptionGroup to delete
|
Delete an OptionGroup from your account.
|
[
"Delete",
"an",
"OptionGroup",
"from",
"your",
"account",
"."
] |
def delete_option_group(self, name):
"""
Delete an OptionGroup from your account.
:type name: string
:param name: The name of the OptionGroup to delete
"""
params = {'OptionGroupName': name}
return self.get_status('DeleteOptionGroup', params)
|
[
"def",
"delete_option_group",
"(",
"self",
",",
"name",
")",
":",
"params",
"=",
"{",
"'OptionGroupName'",
":",
"name",
"}",
"return",
"self",
".",
"get_status",
"(",
"'DeleteOptionGroup'",
",",
"params",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/rds/__init__.py#L1529-L1537
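Usage is a single status call once a connection exists; the region and group name below are illustrative, not taken from the source:

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')
conn.delete_option_group('my-option-group')   # returns the request status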
|
|
oracle/graaljs
|
36a56e8e993d45fc40939a3a4d9c0c24990720f1
|
graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
|
python
|
SourceifyAndQuoteSpaces
|
(path)
|
return QuoteSpaces(Sourceify(path))
|
Convert a path to its source directory form and quote spaces.
|
Convert a path to its source directory form and quote spaces.
|
[
"Convert",
"a",
"path",
"to",
"its",
"source",
"directory",
"form",
"and",
"quote",
"spaces",
"."
] |
def SourceifyAndQuoteSpaces(path):
"""Convert a path to its source directory form and quote spaces."""
return QuoteSpaces(Sourceify(path))
|
[
"def",
"SourceifyAndQuoteSpaces",
"(",
"path",
")",
":",
"return",
"QuoteSpaces",
"(",
"Sourceify",
"(",
"path",
")",
")"
] |
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py#L667-L669
|
|
Kronuz/Xapiand
|
a71570859dcfc9f48090d845053f359b07f4f78c
|
contrib/python/xapiand-py/example/load.py
|
python
|
parse_commits
|
(name)
|
Go through the git repository log and generate a document per commit
containing all the metadata.
|
Go through the git repository log and generate a document per commit
containing all the metadata.
|
[
"Go",
"through",
"the",
"git",
"repository",
"log",
"and",
"generate",
"a",
"document",
"per",
"commit",
"containing",
"all",
"the",
"metadata",
"."
] |
def parse_commits(name):
"""
Go through the git repository log and generate a document per commit
containing all the metadata.
"""
for commit in iter_commits():
yield {
'_id': commit['hexsha'],
'repository': name,
'committed_date': datetime.fromtimestamp(float(commit['committed_date'])),
'committer': {
'name': commit['committer_name'],
'email': commit['committer_email'],
},
'authored_date': datetime.fromtimestamp(float(commit['authored_date'])),
'author': {
'name': commit['author_name'],
'email': commit['author_email'],
},
'description': '\n\n'.join((commit['subject'], commit['body'])).strip(),
'parent_shas': commit['parents'].split(),
# we only care about the filenames, not the per-file stats
'files': list(commit['files']),
'stats': commit['stats'],
}
|
[
"def",
"parse_commits",
"(",
"name",
")",
":",
"for",
"commit",
"in",
"iter_commits",
"(",
")",
":",
"yield",
"{",
"'_id'",
":",
"commit",
"[",
"'hexsha'",
"]",
",",
"'repository'",
":",
"name",
",",
"'committed_date'",
":",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"commit",
"[",
"'committed_date'",
"]",
")",
")",
",",
"'committer'",
":",
"{",
"'name'",
":",
"commit",
"[",
"'committer_name'",
"]",
",",
"'email'",
":",
"commit",
"[",
"'committer_email'",
"]",
",",
"}",
",",
"'authored_date'",
":",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"commit",
"[",
"'authored_date'",
"]",
")",
")",
",",
"'author'",
":",
"{",
"'name'",
":",
"commit",
"[",
"'author_name'",
"]",
",",
"'email'",
":",
"commit",
"[",
"'author_email'",
"]",
",",
"}",
",",
"'description'",
":",
"'\\n\\n'",
".",
"join",
"(",
"(",
"commit",
"[",
"'subject'",
"]",
",",
"commit",
"[",
"'body'",
"]",
")",
")",
".",
"strip",
"(",
")",
",",
"'parent_shas'",
":",
"commit",
"[",
"'parents'",
"]",
".",
"split",
"(",
")",
",",
"# we only care about the filenames, not the per-file stats",
"'files'",
":",
"list",
"(",
"commit",
"[",
"'files'",
"]",
")",
",",
"'stats'",
":",
"commit",
"[",
"'stats'",
"]",
",",
"}"
] |
https://github.com/Kronuz/Xapiand/blob/a71570859dcfc9f48090d845053f359b07f4f78c/contrib/python/xapiand-py/example/load.py#L118-L143
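Each yielded dict is shaped for bulk indexing, with _id taken from the commit SHA. A hedged consumption sketch (iter_commits and the repository name come from elsewhere in the same example file, so this assumes that context):

for doc in parse_commits('my-repo'):
    print(doc['_id'][:8], doc['committer']['name'], len(doc['files']))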
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/ribbon/toolbar.py
|
python
|
RibbonToolBar.DoGetNextLargerSize
|
(self, direction, relative_to)
|
return result
|
Implementation of :meth:`RibbonControl.GetNextLargerSize() <lib.agw.ribbon.control.RibbonControl.GetNextLargerSize>`.
Controls which have non-continuous sizing must override this virtual function
rather than :meth:`RibbonControl.GetNextLargerSize() <lib.agw.ribbon.control.RibbonControl.GetNextLargerSize>`.
|
Implementation of :meth:`RibbonControl.GetNextLargerSize() <lib.agw.ribbon.control.RibbonControl.GetNextLargerSize>`.
|
[
"Implementation",
"of",
":",
"meth",
":",
"RibbonControl",
".",
"GetNextLargerSize",
"()",
"<lib",
".",
"agw",
".",
"ribbon",
".",
"control",
".",
"RibbonControl",
".",
"GetNextLargerSize",
">",
"."
] |
def DoGetNextLargerSize(self, direction, relative_to):
"""
Implementation of :meth:`RibbonControl.GetNextLargerSize() <lib.agw.ribbon.control.RibbonControl.GetNextLargerSize>`.
Controls which have non-continuous sizing must override this virtual function
rather than :meth:`RibbonControl.GetNextLargerSize() <lib.agw.ribbon.control.RibbonControl.GetNextLargerSize>`.
"""
# Pick the smallest of our sizes which are larger than the given size
result = wx.Size(*relative_to)
area = 10000
tobreak = False
for nrows in xrange(self._nrows_min, self._nrows_max+1):
size = wx.Size(*self._sizes[nrows - self._nrows_min])
original = wx.Size(*size)
if direction == wx.HORIZONTAL:
if size.GetWidth() > relative_to.GetWidth() and size.GetHeight() <= relative_to.GetHeight():
size.SetHeight(relative_to.GetHeight())
tobreak = True
elif direction == wx.VERTICAL:
if size.GetWidth() <= relative_to.GetWidth() and size.GetHeight() > relative_to.GetHeight():
size.SetWidth(relative_to.GetWidth())
tobreak = True
elif direction == wx.BOTH:
if size.GetWidth() > relative_to.GetWidth() and size.GetHeight() > relative_to.GetHeight():
tobreak = True
if GetSizeInOrientation(original, direction) < area:
result = wx.Size(*size)
area = GetSizeInOrientation(original, direction)
if tobreak:
break
return result
|
[
"def",
"DoGetNextLargerSize",
"(",
"self",
",",
"direction",
",",
"relative_to",
")",
":",
"# Pick the smallest of our sizes which are larger than the given size",
"result",
"=",
"wx",
".",
"Size",
"(",
"*",
"relative_to",
")",
"area",
"=",
"10000",
"tobreak",
"=",
"False",
"for",
"nrows",
"in",
"xrange",
"(",
"self",
".",
"_nrows_min",
",",
"self",
".",
"_nrows_max",
"+",
"1",
")",
":",
"size",
"=",
"wx",
".",
"Size",
"(",
"*",
"self",
".",
"_sizes",
"[",
"nrows",
"-",
"self",
".",
"_nrows_min",
"]",
")",
"original",
"=",
"wx",
".",
"Size",
"(",
"*",
"size",
")",
"if",
"direction",
"==",
"wx",
".",
"HORIZONTAL",
":",
"if",
"size",
".",
"GetWidth",
"(",
")",
">",
"relative_to",
".",
"GetWidth",
"(",
")",
"and",
"size",
".",
"GetHeight",
"(",
")",
"<=",
"relative_to",
".",
"GetHeight",
"(",
")",
":",
"size",
".",
"SetHeight",
"(",
"relative_to",
".",
"GetHeight",
"(",
")",
")",
"tobreak",
"=",
"True",
"elif",
"direction",
"==",
"wx",
".",
"VERTICAL",
":",
"if",
"size",
".",
"GetWidth",
"(",
")",
"<=",
"relative_to",
".",
"GetWidth",
"(",
")",
"and",
"size",
".",
"GetHeight",
"(",
")",
">",
"relative_to",
".",
"GetHeight",
"(",
")",
":",
"size",
".",
"SetWidth",
"(",
"relative_to",
".",
"GetWidth",
"(",
")",
")",
"tobreak",
"=",
"True",
"elif",
"direction",
"==",
"wx",
".",
"BOTH",
":",
"if",
"size",
".",
"GetWidth",
"(",
")",
">",
"relative_to",
".",
"GetWidth",
"(",
")",
"and",
"size",
".",
"GetHeight",
"(",
")",
">",
"relative_to",
".",
"GetHeight",
"(",
")",
":",
"tobreak",
"=",
"True",
"if",
"GetSizeInOrientation",
"(",
"original",
",",
"direction",
")",
"<",
"area",
":",
"result",
"=",
"wx",
".",
"Size",
"(",
"*",
"size",
")",
"area",
"=",
"GetSizeInOrientation",
"(",
"original",
",",
"direction",
")",
"if",
"tobreak",
":",
"break",
"return",
"result"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ribbon/toolbar.py#L913-L951
|
|
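The search above amounts to: among the candidate sizes, prefer the smallest one that still exceeds relative_to in the requested direction. A simplified, self-contained sketch of that selection for the wx.BOTH case, using (w, h) tuples instead of wx.Size (illustrative only, not the wx API):

def next_larger(candidates, relative_to):
    # candidates: list of (w, h); pick the smallest-area size strictly
    # larger than relative_to in both dimensions.
    rw, rh = relative_to
    best, best_area = relative_to, float('inf')
    for w, h in candidates:
        if w > rw and h > rh and w * h < best_area:
            best, best_area = (w, h), w * h
    return best

print(next_larger([(10, 5), (20, 12), (30, 20)], (15, 10)))  # (20, 12)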
sigmaai/self-driving-golf-cart
|
8d891600af3d851add27a10ae45cf3c2108bb87c
|
ros/src/ros_carla_bridge/carla_ros_bridge/src/carla_ros_bridge/traffic_participant.py
|
python
|
TrafficParticipant.get_object_info
|
(self)
|
return obj
|
Function to send object messages of this traffic participant.
A derived_object_msgs.msg.Object is prepared to be published via '/carla/objects'
:return:
|
Function to send object messages of this traffic participant.
|
[
"Function",
"to",
"send",
"object",
"messages",
"of",
"this",
"traffic",
"participant",
"."
] |
def get_object_info(self):
"""
Function to send object messages of this traffic participant.
A derived_object_msgs.msg.Object is prepared to be published via '/carla/objects'
:return:
"""
obj = Object(header=self.get_msg_header("map"))
# ID
obj.id = self.get_id()
# Pose
obj.pose = self.get_current_ros_pose()
# Twist
obj.twist = self.get_current_ros_twist()
# Acceleration
obj.accel = self.get_current_ros_accel()
# Shape
obj.shape.type = SolidPrimitive.BOX
obj.shape.dimensions.extend([
self.carla_actor.bounding_box.extent.x * 2.0,
self.carla_actor.bounding_box.extent.y * 2.0,
self.carla_actor.bounding_box.extent.z * 2.0])
# Classification if available in attributes
if self.get_classification() != Object.CLASSIFICATION_UNKNOWN:
obj.object_classified = True
obj.classification = self.get_classification()
obj.classification_certainty = 1.0
obj.classification_age = self.classification_age
return obj
|
[
"def",
"get_object_info",
"(",
"self",
")",
":",
"obj",
"=",
"Object",
"(",
"header",
"=",
"self",
".",
"get_msg_header",
"(",
"\"map\"",
")",
")",
"# ID",
"obj",
".",
"id",
"=",
"self",
".",
"get_id",
"(",
")",
"# Pose",
"obj",
".",
"pose",
"=",
"self",
".",
"get_current_ros_pose",
"(",
")",
"# Twist",
"obj",
".",
"twist",
"=",
"self",
".",
"get_current_ros_twist",
"(",
")",
"# Acceleration",
"obj",
".",
"accel",
"=",
"self",
".",
"get_current_ros_accel",
"(",
")",
"# Shape",
"obj",
".",
"shape",
".",
"type",
"=",
"SolidPrimitive",
".",
"BOX",
"obj",
".",
"shape",
".",
"dimensions",
".",
"extend",
"(",
"[",
"self",
".",
"carla_actor",
".",
"bounding_box",
".",
"extent",
".",
"x",
"*",
"2.0",
",",
"self",
".",
"carla_actor",
".",
"bounding_box",
".",
"extent",
".",
"y",
"*",
"2.0",
",",
"self",
".",
"carla_actor",
".",
"bounding_box",
".",
"extent",
".",
"z",
"*",
"2.0",
"]",
")",
"# Classification if available in attributes",
"if",
"self",
".",
"get_classification",
"(",
")",
"!=",
"Object",
".",
"CLASSIFICATION_UNKNOWN",
":",
"obj",
".",
"object_classified",
"=",
"True",
"obj",
".",
"classification",
"=",
"self",
".",
"get_classification",
"(",
")",
"obj",
".",
"classification_certainty",
"=",
"1.0",
"obj",
".",
"classification_age",
"=",
"self",
".",
"classification_age",
"return",
"obj"
] |
https://github.com/sigmaai/self-driving-golf-cart/blob/8d891600af3d851add27a10ae45cf3c2108bb87c/ros/src/ros_carla_bridge/carla_ros_bridge/src/carla_ros_bridge/traffic_participant.py#L74-L105
|
|
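One detail worth calling out: CARLA bounding boxes store half-extents, which is why the shape dimensions above are doubled. A tiny self-contained check of that arithmetic (values hypothetical):

extent = {'x': 1.2, 'y': 0.9, 'z': 0.7}             # half-extents from CARLA
dimensions = [extent[k] * 2.0 for k in ('x', 'y', 'z')]
print(dimensions)                                    # [2.4, 1.8, 1.4]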
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/python/mach/mach/config.py
|
python
|
ConfigSettings.load_files
|
(self, filenames)
|
Load a config from files specified by their paths.
Files are loaded in the order given. Subsequent files will overwrite
values from previous files. If a file does not exist, it will be
ignored.
|
Load a config from files specified by their paths.
|
[
"Load",
"a",
"config",
"from",
"files",
"specified",
"by",
"their",
"paths",
"."
] |
def load_files(self, filenames):
"""Load a config from files specified by their paths.
Files are loaded in the order given. Subsequent files will overwrite
values from previous files. If a file does not exist, it will be
ignored.
"""
filtered = [f for f in filenames if os.path.exists(f)]
fps = [open(f, 'rt') for f in filtered]
self.load_fps(fps)
self._loaded_filenames.update(set(filtered))
for fp in fps:
fp.close()
|
[
"def",
"load_files",
"(",
"self",
",",
"filenames",
")",
":",
"filtered",
"=",
"[",
"f",
"for",
"f",
"in",
"filenames",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
"]",
"fps",
"=",
"[",
"open",
"(",
"f",
",",
"'rt'",
")",
"for",
"f",
"in",
"filtered",
"]",
"self",
".",
"load_fps",
"(",
"fps",
")",
"self",
".",
"_loaded_filenames",
".",
"update",
"(",
"set",
"(",
"filtered",
")",
")",
"for",
"fp",
"in",
"fps",
":",
"fp",
".",
"close",
"(",
")"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mach/mach/config.py#L367-L380
|
||
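A short usage sketch, assuming ConfigSettings can be constructed with no arguments as in mach's config module; both paths are hypothetical. Missing files are skipped and later files win on conflicts, per the docstring:

settings = ConfigSettings()
# '/etc/app.cfg' is ignored if absent; 'local.cfg' overrides it otherwise.
settings.load_files(['/etc/app.cfg', 'local.cfg'])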
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/stc.py
|
python
|
StyledTextCtrl.GetTechnology
|
(*args, **kwargs)
|
return _stc.StyledTextCtrl_GetTechnology(*args, **kwargs)
|
GetTechnology(self) -> int
|
GetTechnology(self) -> int
|
[
"GetTechnology",
"(",
"self",
")",
"-",
">",
"int"
] |
def GetTechnology(*args, **kwargs):
"""GetTechnology(self) -> int"""
return _stc.StyledTextCtrl_GetTechnology(*args, **kwargs)
|
[
"def",
"GetTechnology",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_GetTechnology",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L6389-L6391
|
|
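Like the rest of these SWIG-generated accessors, it is called on a live control; a hedged sketch (needs a display and a wxPython Classic install):

import wx
import wx.stc

app = wx.App(False)
frame = wx.Frame(None)
ctrl = wx.stc.StyledTextCtrl(frame)
print(ctrl.GetTechnology())   # platform-dependent int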
bairdzhang/smallhardface
|
76fa1d87a9602d9b13d7a7fe693fc7aec91cab80
|
caffe/scripts/cpp_lint.py
|
python
|
CheckBraces
|
(filename, clean_lines, linenum, error)
|
Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
Looks for misplaced braces (e.g. at the end of line).
|
[
"Looks",
"for",
"misplaced",
"braces",
"(",
"e",
".",
"g",
".",
"at",
"the",
"end",
"of",
"line",
")",
"."
] |
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seems far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
|
[
"def",
"CheckBraces",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# get rid of comments and strings",
"if",
"Match",
"(",
"r'\\s*{\\s*$'",
",",
"line",
")",
":",
"# We allow an open brace to start a line in the case where someone is using",
"# braces in a block to explicitly create a new scope, which is commonly used",
"# to control the lifetime of stack-allocated variables. Braces are also",
"# used for brace initializers inside function calls. We don't detect this",
"# perfectly: we just don't complain if the last non-whitespace character on",
"# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the",
"# previous line starts a preprocessor block.",
"prevline",
"=",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
"if",
"(",
"not",
"Search",
"(",
"r'[,;:}{(]\\s*$'",
",",
"prevline",
")",
"and",
"not",
"Match",
"(",
"r'\\s*#'",
",",
"prevline",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/braces'",
",",
"4",
",",
"'{ should almost always be at the end of the previous line'",
")",
"# An else clause should be on the same line as the preceding closing brace.",
"if",
"Match",
"(",
"r'\\s*else\\s*'",
",",
"line",
")",
":",
"prevline",
"=",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
"if",
"Match",
"(",
"r'\\s*}\\s*$'",
",",
"prevline",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"4",
",",
"'An else should appear on the same line as the preceding }'",
")",
"# If braces come on one side of an else, they should be on both.",
"# However, we have to worry about \"else if\" that spans multiple lines!",
"if",
"Search",
"(",
"r'}\\s*else[^{]*$'",
",",
"line",
")",
"or",
"Match",
"(",
"r'[^}]*else\\s*{'",
",",
"line",
")",
":",
"if",
"Search",
"(",
"r'}\\s*else if([^{]*)$'",
",",
"line",
")",
":",
"# could be multi-line if",
"# find the ( after the if",
"pos",
"=",
"line",
".",
"find",
"(",
"'else if'",
")",
"pos",
"=",
"line",
".",
"find",
"(",
"'('",
",",
"pos",
")",
"if",
"pos",
">",
"0",
":",
"(",
"endline",
",",
"_",
",",
"endpos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"pos",
")",
"if",
"endline",
"[",
"endpos",
":",
"]",
".",
"find",
"(",
"'{'",
")",
"==",
"-",
"1",
":",
"# must be brace after if",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/braces'",
",",
"5",
",",
"'If an else has a brace on one side, it should have it on both'",
")",
"else",
":",
"# common case: else not followed by a multi-line if",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/braces'",
",",
"5",
",",
"'If an else has a brace on one side, it should have it on both'",
")",
"# Likewise, an else should never have the else clause on the same line",
"if",
"Search",
"(",
"r'\\belse [^\\s{]'",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'\\belse if\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"4",
",",
"'Else clause should never be on same line as else (use 2 lines)'",
")",
"# In the same way, a do/while should never be on one line",
"if",
"Match",
"(",
"r'\\s*do [^\\s{]'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"4",
",",
"'do/while clauses should not be on a single line'",
")",
"# Block bodies should not be followed by a semicolon. Due to C++11",
"# brace initialization, there are more places where semicolons are",
"# required than not, so we use a whitelist approach to check these",
"# rather than a blacklist. These are the places where \"};\" should",
"# be replaced by just \"}\":",
"# 1. Some flavor of block following closing parenthesis:",
"# for (;;) {};",
"# while (...) {};",
"# switch (...) {};",
"# Function(...) {};",
"# if (...) {};",
"# if (...) else if (...) {};",
"#",
"# 2. else block:",
"# if (...) else {};",
"#",
"# 3. const member function:",
"# Function(...) const {};",
"#",
"# 4. Block following some statement:",
"# x = 42;",
"# {};",
"#",
"# 5. Block at the beginning of a function:",
"# Function(...) {",
"# {};",
"# }",
"#",
"# Note that naively checking for the preceding \"{\" will also match",
"# braces inside multi-dimensional arrays, but this is fine since",
"# that expression will not contain semicolons.",
"#",
"# 6. Block following another block:",
"# while (true) {}",
"# {};",
"#",
"# 7. End of namespaces:",
"# namespace {};",
"#",
"# These semicolons seems far more common than other kinds of",
"# redundant semicolons, possibly due to people converting classes",
"# to namespaces. For now we do not warn for this case.",
"#",
"# Try matching case 1 first.",
"match",
"=",
"Match",
"(",
"r'^(.*\\)\\s*)\\{'",
",",
"line",
")",
"if",
"match",
":",
"# Matched closing parenthesis (case 1). Check the token before the",
"# matching opening parenthesis, and don't warn if it looks like a",
"# macro. This avoids these false positives:",
"# - macro that defines a base class",
"# - multi-line macro that defines a base class",
"# - macro that defines the whole class-head",
"#",
"# But we still issue warnings for macros that we know are safe to",
"# warn, specifically:",
"# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P",
"# - TYPED_TEST",
"# - INTERFACE_DEF",
"# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:",
"#",
"# We implement a whitelist of safe macros instead of a blacklist of",
"# unsafe macros, even though the latter appears less frequently in",
"# google code and would have been easier to implement. This is because",
"# the downside for getting the whitelist wrong means some extra",
"# semicolons, while the downside for getting the blacklist wrong",
"# would result in compile errors.",
"#",
"# In addition to macros, we also don't want to warn on compound",
"# literals.",
"closing_brace_pos",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"rfind",
"(",
"')'",
")",
"opening_parenthesis",
"=",
"ReverseCloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"closing_brace_pos",
")",
"if",
"opening_parenthesis",
"[",
"2",
"]",
">",
"-",
"1",
":",
"line_prefix",
"=",
"opening_parenthesis",
"[",
"0",
"]",
"[",
"0",
":",
"opening_parenthesis",
"[",
"2",
"]",
"]",
"macro",
"=",
"Search",
"(",
"r'\\b([A-Z_]+)\\s*$'",
",",
"line_prefix",
")",
"if",
"(",
"(",
"macro",
"and",
"macro",
".",
"group",
"(",
"1",
")",
"not",
"in",
"(",
"'TEST'",
",",
"'TEST_F'",
",",
"'MATCHER'",
",",
"'MATCHER_P'",
",",
"'TYPED_TEST'",
",",
"'EXCLUSIVE_LOCKS_REQUIRED'",
",",
"'SHARED_LOCKS_REQUIRED'",
",",
"'LOCKS_EXCLUDED'",
",",
"'INTERFACE_DEF'",
")",
")",
"or",
"Search",
"(",
"r'\\s+=\\s*$'",
",",
"line_prefix",
")",
")",
":",
"match",
"=",
"None",
"else",
":",
"# Try matching cases 2-3.",
"match",
"=",
"Match",
"(",
"r'^(.*(?:else|\\)\\s*const)\\s*)\\{'",
",",
"line",
")",
"if",
"not",
"match",
":",
"# Try matching cases 4-6. These are always matched on separate lines.",
"#",
"# Note that we can't simply concatenate the previous line to the",
"# current line and do a single match, otherwise we may output",
"# duplicate warnings for the blank line case:",
"# if (cond) {",
"# // blank line",
"# }",
"prevline",
"=",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
"if",
"prevline",
"and",
"Search",
"(",
"r'[;{}]\\s*$'",
",",
"prevline",
")",
":",
"match",
"=",
"Match",
"(",
"r'^(\\s*)\\{'",
",",
"line",
")",
"# Check matching closing brace",
"if",
"match",
":",
"(",
"endline",
",",
"endlinenum",
",",
"endpos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"if",
"endpos",
">",
"-",
"1",
"and",
"Match",
"(",
"r'^\\s*;'",
",",
"endline",
"[",
"endpos",
":",
"]",
")",
":",
"# Current {} pair is eligible for semicolon check, and we have found",
"# the redundant semicolon, output warning here.",
"#",
"# Note: because we are scanning forward for opening braces, and",
"# outputting warnings for the matching closing brace, if there are",
"# nested blocks with trailing semicolons, we will get the error",
"# messages in reversed order.",
"error",
"(",
"filename",
",",
"endlinenum",
",",
"'readability/braces'",
",",
"4",
",",
"\"You don't need a ; after a }\"",
")"
] |
https://github.com/bairdzhang/smallhardface/blob/76fa1d87a9602d9b13d7a7fe693fc7aec91cab80/caffe/scripts/cpp_lint.py#L3073-L3244
|
||
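A hedged driver sketch showing how this check is wired up, using CleansedLines from the same cpplint module and an error collector matching the documented callback signature; the input deliberately splits 'else' from the preceding '}':

lines = ['if (x) {', '}', 'else {', '}']
clean = CleansedLines(lines)

def collect(filename, linenum, category, confidence, message):
    print('%s:%d: %s [%s]' % (filename, linenum + 1, message, category))

for i in range(clean.NumLines()):
    CheckBraces('demo.cc', clean, i, collect)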
rdkit/rdkit
|
ede860ae316d12d8568daf5ee800921c3389c84e
|
rdkit/Chem/BuildFragmentCatalog.py
|
python
|
ProcessGainsData
|
(inF, delim=',', idCol=0, gainCol=1)
|
return res
|
reads a list of ids and info gains out of an input file
|
reads a list of ids and info gains out of an input file
|
[
"reads",
"a",
"list",
"of",
"ids",
"and",
"info",
"gains",
"out",
"of",
"an",
"input",
"file"
] |
def ProcessGainsData(inF, delim=',', idCol=0, gainCol=1):
""" reads a list of ids and info gains out of an input file
"""
res = []
_ = inF.readline()
for line in inF:
splitL = line.strip().split(delim)
res.append((splitL[idCol], float(splitL[gainCol])))
return res
|
[
"def",
"ProcessGainsData",
"(",
"inF",
",",
"delim",
"=",
"','",
",",
"idCol",
"=",
"0",
",",
"gainCol",
"=",
"1",
")",
":",
"res",
"=",
"[",
"]",
"_",
"=",
"inF",
".",
"readline",
"(",
")",
"for",
"line",
"in",
"inF",
":",
"splitL",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"delim",
")",
"res",
".",
"append",
"(",
"(",
"splitL",
"[",
"idCol",
"]",
",",
"float",
"(",
"splitL",
"[",
"gainCol",
"]",
")",
")",
")",
"return",
"res"
] |
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/Chem/BuildFragmentCatalog.py#L365-L374
|
|
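A self-contained usage sketch; note that the first line of the input is consumed as a header:

from io import StringIO

data = StringIO('id,gain\nfrag1,0.25\nfrag2,0.10\n')
print(ProcessGainsData(data))   # [('frag1', 0.25), ('frag2', 0.1)]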
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/python2_version/klampt/robotsim.py
|
python
|
Simulator.__init__
|
(self, model)
|
__init__(Simulator self, WorldModel model) -> Simulator
Constructs the simulator from a WorldModel. If the WorldModel was loaded from an
XML file, then the simulation setup is loaded from it.
|
__init__(Simulator self, WorldModel model) -> Simulator
|
[
"__init__",
"(",
"Simulator",
"self",
"WorldModel",
"model",
")",
"-",
">",
"Simulator"
] |
def __init__(self, model):
"""
__init__(Simulator self, WorldModel model) -> Simulator
Constructs the simulator from a WorldModel. If the WorldModel was loaded from an
XML file, then the simulation setup is loaded from it.
"""
this = _robotsim.new_Simulator(model)
try:
self.this.append(this)
except Exception:
self.this = this
|
[
"def",
"__init__",
"(",
"self",
",",
"model",
")",
":",
"this",
"=",
"_robotsim",
".",
"new_Simulator",
"(",
"model",
")",
"try",
":",
"self",
".",
"this",
".",
"append",
"(",
"this",
")",
"except",
"Exception",
":",
"self",
".",
"this",
"=",
"this"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/robotsim.py#L8154-L8168
|
||
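A hedged construction sketch (requires a Klampt install; the world file name is hypothetical):

from klampt import WorldModel, Simulator

world = WorldModel()
world.readFile('my_world.xml')   # per the docstring, XML worlds can carry the simulation setup too
sim = Simulator(world)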
apple/swift-lldb
|
d74be846ef3e62de946df343e8c234bde93a8912
|
scripts/Python/static-binding/lldb.py
|
python
|
SBCommunication.__init__
|
(self, *args)
|
__init__(lldb::SBCommunication self) -> SBCommunication
__init__(lldb::SBCommunication self, char const * broadcaster_name) -> SBCommunication
|
__init__(lldb::SBCommunication self) -> SBCommunication
__init__(lldb::SBCommunication self, char const * broadcaster_name) -> SBCommunication
|
[
"__init__",
"(",
"lldb",
"::",
"SBCommunication",
"self",
")",
"-",
">",
"SBCommunication",
"__init__",
"(",
"lldb",
"::",
"SBCommunication",
"self",
"char",
"const",
"*",
"broadcaster_name",
")",
"-",
">",
"SBCommunication"
] |
def __init__(self, *args):
"""
__init__(lldb::SBCommunication self) -> SBCommunication
__init__(lldb::SBCommunication self, char const * broadcaster_name) -> SBCommunication
"""
this = _lldb.new_SBCommunication(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
")",
":",
"this",
"=",
"_lldb",
".",
"new_SBCommunication",
"(",
"*",
"args",
")",
"try",
":",
"self",
".",
"this",
".",
"append",
"(",
"this",
")",
"except",
"__builtin__",
".",
"Exception",
":",
"self",
".",
"this",
"=",
"this"
] |
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L2997-L3006
|
||
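A hedged sketch of both documented overloads (requires LLDB's Python bindings; the broadcaster name is hypothetical):

import lldb

comm_default = lldb.SBCommunication()
comm_named = lldb.SBCommunication('demo.broadcaster')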
gv22ga/dlib-face-recognition-android
|
42d6305cbd85833f2b85bb79b70ab9ab004153c9
|
tools/lint/cpplint.py
|
python
|
CheckForNonStandardConstructs
|
(filename, clean_lines, linenum,
nesting_state, error)
|
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
|
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
|
[
"r",
"Logs",
"an",
"error",
"if",
"we",
"see",
"certain",
"non",
"-",
"ANSI",
"constructs",
"ignored",
"by",
"gcc",
"-",
"2",
".",
"Complain",
"about",
"several",
"constructs",
"which",
"gcc",
"-",
"2",
"accepts",
"but",
"which",
"are",
"not",
"standard",
"C",
"++",
".",
"Warning",
"about",
"these",
"in",
"lint",
"is",
"one",
"way",
"to",
"ease",
"the",
"transition",
"to",
"new",
"compilers",
".",
"-",
"put",
"storage",
"class",
"first",
"(",
"e",
".",
"g",
".",
"static",
"const",
"instead",
"of",
"const",
"static",
")",
".",
"-",
"%lld",
"instead",
"of",
"%qd",
"in",
"printf",
"-",
"type",
"functions",
".",
"-",
"%1$d",
"is",
"non",
"-",
"standard",
"in",
"printf",
"-",
"type",
"functions",
".",
"-",
"\\",
"%",
"is",
"an",
"undefined",
"character",
"escape",
"sequence",
".",
"-",
"text",
"after",
"#endif",
"is",
"not",
"allowed",
".",
"-",
"invalid",
"inner",
"-",
"style",
"forward",
"declaration",
".",
"-",
">",
"?",
"and",
"<?",
"operators",
"and",
"their",
">",
"?",
"=",
"and",
"<?",
"=",
"cousins",
".",
"Additionally",
"check",
"for",
"constructor",
"/",
"destructor",
"style",
"violations",
"and",
"reference",
"members",
"as",
"it",
"is",
"very",
"convenient",
"to",
"do",
"so",
"while",
"checking",
"for",
"gcc",
"-",
"2",
"compliance",
".",
"Args",
":",
"filename",
":",
"The",
"name",
"of",
"the",
"current",
"file",
".",
"clean_lines",
":",
"A",
"CleansedLines",
"instance",
"containing",
"the",
"file",
".",
"linenum",
":",
"The",
"number",
"of",
"the",
"line",
"to",
"check",
".",
"nesting_state",
":",
"A",
"NestingState",
"instance",
"which",
"maintains",
"information",
"about",
"the",
"current",
"stack",
"of",
"nested",
"blocks",
"being",
"parsed",
".",
"error",
":",
"A",
"callable",
"to",
"which",
"errors",
"are",
"reported",
"which",
"takes",
"4",
"arguments",
":",
"filename",
"line",
"number",
"error",
"level",
"and",
"message"
] |
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
|
[
"def",
"CheckForNonStandardConstructs",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Remove comments from the line, but leave in strings for now.",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%[-+ ]?\\d*q'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"3",
",",
"'%q in format strings is deprecated. Use %ll instead.'",
")",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%\\d+\\$'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"2",
",",
"'%N$ formats are unconventional. Try rewriting to avoid them.'",
")",
"# Remove escaped backslashes before looking for undefined escapes.",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\\\\\\\'",
",",
"''",
")",
"if",
"Search",
"(",
"r'(\"|\\').*\\\\(%|\\[|\\(|{)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/printf_format'",
",",
"3",
",",
"'%, [, (, and { are undefined character escapes. Unescape them.'",
")",
"# For the rest, work with both comments and strings removed.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'\\b(const|volatile|void|char|short|int|long'",
"r'|float|double|signed|unsigned'",
"r'|schar|u?int8|u?int16|u?int32|u?int64)'",
"r'\\s+(register|static|extern|typedef)\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/storage_class'",
",",
"5",
",",
"'Storage-class specifier (static, extern, typedef, etc) should be '",
"'at the beginning of the declaration.'",
")",
"if",
"Match",
"(",
"r'\\s*#\\s*endif\\s*[^/\\s]+'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/endif_comment'",
",",
"5",
",",
"'Uncommented text after #endif is non-standard. Use a comment.'",
")",
"if",
"Match",
"(",
"r'\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/forward_decl'",
",",
"5",
",",
"'Inner-style forward declarations are invalid. Remove this line.'",
")",
"if",
"Search",
"(",
"r'(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/deprecated'",
",",
"3",
",",
"'>? and <? (max and min) operators are non-standard and deprecated.'",
")",
"if",
"Search",
"(",
"r'^\\s*const\\s*string\\s*&\\s*\\w+\\s*;'",
",",
"line",
")",
":",
"# TODO(unknown): Could it be expanded safely to arbitrary references,",
"# without triggering too many false positives? The first",
"# attempt triggered 5 warnings for mostly benign code in the regtest, hence",
"# the restriction.",
"# Here's the original regexp, for the reference:",
"# type_name = r'\\w+((\\s*::\\s*\\w+)|(\\s*<\\s*\\w+?\\s*>))?'",
"# r'\\s*const\\s*' + type_name + '\\s*&\\s*\\w+\\s*;'",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/member_string_references'",
",",
"2",
",",
"'const string& members are dangerous. It is much better to use '",
"'alternatives, such as pointers or simple constants.'",
")",
"# Everything else in this function operates on class declarations.",
"# Return early if the top of the nesting stack is not a class, or if",
"# the class head is not completed yet.",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"not",
"classinfo",
"or",
"not",
"classinfo",
".",
"seen_open_brace",
":",
"return",
"# The class may have been declared with namespace or classname qualifiers.",
"# The constructor and destructor will not have those qualifiers.",
"base_classname",
"=",
"classinfo",
".",
"name",
".",
"split",
"(",
"'::'",
")",
"[",
"-",
"1",
"]",
"# Look for single-argument constructors that aren't marked explicit.",
"# Technically a valid construct, but against style.",
"explicit_constructor_match",
"=",
"Match",
"(",
"r'\\s+(?:inline\\s+)?(explicit\\s+)?(?:inline\\s+)?%s\\s*'",
"r'\\(((?:[^()]|\\([^()]*\\))*)\\)'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"line",
")",
"if",
"explicit_constructor_match",
":",
"is_marked_explicit",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"1",
")",
"if",
"not",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
":",
"constructor_args",
"=",
"[",
"]",
"else",
":",
"constructor_args",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
".",
"split",
"(",
"','",
")",
"# collapse arguments so that commas in template parameter lists and function",
"# argument parameter lists don't split arguments in two",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"constructor_args",
")",
":",
"constructor_arg",
"=",
"constructor_args",
"[",
"i",
"]",
"while",
"(",
"constructor_arg",
".",
"count",
"(",
"'<'",
")",
">",
"constructor_arg",
".",
"count",
"(",
"'>'",
")",
"or",
"constructor_arg",
".",
"count",
"(",
"'('",
")",
">",
"constructor_arg",
".",
"count",
"(",
"')'",
")",
")",
":",
"constructor_arg",
"+=",
"','",
"+",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"del",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"constructor_args",
"[",
"i",
"]",
"=",
"constructor_arg",
"i",
"+=",
"1",
"defaulted_args",
"=",
"[",
"arg",
"for",
"arg",
"in",
"constructor_args",
"if",
"'='",
"in",
"arg",
"]",
"noarg_constructor",
"=",
"(",
"not",
"constructor_args",
"or",
"# empty arg list",
"# 'void' arg specifier",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"and",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"==",
"'void'",
")",
")",
"onearg_constructor",
"=",
"(",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"and",
"# exactly one arg",
"not",
"noarg_constructor",
")",
"or",
"# all but at most one arg defaulted",
"(",
"len",
"(",
"constructor_args",
")",
">=",
"1",
"and",
"not",
"noarg_constructor",
"and",
"len",
"(",
"defaulted_args",
")",
">=",
"len",
"(",
"constructor_args",
")",
"-",
"1",
")",
")",
"initializer_list_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Search",
"(",
"r'\\bstd\\s*::\\s*initializer_list\\b'",
",",
"constructor_args",
"[",
"0",
"]",
")",
")",
"copy_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Match",
"(",
"r'(const\\s+)?%s(\\s*<[^>]*>)?(\\s+const)?\\s*(?:<\\w+>\\s*)?&'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
")",
"if",
"(",
"not",
"is_marked_explicit",
"and",
"onearg_constructor",
"and",
"not",
"initializer_list_constructor",
"and",
"not",
"copy_constructor",
")",
":",
"if",
"defaulted_args",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Constructors callable with one argument '",
"'should be marked explicit.'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Single-parameter constructors should be marked explicit.'",
")",
"elif",
"is_marked_explicit",
"and",
"not",
"onearg_constructor",
":",
"if",
"noarg_constructor",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Zero-parameter constructors should not be marked explicit.'",
")"
] |
https://github.com/gv22ga/dlib-face-recognition-android/blob/42d6305cbd85833f2b85bb79b70ab9ab004153c9/tools/lint/cpplint.py#L2549-L2702
|
||
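A hedged driver sketch, mirroring how cpplint feeds this check: NestingState and CleansedLines come from the same module, and Update() must run before the check so the innermost-class info is current. The snippet deliberately declares a one-argument constructor without explicit:

lines = ['class Foo {', ' public:', '  Foo(int x);', '};']
clean = CleansedLines(lines)
state = NestingState()

def collect(filename, linenum, category, confidence, message):
    print('%s:%d: %s [%s]' % (filename, linenum + 1, message, category))

for i in range(clean.NumLines()):
    state.Update('demo.h', clean, i, collect)
    CheckForNonStandardConstructs('demo.h', clean, i, state, collect)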
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/turtle.py
|
python
|
RawTurtle._clear
|
(self)
|
Delete all of pen's drawings
|
Delete all of pen's drawings
|
[
"Delete",
"all",
"of",
"pen",
"s",
"drawings"
] |
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
for item in self.items:
self.screen._delete(item)
self.currentLineItem = self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
self.items = [self.currentLineItem]
self.clearstamps()
self.setundobuffer(self._undobuffersize)
|
[
"def",
"_clear",
"(",
"self",
")",
":",
"self",
".",
"_fillitem",
"=",
"self",
".",
"_fillpath",
"=",
"None",
"for",
"item",
"in",
"self",
".",
"items",
":",
"self",
".",
"screen",
".",
"_delete",
"(",
"item",
")",
"self",
".",
"currentLineItem",
"=",
"self",
".",
"screen",
".",
"_createline",
"(",
")",
"self",
".",
"currentLine",
"=",
"[",
"]",
"if",
"self",
".",
"_drawing",
":",
"self",
".",
"currentLine",
".",
"append",
"(",
"self",
".",
"_position",
")",
"self",
".",
"items",
"=",
"[",
"self",
".",
"currentLineItem",
"]",
"self",
".",
"clearstamps",
"(",
")",
"self",
".",
"setundobuffer",
"(",
"self",
".",
"_undobuffersize",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/turtle.py#L2616-L2627
|
||
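_clear is internal; user code reaches it through the public clear()/reset() wrappers. A minimal sketch with the standard-library module (needs a Tk display):

import turtle

t = turtle.Turtle()
t.forward(50)
t.clear()        # delegates to _clear(): drawings go, pen state stays
turtle.bye()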
openvinotoolkit/openvino
|
dedcbeafa8b84cccdc55ca64b8da516682b381c7
|
src/bindings/python/src/compatibility/ngraph/utils/types.py
|
python
|
get_ndarray
|
(data: NumericData)
|
return np.array(data)
|
Wrap data into a numpy ndarray.
|
Wrap data into a numpy ndarray.
|
[
"Wrap",
"data",
"into",
"a",
"numpy",
"ndarray",
"."
] |
def get_ndarray(data: NumericData) -> np.ndarray:
"""Wrap data into a numpy ndarray."""
if type(data) == np.ndarray:
return data
return np.array(data)
|
[
"def",
"get_ndarray",
"(",
"data",
":",
"NumericData",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"type",
"(",
"data",
")",
"==",
"np",
".",
"ndarray",
":",
"return",
"data",
"return",
"np",
".",
"array",
"(",
"data",
")"
] |
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/src/bindings/python/src/compatibility/ngraph/utils/types.py#L108-L112
|
|
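A short usage sketch; scalars get wrapped, while existing ndarrays pass through untouched:

import numpy as np

print(get_ndarray(3.5))            # array(3.5)
arr = np.arange(4)
print(get_ndarray(arr) is arr)     # True: no copy for ndarray input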
domino-team/openwrt-cc
|
8b181297c34d14d3ca521cc9f31430d561dbc688
|
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
python
|
Filter
|
(l, item)
|
return [res.setdefault(e, e) for e in l if e != item]
|
Removes item from l.
|
Removes item from l.
|
[
"Removes",
"item",
"from",
"l",
"."
] |
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
|
[
"def",
"Filter",
"(",
"l",
",",
"item",
")",
":",
"res",
"=",
"{",
"}",
"return",
"[",
"res",
".",
"setdefault",
"(",
"e",
",",
"e",
")",
"for",
"e",
"in",
"l",
"if",
"e",
"!=",
"item",
"]"
] |
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py#L1482-L1485
|
|
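Usage sketch; besides filtering, res.setdefault(e, e) returns the first-stored equal element, so duplicates in the result all reference one object while the input order is preserved:

print(Filter(['a', 'b', 'a'], 'b'))   # ['a', 'a']
print(Filter([1, 2, 1, 3, 2], 2))     # [1, 1, 3]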
sdhash/sdhash
|
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
|
sdhash-ui/jinja2/environment.py
|
python
|
Environment.handle_exception
|
(self, exc_info=None, rendered=False, source_hint=None)
|
Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
|
Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
|
[
"Exception",
"handling",
"helper",
".",
"This",
"is",
"used",
"internally",
"to",
"either",
"raise",
"rewritten",
"exceptions",
"or",
"return",
"a",
"rendered",
"traceback",
"for",
"the",
"template",
"."
] |
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
raise exc_type, exc_value, tb
|
[
"def",
"handle_exception",
"(",
"self",
",",
"exc_info",
"=",
"None",
",",
"rendered",
"=",
"False",
",",
"source_hint",
"=",
"None",
")",
":",
"global",
"_make_traceback",
"if",
"exc_info",
"is",
"None",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"# the debugging module is imported when it's used for the first time.",
"# we're doing a lot of stuff there and for applications that do not",
"# get any exceptions in template rendering there is no need to load",
"# all of that.",
"if",
"_make_traceback",
"is",
"None",
":",
"from",
"jinja2",
".",
"debug",
"import",
"make_traceback",
"as",
"_make_traceback",
"traceback",
"=",
"_make_traceback",
"(",
"exc_info",
",",
"source_hint",
")",
"if",
"rendered",
"and",
"self",
".",
"exception_formatter",
"is",
"not",
"None",
":",
"return",
"self",
".",
"exception_formatter",
"(",
"traceback",
")",
"if",
"self",
".",
"exception_handler",
"is",
"not",
"None",
":",
"self",
".",
"exception_handler",
"(",
"traceback",
")",
"exc_type",
",",
"exc_value",
",",
"tb",
"=",
"traceback",
".",
"standard_exc_info",
"raise",
"exc_type",
",",
"exc_value",
",",
"tb"
] |
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/jinja2/environment.py#L650-L670
|
||
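The raise exc_type, exc_value, tb form marks this as the Python 2 vendored copy of Jinja2. Applications normally hit this helper indirectly; a hedged sketch of that path against an installed jinja2:

from jinja2 import Environment

env = Environment()
try:
    env.from_string('{{ 1 / 0 }}').render()
except ZeroDivisionError:
    # handle_exception() rewrote the traceback to point into the template
    # source before re-raising it here.
    print('template error surfaced with a rewritten traceback')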
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
|
python
|
embedding_attention_seq2seq
|
(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
num_heads=1,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None,
initial_state_attention=False)
|
Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Warning: when output_projection is None, the size of the attention vectors
and variables will be made proportional to num_decoder_symbols, can be large.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
|
Embedding sequence-to-sequence model with attention.
|
[
"Embedding",
"sequence",
"-",
"to",
"-",
"sequence",
"model",
"with",
"attention",
"."
] |
def embedding_attention_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
num_heads=1,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None,
initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Warning: when output_projection is None, the size of the attention vectors
and variables will be made proportional to num_decoder_symbols, can be large.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.compat.v1.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has shape
[num_decoder_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero. If
True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(
scope or "embedding_attention_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
encoder_outputs, encoder_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs
]
attention_states = array_ops.concat(top_states, 1)
# Decoder.
output_size = None
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: decoder(True), lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
|
[
"def",
"embedding_attention_seq2seq",
"(",
"encoder_inputs",
",",
"decoder_inputs",
",",
"cell",
",",
"num_encoder_symbols",
",",
"num_decoder_symbols",
",",
"embedding_size",
",",
"num_heads",
"=",
"1",
",",
"output_projection",
"=",
"None",
",",
"feed_previous",
"=",
"False",
",",
"dtype",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"initial_state_attention",
"=",
"False",
")",
":",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"scope",
"or",
"\"embedding_attention_seq2seq\"",
",",
"dtype",
"=",
"dtype",
")",
"as",
"scope",
":",
"dtype",
"=",
"scope",
".",
"dtype",
"# Encoder.",
"encoder_cell",
"=",
"copy",
".",
"deepcopy",
"(",
"cell",
")",
"encoder_cell",
"=",
"core_rnn_cell",
".",
"EmbeddingWrapper",
"(",
"encoder_cell",
",",
"embedding_classes",
"=",
"num_encoder_symbols",
",",
"embedding_size",
"=",
"embedding_size",
")",
"encoder_outputs",
",",
"encoder_state",
"=",
"rnn",
".",
"static_rnn",
"(",
"encoder_cell",
",",
"encoder_inputs",
",",
"dtype",
"=",
"dtype",
")",
"# First calculate a concatenation of encoder outputs to put attention on.",
"top_states",
"=",
"[",
"array_ops",
".",
"reshape",
"(",
"e",
",",
"[",
"-",
"1",
",",
"1",
",",
"cell",
".",
"output_size",
"]",
")",
"for",
"e",
"in",
"encoder_outputs",
"]",
"attention_states",
"=",
"array_ops",
".",
"concat",
"(",
"top_states",
",",
"1",
")",
"# Decoder.",
"output_size",
"=",
"None",
"if",
"output_projection",
"is",
"None",
":",
"cell",
"=",
"core_rnn_cell",
".",
"OutputProjectionWrapper",
"(",
"cell",
",",
"num_decoder_symbols",
")",
"output_size",
"=",
"num_decoder_symbols",
"if",
"isinstance",
"(",
"feed_previous",
",",
"bool",
")",
":",
"return",
"embedding_attention_decoder",
"(",
"decoder_inputs",
",",
"encoder_state",
",",
"attention_states",
",",
"cell",
",",
"num_decoder_symbols",
",",
"embedding_size",
",",
"num_heads",
"=",
"num_heads",
",",
"output_size",
"=",
"output_size",
",",
"output_projection",
"=",
"output_projection",
",",
"feed_previous",
"=",
"feed_previous",
",",
"initial_state_attention",
"=",
"initial_state_attention",
")",
"# If feed_previous is a Tensor, we construct 2 graphs and use cond.",
"def",
"decoder",
"(",
"feed_previous_bool",
")",
":",
"reuse",
"=",
"None",
"if",
"feed_previous_bool",
"else",
"True",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"variable_scope",
".",
"get_variable_scope",
"(",
")",
",",
"reuse",
"=",
"reuse",
")",
":",
"outputs",
",",
"state",
"=",
"embedding_attention_decoder",
"(",
"decoder_inputs",
",",
"encoder_state",
",",
"attention_states",
",",
"cell",
",",
"num_decoder_symbols",
",",
"embedding_size",
",",
"num_heads",
"=",
"num_heads",
",",
"output_size",
"=",
"output_size",
",",
"output_projection",
"=",
"output_projection",
",",
"feed_previous",
"=",
"feed_previous_bool",
",",
"update_embedding_for_previous",
"=",
"False",
",",
"initial_state_attention",
"=",
"initial_state_attention",
")",
"state_list",
"=",
"[",
"state",
"]",
"if",
"nest",
".",
"is_sequence",
"(",
"state",
")",
":",
"state_list",
"=",
"nest",
".",
"flatten",
"(",
"state",
")",
"return",
"outputs",
"+",
"state_list",
"outputs_and_state",
"=",
"control_flow_ops",
".",
"cond",
"(",
"feed_previous",
",",
"lambda",
":",
"decoder",
"(",
"True",
")",
",",
"lambda",
":",
"decoder",
"(",
"False",
")",
")",
"outputs_len",
"=",
"len",
"(",
"decoder_inputs",
")",
"# Outputs length same as decoder inputs.",
"state_list",
"=",
"outputs_and_state",
"[",
"outputs_len",
":",
"]",
"state",
"=",
"state_list",
"[",
"0",
"]",
"if",
"nest",
".",
"is_sequence",
"(",
"encoder_state",
")",
":",
"state",
"=",
"nest",
".",
"pack_sequence_as",
"(",
"structure",
"=",
"encoder_state",
",",
"flat_sequence",
"=",
"state_list",
")",
"return",
"outputs_and_state",
"[",
":",
"outputs_len",
"]",
",",
"state"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py#L793-L918
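A minimal usage sketch for the function above (not from the source repo; it assumes a TensorFlow 1.x environment where tf.contrib.legacy_seq2seq is available, and the vocabulary sizes are illustrative):

import tensorflow as tf

batch_size, num_steps = 32, 10
cell = tf.nn.rnn_cell.GRUCell(128)
# Encoder and decoder inputs are lists of int32 token-id tensors, one per step.
encoder_inputs = [tf.placeholder(tf.int32, [batch_size]) for _ in range(num_steps)]
decoder_inputs = [tf.placeholder(tf.int32, [batch_size]) for _ in range(num_steps)]
outputs, state = tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
    encoder_inputs, decoder_inputs, cell,
    num_encoder_symbols=10000, num_decoder_symbols=10000,
    embedding_size=128,
    feed_previous=True)  # True: decode from the GO symbol, as at inference time
# outputs is a list of num_steps tensors, each [batch_size, num_decoder_symbols].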
|
||
idaholab/moose
|
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
|
python/chigger/annotations/ImageAnnotation.py
|
python
|
ImageAnnotation.update
|
(self, **kwargs)
|
Updates the 3D camera to place the image in the defined location.
|
Updates the 3D camera to place the image in the defined location.
|
[
"Updates",
"the",
"3D",
"camera",
"to",
"place",
"the",
"image",
"in",
"the",
"defined",
"location",
"."
] |
def update(self, **kwargs):
"""
Updates the 3D camera to place the image in the defined location.
"""
super(ImageAnnotation, self).update(**kwargs)
renderer = self.getVTKRenderer()
# Coordinate transformation object
tr = vtk.vtkCoordinate()
tr.SetCoordinateSystemToNormalizedViewport()
# Size of window
window = renderer.GetRenderWindow().GetSize()
# Size of image
size = self._sources[-1].getVTKSource().GetOutput().GetDimensions()
# Image scale
if self.isOptionValid('scale'):
scale = self.getOption('scale')
else:
scale = float(window[0])/float(size[0]) * self.getOption('width')
# Compute the camera distance
angle = self._vtkcamera.GetViewAngle()
d = window[1]*0.5 / math.tan(math.radians(angle*0.5))
# Determine the image position
if self.isOptionValid('position'):
p = self.getOption('position')
tr.SetValue(p[0], p[1], 0)
position = list(tr.GetComputedDisplayValue(renderer))
# Adjust for off-center alignments
if self.getOption('horizontal_alignment') == 'left':
position[0] = position[0] + (size[0]*0.5*scale)
elif self.getOption('horizontal_alignment') == 'right':
position[0] = position[0] - (size[0]*0.5*scale)
if self.getOption('vertical_alignment') == 'top':
position[1] = position[1] - (size[1]*0.5*scale)
elif self.getOption('vertical_alignment') == 'bottom':
position[1] = position[1] + (size[1]*0.5*scale)
# Reference position (middle of window)
tr.SetValue(0.5, 0.5, 0)
ref = tr.GetComputedDisplayValue(renderer)
# Camera offsets
x = (ref[0] - position[0]) * 1/scale
y = (ref[1] - position[1]) * 1/scale
# Set the camera
self._vtkcamera.SetViewUp(0, 1, 0)
self._vtkcamera.SetPosition(size[0]/2. + x, size[1]/2. + y, d * 1/scale)
self._vtkcamera.SetFocalPoint(size[0]/2. + x, size[1]/2. + y, 0)
# Update the renderer
renderer.SetActiveCamera(self._vtkcamera)
renderer.ResetCameraClippingRange()
|
[
"def",
"update",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"ImageAnnotation",
",",
"self",
")",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"renderer",
"=",
"self",
".",
"getVTKRenderer",
"(",
")",
"# Coordinate transormation object",
"tr",
"=",
"vtk",
".",
"vtkCoordinate",
"(",
")",
"tr",
".",
"SetCoordinateSystemToNormalizedViewport",
"(",
")",
"# Size of window",
"window",
"=",
"renderer",
".",
"GetRenderWindow",
"(",
")",
".",
"GetSize",
"(",
")",
"# Size of image",
"size",
"=",
"self",
".",
"_sources",
"[",
"-",
"1",
"]",
".",
"getVTKSource",
"(",
")",
".",
"GetOutput",
"(",
")",
".",
"GetDimensions",
"(",
")",
"# Image scale",
"if",
"self",
".",
"isOptionValid",
"(",
"'scale'",
")",
":",
"scale",
"=",
"self",
".",
"getOption",
"(",
"'scale'",
")",
"else",
":",
"scale",
"=",
"float",
"(",
"window",
"[",
"0",
"]",
")",
"/",
"float",
"(",
"size",
"[",
"0",
"]",
")",
"*",
"self",
".",
"getOption",
"(",
"'width'",
")",
"# Compute the camera distance",
"angle",
"=",
"self",
".",
"_vtkcamera",
".",
"GetViewAngle",
"(",
")",
"d",
"=",
"window",
"[",
"1",
"]",
"*",
"0.5",
"/",
"math",
".",
"tan",
"(",
"math",
".",
"radians",
"(",
"angle",
"*",
"0.5",
")",
")",
"# Determine the image position",
"if",
"self",
".",
"isOptionValid",
"(",
"'position'",
")",
":",
"p",
"=",
"self",
".",
"getOption",
"(",
"'position'",
")",
"tr",
".",
"SetValue",
"(",
"p",
"[",
"0",
"]",
",",
"p",
"[",
"1",
"]",
",",
"0",
")",
"position",
"=",
"list",
"(",
"tr",
".",
"GetComputedDisplayValue",
"(",
"renderer",
")",
")",
"# Adjust for off-center alignments",
"if",
"self",
".",
"getOption",
"(",
"'horizontal_alignment'",
")",
"==",
"'left'",
":",
"position",
"[",
"0",
"]",
"=",
"position",
"[",
"0",
"]",
"+",
"(",
"size",
"[",
"0",
"]",
"*",
"0.5",
"*",
"scale",
")",
"elif",
"self",
".",
"getOption",
"(",
"'horizontal_alignment'",
")",
"==",
"'right'",
":",
"position",
"[",
"0",
"]",
"=",
"position",
"[",
"0",
"]",
"-",
"(",
"size",
"[",
"0",
"]",
"*",
"0.5",
"*",
"scale",
")",
"if",
"self",
".",
"getOption",
"(",
"'vertical_alignment'",
")",
"==",
"'top'",
":",
"position",
"[",
"1",
"]",
"=",
"position",
"[",
"1",
"]",
"-",
"(",
"size",
"[",
"1",
"]",
"*",
"0.5",
"*",
"scale",
")",
"elif",
"self",
".",
"getOption",
"(",
"'vertical_alignment'",
")",
"==",
"'bottom'",
":",
"position",
"[",
"1",
"]",
"=",
"position",
"[",
"1",
"]",
"+",
"(",
"size",
"[",
"1",
"]",
"*",
"0.5",
"*",
"scale",
")",
"# Reference position (middle of window)",
"tr",
".",
"SetValue",
"(",
"0.5",
",",
"0.5",
",",
"0",
")",
"ref",
"=",
"tr",
".",
"GetComputedDisplayValue",
"(",
"renderer",
")",
"# Camera offsets",
"x",
"=",
"(",
"ref",
"[",
"0",
"]",
"-",
"position",
"[",
"0",
"]",
")",
"*",
"1",
"/",
"scale",
"y",
"=",
"(",
"ref",
"[",
"1",
"]",
"-",
"position",
"[",
"1",
"]",
")",
"*",
"1",
"/",
"scale",
"# Set the camera",
"self",
".",
"_vtkcamera",
".",
"SetViewUp",
"(",
"0",
",",
"1",
",",
"0",
")",
"self",
".",
"_vtkcamera",
".",
"SetPosition",
"(",
"size",
"[",
"0",
"]",
"/",
"2.",
"+",
"x",
",",
"size",
"[",
"1",
"]",
"/",
"2.",
"+",
"y",
",",
"d",
"*",
"1",
"/",
"scale",
")",
"self",
".",
"_vtkcamera",
".",
"SetFocalPoint",
"(",
"size",
"[",
"0",
"]",
"/",
"2.",
"+",
"x",
",",
"size",
"[",
"1",
"]",
"/",
"2.",
"+",
"y",
",",
"0",
")",
"# Update the renderer",
"renderer",
".",
"SetActiveCamera",
"(",
"self",
".",
"_vtkcamera",
")",
"renderer",
".",
"ResetCameraClippingRange",
"(",
")"
] |
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/annotations/ImageAnnotation.py#L49-L109
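The camera distance computed above follows d = window_height * 0.5 / tan(view_angle / 2). A worked example with hypothetical numbers:

import math

window_height = 600  # pixels (hypothetical)
view_angle = 30.0    # degrees, the VTK camera's view angle (hypothetical)
d = window_height * 0.5 / math.tan(math.radians(view_angle * 0.5))
print(round(d, 1))   # 1119.6: at this distance the camera's vertical field of
                     # view spans exactly 600 world units, i.e. 1 unit per pixel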
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/metrics/python/ops/metric_ops.py
|
python
|
streaming_false_negatives
|
(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None)
|
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
|
Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
|
Computes the total number of false negatives.
|
[
"Computes",
"the",
"total",
"number",
"of",
"false",
"negatives",
"."
] |
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric value
variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
|
[
"def",
"streaming_false_negatives",
"(",
"predictions",
",",
"labels",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"return",
"metrics",
".",
"false_negatives",
"(",
"predictions",
"=",
"predictions",
",",
"labels",
"=",
"labels",
",",
"weights",
"=",
"weights",
",",
"metrics_collections",
"=",
"metrics_collections",
",",
"updates_collections",
"=",
"updates_collections",
",",
"name",
"=",
"name",
")"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/metrics/python/ops/metric_ops.py#L185-L224
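A minimal sketch of how a streaming metric like this is typically driven (assumes a TF 1.x session-based workflow; the data is illustrative):

import tensorflow as tf

predictions = tf.placeholder(tf.bool, [None])
labels = tf.placeholder(tf.bool, [None])
value, update_op = tf.contrib.metrics.streaming_false_negatives(predictions, labels)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # the metric counters are local variables
    sess.run(update_op, {predictions: [False, False, True],
                         labels:      [True,  False, True]})
    print(sess.run(value))  # 1.0 -- one positive label was predicted negative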
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_windows.py
|
python
|
PyWindow.DoGetVirtualSize
|
(*args, **kwargs)
|
return _windows_.PyWindow_DoGetVirtualSize(*args, **kwargs)
|
DoGetVirtualSize(self) -> Size
|
DoGetVirtualSize(self) -> Size
|
[
"DoGetVirtualSize",
"(",
"self",
")",
"-",
">",
"Size"
] |
def DoGetVirtualSize(*args, **kwargs):
"""DoGetVirtualSize(self) -> Size"""
return _windows_.PyWindow_DoGetVirtualSize(*args, **kwargs)
|
[
"def",
"DoGetVirtualSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"PyWindow_DoGetVirtualSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_windows.py#L4182-L4184
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sgmllib.py
|
python
|
SGMLParser.reset
|
(self)
|
Reset this instance. Loses all unprocessed data.
|
Reset this instance. Loses all unprocessed data.
|
[
"Reset",
"this",
"instance",
".",
"Loses",
"all",
"unprocessed",
"data",
"."
] |
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"__starttag_text",
"=",
"None",
"self",
".",
"rawdata",
"=",
"''",
"self",
".",
"stack",
"=",
"[",
"]",
"self",
".",
"lasttag",
"=",
"'???'",
"self",
".",
"nomoretags",
"=",
"0",
"self",
".",
"literal",
"=",
"0",
"markupbase",
".",
"ParserBase",
".",
"reset",
"(",
"self",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sgmllib.py#L71-L79
|
||
mindspore-ai/mindspore
|
fb8fd3338605bb34fa5cea054e535a8b1d753fab
|
mindspore/python/mindspore/ops/composite/multitype_ops/not_in_impl.py
|
python
|
_number_not_in_list
|
(x, y)
|
return not const_utils.scalar_in_sequence(x, y)
|
Determine if a number is not in a list.
Args:
x (Number): x
y (list): y
Returns:
bool, True if x is not in y, False if x is in y.
|
Determine if a number is not in a list.
|
[
"Determine",
"if",
"a",
"number",
"not",
"in",
"list",
"."
] |
def _number_not_in_list(x, y):
"""
Determine if a number is not in a list.
Args:
x (Number): x
y (list): y
Returns:
bool, True if x is not in y, False if x is in y.
"""
return not const_utils.scalar_in_sequence(x, y)
|
[
"def",
"_number_not_in_list",
"(",
"x",
",",
"y",
")",
":",
"return",
"not",
"const_utils",
".",
"scalar_in_sequence",
"(",
"x",
",",
"y",
")"
] |
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/composite/multitype_ops/not_in_impl.py#L46-L57
|
|
adobe/chromium
|
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
|
tools/symsrc/pefile.py
|
python
|
PE.parse_sections
|
(self, offset)
|
Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data",
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
|
Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data",
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
|
[
"Fetch",
"the",
"PE",
"file",
"sections",
".",
"The",
"sections",
"will",
"be",
"readily",
"available",
"in",
"the",
"sections",
"attribute",
".",
"Its",
"attributes",
"will",
"contain",
"all",
"the",
"section",
"information",
"plus",
"data",
"a",
"buffer",
"containing",
"the",
"section",
"s",
"data",
".",
"The",
"Characteristics",
"member",
"will",
"be",
"processed",
"and",
"attributes",
"representing",
"the",
"section",
"characteristics",
"(",
"with",
"the",
"IMAGE_SCN_",
"string",
"trimmed",
"from",
"the",
"constant",
"s",
"names",
")",
"will",
"be",
"added",
"to",
"the",
"section",
"instance",
".",
"Refer",
"to",
"the",
"SectionStructure",
"class",
"for",
"additional",
"info",
"."
] |
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data",
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__)
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset:])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if section.PointerToRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if section.VirtualAddress > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packers use a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
section_data_start = section.PointerToRawData
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
(section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_data_end = section_data_start+section.SizeOfRawData
section.set_data(self.__data__[section_data_start:section_data_end])
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
self.set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set.' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
|
[
"def",
"parse_sections",
"(",
"self",
",",
"offset",
")",
":",
"self",
".",
"sections",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"FILE_HEADER",
".",
"NumberOfSections",
")",
":",
"section",
"=",
"SectionStructure",
"(",
"self",
".",
"__IMAGE_SECTION_HEADER_format__",
")",
"if",
"not",
"section",
":",
"break",
"section_offset",
"=",
"offset",
"+",
"section",
".",
"sizeof",
"(",
")",
"*",
"i",
"section",
".",
"set_file_offset",
"(",
"section_offset",
")",
"section",
".",
"__unpack__",
"(",
"self",
".",
"__data__",
"[",
"section_offset",
":",
"]",
")",
"self",
".",
"__structures__",
".",
"append",
"(",
"section",
")",
"if",
"section",
".",
"SizeOfRawData",
">",
"len",
"(",
"self",
".",
"__data__",
")",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Error parsing section %d. '",
"%",
"i",
")",
"+",
"'SizeOfRawData is larger than file.'",
")",
"if",
"section",
".",
"PointerToRawData",
">",
"len",
"(",
"self",
".",
"__data__",
")",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Error parsing section %d. '",
"%",
"i",
")",
"+",
"'PointerToRawData points beyond the end of the file.'",
")",
"if",
"section",
".",
"Misc_VirtualSize",
">",
"0x10000000",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Suspicious value found parsing section %d. '",
"%",
"i",
")",
"+",
"'VirtualSize is extremely large > 256MiB.'",
")",
"if",
"section",
".",
"VirtualAddress",
">",
"0x10000000",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Suspicious value found parsing section %d. '",
"%",
"i",
")",
"+",
"'VirtualAddress is beyond 0x10000000.'",
")",
"#",
"# Some packer used a non-aligned PointerToRawData in the sections,",
"# which causes several common tools not to load the section data",
"# properly as they blindly read from the indicated offset.",
"# It seems that Windows will round the offset down to the largest",
"# offset multiple of FileAlignment which is smaller than",
"# PointerToRawData. The following code will do the same.",
"#",
"#alignment = self.OPTIONAL_HEADER.FileAlignment",
"section_data_start",
"=",
"section",
".",
"PointerToRawData",
"if",
"(",
"self",
".",
"OPTIONAL_HEADER",
".",
"FileAlignment",
"!=",
"0",
"and",
"(",
"section",
".",
"PointerToRawData",
"%",
"self",
".",
"OPTIONAL_HEADER",
".",
"FileAlignment",
")",
"!=",
"0",
")",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Error parsing section %d. '",
"%",
"i",
")",
"+",
"'Suspicious value for FileAlignment in the Optional Header. '",
"+",
"'Normally the PointerToRawData entry of the sections\\' structures '",
"+",
"'is a multiple of FileAlignment, this might imply the file '",
"+",
"'is trying to confuse tools which parse this incorrectly'",
")",
"section_data_end",
"=",
"section_data_start",
"+",
"section",
".",
"SizeOfRawData",
"section",
".",
"set_data",
"(",
"self",
".",
"__data__",
"[",
"section_data_start",
":",
"section_data_end",
"]",
")",
"section_flags",
"=",
"self",
".",
"retrieve_flags",
"(",
"SECTION_CHARACTERISTICS",
",",
"'IMAGE_SCN_'",
")",
"# Set the section's flags according the the Characteristics member",
"self",
".",
"set_flags",
"(",
"section",
",",
"section",
".",
"Characteristics",
",",
"section_flags",
")",
"if",
"(",
"section",
".",
"__dict__",
".",
"get",
"(",
"'IMAGE_SCN_MEM_WRITE'",
",",
"False",
")",
"and",
"section",
".",
"__dict__",
".",
"get",
"(",
"'IMAGE_SCN_MEM_EXECUTE'",
",",
"False",
")",
")",
":",
"self",
".",
"__warnings",
".",
"append",
"(",
"(",
"'Suspicious flags set for section %d. '",
"%",
"i",
")",
"+",
"'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set.'",
"+",
"'This might indicate a packed executable.'",
")",
"self",
".",
"sections",
".",
"append",
"(",
"section",
")",
"if",
"self",
".",
"FILE_HEADER",
".",
"NumberOfSections",
">",
"0",
"and",
"self",
".",
"sections",
":",
"return",
"offset",
"+",
"self",
".",
"sections",
"[",
"0",
"]",
".",
"sizeof",
"(",
")",
"*",
"self",
".",
"FILE_HEADER",
".",
"NumberOfSections",
"else",
":",
"return",
"offset"
] |
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/symsrc/pefile.py#L1692-L1780
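The comment block inside parse_sections describes Windows rounding a non-aligned PointerToRawData down to the largest multiple of FileAlignment below it. A small standalone sketch of that rounding (not part of pefile itself):

def round_down_to_alignment(pointer_to_raw_data, file_alignment):
    """Largest multiple of file_alignment that is <= pointer_to_raw_data."""
    if file_alignment == 0:
        return pointer_to_raw_data
    return pointer_to_raw_data - (pointer_to_raw_data % file_alignment)

print(round_down_to_alignment(0x3A7, 0x200))  # 512, i.e. 0x200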
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/extern/aui/framemanager.py
|
python
|
AuiCenterDockingGuide.CreateShapesWithStyle
|
(self)
|
Creates the docking guide window shape based on which docking bitmaps are used.
|
Creates the docking guide window shape based on which docking bitmaps are used.
|
[
"Creates",
"the",
"docking",
"guide",
"window",
"shape",
"based",
"on",
"which",
"docking",
"bitmaps",
"are",
"used",
"."
] |
def CreateShapesWithStyle(self):
""" Creates the docking guide window shape based on which docking bitmaps are used. """
useAero = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES) != 0
useWhidbey = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES) != 0
self._useAero = 0
if useAero:
self._useAero = 1
elif useWhidbey:
self._useAero = 2
if useAero:
sizeX, sizeY = aeroguideSizeX, aeroguideSizeY
elif useWhidbey:
sizeX, sizeY = whidbeySizeX, whidbeySizeY
else:
sizeX, sizeY = guideSizeX, guideSizeY
rectLeft = wx.Rect(0, sizeY, sizeY, sizeX)
rectTop = wx.Rect(sizeY, 0, sizeX, sizeY)
rectRight = wx.Rect(sizeY+sizeX, sizeY, sizeY, sizeX)
rectBottom = wx.Rect(sizeY, sizeX + sizeY, sizeX, sizeY)
rectCenter = wx.Rect(sizeY, sizeY, sizeX, sizeX)
if not self._useAero:
self.targetLeft = AuiDockingGuideWindow(self, rectLeft, wx.LEFT, True, useAero)
self.targetTop = AuiDockingGuideWindow(self, rectTop, wx.TOP, True, useAero)
self.targetRight = AuiDockingGuideWindow(self, rectRight, wx.RIGHT, True, useAero)
self.targetBottom = AuiDockingGuideWindow(self, rectBottom, wx.BOTTOM, True, useAero)
self.targetCenter = AuiDockingGuideWindow(self, rectCenter, wx.CENTER, True, useAero)
# top-left diamond
tld = [wx.Point(rectTop.x, rectTop.y+rectTop.height-8),
wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y),
rectTop.GetBottomLeft()]
# bottom-left diamond
bld = [wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y+rectLeft.height),
wx.Point(rectBottom.x, rectBottom.y+8),
rectBottom.GetTopLeft()]
# top-right diamond
trd = [wx.Point(rectTop.x+rectTop.width, rectTop.y+rectTop.height-8),
wx.Point(rectRight.x+8, rectRight.y),
rectRight.GetTopLeft()]
# bottom-right diamond
brd = [wx.Point(rectRight.x+8, rectRight.y+rectRight.height),
wx.Point(rectBottom.x+rectBottom.width, rectBottom.y+8),
rectBottom.GetTopRight()]
self._triangles = [tld[0:2], bld[0:2],
[wx.Point(rectTop.x+rectTop.width-1, rectTop.y+rectTop.height-8),
wx.Point(rectRight.x+7, rectRight.y)],
[wx.Point(rectRight.x+7, rectRight.y+rectRight.height),
wx.Point(rectBottom.x+rectBottom.width-1, rectBottom.y+8)]]
region = wx.Region()
region.UnionRect(rectLeft)
region.UnionRect(rectTop)
region.UnionRect(rectRight)
region.UnionRect(rectBottom)
region.UnionRect(rectCenter)
region.UnionRegion(wx.RegionFromPoints(tld))
region.UnionRegion(wx.RegionFromPoints(bld))
region.UnionRegion(wx.RegionFromPoints(trd))
region.UnionRegion(wx.RegionFromPoints(brd))
elif useAero:
self._aeroBmp = aero_dock_pane.GetBitmap()
region = wx.RegionFromBitmap(self._aeroBmp)
self._allAeroBmps = [aero_dock_pane_left.GetBitmap(), aero_dock_pane_top.GetBitmap(),
aero_dock_pane_right.GetBitmap(), aero_dock_pane_bottom.GetBitmap(),
aero_dock_pane_center.GetBitmap(), aero_dock_pane.GetBitmap()]
self._deniedBitmap = aero_denied.GetBitmap()
self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
self._valid = True
elif useWhidbey:
self._aeroBmp = whidbey_dock_pane.GetBitmap()
region = wx.RegionFromBitmap(self._aeroBmp)
self._allAeroBmps = [whidbey_dock_pane_left.GetBitmap(), whidbey_dock_pane_top.GetBitmap(),
whidbey_dock_pane_right.GetBitmap(), whidbey_dock_pane_bottom.GetBitmap(),
whidbey_dock_pane_center.GetBitmap(), whidbey_dock_pane.GetBitmap()]
self._deniedBitmap = whidbey_denied.GetBitmap()
self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
self._valid = True
self.region = region
|
[
"def",
"CreateShapesWithStyle",
"(",
"self",
")",
":",
"useAero",
"=",
"(",
"GetManager",
"(",
"self",
".",
"GetParent",
"(",
")",
")",
".",
"GetAGWFlags",
"(",
")",
"&",
"AUI_MGR_AERO_DOCKING_GUIDES",
")",
"!=",
"0",
"useWhidbey",
"=",
"(",
"GetManager",
"(",
"self",
".",
"GetParent",
"(",
")",
")",
".",
"GetAGWFlags",
"(",
")",
"&",
"AUI_MGR_WHIDBEY_DOCKING_GUIDES",
")",
"!=",
"0",
"self",
".",
"_useAero",
"=",
"0",
"if",
"useAero",
":",
"self",
".",
"_useAero",
"=",
"1",
"elif",
"useWhidbey",
":",
"self",
".",
"_useAero",
"=",
"2",
"if",
"useAero",
":",
"sizeX",
",",
"sizeY",
"=",
"aeroguideSizeX",
",",
"aeroguideSizeY",
"elif",
"useWhidbey",
":",
"sizeX",
",",
"sizeY",
"=",
"whidbeySizeX",
",",
"whidbeySizeY",
"else",
":",
"sizeX",
",",
"sizeY",
"=",
"guideSizeX",
",",
"guideSizeY",
"rectLeft",
"=",
"wx",
".",
"Rect",
"(",
"0",
",",
"sizeY",
",",
"sizeY",
",",
"sizeX",
")",
"rectTop",
"=",
"wx",
".",
"Rect",
"(",
"sizeY",
",",
"0",
",",
"sizeX",
",",
"sizeY",
")",
"rectRight",
"=",
"wx",
".",
"Rect",
"(",
"sizeY",
"+",
"sizeX",
",",
"sizeY",
",",
"sizeY",
",",
"sizeX",
")",
"rectBottom",
"=",
"wx",
".",
"Rect",
"(",
"sizeY",
",",
"sizeX",
"+",
"sizeY",
",",
"sizeX",
",",
"sizeY",
")",
"rectCenter",
"=",
"wx",
".",
"Rect",
"(",
"sizeY",
",",
"sizeY",
",",
"sizeX",
",",
"sizeX",
")",
"if",
"not",
"self",
".",
"_useAero",
":",
"self",
".",
"targetLeft",
"=",
"AuiDockingGuideWindow",
"(",
"self",
",",
"rectLeft",
",",
"wx",
".",
"LEFT",
",",
"True",
",",
"useAero",
")",
"self",
".",
"targetTop",
"=",
"AuiDockingGuideWindow",
"(",
"self",
",",
"rectTop",
",",
"wx",
".",
"TOP",
",",
"True",
",",
"useAero",
")",
"self",
".",
"targetRight",
"=",
"AuiDockingGuideWindow",
"(",
"self",
",",
"rectRight",
",",
"wx",
".",
"RIGHT",
",",
"True",
",",
"useAero",
")",
"self",
".",
"targetBottom",
"=",
"AuiDockingGuideWindow",
"(",
"self",
",",
"rectBottom",
",",
"wx",
".",
"BOTTOM",
",",
"True",
",",
"useAero",
")",
"self",
".",
"targetCenter",
"=",
"AuiDockingGuideWindow",
"(",
"self",
",",
"rectCenter",
",",
"wx",
".",
"CENTER",
",",
"True",
",",
"useAero",
")",
"# top-left diamond",
"tld",
"=",
"[",
"wx",
".",
"Point",
"(",
"rectTop",
".",
"x",
",",
"rectTop",
".",
"y",
"+",
"rectTop",
".",
"height",
"-",
"8",
")",
",",
"wx",
".",
"Point",
"(",
"rectLeft",
".",
"x",
"+",
"rectLeft",
".",
"width",
"-",
"8",
",",
"rectLeft",
".",
"y",
")",
",",
"rectTop",
".",
"GetBottomLeft",
"(",
")",
"]",
"# bottom-left diamond",
"bld",
"=",
"[",
"wx",
".",
"Point",
"(",
"rectLeft",
".",
"x",
"+",
"rectLeft",
".",
"width",
"-",
"8",
",",
"rectLeft",
".",
"y",
"+",
"rectLeft",
".",
"height",
")",
",",
"wx",
".",
"Point",
"(",
"rectBottom",
".",
"x",
",",
"rectBottom",
".",
"y",
"+",
"8",
")",
",",
"rectBottom",
".",
"GetTopLeft",
"(",
")",
"]",
"# top-right diamond",
"trd",
"=",
"[",
"wx",
".",
"Point",
"(",
"rectTop",
".",
"x",
"+",
"rectTop",
".",
"width",
",",
"rectTop",
".",
"y",
"+",
"rectTop",
".",
"height",
"-",
"8",
")",
",",
"wx",
".",
"Point",
"(",
"rectRight",
".",
"x",
"+",
"8",
",",
"rectRight",
".",
"y",
")",
",",
"rectRight",
".",
"GetTopLeft",
"(",
")",
"]",
"# bottom-right diamond",
"brd",
"=",
"[",
"wx",
".",
"Point",
"(",
"rectRight",
".",
"x",
"+",
"8",
",",
"rectRight",
".",
"y",
"+",
"rectRight",
".",
"height",
")",
",",
"wx",
".",
"Point",
"(",
"rectBottom",
".",
"x",
"+",
"rectBottom",
".",
"width",
",",
"rectBottom",
".",
"y",
"+",
"8",
")",
",",
"rectBottom",
".",
"GetTopRight",
"(",
")",
"]",
"self",
".",
"_triangles",
"=",
"[",
"tld",
"[",
"0",
":",
"2",
"]",
",",
"bld",
"[",
"0",
":",
"2",
"]",
",",
"[",
"wx",
".",
"Point",
"(",
"rectTop",
".",
"x",
"+",
"rectTop",
".",
"width",
"-",
"1",
",",
"rectTop",
".",
"y",
"+",
"rectTop",
".",
"height",
"-",
"8",
")",
",",
"wx",
".",
"Point",
"(",
"rectRight",
".",
"x",
"+",
"7",
",",
"rectRight",
".",
"y",
")",
"]",
",",
"[",
"wx",
".",
"Point",
"(",
"rectRight",
".",
"x",
"+",
"7",
",",
"rectRight",
".",
"y",
"+",
"rectRight",
".",
"height",
")",
",",
"wx",
".",
"Point",
"(",
"rectBottom",
".",
"x",
"+",
"rectBottom",
".",
"width",
"-",
"1",
",",
"rectBottom",
".",
"y",
"+",
"8",
")",
"]",
"]",
"region",
"=",
"wx",
".",
"Region",
"(",
")",
"region",
".",
"UnionRect",
"(",
"rectLeft",
")",
"region",
".",
"UnionRect",
"(",
"rectTop",
")",
"region",
".",
"UnionRect",
"(",
"rectRight",
")",
"region",
".",
"UnionRect",
"(",
"rectBottom",
")",
"region",
".",
"UnionRect",
"(",
"rectCenter",
")",
"region",
".",
"UnionRegion",
"(",
"wx",
".",
"RegionFromPoints",
"(",
"tld",
")",
")",
"region",
".",
"UnionRegion",
"(",
"wx",
".",
"RegionFromPoints",
"(",
"bld",
")",
")",
"region",
".",
"UnionRegion",
"(",
"wx",
".",
"RegionFromPoints",
"(",
"trd",
")",
")",
"region",
".",
"UnionRegion",
"(",
"wx",
".",
"RegionFromPoints",
"(",
"brd",
")",
")",
"elif",
"useAero",
":",
"self",
".",
"_aeroBmp",
"=",
"aero_dock_pane",
".",
"GetBitmap",
"(",
")",
"region",
"=",
"wx",
".",
"RegionFromBitmap",
"(",
"self",
".",
"_aeroBmp",
")",
"self",
".",
"_allAeroBmps",
"=",
"[",
"aero_dock_pane_left",
".",
"GetBitmap",
"(",
")",
",",
"aero_dock_pane_top",
".",
"GetBitmap",
"(",
")",
",",
"aero_dock_pane_right",
".",
"GetBitmap",
"(",
")",
",",
"aero_dock_pane_bottom",
".",
"GetBitmap",
"(",
")",
",",
"aero_dock_pane_center",
".",
"GetBitmap",
"(",
")",
",",
"aero_dock_pane",
".",
"GetBitmap",
"(",
")",
"]",
"self",
".",
"_deniedBitmap",
"=",
"aero_denied",
".",
"GetBitmap",
"(",
")",
"self",
".",
"_aeroRects",
"=",
"[",
"rectLeft",
",",
"rectTop",
",",
"rectRight",
",",
"rectBottom",
",",
"rectCenter",
"]",
"self",
".",
"_valid",
"=",
"True",
"elif",
"useWhidbey",
":",
"self",
".",
"_aeroBmp",
"=",
"whidbey_dock_pane",
".",
"GetBitmap",
"(",
")",
"region",
"=",
"wx",
".",
"RegionFromBitmap",
"(",
"self",
".",
"_aeroBmp",
")",
"self",
".",
"_allAeroBmps",
"=",
"[",
"whidbey_dock_pane_left",
".",
"GetBitmap",
"(",
")",
",",
"whidbey_dock_pane_top",
".",
"GetBitmap",
"(",
")",
",",
"whidbey_dock_pane_right",
".",
"GetBitmap",
"(",
")",
",",
"whidbey_dock_pane_bottom",
".",
"GetBitmap",
"(",
")",
",",
"whidbey_dock_pane_center",
".",
"GetBitmap",
"(",
")",
",",
"whidbey_dock_pane",
".",
"GetBitmap",
"(",
")",
"]",
"self",
".",
"_deniedBitmap",
"=",
"whidbey_denied",
".",
"GetBitmap",
"(",
")",
"self",
".",
"_aeroRects",
"=",
"[",
"rectLeft",
",",
"rectTop",
",",
"rectRight",
",",
"rectBottom",
",",
"rectCenter",
"]",
"self",
".",
"_valid",
"=",
"True",
"self",
".",
"region",
"=",
"region"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/aui/framemanager.py#L2413-L2506
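For the non-aero branch above, the five rectangles tile a plus-shaped docking guide. A quick sanity check of the layout arithmetic (the sizeX and sizeY values are hypothetical):

sizeX, sizeY = 29, 45  # hypothetical bitmap dimensions
rects = {
    'left':   (0,             sizeY,         sizeY, sizeX),
    'top':    (sizeY,         0,             sizeX, sizeY),
    'right':  (sizeY + sizeX, sizeY,         sizeY, sizeX),
    'bottom': (sizeY,         sizeX + sizeY, sizeX, sizeY),
    'center': (sizeY,         sizeY,         sizeX, sizeX),
}
# The overall footprint is a square of side 2*sizeY + sizeX.
print(2 * sizeY + sizeX)  # 119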
|
||
SequoiaDB/SequoiaDB
|
2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
|
tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py
|
python
|
xmlNode.addPrevSibling
|
(self, elem)
|
return __tmp
|
Add a new node @elem as the previous sibling of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
node was already inserted in a document it is first
unlinked from its existing context. If the new node is
ATTRIBUTE, it is added into properties instead of children.
If there is an attribute with equal name, it is first
destroyed.
|
Add a new node
|
[
"Add",
"a",
"new",
"node"
] |
def addPrevSibling(self, elem):
"""Add a new node @elem as the previous sibling of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
node was already inserted in a document it is first
unlinked from its existing context. If the new node is
ATTRIBUTE, it is added into properties instead of children.
If there is an attribute with equal name, it is first
destroyed. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddPrevSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddPrevSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
|
[
"def",
"addPrevSibling",
"(",
"self",
",",
"elem",
")",
":",
"if",
"elem",
"is",
"None",
":",
"elem__o",
"=",
"None",
"else",
":",
"elem__o",
"=",
"elem",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlAddPrevSibling",
"(",
"self",
".",
"_o",
",",
"elem__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlAddPrevSibling() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] |
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L3079-L3092
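A minimal usage sketch (assumes the classic libxml2 Python bindings are importable as libxml2):

import libxml2

doc = libxml2.parseDoc("<root><b/></root>")
b = doc.getRootElement().children   # the <b/> element
a = libxml2.newNode("a")            # new node to insert before it
b.addPrevSibling(a)
print(doc.serialize())              # ...<root><a/><b/></root>
doc.freeDoc()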
|
|
thalium/icebox
|
99d147d5b9269222225443ce171b4fd46d8985d4
|
third_party/retdec-3.2/scripts/type_extractor/type_extractor/func_info.py
|
python
|
FuncInfo.header_text
|
(self)
|
return object_attr_string_repr(self.header)
|
Returns a textual representation of the header.
|
Returns a textual representation of the header.
|
[
"Returns",
"a",
"textual",
"representation",
"of",
"the",
"header",
"."
] |
def header_text(self):
"""Returns a textual representation of the header."""
return object_attr_string_repr(self.header)
|
[
"def",
"header_text",
"(",
"self",
")",
":",
"return",
"object_attr_string_repr",
"(",
"self",
".",
"header",
")"
] |
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/retdec-3.2/scripts/type_extractor/type_extractor/func_info.py#L29-L31
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/webapp2/webapp2.py
|
python
|
Request.get
|
(self, argument_name, default_value='', allow_multiple=False)
|
Returns the query or POST argument with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
:param argument_name:
The name of the query or POST argument.
:param default_value:
The value to return if the given argument is not present.
:param allow_multiple:
Return a list of values with the given name (deprecated).
:returns:
If allow_multiple is False (which it is by default), we return
the first value with the given name in the request. If it
is True, we always return a list.
|
Returns the query or POST argument with the given name.
|
[
"Returns",
"the",
"query",
"or",
"POST",
"argument",
"with",
"the",
"given",
"name",
"."
] |
def get(self, argument_name, default_value='', allow_multiple=False):
"""Returns the query or POST argument with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
:param argument_name:
The name of the query or POST argument.
:param default_value:
The value to return if the given argument is not present.
:param allow_multiple:
Return a list of values with the given name (deprecated).
:returns:
If allow_multiple is False (which it is by default), we return
the first value with the given name in the request. If it
is True, we always return a list.
"""
param_value = self.get_all(argument_name)
if allow_multiple:
logging.warning('allow_multiple is a deprecated param. '
'Please use the Request.get_all() method instead.')
if len(param_value) > 0:
if allow_multiple:
return param_value
return param_value[0]
else:
if allow_multiple and not default_value:
return []
return default_value
|
[
"def",
"get",
"(",
"self",
",",
"argument_name",
",",
"default_value",
"=",
"''",
",",
"allow_multiple",
"=",
"False",
")",
":",
"param_value",
"=",
"self",
".",
"get_all",
"(",
"argument_name",
")",
"if",
"allow_multiple",
":",
"logging",
".",
"warning",
"(",
"'allow_multiple is a deprecated param. '",
"'Please use the Request.get_all() method instead.'",
")",
"if",
"len",
"(",
"param_value",
")",
">",
"0",
":",
"if",
"allow_multiple",
":",
"return",
"param_value",
"return",
"param_value",
"[",
"0",
"]",
"else",
":",
"if",
"allow_multiple",
"and",
"not",
"default_value",
":",
"return",
"[",
"]",
"return",
"default_value"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/webapp2/webapp2.py#L158-L189
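A minimal sketch of typical usage inside a webapp2 handler (the handler and parameter names are illustrative):

import webapp2

class SearchHandler(webapp2.RequestHandler):
    def get(self):
        # For /search?q=foo&page=2 -> query == 'foo', page == '2'
        query = self.request.get('q')                      # '' when absent
        page = self.request.get('page', default_value='1')
        self.response.write('query=%s page=%s' % (query, page))

app = webapp2.WSGIApplication([('/search', SearchHandler)])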
|
||
NERSC/timemory
|
431912b360ff50d1a160d7826e2eea04fbd1037f
|
timemory/profiler/profiler.py
|
python
|
Profiler.is_enabled
|
()
|
return False
|
Checks whether the profiler is enabled
|
Checks whether the profiler is enabled
|
[
"Checks",
"whether",
"the",
"profiler",
"is",
"enabled"
] |
def is_enabled():
"""Checks whether the profiler is enabled"""
try:
return Profiler._conditional_functor()
except Exception:
pass
return False
|
[
"def",
"is_enabled",
"(",
")",
":",
"try",
":",
"return",
"Profiler",
".",
"_conditional_functor",
"(",
")",
"except",
"Exception",
":",
"pass",
"return",
"False"
] |
https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/profiler/profiler.py#L97-L104
|
|
pmq20/node-packer
|
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
|
lts/tools/gyp/pylib/gyp/xcode_emulation.py
|
python
|
GetXcodeArchsDefault
|
()
|
return XCODE_ARCHS_DEFAULT_CACHE
|
Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depend on the version of Xcode used.
All versions before Xcode 5.0, and Xcode 5.1 and later, use $(ARCHS_STANDARD)
if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
$(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added in Xcode 5.0
and deprecated with Xcode 5.1.
For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
64-bit architecture as part of $(ARCHS_STANDARD) and default to building only it.
For the "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
are also part of $(ARCHS_STANDARD).
All those rules are coded in the construction of the |XcodeArchsDefault|
object to use depending on the version of Xcode detected. The object is
cached for performance reasons.
|
Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depend on the version of Xcode used.
|
[
"Returns",
"the",
"|XcodeArchsDefault|",
"object",
"to",
"use",
"to",
"expand",
"ARCHS",
"for",
"the",
"installed",
"version",
"of",
"Xcode",
".",
"The",
"default",
"values",
"used",
"by",
"Xcode",
"for",
"ARCHS",
"and",
"the",
"expansion",
"of",
"the",
"variables",
"depends",
"on",
"the",
"version",
"of",
"Xcode",
"used",
"."
] |
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depend on the version of Xcode used.
All versions before Xcode 5.0, and Xcode 5.1 and later, use $(ARCHS_STANDARD)
if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
$(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added in Xcode 5.0
and deprecated with Xcode 5.1.
For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
64-bit architecture as part of $(ARCHS_STANDARD) and default to building only it.
For the "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
are also part of $(ARCHS_STANDARD).
All those rules are coded in the construction of the |XcodeArchsDefault|
object to use depending on the version of Xcode detected. The object is
cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
|
[
"def",
"GetXcodeArchsDefault",
"(",
")",
":",
"global",
"XCODE_ARCHS_DEFAULT_CACHE",
"if",
"XCODE_ARCHS_DEFAULT_CACHE",
":",
"return",
"XCODE_ARCHS_DEFAULT_CACHE",
"xcode_version",
",",
"_",
"=",
"XcodeVersion",
"(",
")",
"if",
"xcode_version",
"<",
"'0500'",
":",
"XCODE_ARCHS_DEFAULT_CACHE",
"=",
"XcodeArchsDefault",
"(",
"'$(ARCHS_STANDARD)'",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'i386'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'i386'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'armv7'",
"]",
")",
")",
"elif",
"xcode_version",
"<",
"'0510'",
":",
"XCODE_ARCHS_DEFAULT_CACHE",
"=",
"XcodeArchsDefault",
"(",
"'$(ARCHS_STANDARD_INCLUDING_64_BIT)'",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'x86_64'",
"]",
",",
"[",
"'x86_64'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'i386'",
"]",
",",
"[",
"'i386'",
",",
"'x86_64'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'armv7'",
",",
"'armv7s'",
"]",
",",
"[",
"'armv7'",
",",
"'armv7s'",
",",
"'arm64'",
"]",
")",
")",
"else",
":",
"XCODE_ARCHS_DEFAULT_CACHE",
"=",
"XcodeArchsDefault",
"(",
"'$(ARCHS_STANDARD)'",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'x86_64'",
"]",
",",
"[",
"'x86_64'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'i386'",
",",
"'x86_64'",
"]",
",",
"[",
"'i386'",
",",
"'x86_64'",
"]",
")",
",",
"XcodeArchsVariableMapping",
"(",
"[",
"'armv7'",
",",
"'armv7s'",
",",
"'arm64'",
"]",
",",
"[",
"'armv7'",
",",
"'armv7s'",
",",
"'arm64'",
"]",
")",
")",
"return",
"XCODE_ARCHS_DEFAULT_CACHE"
] |
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/pylib/gyp/xcode_emulation.py#L99-L145
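The function above computes its result once and then serves it from a module-level global. A stripped-down sketch of the same caching pattern (the names and the stand-in computation are illustrative, not gyp code):

import time

_DEFAULT_CACHE = None

def get_default_cached():
    global _DEFAULT_CACHE
    if _DEFAULT_CACHE:               # already computed: reuse it
        return _DEFAULT_CACHE
    time.sleep(0.1)                  # stand-in for probing the Xcode version
    _DEFAULT_CACHE = '$(ARCHS_STANDARD)'
    return _DEFAULT_CACHE

get_default_cached()  # slow first call fills the cache
get_default_cached()  # subsequent calls return immediately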
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/mailbox.py
|
python
|
_lock_file
|
(f, dotlock=True)
|
Lock file f using lockf and dot locking.
|
Lock file f using lockf and dot locking.
|
[
"Lock",
"file",
"f",
"using",
"lockf",
"and",
"dot",
"locking",
"."
] |
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except OSError as e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
try:
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
except (AttributeError, PermissionError):
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
else:
os.unlink(pre_lock.name)
except FileExistsError:
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
|
[
"def",
"_lock_file",
"(",
"f",
",",
"dotlock",
"=",
"True",
")",
":",
"dotlock_done",
"=",
"False",
"try",
":",
"if",
"fcntl",
":",
"try",
":",
"fcntl",
".",
"lockf",
"(",
"f",
",",
"fcntl",
".",
"LOCK_EX",
"|",
"fcntl",
".",
"LOCK_NB",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"(",
"errno",
".",
"EAGAIN",
",",
"errno",
".",
"EACCES",
",",
"errno",
".",
"EROFS",
")",
":",
"raise",
"ExternalClashError",
"(",
"'lockf: lock unavailable: %s'",
"%",
"f",
".",
"name",
")",
"else",
":",
"raise",
"if",
"dotlock",
":",
"try",
":",
"pre_lock",
"=",
"_create_temporary",
"(",
"f",
".",
"name",
"+",
"'.lock'",
")",
"pre_lock",
".",
"close",
"(",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"(",
"errno",
".",
"EACCES",
",",
"errno",
".",
"EROFS",
")",
":",
"return",
"# Without write access, just skip dotlocking.",
"else",
":",
"raise",
"try",
":",
"try",
":",
"os",
".",
"link",
"(",
"pre_lock",
".",
"name",
",",
"f",
".",
"name",
"+",
"'.lock'",
")",
"dotlock_done",
"=",
"True",
"except",
"(",
"AttributeError",
",",
"PermissionError",
")",
":",
"os",
".",
"rename",
"(",
"pre_lock",
".",
"name",
",",
"f",
".",
"name",
"+",
"'.lock'",
")",
"dotlock_done",
"=",
"True",
"else",
":",
"os",
".",
"unlink",
"(",
"pre_lock",
".",
"name",
")",
"except",
"FileExistsError",
":",
"os",
".",
"remove",
"(",
"pre_lock",
".",
"name",
")",
"raise",
"ExternalClashError",
"(",
"'dot lock unavailable: %s'",
"%",
"f",
".",
"name",
")",
"except",
":",
"if",
"fcntl",
":",
"fcntl",
".",
"lockf",
"(",
"f",
",",
"fcntl",
".",
"LOCK_UN",
")",
"if",
"dotlock_done",
":",
"os",
".",
"remove",
"(",
"f",
".",
"name",
"+",
"'.lock'",
")",
"raise"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/mailbox.py#L2058-L2098
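A minimal usage sketch pairing the lock with its release (not from mailbox.py; POSIX-only, assumes _lock_file is in scope even though it is private to the module, and error handling is elided):

import fcntl, os

with open('/tmp/demo.mbox', 'a+') as f:
    _lock_file(f)                        # lockf + dot lock, as defined above
    try:
        f.write('From demo\n')           # mutate the mailbox while locked
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)    # release the lockf lock
        if os.path.exists(f.name + '.lock'):
            os.remove(f.name + '.lock')  # remove the dot-lock file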
|
||
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
src/python/turicreate/data_structures/sarray.py
|
python
|
SArray.__has_size__
|
(self)
|
return self.__proxy__.has_size()
|
Returns whether or not the size of the SArray is known.
|
Returns whether or not the size of the SArray is known.
|
[
"Returns",
"whether",
"or",
"not",
"the",
"size",
"of",
"the",
"SArray",
"is",
"known",
"."
] |
def __has_size__(self):
"""
Returns whether or not the size of the SArray is known.
"""
return self.__proxy__.has_size()
|
[
"def",
"__has_size__",
"(",
"self",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"has_size",
"(",
")"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/data_structures/sarray.py#L1357-L1361
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py
|
python
|
Spinbox.selection_clear
|
(self)
|
return self.selection("clear")
|
Clear the selection
If the selection isn't in this widget then the
command has no effect. Returns an empty string.
|
Clear the selection
|
[
"Clear",
"the",
"selection"
] |
def selection_clear(self):
"""Clear the selection
If the selection isn't in this widget then the
command has no effect. Returns an empty string.
"""
return self.selection("clear")
|
[
"def",
"selection_clear",
"(",
"self",
")",
":",
"return",
"self",
".",
"selection",
"(",
"\"clear\"",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L3511-L3517
|
|
ros-perception/vision_opencv
|
c791220cefd0abf02c6719e2ce0fea465857a88e
|
image_geometry/src/image_geometry/cameramodels.py
|
python
|
PinholeCameraModel.rectifyImage
|
(self, raw, rectified)
|
:param raw: input image
:type raw: :class:`CvMat` or :class:`IplImage`
:param rectified: rectified output image
:type rectified: :class:`CvMat` or :class:`IplImage`
Applies the rectification specified by the camera parameters :math:`K` and :math:`D` to the image `raw` and writes the resulting image to `rectified`.
|
:param raw: input image
:type raw: :class:`CvMat` or :class:`IplImage`
:param rectified: rectified output image
:type rectified: :class:`CvMat` or :class:`IplImage`
|
[
":",
"param",
"raw",
":",
"input",
"image",
":",
"type",
"raw",
":",
":",
"class",
":",
"CvMat",
"or",
":",
"class",
":",
"IplImage",
":",
"param",
"rectified",
":",
"rectified",
"output",
"image",
":",
"type",
"rectified",
":",
":",
"class",
":",
"CvMat",
"or",
":",
"class",
":",
"IplImage"
] |
def rectifyImage(self, raw, rectified):
"""
:param raw: input image
:type raw: :class:`CvMat` or :class:`IplImage`
:param rectified: rectified output image
:type rectified: :class:`CvMat` or :class:`IplImage`
Applies the rectification specified by the camera parameters :math:`K` and :math:`D` to the image `raw` and writes the resulting image to `rectified`.
"""
self.mapx = numpy.ndarray(shape=(self.height, self.width, 1),
dtype='float32')
self.mapy = numpy.ndarray(shape=(self.height, self.width, 1),
dtype='float32')
cv2.initUndistortRectifyMap(self.K, self.D, self.R, self.P,
(self.width, self.height), cv2.CV_32FC1, self.mapx, self.mapy)
cv2.remap(raw, self.mapx, self.mapy, cv2.INTER_CUBIC, rectified)
|
[
"def",
"rectifyImage",
"(",
"self",
",",
"raw",
",",
"rectified",
")",
":",
"self",
".",
"mapx",
"=",
"numpy",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"self",
".",
"height",
",",
"self",
".",
"width",
",",
"1",
")",
",",
"dtype",
"=",
"'float32'",
")",
"self",
".",
"mapy",
"=",
"numpy",
".",
"ndarray",
"(",
"shape",
"=",
"(",
"self",
".",
"height",
",",
"self",
".",
"width",
",",
"1",
")",
",",
"dtype",
"=",
"'float32'",
")",
"cv2",
".",
"initUndistortRectifyMap",
"(",
"self",
".",
"K",
",",
"self",
".",
"D",
",",
"self",
".",
"R",
",",
"self",
".",
"P",
",",
"(",
"self",
".",
"width",
",",
"self",
".",
"height",
")",
",",
"cv2",
".",
"CV_32FC1",
",",
"self",
".",
"mapx",
",",
"self",
".",
"mapy",
")",
"cv2",
".",
"remap",
"(",
"raw",
",",
"self",
".",
"mapx",
",",
"self",
".",
"mapy",
",",
"cv2",
".",
"INTER_CUBIC",
",",
"rectified",
")"
] |
https://github.com/ros-perception/vision_opencv/blob/c791220cefd0abf02c6719e2ce0fea465857a88e/image_geometry/src/image_geometry/cameramodels.py#L76-L92
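A hedged usage sketch for rectifyImage; `camera_info` (a sensor_msgs/CameraInfo) and `raw` (an OpenCV image array) are assumed inputs supplied elsewhere:

```python
import numpy as np
from image_geometry import PinholeCameraModel

model = PinholeCameraModel()
model.fromCameraInfo(camera_info)   # assumption: CameraInfo from a ROS topic
rectified = np.empty_like(raw)      # output buffer, same shape/dtype as input
model.rectifyImage(raw, rectified)  # undistorts `raw` into `rectified` in place
```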
|
||
SpenceKonde/megaTinyCore
|
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
|
megaavr/tools/libs/pyedbglib/protocols/ati.py
|
python
|
AsynchronousTransportInterface.read_response_buffer
|
(self, num_bytes=None)
|
return self.read_buffer(0, num_bytes, buffer_type=ATI_CTRL_TYPE_CMDRSP)
|
Read data from the response buffer
:param num_bytes: Number of bytes to read from buffer
:return: bytearray of data bytes read from the buffer
|
Read data from the response buffer
|
[
"Read",
"data",
"from",
"the",
"response",
"buffer"
] |
def read_response_buffer(self, num_bytes=None):
"""
Read data from the response buffer
:param num_bytes: Number of bytes to read from buffer
:return: bytearray of data bytes read from the buffer
"""
return self.read_buffer(0, num_bytes, buffer_type=ATI_CTRL_TYPE_CMDRSP)
|
[
"def",
"read_response_buffer",
"(",
"self",
",",
"num_bytes",
"=",
"None",
")",
":",
"return",
"self",
".",
"read_buffer",
"(",
"0",
",",
"num_bytes",
",",
"buffer_type",
"=",
"ATI_CTRL_TYPE_CMDRSP",
")"
] |
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pyedbglib/protocols/ati.py#L113-L120
|
|
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/python/training/supervisor.py
|
python
|
Supervisor.summary_op
|
(self)
|
return self._summary_op
|
Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
|
Return the Summary Tensor used by the chief supervisor.
|
[
"Return",
"the",
"Summary",
"Tensor",
"used",
"by",
"the",
"chief",
"supervisor",
"."
] |
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
|
[
"def",
"summary_op",
"(",
"self",
")",
":",
"return",
"self",
".",
"_summary_op"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/training/supervisor.py#L566-L572
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/setuptools/py2/setuptools/command/easy_install.py
|
python
|
CommandSpec.best
|
(cls)
|
return cls
|
Choose the best CommandSpec class based on environmental conditions.
|
Choose the best CommandSpec class based on environmental conditions.
|
[
"Choose",
"the",
"best",
"CommandSpec",
"class",
"based",
"on",
"environmental",
"conditions",
"."
] |
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
|
[
"def",
"best",
"(",
"cls",
")",
":",
"return",
"cls"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/command/easy_install.py#L1982-L1986
|
|
NVIDIA/TensorRT
|
42805f078052daad1a98bc5965974fcffaad0960
|
samples/python/yolov3_onnx/data_processing.py
|
python
|
PreprocessYOLO._load_and_resize
|
(self, input_image_path)
|
return image_raw, image_resized
|
Load an image from the specified path and resize it to the input resolution.
Return the input image before resizing as a PIL Image (required for visualization),
and the resized image as a NumPy float array.
Keyword arguments:
input_image_path -- string path of the image to be loaded
|
Load an image from the specified path and resize it to the input resolution.
Return the input image before resizing as a PIL Image (required for visualization),
and the resized image as a NumPy float array.
|
[
"Load",
"an",
"image",
"from",
"the",
"specified",
"path",
"and",
"resize",
"it",
"to",
"the",
"input",
"resolution",
".",
"Return",
"the",
"input",
"image",
"before",
"resizing",
"as",
"a",
"PIL",
"Image",
"(",
"required",
"for",
"visualization",
")",
"and",
"the",
"resized",
"image",
"as",
"a",
"NumPy",
"float",
"array",
"."
] |
def _load_and_resize(self, input_image_path):
"""Load an image from the specified path and resize it to the input resolution.
Return the input image before resizing as a PIL Image (required for visualization),
and the resized image as a NumPy float array.
Keyword arguments:
input_image_path -- string path of the image to be loaded
"""
image_raw = Image.open(input_image_path)
# Expecting yolo_input_resolution in (height, width) format, adjusting to PIL
# convention (width, height) in PIL:
new_resolution = (
self.yolo_input_resolution[1],
self.yolo_input_resolution[0])
image_resized = image_raw.resize(
new_resolution, resample=Image.BICUBIC)
image_resized = np.array(image_resized, dtype=np.float32, order='C')
return image_raw, image_resized
|
[
"def",
"_load_and_resize",
"(",
"self",
",",
"input_image_path",
")",
":",
"image_raw",
"=",
"Image",
".",
"open",
"(",
"input_image_path",
")",
"# Expecting yolo_input_resolution in (height, width) format, adjusting to PIL",
"# convention (width, height) in PIL:",
"new_resolution",
"=",
"(",
"self",
".",
"yolo_input_resolution",
"[",
"1",
"]",
",",
"self",
".",
"yolo_input_resolution",
"[",
"0",
"]",
")",
"image_resized",
"=",
"image_raw",
".",
"resize",
"(",
"new_resolution",
",",
"resample",
"=",
"Image",
".",
"BICUBIC",
")",
"image_resized",
"=",
"np",
".",
"array",
"(",
"image_resized",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"order",
"=",
"'C'",
")",
"return",
"image_raw",
",",
"image_resized"
] |
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/samples/python/yolov3_onnx/data_processing.py#L65-L83
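A standalone sketch of the same resize step outside the class, showing why the resolution tuple is swapped: PIL expects (width, height) while the YOLO resolution is given as (height, width). The file path is hypothetical.

```python
import numpy as np
from PIL import Image

yolo_input_resolution = (608, 608)            # (height, width), assumed
image_raw = Image.open("dog.jpg")             # hypothetical input image
new_resolution = (yolo_input_resolution[1], yolo_input_resolution[0])
image_resized = image_raw.resize(new_resolution, resample=Image.BICUBIC)
image_resized = np.array(image_resized, dtype=np.float32, order="C")
print(image_resized.shape)                    # (608, 608, 3) for an RGB image
```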
|
|
psi4/psi4
|
be533f7f426b6ccc263904e55122899b16663395
|
psi4/driver/procrouting/wrappers_cfour.py
|
python
|
vpt2
|
(name, **kwargs)
|
Perform vibrational second-order perturbation computation through
Cfour to get anharmonic frequencies. This version uses c4 for the disp
and pt2 but gets gradients from p4.
:type c4full: :ref:`boolean <op_py_boolean>`
:param c4full: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether when *name* indicates a Cfour method and *mode*
indicates a sow/reap approach, sown files are direct ZMAT files
and FJOBARC files are expected to reap, so that Cfour only, not
Cfour-through-Psi4, is needed for distributed jobs.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Presently uses all gradients. Could mix in analytic 2nd-derivs.
- Collect results.
- Manage scratch / subdir better.
- Allow CFOUR_BASIS
- Consider forcing some tighter convcrit, c4 and p4
- mixed ang/bohr signals
- error by converting to ang in psi?
- Expand CURRENT DIPOLE XYZ beyond SCF
- Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
- switch C --> S/R with recovery using shelf
|
Perform vibrational second-order perturbation computation through
Cfour to get anharmonic frequencies. This version uses c4 for the disp
and pt2 but gets gradients from p4.
|
[
"Perform",
"vibrational",
"second",
"-",
"order",
"perturbation",
"computation",
"through",
"Cfour",
"to",
"get",
"anharmonic",
"frequencies",
".",
"This",
"version",
"uses",
"c4",
"for",
"the",
"disp",
"and",
"pt2",
"but",
"gets",
"gradients",
"from",
"p4",
"."
] |
def vpt2(name, **kwargs):
"""Perform vibrational second-order perturbation computation through
Cfour to get anharmonic frequencies. This version uses c4 for the disp
and pt2 but gets gradients from p4.
:type c4full: :ref:`boolean <op_py_boolean>`
:param c4full: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether when *name* indicates a Cfour method and *mode*
indicates a sow/reap approach, sown files are direct ZMAT files
and FJOBARC files are expected to reap, so that Cfour only, not
Cfour-through-Psi4, is needed for distributed jobs.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Presently uses all gradients. Could mix in analytic 2nd-derivs.
- Collect results.
- Manage scratch / subdir better.
- Allow CFOUR_BASIS
- Consider forcing some tighter convcrit, c4 and p4
- mixed ang/bohr signals
- error by converting to ang in psi?
- Expand CURRENT DIPOLE XYZ beyond SCF
- Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
- switch C --> S/R with recovery using shelf
"""
lowername = name.lower()
kwargs = p4util.kwargs_lower(kwargs)
optstash = p4util.OptionsState(
['BASIS'])
# Option mode of operation- whether vpt2 run in one job or files farmed out
if not('vpt2_mode' in kwargs):
if ('mode' in kwargs):
kwargs['vpt2_mode'] = kwargs['mode']
del kwargs['mode']
else:
kwargs['vpt2_mode'] = 'continuous'
# Switches for route through code- S/R or continuous & Psi4 or Cfour gradients
isSowReap = True if kwargs['vpt2_mode'].lower() == 'sowreap' else False
isC4notP4 = bool(re.match('cfour', lowername)) or bool(re.match('c4-', lowername))
isC4fully = True if ('c4full' in kwargs and yes.match(str(kwargs['c4full'])) and isC4notP4 and isSowReap) else False
# Save submission directory and basis set
current_directory = os.getcwd()
user_basis = core.get_global_option('BASIS')
# Open data persistence shelf- vital for sowreap, checkpoint for continuous
shelf = shelve.open(current_directory + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf', writeback=True)
# Cfour keywords to request vpt2 analysis through findif gradients
core.set_local_option('CFOUR', 'CFOUR_VIBRATION', 'FINDIF')
core.set_local_option('CFOUR', 'CFOUR_FREQ_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANH_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANHARMONIC', 'VPT2')
core.set_local_option('CFOUR', 'CFOUR_FD_PROJECT', 'OFF')
# When a Psi4 method is requested for vpt2, a skeleton of
# computations in Cfour is still required to hang the gradients
# upon. The skeleton is as cheap as possible (integrals only
# & sto-3g) and set up here.
if isC4notP4:
skelname = lowername
else:
skelname = 'c4-scf'
core.set_global_option('BASIS', 'STO-3G')
# P4 'c4-scf'/'cfour'CALC_LEVEL lowername # temporary
# C4 lowername cfour{} # temporary
if 'status' not in shelf:
shelf['status'] = 'initialized'
shelf['linkage'] = os.getpid()
shelf['zmat'] = {} # Cfour-generated ZMAT files with finite difference geometries
shelf['fjobarc'] = {} # Cfour- or Psi4-generated ascii files with packaged gradient results
shelf.sync()
else:
pass
# how decide whether to use. keep precedent of intco.dat in mind
# Construct and move into directory job scratch / cfour scratch / harm
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path()) # psi_scratch
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir) # psi_scratch/cfour
if not os.path.exists('harm'):
os.mkdir('harm')
os.chdir('harm') # psi_scratch/cfour/harm
psioh.set_specific_retention(32, True) # temporary, to track p4 scratch
#shelf['status'] = 'anharm_jobs_sown' # temporary to force backtrack
print('STAT', shelf['status']) # temporary
# Generate the ZMAT input file in scratch
with open('ZMAT', 'w') as handle:
cfour_infile = write_zmat(skelname, 1)
handle.write(cfour_infile)
print('\n====== Begin ZMAT input for CFOUR ======')
print(open('ZMAT', 'r').read())
print('======= End ZMAT input for CFOUR =======\n')
shelf['genbas'] = open('GENBAS', 'r').read()
# Check existing shelf consistent with generated ZMAT, store
if ('000-000' in shelf['zmat']) and (shelf['zmat']['000-000'] != cfour_infile):
diff = difflib.Differ().compare(shelf['zmat']['000-000'].splitlines(), cfour_infile.splitlines())
raise ValidationError("""Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\n\n""" +
'\n'.join(list(diff)))
shelf['zmat']['000-000'] = cfour_infile
shelf.sync()
# Reset basis after Cfour skeleton seeded
core.set_global_option('BASIS', user_basis)
if shelf['status'] == 'initialized':
p4util.banner(' VPT2 Setup: Harmonic ')
# Generate the displacements that will form the harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the harmonic freq
zmats0N = ['000-' + item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s-%s has been read\n' % ('zmat' + zm2, zm1, zm2))
core.print_out('%s\n' % shelf['zmat'][zm12])
# S/R: Write distributed input files for harmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
print(msg)
shelf['status'] = 'harm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'harm_jobs_sown':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmats0N, reap_job_validate,
shelf['linkage'], ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the harmonic freq
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'harm_jobs_reaped'
if shelf['status'] == 'harm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
harmout = run_cfour_module('xjoda')
harmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
harmout += run_cfour_module('xja2fja')
harmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
try:
os.remove('zmat' + zm2)
except OSError:
pass
harmout += run_cfour_module('xjoda')
harmout += run_cfour_module('xcubic')
core.print_out(harmout)
with open('harm.out', 'w') as handle:
handle.write(harmout)
# Generate displacements along harmonic normal modes
zmatsN0 = [item[-3:] for item in sorted(glob.glob('zmat*'))]
os.chdir('..') # psi_scratch/cfour
for zm1 in zmatsN0:
zm12 = zm1 + '-000'
with open(psioh.get_default_path() + cfour_tmpdir + '/harm/zmat' + zm1, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm1, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
# Collect displacements along the normal coordinates generated by the harmonic freq.
# Further harmonic freqs are to be run at each of these to produce quartic force field.
# To carry these out, generate displacements for findif by gradient at each displacement.
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm12])
shutil.copy2('../harm/GENBAS', 'GENBAS') # ln -s $ecpdir/ECPDATA $j/ECPDATA
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the anharmonic freq
zmatsNN = [item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm2 in zmatsNN:
zm12 = zm1 + '-' + zm2
with open(psioh.get_default_path() + cfour_tmpdir + '/' + zm1 + '/zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm2, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
os.chdir('..') # psi_scratch/cfour
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Write distributed input files for anharmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
# GENBAS needed here
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
print(msg)
shelf['status'] = 'anharm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'anharm_jobs_sown':
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmatsNN,
reap_job_validate, shelf['linkage'],
['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the anharmonic freq
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'anharm_jobs_reaped'
if shelf['status'] == 'anharm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
zmatsN0 = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] == '000')]
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir) # psi_scratch/cfour
if os.path.exists('anharm'):
shutil.rmtree('anharm')
os.mkdir('anharm')
os.chdir('harm') # psi_scratch/cfour/harm
run_cfour_module('xclean')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xcubic')
core.print_out(anharmout)
with open('harm.out', 'w') as handle:
handle.write(anharmout)
# Process the gradients into harmonic freq at each normco displaced point
os.chdir('..') # psi_scratch/cfour
for zm11 in zmatsN0:
zm1 = zm11[:3]
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
run_cfour_module('xclean')
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm11])
shutil.copy2('../harm/GENBAS', 'GENBAS')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm22 in [item for item in zmatsNN if (item[:3] == zm1 and item[-3:] != '000')]:
zm2 = zm22[-3:]
zm12 = zm1 + '-' + zm2
print(zm12)
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xja2fja')
with open('FJOBARC', 'r') as handle:
shelf['fjobarc'][zm11] = handle.read()
shelf.sync()
core.print_out(anharmout)
with open('partial.out', 'w') as handle:
handle.write(anharmout)
os.chdir('..') # psi_scratch/cfour
# Process the harmonic freqs at normco displacements into anharmonic freq
p4util.banner(' VPT2 Results: Anharmonic ')
os.chdir('anharm') # psi_scratch/cfour/anharm
shutil.copy2('../harm/JOBARC', 'JOBARC')
shutil.copy2('../harm/JAINDX', 'JAINDX')
for zm12 in zmatsN0:
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout = run_cfour_module('xja2fja')
anharmout += run_cfour_module('xcubic')
shutil.move('FJOBARC', 'fja.' + zm12)
core.print_out(anharmout)
with open('anharm.out', 'w') as handle:
handle.write(anharmout)
shelf['status'] = 'vpt2_completed'
# Finish up
os.chdir(current_directory)
shelf.close()
optstash.restore()
|
[
"def",
"vpt2",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"lowername",
"=",
"name",
".",
"lower",
"(",
")",
"kwargs",
"=",
"p4util",
".",
"kwargs_lower",
"(",
"kwargs",
")",
"optstash",
"=",
"p4util",
".",
"OptionsState",
"(",
"[",
"'BASIS'",
"]",
")",
"# Option mode of operation- whether vpt2 run in one job or files farmed out",
"if",
"not",
"(",
"'vpt2_mode'",
"in",
"kwargs",
")",
":",
"if",
"(",
"'mode'",
"in",
"kwargs",
")",
":",
"kwargs",
"[",
"'vpt2_mode'",
"]",
"=",
"kwargs",
"[",
"'mode'",
"]",
"del",
"kwargs",
"[",
"'mode'",
"]",
"else",
":",
"kwargs",
"[",
"'vpt2_mode'",
"]",
"=",
"'continuous'",
"# Switches for route through code- S/R or continuous & Psi4 or Cfour gradients",
"isSowReap",
"=",
"True",
"if",
"kwargs",
"[",
"'vpt2_mode'",
"]",
".",
"lower",
"(",
")",
"==",
"'sowreap'",
"else",
"False",
"isC4notP4",
"=",
"bool",
"(",
"re",
".",
"match",
"(",
"'cfour'",
",",
"lowername",
")",
")",
"or",
"bool",
"(",
"re",
".",
"match",
"(",
"'c4-'",
",",
"lowername",
")",
")",
"isC4fully",
"=",
"True",
"if",
"(",
"'c4full'",
"in",
"kwargs",
"and",
"yes",
".",
"match",
"(",
"str",
"(",
"kwargs",
"[",
"'c4full'",
"]",
")",
")",
"and",
"isC4notP4",
"and",
"isSowReap",
")",
"else",
"False",
"# Save submission directory and basis set",
"current_directory",
"=",
"os",
".",
"getcwd",
"(",
")",
"user_basis",
"=",
"core",
".",
"get_global_option",
"(",
"'BASIS'",
")",
"# Open data persistence shelf- vital for sowreap, checkpoint for continuouw",
"shelf",
"=",
"shelve",
".",
"open",
"(",
"current_directory",
"+",
"'/'",
"+",
"os",
".",
"path",
".",
"splitext",
"(",
"core",
".",
"outfile_name",
"(",
")",
")",
"[",
"0",
"]",
"+",
"'.shelf'",
",",
"writeback",
"=",
"True",
")",
"# Cfour keywords to request vpt2 analysis through findif gradients",
"core",
".",
"set_local_option",
"(",
"'CFOUR'",
",",
"'CFOUR_VIBRATION'",
",",
"'FINDIF'",
")",
"core",
".",
"set_local_option",
"(",
"'CFOUR'",
",",
"'CFOUR_FREQ_ALGORITHM'",
",",
"'PARALLEL'",
")",
"core",
".",
"set_local_option",
"(",
"'CFOUR'",
",",
"'CFOUR_ANH_ALGORITHM'",
",",
"'PARALLEL'",
")",
"core",
".",
"set_local_option",
"(",
"'CFOUR'",
",",
"'CFOUR_ANHARMONIC'",
",",
"'VPT2'",
")",
"core",
".",
"set_local_option",
"(",
"'CFOUR'",
",",
"'CFOUR_FD_PROJECT'",
",",
"'OFF'",
")",
"# When a Psi4 method is requested for vpt2, a skeleton of",
"# computations in Cfour is still required to hang the gradients",
"# upon. The skeleton is as cheap as possible (integrals only",
"# & sto-3g) and set up here.",
"if",
"isC4notP4",
":",
"skelname",
"=",
"lowername",
"else",
":",
"skelname",
"=",
"'c4-scf'",
"core",
".",
"set_global_option",
"(",
"'BASIS'",
",",
"'STO-3G'",
")",
"# P4 'c4-scf'/'cfour'CALC_LEVEL lowername # temporary",
"# C4 lowername cfour{} # temporary",
"if",
"'status'",
"not",
"in",
"shelf",
":",
"shelf",
"[",
"'status'",
"]",
"=",
"'initialized'",
"shelf",
"[",
"'linkage'",
"]",
"=",
"os",
".",
"getpid",
"(",
")",
"shelf",
"[",
"'zmat'",
"]",
"=",
"{",
"}",
"# Cfour-generated ZMAT files with finite difference geometries",
"shelf",
"[",
"'fjobarc'",
"]",
"=",
"{",
"}",
"# Cfour- or Psi4-generated ascii files with packaged gradient results",
"shelf",
".",
"sync",
"(",
")",
"else",
":",
"pass",
"# how decide whether to use. keep precedent of intco.dat in mind",
"# Construct and move into directory job scratch / cfour scratch / harm",
"psioh",
"=",
"core",
".",
"IOManager",
".",
"shared_object",
"(",
")",
"psio",
"=",
"core",
".",
"IO",
".",
"shared_object",
"(",
")",
"os",
".",
"chdir",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
")",
"# psi_scratch",
"cfour_tmpdir",
"=",
"kwargs",
"[",
"'path'",
"]",
"if",
"'path'",
"in",
"kwargs",
"else",
"'psi.'",
"+",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"+",
"'.'",
"+",
"psio",
".",
"get_default_namespace",
"(",
")",
"+",
"'.cfour.'",
"+",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"[",
":",
"8",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cfour_tmpdir",
")",
":",
"os",
".",
"mkdir",
"(",
"cfour_tmpdir",
")",
"os",
".",
"chdir",
"(",
"cfour_tmpdir",
")",
"# psi_scratch/cfour",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'harm'",
")",
":",
"os",
".",
"mkdir",
"(",
"'harm'",
")",
"os",
".",
"chdir",
"(",
"'harm'",
")",
"# psi_scratch/cfour/harm",
"psioh",
".",
"set_specific_retention",
"(",
"32",
",",
"True",
")",
"# temporary, to track p4 scratch",
"#shelf['status'] = 'anharm_jobs_sown' # temporary to force backtrack",
"print",
"(",
"'STAT'",
",",
"shelf",
"[",
"'status'",
"]",
")",
"# temporary",
"# Generate the ZMAT input file in scratch",
"with",
"open",
"(",
"'ZMAT'",
",",
"'w'",
")",
"as",
"handle",
":",
"cfour_infile",
"=",
"write_zmat",
"(",
"skelname",
",",
"1",
")",
"handle",
".",
"write",
"(",
"cfour_infile",
")",
"print",
"(",
"'\\n====== Begin ZMAT input for CFOUR ======'",
")",
"print",
"(",
"open",
"(",
"'ZMAT'",
",",
"'r'",
")",
".",
"read",
"(",
")",
")",
"print",
"(",
"'======= End ZMAT input for CFOUR =======\\n'",
")",
"shelf",
"[",
"'genbas'",
"]",
"=",
"open",
"(",
"'GENBAS'",
",",
"'r'",
")",
".",
"read",
"(",
")",
"# Check existing shelf consistent with generated ZMAT, store",
"if",
"(",
"'000-000'",
"in",
"shelf",
"[",
"'zmat'",
"]",
")",
"and",
"(",
"shelf",
"[",
"'zmat'",
"]",
"[",
"'000-000'",
"]",
"!=",
"cfour_infile",
")",
":",
"diff",
"=",
"difflib",
".",
"Differ",
"(",
")",
".",
"compare",
"(",
"shelf",
"[",
"'zmat'",
"]",
"[",
"'000-000'",
"]",
".",
"splitlines",
"(",
")",
",",
"cfour_infile",
".",
"splitlines",
"(",
")",
")",
"raise",
"ValidationError",
"(",
"\"\"\"Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\\n\\n\"\"\"",
"+",
"'\\n'",
".",
"join",
"(",
"list",
"(",
"diff",
")",
")",
")",
"shelf",
"[",
"'zmat'",
"]",
"[",
"'000-000'",
"]",
"=",
"cfour_infile",
"shelf",
".",
"sync",
"(",
")",
"# Reset basis after Cfour skeleton seeded",
"core",
".",
"set_global_option",
"(",
"'BASIS'",
",",
"user_basis",
")",
"if",
"shelf",
"[",
"'status'",
"]",
"==",
"'initialized'",
":",
"p4util",
".",
"banner",
"(",
"' VPT2 Setup: Harmonic '",
")",
"# Generate the displacements that will form the harmonic freq",
"os",
".",
"chdir",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
"+",
"cfour_tmpdir",
"+",
"'/harm'",
")",
"# psi_scratch/cfour/harm",
"with",
"open",
"(",
"'partial.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"run_cfour_module",
"(",
"'xjoda'",
")",
")",
"handle",
".",
"write",
"(",
"run_cfour_module",
"(",
"'xsymcor'",
")",
")",
"# Read the displacements that will form the harmonic freq",
"zmats0N",
"=",
"[",
"'000-'",
"+",
"item",
"[",
"-",
"3",
":",
"]",
"for",
"item",
"in",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"'zmat*'",
")",
")",
"]",
"for",
"zm12",
"in",
"zmats0N",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"with",
"open",
"(",
"'zmat'",
"+",
"zm2",
",",
"'r'",
")",
"as",
"handle",
":",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
"=",
"handle",
".",
"read",
"(",
")",
"shelf",
".",
"sync",
"(",
")",
"core",
".",
"print_out",
"(",
"' CFOUR scratch file %s for %s-%s has been read\\n'",
"%",
"(",
"'zmat'",
"+",
"zm2",
",",
"zm1",
",",
"zm2",
")",
")",
"core",
".",
"print_out",
"(",
"'%s\\n'",
"%",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
")",
"# S/R: Write distributed input files for harmonic freq",
"if",
"isSowReap",
":",
"os",
".",
"chdir",
"(",
"current_directory",
")",
"inputSansMol",
"=",
"p4util",
".",
"format_currentstate_for_input",
"(",
"gradient",
",",
"lowername",
",",
"allButMol",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"for",
"zm12",
"in",
"zmats0N",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"ifile",
"=",
"vpt2_sow_files",
"(",
"zm12",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"isC4notP4",
",",
"isC4fully",
",",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
",",
"inputSansMol",
",",
"shelf",
"[",
"'genbas'",
"]",
")",
"with",
"open",
"(",
"'VPT2-'",
"+",
"zm12",
"+",
"'.in'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"ifile",
")",
"msg",
"=",
"vpt2_instructions",
"(",
"'harmonic'",
",",
"current_directory",
",",
"zmats0N",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"print",
"(",
"msg",
")",
"shelf",
"[",
"'status'",
"]",
"=",
"'harm_jobs_sown'",
"# S/R: Pause for distributed calculations",
"if",
"isSowReap",
":",
"shelf",
".",
"close",
"(",
")",
"return",
"0.0",
"if",
"shelf",
"[",
"'status'",
"]",
"==",
"'harm_jobs_sown'",
":",
"zmats0N",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"==",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"# S/R: Check that distributed calcs all completed correctly",
"if",
"isSowReap",
":",
"msg",
"=",
"vpt2_instructions",
"(",
"'harmonic'",
",",
"current_directory",
",",
"zmats0N",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"isOk",
",",
"msg",
"=",
"sown_jobs_status",
"(",
"current_directory",
",",
"'VPT2'",
",",
"zmats0N",
",",
"reap_job_validate",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"[",
"'CURRENT ENERGY'",
",",
"'CURRENT DIPOLE'",
",",
"'CURRENT GRADIENT'",
"]",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"print",
"(",
"msg",
")",
"if",
"not",
"isOk",
":",
"shelf",
".",
"close",
"(",
")",
"return",
"0.0",
"# Collect all results from gradients forming the harmonic freq",
"for",
"zm12",
"in",
"zmats0N",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"if",
"zm12",
"not",
"in",
"shelf",
"[",
"'fjobarc'",
"]",
":",
"p4util",
".",
"banner",
"(",
"' VPT2 Computation: %s '",
"%",
"(",
"zm12",
")",
")",
"print",
"(",
"' VPT2 Computation: %s '",
"%",
"(",
"zm12",
")",
")",
"fjobarc",
"=",
"vpt2_reaprun_files",
"(",
"zm12",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"isSowReap",
",",
"isC4notP4",
",",
"isC4fully",
",",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
",",
"current_directory",
",",
"psioh",
".",
"get_default_path",
"(",
")",
",",
"cfour_tmpdir",
",",
"lowername",
",",
"kwargs",
")",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
"=",
"fjobarc",
"shelf",
".",
"sync",
"(",
")",
"shelf",
"[",
"'status'",
"]",
"=",
"'harm_jobs_reaped'",
"if",
"shelf",
"[",
"'status'",
"]",
"==",
"'harm_jobs_reaped'",
":",
"zmats0N",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"==",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"p4util",
".",
"banner",
"(",
"' VPT2 Results: Harmonic '",
")",
"# Process the gradients into harmonic freq",
"os",
".",
"chdir",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
"+",
"cfour_tmpdir",
"+",
"'/harm'",
")",
"# psi_scratch/cfour/harm",
"harmout",
"=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"harmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"for",
"zm12",
"in",
"zmats0N",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"with",
"open",
"(",
"'FJOBARC'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
")",
"harmout",
"+=",
"run_cfour_module",
"(",
"'xja2fja'",
")",
"harmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"shutil",
".",
"move",
"(",
"'FJOBARC'",
",",
"'fja.'",
"+",
"zm12",
")",
"try",
":",
"os",
".",
"remove",
"(",
"'zmat'",
"+",
"zm2",
")",
"except",
"OSError",
":",
"pass",
"harmout",
"+=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"harmout",
"+=",
"run_cfour_module",
"(",
"'xcubic'",
")",
"core",
".",
"print_out",
"(",
"harmout",
")",
"with",
"open",
"(",
"'harm.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"harmout",
")",
"# Generate displacements along harmonic normal modes",
"zmatsN0",
"=",
"[",
"item",
"[",
"-",
"3",
":",
"]",
"for",
"item",
"in",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"'zmat*'",
")",
")",
"]",
"os",
".",
"chdir",
"(",
"'..'",
")",
"# psi_scratch/cfour",
"for",
"zm1",
"in",
"zmatsN0",
":",
"zm12",
"=",
"zm1",
"+",
"'-000'",
"with",
"open",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
"+",
"cfour_tmpdir",
"+",
"'/harm/zmat'",
"+",
"zm1",
",",
"'r'",
")",
"as",
"handle",
":",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
"=",
"handle",
".",
"read",
"(",
")",
"shelf",
".",
"sync",
"(",
")",
"core",
".",
"print_out",
"(",
"' CFOUR scratch file %s for %s has been read\\n'",
"%",
"(",
"'zmat'",
"+",
"zm1",
",",
"zm12",
")",
")",
"core",
".",
"print_out",
"(",
"'%s\\n'",
"%",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
")",
"# Collect displacements along the normal coordinates generated by the harmonic freq.",
"# Further harmonic freqs are to be run at each of these to produce quartic force field.",
"# To carry these out, generate displacements for findif by gradient at each displacement.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"zm1",
")",
":",
"shutil",
".",
"rmtree",
"(",
"zm1",
")",
"os",
".",
"mkdir",
"(",
"zm1",
")",
"os",
".",
"chdir",
"(",
"zm1",
")",
"# psi_scratch/cfour/004",
"with",
"open",
"(",
"'ZMAT'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
")",
"shutil",
".",
"copy2",
"(",
"'../harm/GENBAS'",
",",
"'GENBAS'",
")",
"# ln -s $ecpdir/ECPDATA $j/ECPDATA",
"with",
"open",
"(",
"'partial.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"run_cfour_module",
"(",
"'xjoda'",
")",
")",
"handle",
".",
"write",
"(",
"run_cfour_module",
"(",
"'xsymcor'",
")",
")",
"# Read the displacements that will form the anharmonic freq",
"zmatsNN",
"=",
"[",
"item",
"[",
"-",
"3",
":",
"]",
"for",
"item",
"in",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"'zmat*'",
")",
")",
"]",
"for",
"zm2",
"in",
"zmatsNN",
":",
"zm12",
"=",
"zm1",
"+",
"'-'",
"+",
"zm2",
"with",
"open",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
"+",
"cfour_tmpdir",
"+",
"'/'",
"+",
"zm1",
"+",
"'/zmat'",
"+",
"zm2",
",",
"'r'",
")",
"as",
"handle",
":",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
"=",
"handle",
".",
"read",
"(",
")",
"shelf",
".",
"sync",
"(",
")",
"core",
".",
"print_out",
"(",
"' CFOUR scratch file %s for %s has been read\\n'",
"%",
"(",
"'zmat'",
"+",
"zm2",
",",
"zm12",
")",
")",
"core",
".",
"print_out",
"(",
"'%s\\n'",
"%",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
")",
"os",
".",
"chdir",
"(",
"'..'",
")",
"# psi_scratch/cfour",
"zmatsNN",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"!=",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"# S/R: Write distributed input files for anharmonic freq",
"if",
"isSowReap",
":",
"os",
".",
"chdir",
"(",
"current_directory",
")",
"inputSansMol",
"=",
"p4util",
".",
"format_currentstate_for_input",
"(",
"gradient",
",",
"lowername",
",",
"allButMol",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"for",
"zm12",
"in",
"zmatsNN",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"ifile",
"=",
"vpt2_sow_files",
"(",
"zm12",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"isC4notP4",
",",
"isC4fully",
",",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
",",
"inputSansMol",
",",
"shelf",
"[",
"'genbas'",
"]",
")",
"# GENBAS needed here",
"with",
"open",
"(",
"'VPT2-'",
"+",
"zm12",
"+",
"'.in'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"ifile",
")",
"msg",
"=",
"vpt2_instructions",
"(",
"'anharmonic'",
",",
"current_directory",
",",
"zmatsNN",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"print",
"(",
"msg",
")",
"shelf",
"[",
"'status'",
"]",
"=",
"'anharm_jobs_sown'",
"# S/R: Pause for distributed calculations",
"if",
"isSowReap",
":",
"shelf",
".",
"close",
"(",
")",
"return",
"0.0",
"if",
"shelf",
"[",
"'status'",
"]",
"==",
"'anharm_jobs_sown'",
":",
"zmatsNN",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"!=",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"# S/R: Check that distributed calcs all completed correctly",
"if",
"isSowReap",
":",
"msg",
"=",
"vpt2_instructions",
"(",
"'anharmonic'",
",",
"current_directory",
",",
"zmatsNN",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"isOk",
",",
"msg",
"=",
"sown_jobs_status",
"(",
"current_directory",
",",
"'VPT2'",
",",
"zmatsNN",
",",
"reap_job_validate",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"[",
"'CURRENT ENERGY'",
",",
"'CURRENT DIPOLE'",
",",
"'CURRENT GRADIENT'",
"]",
")",
"core",
".",
"print_out",
"(",
"msg",
")",
"print",
"(",
"msg",
")",
"if",
"not",
"isOk",
":",
"shelf",
".",
"close",
"(",
")",
"return",
"0.0",
"# Collect all results from gradients forming the anharmonic freq",
"for",
"zm12",
"in",
"zmatsNN",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"if",
"zm12",
"not",
"in",
"shelf",
"[",
"'fjobarc'",
"]",
":",
"p4util",
".",
"banner",
"(",
"' VPT2 Computation: %s '",
"%",
"(",
"zm12",
")",
")",
"print",
"(",
"' VPT2 Computation: %s '",
"%",
"(",
"zm12",
")",
")",
"fjobarc",
"=",
"vpt2_reaprun_files",
"(",
"zm12",
",",
"shelf",
"[",
"'linkage'",
"]",
",",
"isSowReap",
",",
"isC4notP4",
",",
"isC4fully",
",",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm12",
"]",
",",
"current_directory",
",",
"psioh",
".",
"get_default_path",
"(",
")",
",",
"cfour_tmpdir",
",",
"lowername",
",",
"kwargs",
")",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
"=",
"fjobarc",
"shelf",
".",
"sync",
"(",
")",
"shelf",
"[",
"'status'",
"]",
"=",
"'anharm_jobs_reaped'",
"if",
"shelf",
"[",
"'status'",
"]",
"==",
"'anharm_jobs_reaped'",
":",
"zmats0N",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"==",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"zmatsN0",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"!=",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"==",
"'000'",
")",
"]",
"zmatsNN",
"=",
"[",
"item",
"for",
"item",
"in",
"sorted",
"(",
"shelf",
"[",
"'zmat'",
"]",
".",
"keys",
"(",
")",
")",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"!=",
"'000'",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
"p4util",
".",
"banner",
"(",
"' VPT2 Results: Harmonic '",
")",
"# Process the gradients into harmonic freq",
"os",
".",
"chdir",
"(",
"psioh",
".",
"get_default_path",
"(",
")",
"+",
"cfour_tmpdir",
")",
"# psi_scratch/cfour",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"'anharm'",
")",
":",
"shutil",
".",
"rmtree",
"(",
"'anharm'",
")",
"os",
".",
"mkdir",
"(",
"'anharm'",
")",
"os",
".",
"chdir",
"(",
"'harm'",
")",
"# psi_scratch/cfour/harm",
"run_cfour_module",
"(",
"'xclean'",
")",
"anharmout",
"=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"for",
"zm12",
"in",
"zmats0N",
":",
"zm1",
",",
"zm2",
"=",
"zm12",
".",
"split",
"(",
"'-'",
")",
"with",
"open",
"(",
"'FJOBARC'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xja2fja'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"shutil",
".",
"move",
"(",
"'FJOBARC'",
",",
"'fja.'",
"+",
"zm12",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xcubic'",
")",
"core",
".",
"print_out",
"(",
"anharmout",
")",
"with",
"open",
"(",
"'harm.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"anharmout",
")",
"# Process the gradients into harmonic freq at each normco displaced point",
"os",
".",
"chdir",
"(",
"'..'",
")",
"# psi_scratch/cfour",
"for",
"zm11",
"in",
"zmatsN0",
":",
"zm1",
"=",
"zm11",
"[",
":",
"3",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"zm1",
")",
":",
"shutil",
".",
"rmtree",
"(",
"zm1",
")",
"os",
".",
"mkdir",
"(",
"zm1",
")",
"os",
".",
"chdir",
"(",
"zm1",
")",
"# psi_scratch/cfour/004",
"run_cfour_module",
"(",
"'xclean'",
")",
"with",
"open",
"(",
"'ZMAT'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'zmat'",
"]",
"[",
"zm11",
"]",
")",
"shutil",
".",
"copy2",
"(",
"'../harm/GENBAS'",
",",
"'GENBAS'",
")",
"anharmout",
"=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"for",
"zm22",
"in",
"[",
"item",
"for",
"item",
"in",
"zmatsNN",
"if",
"(",
"item",
"[",
":",
"3",
"]",
"==",
"zm1",
"and",
"item",
"[",
"-",
"3",
":",
"]",
"!=",
"'000'",
")",
"]",
":",
"zm2",
"=",
"zm22",
"[",
"-",
"3",
":",
"]",
"zm12",
"=",
"zm1",
"+",
"'-'",
"+",
"zm2",
"print",
"(",
"zm12",
")",
"with",
"open",
"(",
"'FJOBARC'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xja2fja'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xsymcor'",
")",
"shutil",
".",
"move",
"(",
"'FJOBARC'",
",",
"'fja.'",
"+",
"zm12",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xjoda'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xja2fja'",
")",
"with",
"open",
"(",
"'FJOBARC'",
",",
"'r'",
")",
"as",
"handle",
":",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm11",
"]",
"=",
"handle",
".",
"read",
"(",
")",
"shelf",
".",
"sync",
"(",
")",
"core",
".",
"print_out",
"(",
"anharmout",
")",
"with",
"open",
"(",
"'partial.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"anharmout",
")",
"os",
".",
"chdir",
"(",
"'..'",
")",
"# psi_scratch/cfour",
"# Process the harmonic freqs at normco displacements into anharmonic freq",
"p4util",
".",
"banner",
"(",
"' VPT2 Results: Anharmonic '",
")",
"os",
".",
"chdir",
"(",
"'anharm'",
")",
"# psi_scratch/cfour/anharm",
"shutil",
".",
"copy2",
"(",
"'../harm/JOBARC'",
",",
"'JOBARC'",
")",
"shutil",
".",
"copy2",
"(",
"'../harm/JAINDX'",
",",
"'JAINDX'",
")",
"for",
"zm12",
"in",
"zmatsN0",
":",
"with",
"open",
"(",
"'FJOBARC'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"shelf",
"[",
"'fjobarc'",
"]",
"[",
"zm12",
"]",
")",
"anharmout",
"=",
"run_cfour_module",
"(",
"'xja2fja'",
")",
"anharmout",
"+=",
"run_cfour_module",
"(",
"'xcubic'",
")",
"shutil",
".",
"move",
"(",
"'FJOBARC'",
",",
"'fja.'",
"+",
"zm12",
")",
"core",
".",
"print_out",
"(",
"anharmout",
")",
"with",
"open",
"(",
"'anharm.out'",
",",
"'w'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"anharmout",
")",
"shelf",
"[",
"'status'",
"]",
"=",
"'vpt2_completed'",
"# Finish up",
"os",
".",
"chdir",
"(",
"current_directory",
")",
"shelf",
".",
"close",
"(",
")",
"optstash",
".",
"restore",
"(",
")"
] |
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/procrouting/wrappers_cfour.py#L84-L496
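A hedged usage sketch inside a Psi4 input script; the exact method string and available keywords depend on the Psi4/CFOUR build, so treat these values as examples only.

```python
import psi4
from psi4.driver.procrouting.wrappers_cfour import vpt2

psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
""")
psi4.set_options({"basis": "cc-pvdz"})
# 'continuous' runs every displaced gradient within this one job; 'sowreap'
# instead writes VPT2-*.in files to be farmed out and reaped on a later pass.
vpt2("c4-scf", vpt2_mode="continuous")
```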
|
||
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
CondCore/Utilities/scripts/uploadConditions.py
|
python
|
HTTP.discardCookies
|
(self)
|
Discards cookies.
|
Discards cookies.
|
[
"Discards",
"cookies",
"."
] |
def discardCookies(self):
'''Discards cookies.
'''
self.curl.setopt(self.curl.COOKIELIST, 'ALL')
|
[
"def",
"discardCookies",
"(",
"self",
")",
":",
"self",
".",
"curl",
".",
"setopt",
"(",
"self",
".",
"curl",
".",
"COOKIELIST",
",",
"'ALL'",
")"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/CondCore/Utilities/scripts/uploadConditions.py#L248-L251
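The method wraps a single pycurl option; a standalone equivalent looks like this (assumes pycurl is installed):

```python
import pycurl

curl = pycurl.Curl()
curl.setopt(pycurl.COOKIEFILE, "")     # enable the in-memory cookie engine
# ... perform requests that accumulate session cookies ...
curl.setopt(pycurl.COOKIELIST, "ALL")  # discard every stored cookie
```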
|
||
SFTtech/openage
|
d6a08c53c48dc1e157807471df92197f6ca9e04d
|
openage/util/filelike/fifo.py
|
python
|
FIFO.seek
|
(self, offset, whence=os.SEEK_SET)
|
Unsupported because this is a FIFO.
|
Unsupported because this is a FIFO.
|
[
"Unsupported",
"because",
"this",
"is",
"a",
"FIFO",
"."
] |
def seek(self, offset, whence=os.SEEK_SET):
"""
Unsupported because this is a FIFO.
"""
del offset, whence # unused
raise UnsupportedOperation("unseekable stream")
|
[
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"del",
"offset",
",",
"whence",
"# unused",
"raise",
"UnsupportedOperation",
"(",
"\"unseekable stream\"",
")"
] |
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/util/filelike/fifo.py#L51-L57
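A sketch of the documented behavior; the zero-argument FIFO() constructor is an assumption here:

```python
from io import UnsupportedOperation
from openage.util.filelike.fifo import FIFO

fifo = FIFO()          # assumed zero-argument constructor
try:
    fifo.seek(0)
except UnsupportedOperation as exc:
    print(exc)         # "unseekable stream"
```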
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/tf_upgrade_v2.py
|
python
|
_contrib_layers_xavier_initializer_transformer
|
(
parent, node, full_name, name, logs)
|
return node
|
Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
|
Updates references to contrib.layers.xavier_initializer.
|
[
"Updates",
"references",
"to",
"contrib",
".",
"layers",
".",
"xavier_initializer",
"."
] |
def _contrib_layers_xavier_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
"""
def _get_distribution(old_value):
"""Returns an AST matching the following:
("uniform" if (old_value) else "truncated_normal")
"""
dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = dist.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.base.formatting.set(dist, "prefix", "(")
pasta.base.formatting.set(dist, "suffix", ")")
return dist
found_distribution = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "uniform":
found_distribution = True
keyword_arg.arg = "distribution"
old_value = keyword_arg.value
new_value = _get_distribution(keyword_arg.value)
pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
new_keywords = []
scale = pasta.parse("1.0")
new_keywords.append(ast.keyword(arg="scale", value=scale))
mode = pasta.parse("\"fan_avg\"")
new_keywords.append(ast.keyword(arg="mode", value=mode))
if len(node.args) >= 1:
found_distribution = True
dist = _get_distribution(node.args[0])
new_keywords.append(ast.keyword(arg="distribution", value=dist))
if not found_distribution:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
uniform_dist = pasta.parse("\"uniform\"")
new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
if len(node.args) >= 2:
new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
if len(node.args) >= 3:
new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
node.args = []
node.keywords = new_keywords + node.keywords
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers xavier initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
|
[
"def",
"_contrib_layers_xavier_initializer_transformer",
"(",
"parent",
",",
"node",
",",
"full_name",
",",
"name",
",",
"logs",
")",
":",
"def",
"_get_distribution",
"(",
"old_value",
")",
":",
"\"\"\"Returns an AST matching the following:\n (\"uniform\" if (old_value) else \"truncated_normal\")\n \"\"\"",
"dist",
"=",
"pasta",
".",
"parse",
"(",
"\"\\\"uniform\\\" if old_value else \\\"truncated_normal\\\"\"",
")",
"ifexpr",
"=",
"dist",
".",
"body",
"[",
"0",
"]",
".",
"value",
"pasta",
".",
"ast_utils",
".",
"replace_child",
"(",
"ifexpr",
",",
"ifexpr",
".",
"test",
",",
"old_value",
")",
"pasta",
".",
"base",
".",
"formatting",
".",
"set",
"(",
"dist",
",",
"\"prefix\"",
",",
"\"(\"",
")",
"pasta",
".",
"base",
".",
"formatting",
".",
"set",
"(",
"dist",
",",
"\"suffix\"",
",",
"\")\"",
")",
"return",
"dist",
"found_distribution",
"=",
"False",
"for",
"keyword_arg",
"in",
"node",
".",
"keywords",
":",
"if",
"keyword_arg",
".",
"arg",
"==",
"\"uniform\"",
":",
"found_distribution",
"=",
"True",
"keyword_arg",
".",
"arg",
"=",
"\"distribution\"",
"old_value",
"=",
"keyword_arg",
".",
"value",
"new_value",
"=",
"_get_distribution",
"(",
"keyword_arg",
".",
"value",
")",
"pasta",
".",
"ast_utils",
".",
"replace_child",
"(",
"keyword_arg",
",",
"old_value",
",",
"new_value",
")",
"pasta",
".",
"base",
".",
"formatting",
".",
"set",
"(",
"keyword_arg",
".",
"value",
",",
"\"prefix\"",
",",
"\"(\"",
")",
"pasta",
".",
"base",
".",
"formatting",
".",
"set",
"(",
"keyword_arg",
".",
"value",
",",
"\"suffix\"",
",",
"\")\"",
")",
"new_keywords",
"=",
"[",
"]",
"scale",
"=",
"pasta",
".",
"parse",
"(",
"\"1.0\"",
")",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"scale\"",
",",
"value",
"=",
"scale",
")",
")",
"mode",
"=",
"pasta",
".",
"parse",
"(",
"\"\\\"fan_avg\\\"\"",
")",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"mode\"",
",",
"value",
"=",
"mode",
")",
")",
"if",
"len",
"(",
"node",
".",
"args",
")",
">=",
"1",
":",
"found_distribution",
"=",
"True",
"dist",
"=",
"_get_distribution",
"(",
"node",
".",
"args",
"[",
"0",
"]",
")",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"distribution\"",
",",
"value",
"=",
"dist",
")",
")",
"if",
"not",
"found_distribution",
":",
"# Parse with pasta instead of ast to avoid emitting a spurious trailing \\n.",
"uniform_dist",
"=",
"pasta",
".",
"parse",
"(",
"\"\\\"uniform\\\"\"",
")",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"distribution\"",
",",
"value",
"=",
"uniform_dist",
")",
")",
"if",
"len",
"(",
"node",
".",
"args",
")",
">=",
"2",
":",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"seed\"",
",",
"value",
"=",
"node",
".",
"args",
"[",
"1",
"]",
")",
")",
"if",
"len",
"(",
"node",
".",
"args",
")",
">=",
"3",
":",
"new_keywords",
".",
"append",
"(",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"dtype\"",
",",
"value",
"=",
"node",
".",
"args",
"[",
"2",
"]",
")",
")",
"node",
".",
"args",
"=",
"[",
"]",
"node",
".",
"keywords",
"=",
"new_keywords",
"+",
"node",
".",
"keywords",
"lineno",
"=",
"node",
".",
"func",
".",
"value",
".",
"lineno",
"col_offset",
"=",
"node",
".",
"func",
".",
"value",
".",
"col_offset",
"node",
".",
"func",
".",
"value",
"=",
"ast_edits",
".",
"full_name_node",
"(",
"\"tf.compat.v1.keras.initializers\"",
")",
"node",
".",
"func",
".",
"value",
".",
"lineno",
"=",
"lineno",
"node",
".",
"func",
".",
"value",
".",
"col_offset",
"=",
"col_offset",
"node",
".",
"func",
".",
"attr",
"=",
"\"VarianceScaling\"",
"logs",
".",
"append",
"(",
"(",
"ast_edits",
".",
"INFO",
",",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"\"Changing tf.contrib.layers xavier initializer\"",
"\" to a tf.compat.v1.keras.initializers.VarianceScaling and\"",
"\" converting arguments.\\n\"",
")",
")",
"return",
"node"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/tf_upgrade_v2.py#L2161-L2236
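To illustrate the rewrite this transformer performs (rather than invoke the transformer itself), here is the before/after shape of a converted call; the argument values are hypothetical:

```python
import tensorflow as tf

# Before (TF1 source the transformer matches):
#   init = tf.contrib.layers.xavier_initializer(uniform=False, seed=42)
# After, as produced by the transformer:
init = tf.compat.v1.keras.initializers.VarianceScaling(
    scale=1.0, mode="fan_avg",
    distribution=("uniform" if False else "truncated_normal"),
    seed=42)
```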
|
|
MythTV/mythtv
|
d282a209cb8be85d036f85a62a8ec971b67d45f4
|
mythtv/bindings/python/MythTV/database.py
|
python
|
DBCache.getStorageGroup
|
(self, groupname=None, hostname=None)
|
obj.getStorageGroup(groupname=None, hostname=None)
-> tuple of StorageGroup objects
groupname and hostname can be used as optional filters
|
obj.getStorageGroup(groupname=None, hostname=None)
-> tuple of StorageGroup objects
groupname and hostname can be used as optional filters
|
[
"obj",
".",
"getStorageGroup",
"(",
"groupname",
"=",
"None",
"hostname",
"=",
"None",
")",
"-",
">",
"tuple",
"of",
"StorageGroup",
"objects",
"groupname",
"and",
"hostname",
"can",
"be",
"used",
"as",
"optional",
"filters"
] |
def getStorageGroup(self, groupname=None, hostname=None):
"""
obj.getStorageGroup(groupname=None, hostname=None)
-> tuple of StorageGroup objects
groupname and hostname can be used as optional filters
"""
where = []
wheredat = []
if groupname:
where.append("groupname=?")
wheredat.append(groupname)
if hostname:
where.append("hostname=?")
wheredat.append(hostname)
with self.cursor(self.log) as cursor:
if len(where):
where = 'WHERE '+' AND '.join(where)
cursor.execute("""SELECT * FROM storagegroup %s
ORDER BY id""" % where, wheredat)
else:
cursor.execute("""SELECT * FROM storagegroup
ORDER BY id""")
for row in cursor:
yield StorageGroup.fromRaw(row, self)
|
[
"def",
"getStorageGroup",
"(",
"self",
",",
"groupname",
"=",
"None",
",",
"hostname",
"=",
"None",
")",
":",
"where",
"=",
"[",
"]",
"wheredat",
"=",
"[",
"]",
"if",
"groupname",
":",
"where",
".",
"append",
"(",
"\"groupname=?\"",
")",
"wheredat",
".",
"append",
"(",
"groupname",
")",
"if",
"hostname",
":",
"where",
".",
"append",
"(",
"\"hostname=?\"",
")",
"wheredat",
".",
"append",
"(",
"hostname",
")",
"with",
"self",
".",
"cursor",
"(",
"self",
".",
"log",
")",
"as",
"cursor",
":",
"if",
"len",
"(",
"where",
")",
":",
"where",
"=",
"'WHERE '",
"+",
"' AND '",
".",
"join",
"(",
"where",
")",
"cursor",
".",
"execute",
"(",
"\"\"\"SELECT * FROM storagegroup %s\n ORDER BY id\"\"\"",
"%",
"where",
",",
"wheredat",
")",
"else",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"SELECT * FROM storagegroup\n ORDER BY id\"\"\"",
")",
"for",
"row",
"in",
"cursor",
":",
"yield",
"StorageGroup",
".",
"fromRaw",
"(",
"row",
",",
"self",
")"
] |
https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/bindings/python/MythTV/database.py#L1357-L1381
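A hedged usage sketch; it assumes a reachable MythTV database, and the hostname is hypothetical. getStorageGroup is a generator, so iterate (or wrap in tuple()) to materialize results.

```python
from MythTV import MythDB

db = MythDB()
for group in db.getStorageGroup(hostname="backend1"):  # hypothetical host
    print(group.groupname, group.dirname)
```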
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_gdi.py
|
python
|
GraphicsPen.__init__
|
(self, *args, **kwargs)
|
__init__(self) -> GraphicsPen
A wx.GraphicsPen is a native representation of a pen. It is used for
stroking a path on a `wx.GraphicsContext`. The contents are specific and
private to the respective renderer. The only way to get a valid instance
is via a CreatePen call on the graphics context or the renderer
instance.
|
__init__(self) -> GraphicsPen
|
[
"__init__",
"(",
"self",
")",
"-",
">",
"GraphicsPen"
] |
def __init__(self, *args, **kwargs):
"""
__init__(self) -> GraphicsPen
A wx.GraphicsPen is a native representation of a pen. It is used for
stroking a path on a `wx.GraphicsContext`. The contents are specific and
private to the respective renderer. The only way to get a valid instance
is via a CreatePen call on the graphics context or the renderer
instance.
"""
_gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_gdi_",
".",
"GraphicsPen_swiginit",
"(",
"self",
",",
"_gdi_",
".",
"new_GraphicsPen",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L5498-L5508
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/learning_rate_decay.py
|
python
|
natural_exp_decay
|
(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None)
|
return decayed_lr
|
Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires an `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
decay_step)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
decay_step))
```
Example: decay exponentially with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
|
Applies natural exponential decay to the initial learning rate.
|
[
"Applies",
"natural",
"exponential",
"decay",
"to",
"the",
"initial",
"learning",
"rate",
"."
] |
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires an `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
decay_step)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
decay_step))
```
Example: decay exponentially with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
natural_exp_rate = math_ops.exp(math_ops.negative(decay_rate))
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate,
decay_steps,
natural_exp_rate,
staircase=staircase,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
|
[
"def",
"natural_exp_decay",
"(",
"learning_rate",
",",
"global_step",
",",
"decay_steps",
",",
"decay_rate",
",",
"staircase",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"natural_exp_rate",
"=",
"math_ops",
".",
"exp",
"(",
"math_ops",
".",
"negative",
"(",
"decay_rate",
")",
")",
"decayed_lr",
"=",
"learning_rate_schedule",
".",
"ExponentialDecay",
"(",
"learning_rate",
",",
"decay_steps",
",",
"natural_exp_rate",
",",
"staircase",
"=",
"staircase",
",",
"name",
"=",
"name",
")",
"if",
"not",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"decayed_lr",
"=",
"decayed_lr",
"(",
"global_step",
")",
"else",
":",
"decayed_lr",
"=",
"functools",
".",
"partial",
"(",
"decayed_lr",
",",
"global_step",
")",
"return",
"decayed_lr"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/learning_rate_decay.py#L284-L368
|
|
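The continuous-case formula quoted in the docstring above, evaluated numerically with NumPy and assumed example values (no TensorFlow needed); a minimal sketch, not the library implementation.

# decayed = lr * exp(-decay_rate * global_step / decay_steps), continuous case.
import numpy as np

learning_rate, decay_rate, decay_steps = 0.1, 0.5, 5   # assumed example values
for global_step in (0, 5, 10):
    decayed = learning_rate * np.exp(-decay_rate * global_step / decay_steps)
    print(global_step, decayed)   # 0.1, then 0.1*e^-0.5 ~= 0.0607, then 0.1*e^-1 ~= 0.0368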
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/binhex.py
|
python
|
hexbin
|
(inp, out)
|
(infilename, outfilename) - Decode binhexed file
|
(infilename, outfilename) - Decode binhexed file
|
[
"(",
"infilename",
"outfilename",
")",
"-",
"Decode",
"binhexed",
"file"
] |
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
|
[
"def",
"hexbin",
"(",
"inp",
",",
"out",
")",
":",
"ifp",
"=",
"HexBin",
"(",
"inp",
")",
"finfo",
"=",
"ifp",
".",
"FInfo",
"if",
"not",
"out",
":",
"out",
"=",
"ifp",
".",
"FName",
"ofp",
"=",
"open",
"(",
"out",
",",
"'wb'",
")",
"# XXXX Do translation on non-mac systems",
"while",
"1",
":",
"d",
"=",
"ifp",
".",
"read",
"(",
"128000",
")",
"if",
"not",
"d",
":",
"break",
"ofp",
".",
"write",
"(",
"d",
")",
"ofp",
".",
"close",
"(",
")",
"ifp",
".",
"close_data",
"(",
")",
"d",
"=",
"ifp",
".",
"read_rsrc",
"(",
"128000",
")",
"if",
"d",
":",
"ofp",
"=",
"openrsrc",
"(",
"out",
",",
"'wb'",
")",
"ofp",
".",
"write",
"(",
"d",
")",
"while",
"1",
":",
"d",
"=",
"ifp",
".",
"read_rsrc",
"(",
"128000",
")",
"if",
"not",
"d",
":",
"break",
"ofp",
".",
"write",
"(",
"d",
")",
"ofp",
".",
"close",
"(",
")",
"ifp",
".",
"close",
"(",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/binhex.py#L472-L498
|
||
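For context, a one-line call sketch of the hexbin helper above; the file names are invented, and the binhex module was removed from the standard library in Python 3.11, so this only runs on older interpreters.

import binhex

# Decodes the data fork to 'archive.out', then the resource fork if present.
binhex.hexbin('archive.hqx', 'archive.out')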
apache/kudu
|
90895ce76590f10730ad7aac3613b69d89ff5422
|
src/kudu/scripts/dump_breakpad_symbols.py
|
python
|
is_elf_file
|
(path)
|
return is_regular_file(path) and 'ELF' in magic.from_file(path)
|
Check whether 'path' is an ELF file.
|
Check whether 'path' is an ELF file.
|
[
"Check",
"whether",
"path",
"is",
"an",
"ELF",
"file",
"."
] |
def is_elf_file(path):
"""Check whether 'path' is an ELF file."""
return is_regular_file(path) and 'ELF' in magic.from_file(path)
|
[
"def",
"is_elf_file",
"(",
"path",
")",
":",
"return",
"is_regular_file",
"(",
"path",
")",
"and",
"'ELF'",
"in",
"magic",
".",
"from_file",
"(",
"path",
")"
] |
https://github.com/apache/kudu/blob/90895ce76590f10730ad7aac3613b69d89ff5422/src/kudu/scripts/dump_breakpad_symbols.py#L166-L168
|
|
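A small sketch of the magic-based check above, assuming the python-magic package (whose from_file() the record uses) is installed; the path is an example, not from the record.

import magic

path = '/bin/ls'                      # assumed example path
print(magic.from_file(path))          # e.g. "ELF 64-bit LSB executable, ..."
print('ELF' in magic.from_file(path)) # the record's actual test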
ucsb-seclab/difuze
|
bb59a12ff87ad5ae45d9c60e349891bf80d72877
|
helper_scripts/components/driver_linker.py
|
python
|
DriverLinker.perform
|
(self)
|
p = subprocess.Popen(self.dr_link_bin + " " + self.llvm_bc_out + " " + str(self.chipset_numer), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
(stdout, stderr) = p.communicate()
|
p = subprocess.Popen(self.dr_link_bin + " " + self.llvm_bc_out + " " + str(self.chipset_numer), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
(stdout, stderr) = p.communicate()
|
[
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"dr_link_bin",
"+",
"+",
"self",
".",
"llvm_bc_out",
"+",
"+",
"str",
"(",
"self",
".",
"chipset_numer",
")",
"stdout",
"=",
"subprocess",
".",
"PIPE",
"stderr",
"=",
"subprocess",
".",
"PIPE",
"shell",
"=",
"True",
")",
"(",
"stdout",
"stderr",
")",
"=",
"p",
".",
"communicate",
"()"
] |
def perform(self):
log_info("Running dr_linker. This might take time. Please wait.")
cmd_to_run = self.dr_link_bin + " " + self.llvm_bc_out + " " + str(self.chipset_numer)
returncode = os.system(cmd_to_run)
'''p = subprocess.Popen(self.dr_link_bin + " " + self.llvm_bc_out + " " + str(self.chipset_numer), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
(stdout, stderr) = p.communicate()'''
log_info("dr_linker finished execution.")
if returncode == 0:
log_info("Running llvm-link to generate the final linked bitcode file.")
return _process_dir(self.llvm_bc_out)
else:
log_error("Error occurred while executing:", cmd_to_run)
return False
|
[
"def",
"perform",
"(",
"self",
")",
":",
"log_info",
"(",
"\"Running dr_linker. This might take time. Please wait.\"",
")",
"cmd_to_run",
"=",
"self",
".",
"dr_link_bin",
"+",
"\" \"",
"+",
"self",
".",
"llvm_bc_out",
"+",
"\" \"",
"+",
"str",
"(",
"self",
".",
"chipset_numer",
")",
"returncode",
"=",
"os",
".",
"system",
"(",
"cmd_to_run",
")",
"log_info",
"(",
"\"dr_linker finished execution.\"",
")",
"if",
"returncode",
"==",
"0",
":",
"log_info",
"(",
"\"Running llvm-link to generate the final linked bitcode file.\"",
")",
"return",
"_process_dir",
"(",
"self",
".",
"llvm_bc_out",
")",
"else",
":",
"log_error",
"(",
"\"Error occurred while executing:\"",
",",
"cmd_to_run",
")",
"return",
"False"
] |
https://github.com/ucsb-seclab/difuze/blob/bb59a12ff87ad5ae45d9c60e349891bf80d72877/helper_scripts/components/driver_linker.py#L34-L47
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/ipython/py3/IPython/core/interactiveshell.py
|
python
|
InteractiveShell.drop_by_id
|
(self, variables)
|
Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
|
Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
|
[
"Remove",
"a",
"dict",
"of",
"variables",
"from",
"the",
"user",
"namespace",
"if",
"they",
"are",
"the",
"same",
"as",
"the",
"values",
"in",
"the",
"dictionary",
".",
"This",
"is",
"intended",
"for",
"use",
"by",
"extensions",
":",
"variables",
"that",
"they",
"ve",
"added",
"can",
"be",
"taken",
"back",
"out",
"if",
"they",
"are",
"unloaded",
"without",
"removing",
"any",
"that",
"the",
"user",
"has",
"overwritten",
".",
"Parameters",
"----------",
"variables",
":",
"dict",
"A",
"dictionary",
"mapping",
"object",
"names",
"(",
"as",
"strings",
")",
"to",
"the",
"objects",
"."
] |
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
|
[
"def",
"drop_by_id",
"(",
"self",
",",
"variables",
")",
":",
"for",
"name",
",",
"obj",
"in",
"variables",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"self",
".",
"user_ns",
"and",
"self",
".",
"user_ns",
"[",
"name",
"]",
"is",
"obj",
":",
"del",
"self",
".",
"user_ns",
"[",
"name",
"]",
"self",
".",
"user_ns_hidden",
".",
"pop",
"(",
"name",
",",
"None",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/interactiveshell.py#L1600-L1616
|
||
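A usage sketch for the drop_by_id record above: an extension injects names on load and retracts them on unload without clobbering user rebindings. Here `ip` is an assumed, already-running InteractiveShell instance, and the dict contents are illustrative only.

# `ip` is an assumed InteractiveShell; `injected` is made up for illustration.
injected = {'answer': 42, 'helper': print}
ip.push(injected)             # extension load: expose the names
# ... the user may rebind 'answer' to something else meanwhile ...
ip.drop_by_id(injected)       # unload: removes a name only if it still
                              # points at the exact object we injected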
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/configprovider.py
|
python
|
ScopedConfigProvider.provide
|
(self)
|
return scoped_config.get(self._config_var_name)
|
Provide a value from a config file property.
|
Provide a value from a config file property.
|
[
"Provide",
"a",
"value",
"from",
"a",
"config",
"file",
"property",
"."
] |
def provide(self):
"""Provide a value from a config file property."""
scoped_config = self._session.get_scoped_config()
if isinstance(self._config_var_name, tuple):
section_config = scoped_config.get(self._config_var_name[0])
if not isinstance(section_config, dict):
return None
return section_config.get(self._config_var_name[1])
return scoped_config.get(self._config_var_name)
|
[
"def",
"provide",
"(",
"self",
")",
":",
"scoped_config",
"=",
"self",
".",
"_session",
".",
"get_scoped_config",
"(",
")",
"if",
"isinstance",
"(",
"self",
".",
"_config_var_name",
",",
"tuple",
")",
":",
"section_config",
"=",
"scoped_config",
".",
"get",
"(",
"self",
".",
"_config_var_name",
"[",
"0",
"]",
")",
"if",
"not",
"isinstance",
"(",
"section_config",
",",
"dict",
")",
":",
"return",
"None",
"return",
"section_config",
".",
"get",
"(",
"self",
".",
"_config_var_name",
"[",
"1",
"]",
")",
"return",
"scoped_config",
".",
"get",
"(",
"self",
".",
"_config_var_name",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/configprovider.py#L449-L457
|
|
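The tuple-versus-string branch in provide() above is easiest to see with a plain dict standing in for session.get_scoped_config(); the keys and values below are illustrative stand-ins, not real botocore state.

# Plain-dict stand-in for the scoped config; values are made up.
scoped_config = {
    'region': 'us-east-1',
    's3': {'addressing_style': 'path'},
}

def provide(config_var_name):
    # Mirrors the record's logic: a tuple means (section, key), else a flat key.
    if isinstance(config_var_name, tuple):
        section = scoped_config.get(config_var_name[0])
        if not isinstance(section, dict):
            return None
        return section.get(config_var_name[1])
    return scoped_config.get(config_var_name)

print(provide('region'))                    # us-east-1
print(provide(('s3', 'addressing_style')))  # path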
deepmind/streetlearn
|
ccf1d60b9c45154894d45a897748aee85d7eb69b
|
streetlearn/python/environment/courier_game.py
|
python
|
CourierGame._compute_spl_current_goal
|
(self, streetlearn)
|
return shortest_path_len / max(actual_path_len, shortest_path_len)
|
Compute the success weighted by inverse path length for the current goal.
We use the SPL definition from Eq. 1 in the following paper:
Anderson et al. (2018) "On Evaluation of Embodied Navigation Agents"
https://arxiv.org/pdf/1807.06757.pdf
Args:
streetlearn: The StreetLearn environment.
Returns:
The SPL metric for the current goal.
|
Compute the success weighted by inverse path length for the current goal.
|
[
"Compute",
"the",
"success",
"weighted",
"by",
"inverse",
"path",
"length",
"for",
"the",
"current",
"goal",
"."
] |
def _compute_spl_current_goal(self, streetlearn):
"""Compute the success weighted by inverse path length for the current goal.
We use the SPL definition from Eq. 1 in the following paper:
Anderson et al. (2018) "On Evaluation of Embodied Navigation Agents"
https://arxiv.org/pdf/1807.06757.pdf
Args:
streetlearn: The StreetLearn environment.
Returns:
The SPL metric for the current goal.
"""
# Since reaching the goal is defined as being within a circle around the
# goal pano, we subtract the panoramas within that circle from the shortest
# path length estimate, as well as from the actual path length.
# We add 1 to handle cases when the agent spawned within that circle.
_, num_remaining_panos_to_goal = self._shortest_paths(
streetlearn, self._current_goal_id, streetlearn.current_pano_id)
shortest_path_len = self._reward_current_goal - num_remaining_panos_to_goal
shortest_path_len = max(shortest_path_len, 1)
actual_path_len = len(self._visited_panos) - num_remaining_panos_to_goal
actual_path_len = max(actual_path_len, 1)
return shortest_path_len / max(actual_path_len, shortest_path_len)
|
[
"def",
"_compute_spl_current_goal",
"(",
"self",
",",
"streetlearn",
")",
":",
"# Since reaching the goal is defined as being within a circle around the",
"# goal pano, we subtract the panoramas within that circle from the shortest",
"# path length estimate, as well as from the actual path length.",
"# We add 1 to handle cases when the agent spawned within that circle.",
"_",
",",
"num_remaining_panos_to_goal",
"=",
"self",
".",
"_shortest_paths",
"(",
"streetlearn",
",",
"self",
".",
"_current_goal_id",
",",
"streetlearn",
".",
"current_pano_id",
")",
"shortest_path_len",
"=",
"self",
".",
"_reward_current_goal",
"-",
"num_remaining_panos_to_goal",
"shortest_path_len",
"=",
"max",
"(",
"shortest_path_len",
",",
"1",
")",
"actual_path_len",
"=",
"len",
"(",
"self",
".",
"_visited_panos",
")",
"-",
"num_remaining_panos_to_goal",
"actual_path_len",
"=",
"max",
"(",
"actual_path_len",
",",
"1",
")",
"return",
"shortest_path_len",
"/",
"max",
"(",
"actual_path_len",
",",
"shortest_path_len",
")"
] |
https://github.com/deepmind/streetlearn/blob/ccf1d60b9c45154894d45a897748aee85d7eb69b/streetlearn/python/environment/courier_game.py#L263-L285
|
|
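A self-contained restatement of the single-goal SPL term returned above, with made-up path lengths; it follows Eq. 1 of Anderson et al. (2018) for one episode, after the record's clamping to at least 1.

def spl_term(shortest_path_len, actual_path_len):
    # Clamp both lengths to >= 1, as the record does, then apply Eq. 1.
    shortest_path_len = max(shortest_path_len, 1)
    actual_path_len = max(actual_path_len, 1)
    return shortest_path_len / max(actual_path_len, shortest_path_len)

print(spl_term(10, 10))   # optimal route: 1.0
print(spl_term(10, 25))   # detour: 0.4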
livecode/livecode
|
4606a10ea10b16d5071d0f9f263ccdd7ede8b31d
|
gyp/pylib/gyp/generator/ninja.py
|
python
|
NinjaWriter.WriteNewNinjaRule
|
(self, name, args, description, is_cygwin, env, pool,
depfile=None)
|
return rule_name, args
|
Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded.
|
Write out a new ninja "rule" statement for a given command.
|
[
"Write",
"out",
"a",
"new",
"ninja",
"rule",
"statement",
"for",
"a",
"given",
"command",
"."
] |
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
|
[
"def",
"WriteNewNinjaRule",
"(",
"self",
",",
"name",
",",
"args",
",",
"description",
",",
"is_cygwin",
",",
"env",
",",
"pool",
",",
"depfile",
"=",
"None",
")",
":",
"if",
"self",
".",
"flavor",
"==",
"'win'",
":",
"args",
"=",
"[",
"self",
".",
"msvs_settings",
".",
"ConvertVSMacros",
"(",
"arg",
",",
"self",
".",
"base_to_build",
",",
"config",
"=",
"self",
".",
"config_name",
")",
"for",
"arg",
"in",
"args",
"]",
"description",
"=",
"self",
".",
"msvs_settings",
".",
"ConvertVSMacros",
"(",
"description",
",",
"config",
"=",
"self",
".",
"config_name",
")",
"elif",
"self",
".",
"flavor",
"==",
"'mac'",
":",
"# |env| is an empty list on non-mac.",
"args",
"=",
"[",
"gyp",
".",
"xcode_emulation",
".",
"ExpandEnvVars",
"(",
"arg",
",",
"env",
")",
"for",
"arg",
"in",
"args",
"]",
"description",
"=",
"gyp",
".",
"xcode_emulation",
".",
"ExpandEnvVars",
"(",
"description",
",",
"env",
")",
"# TODO: we shouldn't need to qualify names; we do it because",
"# currently the ninja rule namespace is global, but it really",
"# should be scoped to the subninja.",
"rule_name",
"=",
"self",
".",
"name",
"if",
"self",
".",
"toolset",
"==",
"'target'",
":",
"rule_name",
"+=",
"'.'",
"+",
"self",
".",
"toolset",
"rule_name",
"+=",
"'.'",
"+",
"name",
"rule_name",
"=",
"re",
".",
"sub",
"(",
"'[^a-zA-Z0-9_]'",
",",
"'_'",
",",
"rule_name",
")",
"# Remove variable references, but not if they refer to the magic rule",
"# variables. This is not quite right, as it also protects these for",
"# actions, not just for rules where they are valid. Good enough.",
"protect",
"=",
"[",
"'${root}'",
",",
"'${dirname}'",
",",
"'${source}'",
",",
"'${ext}'",
",",
"'${name}'",
"]",
"protect",
"=",
"'(?!'",
"+",
"'|'",
".",
"join",
"(",
"map",
"(",
"re",
".",
"escape",
",",
"protect",
")",
")",
"+",
"')'",
"description",
"=",
"re",
".",
"sub",
"(",
"protect",
"+",
"r'\\$'",
",",
"'_'",
",",
"description",
")",
"# gyp dictates that commands are run from the base directory.",
"# cd into the directory before running, and adjust paths in",
"# the arguments to point to the proper locations.",
"rspfile",
"=",
"None",
"rspfile_content",
"=",
"None",
"args",
"=",
"[",
"self",
".",
"ExpandSpecial",
"(",
"arg",
",",
"self",
".",
"base_to_build",
")",
"for",
"arg",
"in",
"args",
"]",
"if",
"self",
".",
"flavor",
"==",
"'win'",
":",
"rspfile",
"=",
"rule_name",
"+",
"'.$unique_name.rsp'",
"# The cygwin case handles this inside the bash sub-shell.",
"run_in",
"=",
"''",
"if",
"is_cygwin",
"else",
"' '",
"+",
"self",
".",
"build_to_base",
"if",
"is_cygwin",
":",
"rspfile_content",
"=",
"self",
".",
"msvs_settings",
".",
"BuildCygwinBashCommandLine",
"(",
"args",
",",
"self",
".",
"build_to_base",
")",
"else",
":",
"rspfile_content",
"=",
"gyp",
".",
"msvs_emulation",
".",
"EncodeRspFileList",
"(",
"args",
")",
"command",
"=",
"(",
"'%s gyp-win-tool action-wrapper $arch '",
"%",
"sys",
".",
"executable",
"+",
"rspfile",
"+",
"run_in",
")",
"else",
":",
"env",
"=",
"self",
".",
"ComputeExportEnvString",
"(",
"env",
")",
"command",
"=",
"gyp",
".",
"common",
".",
"EncodePOSIXShellList",
"(",
"args",
")",
"command",
"=",
"'cd %s; '",
"%",
"self",
".",
"build_to_base",
"+",
"env",
"+",
"command",
"# GYP rules/actions express being no-ops by not touching their outputs.",
"# Avoid executing downstream dependencies in this case by specifying",
"# restat=1 to ninja.",
"self",
".",
"ninja",
".",
"rule",
"(",
"rule_name",
",",
"command",
",",
"description",
",",
"depfile",
"=",
"depfile",
",",
"restat",
"=",
"True",
",",
"pool",
"=",
"pool",
",",
"rspfile",
"=",
"rspfile",
",",
"rspfile_content",
"=",
"rspfile_content",
")",
"self",
".",
"ninja",
".",
"newline",
"(",
")",
"return",
"rule_name",
",",
"args"
] |
https://github.com/livecode/livecode/blob/4606a10ea10b16d5071d0f9f263ccdd7ede8b31d/gyp/pylib/gyp/generator/ninja.py#L1511-L1575
|
|
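The rule_name sanitisation step above is self-contained enough to demo directly: everything outside [a-zA-Z0-9_] is folded to '_' so arbitrary gyp names stay valid ninja rule names. The input names are invented.

import re

for name in ('my-target.host.run tests', 'libfoo++.compile'):
    print(re.sub('[^a-zA-Z0-9_]', '_', name))
# my_target_host_run_tests
# libfoo___compile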
cyberbotics/webots
|
af7fa7d68dcf7b4550f1f2e132092b41e83698fc
|
resources/osm_importer/osm_objects.py
|
python
|
OSMMultipolygon.add_intermediate_point
|
(self)
|
If last and first points are not the same we need to compute an intermediate point location.
|
If last and first points are not the same we need to compute an intermediate point location.
|
[
"If",
"last",
"and",
"first",
"points",
"are",
"not",
"the",
"same",
"we",
"need",
"to",
"compute",
"an",
"intermediate",
"point",
"location",
"."
] |
def add_intermediate_point(self):
"""If last and first points are not the same we need to compute an intermediate point location."""
"""The point is used to close the polygon."""
coordBegin = OSMCoord.coordDictionnary[self.ref[0]]
coordEnd = OSMCoord.coordDictionnary[self.ref[-1]]
distance = length2D(coordBegin.x - coordEnd.x, coordBegin.y - coordEnd.y)
angle = math.atan2(coordBegin.y - coordEnd.y, coordBegin.x - coordEnd.x)
# there is two possible 'optimal' intermediate points
# we select the one that is the farthest from all the other coord (=>inside the lake)
x1 = math.cos(math.pi / 2 + angle) * (distance / 2) + (coordBegin.x + coordEnd.x) / 2
y1 = math.sin(math.pi / 2 + angle) * (distance / 2) + (coordBegin.y + coordEnd.y) / 2
x2 = -math.cos(math.pi / 2 + angle) * (distance / 2) + (coordBegin.x + coordEnd.x) / 2
y2 = -math.sin(math.pi / 2 + angle) * (distance / 2) + (coordBegin.y + coordEnd.y) / 2
distanceSum1 = OSMMultipolygon.sum_distances_to_coords(OSMCoord.coordDictionnary, x1, y1, 2000)
distanceSum2 = OSMMultipolygon.sum_distances_to_coords(OSMCoord.coordDictionnary, x2, y2, 2000)
if distanceSum1 < distanceSum2:
x = x1
y = y1
else:
x = x2
y = y2
self.ref.append(OSMCoord.add_new_coord_to_list(x, y))
|
[
"def",
"add_intermediate_point",
"(",
"self",
")",
":",
"\"\"\"The point is used to close the polygon.\"\"\"",
"coordBegin",
"=",
"OSMCoord",
".",
"coordDictionnary",
"[",
"self",
".",
"ref",
"[",
"0",
"]",
"]",
"coordEnd",
"=",
"OSMCoord",
".",
"coordDictionnary",
"[",
"self",
".",
"ref",
"[",
"-",
"1",
"]",
"]",
"distance",
"=",
"length2D",
"(",
"coordBegin",
".",
"x",
"-",
"coordEnd",
".",
"x",
",",
"coordBegin",
".",
"y",
"-",
"coordEnd",
".",
"y",
")",
"angle",
"=",
"math",
".",
"atan2",
"(",
"coordBegin",
".",
"y",
"-",
"coordEnd",
".",
"y",
",",
"coordBegin",
".",
"x",
"-",
"coordEnd",
".",
"x",
")",
"# there is two possible 'optimal' intermediate points",
"# we select the one that is the farthest from all the other coord (=>inside the lake)",
"x1",
"=",
"math",
".",
"cos",
"(",
"math",
".",
"pi",
"/",
"2",
"+",
"angle",
")",
"*",
"(",
"distance",
"/",
"2",
")",
"+",
"(",
"coordBegin",
".",
"x",
"+",
"coordEnd",
".",
"x",
")",
"/",
"2",
"y1",
"=",
"math",
".",
"sin",
"(",
"math",
".",
"pi",
"/",
"2",
"+",
"angle",
")",
"*",
"(",
"distance",
"/",
"2",
")",
"+",
"(",
"coordBegin",
".",
"y",
"+",
"coordEnd",
".",
"y",
")",
"/",
"2",
"x2",
"=",
"-",
"math",
".",
"cos",
"(",
"math",
".",
"pi",
"/",
"2",
"+",
"angle",
")",
"*",
"(",
"distance",
"/",
"2",
")",
"+",
"(",
"coordBegin",
".",
"x",
"+",
"coordEnd",
".",
"x",
")",
"/",
"2",
"y2",
"=",
"-",
"math",
".",
"sin",
"(",
"math",
".",
"pi",
"/",
"2",
"+",
"angle",
")",
"*",
"(",
"distance",
"/",
"2",
")",
"+",
"(",
"coordBegin",
".",
"y",
"+",
"coordEnd",
".",
"y",
")",
"/",
"2",
"distanceSum1",
"=",
"OSMMultipolygon",
".",
"sum_distances_to_coords",
"(",
"OSMCoord",
".",
"coordDictionnary",
",",
"x1",
",",
"y1",
",",
"2000",
")",
"distanceSum2",
"=",
"OSMMultipolygon",
".",
"sum_distances_to_coords",
"(",
"OSMCoord",
".",
"coordDictionnary",
",",
"x2",
",",
"y2",
",",
"2000",
")",
"if",
"distanceSum1",
"<",
"distanceSum2",
":",
"x",
"=",
"x1",
"y",
"=",
"y1",
"else",
":",
"x",
"=",
"x2",
"y",
"=",
"y2",
"self",
".",
"ref",
".",
"append",
"(",
"OSMCoord",
".",
"add_new_coord_to_list",
"(",
"x",
",",
"y",
")",
")"
] |
https://github.com/cyberbotics/webots/blob/af7fa7d68dcf7b4550f1f2e132092b41e83698fc/resources/osm_importer/osm_objects.py#L198-L220
|
||
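The two candidate closing points computed above both lie on the perpendicular bisector of the begin-to-end segment, offset by half its length to either side. A plain-math sketch with assumed coordinates (the record picks between the two using summed distances to other coords):

import math

bx, by, ex, ey = 0.0, 0.0, 4.0, 0.0          # assumed begin/end points
distance = math.hypot(bx - ex, by - ey)
angle = math.atan2(by - ey, bx - ex)
mx, my = (bx + ex) / 2, (by + ey) / 2        # midpoint of the gap
x1 = math.cos(math.pi / 2 + angle) * distance / 2 + mx
y1 = math.sin(math.pi / 2 + angle) * distance / 2 + my
x2, y2 = 2 * mx - x1, 2 * my - y1            # mirror image across the midpoint
print((x1, y1), (x2, y2))                    # approximately (2.0, -2.0) and (2.0, 2.0)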
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/ed_cmdbar.py
|
python
|
CommandBarBase.OnShowBar
|
(self, evt)
|
Update the session list
|
Update the session list
|
[
"Update",
"the",
"session",
"list"
] |
def OnShowBar(self, evt):
"""Update the session list"""
if evt.IsShown():
if self and evt.EventObject is self:
self.OnBarShown()
evt.Skip()
|
[
"def",
"OnShowBar",
"(",
"self",
",",
"evt",
")",
":",
"if",
"evt",
".",
"IsShown",
"(",
")",
":",
"if",
"self",
"and",
"evt",
".",
"EventObject",
"is",
"self",
":",
"self",
".",
"OnBarShown",
"(",
")",
"evt",
".",
"Skip",
"(",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_cmdbar.py#L168-L173
|
||
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/python/framework/docs.py
|
python
|
Index.__init__
|
(self, module_to_name, members, filename_to_library_map,
path_prefix)
|
Creates a new Index.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
filename_to_library_map: A list of (filename, Library) pairs. The order
corresponds to the order in which the libraries appear in the index.
path_prefix: Prefix to add to links in the index.
|
Creates a new Index.
|
[
"Creates",
"a",
"new",
"Index",
"."
] |
def __init__(self, module_to_name, members, filename_to_library_map,
path_prefix):
"""Creates a new Index.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
filename_to_library_map: A list of (filename, Library) pairs. The order
corresponds to the order in which the libraries appear in the index.
path_prefix: Prefix to add to links in the index.
"""
self._module_to_name = module_to_name
self._members = members
self._filename_to_library_map = filename_to_library_map
self._path_prefix = path_prefix
|
[
"def",
"__init__",
"(",
"self",
",",
"module_to_name",
",",
"members",
",",
"filename_to_library_map",
",",
"path_prefix",
")",
":",
"self",
".",
"_module_to_name",
"=",
"module_to_name",
"self",
".",
"_members",
"=",
"members",
"self",
".",
"_filename_to_library_map",
"=",
"filename_to_library_map",
"self",
".",
"_path_prefix",
"=",
"path_prefix"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/docs.py#L56-L70
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/propgrid.py
|
python
|
PGProperty.GetAttributesAsList
|
(*args, **kwargs)
|
return _propgrid.PGProperty_GetAttributesAsList(*args, **kwargs)
|
GetAttributesAsList(self) -> wxVariant
|
GetAttributesAsList(self) -> wxVariant
|
[
"GetAttributesAsList",
"(",
"self",
")",
"-",
">",
"wxVariant"
] |
def GetAttributesAsList(*args, **kwargs):
"""GetAttributesAsList(self) -> wxVariant"""
return _propgrid.PGProperty_GetAttributesAsList(*args, **kwargs)
|
[
"def",
"GetAttributesAsList",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PGProperty_GetAttributesAsList",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/propgrid.py#L547-L549
|
|
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/contrib/timeseries/python/timeseries/model.py
|
python
|
TimeSeriesModel._process_exogenous_features
|
(self, times, features)
|
return exogenous_regressors
|
Create a single vector from exogenous features.
Args:
times: A [batch size, window size] vector of times for this batch,
primarily used to check the shape information of exogenous features.
features: A dictionary of exogenous features corresponding to the columns
in self._exogenous_feature_columns. Each value should have a shape
prefixed by [batch size, window size].
Returns:
A Tensor with shape [batch size, window size, exogenous dimension], where
the size of the exogenous dimension depends on the exogenous feature
columns passed to the model's constructor.
Raises:
ValueError: If an exogenous feature has an unknown rank.
|
Create a single vector from exogenous features.
|
[
"Create",
"a",
"single",
"vector",
"from",
"exogenous",
"features",
"."
] |
def _process_exogenous_features(self, times, features):
"""Create a single vector from exogenous features.
Args:
times: A [batch size, window size] vector of times for this batch,
primarily used to check the shape information of exogenous features.
features: A dictionary of exogenous features corresponding to the columns
in self._exogenous_feature_columns. Each value should have a shape
prefixed by [batch size, window size].
Returns:
A Tensor with shape [batch size, window size, exogenous dimension], where
the size of the exogenous dimension depends on the exogenous feature
columns passed to the model's constructor.
Raises:
ValueError: If an exogenous feature has an unknown rank.
"""
if self._exogenous_feature_columns:
exogenous_features_single_batch_dimension = {}
for name, tensor in features.items():
if tensor.get_shape().ndims is None:
# input_from_feature_columns does not support completely unknown
# feature shapes, so we save on a bit of logic and provide a better
# error message by checking that here.
raise ValueError(
("Features with unknown rank are not supported. Got shape {} for "
"feature {}.").format(tensor.get_shape(), name))
tensor_shape_dynamic = array_ops.shape(tensor)
tensor = array_ops.reshape(
tensor,
array_ops.concat([[tensor_shape_dynamic[0]
* tensor_shape_dynamic[1]],
tensor_shape_dynamic[2:]], axis=0))
# Avoid shape warnings when embedding "scalar" exogenous features (those
# with only batch and window dimensions); input_from_feature_columns
# expects input ranks to match the embedded rank.
if tensor.get_shape().ndims == 1:
exogenous_features_single_batch_dimension[name] = tensor[:, None]
else:
exogenous_features_single_batch_dimension[name] = tensor
embedded_exogenous_features_single_batch_dimension = (
layers.input_from_feature_columns(
columns_to_tensors=exogenous_features_single_batch_dimension,
feature_columns=self._exogenous_feature_columns,
trainable=True))
exogenous_regressors = array_ops.reshape(
embedded_exogenous_features_single_batch_dimension,
array_ops.concat(
[
array_ops.shape(times), array_ops.shape(
embedded_exogenous_features_single_batch_dimension)[1:]
],
axis=0))
exogenous_regressors.set_shape(times.get_shape().concatenate(
embedded_exogenous_features_single_batch_dimension.get_shape()[1:]))
exogenous_regressors = math_ops.cast(
exogenous_regressors, dtype=self.dtype)
else:
# Not having any exogenous features is a special case so that models can
# avoid superfluous updates, which may not be free of side effects due to
# bias terms in transformations.
exogenous_regressors = None
return exogenous_regressors
|
[
"def",
"_process_exogenous_features",
"(",
"self",
",",
"times",
",",
"features",
")",
":",
"if",
"self",
".",
"_exogenous_feature_columns",
":",
"exogenous_features_single_batch_dimension",
"=",
"{",
"}",
"for",
"name",
",",
"tensor",
"in",
"features",
".",
"items",
"(",
")",
":",
"if",
"tensor",
".",
"get_shape",
"(",
")",
".",
"ndims",
"is",
"None",
":",
"# input_from_feature_columns does not support completely unknown",
"# feature shapes, so we save on a bit of logic and provide a better",
"# error message by checking that here.",
"raise",
"ValueError",
"(",
"(",
"\"Features with unknown rank are not supported. Got shape {} for \"",
"\"feature {}.\"",
")",
".",
"format",
"(",
"tensor",
".",
"get_shape",
"(",
")",
",",
"name",
")",
")",
"tensor_shape_dynamic",
"=",
"array_ops",
".",
"shape",
"(",
"tensor",
")",
"tensor",
"=",
"array_ops",
".",
"reshape",
"(",
"tensor",
",",
"array_ops",
".",
"concat",
"(",
"[",
"[",
"tensor_shape_dynamic",
"[",
"0",
"]",
"*",
"tensor_shape_dynamic",
"[",
"1",
"]",
"]",
",",
"tensor_shape_dynamic",
"[",
"2",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"# Avoid shape warnings when embedding \"scalar\" exogenous features (those",
"# with only batch and window dimensions); input_from_feature_columns",
"# expects input ranks to match the embedded rank.",
"if",
"tensor",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"1",
":",
"exogenous_features_single_batch_dimension",
"[",
"name",
"]",
"=",
"tensor",
"[",
":",
",",
"None",
"]",
"else",
":",
"exogenous_features_single_batch_dimension",
"[",
"name",
"]",
"=",
"tensor",
"embedded_exogenous_features_single_batch_dimension",
"=",
"(",
"layers",
".",
"input_from_feature_columns",
"(",
"columns_to_tensors",
"=",
"exogenous_features_single_batch_dimension",
",",
"feature_columns",
"=",
"self",
".",
"_exogenous_feature_columns",
",",
"trainable",
"=",
"True",
")",
")",
"exogenous_regressors",
"=",
"array_ops",
".",
"reshape",
"(",
"embedded_exogenous_features_single_batch_dimension",
",",
"array_ops",
".",
"concat",
"(",
"[",
"array_ops",
".",
"shape",
"(",
"times",
")",
",",
"array_ops",
".",
"shape",
"(",
"embedded_exogenous_features_single_batch_dimension",
")",
"[",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"exogenous_regressors",
".",
"set_shape",
"(",
"times",
".",
"get_shape",
"(",
")",
".",
"concatenate",
"(",
"embedded_exogenous_features_single_batch_dimension",
".",
"get_shape",
"(",
")",
"[",
"1",
":",
"]",
")",
")",
"exogenous_regressors",
"=",
"math_ops",
".",
"cast",
"(",
"exogenous_regressors",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"else",
":",
"# Not having any exogenous features is a special case so that models can",
"# avoid superfluous updates, which may not be free of side effects due to",
"# bias terms in transformations.",
"exogenous_regressors",
"=",
"None",
"return",
"exogenous_regressors"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/timeseries/python/timeseries/model.py#L220-L281
|
|
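The shape bookkeeping above (fold batch and window together for the feature-column embedding, then restore the leading dims) can be checked with a NumPy stand-in; all sizes below are assumptions, and the zeros array merely stands in for the embedding output.

import numpy as np

batch, window, feature_dim, embed_dim = 2, 3, 4, 5    # assumed sizes
tensor = np.zeros((batch, window, feature_dim))
flat = tensor.reshape((batch * window,) + tensor.shape[2:])     # (6, 4)
embedded = np.zeros((flat.shape[0], embed_dim))                 # (6, 5), embedding stand-in
restored = embedded.reshape((batch, window) + embedded.shape[1:])
print(flat.shape, restored.shape)                               # (6, 4) (2, 3, 5)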
SFTtech/openage
|
d6a08c53c48dc1e157807471df92197f6ca9e04d
|
openage/util/fslike/path.py
|
python
|
Path.resolve_native_path_w
|
(self)
|
return None
|
Resolve the path for write access and try to return
a native equivalent.
If no native path could be determined, return None.
|
Resolve the path for write access and try to return
a native equivalent.
If no native path could be determined, return None.
|
[
"Resolve",
"the",
"path",
"for",
"write",
"access",
"and",
"try",
"to",
"return",
"a",
"native",
"equivalent",
".",
"If",
"no",
"native",
"path",
"could",
"be",
"determined",
"return",
"None",
"."
] |
def resolve_native_path_w(self):
"""
Resolve the path for write access and try to return
a native equivalent.
If no native path could be determined, return None.
"""
resolved_path = self._resolve_w()
if resolved_path:
# pylint: disable=protected-access
return resolved_path._get_native_path()
return None
|
[
"def",
"resolve_native_path_w",
"(",
"self",
")",
":",
"resolved_path",
"=",
"self",
".",
"_resolve_w",
"(",
")",
"if",
"resolved_path",
":",
"# pylint: disable=protected-access",
"return",
"resolved_path",
".",
"_get_native_path",
"(",
")",
"return",
"None"
] |
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/util/fslike/path.py#L190-L200
|
|
hpi-xnor/BMXNet
|
ed0b201da6667887222b8e4b5f997c4f6b61943d
|
python/mxnet/module/bucketing_module.py
|
python
|
BucketingModule.data_shapes
|
(self)
|
return self._curr_module.data_shapes
|
Get data shapes.
Returns
-------
A list of `(name, shape)` pairs.
|
Get data shapes.
|
[
"Get",
"data",
"shapes",
"."
] |
def data_shapes(self):
"""Get data shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._curr_module.data_shapes
|
[
"def",
"data_shapes",
"(",
"self",
")",
":",
"assert",
"self",
".",
"binded",
"return",
"self",
".",
"_curr_module",
".",
"data_shapes"
] |
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/module/bucketing_module.py#L123-L131
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
Rect2D.Contains
|
(*args, **kwargs)
|
return _core_.Rect2D_Contains(*args, **kwargs)
|
Contains(self, Point2D pt) -> bool
|
Contains(self, Point2D pt) -> bool
|
[
"Contains",
"(",
"self",
"Point2D",
"pt",
")",
"-",
">",
"bool"
] |
def Contains(*args, **kwargs):
"""Contains(self, Point2D pt) -> bool"""
return _core_.Rect2D_Contains(*args, **kwargs)
|
[
"def",
"Contains",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Rect2D_Contains",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L1967-L1969
|