nwo (string, 5-86 chars) | sha (string, 40 chars) | path (string, 4-189 chars) | language (string, 1 distinct value) | identifier (string, 1-94 chars) | parameters (string, 2-4.03k chars) | argument_list (string, 1 distinct value) | return_statement (string, 0-11.5k chars) | docstring (string, 1-33.2k chars) | docstring_summary (string, 0-5.15k chars) | docstring_tokens (sequence) | function (string, 34-151k chars) | function_tokens (sequence) | url (string, 90-278 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
giuspen/cherrytree | 84712f206478fcf9acf30174009ad28c648c6344 | pygtk2/modules/printing.py | python | PrintHandler.run_print_operation | (self, print_operation, parent) | Run a Ready Print Operation | Run a Ready Print Operation | [
"Run",
"a",
"Ready",
"Print",
"Operation"
] | def run_print_operation(self, print_operation, parent):
"""Run a Ready Print Operation"""
if self.pdf_filepath: print_operation.set_export_filename(self.pdf_filepath)
print_operation_action = gtk.PRINT_OPERATION_ACTION_EXPORT if self.pdf_filepath else gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG
try: res = print_operation.run(print_operation_action, parent)
except gobject.GError, ex:
support.dialog_error("Error printing file:\n%s (exception caught)" % str(ex), parent)
else:
if res == gtk.PRINT_OPERATION_RESULT_ERROR:
support.dialog_error("Error printing file (bad res)", parent)
elif res == gtk.PRINT_OPERATION_RESULT_APPLY:
self.settings = print_operation.get_print_settings()
if not print_operation.is_finished():
print_operation.connect("status_changed", self.on_print_status_changed) | [
"def",
"run_print_operation",
"(",
"self",
",",
"print_operation",
",",
"parent",
")",
":",
"if",
"self",
".",
"pdf_filepath",
":",
"print_operation",
".",
"set_export_filename",
"(",
"self",
".",
"pdf_filepath",
")",
"print_operation_action",
"=",
"gtk",
".",
"PRINT_OPERATION_ACTION_EXPORT",
"if",
"self",
".",
"pdf_filepath",
"else",
"gtk",
".",
"PRINT_OPERATION_ACTION_PRINT_DIALOG",
"try",
":",
"res",
"=",
"print_operation",
".",
"run",
"(",
"print_operation_action",
",",
"parent",
")",
"except",
"gobject",
".",
"GError",
",",
"ex",
":",
"support",
".",
"dialog_error",
"(",
"\"Error printing file:\\n%s (exception caught)\"",
"%",
"str",
"(",
"ex",
")",
",",
"parent",
")",
"else",
":",
"if",
"res",
"==",
"gtk",
".",
"PRINT_OPERATION_RESULT_ERROR",
":",
"support",
".",
"dialog_error",
"(",
"\"Error printing file (bad res)\"",
",",
"parent",
")",
"elif",
"res",
"==",
"gtk",
".",
"PRINT_OPERATION_RESULT_APPLY",
":",
"self",
".",
"settings",
"=",
"print_operation",
".",
"get_print_settings",
"(",
")",
"if",
"not",
"print_operation",
".",
"is_finished",
"(",
")",
":",
"print_operation",
".",
"connect",
"(",
"\"status_changed\"",
",",
"self",
".",
"on_print_status_changed",
")"
] | https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/printing.py#L60-L73 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/joblib/joblib/numpy_pickle_compat.py | python | ZNDArrayWrapper.read | (self, unpickler) | return array | Reconstruct the array from the meta-information and the z-file. | Reconstruct the array from the meta-information and the z-file. | [
"Reconstruct",
"the",
"array",
"from",
"the",
"meta",
"-",
"information",
"and",
"the",
"z",
"-",
"file",
"."
] | def read(self, unpickler):
"""Reconstruct the array from the meta-information and the z-file."""
# Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
with open(filename, 'rb') as f:
data = read_zfile(f)
state = self.state + (data,)
array.__setstate__(state)
return array | [
"def",
"read",
"(",
"self",
",",
"unpickler",
")",
":",
"# Here we a simply reproducing the unpickling mechanism for numpy",
"# arrays",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"unpickler",
".",
"_dirname",
",",
"self",
".",
"filename",
")",
"array",
"=",
"unpickler",
".",
"np",
".",
"core",
".",
"multiarray",
".",
"_reconstruct",
"(",
"*",
"self",
".",
"init_args",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"read_zfile",
"(",
"f",
")",
"state",
"=",
"self",
".",
"state",
"+",
"(",
"data",
",",
")",
"array",
".",
"__setstate__",
"(",
"state",
")",
"return",
"array"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/joblib/joblib/numpy_pickle_compat.py#L145-L155 |
|
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/idl/idl/syntax.py | python | Condition.__init__ | (self, file_name, line, column) | Construct a Condition. | Construct a Condition. | [
"Construct",
"a",
"Condition",
"."
] | def __init__(self, file_name, line, column):
# type: (str, int, int) -> None
"""Construct a Condition."""
self.expr = None # type: str
self.constexpr = None # type: str
self.preprocessor = None # type: str
super(Condition, self).__init__(file_name, line, column) | [
"def",
"__init__",
"(",
"self",
",",
"file_name",
",",
"line",
",",
"column",
")",
":",
"# type: (str, int, int) -> None",
"self",
".",
"expr",
"=",
"None",
"# type: str",
"self",
".",
"constexpr",
"=",
"None",
"# type: str",
"self",
".",
"preprocessor",
"=",
"None",
"# type: str",
"super",
"(",
"Condition",
",",
"self",
")",
".",
"__init__",
"(",
"file_name",
",",
"line",
",",
"column",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/syntax.py#L711-L718 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ast.py | python | copy_location | (new_node, old_node) | return new_node | Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*. | Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*. | [
"Copy",
"source",
"location",
"(",
"lineno",
"and",
"col_offset",
"attributes",
")",
"from",
"*",
"old_node",
"*",
"to",
"*",
"new_node",
"*",
"if",
"possible",
"and",
"return",
"*",
"new_node",
"*",
"."
] | def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node | [
"def",
"copy_location",
"(",
"new_node",
",",
"old_node",
")",
":",
"for",
"attr",
"in",
"'lineno'",
",",
"'col_offset'",
":",
"if",
"attr",
"in",
"old_node",
".",
"_attributes",
"and",
"attr",
"in",
"new_node",
".",
"_attributes",
"and",
"hasattr",
"(",
"old_node",
",",
"attr",
")",
":",
"setattr",
"(",
"new_node",
",",
"attr",
",",
"getattr",
"(",
"old_node",
",",
"attr",
")",
")",
"return",
"new_node"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ast.py#L133-L142 |
|
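Since `copy_location` is the standard library's `ast` helper, a minimal usage sketch is possible (Python 3.7-era API, matching the vintage of this file; `ast.Num` is the literal node type of that era):

```python
import ast

tree = ast.parse("x = 1")
old = tree.body[0].value                    # the literal 1; carries lineno/col_offset
new = ast.copy_location(ast.Num(n=2), old)  # fresh node inherits old's position
print(new.lineno, new.col_offset)           # -> 1 4
```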
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | SizerItem.SetInitSize | (*args, **kwargs) | return _core_.SizerItem_SetInitSize(*args, **kwargs) | SetInitSize(self, int x, int y) | SetInitSize(self, int x, int y) | [
"SetInitSize",
"(",
"self",
"int",
"x",
"int",
"y",
")"
] | def SetInitSize(*args, **kwargs):
"""SetInitSize(self, int x, int y)"""
return _core_.SizerItem_SetInitSize(*args, **kwargs) | [
"def",
"SetInitSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"SizerItem_SetInitSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L14109-L14111 |
|
forkineye/ESPixelStick | 22926f1c0d1131f1369fc7cad405689a095ae3cb | dist/bin/pyserial/serial/tools/hexlify_codec.py | python | IncrementalEncoder.encode | (self, data, final=False) | return serial.to_bytes(encoded) | \
Incremental encode, keep track of digits and emit a byte when a pair
of hex digits is found. The space is optional unless the error
handling is defined to be 'strict'. | \
Incremental encode, keep track of digits and emit a byte when a pair
of hex digits is found. The space is optional unless the error
handling is defined to be 'strict'. | [
"\\",
"Incremental",
"encode",
"keep",
"track",
"of",
"digits",
"and",
"emit",
"a",
"byte",
"when",
"a",
"pair",
"of",
"hex",
"digits",
"is",
"found",
".",
"The",
"space",
"is",
"optional",
"unless",
"the",
"error",
"handling",
"is",
"defined",
"to",
"be",
"strict",
"."
] | def encode(self, data, final=False):
"""\
Incremental encode, keep track of digits and emit a byte when a pair
of hex digits is found. The space is optional unless the error
handling is defined to be 'strict'.
"""
state = self.state
encoded = []
for c in data.upper():
if c in HEXDIGITS:
z = HEXDIGITS.index(c)
if state:
encoded.append(z + (state & 0xf0))
state = 0
else:
state = 0x100 + (z << 4)
elif c == ' ': # allow spaces to separate values
if state and self.errors == 'strict':
raise UnicodeError('odd number of hex digits')
state = 0
else:
if self.errors == 'strict':
raise UnicodeError('non-hex digit found: {!r}'.format(c))
self.state = state
return serial.to_bytes(encoded) | [
"def",
"encode",
"(",
"self",
",",
"data",
",",
"final",
"=",
"False",
")",
":",
"state",
"=",
"self",
".",
"state",
"encoded",
"=",
"[",
"]",
"for",
"c",
"in",
"data",
".",
"upper",
"(",
")",
":",
"if",
"c",
"in",
"HEXDIGITS",
":",
"z",
"=",
"HEXDIGITS",
".",
"index",
"(",
"c",
")",
"if",
"state",
":",
"encoded",
".",
"append",
"(",
"z",
"+",
"(",
"state",
"&",
"0xf0",
")",
")",
"state",
"=",
"0",
"else",
":",
"state",
"=",
"0x100",
"+",
"(",
"z",
"<<",
"4",
")",
"elif",
"c",
"==",
"' '",
":",
"# allow spaces to separate values",
"if",
"state",
"and",
"self",
".",
"errors",
"==",
"'strict'",
":",
"raise",
"UnicodeError",
"(",
"'odd number of hex digits'",
")",
"state",
"=",
"0",
"else",
":",
"if",
"self",
".",
"errors",
"==",
"'strict'",
":",
"raise",
"UnicodeError",
"(",
"'non-hex digit found: {!r}'",
".",
"format",
"(",
"c",
")",
")",
"self",
".",
"state",
"=",
"state",
"return",
"serial",
".",
"to_bytes",
"(",
"encoded",
")"
] | https://github.com/forkineye/ESPixelStick/blob/22926f1c0d1131f1369fc7cad405689a095ae3cb/dist/bin/pyserial/serial/tools/hexlify_codec.py#L72-L96 |
|
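A small sketch exercising this encoder through the `codecs` machinery, assuming pyserial is installed; the registration pattern follows the module's own docstring:

```python
import codecs
import serial.tools.hexlify_codec as hexlify_codec

# Register the codec under the name 'hexlify', as the module docstring suggests.
codecs.register(lambda name: hexlify_codec.getregentry() if name == 'hexlify' else None)

# Pairs of hex digits (spaces optional) are emitted as raw bytes.
print(codecs.encode('48 45 4C 4C 4F', 'hexlify'))  # -> b'HELLO'
```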
bigartm/bigartm | 47e37f982de87aa67bfd475ff1f39da696b181b3 | python/artm/master_component.py | python | MasterComponent.import_score_tracker | (self, filename) | :param str filename: the name of file to load score tracker from binary format | :param str filename: the name of file to load score tracker from binary format | [
":",
"param",
"str",
"filename",
":",
"the",
"name",
"of",
"file",
"to",
"load",
"score",
"tracker",
"from",
"binary",
"format"
] | def import_score_tracker(self, filename):
"""
:param str filename: the name of file to load score tracker from binary format
"""
args = messages.ImportScoreTrackerArgs(file_name=filename)
result = self._lib.ArtmImportScoreTracker(self.master_id, args) | [
"def",
"import_score_tracker",
"(",
"self",
",",
"filename",
")",
":",
"args",
"=",
"messages",
".",
"ImportScoreTrackerArgs",
"(",
"file_name",
"=",
"filename",
")",
"result",
"=",
"self",
".",
"_lib",
".",
"ArtmImportScoreTracker",
"(",
"self",
".",
"master_id",
",",
"args",
")"
] | https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/python/artm/master_component.py#L1002-L1007 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/rfc822.py | python | Message.getrawheader | (self, name) | return ''.join(lst) | A higher-level interface to getfirstmatchingheader().
Return a string containing the literal text of the header but with the
keyword stripped. All leading, trailing and embedded whitespace is
kept in the string, however. Return None if the header does not
occur. | A higher-level interface to getfirstmatchingheader(). | [
"A",
"higher",
"-",
"level",
"interface",
"to",
"getfirstmatchingheader",
"()",
"."
] | def getrawheader(self, name):
"""A higher-level interface to getfirstmatchingheader().
Return a string containing the literal text of the header but with the
keyword stripped. All leading, trailing and embedded whitespace is
kept in the string, however. Return None if the header does not
occur.
"""
lst = self.getfirstmatchingheader(name)
if not lst:
return None
lst[0] = lst[0][len(name) + 1:]
return ''.join(lst) | [
"def",
"getrawheader",
"(",
"self",
",",
"name",
")",
":",
"lst",
"=",
"self",
".",
"getfirstmatchingheader",
"(",
"name",
")",
"if",
"not",
"lst",
":",
"return",
"None",
"lst",
"[",
"0",
"]",
"=",
"lst",
"[",
"0",
"]",
"[",
"len",
"(",
"name",
")",
"+",
"1",
":",
"]",
"return",
"''",
".",
"join",
"(",
"lst",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/rfc822.py#L270-L283 |
|
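`rfc822` was removed in Python 3, so a sketch of `getrawheader` has to run under Python 2, as this file does:

```python
import rfc822
import StringIO  # Python 2 module

msg = rfc822.Message(StringIO.StringIO("Subject:  Hello\n\nbody\n"))
# Only the "Subject:" keyword is stripped; leading whitespace and the
# trailing newline are kept, exactly as the docstring promises.
print(repr(msg.getrawheader("Subject")))  # -> '  Hello\n'
```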
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | samples/pydocview/FindService.py | python | FindService.OnFindClose | (self, event) | Cleanup handles when find/replace dialog is closed | Cleanup handles when find/replace dialog is closed | [
"Cleanup",
"handles",
"when",
"find",
"/",
"replace",
"dialog",
"is",
"closed"
] | def OnFindClose(self, event):
""" Cleanup handles when find/replace dialog is closed """
if self._findDialog != None:
self._findDialog = None
elif self._replaceDialog != None:
self._replaceDialog = None | [
"def",
"OnFindClose",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"_findDialog",
"!=",
"None",
":",
"self",
".",
"_findDialog",
"=",
"None",
"elif",
"self",
".",
"_replaceDialog",
"!=",
"None",
":",
"self",
".",
"_replaceDialog",
"=",
"None"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/pydocview/FindService.py#L138-L143 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/prompt-toolkit/py3/prompt_toolkit/layout/containers.py | python | WindowRenderInfo.first_visible_line | (self, after_scroll_offset: bool = False) | Return the line number (0 based) of the input document that corresponds
with the first visible line. | Return the line number (0 based) of the input document that corresponds
with the first visible line. | [
"Return",
"the",
"line",
"number",
"(",
"0",
"based",
")",
"of",
"the",
"input",
"document",
"that",
"corresponds",
"with",
"the",
"first",
"visible",
"line",
"."
] | def first_visible_line(self, after_scroll_offset: bool = False) -> int:
"""
Return the line number (0 based) of the input document that corresponds
with the first visible line.
"""
if after_scroll_offset:
return self.displayed_lines[self.applied_scroll_offsets.top]
else:
return self.displayed_lines[0] | [
"def",
"first_visible_line",
"(",
"self",
",",
"after_scroll_offset",
":",
"bool",
"=",
"False",
")",
"->",
"int",
":",
"if",
"after_scroll_offset",
":",
"return",
"self",
".",
"displayed_lines",
"[",
"self",
".",
"applied_scroll_offsets",
".",
"top",
"]",
"else",
":",
"return",
"self",
".",
"displayed_lines",
"[",
"0",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/layout/containers.py#L1242-L1250 |
||
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/cudf/cudf/core/abc.py | python | Serializable.device_serialize | (self) | return header, frames | Serialize data and metadata associated with device memory.
Returns
-------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
:meta private: | Serialize data and metadata associated with device memory. | [
"Serialize",
"data",
"and",
"metadata",
"associated",
"with",
"device",
"memory",
"."
] | def device_serialize(self):
"""Serialize data and metadata associated with device memory.
Returns
-------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
:meta private:
"""
header, frames = self.serialize()
assert all(
(type(f) in [cudf.core.buffer.Buffer, memoryview]) for f in frames
)
header["type-serialized"] = pickle.dumps(type(self))
header["is-cuda"] = [
hasattr(f, "__cuda_array_interface__") for f in frames
]
header["lengths"] = [f.nbytes for f in frames]
return header, frames | [
"def",
"device_serialize",
"(",
"self",
")",
":",
"header",
",",
"frames",
"=",
"self",
".",
"serialize",
"(",
")",
"assert",
"all",
"(",
"(",
"type",
"(",
"f",
")",
"in",
"[",
"cudf",
".",
"core",
".",
"buffer",
".",
"Buffer",
",",
"memoryview",
"]",
")",
"for",
"f",
"in",
"frames",
")",
"header",
"[",
"\"type-serialized\"",
"]",
"=",
"pickle",
".",
"dumps",
"(",
"type",
"(",
"self",
")",
")",
"header",
"[",
"\"is-cuda\"",
"]",
"=",
"[",
"hasattr",
"(",
"f",
",",
"\"__cuda_array_interface__\"",
")",
"for",
"f",
"in",
"frames",
"]",
"header",
"[",
"\"lengths\"",
"]",
"=",
"[",
"f",
".",
"nbytes",
"for",
"f",
"in",
"frames",
"]",
"return",
"header",
",",
"frames"
] | https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/abc.py#L85-L106 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/typing.py | python | _SpecialForm.__new__ | (cls, *args, **kwds) | return super().__new__(cls) | Constructor.
This only exists to give a better error message in case
someone tries to subclass a special typing object (not a good idea). | Constructor. | [
"Constructor",
"."
] | def __new__(cls, *args, **kwds):
"""Constructor.
This only exists to give a better error message in case
someone tries to subclass a special typing object (not a good idea).
"""
if (len(args) == 3 and
isinstance(args[0], str) and
isinstance(args[1], tuple)):
# Close enough.
raise TypeError(f"Cannot subclass {cls!r}")
return super().__new__(cls) | [
"def",
"__new__",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"(",
"len",
"(",
"args",
")",
"==",
"3",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"str",
")",
"and",
"isinstance",
"(",
"args",
"[",
"1",
"]",
",",
"tuple",
")",
")",
":",
"# Close enough.",
"raise",
"TypeError",
"(",
"f\"Cannot subclass {cls!r}\"",
")",
"return",
"super",
"(",
")",
".",
"__new__",
"(",
"cls",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/typing.py#L300-L311 |
|
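An illustration of the guard, as it behaves in the Python 3.7 `typing` module this file ships:

```python
import typing

try:
    class Maybe(typing.Optional):  # a base that is a _SpecialForm instance
        pass                       # routes class creation through __new__ above
except TypeError as exc:
    print(exc)  # "Cannot subclass <class 'typing._SpecialForm'>"
```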
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/rfc822.py | python | Message.getfirstmatchingheader | (self, name) | return lst | Get the first header line matching name.
This is similar to getallmatchingheaders, but it returns only the
first matching header (and its continuation lines). | Get the first header line matching name. | [
"Get",
"the",
"first",
"header",
"line",
"matching",
"name",
"."
] | def getfirstmatchingheader(self, name):
"""Get the first header line matching name.
This is similar to getallmatchingheaders, but it returns only the
first matching header (and its continuation lines).
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.headers:
if hit:
if not line[:1].isspace():
break
elif line[:n].lower() == name:
hit = 1
if hit:
lst.append(line)
return lst | [
"def",
"getfirstmatchingheader",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"+",
"':'",
"n",
"=",
"len",
"(",
"name",
")",
"lst",
"=",
"[",
"]",
"hit",
"=",
"0",
"for",
"line",
"in",
"self",
".",
"headers",
":",
"if",
"hit",
":",
"if",
"not",
"line",
"[",
":",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"break",
"elif",
"line",
"[",
":",
"n",
"]",
".",
"lower",
"(",
")",
"==",
"name",
":",
"hit",
"=",
"1",
"if",
"hit",
":",
"lst",
".",
"append",
"(",
"line",
")",
"return",
"lst"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/rfc822.py#L250-L268 |
|
nest/nest-simulator | f2623eb78518cdbd55e77e0ed486bf1111bcb62f | pynest/nest/lib/hl_api_simulation.py | python | Cleanup | () | Cleans up resources after a `Run` call. Not needed for `Simulate`.
Closes state for a series of runs, such as flushing and closing files.
A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
See Also
--------
Run, Prepare | Cleans up resources after a `Run` call. Not needed for `Simulate`. | [
"Cleans",
"up",
"resources",
"after",
"a",
"Run",
"call",
".",
"Not",
"needed",
"for",
"Simulate",
"."
] | def Cleanup():
"""Cleans up resources after a `Run` call. Not needed for `Simulate`.
Closes state for a series of runs, such as flushing and closing files.
A `Prepare` is needed after a `Cleanup` before any more calls to `Run`.
See Also
--------
Run, Prepare
"""
sr('Cleanup') | [
"def",
"Cleanup",
"(",
")",
":",
"sr",
"(",
"'Cleanup'",
")"
] | https://github.com/nest/nest-simulator/blob/f2623eb78518cdbd55e77e0ed486bf1111bcb62f/pynest/nest/lib/hl_api_simulation.py#L123-L134 |
||
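A minimal sketch of the Prepare/Run/Cleanup cycle this docstring describes, assuming PyNEST is installed and a network has already been built:

```python
import nest

nest.Prepare()          # open state for a series of Run calls
for _ in range(10):
    nest.Run(100.0)     # advance the simulation 100 ms per call
nest.Cleanup()          # flush/close files; call Prepare() again before more Run()s
```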
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_windows.py | python | PreviewFrame.CreateControlBar | (*args, **kwargs) | return _windows_.PreviewFrame_CreateControlBar(*args, **kwargs) | CreateControlBar(self) | CreateControlBar(self) | [
"CreateControlBar",
"(",
"self",
")"
] | def CreateControlBar(*args, **kwargs):
"""CreateControlBar(self)"""
return _windows_.PreviewFrame_CreateControlBar(*args, **kwargs) | [
"def",
"CreateControlBar",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"PreviewFrame_CreateControlBar",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_windows.py#L5497-L5499 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/ops/init_ops.py | python | glorot_normal_initializer | (seed=None, dtype=dtypes.float32) | return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="normal",
seed=seed,
dtype=dtype) | The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer. | The Glorot normal initializer, also called Xavier normal initializer. | [
"The",
"Glorot",
"normal",
"initializer",
"also",
"called",
"Xavier",
"normal",
"initializer",
"."
] | def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="normal",
seed=seed,
dtype=dtype) | [
"def",
"glorot_normal_initializer",
"(",
"seed",
"=",
"None",
",",
"dtype",
"=",
"dtypes",
".",
"float32",
")",
":",
"return",
"variance_scaling_initializer",
"(",
"scale",
"=",
"1.0",
",",
"mode",
"=",
"\"fan_avg\"",
",",
"distribution",
"=",
"\"normal\"",
",",
"seed",
"=",
"seed",
",",
"dtype",
"=",
"dtype",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/init_ops.py#L553-L576 |
|
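A hedged usage sketch with the TF1-era API this fork tracks; for a 784x256 weight tensor the truncated normal's stddev is sqrt(2 / (784 + 256)) ~= 0.0439:

```python
import tensorflow as tf  # TF1-era API

init = tf.glorot_normal_initializer(seed=42)
w = tf.get_variable("w", shape=[784, 256], initializer=init)
```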
funnyzhou/Adaptive_Feeding | 9c78182331d8c0ea28de47226e805776c638d46f | lib/datasets/pascal_voc.py | python | pascal_voc._load_image_set_index | (self) | return image_index | Load the indexes listed in this dataset's image set file. | Load the indexes listed in this dataset's image set file. | [
"Load",
"the",
"indexes",
"listed",
"in",
"this",
"dataset",
"s",
"image",
"set",
"file",
"."
] | def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index | [
"def",
"_load_image_set_index",
"(",
"self",
")",
":",
"# Example path to image set file:",
"# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt",
"image_set_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_path",
",",
"'ImageSets'",
",",
"'Main'",
",",
"self",
".",
"_image_set",
"+",
"'.txt'",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"image_set_file",
")",
",",
"'Path does not exist: {}'",
".",
"format",
"(",
"image_set_file",
")",
"with",
"open",
"(",
"image_set_file",
")",
"as",
"f",
":",
"image_index",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"return",
"image_index"
] | https://github.com/funnyzhou/Adaptive_Feeding/blob/9c78182331d8c0ea28de47226e805776c638d46f/lib/datasets/pascal_voc.py#L73-L85 |
|
generalized-intelligence/GAAS | 29ab17d3e8a4ba18edef3a57c36d8db6329fac73 | algorithms/src/SystemManagement/json_request_response_lib/src/third_party/nlohmann_json/third_party/cpplint/cpplint.py | python | FileInfo.BaseName | (self) | return self.Split()[1] | File base name - text after the final slash, before the final period. | File base name - text after the final slash, before the final period. | [
"File",
"base",
"name",
"-",
"text",
"after",
"the",
"final",
"slash",
"before",
"the",
"final",
"period",
"."
] | def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1] | [
"def",
"BaseName",
"(",
"self",
")",
":",
"return",
"self",
".",
"Split",
"(",
")",
"[",
"1",
"]"
] | https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/algorithms/src/SystemManagement/json_request_response_lib/src/third_party/nlohmann_json/third_party/cpplint/cpplint.py#L1384-L1386 |
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/USANSReduction.py | python | USANSReduction._load_data | (self) | return total_points | Load data and go through each file to determine how many points
will have to be dealt with. | Load data and go through each file to determine how many points
will have to be dealt with. | [
"Load",
"data",
"and",
"go",
"through",
"each",
"file",
"to",
"determine",
"how",
"many",
"points",
"will",
"have",
"to",
"be",
"dealt",
"with",
"."
] | def _load_data(self):
"""
Load data and go through each file to determine how many points
will have to be dealt with.
"""
# Load the empty run
empty_run = self.getProperty("EmptyRun").value
Load(Filename='USANS_%s' % empty_run, LoadMonitors=True, OutputWorkspace='__empty')
# A simple Load doesn't load the instrument properties correctly with our test file
# Reload the instrument for now
LoadInstrument(Workspace='__empty', InstrumentName='USANS', RewriteSpectraMap=False)
# For testing, we may have to load the monitors by hand
if not mtd.doesExist('__empty_monitors'):
Load(Filename=self._find_monitors(empty_run), OutputWorkspace='__empty_monitors')
# Get the wavelength peak positions
wl_cfg_str = mtd['__empty'].getInstrument().getStringParameter("wavelength_config")[0]
self.wl_list = json.loads(wl_cfg_str)
# Get the runs to reduce
run_list = self.getProperty("RunNumbers").value
# Total number of measurements per wavelength peak
total_points = 0
# Load all files so we can determine how many points we have
self.data_files = []
for item in run_list:
ws_name = '__sample_%s' % item
Load(Filename='USANS_%s' % item, LoadMonitors=True, OutputWorkspace=ws_name)
# For testing, we may have to load the monitors by hand
if not mtd.doesExist(ws_name+'_monitors'):
Load(Filename=self._find_monitors(empty_run), OutputWorkspace=ws_name+'_monitors')
# Determine whether we are putting together multiple files or whether
# we will be looking for scan_index markers.
is_scan = False
max_index = 1
if mtd[ws_name].getRun().hasProperty('scan_index'):
scan_index = mtd[ws_name].getRun().getProperty("scan_index").value
if len(scan_index)>0:
_max_index = scan_index.getStatistics().maximum
if _max_index>0:
max_index = _max_index
is_scan = True
# Append the info for when we do the reduction
self.data_files.append(self.DataFile(workspace=ws_name,
monitor=ws_name+'_monitors',
empty='__empty',
empty_monitor='__empty_monitors',
is_scan=is_scan,
max_index=max_index))
total_points += max_index
return total_points | [
"def",
"_load_data",
"(",
"self",
")",
":",
"# Load the empty run",
"empty_run",
"=",
"self",
".",
"getProperty",
"(",
"\"EmptyRun\"",
")",
".",
"value",
"Load",
"(",
"Filename",
"=",
"'USANS_%s'",
"%",
"empty_run",
",",
"LoadMonitors",
"=",
"True",
",",
"OutputWorkspace",
"=",
"'__empty'",
")",
"# A simple Load doesn't load the instrument properties correctly with our test file",
"# Reload the instrument for now",
"LoadInstrument",
"(",
"Workspace",
"=",
"'__empty'",
",",
"InstrumentName",
"=",
"'USANS'",
",",
"RewriteSpectraMap",
"=",
"False",
")",
"# For testing, we may have to load the monitors by hand",
"if",
"not",
"mtd",
".",
"doesExist",
"(",
"'__empty_monitors'",
")",
":",
"Load",
"(",
"Filename",
"=",
"self",
".",
"_find_monitors",
"(",
"empty_run",
")",
",",
"OutputWorkspace",
"=",
"'__empty_monitors'",
")",
"# Get the wavelength peak positions",
"wl_cfg_str",
"=",
"mtd",
"[",
"'__empty'",
"]",
".",
"getInstrument",
"(",
")",
".",
"getStringParameter",
"(",
"\"wavelength_config\"",
")",
"[",
"0",
"]",
"self",
".",
"wl_list",
"=",
"json",
".",
"loads",
"(",
"wl_cfg_str",
")",
"# Get the runs to reduce",
"run_list",
"=",
"self",
".",
"getProperty",
"(",
"\"RunNumbers\"",
")",
".",
"value",
"# Total number of measurements per wavelength peak",
"total_points",
"=",
"0",
"# Load all files so we can determine how many points we have",
"self",
".",
"data_files",
"=",
"[",
"]",
"for",
"item",
"in",
"run_list",
":",
"ws_name",
"=",
"'__sample_%s'",
"%",
"item",
"Load",
"(",
"Filename",
"=",
"'USANS_%s'",
"%",
"item",
",",
"LoadMonitors",
"=",
"True",
",",
"OutputWorkspace",
"=",
"ws_name",
")",
"# For testing, we may have to load the monitors by hand",
"if",
"not",
"mtd",
".",
"doesExist",
"(",
"ws_name",
"+",
"'_monitors'",
")",
":",
"Load",
"(",
"Filename",
"=",
"self",
".",
"_find_monitors",
"(",
"empty_run",
")",
",",
"OutputWorkspace",
"=",
"ws_name",
"+",
"'_monitors'",
")",
"# Determine whether we are putting together multiple files or whether",
"# we will be looking for scan_index markers.",
"is_scan",
"=",
"False",
"max_index",
"=",
"1",
"if",
"mtd",
"[",
"ws_name",
"]",
".",
"getRun",
"(",
")",
".",
"hasProperty",
"(",
"'scan_index'",
")",
":",
"scan_index",
"=",
"mtd",
"[",
"ws_name",
"]",
".",
"getRun",
"(",
")",
".",
"getProperty",
"(",
"\"scan_index\"",
")",
".",
"value",
"if",
"len",
"(",
"scan_index",
")",
">",
"0",
":",
"_max_index",
"=",
"scan_index",
".",
"getStatistics",
"(",
")",
".",
"maximum",
"if",
"_max_index",
">",
"0",
":",
"max_index",
"=",
"_max_index",
"is_scan",
"=",
"True",
"# Append the info for when we do the reduction",
"self",
".",
"data_files",
".",
"append",
"(",
"self",
".",
"DataFile",
"(",
"workspace",
"=",
"ws_name",
",",
"monitor",
"=",
"ws_name",
"+",
"'_monitors'",
",",
"empty",
"=",
"'__empty'",
",",
"empty_monitor",
"=",
"'__empty_monitors'",
",",
"is_scan",
"=",
"is_scan",
",",
"max_index",
"=",
"max_index",
")",
")",
"total_points",
"+=",
"max_index",
"return",
"total_points"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/USANSReduction.py#L73-L129 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/boost/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py | python | fix_input_files_for_numbered_seq | (sourceDir, suffix, timestamp, containers) | Fixes files used as input when pre-processing MPL-containers in their numbered form. | Fixes files used as input when pre-processing MPL-containers in their numbered form. | [
"Fixes",
"files",
"used",
"as",
"input",
"when",
"pre",
"-",
"processing",
"MPL",
"-",
"containers",
"in",
"their",
"numbered",
"form",
"."
] | def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers):
"""Fixes files used as input when pre-processing MPL-containers in their numbered form."""
# Fix input files for each MPL-container type.
for container in containers:
files = glob.glob( os.path.join( sourceDir, container, container + '*' + suffix ) )
for currentFile in sorted( files ):
fix_header_comment( currentFile, timestamp ) | [
"def",
"fix_input_files_for_numbered_seq",
"(",
"sourceDir",
",",
"suffix",
",",
"timestamp",
",",
"containers",
")",
":",
"# Fix input files for each MPL-container type.",
"for",
"container",
"in",
"containers",
":",
"files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"container",
",",
"container",
"+",
"'*'",
"+",
"suffix",
")",
")",
"for",
"currentFile",
"in",
"sorted",
"(",
"files",
")",
":",
"fix_header_comment",
"(",
"currentFile",
",",
"timestamp",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/boost/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L114-L120 |
||
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBModuleSpec.SetTriple | (self, *args) | return _lldb.SBModuleSpec_SetTriple(self, *args) | SetTriple(self, str triple) | SetTriple(self, str triple) | [
"SetTriple",
"(",
"self",
"str",
"triple",
")"
] | def SetTriple(self, *args):
"""SetTriple(self, str triple)"""
return _lldb.SBModuleSpec_SetTriple(self, *args) | [
"def",
"SetTriple",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_lldb",
".",
"SBModuleSpec_SetTriple",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L6560-L6562 |
|
microsoft/ELL | a1d6bacc37a14879cc025d9be2ba40b1a0632315 | docs/tutorials/shared/tutorial_helpers.py | python | non_max_suppression | (regions, overlap_threshold, categories) | return final_regions | Given a list of `regions` (returned by a call to `get_regions`), remove
any overlapping bounding boxes for the same object.
`overlap_threshold` is the minimum amount of overlap needed between two
boxes for them to be considered the same.
`categories` is a list of categories that represent the type of objects to
be detected. | Given a list of `regions` (returned by a call to `get_regions`), remove
any overlapping bounding boxes for the same object. | [
"Given",
"a",
"list",
"of",
"regions",
"(",
"returned",
"by",
"a",
"call",
"to",
"get_regions",
")",
"remove",
"any",
"overlapping",
"bounding",
"boxes",
"for",
"the",
"same",
"object",
"."
] | def non_max_suppression(regions, overlap_threshold, categories):
"""Given a list of `regions` (returned by a call to `get_regions`), remove
any overlapping bounding boxes for the same object.
`overlap_threshold` is the minimum amount of overlap needed between two
boxes for them to be considered the same.
`categories` is a list of categories that represent the type of objects to
be detected.
"""
if not regions:
# If list of regions is empty, return an empty list
return []
final_regions = []
for c in categories:
# Only look at regions that have found the same object
filtered_regions = [
region for region in regions if region.category == c]
if len(filtered_regions) < 2:
continue
# Get all the boxes that represent the object in question
boxes = np.array([region.location for region in filtered_regions])
# Calculate the areas once
areas = boxes[:, 2] * boxes[:, 3]
# `argsort` returns the indices in ascending order
# We want the regions with the highest probability to be considered
# the "main" regions
sorted_indices = np.argsort([r.probability for r in filtered_regions])
pick = []
while len(sorted_indices):
last = len(sorted_indices) - 1
i = sorted_indices[last]
pick.append(i)
suppress = [last]
for pos in range(last):
j = sorted_indices[pos]
intersection = filtered_regions[i].intersect(filtered_regions[j])
overlap_width = intersection[2]
overlap_height = intersection[3]
# If either `overlap_width` or `overlap_height` is `<= 0`
# then that means there's no overlap
if overlap_width > 0 and overlap_height > 0:
overlap1 = overlap_width * overlap_height / areas[j]
overlap2 = overlap_width * overlap_height / areas[i]
# If there's enough overlap, we want to remove this box
if overlap1 > overlap_threshold and overlap2 > overlap_threshold:
suppress.append(pos)
# Remove all the values that are at the list of indices of
# `suppress` and return the resultant array
sorted_indices = np.delete(sorted_indices, suppress)
# Append to `final_regions` the main bounding boxes
final_regions += [filtered_regions[i] for i in pick]
return final_regions | [
"def",
"non_max_suppression",
"(",
"regions",
",",
"overlap_threshold",
",",
"categories",
")",
":",
"if",
"not",
"regions",
":",
"# If list of regions is empty, return an empty list",
"return",
"[",
"]",
"final_regions",
"=",
"[",
"]",
"for",
"c",
"in",
"categories",
":",
"# Only look at regions that have found the same object",
"filtered_regions",
"=",
"[",
"region",
"for",
"region",
"in",
"regions",
"if",
"region",
".",
"category",
"==",
"c",
"]",
"if",
"len",
"(",
"filtered_regions",
")",
"<",
"2",
":",
"continue",
"# Get all the boxes that represent the object in question",
"boxes",
"=",
"np",
".",
"array",
"(",
"[",
"region",
".",
"location",
"for",
"region",
"in",
"filtered_regions",
"]",
")",
"# Calculate the areas once",
"areas",
"=",
"boxes",
"[",
":",
",",
"2",
"]",
"*",
"boxes",
"[",
":",
",",
"3",
"]",
"# `argsort` returns the indices in ascending order",
"# We want the regions with the highest probability to be considered",
"# the \"main\" regions",
"sorted_indices",
"=",
"np",
".",
"argsort",
"(",
"[",
"r",
".",
"probability",
"for",
"r",
"in",
"filtered_regions",
"]",
")",
"pick",
"=",
"[",
"]",
"while",
"len",
"(",
"sorted_indices",
")",
":",
"last",
"=",
"len",
"(",
"sorted_indices",
")",
"-",
"1",
"i",
"=",
"sorted_indices",
"[",
"last",
"]",
"pick",
".",
"append",
"(",
"i",
")",
"suppress",
"=",
"[",
"last",
"]",
"for",
"pos",
"in",
"range",
"(",
"last",
")",
":",
"j",
"=",
"sorted_indices",
"[",
"pos",
"]",
"intersection",
"=",
"filtered_regions",
"[",
"i",
"]",
".",
"intersect",
"(",
"filtered_regions",
"[",
"j",
"]",
")",
"overlap_width",
"=",
"intersection",
"[",
"2",
"]",
"overlap_height",
"=",
"intersection",
"[",
"3",
"]",
"# If either `overlap_width` or `overlap_height` are `<= 0`",
"# then that means there's no overlap",
"if",
"overlap_width",
">",
"0",
"and",
"overlap_height",
">",
"0",
":",
"overlap1",
"=",
"overlap_width",
"*",
"overlap_height",
"/",
"areas",
"[",
"j",
"]",
"overlap2",
"=",
"overlap_width",
"*",
"overlap_height",
"/",
"areas",
"[",
"i",
"]",
"# If there's enough overlap, we want to remove this box",
"if",
"overlap1",
">",
"overlap_threshold",
"and",
"overlap2",
">",
"overlap_threshold",
":",
"suppress",
".",
"append",
"(",
"pos",
")",
"# Remove all the values that are at the list of indices of",
"# `suppress` and return the resultant array",
"sorted_indices",
"=",
"np",
".",
"delete",
"(",
"sorted_indices",
",",
"suppress",
")",
"# Append to `final_regions` the main bounding boxes",
"final_regions",
"+=",
"[",
"filtered_regions",
"[",
"i",
"]",
"for",
"i",
"in",
"pick",
"]",
"return",
"final_regions"
] | https://github.com/microsoft/ELL/blob/a1d6bacc37a14879cc025d9be2ba40b1a0632315/docs/tutorials/shared/tutorial_helpers.py#L493-L557 |
|
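A sketch of calling `non_max_suppression` with a stand-in for the helper's `Region` type; the real class lives elsewhere in `tutorial_helpers`, so this stub only provides the attributes the function actually touches (`category`, `probability`, `location`, `intersect`):

```python
import numpy as np
from tutorial_helpers import non_max_suppression  # or paste the function above

class FakeRegion:
    # Stand-in for the helper's Region type.
    def __init__(self, category, probability, location):
        self.category = category
        self.probability = probability
        self.location = np.array(location)  # (x, y, width, height)

    def intersect(self, other):
        # Overlap rectangle, also as (x, y, width, height).
        x = max(self.location[0], other.location[0])
        y = max(self.location[1], other.location[1])
        w = min(self.location[0] + self.location[2],
                other.location[0] + other.location[2]) - x
        h = min(self.location[1] + self.location[3],
                other.location[1] + other.location[3]) - y
        return (x, y, w, h)

regions = [FakeRegion("dog", 0.9, (10, 10, 50, 50)),
           FakeRegion("dog", 0.6, (12, 12, 50, 50))]  # near-duplicate box
print(len(non_max_suppression(regions, 0.5, ["dog"])))  # -> 1, duplicate suppressed
```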
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_gdi.py | python | GraphicsContext.DrawEllipse | (*args, **kwargs) | return _gdi_.GraphicsContext_DrawEllipse(*args, **kwargs) | DrawEllipse(self, Double x, Double y, Double w, Double h)
Draws an ellipse. | DrawEllipse(self, Double x, Double y, Double w, Double h) | [
"DrawEllipse",
"(",
"self",
"Double",
"x",
"Double",
"y",
"Double",
"w",
"Double",
"h",
")"
] | def DrawEllipse(*args, **kwargs):
"""
DrawEllipse(self, Double x, Double y, Double w, Double h)
Draws an ellipse.
"""
return _gdi_.GraphicsContext_DrawEllipse(*args, **kwargs) | [
"def",
"DrawEllipse",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsContext_DrawEllipse",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L6490-L6496 |
|
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Path/PathScripts/PathPropertyEditor.py | python | Types | () | return [t for t in _EditorFactory] | Return the types of properties supported. | Return the types of properties supported. | [
"Return",
"the",
"types",
"of",
"properties",
"supported",
"."
] | def Types():
"""Return the types of properties supported."""
return [t for t in _EditorFactory] | [
"def",
"Types",
"(",
")",
":",
"return",
"[",
"t",
"for",
"t",
"in",
"_EditorFactory",
"]"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Path/PathScripts/PathPropertyEditor.py#L229-L231 |
|
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/protobuf/python/mox.py | python | MockObject.__call__ | (self, *params, **named_params) | return mock_method(*params, **named_params) | Provide custom logic for mocking classes that are callable. | Provide custom logic for mocking classes that are callable. | [
"Provide",
"custom",
"logic",
"for",
"mocking",
"classes",
"that",
"are",
"callable",
"."
] | def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params) | [
"def",
"__call__",
"(",
"self",
",",
"*",
"params",
",",
"*",
"*",
"named_params",
")",
":",
"# Verify the class we are mocking is callable",
"callable",
"=",
"self",
".",
"_class_to_mock",
".",
"__dict__",
".",
"get",
"(",
"'__call__'",
",",
"None",
")",
"if",
"callable",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'Not callable'",
")",
"# Because the call is happening directly on this object instead of a method,",
"# the call on the mock method is made right here",
"mock_method",
"=",
"self",
".",
"_CreateMockMethod",
"(",
"'__call__'",
")",
"return",
"mock_method",
"(",
"*",
"params",
",",
"*",
"*",
"named_params",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/protobuf/python/mox.py#L490-L501 |
|
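A sketch of what this enables in mox (pymox): recording and replaying calls on a mock of a callable class:

```python
import mox

class Greeter(object):
    def __call__(self, name):
        return "hello " + name

m = mox.Mox()
greeter = m.CreateMock(Greeter)
greeter("world").AndReturn("hi world")  # record; goes through MockObject.__call__
m.ReplayAll()
print(greeter("world"))                 # -> "hi world"
m.VerifyAll()
```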
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/debug/lib/debug_data.py | python | DebugDumpDir.node_recipients | (self, node_name, is_control=False, device_name=None) | Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all recipients of the node's output, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet. | Get recipient of the given node's output according to partition graphs. | [
"Get",
"recipient",
"of",
"the",
"given",
"node",
"s",
"output",
"according",
"to",
"partition",
"graphs",
"."
] | def node_recipients(self, node_name, is_control=False, device_name=None):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all recipients of the node's output, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node recipients are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
debug_graph = self._debug_graphs[device_name]
if is_control:
return debug_graph.node_ctrl_recipients[node_name]
else:
return debug_graph.node_recipients[node_name] | [
"def",
"node_recipients",
"(",
"self",
",",
"node_name",
",",
"is_control",
"=",
"False",
",",
"device_name",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_debug_graphs",
":",
"raise",
"LookupError",
"(",
"\"Node recipients are not loaded from partition graphs yet.\"",
")",
"device_name",
"=",
"self",
".",
"_infer_device_name",
"(",
"device_name",
",",
"node_name",
")",
"debug_graph",
"=",
"self",
".",
"_debug_graphs",
"[",
"device_name",
"]",
"if",
"is_control",
":",
"return",
"debug_graph",
".",
"node_ctrl_recipients",
"[",
"node_name",
"]",
"else",
":",
"return",
"debug_graph",
".",
"node_recipients",
"[",
"node_name",
"]"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/debug/lib/debug_data.py#L1227-L1254 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/ed_search.py | python | EdSearchCtrl.OnCancel | (self, evt) | Cancels the Search Query
@param evt: SearchCtrl event | Cancels the Search Query
@param evt: SearchCtrl event | [
"Cancels",
"the",
"Search",
"Query",
"@param",
"evt",
":",
"SearchCtrl",
"event"
] | def OnCancel(self, evt):
"""Cancels the Search Query
@param evt: SearchCtrl event
"""
self.SetValue(u"")
self.ShowCancelButton(False)
evt.Skip() | [
"def",
"OnCancel",
"(",
"self",
",",
"evt",
")",
":",
"self",
".",
"SetValue",
"(",
"u\"\"",
")",
"self",
".",
"ShowCancelButton",
"(",
"False",
")",
"evt",
".",
"Skip",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_search.py#L1164-L1171 |
||
tensorflow/io | 92b44e180674a8af0e12e405530f7343e3e693e4 | tensorflow_io/python/experimental/kafka_batch_io_dataset_ops.py | python | KafkaBatchIODataset.__init__ | (
self,
topics,
group_id,
servers,
stream_timeout=-1,
message_poll_timeout=10000,
configuration=None,
internal=True,
) | Args:
topics: A `tf.string` tensor containing topic names in [topic] format.
For example: ["topic1"]
group_id: The id of the consumer group. For example: cgstream
servers: An optional list of bootstrap servers.
For example: `localhost:9092`.
stream_timeout: An optional timeout value (in milliseconds) to wait for
the new messages from kafka to be retrieved by the consumers.
By default it is set to -1 to block indefinitely.
message_poll_timeout: An optional timeout duration (in milliseconds)
after which the kafka consumer throws a timeout error while fetching
a single message. This value also represents the intervals at which
the kafka topic(s) are polled for new messages while using the `stream_timeout`.
configuration: An optional `tf.string` tensor containing
configurations in [Key=Value] format.
Global configuration: please refer to 'Global configuration properties'
in librdkafka doc. Examples include
["enable.auto.commit=false", "heartbeat.interval.ms=2000"]
Topic configuration: please refer to 'Topic configuration properties'
in librdkafka doc. Note all topic configurations should be
prefixed with `conf.topic.`. Examples include
["conf.topic.auto.offset.reset=earliest"]
Reference: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
internal: Whether the dataset is being created from within the named scope.
Default: True | Args:
topics: A `tf.string` tensor containing topic names in [topic] format.
For example: ["topic1"]
group_id: The id of the consumer group. For example: cgstream
servers: An optional list of bootstrap servers.
For example: `localhost:9092`.
stream_timeout: An optional timeout value (in milliseconds) to wait for
the new messages from kafka to be retrieved by the consumers.
By default it is set to -1 to block indefinitely.
message_poll_timeout: An optional timeout duration (in milliseconds)
after which the kafka consumer throws a timeout error while fetching
a single message. This value also represents the intervals at which
the kafka topic(s) are polled for new messages while using the `stream_timeout`.
configuration: An optional `tf.string` tensor containing
configurations in [Key=Value] format.
Global configuration: please refer to 'Global configuration properties'
in librdkafka doc. Examples include
["enable.auto.commit=false", "heartbeat.interval.ms=2000"]
Topic configuration: please refer to 'Topic configuration properties'
in librdkafka doc. Note all topic configurations should be
prefixed with `conf.topic.`. Examples include
["conf.topic.auto.offset.reset=earliest"]
Reference: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
internal: Whether the dataset is being created from within the named scope.
Default: True | [
"Args",
":",
"topics",
":",
"A",
"tf",
".",
"string",
"tensor",
"containing",
"topic",
"names",
"in",
"[",
"topic",
"]",
"format",
".",
"For",
"example",
":",
"[",
"topic1",
"]",
"group_id",
":",
"The",
"id",
"of",
"the",
"consumer",
"group",
".",
"For",
"example",
":",
"cgstream",
"servers",
":",
"An",
"optional",
"list",
"of",
"bootstrap",
"servers",
".",
"For",
"example",
":",
"localhost",
":",
"9092",
".",
"stream_timeout",
":",
"An",
"optional",
"timeout",
"value",
"(",
"in",
"milliseconds",
")",
"to",
"wait",
"for",
"the",
"new",
"messages",
"from",
"kafka",
"to",
"be",
"retrieved",
"by",
"the",
"consumers",
".",
"By",
"default",
"it",
"is",
"set",
"to",
"-",
"1",
"to",
"block",
"indefinitely",
".",
"message_poll_timeout",
":",
"An",
"optional",
"timeout",
"duration",
"(",
"in",
"milliseconds",
")",
"after",
"which",
"the",
"kafka",
"consumer",
"throws",
"a",
"timeout",
"error",
"while",
"fetching",
"a",
"single",
"message",
".",
"This",
"value",
"also",
"represents",
"the",
"intervals",
"at",
"which",
"the",
"kafka",
"topic",
"(",
"s",
")",
"are",
"polled",
"for",
"new",
"messages",
"while",
"using",
"the",
"stream_timeout",
".",
"configuration",
":",
"An",
"optional",
"tf",
".",
"string",
"tensor",
"containing",
"configurations",
"in",
"[",
"Key",
"=",
"Value",
"]",
"format",
".",
"Global",
"configuration",
":",
"please",
"refer",
"to",
"Global",
"configuration",
"properties",
"in",
"librdkafka",
"doc",
".",
"Examples",
"include",
"[",
"enable",
".",
"auto",
".",
"commit",
"=",
"false",
"heartbeat",
".",
"interval",
".",
"ms",
"=",
"2000",
"]",
"Topic",
"configuration",
":",
"please",
"refer",
"to",
"Topic",
"configuration",
"properties",
"in",
"librdkafka",
"doc",
".",
"Note",
"all",
"topic",
"configurations",
"should",
"be",
"prefixed",
"with",
"conf",
".",
"topic",
".",
".",
"Examples",
"include",
"[",
"conf",
".",
"topic",
".",
"auto",
".",
"offset",
".",
"reset",
"=",
"earliest",
"]",
"Reference",
":",
"https",
":",
"//",
"github",
".",
"com",
"/",
"edenhill",
"/",
"librdkafka",
"/",
"blob",
"/",
"master",
"/",
"CONFIGURATION",
".",
"md",
"internal",
":",
"Whether",
"the",
"dataset",
"is",
"being",
"created",
"from",
"within",
"the",
"named",
"scope",
".",
"Default",
":",
"True"
] | def __init__(
self,
topics,
group_id,
servers,
stream_timeout=-1,
message_poll_timeout=10000,
configuration=None,
internal=True,
):
"""
Args:
topics: A `tf.string` tensor containing topic names in [topic] format.
For example: ["topic1"]
group_id: The id of the consumer group. For example: cgstream
servers: An optional list of bootstrap servers.
For example: `localhost:9092`.
stream_timeout: An optional timeout value (in milliseconds) to wait for
the new messages from kafka to be retrieved by the consumers.
By default it is set to -1 to block indefinitely.
message_poll_timeout: An optional timeout duration (in milliseconds)
after which the kafka consumer throws a timeout error while fetching
a single message. This value also represents the intervals at which
the kafka topic(s) are polled for new messages while using the `stream_timeout`.
configuration: An optional `tf.string` tensor containing
configurations in [Key=Value] format.
Global configuration: please refer to 'Global configuration properties'
in librdkafka doc. Examples include
["enable.auto.commit=false", "heartbeat.interval.ms=2000"]
Topic configuration: please refer to 'Topic configuration properties'
in librdkafka doc. Note all topic configurations should be
prefixed with `conf.topic.`. Examples include
["conf.topic.auto.offset.reset=earliest"]
Reference: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
internal: Whether the dataset is being created from within the named scope.
Default: True
"""
with tf.name_scope("KafkaBatchIODataset"):
assert internal
if stream_timeout == -1:
stream_timeout = sys.maxsize
elif stream_timeout >= 0:
# Taking the max of `stream_timeout` and `message_poll_timeout`
# to prevent the user from bothering about the underlying polling
# mechanism.
stream_timeout = max(stream_timeout, message_poll_timeout)
else:
raise ValueError(
"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.".format(
stream_timeout
)
)
metadata = list(configuration or [])
if group_id is not None:
metadata.append("group.id=%s" % group_id)
if servers is not None:
metadata.append("bootstrap.servers=%s" % servers)
resource = core_ops.io_kafka_group_readable_init(
topics=topics, metadata=metadata
)
self._resource = resource
dataset = tf.data.experimental.Counter()
dataset = dataset.map(
lambda i: core_ops.io_kafka_group_readable_next(
input=self._resource,
index=i,
message_poll_timeout=message_poll_timeout,
stream_timeout=stream_timeout,
)
)
dataset = dataset.apply(
tf.data.experimental.take_while(
lambda v: tf.greater(v.continue_fetch, 0)
)
)
dataset = dataset.map(
lambda v: tf.data.Dataset.zip(
(
tf.data.Dataset.from_tensor_slices(v.message),
tf.data.Dataset.from_tensor_slices(v.key),
)
)
)
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) | [
"def",
"__init__",
"(",
"self",
",",
"topics",
",",
"group_id",
",",
"servers",
",",
"stream_timeout",
"=",
"-",
"1",
",",
"message_poll_timeout",
"=",
"10000",
",",
"configuration",
"=",
"None",
",",
"internal",
"=",
"True",
",",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"KafkaBatchIODataset\"",
")",
":",
"assert",
"internal",
"if",
"stream_timeout",
"==",
"-",
"1",
":",
"stream_timeout",
"=",
"sys",
".",
"maxsize",
"elif",
"stream_timeout",
">=",
"0",
":",
"# Taking the max of `stream_timeout` and `message_poll_timeout`",
"# to prevent the user from bothering about the underlying polling",
"# mechanism.",
"stream_timeout",
"=",
"max",
"(",
"stream_timeout",
",",
"message_poll_timeout",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\"",
".",
"format",
"(",
"stream_timeout",
")",
")",
"metadata",
"=",
"list",
"(",
"configuration",
"or",
"[",
"]",
")",
"if",
"group_id",
"is",
"not",
"None",
":",
"metadata",
".",
"append",
"(",
"\"group.id=%s\"",
"%",
"group_id",
")",
"if",
"servers",
"is",
"not",
"None",
":",
"metadata",
".",
"append",
"(",
"\"bootstrap.servers=%s\"",
"%",
"servers",
")",
"resource",
"=",
"core_ops",
".",
"io_kafka_group_readable_init",
"(",
"topics",
"=",
"topics",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"_resource",
"=",
"resource",
"dataset",
"=",
"tf",
".",
"data",
".",
"experimental",
".",
"Counter",
"(",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"lambda",
"i",
":",
"core_ops",
".",
"io_kafka_group_readable_next",
"(",
"input",
"=",
"self",
".",
"_resource",
",",
"index",
"=",
"i",
",",
"message_poll_timeout",
"=",
"message_poll_timeout",
",",
"stream_timeout",
"=",
"stream_timeout",
",",
")",
")",
"dataset",
"=",
"dataset",
".",
"apply",
"(",
"tf",
".",
"data",
".",
"experimental",
".",
"take_while",
"(",
"lambda",
"v",
":",
"tf",
".",
"greater",
"(",
"v",
".",
"continue_fetch",
",",
"0",
")",
")",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"lambda",
"v",
":",
"tf",
".",
"data",
".",
"Dataset",
".",
"zip",
"(",
"(",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"v",
".",
"message",
")",
",",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"v",
".",
"key",
")",
",",
")",
")",
")",
"self",
".",
"_dataset",
"=",
"dataset",
"super",
"(",
")",
".",
"__init__",
"(",
"self",
".",
"_dataset",
".",
"_variant_tensor",
")"
] | https://github.com/tensorflow/io/blob/92b44e180674a8af0e12e405530f7343e3e693e4/tensorflow_io/python/experimental/kafka_batch_io_dataset_ops.py#L70-L158 |
||
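A minimal consumption sketch for the batched Kafka dataset defined above. The `tfio.experimental.streaming` import path, broker address, topic, and consumer-group names are assumptions for illustration, not taken from this record.

```python
# Hedged usage sketch; broker, topic, and group names are assumptions.
# Each element of the dataset is itself a small dataset of
# (message, key) pairs, as built by the zip() call above.
import tensorflow_io as tfio

dataset = tfio.experimental.streaming.KafkaBatchIODataset(
    topics=["mini-batch-topic"],
    group_id="testcg",
    servers="localhost:9092",
    stream_timeout=30000,        # stop after 30 s with no new messages
    message_poll_timeout=10000,  # poll the topic every 10 s meanwhile
    configuration=["session.timeout.ms=7000"],
)
for mini_batch in dataset:
    for message, key in mini_batch:
        print(message.numpy(), key.numpy())
```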
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_windows.py | python | QueryLayoutInfoEvent.__init__ | (self, *args, **kwargs) | __init__(self, int id=0) -> QueryLayoutInfoEvent | __init__(self, int id=0) -> QueryLayoutInfoEvent | [
"__init__",
"(",
"self",
"int",
"id",
"=",
"0",
")",
"-",
">",
"QueryLayoutInfoEvent"
] | def __init__(self, *args, **kwargs):
"""__init__(self, int id=0) -> QueryLayoutInfoEvent"""
_windows_.QueryLayoutInfoEvent_swiginit(self,_windows_.new_QueryLayoutInfoEvent(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_windows_",
".",
"QueryLayoutInfoEvent_swiginit",
"(",
"self",
",",
"_windows_",
".",
"new_QueryLayoutInfoEvent",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L1954-L1956 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/calltip_w.py | python | CalltipWindow.showcontents | (self) | Create the call-tip widget. | Create the call-tip widget. | [
"Create",
"the",
"call",
"-",
"tip",
"widget",
"."
] | def showcontents(self):
"""Create the call-tip widget."""
self.label = Label(self.tipwindow, text=self.text, justify=LEFT,
background="#ffffd0", foreground="black",
relief=SOLID, borderwidth=1,
font=self.anchor_widget['font'])
self.label.pack() | [
"def",
"showcontents",
"(",
"self",
")",
":",
"self",
".",
"label",
"=",
"Label",
"(",
"self",
".",
"tipwindow",
",",
"text",
"=",
"self",
".",
"text",
",",
"justify",
"=",
"LEFT",
",",
"background",
"=",
"\"#ffffd0\"",
",",
"foreground",
"=",
"\"black\"",
",",
"relief",
"=",
"SOLID",
",",
"borderwidth",
"=",
"1",
",",
"font",
"=",
"self",
".",
"anchor_widget",
"[",
"'font'",
"]",
")",
"self",
".",
"label",
".",
"pack",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/calltip_w.py#L80-L86 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/path.py/path.py | python | Path.lstat | (self) | return os.lstat(self) | Like :meth:`stat`, but do not follow symbolic links.
.. seealso:: :meth:`stat`, :func:`os.lstat` | Like :meth:`stat`, but do not follow symbolic links. | [
"Like",
":",
"meth",
":",
"stat",
"but",
"do",
"not",
"follow",
"symbolic",
"links",
"."
] | def lstat(self):
""" Like :meth:`stat`, but do not follow symbolic links.
.. seealso:: :meth:`stat`, :func:`os.lstat`
"""
return os.lstat(self) | [
"def",
"lstat",
"(",
"self",
")",
":",
"return",
"os",
".",
"lstat",
"(",
"self",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/path.py/path.py#L1076-L1081 |
|
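A small sketch contrasting `lstat` with `stat` on a symbolic link; the temporary paths and the `from path import Path` spelling are assumptions (older path.py releases exported the class as `path`).

```python
# Sketch, POSIX only: lstat() describes the link entry itself, while
# stat() follows it to the target. Paths are created just for the demo.
import os
import tempfile
from path import Path  # assumed import spelling for this path.py vintage

d = tempfile.mkdtemp()
target = os.path.join(d, "target.txt")
link = os.path.join(d, "link")
with open(target, "w") as f:
    f.write("hello")
os.symlink(target, link)

p = Path(link)
print(p.lstat().st_size)  # size of the symlink entry
print(p.stat().st_size)   # size of the file it points to (5 bytes)
```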
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/reduction_gui/reduction/diffraction/diffraction_reduction_script.py | python | DiffractionReductionScripter.constructPythonScript | (self, paramdict) | return script | Construct python script | Construct python script | [
"Construct",
"python",
"script"
] | def constructPythonScript(self, paramdict):
""" Construct python script
"""
# 1. Obtain all information
runsetupdict = paramdict["RunSetupWidget"]
advsetupdict = paramdict["AdvancedSetupWidget"]
filterdict = paramdict["FilterSetupWidget"]
# 2. Obtain some information
datafilenames = self.getDataFileNames(runsetupdict, advsetupdict)
if len(datafilenames) == 0:
raise NotImplementedError("RunNumber cannot be neglected. ")
dofilter = self.doFiltering(filterdict)
# 3. Header
script = "from mantid.simpleapi import *\n"
script += "config['default.facility']=\"%s\"\n" % self.facility_name
script += "\n"
if dofilter:
# a) Construct python script with generating filters
for runtuple in datafilenames:
runnumber = runtuple[0]
datafilename = runtuple[1]
# print "Working on run ", str(runnumber), " in file ", datafilename
# i. Load meta data only
metadatawsname = str(datafilename.split(".")[0]+"_meta")
splitwsname = str(datafilename.split(".")[0] + "_splitters")
splitinfowsname = str(datafilename.split(".")[0] + "_splitinfo")
script += "# Load data's log only\n"
script += "Load(\n"
script += "{}Filename = '{}',\n".format(DiffractionReductionScripter.WIDTH, datafilename)
script += "{}OutputWorkspace = '{}',\n".format(DiffractionReductionScripter.WIDTH, metadatawsname)
script += "{}MetaDataOnly = True)\n".format(DiffractionReductionScripter.WIDTH)
script += "\n"
# ii. Generate event filters
script += "# Construct the event filters\n"
script += "GenerateEventsFilter(\n"
script += "{}InputWorkspace = '{}',\n".format(DiffractionReductionScripter.WIDTH, metadatawsname)
script += "{}OutputWorkspace = '{}',\n".format(DiffractionReductionScripter.WIDTH, splitwsname)
script += "{}InformationWorkspace = '{}',\n".format(DiffractionReductionScripter.WIDTH, splitinfowsname)
if filterdict["FilterByTimeMin"] != "":
script += "{}StartTime = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["FilterByTimeMin"])
if filterdict["FilterByTimeMax"] != "":
script += "{}StopTime = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["FilterByTimeMax"])
if filterdict["FilterType"] == "ByTime":
# Filter by time
script += "{}TimeInterval = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["LengthOfTimeInterval"])
script += "{}UnitOfTime = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["UnitOfTime"])
script += "{}LogName = '',\n".format(DiffractionReductionScripter.WIDTH) # intentionally empty
elif filterdict["FilterType"] == "ByLogValue":
# Filter by log value
script += "{}LogName = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["LogName"])
if filterdict["MinimumLogValue"] != "":
script += "{}MinimumLogValue = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["MinimumLogValue"])
if filterdict["MaximumLogValue"] != "":
script += "{}MaximumLogValue = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["MaximumLogValue"])
script += "{}FilterLogValueByChangingDirection = '{}',\n".format(DiffractionReductionScripter.WIDTH,
filterdict["FilterLogValueByChangingDirection"])
if filterdict["LogValueInterval"] != "":
# Filter by log value interval
script += "{}LogValueInterval = '{}',\n".format(DiffractionReductionScripter.WIDTH,
filterdict["LogValueInterval"])
script += "{}LogBoundary = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["LogBoundary"])
if filterdict["TimeTolerance"] != "":
script += "{}TimeTolerance = '{}',\n".format(DiffractionReductionScripter.WIDTH, filterdict["TimeTolerance"])
if filterdict["LogValueTolerance"] != "":
script += "{}LogValueTolerance = '{}',\n".format(DiffractionReductionScripter.WIDTH,
filterdict["LogValueTolerance"])
# ENDIF
script += ")\n"
# iii. Data reduction
script += self.buildPowderDataReductionScript(runsetupdict, advsetupdict, runnumber, splitwsname, splitinfowsname)
# ENDFOR data file names
else:
# b) Construct python script without generating filters
script += self.buildPowderDataReductionScript(runsetupdict, advsetupdict)
# ENDIF : do filter
print ("Script and Save XML to default.")
return script | [
"def",
"constructPythonScript",
"(",
"self",
",",
"paramdict",
")",
":",
"# 1. Obtain all information",
"runsetupdict",
"=",
"paramdict",
"[",
"\"RunSetupWidget\"",
"]",
"advsetupdict",
"=",
"paramdict",
"[",
"\"AdvancedSetupWidget\"",
"]",
"filterdict",
"=",
"paramdict",
"[",
"\"FilterSetupWidget\"",
"]",
"# 2. Obtain some information",
"datafilenames",
"=",
"self",
".",
"getDataFileNames",
"(",
"runsetupdict",
",",
"advsetupdict",
")",
"if",
"len",
"(",
"datafilenames",
")",
"==",
"0",
":",
"raise",
"NotImplementedError",
"(",
"\"RunNumber cannot be neglected. \"",
")",
"dofilter",
"=",
"self",
".",
"doFiltering",
"(",
"filterdict",
")",
"# 3. Header",
"script",
"=",
"\"from mantid.simpleapi import *\\n\"",
"script",
"+=",
"\"config['default.facility']=\\\"%s\\\"\\n\"",
"%",
"self",
".",
"facility_name",
"script",
"+=",
"\"\\n\"",
"if",
"dofilter",
":",
"# a) Construct python script with generating filters",
"for",
"runtuple",
"in",
"datafilenames",
":",
"runnumber",
"=",
"runtuple",
"[",
"0",
"]",
"datafilename",
"=",
"runtuple",
"[",
"1",
"]",
"# print \"Working on run \", str(runnumber), \" in file \", datafilename",
"# i. Load meta data only",
"metadatawsname",
"=",
"str",
"(",
"datafilename",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"+",
"\"_meta\"",
")",
"splitwsname",
"=",
"str",
"(",
"datafilename",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"+",
"\"_splitters\"",
")",
"splitinfowsname",
"=",
"str",
"(",
"datafilename",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"+",
"\"_splitinfo\"",
")",
"script",
"+=",
"\"# Load data's log only\\n\"",
"script",
"+=",
"\"Load(\\n\"",
"script",
"+=",
"\"{}Filename = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"datafilename",
")",
"script",
"+=",
"\"{}OutputWorkspace = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"metadatawsname",
")",
"script",
"+=",
"\"{}MetaDataOnly = True)\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
")",
"script",
"+=",
"\"\\n\"",
"# ii. Generate event filters",
"script",
"+=",
"\"# Construct the event filters\\n\"",
"script",
"+=",
"\"GenerateEventsFilter(\\n\"",
"script",
"+=",
"\"{}InputWorkspace = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"metadatawsname",
")",
"script",
"+=",
"\"{}OutputWorkspace = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"splitwsname",
")",
"script",
"+=",
"\"{}InformationWorkspace = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"splitinfowsname",
")",
"if",
"filterdict",
"[",
"\"FilterByTimeMin\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}StartTime = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"FilterByTimeMin\"",
"]",
")",
"if",
"filterdict",
"[",
"\"FilterByTimeMax\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}StopTime = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"FilterByTimeMax\"",
"]",
")",
"if",
"filterdict",
"[",
"\"FilterType\"",
"]",
"==",
"\"ByTime\"",
":",
"# Filter by time",
"script",
"+=",
"\"{}TimeInterval = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"LengthOfTimeInterval\"",
"]",
")",
"script",
"+=",
"\"{}UnitOfTime = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"UnitOfTime\"",
"]",
")",
"script",
"+=",
"\"{}LogName = '',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
")",
"# intentionally empty",
"elif",
"filterdict",
"[",
"\"FilterType\"",
"]",
"==",
"\"ByLogValue\"",
":",
"# Filter by log value",
"script",
"+=",
"\"{}LogName = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"LogName\"",
"]",
")",
"if",
"filterdict",
"[",
"\"MinimumLogValue\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}MinimumLogValue = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"MinimumLogValue\"",
"]",
")",
"if",
"filterdict",
"[",
"\"MaximumLogValue\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}MaximumLogValue = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"MaximumLogValue\"",
"]",
")",
"script",
"+=",
"\"{}FilterLogValueByChangingDirection = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"FilterLogValueByChangingDirection\"",
"]",
")",
"if",
"filterdict",
"[",
"\"LogValueInterval\"",
"]",
"!=",
"\"\"",
":",
"# Filter by log value interval",
"script",
"+=",
"\"{}LogValueInterval = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"LogValueInterval\"",
"]",
")",
"script",
"+=",
"\"{}LogBoundary = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"LogBoundary\"",
"]",
")",
"if",
"filterdict",
"[",
"\"TimeTolerance\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}TimeTolerance = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"TimeTolerance\"",
"]",
")",
"if",
"filterdict",
"[",
"\"LogValueTolerance\"",
"]",
"!=",
"\"\"",
":",
"script",
"+=",
"\"{}LogValueTolerance = '{}',\\n\"",
".",
"format",
"(",
"DiffractionReductionScripter",
".",
"WIDTH",
",",
"filterdict",
"[",
"\"LogValueTolerance\"",
"]",
")",
"# ENDIF",
"script",
"+=",
"\")\\n\"",
"# iii. Data reduction",
"script",
"+=",
"self",
".",
"buildPowderDataReductionScript",
"(",
"runsetupdict",
",",
"advsetupdict",
",",
"runnumber",
",",
"splitwsname",
",",
"splitinfowsname",
")",
"# ENDFOR data file names",
"else",
":",
"# b) Construct python script without generating filters",
"script",
"+=",
"self",
".",
"buildPowderDataReductionScript",
"(",
"runsetupdict",
",",
"advsetupdict",
")",
"# ENDIF : do filter",
"print",
"(",
"\"Script and Save XML to default.\"",
")",
"return",
"script"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/reduction_gui/reduction/diffraction/diffraction_reduction_script.py#L136-L230 |
|
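For reference, the script the method above assembles for a time-sliced run looks roughly like the sketch below; the file name, facility, and interval values are invented, while the algorithm names and keyword arguments mirror the string-building code.

```python
# Illustrative output of constructPythonScript() for a ByTime filter;
# 'PG3_1234_event.nxs', 'SNS', and the 300 s interval are made up.
from mantid.simpleapi import *
config['default.facility'] = "SNS"

# Load data's log only
Load(Filename='PG3_1234_event.nxs',
     OutputWorkspace='PG3_1234_event_meta',
     MetaDataOnly=True)

# Construct the event filters
GenerateEventsFilter(InputWorkspace='PG3_1234_event_meta',
                     OutputWorkspace='PG3_1234_event_splitters',
                     InformationWorkspace='PG3_1234_event_splitinfo',
                     TimeInterval='300',
                     UnitOfTime='Seconds')
```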
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/clang/bindings/python/clang/cindex.py | python | TranslationUnit.codeComplete | (self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False) | return None | Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects. | Code complete in this translation unit. | [
"Code",
"complete",
"in",
"this",
"translation",
"unit",
"."
] | def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
"""
Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects.
"""
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
print(value)
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = name
unsaved_files_array[i].contents = value
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None | [
"def",
"codeComplete",
"(",
"self",
",",
"path",
",",
"line",
",",
"column",
",",
"unsaved_files",
"=",
"None",
",",
"include_macros",
"=",
"False",
",",
"include_code_patterns",
"=",
"False",
",",
"include_brief_comments",
"=",
"False",
")",
":",
"options",
"=",
"0",
"if",
"include_macros",
":",
"options",
"+=",
"1",
"if",
"include_code_patterns",
":",
"options",
"+=",
"2",
"if",
"include_brief_comments",
":",
"options",
"+=",
"4",
"if",
"unsaved_files",
"is",
"None",
":",
"unsaved_files",
"=",
"[",
"]",
"unsaved_files_array",
"=",
"0",
"if",
"len",
"(",
"unsaved_files",
")",
":",
"unsaved_files_array",
"=",
"(",
"_CXUnsavedFile",
"*",
"len",
"(",
"unsaved_files",
")",
")",
"(",
")",
"for",
"i",
",",
"(",
"name",
",",
"value",
")",
"in",
"enumerate",
"(",
"unsaved_files",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# FIXME: It would be great to support an efficient version",
"# of this, one day.",
"value",
"=",
"value",
".",
"read",
"(",
")",
"print",
"(",
"value",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Unexpected unsaved file contents.'",
")",
"unsaved_files_array",
"[",
"i",
"]",
".",
"name",
"=",
"name",
"unsaved_files_array",
"[",
"i",
"]",
".",
"contents",
"=",
"value",
"unsaved_files_array",
"[",
"i",
"]",
".",
"length",
"=",
"len",
"(",
"value",
")",
"ptr",
"=",
"conf",
".",
"lib",
".",
"clang_codeCompleteAt",
"(",
"self",
",",
"path",
",",
"line",
",",
"column",
",",
"unsaved_files_array",
",",
"len",
"(",
"unsaved_files",
")",
",",
"options",
")",
"if",
"ptr",
":",
"return",
"CodeCompletionResults",
"(",
"ptr",
")",
"return",
"None"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/clang/bindings/python/clang/cindex.py#L2763-L2806 |
|
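A hedged end-to-end sketch of driving `codeComplete` with an in-memory buffer; the source snippet and completion point are invented.

```python
# Sketch: complete after "val" in an unsaved buffer. The file name
# "t.c" never exists on disk; it is only a key for the buffer mapping.
import clang.cindex

src = "int main() { int value = 0; val"
index = clang.cindex.Index.create()
tu = index.parse("t.c", unsaved_files=[("t.c", src)])
results = tu.codeComplete("t.c", 1, len(src) + 1,
                          unsaved_files=[("t.c", src)],
                          include_macros=True)
if results is not None:
    for r in results.results:
        print(r.string)
```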
mysql/mysql-workbench | 2f35f9034f015cbcd22139a60e1baa2e3e8e795c | plugins/wb.admin/backend/wb_log_reader.py | python | BaseLogFileReader._shorten_query_field | (self, data) | | return data if l <= 256 else abbr + ' [truncated, %s total]' % size | Receives a query stored in a log file and prepares it for the output in
the log viewer shortening to 256 characters and taking care of encoding issues | Receives a query stored in a log file and prepares it for the output in
the log viewer shortening to 256 characters and taking care of encoding issues | [
"Receives",
"a",
"query",
"stored",
"in",
"a",
"log",
"file",
"and",
"prepares",
"it",
"for",
"the",
"output",
"in",
"the",
"log",
"viewer",
"shortening",
"to",
"256",
"characters",
"and",
"taking",
"care",
"of",
"encoding",
"issues"
] | def _shorten_query_field(self, data):
'''
Receives a query stored in a log file and prepares it for the output in
the log viewer shortening to 256 characters and taking care of encoding issues
'''
l = len(data)
abbr = data[:256]
size = '%d bytes' % l if l < 1024 else '%.1f KB' % (l / 1024.0)
return data if l <= 256 else abbr + ' [truncated, %s total]' % size | [
"def",
"_shorten_query_field",
"(",
"self",
",",
"data",
")",
":",
"l",
"=",
"len",
"(",
"data",
")",
"abbr",
"=",
"data",
"[",
":",
"256",
"]",
"size",
"=",
"'%d bytes'",
"%",
"l",
"if",
"l",
"<",
"1024",
"else",
"'%.1f KB'",
"%",
"(",
"l",
"/",
"1024.0",
")",
"return",
"data",
"if",
"l",
"<=",
"256",
"else",
"data",
"+",
"' [truncated, %s total]'",
"%",
"size"
] | https://github.com/mysql/mysql-workbench/blob/2f35f9034f015cbcd22139a60e1baa2e3e8e795c/plugins/wb.admin/backend/wb_log_reader.py#L476-L484 |
|
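A standalone restatement of the truncation rule for illustration; `shorten` is a hypothetical helper, not part of the module above.

```python
# Hypothetical stand-in mirroring _shorten_query_field(): queries at or
# under 256 characters pass through, longer ones are cut and annotated.
def shorten(data, limit=256):
    l = len(data)
    size = '%d bytes' % l if l < 1024 else '%.1f KB' % (l / 1024.0)
    return data if l <= limit else data[:limit] + ' [truncated, %s total]' % size

print(shorten("SELECT 1"))               # unchanged
print(shorten("SELECT " + "x, " * 200))  # 256 chars + ' [truncated, 607 bytes total]'
```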
facebookresearch/habitat-sim | 63b6c71d9ca8adaefb140b198196f5d0ca1f1e34 | examples/fairmotion_interface.py | python | FairmotionInterface.load_model | (self) | Loads the model currently set by metadata. | Loads the model currently set by metadata. | [
"Loads",
"the",
"model",
"currently",
"set",
"by",
"metadata",
"."
] | def load_model(self) -> None:
"""
Loads the model currently set by metadata.
"""
# show loading text because the setup pauses here during motion load
logger.info("Loading...")
self.hide_model()
self.activity = Activity.MOTION_STAGE
# keeps the model up to date with current data target
data = self.user_metadata
# add an ArticulatedObject to the world with a fixed base
self.model = self.art_obj_mgr.add_articulated_object_from_urdf(
filepath=data["urdf_path"], fixed_base=True
)
assert self.model.is_alive
# change motion_type to KINEMATIC
self.model.motion_type = phy.MotionType.KINEMATIC
self.model.translation = self.translation_offset
self.next_pose(repeat=True)
logger.info("Done Loading.") | [
"def",
"load_model",
"(",
"self",
")",
"->",
"None",
":",
"# loading text because the setup pauses here during motion load",
"logger",
".",
"info",
"(",
"\"Loading...\"",
")",
"self",
".",
"hide_model",
"(",
")",
"self",
".",
"activity",
"=",
"Activity",
".",
"MOTION_STAGE",
"# keeps the model up to date with current data target",
"data",
"=",
"self",
".",
"user_metadata",
"# add an ArticulatedObject to the world with a fixed base",
"self",
".",
"model",
"=",
"self",
".",
"art_obj_mgr",
".",
"add_articulated_object_from_urdf",
"(",
"filepath",
"=",
"data",
"[",
"\"urdf_path\"",
"]",
",",
"fixed_base",
"=",
"True",
")",
"assert",
"self",
".",
"model",
".",
"is_alive",
"# change motion_type to KINEMATIC",
"self",
".",
"model",
".",
"motion_type",
"=",
"phy",
".",
"MotionType",
".",
"KINEMATIC",
"self",
".",
"model",
".",
"translation",
"=",
"self",
".",
"translation_offset",
"self",
".",
"next_pose",
"(",
"repeat",
"=",
"True",
")",
"logger",
".",
"info",
"(",
"\"Done Loading.\"",
")"
] | https://github.com/facebookresearch/habitat-sim/blob/63b6c71d9ca8adaefb140b198196f5d0ca1f1e34/examples/fairmotion_interface.py#L312-L335 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | PyControl.DoGetVirtualSize | (*args, **kwargs) | return _controls_.PyControl_DoGetVirtualSize(*args, **kwargs) | DoGetVirtualSize(self) -> Size | DoGetVirtualSize(self) -> Size | [
"DoGetVirtualSize",
"(",
"self",
")",
"-",
">",
"Size"
] | def DoGetVirtualSize(*args, **kwargs):
"""DoGetVirtualSize(self) -> Size"""
return _controls_.PyControl_DoGetVirtualSize(*args, **kwargs) | [
"def",
"DoGetVirtualSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"PyControl_DoGetVirtualSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L5870-L5872 |
|
PixarAnimationStudios/USD | faed18ce62c8736b02413635b584a2f637156bad | pxr/usdImaging/usdviewq/selectionDataModel.py | python | SelectionDataModel._buildPropPath | (self, primPath, propName) | return Sdf.Path(str(primPath) + "." + propName) | Build a new property path from a prim path and a property name. | Build a new property path from a prim path and a property name. | [
"Build",
"a",
"new",
"property",
"path",
"from",
"a",
"prim",
"path",
"and",
"a",
"property",
"name",
"."
] | def _buildPropPath(self, primPath, propName):
"""Build a new property path from a prim path and a property name."""
return Sdf.Path(str(primPath) + "." + propName) | [
"def",
"_buildPropPath",
"(",
"self",
",",
"primPath",
",",
"propName",
")",
":",
"return",
"Sdf",
".",
"Path",
"(",
"str",
"(",
"primPath",
")",
"+",
"\".\"",
"+",
"propName",
")"
] | https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usdImaging/usdviewq/selectionDataModel.py#L521-L524 |
|
mamedev/mame | 02cd26d37ee11191f3e311e19e805d872cb1e3a4 | scripts/build/msgfmt.py | python | generate | () | return output | Return the generated output. | Return the generated output. | [
"Return",
"the",
"generated",
"output",
"."
] | def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
output = ''
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
# Each message needs 16 index bytes: length/offset of the key and of the translated string.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tobytes()
output += ids
output += strs
return output | [
"def",
"generate",
"(",
")",
":",
"global",
"MESSAGES",
"# the keys are sorted in the .mo file",
"keys",
"=",
"sorted",
"(",
"MESSAGES",
".",
"keys",
"(",
")",
")",
"offsets",
"=",
"[",
"]",
"ids",
"=",
"strs",
"=",
"b''",
"for",
"id",
"in",
"keys",
":",
"# For each string, we need size and file offset. Each string is NUL",
"# terminated; the NUL does not count into the size.",
"offsets",
".",
"append",
"(",
"(",
"len",
"(",
"ids",
")",
",",
"len",
"(",
"id",
")",
",",
"len",
"(",
"strs",
")",
",",
"len",
"(",
"MESSAGES",
"[",
"id",
"]",
")",
")",
")",
"ids",
"+=",
"id",
"+",
"b'\\0'",
"strs",
"+=",
"MESSAGES",
"[",
"id",
"]",
"+",
"b'\\0'",
"output",
"=",
"''",
"# The header is 7 32-bit unsigned integers. We don't use hash tables, so",
"# the keys start right after the index tables.",
"# translated string.",
"keystart",
"=",
"7",
"*",
"4",
"+",
"16",
"*",
"len",
"(",
"keys",
")",
"# and the values start after the keys",
"valuestart",
"=",
"keystart",
"+",
"len",
"(",
"ids",
")",
"koffsets",
"=",
"[",
"]",
"voffsets",
"=",
"[",
"]",
"# The string table first has the list of keys, then the list of values.",
"# Each entry has first the size of the string, then the file offset.",
"for",
"o1",
",",
"l1",
",",
"o2",
",",
"l2",
"in",
"offsets",
":",
"koffsets",
"+=",
"[",
"l1",
",",
"o1",
"+",
"keystart",
"]",
"voffsets",
"+=",
"[",
"l2",
",",
"o2",
"+",
"valuestart",
"]",
"offsets",
"=",
"koffsets",
"+",
"voffsets",
"output",
"=",
"struct",
".",
"pack",
"(",
"\"Iiiiiii\"",
",",
"0x950412de",
",",
"# Magic",
"0",
",",
"# Version",
"len",
"(",
"keys",
")",
",",
"# # of entries",
"7",
"*",
"4",
",",
"# start of key index",
"7",
"*",
"4",
"+",
"len",
"(",
"keys",
")",
"*",
"8",
",",
"# start of value index",
"0",
",",
"0",
")",
"# size and offset of hash table",
"output",
"+=",
"array",
".",
"array",
"(",
"\"i\"",
",",
"offsets",
")",
".",
"tobytes",
"(",
")",
"output",
"+=",
"ids",
"output",
"+=",
"strs",
"return",
"output"
] | https://github.com/mamedev/mame/blob/02cd26d37ee11191f3e311e19e805d872cb1e3a4/scripts/build/msgfmt.py#L58-L96 |
|
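The 7-integer header written by `generate()` can be read back with the matching `struct` format; the catalog file name below is an assumption.

```python
# Sketch: unpack the 28-byte header that generate() emits. Byte order
# is the native one used by struct.pack above; 0x950412de marks a
# little-endian .mo file.
import struct

with open('messages.mo', 'rb') as f:   # assumed output of generate()
    data = f.read()
magic, version, n, key_idx, val_idx, hash_n, hash_off = \
    struct.unpack('Iiiiiii', data[:28])
assert magic == 0x950412de
print('entries:', n, 'key index at byte', key_idx)
```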
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | clang/docs/tools/dump_ast_matchers.py | python | extract_result_types | (comment) | Extracts a list of result types from the given comment.
We allow annotations in the comment of the matcher to specify what
nodes a matcher can match on. Those comments have the form:
Usable as: Any Matcher | (Matcher<T1>[, Matcher<T2>[, ...]])
Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
Returns the empty list if no 'Usable as' specification could be
parsed. | Extracts a list of result types from the given comment. | [
"Extracts",
"a",
"list",
"of",
"result",
"types",
"from",
"the",
"given",
"comment",
"."
] | def extract_result_types(comment):
"""Extracts a list of result types from the given comment.
We allow annotations in the comment of the matcher to specify what
nodes a matcher can match on. Those comments have the form:
Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
Returns the empty list if no 'Usable as' specification could be
parsed.
"""
result_types = []
m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
if m:
return ['*']
while True:
m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
if not m:
if re.search(r'Usable as:\s*$', comment):
return result_types
else:
return None
result_types += [m.group(2)]
comment = m.group(1) | [
"def",
"extract_result_types",
"(",
"comment",
")",
":",
"result_types",
"=",
"[",
"]",
"m",
"=",
"re",
".",
"search",
"(",
"r'Usable as: Any Matcher[\\s\\n]*$'",
",",
"comment",
",",
"re",
".",
"S",
")",
"if",
"m",
":",
"return",
"[",
"'*'",
"]",
"while",
"True",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(.*)Matcher<([^>]+)>\\s*,?[\\s\\n]*$'",
",",
"comment",
",",
"re",
".",
"S",
")",
"if",
"not",
"m",
":",
"if",
"re",
".",
"search",
"(",
"r'Usable as:\\s*$'",
",",
"comment",
")",
":",
"return",
"result_types",
"else",
":",
"return",
"None",
"result_types",
"+=",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"comment",
"=",
"m",
".",
"group",
"(",
"1",
")"
] | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/clang/docs/tools/dump_ast_matchers.py#L60-L83 |
||
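A worked example of the parsing above, assuming `extract_result_types` is in scope (e.g. pasted from dump_ast_matchers.py); the comment text is invented but follows the documented form.

```python
# The greedy regex peels matchers off the end of the comment, so the
# last-listed matcher is appended first.
comment = ("Matches if any of the given matchers matches.\n\n"
           "Usable as: Matcher<Stmt>, Matcher<Decl>\n")
print(extract_result_types(comment))                     # ['Decl', 'Stmt']
print(extract_result_types("Usable as: Any Matcher\n"))  # ['*']
```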
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/database.py | python | InstalledDistribution.exports | (self) | return result | Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name. | Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name. | [
"Return",
"the",
"information",
"exported",
"by",
"this",
"distribution",
".",
":",
"return",
":",
"A",
"dictionary",
"of",
"exports",
"mapping",
"an",
"export",
"category",
"to",
"a",
"dict",
"of",
":",
"class",
":",
"ExportEntry",
"instances",
"describing",
"the",
"individual",
"export",
"entries",
"and",
"keyed",
"by",
"name",
"."
] | def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result | [
"def",
"exports",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"r",
"=",
"self",
".",
"get_distinfo_resource",
"(",
"EXPORTS_FILENAME",
")",
"if",
"r",
":",
"result",
"=",
"self",
".",
"read_exports",
"(",
")",
"return",
"result"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/database.py#L584-L595 |
|
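A hedged lookup sketch using distlib's database API; 'pip' is just an example package, and most wheel installs ship no EXPORTS file, so the mapping is often empty.

```python
# Sketch: list export categories of an installed distribution, if any.
from distlib.database import DistributionPath

dp = DistributionPath(include_egg=False)
dist = dp.get_distribution('pip')  # assumed to be installed
if dist is not None:
    for category, entries in dist.exports.items():
        print(category, sorted(entries))
```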
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/message.py | python | Message.__eq__ | (self, other_msg) | Recursively compares two messages by value and structure. | Recursively compares two messages by value and structure. | [
"Recursively",
"compares",
"two",
"messages",
"by",
"value",
"and",
"structure",
"."
] | def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError | [
"def",
"__eq__",
"(",
"self",
",",
"other_msg",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/message.py#L74-L76 |
||
1989Ryan/Semantic_SLAM | 0284b3f832ca431c494f9c134fe46c40ec86ee38 | Third_Part/PSPNet_Keras_tensorflow/Semantic_Information_Publisher.py | python | Semantic_Imformation_Publisher.__init__ | (self) | node initialization | node initialization | [
"node",
"initialization"
] | def __init__(self):
'''
node initialization
'''
self._cv_bridge = CvBridge()
#self._session = tf.Session()
self.pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
weights='pspnet101_cityscapes')
#init = tf.global_variables_initializer()
#self._session.run(init)
self.graph = tf.get_default_graph()
self._sub = rospy.Subscriber('image', Image, self.callback, queue_size = 1000)
self._pub = rospy.Publisher('/result', frame, queue_size = 1) | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"_cv_bridge",
"=",
"CvBridge",
"(",
")",
"#self._session = tf.Session()",
"self",
".",
"pspnet",
"=",
"PSPNet101",
"(",
"nb_classes",
"=",
"19",
",",
"input_shape",
"=",
"(",
"713",
",",
"713",
")",
",",
"weights",
"=",
"'pspnet101_cityscapes'",
")",
"#init = tf.global_variables_initializer()",
"#self._session.run(init)",
"self",
".",
"graph",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"self",
".",
"_sub",
"=",
"rospy",
".",
"Subscriber",
"(",
"'image'",
",",
"Image",
",",
"self",
".",
"callback",
",",
"queue_size",
"=",
"1000",
")",
"self",
".",
"_pub",
"=",
"rospy",
".",
"Publisher",
"(",
"'/result'",
",",
"frame",
",",
"queue_size",
"=",
"1",
")"
] | https://github.com/1989Ryan/Semantic_SLAM/blob/0284b3f832ca431c494f9c134fe46c40ec86ee38/Third_Part/PSPNet_Keras_tensorflow/Semantic_Information_Publisher.py#L30-L42 |
||
apache/trafodion | 8455c839ad6b6d7b6e04edda5715053095b78046 | install/python-installer/scripts/traf_discover.py | python | Discover.get_cpu_cores | (self) | return self.CPUINFO.count('processor') | get CPU cores | get CPU cores | [
"get",
"CPU",
"cores"
] | def get_cpu_cores(self):
""" get CPU cores """
return self.CPUINFO.count('processor') | [
"def",
"get_cpu_cores",
"(",
"self",
")",
":",
"return",
"self",
".",
"CPUINFO",
".",
"count",
"(",
"'processor'",
")"
] | https://github.com/apache/trafodion/blob/8455c839ad6b6d7b6e04edda5715053095b78046/install/python-installer/scripts/traf_discover.py#L188-L190 |
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/tf_asymmetry_fitting/tf_asymmetry_mode_switcher_view.py | python | TFAsymmetryModeSwitcherView.__init__ | (self, parent: QWidget = None) | Initializes the TFAsymmetryModeSwitcherView. | Initializes the TFAsymmetryModeSwitcherView. | [
"Initializes",
"the",
"TFAsymmetryModeSwitcherView",
"."
] | def __init__(self, parent: QWidget = None):
"""Initializes the TFAsymmetryModeSwitcherView."""
super(TFAsymmetryModeSwitcherView, self).__init__(parent)
self.setupUi(self) | [
"def",
"__init__",
"(",
"self",
",",
"parent",
":",
"QWidget",
"=",
"None",
")",
":",
"super",
"(",
"TFAsymmetryModeSwitcherView",
",",
"self",
")",
".",
"__init__",
"(",
"parent",
")",
"self",
".",
"setupUi",
"(",
"self",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/tf_asymmetry_fitting/tf_asymmetry_mode_switcher_view.py#L22-L25 |
||
francinexue/xuefu | b6ff79747a42e020588c0c0a921048e08fe4680c | ctpx/ctp3/ctptd.py | python | CtpTd.onRspQryInstrumentOrderCommRate | (self, InstrumentOrderCommRateField, RspInfoField, requestId, final) | | Response to a request to query order commission rates | Response to a request to query order commission rates | [
"请求查询报单手续费响应"
] | def onRspQryInstrumentOrderCommRate(self, InstrumentOrderCommRateField, RspInfoField, requestId, final):
"""请求查询报单手续费响应"""
pass | [
"def",
"onRspQryInstrumentOrderCommRate",
"(",
"self",
",",
"InstrumentOrderCommRateField",
",",
"RspInfoField",
",",
"requestId",
",",
"final",
")",
":",
"pass"
] | https://github.com/francinexue/xuefu/blob/b6ff79747a42e020588c0c0a921048e08fe4680c/ctpx/ctp3/ctptd.py#L306-L308 |
||
bigartm/bigartm | 47e37f982de87aa67bfd475ff1f39da696b181b3 | 3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py | python | _AddPrivateHelperMethods | (message_descriptor, cls) | Adds implementation of private helper methods to cls. | Adds implementation of private helper methods to cls. | [
"Adds",
"implementation",
"of",
"private",
"helper",
"methods",
"to",
"cls",
"."
] | def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState | [
"def",
"_AddPrivateHelperMethods",
"(",
"message_descriptor",
",",
"cls",
")",
":",
"def",
"Modified",
"(",
"self",
")",
":",
"\"\"\"Sets the _cached_byte_size_dirty bit to true,\n and propagates this to our listener iff this was a state change.\n \"\"\"",
"# Note: Some callers check _cached_byte_size_dirty before calling",
"# _Modified() as an extra optimization. So, if this method is ever",
"# changed such that it does stuff even when _cached_byte_size_dirty is",
"# already true, the callers need to be updated.",
"if",
"not",
"self",
".",
"_cached_byte_size_dirty",
":",
"self",
".",
"_cached_byte_size_dirty",
"=",
"True",
"self",
".",
"_listener_for_children",
".",
"dirty",
"=",
"True",
"self",
".",
"_is_present_in_parent",
"=",
"True",
"self",
".",
"_listener",
".",
"Modified",
"(",
")",
"def",
"_UpdateOneofState",
"(",
"self",
",",
"field",
")",
":",
"\"\"\"Sets field as the active field in its containing oneof.\n\n Will also delete currently active field in the oneof, if it is different\n from the argument. Does not mark the message as modified.\n \"\"\"",
"other_field",
"=",
"self",
".",
"_oneofs",
".",
"setdefault",
"(",
"field",
".",
"containing_oneof",
",",
"field",
")",
"if",
"other_field",
"is",
"not",
"field",
":",
"del",
"self",
".",
"_fields",
"[",
"other_field",
"]",
"self",
".",
"_oneofs",
"[",
"field",
".",
"containing_oneof",
"]",
"=",
"field",
"cls",
".",
"_Modified",
"=",
"Modified",
"cls",
".",
"SetInParent",
"=",
"Modified",
"cls",
".",
"_UpdateOneofState",
"=",
"_UpdateOneofState"
] | https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py#L1333-L1364 |
||
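The oneof bookkeeping installed by `_UpdateOneofState` can be observed with the stock `Value` message that ships with protobuf: activating one member of its `kind` oneof deletes the previously active one.

```python
# Demonstration with google.protobuf's own Value message (oneof "kind").
from google.protobuf.struct_pb2 import Value

v = Value()
v.number_value = 1.5
print(v.WhichOneof('kind'))        # 'number_value'
v.string_value = 'hello'           # switches the active oneof member
print(v.WhichOneof('kind'))        # 'string_value'
print(v.HasField('number_value'))  # False: the old field was deleted
```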
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/bijectors/sinh_arcsinh.py | python | SinhArcsinh.tailweight | (self) | return self._tailweight | The `tailweight` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`. | The `tailweight` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`. | [
"The",
"tailweight",
"in",
":",
"Y",
"=",
"Sinh",
"((",
"Arcsinh",
"(",
"X",
")",
"+",
"skewness",
")",
"*",
"tailweight",
")",
"."
] | def tailweight(self):
"""The `tailweight` in: `Y = Sinh((Arcsinh(X) + skewness) * tailweight)`."""
return self._tailweight | [
"def",
"tailweight",
"(",
"self",
")",
":",
"return",
"self",
".",
"_tailweight"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/bijectors/sinh_arcsinh.py#L152-L154 |
|
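A numpy restatement of the forward map named in the docstring; with skewness 0 and tailweight 1 it is the identity.

```python
# Sketch of Y = sinh((arcsinh(X) + skewness) * tailweight).
import numpy as np

def sinh_arcsinh(x, skewness=0.0, tailweight=1.0):
    return np.sinh((np.arcsinh(x) + skewness) * tailweight)

x = np.linspace(-3.0, 3.0, 7)
print(sinh_arcsinh(x))                  # identity for (0, 1)
print(sinh_arcsinh(x, tailweight=2.0))  # heavier tails
```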
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/mapreduce/mapreduce/key_ranges.py | python | KeyRangesFactory.create_from_list | (cls, list_of_key_ranges) | return _KeyRangesFromList(list_of_key_ranges) | Create a KeyRanges object.
Args:
list_of_key_ranges: a list of key_range.KeyRange object.
Returns:
A _KeyRanges object. | Create a KeyRanges object. | [
"Create",
"a",
"KeyRanges",
"object",
"."
] | def create_from_list(cls, list_of_key_ranges):
"""Create a KeyRanges object.
Args:
list_of_key_ranges: a list of key_range.KeyRange object.
Returns:
A _KeyRanges object.
"""
return _KeyRangesFromList(list_of_key_ranges) | [
"def",
"create_from_list",
"(",
"cls",
",",
"list_of_key_ranges",
")",
":",
"return",
"_KeyRangesFromList",
"(",
"list_of_key_ranges",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mapreduce/mapreduce/key_ranges.py#L21-L30 |
|
facebook/openr | ed38bdfd6bf290084bfab4821b59f83e7b59315d | openr/py/openr/cli/clis/decision.py | python | DecisionRibPolicyCli.show | (cli_opts) | Show currently configured RibPolicy | Show currently configured RibPolicy | [
"Show",
"currently",
"configured",
"RibPolicy"
] | def show(cli_opts): # noqa: B902
"""
Show currently configured RibPolicy
"""
decision.DecisionRibPolicyCmd(cli_opts).run() | [
"def",
"show",
"(",
"cli_opts",
")",
":",
"# noqa: B902",
"decision",
".",
"DecisionRibPolicyCmd",
"(",
"cli_opts",
")",
".",
"run",
"(",
")"
] | https://github.com/facebook/openr/blob/ed38bdfd6bf290084bfab4821b59f83e7b59315d/openr/py/openr/cli/clis/decision.py#L157-L162 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_windows.py | python | StandardDialogLayoutAdapter.CreateScrolledWindow | (*args, **kwargs) | return _windows_.StandardDialogLayoutAdapter_CreateScrolledWindow(*args, **kwargs) | CreateScrolledWindow(self, Window parent) -> ScrolledWindow | CreateScrolledWindow(self, Window parent) -> ScrolledWindow | [
"CreateScrolledWindow",
"(",
"self",
"Window",
"parent",
")",
"-",
">",
"ScrolledWindow"
] | def CreateScrolledWindow(*args, **kwargs):
"""CreateScrolledWindow(self, Window parent) -> ScrolledWindow"""
return _windows_.StandardDialogLayoutAdapter_CreateScrolledWindow(*args, **kwargs) | [
"def",
"CreateScrolledWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"StandardDialogLayoutAdapter_CreateScrolledWindow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L979-L981 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/mixture/dpgmm.py | python | _DPGMMBase._update_means | (self, X, z) | Update the variational distributions for the means | Update the variational distributions for the means | [
"Update",
"the",
"variational",
"distributions",
"for",
"the",
"means"
] | def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0] | [
"def",
"_update_means",
"(",
"self",
",",
"X",
",",
"z",
")",
":",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"n_components",
")",
":",
"if",
"self",
".",
"covariance_type",
"in",
"[",
"'spherical'",
",",
"'diag'",
"]",
":",
"num",
"=",
"np",
".",
"sum",
"(",
"z",
".",
"T",
"[",
"k",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"*",
"X",
",",
"axis",
"=",
"0",
")",
"num",
"*=",
"self",
".",
"precs_",
"[",
"k",
"]",
"den",
"=",
"1.",
"+",
"self",
".",
"precs_",
"[",
"k",
"]",
"*",
"np",
".",
"sum",
"(",
"z",
".",
"T",
"[",
"k",
"]",
")",
"self",
".",
"means_",
"[",
"k",
"]",
"=",
"num",
"/",
"den",
"elif",
"self",
".",
"covariance_type",
"in",
"[",
"'tied'",
",",
"'full'",
"]",
":",
"if",
"self",
".",
"covariance_type",
"==",
"'tied'",
":",
"cov",
"=",
"self",
".",
"precs_",
"else",
":",
"cov",
"=",
"self",
".",
"precs_",
"[",
"k",
"]",
"den",
"=",
"np",
".",
"identity",
"(",
"n_features",
")",
"+",
"cov",
"*",
"np",
".",
"sum",
"(",
"z",
".",
"T",
"[",
"k",
"]",
")",
"num",
"=",
"np",
".",
"sum",
"(",
"z",
".",
"T",
"[",
"k",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"*",
"X",
",",
"axis",
"=",
"0",
")",
"num",
"=",
"np",
".",
"dot",
"(",
"cov",
",",
"num",
")",
"self",
".",
"means_",
"[",
"k",
"]",
"=",
"linalg",
".",
"lstsq",
"(",
"den",
",",
"num",
")",
"[",
"0",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/mixture/dpgmm.py#L307-L324 |
||
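The spherical/diag branch above is a precision-weighted responsibility average that shrinks toward the prior mean at zero; a numpy restatement for one component, with invented data:

```python
# Sketch of the spherical/diag mean update for a single component k.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))   # data, shape (n_samples, n_features)
z_k = rng.random(100)           # responsibilities of component k
prec_k = 2.0                    # current precision of component k

num = prec_k * np.sum(z_k[:, None] * X, axis=0)
den = 1.0 + prec_k * np.sum(z_k)   # the 1. encodes the zero-mean prior
print(num / den)                   # updated mean, shrunk toward 0
```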
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/mailbox.py | python | _singlefileMailbox.remove | (self, key) | Remove the keyed message; raise KeyError if it doesn't exist. | Remove the keyed message; raise KeyError if it doesn't exist. | [
"Remove",
"the",
"keyed",
"message",
";",
"raise",
"KeyError",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True | [
"def",
"remove",
"(",
"self",
",",
"key",
")",
":",
"self",
".",
"_lookup",
"(",
"key",
")",
"del",
"self",
".",
"_toc",
"[",
"key",
"]",
"self",
".",
"_pending",
"=",
"True"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mailbox.py#L600-L604 |
||
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | src/ceph-volume/ceph_volume/util/system.py | python | set_context | (path, recursive=False) | Calls ``restorecon`` to set the proper context on SELinux systems. Only if
the ``restorecon`` executable is found anywhere in the path it will get
called.
If the ``CEPH_VOLUME_SKIP_RESTORECON`` environment variable is set to
any of: "1", "true", "yes" the call will be skipped as well.
Finally, if SELinux is not enabled, or not available in the system,
``restorecon`` will not be called. This is checked by calling out to the
``selinuxenabled`` executable. If that tool is not installed or returns
a non-zero exit status then no further action is taken and this function
will return. | Calls ``restorecon`` to set the proper context on SELinux systems. Only if
the ``restorecon`` executable is found anywhere in the path it will get
called. | [
"Calls",
"restorecon",
"to",
"set",
"the",
"proper",
"context",
"on",
"SELinux",
"systems",
".",
"Only",
"if",
"the",
"restorecon",
"executable",
"is",
"found",
"anywhere",
"in",
"the",
"path",
"it",
"will",
"get",
"called",
"."
] | def set_context(path, recursive=False):
"""
Calls ``restorecon`` to set the proper context on SELinux systems. Only if
the ``restorecon`` executable is found anywhere in the path it will get
called.
If the ``CEPH_VOLUME_SKIP_RESTORECON`` environment variable is set to
any of: "1", "true", "yes" the call will be skipped as well.
Finally, if SELinux is not enabled, or not available in the system,
``restorecon`` will not be called. This is checked by calling out to the
``selinuxenabled`` executable. If that tool is not installed or returns
a non-zero exit status then no further action is taken and this function
will return.
"""
skip = os.environ.get('CEPH_VOLUME_SKIP_RESTORECON', '')
if skip.lower() in ['1', 'true', 'yes']:
logger.info(
'CEPH_VOLUME_SKIP_RESTORECON environ is set, will not call restorecon'
)
return
try:
stdout, stderr, code = process.call(['selinuxenabled'],
verbose_on_failure=False)
except FileNotFoundError:
logger.info('No SELinux found, skipping call to restorecon')
return
if code != 0:
logger.info('SELinux is not enabled, will not call restorecon')
return
# restore selinux context to default policy values
if which('restorecon').startswith('/'):
if recursive:
process.run(['restorecon', '-R', path])
else:
process.run(['restorecon', path]) | [
"def",
"set_context",
"(",
"path",
",",
"recursive",
"=",
"False",
")",
":",
"skip",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'CEPH_VOLUME_SKIP_RESTORECON'",
",",
"''",
")",
"if",
"skip",
".",
"lower",
"(",
")",
"in",
"[",
"'1'",
",",
"'true'",
",",
"'yes'",
"]",
":",
"logger",
".",
"info",
"(",
"'CEPH_VOLUME_SKIP_RESTORECON environ is set, will not call restorecon'",
")",
"return",
"try",
":",
"stdout",
",",
"stderr",
",",
"code",
"=",
"process",
".",
"call",
"(",
"[",
"'selinuxenabled'",
"]",
",",
"verbose_on_failure",
"=",
"False",
")",
"except",
"FileNotFoundError",
":",
"logger",
".",
"info",
"(",
"'No SELinux found, skipping call to restorecon'",
")",
"return",
"if",
"code",
"!=",
"0",
":",
"logger",
".",
"info",
"(",
"'SELinux is not enabled, will not call restorecon'",
")",
"return",
"# restore selinux context to default policy values",
"if",
"which",
"(",
"'restorecon'",
")",
".",
"startswith",
"(",
"'/'",
")",
":",
"if",
"recursive",
":",
"process",
".",
"run",
"(",
"[",
"'restorecon'",
",",
"'-R'",
",",
"path",
"]",
")",
"else",
":",
"process",
".",
"run",
"(",
"[",
"'restorecon'",
",",
"path",
"]",
")"
] | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/ceph-volume/ceph_volume/util/system.py#L342-L380 |
||
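The documented escape hatch can be exercised as below, assuming `set_context` is importable from the module in this record; the OSD path is an assumption.

```python
# Sketch: skip the restorecon call via the documented environment flag.
import os
from ceph_volume.util.system import set_context  # module from this record

os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = 'yes'
set_context('/var/lib/ceph/osd/ceph-0', recursive=True)  # logs and returns
```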
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/numpy/multiarray.py | python | trunc | (x, out=None, **kwargs) | return _mx_nd_np.trunc(x, out=out, **kwargs) | r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.trunc in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.]) | r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded. | [
"r",
"Return",
"the",
"truncated",
"value",
"of",
"the",
"input",
"element",
"-",
"wise",
".",
"The",
"truncated",
"value",
"of",
"the",
"scalar",
"x",
"is",
"the",
"nearest",
"integer",
"i",
"which",
"is",
"closer",
"to",
"zero",
"than",
"x",
"is",
".",
"In",
"short",
"the",
"fractional",
"part",
"of",
"the",
"signed",
"number",
"x",
"is",
"discarded",
"."
] | def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.trunc in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _mx_nd_np.trunc(x, out=out, **kwargs) | [
"def",
"trunc",
"(",
"x",
",",
"out",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_mx_nd_np",
".",
"trunc",
"(",
"x",
",",
"out",
"=",
"out",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/multiarray.py#L5609-L5643 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | ListCtrl.GetClassDefaultAttributes | (*args, **kwargs) | return _controls_.ListCtrl_GetClassDefaultAttributes(*args, **kwargs) | GetClassDefaultAttributes(int variant=WINDOW_VARIANT_NORMAL) -> VisualAttributes
Get the default attributes for this class. This is useful if you want
to use the same font or colour in your own control as in a standard
control -- which is a much better idea than hard coding specific
colours or fonts which might look completely out of place on the
user's system, especially if it uses themes.
The variant parameter is only relevant under Mac currently and is
ignore under other platforms. Under Mac, it will change the size of
the returned font. See `wx.Window.SetWindowVariant` for more about
this. | GetClassDefaultAttributes(int variant=WINDOW_VARIANT_NORMAL) -> VisualAttributes | [
"GetClassDefaultAttributes",
"(",
"int",
"variant",
"=",
"WINDOW_VARIANT_NORMAL",
")",
"-",
">",
"VisualAttributes"
] | def GetClassDefaultAttributes(*args, **kwargs):
"""
GetClassDefaultAttributes(int variant=WINDOW_VARIANT_NORMAL) -> VisualAttributes
Get the default attributes for this class. This is useful if you want
to use the same font or colour in your own control as in a standard
control -- which is a much better idea than hard coding specific
colours or fonts which might look completely out of place on the
user's system, especially if it uses themes.
The variant parameter is only relevant under Mac currently and is
ignore under other platforms. Under Mac, it will change the size of
the returned font. See `wx.Window.SetWindowVariant` for more about
this.
"""
return _controls_.ListCtrl_GetClassDefaultAttributes(*args, **kwargs) | [
"def",
"GetClassDefaultAttributes",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ListCtrl_GetClassDefaultAttributes",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L4834-L4849 |
|
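A hedged query of the platform defaults described above; a `wx.App` is created first because wxPython widget classes generally need one.

```python
# Sketch: read the default colours/font wx would use for a ListCtrl.
import wx

app = wx.App(False)
attrs = wx.ListCtrl.GetClassDefaultAttributes(wx.WINDOW_VARIANT_NORMAL)
print(attrs.colBg, attrs.colFg, attrs.font.GetPointSize())
```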
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/stats/stats.py | python | ttest_ind_from_stats | (mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True) | return Ttest_indResult(*res) | T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
               Sample   Sample
         Size   Mean   Variance
Sample 1   13   15.0     87.5
Sample 2   11   12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) | T-test for means of two independent samples from descriptive statistics. | [
"T",
"-",
"test",
"for",
"means",
"of",
"two",
"independent",
"samples",
"from",
"descriptive",
"statistics",
"."
] | def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
               Sample   Sample
         Size   Mean   Variance
Sample 1   13   15.0     87.5
Sample 2   11   12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res) | [
"def",
"ttest_ind_from_stats",
"(",
"mean1",
",",
"std1",
",",
"nobs1",
",",
"mean2",
",",
"std2",
",",
"nobs2",
",",
"equal_var",
"=",
"True",
")",
":",
"if",
"equal_var",
":",
"df",
",",
"denom",
"=",
"_equal_var_ttest_denom",
"(",
"std1",
"**",
"2",
",",
"nobs1",
",",
"std2",
"**",
"2",
",",
"nobs2",
")",
"else",
":",
"df",
",",
"denom",
"=",
"_unequal_var_ttest_denom",
"(",
"std1",
"**",
"2",
",",
"nobs1",
",",
"std2",
"**",
"2",
",",
"nobs2",
")",
"res",
"=",
"_ttest_ind_from_stats",
"(",
"mean1",
",",
"mean2",
",",
"denom",
",",
"df",
")",
"return",
"Ttest_indResult",
"(",
"*",
"res",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/stats/stats.py#L4004-L4089 |
|
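Complementing the docstring examples above, a sketch of the `equal_var=False` branch (Welch's t-test) on the same summary statistics; the rounded value is approximate and may differ in the last digit across SciPy versions:
>>> import numpy as np
>>> from scipy.stats import ttest_ind_from_stats
>>> res = ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
...                            mean2=12.0, std2=np.sqrt(39.0), nobs2=11,
...                            equal_var=False)  # Welch: no pooled variance
>>> round(res.statistic, 3)
0.936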
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.GetCaretLineBackAlpha | (*args, **kwargs) | return _stc.StyledTextCtrl_GetCaretLineBackAlpha(*args, **kwargs) | GetCaretLineBackAlpha(self) -> int
Get the background alpha of the caret line. | GetCaretLineBackAlpha(self) -> int | [
"GetCaretLineBackAlpha",
"(",
"self",
")",
"-",
">",
"int"
] | def GetCaretLineBackAlpha(*args, **kwargs):
"""
GetCaretLineBackAlpha(self) -> int
Get the background alpha of the caret line.
"""
return _stc.StyledTextCtrl_GetCaretLineBackAlpha(*args, **kwargs) | [
"def",
"GetCaretLineBackAlpha",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_GetCaretLineBackAlpha",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L5623-L5629 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/dateutil/tz/tz.py | python | datetime_ambiguous | (dt, tz=None) | return not (same_offset and same_dst) | Given a datetime and a time zone, determine whether or not a given datetime
is ambiguous (i.e. if there are two times differentiated only by their DST
status).
:param dt:
A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
is provided.)
:param tz:
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
``None`` or not provided, the datetime's own time zone will be used.
:return:
Returns a boolean value whether or not the "wall time" is ambiguous in
``tz``.
.. versionadded:: 2.6.0 | Given a datetime and a time zone, determine whether or not a given datetime
is ambiguous (i.e. if there are two times differentiated only by their DST
status). | [
"Given",
"a",
"datetime",
"and",
"a",
"time",
"zone",
"determine",
"whether",
"or",
"not",
"a",
"given",
"datetime",
"is",
"ambiguous",
"(",
"i",
".",
"e",
"if",
"there",
"are",
"two",
"times",
"differentiated",
"only",
"by",
"their",
"DST",
"status",
")",
"."
] | def datetime_ambiguous(dt, tz=None):
"""
Given a datetime and a time zone, determine whether or not a given datetime
is ambiguous (i.e. if there are two times differentiated only by their DST
status).
:param dt:
A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
is provided.)
:param tz:
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
``None`` or not provided, the datetime's own time zone will be used.
:return:
Returns a boolean value whether or not the "wall time" is ambiguous in
``tz``.
.. versionadded:: 2.6.0
"""
if tz is None:
if dt.tzinfo is None:
raise ValueError('Datetime is naive and no time zone provided.')
tz = dt.tzinfo
# If a time zone defines its own "is_ambiguous" function, we'll use that.
is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
if is_ambiguous_fn is not None:
try:
return tz.is_ambiguous(dt)
except:
pass
# If it doesn't come out and tell us it's ambiguous, we'll just check if
# the fold attribute has any effect on this particular date and time.
dt = dt.replace(tzinfo=tz)
wall_0 = enfold(dt, fold=0)
wall_1 = enfold(dt, fold=1)
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
same_dst = wall_0.dst() == wall_1.dst()
return not (same_offset and same_dst) | [
"def",
"datetime_ambiguous",
"(",
"dt",
",",
"tz",
"=",
"None",
")",
":",
"if",
"tz",
"is",
"None",
":",
"if",
"dt",
".",
"tzinfo",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Datetime is naive and no time zone provided.'",
")",
"tz",
"=",
"dt",
".",
"tzinfo",
"# If a time zone defines its own \"is_ambiguous\" function, we'll use that.",
"is_ambiguous_fn",
"=",
"getattr",
"(",
"tz",
",",
"'is_ambiguous'",
",",
"None",
")",
"if",
"is_ambiguous_fn",
"is",
"not",
"None",
":",
"try",
":",
"return",
"tz",
".",
"is_ambiguous",
"(",
"dt",
")",
"except",
":",
"pass",
"# If it doesn't come out and tell us it's ambiguous, we'll just check if",
"# the fold attribute has any effect on this particular date and time.",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
")",
"wall_0",
"=",
"enfold",
"(",
"dt",
",",
"fold",
"=",
"0",
")",
"wall_1",
"=",
"enfold",
"(",
"dt",
",",
"fold",
"=",
"1",
")",
"same_offset",
"=",
"wall_0",
".",
"utcoffset",
"(",
")",
"==",
"wall_1",
".",
"utcoffset",
"(",
")",
"same_dst",
"=",
"wall_0",
".",
"dst",
"(",
")",
"==",
"wall_1",
".",
"dst",
"(",
")",
"return",
"not",
"(",
"same_offset",
"and",
"same_dst",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/dateutil/tz/tz.py#L1443-L1486 |
|
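A short illustration of the ambiguity check against a real DST transition; it assumes `dateutil` with `tz.gettz` and an available IANA time zone database:
>>> from datetime import datetime
>>> from dateutil import tz
>>> eastern = tz.gettz('America/New_York')
>>> # 2018-11-04 01:30 local time occurs twice (clocks fall back at 02:00)
>>> tz.datetime_ambiguous(datetime(2018, 11, 4, 1, 30), tz=eastern)
True
>>> tz.datetime_ambiguous(datetime(2018, 11, 4, 12, 0), tz=eastern)
False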
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | benchmark/opperf/nd_operations/binary_operators.py | python | run_mx_binary_element_wise_operators_benchmarks | (ctx=mx.cpu(), dtype='float32', warmup=10, runs=50) | | return mx_binary_op_results | Runs benchmarks with the given context and precision (dtype) for all the binary
element_wise operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 10
Number of times to run for warmup
runs: int, default 50
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. | Runs benchmarks with the given context and precision (dtype) for all the binary
element_wise operators in MXNet. | [
"Runs",
"benchmarks",
"with",
"the",
"given",
"context",
"and",
"precision",
"(",
"dtype",
")",
"for",
"all",
"the",
"binary",
"element_wise",
"operators",
"in",
"MXNet",
"."
] | def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, runs=50):
"""Runs benchmarks with the given context and precision (dtype)for all the binary
element_wise operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 10
Number of times to run for warmup
runs: int, default 50
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all Binary Element_wise Operators
mx_binary_element_wise_ops = get_all_elemen_wise_binary_operators()
# Run benchmarks
mx_binary_op_results = run_op_benchmarks(mx_binary_element_wise_ops, dtype, ctx, warmup, runs)
return mx_binary_op_results | [
"def",
"run_mx_binary_element_wise_operators_benchmarks",
"(",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
",",
"dtype",
"=",
"'float32'",
",",
"warmup",
"=",
"10",
",",
"runs",
"=",
"50",
")",
":",
"# Fetch all Binary Element_wise Operators",
"mx_binary_element_wise_ops",
"=",
"get_all_elemen_wise_binary_operators",
"(",
")",
"# Run benchmarks",
"mx_binary_op_results",
"=",
"run_op_benchmarks",
"(",
"mx_binary_element_wise_ops",
",",
"dtype",
",",
"ctx",
",",
"warmup",
",",
"runs",
")",
"return",
"mx_binary_op_results"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/benchmark/opperf/nd_operations/binary_operators.py#L68-L92 |
|
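A hedged invocation sketch; it assumes the script is run from an MXNet source checkout so that the `benchmark.opperf` package is importable:
import mxnet as mx
from benchmark.opperf.nd_operations.binary_operators import (
    run_mx_binary_element_wise_operators_benchmarks)

# maps each binary element-wise operator name to its benchmark results
results = run_mx_binary_element_wise_operators_benchmarks(
    ctx=mx.cpu(), dtype='float32', warmup=10, runs=50)
print(results)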
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/summary/summary.py | python | image | (name, tensor, max_outputs=3, collections=None, family=None) | return val | Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on TensorBoard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer. | Outputs a `Summary` protocol buffer with images. | [
"Outputs",
"a",
"Summary",
"protocol",
"buffer",
"with",
"images",
"."
] | def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
# pylint: disable=protected-access
val = _gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val | [
"def",
"image",
"(",
"name",
",",
"tensor",
",",
"max_outputs",
"=",
"3",
",",
"collections",
"=",
"None",
",",
"family",
"=",
"None",
")",
":",
"with",
"_summary_op_util",
".",
"summary_scope",
"(",
"name",
",",
"family",
",",
"values",
"=",
"[",
"tensor",
"]",
")",
"as",
"(",
"tag",
",",
"scope",
")",
":",
"# pylint: disable=protected-access",
"val",
"=",
"_gen_logging_ops",
".",
"_image_summary",
"(",
"tag",
"=",
"tag",
",",
"tensor",
"=",
"tensor",
",",
"max_images",
"=",
"max_outputs",
",",
"name",
"=",
"scope",
")",
"_summary_op_util",
".",
"collect",
"(",
"val",
",",
"collections",
",",
"[",
"_ops",
".",
"GraphKeys",
".",
"SUMMARIES",
"]",
")",
"return",
"val"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/summary/summary.py#L104-L155 |
|
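A minimal graph-mode sketch of wiring the op into a run loop; it assumes the TensorFlow 1.x API that this record documents (placeholders and sessions):
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])  # grayscale batch
summary_op = tf.summary.image('inputs', images, max_outputs=3)

# inside a session, serialize the summary and hand it to TensorBoard:
#   writer = tf.summary.FileWriter(logdir)
#   writer.add_summary(sess.run(summary_op, feed_dict={images: batch}), step)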
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/asyncio/runners.py | python | run | (main, *, debug=False) | Execute the coroutine and return the result.
This function runs the passed coroutine, taking care of
managing the asyncio event loop and finalizing asynchronous
generators.
This function cannot be called when another asyncio event loop is
running in the same thread.
If debug is True, the event loop will be run in debug mode.
This function always creates a new event loop and closes it at the end.
It should be used as a main entry point for asyncio programs, and should
ideally only be called once.
Example:
async def main():
await asyncio.sleep(1)
print('hello')
asyncio.run(main()) | Execute the coroutine and return the result. | [
"Execute",
"the",
"coroutine",
"and",
"return",
"the",
"result",
"."
] | def run(main, *, debug=False):
"""Execute the coroutine and return the result.
This function runs the passed coroutine, taking care of
managing the asyncio event loop and finalizing asynchronous
generators.
This function cannot be called when another asyncio event loop is
running in the same thread.
If debug is True, the event loop will be run in debug mode.
This function always creates a new event loop and closes it at the end.
It should be used as a main entry point for asyncio programs, and should
ideally only be called once.
Example:
async def main():
await asyncio.sleep(1)
print('hello')
asyncio.run(main())
"""
if events._get_running_loop() is not None:
raise RuntimeError(
"asyncio.run() cannot be called from a running event loop")
if not coroutines.iscoroutine(main):
raise ValueError("a coroutine was expected, got {!r}".format(main))
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
loop.set_debug(debug)
return loop.run_until_complete(main)
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close() | [
"def",
"run",
"(",
"main",
",",
"*",
",",
"debug",
"=",
"False",
")",
":",
"if",
"events",
".",
"_get_running_loop",
"(",
")",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"asyncio.run() cannot be called from a running event loop\"",
")",
"if",
"not",
"coroutines",
".",
"iscoroutine",
"(",
"main",
")",
":",
"raise",
"ValueError",
"(",
"\"a coroutine was expected, got {!r}\"",
".",
"format",
"(",
"main",
")",
")",
"loop",
"=",
"events",
".",
"new_event_loop",
"(",
")",
"try",
":",
"events",
".",
"set_event_loop",
"(",
"loop",
")",
"loop",
".",
"set_debug",
"(",
"debug",
")",
"return",
"loop",
".",
"run_until_complete",
"(",
"main",
")",
"finally",
":",
"try",
":",
"_cancel_all_tasks",
"(",
"loop",
")",
"loop",
".",
"run_until_complete",
"(",
"loop",
".",
"shutdown_asyncgens",
"(",
")",
")",
"finally",
":",
"events",
".",
"set_event_loop",
"(",
"None",
")",
"loop",
".",
"close",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/asyncio/runners.py#L8-L50 |
||
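Beyond the docstring example, a sketch showing that `run` returns the coroutine's result and accepts `debug`; calling it from inside an already running loop raises RuntimeError, as enforced above:
>>> import asyncio
>>> async def fetch():
...     await asyncio.sleep(0.1)
...     return 42
>>> asyncio.run(fetch(), debug=True)
42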
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/utils/inputvalue.py | python | InputValue.write | (self, name="", indent="") | return Input.write(self, name=name, indent=indent, text=write_type(self.type, self.value)) | Writes data in xml file format.
Writes the data in the appropriate format between appropriate tags.
Args:
name: An optional string giving the tag name. Defaults to "".
indent: An optional string giving the string to be added to the start
of the line, so usually a number of tabs. Defaults to "".
Returns:
A string giving the stored value in the appropriate xml format. | Writes data in xml file format. | [
"Writes",
"data",
"in",
"xml",
"file",
"format",
"."
] | def write(self, name="", indent=""):
"""Writes data in xml file format.
Writes the data in the appropriate format between appropriate tags.
Args:
name: An optional string giving the tag name. Defaults to "".
indent: An optional string giving the string to be added to the start
of the line, so usually a number of tabs. Defaults to "".
Returns:
A string giving the stored value in the appropriate xml format.
"""
return Input.write(self, name=name, indent=indent, text=write_type(self.type, self.value)) | [
"def",
"write",
"(",
"self",
",",
"name",
"=",
"\"\"",
",",
"indent",
"=",
"\"\"",
")",
":",
"return",
"Input",
".",
"write",
"(",
"self",
",",
"name",
"=",
"name",
",",
"indent",
"=",
"indent",
",",
"text",
"=",
"write_type",
"(",
"self",
".",
"type",
",",
"self",
".",
"value",
")",
")"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/utils/inputvalue.py#L827-L841 |
|
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/inputs/initializer.py | python | InputInitCell.fetch | (self) | return self._initclass(value=ibase.value, mode=mode, units=self.units.fetch()) | Creates a cell initializer object.
Note that the cell can be initialized from the lengths of the sides and
the angles between them instead of by a vector, as specified by the
'abc' or 'abcABC' modes. | Creates a cell initializer object. | [
"Creates",
"a",
"cell",
"initializer",
"object",
"."
] | def fetch(self):
"""Creates a cell initializer object.
Note that the cell can be initialized from the lengths of the sides and
the angles between them instead of by a vector, as specified by the
'abc' or 'abcABC' modes.
"""
mode = self.mode.fetch()
ibase = super(InputInitCell,self).fetch()
if mode == "abc" or mode == "abcABC":
h = io_xml.read_array(np.float, ibase.value)
if mode == "abc":
if h.size != 3:
raise ValueError("If you are initializing cell from cell side lengths you must pass the 'cell' tag an array of 3 floats.")
else:
h = mt.abc2h(h[0], h[1], h[2], np.pi/2, np.pi/2, np.pi/2)
elif mode == "abcABC":
if h.size != 6:
raise ValueError("If you are initializing cell from cell side lengths and angles you must pass the 'cell' tag an array of 6 floats.")
else:
h = mt.abc2h(h[0], h[1], h[2], h[3]*np.pi/180.0, h[4]*np.pi/180.0, h[5]*np.pi/180.0)
h.shape = (9,)
ibase.value = h
mode = "manual"
if mode == "manual":
h = ibase.value
if h.size != 9:
raise ValueError("Cell objects must contain a 3x3 matrix describing the cell vectors.")
if not (h[3] == 0.0 and h[6] == 0.0 and h[7] == 0.0):
warning("Cell vector matrix must be upper triangular, all elements below the diagonal being set to zero.", verbosity.low)
h[3] = h[6] = h[7] = 0
ibase.value = h
return self._initclass(value=ibase.value, mode=mode, units=self.units.fetch()) | [
"def",
"fetch",
"(",
"self",
")",
":",
"mode",
"=",
"self",
".",
"mode",
".",
"fetch",
"(",
")",
"ibase",
"=",
"super",
"(",
"InputInitCell",
",",
"self",
")",
".",
"fetch",
"(",
")",
"if",
"mode",
"==",
"\"abc\"",
"or",
"mode",
"==",
"\"abcABC\"",
":",
"h",
"=",
"io_xml",
".",
"read_array",
"(",
"np",
".",
"float",
",",
"ibase",
".",
"value",
")",
"if",
"mode",
"==",
"\"abc\"",
":",
"if",
"h",
".",
"size",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"If you are initializing cell from cell side lengths you must pass the 'cell' tag an array of 3 floats.\"",
")",
"else",
":",
"h",
"=",
"mt",
".",
"abc2h",
"(",
"h",
"[",
"0",
"]",
",",
"h",
"[",
"1",
"]",
",",
"h",
"[",
"2",
"]",
",",
"np",
".",
"pi",
"/",
"2",
",",
"np",
".",
"pi",
"/",
"2",
",",
"np",
".",
"pi",
"/",
"2",
")",
"elif",
"mode",
"==",
"\"abcABC\"",
":",
"if",
"h",
".",
"size",
"!=",
"6",
":",
"raise",
"ValueError",
"(",
"\"If you are initializing cell from cell side lengths and angles you must pass the 'cell' tag an array of 6 floats.\"",
")",
"else",
":",
"h",
"=",
"mt",
".",
"abc2h",
"(",
"h",
"[",
"0",
"]",
",",
"h",
"[",
"1",
"]",
",",
"h",
"[",
"2",
"]",
",",
"h",
"[",
"3",
"]",
"*",
"np",
".",
"pi",
"/",
"180.0",
",",
"h",
"[",
"4",
"]",
"*",
"np",
".",
"pi",
"/",
"180.0",
",",
"h",
"[",
"5",
"]",
"*",
"np",
".",
"pi",
"/",
"180.0",
")",
"h",
".",
"shape",
"=",
"(",
"9",
",",
")",
"ibase",
".",
"value",
"=",
"h",
"mode",
"=",
"\"manual\"",
"if",
"mode",
"==",
"\"manual\"",
":",
"h",
"=",
"ibase",
".",
"value",
"if",
"h",
".",
"size",
"!=",
"9",
":",
"raise",
"ValueError",
"(",
"\"Cell objects must contain a 3x3 matrix describing the cell vectors.\"",
")",
"if",
"not",
"(",
"h",
"[",
"3",
"]",
"==",
"0.0",
"and",
"h",
"[",
"6",
"]",
"==",
"0.0",
"and",
"h",
"[",
"7",
"]",
"==",
"0.0",
")",
":",
"warning",
"(",
"\"Cell vector matrix must be upper triangular, all elements below the diagonal being set to zero.\"",
",",
"verbosity",
".",
"low",
")",
"h",
"[",
"3",
"]",
"=",
"h",
"[",
"6",
"]",
"=",
"h",
"[",
"7",
"]",
"=",
"0",
"ibase",
".",
"value",
"=",
"h",
"return",
"self",
".",
"_initclass",
"(",
"value",
"=",
"ibase",
".",
"value",
",",
"mode",
"=",
"mode",
",",
"units",
"=",
"self",
".",
"units",
".",
"fetch",
"(",
")",
")"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/inputs/initializer.py#L250-L290 |
|
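For reference, a self-contained numpy sketch of the side-lengths-and-angles conversion that `mt.abc2h` performs above; the exact convention (upper-triangular cell matrix, angles in radians) is an assumption inferred from the calls in `fetch`:
import numpy as np

def abc2h(a, b, c, alpha, beta, gamma):
    # upper-triangular cell matrix from side lengths and angles (radians)
    h = np.zeros((3, 3))
    h[0, 0] = a
    h[0, 1] = b * np.cos(gamma)
    h[0, 2] = c * np.cos(beta)
    h[1, 1] = b * np.sin(gamma)
    h[1, 2] = c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)
    h[2, 2] = np.sqrt(c**2 - h[0, 2]**2 - h[1, 2]**2)
    return h

# a cubic 10x10x10 cell comes out as 10 * identity (up to rounding):
# abc2h(10, 10, 10, np.pi/2, np.pi/2, np.pi/2)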
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/telemetry/third_party/altgraph/altgraph/Dot.py | python | Dot.style | (self, **attr) | Changes the overall style | Changes the overall style | [
"Changes",
"the",
"overall",
"style"
] | def style(self, **attr):
'''
Changes the overall style
'''
self.attr = attr | [
"def",
"style",
"(",
"self",
",",
"*",
"*",
"attr",
")",
":",
"self",
".",
"attr",
"=",
"attr"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/altgraph/altgraph/Dot.py#L170-L174 |
||
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/tools/gyp/tools/graphviz.py | python | LoadEdges | (filename, targets) | return target_edges | Load the edges map from the dump file, and filter it to only
show targets in |targets| and their dependents. | Load the edges map from the dump file, and filter it to only
show targets in |targets| and their dependents. | [
"Load",
"the",
"edges",
"map",
"from",
"the",
"dump",
"file",
"and",
"filter",
"it",
"to",
"only",
"show",
"targets",
"in",
"|targets|",
"and",
"their",
"depedendents",
"."
] | def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their dependents."""
file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges | [
"def",
"LoadEdges",
"(",
"filename",
",",
"targets",
")",
":",
"file",
"=",
"open",
"(",
"'dump.json'",
")",
"edges",
"=",
"json",
".",
"load",
"(",
"file",
")",
"file",
".",
"close",
"(",
")",
"# Copy out only the edges we're interested in from the full edge list.",
"target_edges",
"=",
"{",
"}",
"to_visit",
"=",
"targets",
"[",
":",
"]",
"while",
"to_visit",
":",
"src",
"=",
"to_visit",
".",
"pop",
"(",
")",
"if",
"src",
"in",
"target_edges",
":",
"continue",
"target_edges",
"[",
"src",
"]",
"=",
"edges",
"[",
"src",
"]",
"to_visit",
".",
"extend",
"(",
"edges",
"[",
"src",
"]",
")",
"return",
"target_edges"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/tools/graphviz.py#L24-L42 |
|
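The traversal above is a plain reachability filter; a self-contained re-implementation makes the behaviour easy to test without a dump file:
def filter_edges(edges, targets):
    # keep only the targets and everything transitively reachable from them
    kept, to_visit = {}, list(targets)
    while to_visit:
        src = to_visit.pop()
        if src in kept:
            continue
        kept[src] = edges[src]
        to_visit.extend(edges[src])
    return kept

edges = {'app': ['lib'], 'lib': ['base'], 'base': [], 'tool': ['base']}
assert filter_edges(edges, ['app']) == {'app': ['lib'], 'lib': ['base'], 'base': []}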
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/msvc.py | python | RegistryInfo.vc_for_python | (self) | return r'DevDiv\VCForPython' | Microsoft Visual C++ for Python registry key.
Return
------
str
Registry key | Microsoft Visual C++ for Python registry key. | [
"Microsoft",
"Visual",
"C",
"++",
"for",
"Python",
"registry",
"key",
"."
] | def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
Return
------
str
Registry key
"""
return r'DevDiv\VCForPython' | [
"def",
"vc_for_python",
"(",
"self",
")",
":",
"return",
"r'DevDiv\\VCForPython'"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/setuptools/msvc.py#L550-L559 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/handlers.py | python | NTEventLogHandler.emit | (self, record) | Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log. | Emit a record. | [
"Emit",
"a",
"record",
"."
] | def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"if",
"self",
".",
"_welu",
":",
"try",
":",
"id",
"=",
"self",
".",
"getMessageID",
"(",
"record",
")",
"cat",
"=",
"self",
".",
"getEventCategory",
"(",
"record",
")",
"type",
"=",
"self",
".",
"getEventType",
"(",
"record",
")",
"msg",
"=",
"self",
".",
"format",
"(",
"record",
")",
"self",
".",
"_welu",
".",
"ReportEvent",
"(",
"self",
".",
"appname",
",",
"id",
",",
"cat",
",",
"type",
",",
"[",
"msg",
"]",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"raise",
"except",
":",
"self",
".",
"handleError",
"(",
"record",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/handlers.py#L1016-L1033 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | PyEventBinder.Bind | (self, target, id1, id2, function) | Bind this set of event types to target. | Bind this set of event types to target. | [
"Bind",
"this",
"set",
"of",
"event",
"types",
"to",
"target",
"."
] | def Bind(self, target, id1, id2, function):
"""Bind this set of event types to target."""
for et in self.evtType:
target.Connect(id1, id2, et, function) | [
"def",
"Bind",
"(",
"self",
",",
"target",
",",
"id1",
",",
"id2",
",",
"function",
")",
":",
"for",
"et",
"in",
"self",
".",
"evtType",
":",
"target",
".",
"Connect",
"(",
"id1",
",",
"id2",
",",
"et",
",",
"function",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L4569-L4572 |
||
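`PyEventBinder.Bind` is the plumbing behind the familiar `wx.Window.Bind` call; a minimal sketch, assuming classic wxPython:
import wx

def on_click(event):
    print('clicked')

app = wx.App(False)
frame = wx.Frame(None, title='demo')
button = wx.Button(frame, label='go')
# wx.EVT_BUTTON is a PyEventBinder; Window.Bind delegates to PyEventBinder.Bind,
# which Connects each event type in evtType to the handler
frame.Bind(wx.EVT_BUTTON, on_click, button)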
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py | python | domain_tan.__call__ | (self, x) | return umath.less(umath.absolute(umath.cos(x)), self.eps) | Execute the call behavior. | Execute the call behavior. | [
"Execute",
"the",
"call",
"behavior",
"."
] | def __call__ (self, x):
"Execute the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps) | [
"def",
"__call__",
"(",
"self",
",",
"x",
")",
":",
"return",
"umath",
".",
"less",
"(",
"umath",
".",
"absolute",
"(",
"umath",
".",
"cos",
"(",
"x",
")",
")",
",",
"self",
".",
"eps",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py#L287-L289 |
|
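A standalone numpy illustration of the domain test: points where `cos(x)` falls within `eps` of zero are exactly the points where `tan(x)` diverges (the eps here is illustrative, not the value `ma` uses):
import numpy as np

eps = 1e-10
x = np.array([0.0, np.pi / 2, np.pi])
mask = np.less(np.absolute(np.cos(x)), eps)
# -> array([False, True, False]): only x = pi/2 lies in the singular domain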
borglab/gtsam | a5bee157efce6a0563704bce6a5d188c29817f39 | python/gtsam/examples/PlanarManipulatorExample.py | python | ThreeLinkArm.manipulator_jacobian | (self, q) | return np.stack(differential_twists, axis=1) | Calculate manipulator Jacobian.
Takes numpy array of joint angles, in radians.
Returns the manipulator Jacobian of differential twists. When multiplied with
a vector of joint velocities, will yield a single differential twist which is
the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
Just like always, differential twists can be hatted and multiplied with spatial
coordinates of a point to give the spatial velocity of the point. | Calculate manipulator Jacobian.
Takes numpy array of joint angles, in radians.
Returns the manipulator Jacobian of differential twists. When multiplied with
a vector of joint velocities, will yield a single differential twist which is
the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
Just like always, differential twists can be hatted and multiplied with spatial
coordinates of a point to give the spatial velocity of the point. | [
"Calculate",
"manipulator",
"Jacobian",
".",
"Takes",
"numpy",
"array",
"of",
"joint",
"angles",
"in",
"radians",
".",
"Returns",
"the",
"manipulator",
"Jacobian",
"of",
"differential",
"twists",
".",
"When",
"multiplied",
"with",
"a",
"vector",
"of",
"joint",
"velocities",
"will",
"yield",
"a",
"single",
"differential",
"twist",
"which",
"is",
"the",
"spatial",
"velocity",
"d",
"(",
"sTt",
")",
"/",
"dt",
"*",
"inv",
"(",
"sTt",
")",
"of",
"the",
"end",
"-",
"effector",
"pose",
".",
"Just",
"like",
"always",
"differential",
"twists",
"can",
"be",
"hatted",
"and",
"multiplied",
"with",
"spatial",
"coordinates",
"of",
"a",
"point",
"to",
"give",
"the",
"spatial",
"velocity",
"of",
"the",
"point",
"."
] | def manipulator_jacobian(self, q):
""" Calculate manipulator Jacobian.
Takes numpy array of joint angles, in radians.
Returns the manipulator Jacobian of differential twists. When multiplied with
a vector of joint velocities, will yield a single differential twist which is
the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
Just like always, differential twists can be hatted and multiplied with spatial
coordinates of a point to give the spatial velocity of the point.
"""
l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
# l3Zl3 = Pose2.Expmap(self.xi3 * q[2])
p1 = self.xi1
# p1 = Pose2().Adjoint(self.xi1)
sTl1 = l1Zl1
p2 = sTl1.Adjoint(self.xi2)
sTl2 = compose(l1Zl1, l2Zl2)
p3 = sTl2.Adjoint(self.xi3)
differential_twists = [p1, p2, p3]
return np.stack(differential_twists, axis=1) | [
"def",
"manipulator_jacobian",
"(",
"self",
",",
"q",
")",
":",
"l1Zl1",
"=",
"Pose2",
".",
"Expmap",
"(",
"self",
".",
"xi1",
"*",
"q",
"[",
"0",
"]",
")",
"l2Zl2",
"=",
"Pose2",
".",
"Expmap",
"(",
"self",
".",
"xi2",
"*",
"q",
"[",
"1",
"]",
")",
"# l3Zl3 = Pose2.Expmap(self.xi3 * q[2])",
"p1",
"=",
"self",
".",
"xi1",
"# p1 = Pose2().Adjoint(self.xi1)",
"sTl1",
"=",
"l1Zl1",
"p2",
"=",
"sTl1",
".",
"Adjoint",
"(",
"self",
".",
"xi2",
")",
"sTl2",
"=",
"compose",
"(",
"l1Zl1",
",",
"l2Zl2",
")",
"p3",
"=",
"sTl2",
".",
"Adjoint",
"(",
"self",
".",
"xi3",
")",
"differential_twists",
"=",
"[",
"p1",
",",
"p2",
",",
"p3",
"]",
"return",
"np",
".",
"stack",
"(",
"differential_twists",
",",
"axis",
"=",
"1",
")"
] | https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/python/gtsam/examples/PlanarManipulatorExample.py#L137-L160 |
|
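A hedged usage sketch; it assumes the `ThreeLinkArm` class defined in this example file and GTSAM's Python bindings are importable:
import numpy as np

arm = ThreeLinkArm()                  # class from this example file
q = np.radians([30.0, -45.0, 90.0])   # joint angles
J = arm.manipulator_jacobian(q)       # 3x3: one differential twist per column
qdot = np.array([0.1, 0.0, -0.2])     # joint velocities, rad/s
twist = J @ qdot                      # spatial velocity of the end-effector pose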
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/processor/conversion/aoc/effect_subprocessor.py | python | AoCEffectSubprocessor.get_repair_effects | (line, location_ref) | return effects | Creates effects that are used for repairing (unit command: 106)
:param line: Unit/Building line that gets the ability.
:type line: ...dataformat.converter_object.ConverterObjectGroup
:param location_ref: Reference to API object the effects are added to.
:type location_ref: str
:returns: The forward references for the effects.
:rtype: list | Creates effects that are used for repairing (unit command: 106) | [
"Creates",
"effects",
"that",
"are",
"used",
"for",
"repairing",
"(",
"unit",
"command",
":",
"106",
")"
] | def get_repair_effects(line, location_ref):
"""
Creates effects that are used for repairing (unit command: 106)
:param line: Unit/Building line that gets the ability.
:type line: ...dataformat.converter_object.ConverterObjectGroup
:param location_ref: Reference to API object the effects are added to.
:type location_ref: str
:returns: The forward references for the effects.
:rtype: list
"""
dataset = line.data
api_objects = dataset.nyan_api_objects
name_lookup_dict = internal_name_lookups.get_entity_lookups(dataset.game_version)
effects = []
effect_parent = "engine.effect.continuous.flat_attribute_change.FlatAttributeChange"
repair_parent = "engine.effect.continuous.flat_attribute_change.type.FlatAttributeChangeIncrease"
repairable_lines = []
repairable_lines.extend(dataset.building_lines.values())
for unit_line in dataset.unit_lines.values():
if unit_line.is_repairable():
repairable_lines.append(unit_line)
for repairable_line in repairable_lines:
game_entity_name = name_lookup_dict[repairable_line.get_head_unit_id()][0]
repair_name = f"{game_entity_name}RepairEffect"
repair_ref = f"{location_ref}.{repair_name}"
repair_raw_api_object = RawAPIObject(repair_ref,
repair_name,
dataset.nyan_api_objects)
repair_raw_api_object.add_raw_parent(repair_parent)
repair_location = ForwardRef(line, location_ref)
repair_raw_api_object.set_location(repair_location)
line.add_raw_api_object(repair_raw_api_object)
# Type
type_ref = f"util.attribute_change_type.types.{game_entity_name}Repair"
change_type = dataset.pregen_nyan_objects[type_ref].get_nyan_object()
repair_raw_api_object.add_raw_member("type",
change_type,
effect_parent)
# Min value (optional; not added because buildings don't block repairing)
# Max value (optional; not added because there is none in AoE2)
# Change rate
# =================================================================================
rate_name = f"{location_ref}.{repair_name}.ChangeRate"
rate_raw_api_object = RawAPIObject(rate_name, "ChangeRate", dataset.nyan_api_objects)
rate_raw_api_object.add_raw_parent("engine.util.attribute.AttributeRate")
rate_location = ForwardRef(line, repair_ref)
rate_raw_api_object.set_location(rate_location)
attribute = dataset.pregen_nyan_objects["util.attribute.types.Health"].get_nyan_object()
rate_raw_api_object.add_raw_member("type",
attribute,
"engine.util.attribute.AttributeRate")
# Hardcoded repair rate:
# - Buildings: 750 HP/min = 12.5 HP/s
# - Ships/Siege: 187.5 HP/min = 3.125 HP/s
if isinstance(repairable_line, GenieBuildingLineGroup):
repair_rate = 12.5
else:
repair_rate = 3.125
rate_raw_api_object.add_raw_member("rate",
repair_rate,
"engine.util.attribute.AttributeRate")
line.add_raw_api_object(rate_raw_api_object)
# =================================================================================
rate_forward_ref = ForwardRef(line, rate_name)
repair_raw_api_object.add_raw_member("change_rate",
rate_forward_ref,
effect_parent)
# Ignore protection
repair_raw_api_object.add_raw_member("ignore_protection",
[],
effect_parent)
# Repair cost
property_ref = f"{repair_ref}.Cost"
property_raw_api_object = RawAPIObject(property_ref,
"Cost",
dataset.nyan_api_objects)
property_raw_api_object.add_raw_parent("engine.effect.property.type.Cost")
property_location = ForwardRef(line, repair_ref)
property_raw_api_object.set_location(property_location)
line.add_raw_api_object(property_raw_api_object)
cost_ref = f"{game_entity_name}.CreatableGameEntity.{game_entity_name}RepairCost"
cost_forward_ref = ForwardRef(repairable_line, cost_ref)
property_raw_api_object.add_raw_member("cost",
cost_forward_ref,
"engine.effect.property.type.Cost")
property_forward_ref = ForwardRef(line, property_ref)
properties = {
api_objects["engine.effect.property.type.Cost"]: property_forward_ref
}
repair_raw_api_object.add_raw_member("properties",
properties,
"engine.effect.Effect")
repair_forward_ref = ForwardRef(line, repair_ref)
effects.append(repair_forward_ref)
return effects | [
"def",
"get_repair_effects",
"(",
"line",
",",
"location_ref",
")",
":",
"dataset",
"=",
"line",
".",
"data",
"api_objects",
"=",
"dataset",
".",
"nyan_api_objects",
"name_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_entity_lookups",
"(",
"dataset",
".",
"game_version",
")",
"effects",
"=",
"[",
"]",
"effect_parent",
"=",
"\"engine.effect.continuous.flat_attribute_change.FlatAttributeChange\"",
"repair_parent",
"=",
"\"engine.effect.continuous.flat_attribute_change.type.FlatAttributeChangeIncrease\"",
"repairable_lines",
"=",
"[",
"]",
"repairable_lines",
".",
"extend",
"(",
"dataset",
".",
"building_lines",
".",
"values",
"(",
")",
")",
"for",
"unit_line",
"in",
"dataset",
".",
"unit_lines",
".",
"values",
"(",
")",
":",
"if",
"unit_line",
".",
"is_repairable",
"(",
")",
":",
"repairable_lines",
".",
"append",
"(",
"unit_line",
")",
"for",
"repairable_line",
"in",
"repairable_lines",
":",
"game_entity_name",
"=",
"name_lookup_dict",
"[",
"repairable_line",
".",
"get_head_unit_id",
"(",
")",
"]",
"[",
"0",
"]",
"repair_name",
"=",
"f\"{game_entity_name}RepairEffect\"",
"repair_ref",
"=",
"f\"{location_ref}.{repair_name}\"",
"repair_raw_api_object",
"=",
"RawAPIObject",
"(",
"repair_ref",
",",
"repair_name",
",",
"dataset",
".",
"nyan_api_objects",
")",
"repair_raw_api_object",
".",
"add_raw_parent",
"(",
"repair_parent",
")",
"repair_location",
"=",
"ForwardRef",
"(",
"line",
",",
"location_ref",
")",
"repair_raw_api_object",
".",
"set_location",
"(",
"repair_location",
")",
"line",
".",
"add_raw_api_object",
"(",
"repair_raw_api_object",
")",
"# Type",
"type_ref",
"=",
"f\"util.attribute_change_type.types.{game_entity_name}Repair\"",
"change_type",
"=",
"dataset",
".",
"pregen_nyan_objects",
"[",
"type_ref",
"]",
".",
"get_nyan_object",
"(",
")",
"repair_raw_api_object",
".",
"add_raw_member",
"(",
"\"type\"",
",",
"change_type",
",",
"effect_parent",
")",
"# Min value (optional; not added because buildings don't block repairing)",
"# Max value (optional; not added because there is none in AoE2)",
"# Change rate",
"# =================================================================================",
"rate_name",
"=",
"f\"{location_ref}.{repair_name}.ChangeRate\"",
"rate_raw_api_object",
"=",
"RawAPIObject",
"(",
"rate_name",
",",
"\"ChangeRate\"",
",",
"dataset",
".",
"nyan_api_objects",
")",
"rate_raw_api_object",
".",
"add_raw_parent",
"(",
"\"engine.util.attribute.AttributeRate\"",
")",
"rate_location",
"=",
"ForwardRef",
"(",
"line",
",",
"repair_ref",
")",
"rate_raw_api_object",
".",
"set_location",
"(",
"rate_location",
")",
"attribute",
"=",
"dataset",
".",
"pregen_nyan_objects",
"[",
"\"util.attribute.types.Health\"",
"]",
".",
"get_nyan_object",
"(",
")",
"rate_raw_api_object",
".",
"add_raw_member",
"(",
"\"type\"",
",",
"attribute",
",",
"\"engine.util.attribute.AttributeRate\"",
")",
"# Hardcoded repair rate:",
"# - Buildings: 750 HP/min = 12.5 HP/s",
"# - Ships/Siege: 187.5 HP/min = 3.125 HP/s",
"if",
"isinstance",
"(",
"repairable_line",
",",
"GenieBuildingLineGroup",
")",
":",
"repair_rate",
"=",
"12.5",
"else",
":",
"repair_rate",
"=",
"3.125",
"rate_raw_api_object",
".",
"add_raw_member",
"(",
"\"rate\"",
",",
"repair_rate",
",",
"\"engine.util.attribute.AttributeRate\"",
")",
"line",
".",
"add_raw_api_object",
"(",
"rate_raw_api_object",
")",
"# =================================================================================",
"rate_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"rate_name",
")",
"repair_raw_api_object",
".",
"add_raw_member",
"(",
"\"change_rate\"",
",",
"rate_forward_ref",
",",
"effect_parent",
")",
"# Ignore protection",
"repair_raw_api_object",
".",
"add_raw_member",
"(",
"\"ignore_protection\"",
",",
"[",
"]",
",",
"effect_parent",
")",
"# Repair cost",
"property_ref",
"=",
"f\"{repair_ref}.Cost\"",
"property_raw_api_object",
"=",
"RawAPIObject",
"(",
"property_ref",
",",
"\"Cost\"",
",",
"dataset",
".",
"nyan_api_objects",
")",
"property_raw_api_object",
".",
"add_raw_parent",
"(",
"\"engine.effect.property.type.Cost\"",
")",
"property_location",
"=",
"ForwardRef",
"(",
"line",
",",
"repair_ref",
")",
"property_raw_api_object",
".",
"set_location",
"(",
"property_location",
")",
"line",
".",
"add_raw_api_object",
"(",
"property_raw_api_object",
")",
"cost_ref",
"=",
"f\"{game_entity_name}.CreatableGameEntity.{game_entity_name}RepairCost\"",
"cost_forward_ref",
"=",
"ForwardRef",
"(",
"repairable_line",
",",
"cost_ref",
")",
"property_raw_api_object",
".",
"add_raw_member",
"(",
"\"cost\"",
",",
"cost_forward_ref",
",",
"\"engine.effect.property.type.Cost\"",
")",
"property_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"property_ref",
")",
"properties",
"=",
"{",
"api_objects",
"[",
"\"engine.effect.property.type.Cost\"",
"]",
":",
"property_forward_ref",
"}",
"repair_raw_api_object",
".",
"add_raw_member",
"(",
"\"properties\"",
",",
"properties",
",",
"\"engine.effect.Effect\"",
")",
"repair_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"repair_ref",
")",
"effects",
".",
"append",
"(",
"repair_forward_ref",
")",
"return",
"effects"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/aoc/effect_subprocessor.py#L337-L456 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/graph_editor/reroute.py | python | _RerouteMode.check | (cls, mode) | Check swap mode.
Args:
mode: an integer representing one of the modes.
Returns:
A tuple of booleans `(a2b, b2a)` indicating what rerouting needs doing.
Raises:
ValueError: if mode is outside the enum range. | Check swap mode. | [
"Check",
"swap",
"mode",
"."
] | def check(cls, mode):
"""Check swap mode.
Args:
mode: an integer representing one of the modes.
Returns:
A tuple of booleans `(a2b, b2a)` indicating what rerouting needs doing.
Raises:
ValueError: if mode is outside the enum range.
"""
if mode == cls.swap:
return True, True
elif mode == cls.b2a:
return False, True
elif mode == cls.a2b:
return True, False
else:
raise ValueError("Unknown _RerouteMode: {}".format(mode)) | [
"def",
"check",
"(",
"cls",
",",
"mode",
")",
":",
"if",
"mode",
"==",
"cls",
".",
"swap",
":",
"return",
"True",
",",
"True",
"elif",
"mode",
"==",
"cls",
".",
"b2a",
":",
"return",
"False",
",",
"True",
"elif",
"mode",
"==",
"cls",
".",
"a2b",
":",
"return",
"True",
",",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown _RerouteMode: {}\"",
".",
"format",
"(",
"mode",
")",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/graph_editor/reroute.py#L81-L98 |
||
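A tiny check of the three modes; `_RerouteMode` is private to `graph_editor`, so the import below is illustrative only:
from tensorflow.contrib.graph_editor.reroute import _RerouteMode  # private API

assert _RerouteMode.check(_RerouteMode.swap) == (True, True)
assert _RerouteMode.check(_RerouteMode.a2b) == (True, False)
assert _RerouteMode.check(_RerouteMode.b2a) == (False, True)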
devsisters/libquic | 8954789a056d8e7d5fcb6452fd1572ca57eb5c4e | src/third_party/protobuf/python/google/protobuf/internal/encoder.py | python | StringSizer | (field_number, is_repeated, is_packed) | Returns a sizer for a string field. | Returns a sizer for a string field. | [
"Returns",
"a",
"sizer",
"for",
"a",
"string",
"field",
"."
] | def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize | [
"def",
"StringSizer",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
")",
":",
"tag_size",
"=",
"_TagSize",
"(",
"field_number",
")",
"local_VarintSize",
"=",
"_VarintSize",
"local_len",
"=",
"len",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"def",
"RepeatedFieldSize",
"(",
"value",
")",
":",
"result",
"=",
"tag_size",
"*",
"len",
"(",
"value",
")",
"for",
"element",
"in",
"value",
":",
"l",
"=",
"local_len",
"(",
"element",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"result",
"+=",
"local_VarintSize",
"(",
"l",
")",
"+",
"l",
"return",
"result",
"return",
"RepeatedFieldSize",
"else",
":",
"def",
"FieldSize",
"(",
"value",
")",
":",
"l",
"=",
"local_len",
"(",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"tag_size",
"+",
"local_VarintSize",
"(",
"l",
")",
"+",
"l",
"return",
"FieldSize"
] | https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/google/protobuf/internal/encoder.py#L230-L249 |
||
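`StringSizer` leans on protobuf's internal `_TagSize`/`_VarintSize`; a self-contained sketch of the same wire-size arithmetic, with a hand-rolled varint length:
def varint_size(n):
    # number of bytes needed to encode n as a base-128 varint
    size = 1
    while n >= 0x80:
        n >>= 7
        size += 1
    return size

def string_field_size(tag_size, value):
    # tag bytes + length prefix + UTF-8 payload, mirroring FieldSize above
    payload = len(value.encode('utf-8'))
    return tag_size + varint_size(payload) + payload

assert string_field_size(1, 'hi') == 1 + 1 + 2  # tag, 1-byte length, 2 payload bytes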
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | contrib/gizmos/osx_carbon/gizmos.py | python | TreeListCtrl.DeleteChildren | (*args, **kwargs) | return _gizmos.TreeListCtrl_DeleteChildren(*args, **kwargs) | DeleteChildren(self, TreeItemId item) | DeleteChildren(self, TreeItemId item) | [
"DeleteChildren",
"(",
"self",
"TreeItemId",
"item",
")"
] | def DeleteChildren(*args, **kwargs):
"""DeleteChildren(self, TreeItemId item)"""
return _gizmos.TreeListCtrl_DeleteChildren(*args, **kwargs) | [
"def",
"DeleteChildren",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gizmos",
".",
"TreeListCtrl_DeleteChildren",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/osx_carbon/gizmos.py#L862-L864 |
|
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/stdpy/thread.py | python | _remove_thread_id | (threadId) | Removes the thread with the indicated ID from the thread list. | Removes the thread with the indicated ID from the thread list. | [
"Removes",
"the",
"thread",
"with",
"the",
"indicated",
"ID",
"from",
"the",
"thread",
"list",
"."
] | def _remove_thread_id(threadId):
""" Removes the thread with the indicated ID from the thread list. """
# On interpreter shutdown, Python may set module globals to None.
if _threadsLock is None or _threads is None:
return
_threadsLock.acquire()
try:
if threadId in _threads:
thread, locals, wrapper = _threads[threadId]
assert thread.getPythonIndex() == threadId
del _threads[threadId]
thread.setPythonIndex(-1)
finally:
_threadsLock.release() | [
"def",
"_remove_thread_id",
"(",
"threadId",
")",
":",
"# On interpreter shutdown, Python may set module globals to None.",
"if",
"_threadsLock",
"is",
"None",
"or",
"_threads",
"is",
"None",
":",
"return",
"_threadsLock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"threadId",
"in",
"_threads",
":",
"thread",
",",
"locals",
",",
"wrapper",
"=",
"_threads",
"[",
"threadId",
"]",
"assert",
"thread",
".",
"getPythonIndex",
"(",
")",
"==",
"threadId",
"del",
"_threads",
"[",
"threadId",
"]",
"thread",
".",
"setPythonIndex",
"(",
"-",
"1",
")",
"finally",
":",
"_threadsLock",
".",
"release",
"(",
")"
] | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/stdpy/thread.py#L217-L233 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/share/gdb/python/gdb/FrameDecorator.py | python | FrameDecorator.line | (self) | Return line number information associated with the frame's
pc. If symbol table/line information does not exist, or if
this frame is deemed to be a special case, return None | Return line number information associated with the frame's
pc. If symbol table/line information does not exist, or if
this frame is deemed to be a special case, return None | [
"Return",
"line",
"number",
"information",
"associated",
"with",
"the",
"frame",
"s",
"pc",
".",
"If",
"symbol",
"table",
"/",
"line",
"information",
"does",
"not",
"exist",
"or",
"if",
"this",
"frame",
"is",
"deemed",
"to",
"be",
"a",
"special",
"case",
"return",
"None"
] | def line(self):
""" Return line number information associated with the frame's
pc. If symbol table/line information does not exist, or if
this frame is deemed to be a special case, return None"""
if hasattr(self._base, "line"):
return self._base.line()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
sal = frame.find_sal()
if (sal):
return sal.line
else:
return None | [
"def",
"line",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"_base",
",",
"\"line\"",
")",
":",
"return",
"self",
".",
"_base",
".",
"line",
"(",
")",
"frame",
"=",
"self",
".",
"inferior_frame",
"(",
")",
"if",
"self",
".",
"_is_limited_frame",
"(",
"frame",
")",
":",
"return",
"None",
"sal",
"=",
"frame",
".",
"find_sal",
"(",
")",
"if",
"(",
"sal",
")",
":",
"return",
"sal",
".",
"line",
"else",
":",
"return",
"None"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/share/gdb/python/gdb/FrameDecorator.py#L179-L195 |
||
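A sketch of overriding `line` in a decorator subclass; this only runs inside gdb's embedded Python, where the `gdb` module is importable:
from gdb.FrameDecorator import FrameDecorator

class ZeroAsNone(FrameDecorator):
    # normalize a line number of 0 (meaning no line info) to None
    def line(self):
        base = super(ZeroAsNone, self).line()
        return base or None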
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/xcode_emulation.py | python | XcodeSettings.GetLdflags | (self, configname, product_dir, gyp_to_build_path) | return ldflags | Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory. | Returns flags that need to be passed to the linker. | [
"Returns",
"flags",
"that",
"need",
"to",
"be",
"passed",
"to",
"the",
"linker",
"."
] | def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._Appendf(
ldflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags | [
"def",
"GetLdflags",
"(",
"self",
",",
"configname",
",",
"product_dir",
",",
"gyp_to_build_path",
")",
":",
"self",
".",
"configname",
"=",
"configname",
"ldflags",
"=",
"[",
"]",
"# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS",
"# can contain entries that depend on this. Explicitly absolutify these.",
"for",
"ldflag",
"in",
"self",
".",
"_Settings",
"(",
")",
".",
"get",
"(",
"'OTHER_LDFLAGS'",
",",
"[",
"]",
")",
":",
"ldflags",
".",
"append",
"(",
"self",
".",
"_MapLinkerFlagFilename",
"(",
"ldflag",
",",
"gyp_to_build_path",
")",
")",
"if",
"self",
".",
"_Test",
"(",
"'DEAD_CODE_STRIPPING'",
",",
"'YES'",
",",
"default",
"=",
"'NO'",
")",
":",
"ldflags",
".",
"append",
"(",
"'-Wl,-dead_strip'",
")",
"if",
"self",
".",
"_Test",
"(",
"'PREBINDING'",
",",
"'YES'",
",",
"default",
"=",
"'NO'",
")",
":",
"ldflags",
".",
"append",
"(",
"'-Wl,-prebind'",
")",
"self",
".",
"_Appendf",
"(",
"ldflags",
",",
"'DYLIB_COMPATIBILITY_VERSION'",
",",
"'-compatibility_version %s'",
")",
"self",
".",
"_Appendf",
"(",
"ldflags",
",",
"'DYLIB_CURRENT_VERSION'",
",",
"'-current_version %s'",
")",
"self",
".",
"_Appendf",
"(",
"ldflags",
",",
"'MACOSX_DEPLOYMENT_TARGET'",
",",
"'-mmacosx-version-min=%s'",
")",
"if",
"'SDKROOT'",
"in",
"self",
".",
"_Settings",
"(",
")",
":",
"ldflags",
".",
"append",
"(",
"'-isysroot '",
"+",
"self",
".",
"_SdkPath",
"(",
")",
")",
"for",
"library_path",
"in",
"self",
".",
"_Settings",
"(",
")",
".",
"get",
"(",
"'LIBRARY_SEARCH_PATHS'",
",",
"[",
"]",
")",
":",
"ldflags",
".",
"append",
"(",
"'-L'",
"+",
"gyp_to_build_path",
"(",
"library_path",
")",
")",
"if",
"'ORDER_FILE'",
"in",
"self",
".",
"_Settings",
"(",
")",
":",
"ldflags",
".",
"append",
"(",
"'-Wl,-order_file '",
"+",
"'-Wl,'",
"+",
"gyp_to_build_path",
"(",
"self",
".",
"_Settings",
"(",
")",
"[",
"'ORDER_FILE'",
"]",
")",
")",
"archs",
"=",
"self",
".",
"_Settings",
"(",
")",
".",
"get",
"(",
"'ARCHS'",
",",
"[",
"'i386'",
"]",
")",
"if",
"len",
"(",
"archs",
")",
"!=",
"1",
":",
"# TODO: Supporting fat binaries will be annoying.",
"self",
".",
"_WarnUnimplemented",
"(",
"'ARCHS'",
")",
"archs",
"=",
"[",
"'i386'",
"]",
"ldflags",
".",
"append",
"(",
"'-arch '",
"+",
"archs",
"[",
"0",
"]",
")",
"# Xcode adds the product directory by default.",
"ldflags",
".",
"append",
"(",
"'-L'",
"+",
"product_dir",
")",
"install_name",
"=",
"self",
".",
"GetInstallName",
"(",
")",
"if",
"install_name",
":",
"ldflags",
".",
"append",
"(",
"'-install_name '",
"+",
"install_name",
".",
"replace",
"(",
"' '",
",",
"r'\\ '",
")",
")",
"for",
"rpath",
"in",
"self",
".",
"_Settings",
"(",
")",
".",
"get",
"(",
"'LD_RUNPATH_SEARCH_PATHS'",
",",
"[",
"]",
")",
":",
"ldflags",
".",
"append",
"(",
"'-Wl,-rpath,'",
"+",
"rpath",
")",
"config",
"=",
"self",
".",
"spec",
"[",
"'configurations'",
"]",
"[",
"self",
".",
"configname",
"]",
"framework_dirs",
"=",
"config",
".",
"get",
"(",
"'mac_framework_dirs'",
",",
"[",
"]",
")",
"for",
"directory",
"in",
"framework_dirs",
":",
"ldflags",
".",
"append",
"(",
"'-F'",
"+",
"directory",
".",
"replace",
"(",
"'$(SDKROOT)'",
",",
"self",
".",
"_SdkPath",
"(",
")",
")",
")",
"self",
".",
"configname",
"=",
"None",
"return",
"ldflags"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/xcode_emulation.py#L499-L563 |
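The settings-to-flags translation above repeats one pattern throughout: probe a settings dict, append the matching linker flag. A minimal self-contained sketch of that pattern (the settings values below are illustrative, not taken from a real project):

settings = {'DEAD_CODE_STRIPPING': 'YES', 'LIBRARY_SEARCH_PATHS': ['libs']}

ldflags = []
if settings.get('DEAD_CODE_STRIPPING', 'NO') == 'YES':
    ldflags.append('-Wl,-dead_strip')
for library_path in settings.get('LIBRARY_SEARCH_PATHS', []):
    ldflags.append('-L' + library_path)

print(ldflags)  # ['-Wl,-dead_strip', '-Llibs']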
|
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | current/deps/v8/tools/grokdump.py | python | InspectionShell.do_dp | (self, address) | return self.do_display_page(address) | see display_page | see display_page | [
"see",
"display_page"
] | def do_dp(self, address):
""" see display_page """
return self.do_display_page(address) | [
"def",
"do_dp",
"(",
"self",
",",
"address",
")",
":",
"return",
"self",
".",
"do_display_page",
"(",
"address",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/deps/v8/tools/grokdump.py#L3622-L3624 |
|
rootm0s/Protectors | 5b3f4d11687a5955caf9c3af30666c4bfc2c19ab | OWASP-ZSC/module/readline_windows/pyreadline/rlmain.py | python | BaseReadline.get_line_buffer | (self) | return self.mode.l_buffer.get_line_text() | Return the current contents of the line buffer. | Return the current contents of the line buffer. | [
"Return",
"the",
"current",
"contents",
"of",
"the",
"line",
"buffer",
"."
] | def get_line_buffer(self):
'''Return the current contents of the line buffer.'''
return self.mode.l_buffer.get_line_text() | [
"def",
"get_line_buffer",
"(",
"self",
")",
":",
"return",
"self",
".",
"mode",
".",
"l_buffer",
".",
"get_line_text",
"(",
")"
] | https://github.com/rootm0s/Protectors/blob/5b3f4d11687a5955caf9c3af30666c4bfc2c19ab/OWASP-ZSC/module/readline_windows/pyreadline/rlmain.py#L120-L122 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/json_to_struct/json_to_struct.py | python | _Load | (filename) | return result | Loads a JSON file into a Python object and returns this object. | Loads a JSON file into a Python object and returns this object. | [
"Loads",
"a",
"JSON",
"file",
"int",
"a",
"Python",
"object",
"and",
"return",
"this",
"object",
"."
] | def _Load(filename):
"""Loads a JSON file int a Python object and return this object.
"""
# TODO(beaudoin): When moving to Python 2.7 use object_pairs_hook=OrderedDict.
with open(filename, 'r') as handle:
result = json.loads(json_comment_eater.Nom(handle.read()))
return result | [
"def",
"_Load",
"(",
"filename",
")",
":",
"# TODO(beaudoin): When moving to Python 2.7 use object_pairs_hook=OrderedDict.",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"handle",
":",
"result",
"=",
"json",
".",
"loads",
"(",
"json_comment_eater",
".",
"Nom",
"(",
"handle",
".",
"read",
"(",
")",
")",
")",
"return",
"result"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/json_to_struct/json_to_struct.py#L168-L174 |
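json_comment_eater.Nom is a Chromium-local helper that strips comments so the file can pass through json.loads. A rough standalone stand-in, assuming //-style line comments only (the regex is deliberately simplistic and not string-literal aware):

import json
import re

def load_json_with_comments(filename):
    # drop full-line // comments before handing the text to json.loads
    with open(filename, 'r') as handle:
        text = re.sub(r'^\s*//.*$', '', handle.read(), flags=re.M)
    return json.loads(text)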
|
kit-cel/gr-radar | ceebb6d83280526f6e08a8aa0dde486db6898c81 | docs/doxygen/doxyxml/doxyindex.py | python | generate_swig_doc_i | (self) | %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; | %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; | [
"%feature",
"(",
"docstring",
")",
"gr_make_align_on_samplenumbers_ss",
"::",
"align_state",
"Wraps",
"the",
"C",
"++",
":",
"gr_align_on_samplenumbers_ss",
"::",
"align_state",
";"
] | def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass | [
"def",
"generate_swig_doc_i",
"(",
"self",
")",
":",
"pass"
] | https://github.com/kit-cel/gr-radar/blob/ceebb6d83280526f6e08a8aa0dde486db6898c81/docs/doxygen/doxyxml/doxyindex.py#L63-L68 |
||
msitt/blpapi-python | bebcf43668c9e5f5467b1f685f9baebbfc45bc87 | src/blpapi/element.py | python | Element._fromPyHelper | (self, value, name=None, path=None) | Helper method for `fromPy`.
Args:
value: Used to format this `Element` or the `Element` specified
by ``name``.
name (Name or str): If ``name`` is ``None``, format this `Element`
with ``value``. Otherwise, ``name`` refers to this `Element`'s
sub-`Element` that will be formatted with ``value``.
path (str): The path uniquely identifying this `Element`, starting
from the root `Element`. | Helper method for `fromPy`. | [
"Helper",
"method",
"for",
"fromPy",
"."
] | def _fromPyHelper(self, value, name=None, path=None):
"""Helper method for `fromPy`.
Args:
value: Used to format this `Element` or the `Element` specified
by ``name``.
name (Name or str): If ``name`` is ``None``, format this `Element`
with ``value``. Otherwise, ``name`` refers to this `Element`'s
sub-`Element` that will be formatted with ``value``.
path (str): The path uniquely identifying this `Element`, starting
from the root `Element`.
"""
# Note, the double exception throwing has no good solution in Python 2,
# but Python 3 has exception chaining that we should use when we can
activeElement = self
def getActivePathMessage(isArrayEntry=False):
elementType = "scalar"
if activeElement.isArray():
elementType = "array"
elif activeElement.isComplexType():
elementType = "complex"
arrayEntryText = "an entry in " if isArrayEntry else ""
return "While operating on {}{} Element `{}`, ".format(
arrayEntryText, elementType, path)
if path is None:
path = str(activeElement.name())
if name is not None:
try:
activeElement = self.getElement(name)
path += "/" + str(activeElement.name())
except Exception as exc:
errorMsg = "encountered error: {}".format(exc)
raise Exception(getActivePathMessage() + errorMsg)
if activeElement.numElements() or activeElement.numValues():
errorMsg = "this Element has already been formatted"
raise Exception(getActivePathMessage() + errorMsg)
if isinstance(value, Mapping):
if not activeElement.isComplexType():
errorMsg = "encountered a `Mapping` instance while" \
" formatting a non-complex Element"
raise Exception(getActivePathMessage() + errorMsg)
complexElement = activeElement
for subName in value:
subValue = value[subName]
complexElement._fromPyHelper(subValue, subName, path)
elif isNonScalarSequence(value):
if not activeElement.isArray():
errorMsg = "encountered a `Sequence` while formatting a" \
" non-array Element"
raise Exception(getActivePathMessage() + errorMsg)
arrayElement = activeElement
typeDef = arrayElement.elementDefinition().typeDefinition()
arrayValuesAreScalar = not typeDef.isComplexType()
for index, val in enumerate(value):
if isinstance(val, Mapping):
if arrayValuesAreScalar:
path += "[{}]".format(index)
errorMsg = "encountered a `Mapping` where a scalar" \
" value was expected."
raise Exception(getActivePathMessage(isArrayEntry=True)
+ errorMsg)
appendedElement = arrayElement.appendElement()
arrayEntryPath = path + "[{}]".format(index)
appendedElement._fromPyHelper(val, path=arrayEntryPath)
elif isNonScalarSequence(val):
path += "[{}]".format(index)
expectedObject = "scalar value" if arrayValuesAreScalar \
else "`Mapping`"
errorMsg = "encountered a nested `Sequence` where a {}" \
" was expected.".format(expectedObject)
raise Exception(getActivePathMessage(isArrayEntry=True)
+ errorMsg)
else:
if not arrayValuesAreScalar:
path += "[{}]".format(index)
errorMsg = "encountered a scalar value where a" \
" `Mapping` was expected."
raise Exception(getActivePathMessage(isArrayEntry=True)
+ errorMsg)
try:
arrayElement.appendValue(val)
except Exception as exc:
path += "[{}]".format(index)
errorMsg = "encountered error: {}".format(exc)
raise Exception(getActivePathMessage(isArrayEntry=True)
+ errorMsg)
else:
if value is None:
return
if activeElement.isComplexType() or activeElement.isArray():
errorMsg = "encountered an incompatible type, {}, for a" \
" non-scalar Element".format(type(value))
raise Exception(getActivePathMessage() + errorMsg)
try:
activeElement.setValue(value)
except Exception as exc:
errorMsg = "encountered error: {}".format(exc)
raise Exception(getActivePathMessage() + errorMsg) | [
"def",
"_fromPyHelper",
"(",
"self",
",",
"value",
",",
"name",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"# Note, the double exception throwing has no good solution in Python 2,",
"# but Python 3 has exception chaining that we should use when we can",
"activeElement",
"=",
"self",
"def",
"getActivePathMessage",
"(",
"isArrayEntry",
"=",
"False",
")",
":",
"elementType",
"=",
"\"scalar\"",
"if",
"activeElement",
".",
"isArray",
"(",
")",
":",
"elementType",
"=",
"\"array\"",
"elif",
"activeElement",
".",
"isComplexType",
"(",
")",
":",
"elementType",
"=",
"\"complex\"",
"arrayEntryText",
"=",
"\"an entry in \"",
"if",
"isArrayEntry",
"else",
"\"\"",
"return",
"\"While operating on {}{} Element `{}`, \"",
".",
"format",
"(",
"arrayEntryText",
",",
"elementType",
",",
"path",
")",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"str",
"(",
"activeElement",
".",
"name",
"(",
")",
")",
"if",
"name",
"is",
"not",
"None",
":",
"try",
":",
"activeElement",
"=",
"self",
".",
"getElement",
"(",
"name",
")",
"path",
"+=",
"\"/\"",
"+",
"str",
"(",
"activeElement",
".",
"name",
"(",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"errorMsg",
"=",
"\"encountered error: {}\"",
".",
"format",
"(",
"exc",
")",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")",
"if",
"activeElement",
".",
"numElements",
"(",
")",
"or",
"activeElement",
".",
"numValues",
"(",
")",
":",
"errorMsg",
"=",
"\"this Element has already been formatted\"",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")",
"if",
"isinstance",
"(",
"value",
",",
"Mapping",
")",
":",
"if",
"not",
"activeElement",
".",
"isComplexType",
"(",
")",
":",
"errorMsg",
"=",
"\"encountered a `Mapping` instance while\"",
"\" formatting a non-complex Element\"",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")",
"complexElement",
"=",
"activeElement",
"for",
"subName",
"in",
"value",
":",
"subValue",
"=",
"value",
"[",
"subName",
"]",
"complexElement",
".",
"_fromPyHelper",
"(",
"subValue",
",",
"subName",
",",
"path",
")",
"elif",
"isNonScalarSequence",
"(",
"value",
")",
":",
"if",
"not",
"activeElement",
".",
"isArray",
"(",
")",
":",
"errorMsg",
"=",
"\"encountered a `Sequence` while formatting a\"",
"\" non-array Element\"",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")",
"arrayElement",
"=",
"activeElement",
"typeDef",
"=",
"arrayElement",
".",
"elementDefinition",
"(",
")",
".",
"typeDefinition",
"(",
")",
"arrayValuesAreScalar",
"=",
"not",
"typeDef",
".",
"isComplexType",
"(",
")",
"for",
"index",
",",
"val",
"in",
"enumerate",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"Mapping",
")",
":",
"if",
"arrayValuesAreScalar",
":",
"path",
"+=",
"\"[{}]\"",
".",
"format",
"(",
"index",
")",
"errorMsg",
"=",
"\"encountered a `Mapping` where a scalar\"",
"\" value was expected.\"",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
"isArrayEntry",
"=",
"True",
")",
"+",
"errorMsg",
")",
"appendedElement",
"=",
"arrayElement",
".",
"appendElement",
"(",
")",
"arrayEntryPath",
"=",
"path",
"+",
"\"[{}]\"",
".",
"format",
"(",
"index",
")",
"appendedElement",
".",
"_fromPyHelper",
"(",
"val",
",",
"path",
"=",
"arrayEntryPath",
")",
"elif",
"isNonScalarSequence",
"(",
"val",
")",
":",
"path",
"+=",
"\"[{}]\"",
".",
"format",
"(",
"index",
")",
"expectedObject",
"=",
"\"scalar value\"",
"if",
"arrayValuesAreScalar",
"else",
"\"`Mapping`\"",
"errorMsg",
"=",
"\"encountered a nested `Sequence` where a {}\"",
"\" was expected.\"",
".",
"format",
"(",
"expectedObject",
")",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
"isArrayEntry",
"=",
"True",
")",
"+",
"errorMsg",
")",
"else",
":",
"if",
"not",
"arrayValuesAreScalar",
":",
"path",
"+=",
"\"[{}]\"",
".",
"format",
"(",
"index",
")",
"errorMsg",
"=",
"\"encountered a scalar value where a\"",
"\" `Mapping` was expected.\"",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
"isArrayEntry",
"=",
"True",
")",
"+",
"errorMsg",
")",
"try",
":",
"arrayElement",
".",
"appendValue",
"(",
"val",
")",
"except",
"Exception",
"as",
"exc",
":",
"path",
"+=",
"\"[{}]\"",
".",
"format",
"(",
"index",
")",
"errorMsg",
"=",
"\"encountered error: {}\"",
".",
"format",
"(",
"exc",
")",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
"isArrayEntry",
"=",
"True",
")",
"+",
"errorMsg",
")",
"else",
":",
"if",
"value",
"is",
"None",
":",
"return",
"if",
"activeElement",
".",
"isComplexType",
"(",
")",
"or",
"activeElement",
".",
"isArray",
"(",
")",
":",
"errorMsg",
"=",
"\"encountered an incompatible type, {}, for a\"",
"\" non-scalar Element\"",
".",
"format",
"(",
"type",
"(",
"value",
")",
")",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")",
"try",
":",
"activeElement",
".",
"setValue",
"(",
"value",
")",
"except",
"Exception",
"as",
"exc",
":",
"errorMsg",
"=",
"\"encountered error: {}\"",
".",
"format",
"(",
"exc",
")",
"raise",
"Exception",
"(",
"getActivePathMessage",
"(",
")",
"+",
"errorMsg",
")"
] | https://github.com/msitt/blpapi-python/blob/bebcf43668c9e5f5467b1f685f9baebbfc45bc87/src/blpapi/element.py#L1219-L1330 |
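The recursion above hinges on a three-way split: a Mapping fills a complex Element, a non-string Sequence fills an array Element, and anything else is treated as a scalar. That dispatch rule in isolation, on plain Python data (this sketch uses none of the blpapi types):

from collections.abc import Mapping, Sequence

def kind(value):
    # mirrors the Mapping / Sequence / scalar split used by _fromPyHelper
    if isinstance(value, Mapping):
        return 'complex'
    if isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
        return 'array'
    return 'scalar'

print(kind({'a': 1}), kind([1, 2]), kind(3.5))  # complex array scalar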
||
blackberry/Boost | fc90c3fde129c62565c023f091eddc4a7ed9902b | tools/build/v2/build/scanner.py | python | reset | () | Clear the module state. This is mainly for testing purposes. | Clear the module state. This is mainly for testing purposes. | [
"Clear",
"the",
"module",
"state",
".",
"This",
"is",
"mainly",
"for",
"testing",
"purposes",
"."
] | def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __scanners, __rv_cache, __scanner_cache
# Maps registered scanner classes to relevant properties
__scanners = {}
# A cache of scanners.
# The key is: class_name.properties_tag, where properties_tag is the concatenation
# of all relevant properties, separated by '-'
__scanner_cache = {} | [
"def",
"reset",
"(",
")",
":",
"global",
"__scanners",
",",
"__rv_cache",
",",
"__scanner_cache",
"# Maps registered scanner classes to relevant properties",
"__scanners",
"=",
"{",
"}",
"# A cache of scanners.",
"# The key is: class_name.properties_tag, where properties_tag is the concatenation ",
"# of all relevant properties, separated by '-'",
"__scanner_cache",
"=",
"{",
"}"
] | https://github.com/blackberry/Boost/blob/fc90c3fde129c62565c023f091eddc4a7ed9902b/tools/build/v2/build/scanner.py#L38-L49 |
||
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/packaging/rpm.py | python | collectintargz | (target, source, env) | return (target, tarball) | Puts all source files into a tar.gz file. | Puts all source files into a tar.gz file. | [
"Puts",
"all",
"source",
"files",
"into",
"a",
"tar",
".",
"gz",
"file",
"."
] | def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package, until this is changed
# this hack needs to be here that tries to pack all sources in.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
tarball = env['SOURCE_URL'].split('/')[-1]
except KeyError, e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball) | [
"def",
"collectintargz",
"(",
"target",
",",
"source",
",",
"env",
")",
":",
"# the rpm tool depends on a source package, until this is changed",
"# this hack needs to be here that tries to pack all sources in.",
"sources",
"=",
"env",
".",
"FindSourceFiles",
"(",
")",
"# filter out the target we are building the source list for.",
"sources",
"=",
"[",
"s",
"for",
"s",
"in",
"sources",
"if",
"s",
"not",
"in",
"target",
"]",
"# find the .spec file for rpm and add it since it is not necessarily found",
"# by the FindSourceFiles function.",
"sources",
".",
"extend",
"(",
"[",
"s",
"for",
"s",
"in",
"source",
"if",
"str",
"(",
"s",
")",
".",
"rfind",
"(",
"'.spec'",
")",
"!=",
"-",
"1",
"]",
")",
"# as the source contains the url of the source package this rpm package",
"# is built from, we extract the target name",
"tarball",
"=",
"(",
"str",
"(",
"target",
"[",
"0",
"]",
")",
"+",
"\".tar.gz\"",
")",
".",
"replace",
"(",
"'.rpm'",
",",
"''",
")",
"try",
":",
"tarball",
"=",
"env",
"[",
"'SOURCE_URL'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"except",
"KeyError",
",",
"e",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Missing PackageTag '%s' for RPM packager\"",
"%",
"e",
".",
"args",
"[",
"0",
"]",
")",
"tarball",
"=",
"src_targz",
".",
"package",
"(",
"env",
",",
"source",
"=",
"sources",
",",
"target",
"=",
"tarball",
",",
"PACKAGEROOT",
"=",
"env",
"[",
"'PACKAGEROOT'",
"]",
",",
")",
"return",
"(",
"target",
",",
"tarball",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/packaging/rpm.py#L86-L110 |
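The end product of this step is an ordinary .tar.gz of the source list; a minimal standard-library sketch of that packaging (file names are illustrative):

import tarfile

sources = ['mypkg.spec', 'src/main.c']
with tarfile.open('mypkg.tar.gz', 'w:gz') as tar:
    for src in sources:
        tar.add(src)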
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/keras/python/keras/backend.py | python | set_session | (session) | Sets the global TensorFlow session.
Arguments:
session: A TF Session. | Sets the global TensorFlow session. | [
"Sets",
"the",
"global",
"TensorFlow",
"session",
"."
] | def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION = session | [
"def",
"set_session",
"(",
"session",
")",
":",
"global",
"_SESSION",
"_SESSION",
"=",
"session"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/keras/python/keras/backend.py#L380-L387 |
||
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/physics/SIP/sipspectrum.py | python | SIPSpectrum.removeEpsilonEffect | (self, er=None, mode=0) | return er | remove effect of (constant high-frequency) epsilon from sigma
Parameters
----------
er : float
relative epsilon to correct for (else automatically determined)
mode : int
automatic epsilon determination mode (see determineEpsilon)
Returns
-------
er : float
determined permittivity (see determineEpsilon) | remove effect of (constant high-frequency) epsilon from sigma | [
"remove",
"effect",
"of",
"(",
"constant",
"high",
"-",
"frequency",
")",
"epsilon",
"from",
"sigma"
] | def removeEpsilonEffect(self, er=None, mode=0):
"""remove effect of (constant high-frequency) epsilon from sigma
Parameters
----------
er : float
relative epsilon to correct for (else automatically determined)
mode : int
automatic epsilon determination mode (see determineEpsilon)
Returns
-------
er : float
determined permittivity (see determineEpsilon)
"""
sigR, sigI = self.realimag(cond=True)
if er is None: #
er = self.determineEpsilon(mode=mode, sigmaR=sigR, sigmaI=sigI)
print("detected epsilon of ", er)
sigI -= er * self.omega() * self.epsilon0
self.phiOrg = self.phi
self.phi = np.arctan(sigI/sigR)
self.ampOrg = self.amp
self.amp = 1. / np.sqrt(sigR**2 + sigI**2)
return er | [
"def",
"removeEpsilonEffect",
"(",
"self",
",",
"er",
"=",
"None",
",",
"mode",
"=",
"0",
")",
":",
"sigR",
",",
"sigI",
"=",
"self",
".",
"realimag",
"(",
"cond",
"=",
"True",
")",
"if",
"er",
"is",
"None",
":",
"#",
"er",
"=",
"self",
".",
"determineEpsilon",
"(",
"mode",
"=",
"mode",
",",
"sigmaR",
"=",
"sigR",
",",
"sigmaI",
"=",
"sigI",
")",
"print",
"(",
"\"detected epsilon of \"",
",",
"er",
")",
"sigI",
"-=",
"er",
"*",
"self",
".",
"omega",
"(",
")",
"*",
"self",
".",
"epsilon0",
"self",
".",
"phiOrg",
"=",
"self",
".",
"phi",
"self",
".",
"phi",
"=",
"np",
".",
"arctan",
"(",
"sigI",
"/",
"sigR",
")",
"self",
".",
"ampOrg",
"=",
"self",
".",
"amp",
"self",
".",
"amp",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"sigR",
"**",
"2",
"+",
"sigR",
"**",
"2",
")",
"return",
"er"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/SIP/sipspectrum.py#L562-L587 |
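The correction at the core of this method subtracts the dielectric displacement term er * omega * eps0 from the imaginary conductivity. Stated numerically (all values below are illustrative, chosen so the corrected values stay positive):

import numpy as np

eps0 = 8.8541878128e-12               # vacuum permittivity in F/m
f = np.array([1e3, 1e4, 1e5])         # frequencies in Hz
omega = 2 * np.pi * f
sigI = np.array([1e-5, 2e-5, 5e-5])   # imaginary conductivity in S/m
er = 5.0                              # assumed relative permittivity

sigI_corrected = sigI - er * omega * eps0
phi = np.arctan(sigI_corrected / 1e-3)  # with a real conductivity of 1e-3 S/m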
|
ONLYOFFICE/core | 1f976ae79a2593fc22ee78e9fdbb76090e83785c | DesktopEditor/freetype_names/freetype-2.5.3/src/tools/docmaker/tohtml.py | python | HtmlFormatter.make_html_word | ( self, word ) | return html_quote( word ) | analyze a simple word to detect cross-references and styling | analyze a simple word to detect cross-references and styling | [
"analyze",
"a",
"simple",
"word",
"to",
"detect",
"cross",
"-",
"references",
"and",
"styling"
] | def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
except:
# we detected a cross-reference to an unknown item
sys.stderr.write( \
"WARNING: undefined cross reference '" + name + "'.\n" )
return '?' + name + '?' + rest
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<i>' + name + '</i>' + rest
m = re_bold.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<b>' + name + '</b>' + rest
return html_quote( word ) | [
"def",
"make_html_word",
"(",
"self",
",",
"word",
")",
":",
"# look for cross-references",
"m",
"=",
"re_crossref",
".",
"match",
"(",
"word",
")",
"if",
"m",
":",
"try",
":",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"rest",
"=",
"m",
".",
"group",
"(",
"2",
")",
"block",
"=",
"self",
".",
"identifiers",
"[",
"name",
"]",
"url",
"=",
"self",
".",
"make_block_url",
"(",
"block",
")",
"return",
"'<a href=\"'",
"+",
"url",
"+",
"'\">'",
"+",
"name",
"+",
"'</a>'",
"+",
"rest",
"except",
":",
"# we detected a cross-reference to an unknown item",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"WARNING: undefined cross reference '\"",
"+",
"name",
"+",
"\"'.\\n\"",
")",
"return",
"'?'",
"+",
"name",
"+",
"'?'",
"+",
"rest",
"# look for italics and bolds",
"m",
"=",
"re_italic",
".",
"match",
"(",
"word",
")",
"if",
"m",
":",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"rest",
"=",
"m",
".",
"group",
"(",
"3",
")",
"return",
"'<i>'",
"+",
"name",
"+",
"'</i>'",
"+",
"rest",
"m",
"=",
"re_bold",
".",
"match",
"(",
"word",
")",
"if",
"m",
":",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"rest",
"=",
"m",
".",
"group",
"(",
"3",
")",
"return",
"'<b>'",
"+",
"name",
"+",
"'</b>'",
"+",
"rest",
"return",
"html_quote",
"(",
"word",
")"
] | https://github.com/ONLYOFFICE/core/blob/1f976ae79a2593fc22ee78e9fdbb76090e83785c/DesktopEditor/freetype_names/freetype-2.5.3/src/tools/docmaker/tohtml.py#L226-L256 |
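The italic/bold branch above is ordinary regex rewriting of inline markup. In isolation, with an illustrative stand-in for docmaker's re_italic pattern (not its exact regex):

import re

re_italic = re.compile(r'_(\w+)_(.*)')  # illustrative, not docmaker's exact pattern
m = re_italic.match('_slanted_,')
if m:
    print('<i>' + m.group(1) + '</i>' + m.group(2))  # <i>slanted</i>,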
|
xbmc/xbmc | 091211a754589fc40a2a1f239b0ce9f4ee138268 | addons/metadata.tvshows.themoviedb.org.python/libs/data_utils.py | python | _clean_plot | (plot) | return plot | Replace HTML tags with Kodi skin tags | Replace HTML tags with Kodi skin tags | [
"Replace",
"HTML",
"tags",
"with",
"Kodi",
"skin",
"tags"
] | def _clean_plot(plot):
# type: (Text) -> Text
"""Replace HTML tags with Kodi skin tags"""
for repl in CLEAN_PLOT_REPLACEMENTS:
plot = plot.replace(repl[0], repl[1])
plot = TAG_RE.sub('', plot)
return plot | [
"def",
"_clean_plot",
"(",
"plot",
")",
":",
"# type: (Text) -> Text",
"for",
"repl",
"in",
"CLEAN_PLOT_REPLACEMENTS",
":",
"plot",
"=",
"plot",
".",
"replace",
"(",
"repl",
"[",
"0",
"]",
",",
"repl",
"[",
"1",
"]",
")",
"plot",
"=",
"TAG_RE",
".",
"sub",
"(",
"''",
",",
"plot",
")",
"return",
"plot"
] | https://github.com/xbmc/xbmc/blob/091211a754589fc40a2a1f239b0ce9f4ee138268/addons/metadata.tvshows.themoviedb.org.python/libs/data_utils.py#L63-L69 |
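CLEAN_PLOT_REPLACEMENTS and TAG_RE are module-level constants defined elsewhere in data_utils.py; a self-contained sketch with illustrative stand-ins shows the two-phase cleanup (known tags mapped to Kodi markup, everything else stripped):

import re

TAG_RE = re.compile(r'<[^>]+>')
CLEAN_PLOT_REPLACEMENTS = (('<b>', '[B]'), ('</b>', '[/B]'), ('<br>', '\n'))

def clean_plot(plot):
    for old, new in CLEAN_PLOT_REPLACEMENTS:
        plot = plot.replace(old, new)
    return TAG_RE.sub('', plot)

print(clean_plot('<b>Pilot</b><br>A <i>fine</i> start'))  # [B]Pilot[/B], newline, A fine start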
|
ppwwyyxx/speaker-recognition | 15d7bf32ad4ba2f1543e1287b03f3f2e6791d4dd | log/final/dataextractor.py | python | DataExtractor.extract_data | (self, lines) | return xs, ys, yerr | return x, y, yerr | return x, y, yerr | [
"return",
"x",
"y",
"yerr"
] | def extract_data(self, lines):
""" return x, y, yerr """
data = defaultdict(list)
cur_x = None
for lino, line in enumerate(lines):
tx = self.get_x(line)
if tx is not None:
assert cur_x is None, (lino + 1, line)
cur_x = tx
ty = self.get_y(line)
if ty is not None:
assert cur_x is not None, (lino + 1, line)
data[cur_x].append(ty)
cur_x = None
xs, ys, yerr = [], [], []
for x, y in sorted(data.iteritems()):
xs.append(x)
ys.append(np.mean(y))
yerr.append(np.std(y))
return xs, ys, yerr | [
"def",
"extract_data",
"(",
"self",
",",
"lines",
")",
":",
"data",
"=",
"defaultdict",
"(",
"list",
")",
"cur_x",
"=",
"None",
"for",
"lino",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"tx",
"=",
"self",
".",
"get_x",
"(",
"line",
")",
"if",
"tx",
"is",
"not",
"None",
":",
"assert",
"cur_x",
"is",
"None",
",",
"(",
"lino",
"+",
"1",
",",
"line",
")",
"cur_x",
"=",
"tx",
"ty",
"=",
"self",
".",
"get_y",
"(",
"line",
")",
"if",
"ty",
"is",
"not",
"None",
":",
"assert",
"cur_x",
"is",
"not",
"None",
",",
"(",
"lino",
"+",
"1",
",",
"line",
")",
"data",
"[",
"cur_x",
"]",
".",
"append",
"(",
"ty",
")",
"cur_x",
"=",
"None",
"xs",
",",
"ys",
",",
"yerr",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"x",
",",
"y",
"in",
"sorted",
"(",
"data",
".",
"iteritems",
"(",
")",
")",
":",
"xs",
".",
"append",
"(",
"x",
")",
"ys",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"y",
")",
")",
"yerr",
".",
"append",
"(",
"np",
".",
"std",
"(",
"y",
")",
")",
"return",
"xs",
",",
"ys",
",",
"yerr"
] | https://github.com/ppwwyyxx/speaker-recognition/blob/15d7bf32ad4ba2f1543e1287b03f3f2e6791d4dd/log/final/dataextractor.py#L34-L56 |
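The reduction above — group y-values by x, then emit (x, mean(y), std(y)) — in miniature, with illustrative sample data:

from collections import defaultdict
import numpy as np

data = defaultdict(list)
for x, y in [(1, 0.9), (1, 1.1), (2, 2.0)]:
    data[x].append(y)

xs = sorted(data)
ys = [np.mean(data[x]) for x in xs]
yerr = [np.std(data[x]) for x in xs]
print(xs, ys, yerr)  # [1, 2] [1.0, 2.0] [0.1, 0.0], up to float rounding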
|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/optparse.py | python | OptionParser.parse_args | (self, args=None, values=None) | return self.check_values(values, args) | parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options. | parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string]) | [
"parse_args",
"(",
"args",
":",
"[",
"string",
"]",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"values",
":",
"Values",
"=",
"None",
")",
"-",
">",
"(",
"values",
":",
"Values",
"args",
":",
"[",
"string",
"]",
")"
] | def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args) | [
"def",
"parse_args",
"(",
"self",
",",
"args",
"=",
"None",
",",
"values",
"=",
"None",
")",
":",
"rargs",
"=",
"self",
".",
"_get_args",
"(",
"args",
")",
"if",
"values",
"is",
"None",
":",
"values",
"=",
"self",
".",
"get_default_values",
"(",
")",
"# Store the halves of the argument list as attributes for the",
"# convenience of callbacks:",
"# rargs",
"# the rest of the command-line (the \"r\" stands for",
"# \"remaining\" or \"right-hand\")",
"# largs",
"# the leftover arguments -- ie. what's left after removing",
"# options and their arguments (the \"l\" stands for \"leftover\"",
"# or \"left-hand\")",
"self",
".",
"rargs",
"=",
"rargs",
"self",
".",
"largs",
"=",
"largs",
"=",
"[",
"]",
"self",
".",
"values",
"=",
"values",
"try",
":",
"stop",
"=",
"self",
".",
"_process_args",
"(",
"largs",
",",
"rargs",
",",
"values",
")",
"except",
"(",
"BadOptionError",
",",
"OptionValueError",
")",
",",
"err",
":",
"self",
".",
"error",
"(",
"str",
"(",
"err",
")",
")",
"args",
"=",
"largs",
"+",
"rargs",
"return",
"self",
".",
"check_values",
"(",
"values",
",",
"args",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/optparse.py#L1367-L1404 |
|
zhaoweicai/cascade-rcnn | 2252f46158ea6555868ca6fa5c221ea71d9b5e6c | python/caffe/io.py | python | Transformer.set_input_scale | (self, in_, scale) | Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient | Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE. | [
"Set",
"the",
"scale",
"of",
"preprocessed",
"inputs",
"s",
".",
"t",
".",
"the",
"blob",
"=",
"blob",
"*",
"scale",
".",
"N",
".",
"B",
".",
"input_scale",
"is",
"done",
"AFTER",
"mean",
"subtraction",
"and",
"other",
"preprocessing",
"while",
"raw_scale",
"is",
"done",
"BEFORE",
"."
] | def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale | [
"def",
"set_input_scale",
"(",
"self",
",",
"in_",
",",
"scale",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"self",
".",
"input_scale",
"[",
"in_",
"]",
"=",
"scale"
] | https://github.com/zhaoweicai/cascade-rcnn/blob/2252f46158ea6555868ca6fa5c221ea71d9b5e6c/python/caffe/io.py#L262-L274 |
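A hedged usage sketch: 'data' is the conventional input blob name in Caffe examples, and 1/255 is a common input_scale when the network was trained on [0, 1] inputs; both are assumptions here, not values mandated by this method.

import caffe

# shape is (batch, channels, height, width); the values are illustrative
transformer = caffe.io.Transformer({'data': (1, 3, 227, 227)})
transformer.set_input_scale('data', 1.0 / 255)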
||
luliyucoordinate/Leetcode | 96afcdc54807d1d184e881a075d1dbf3371e31fb | src/0344-Reverse-String/0344.py | python | Solution.reverseString | (self, s) | return s[::-1] | :type s: str
:rtype: str | :type s: str
:rtype: str | [
":",
"type",
"s",
":",
"str",
":",
"rtype",
":",
"str"
] | def reverseString(self, s):
"""
:type s: str
:rtype: str
"""
return s[::-1] | [
"def",
"reverseString",
"(",
"self",
",",
"s",
")",
":",
"return",
"s",
"[",
":",
":",
"-",
"1",
"]"
] | https://github.com/luliyucoordinate/Leetcode/blob/96afcdc54807d1d184e881a075d1dbf3371e31fb/src/0344-Reverse-String/0344.py#L2-L7 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | ToolBarToolBase.IsControl | (*args, **kwargs) | return _controls_.ToolBarToolBase_IsControl(*args, **kwargs) | IsControl(self) -> int | IsControl(self) -> int | [
"IsControl",
"(",
"self",
")",
"-",
">",
"int"
] | def IsControl(*args, **kwargs):
"""IsControl(self) -> int"""
return _controls_.ToolBarToolBase_IsControl(*args, **kwargs) | [
"def",
"IsControl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ToolBarToolBase_IsControl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L3461-L3463 |
|
zdevito/ATen | 4aa3e1de29ed58457e530f84217e53db0998476c | aten/src/ATen/nn_parse.py | python | base_declaration | (func, thnn_function, backends, backend_types, inplace=False) | return function_info(name, arguments, None, buffers, backends, inplace, func.get('scalar_check'), backend_types) | Creates the NN function without any buffers in its signature | Creates the NN function without any buffers in its signature | [
"Creates",
"the",
"NN",
"function",
"without",
"any",
"buffers",
"in",
"it",
"s",
"signature"
] | def base_declaration(func, thnn_function, backends, backend_types, inplace=False):
"""Creates the NN function without any buffers in it's signature"""
name, params = re.match(NAME_PARAM_REGEX, func['name']).groups()
if inplace:
name += '_'
params = params.split(', ')
arguments = [argument_to_declaration(a, func) for a in params]
if not inplace:
arguments += output_arguments(thnn_function)
buffers = [argument_to_declaration('Tensor ' + buf)
for buf in func.get('buffers', [])]
return function_info(name, arguments, None, buffers, backends, inplace, func.get('scalar_check'), backend_types) | [
"def",
"base_declaration",
"(",
"func",
",",
"thnn_function",
",",
"backends",
",",
"backend_types",
",",
"inplace",
"=",
"False",
")",
":",
"name",
",",
"params",
"=",
"re",
".",
"match",
"(",
"NAME_PARAM_REGEX",
",",
"func",
"[",
"'name'",
"]",
")",
".",
"groups",
"(",
")",
"if",
"inplace",
":",
"name",
"+=",
"'_'",
"params",
"=",
"params",
".",
"split",
"(",
"', '",
")",
"arguments",
"=",
"[",
"argument_to_declaration",
"(",
"a",
",",
"func",
")",
"for",
"a",
"in",
"params",
"]",
"if",
"not",
"inplace",
":",
"arguments",
"+=",
"output_arguments",
"(",
"thnn_function",
")",
"buffers",
"=",
"[",
"argument_to_declaration",
"(",
"'Tensor '",
"+",
"buf",
")",
"for",
"buf",
"in",
"func",
".",
"get",
"(",
"'buffers'",
",",
"[",
"]",
")",
"]",
"return",
"function_info",
"(",
"name",
",",
"arguments",
",",
"None",
",",
"buffers",
",",
"backends",
",",
"inplace",
",",
"func",
".",
"get",
"(",
"'scalar_check'",
")",
",",
"backend_types",
")"
] | https://github.com/zdevito/ATen/blob/4aa3e1de29ed58457e530f84217e53db0998476c/aten/src/ATen/nn_parse.py#L246-L258 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/op_selector.py | python | map_subgraph | (init_tensor, sources, disallowed_placeholders, visited_ops,
op_outputs, add_sources) | return extra_sources | Walk a Graph and capture the subgraph between init_tensor and sources.
Note: This function mutates visited_ops and op_outputs.
Args:
init_tensor: A Tensor or Operation where the subgraph terminates.
sources: A set of Tensors where subgraph extraction should stop.
disallowed_placeholders: An optional set of ops which may not appear in the
lifted graph. Defaults to all placeholders.
visited_ops: A set of operations which were visited in a prior pass.
op_outputs: A defaultdict containing the outputs of an op which are to be
copied into the new subgraph.
add_sources: A boolean indicating whether placeholders which are not in
sources should be allowed.
Returns:
The set of placeholders upon which init_tensor depends and are not in
sources.
Raises:
UnliftableError: if init_tensor depends on a placeholder which is not in
sources and add_sources is False. | Walk a Graph and capture the subgraph between init_tensor and sources. | [
"Walk",
"a",
"Graph",
"and",
"capture",
"the",
"subgraph",
"between",
"init_tensor",
"and",
"sources",
"."
] | def map_subgraph(init_tensor, sources, disallowed_placeholders, visited_ops,
op_outputs, add_sources):
"""Walk a Graph and capture the subgraph between init_tensor and sources.
Note: This function mutates visited_ops and op_outputs.
Args:
init_tensor: A Tensor or Operation where the subgraph terminates.
sources: A set of Tensors where subgraph extraction should stop.
disallowed_placeholders: An optional set of ops which may not appear in the
lifted graph. Defaults to all placeholders.
visited_ops: A set of operations which were visited in a prior pass.
op_outputs: A defaultdict containing the outputs of an op which are to be
copied into the new subgraph.
add_sources: A boolean indicating whether placeholders which are not in
sources should be allowed.
Returns:
The set of placeholders upon which init_tensor depends and are not in
sources.
Raises:
UnliftableError: if init_tensor depends on a placeholder which is not in
sources and add_sources is False.
"""
ops_to_visit = [_as_operation(init_tensor)]
extra_sources = object_identity.ObjectIdentitySet()
while ops_to_visit:
op = ops_to_visit.pop()
if op in visited_ops:
continue
visited_ops.add(op)
should_raise = False
if disallowed_placeholders is not None and op in disallowed_placeholders:
should_raise = True
elif op.type == "Placeholder":
if disallowed_placeholders is None and not add_sources:
should_raise = True
extra_sources.update(op.outputs)
if should_raise:
raise UnliftableError(
"Unable to lift tensor %s because it depends transitively on "
"placeholder %s via at least one path, e.g.: %s"
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
for inp in graph_inputs(op):
op_outputs[inp].add(op)
if inp not in visited_ops and inp not in (sources or extra_sources):
ops_to_visit.append(inp)
return extra_sources | [
"def",
"map_subgraph",
"(",
"init_tensor",
",",
"sources",
",",
"disallowed_placeholders",
",",
"visited_ops",
",",
"op_outputs",
",",
"add_sources",
")",
":",
"ops_to_visit",
"=",
"[",
"_as_operation",
"(",
"init_tensor",
")",
"]",
"extra_sources",
"=",
"object_identity",
".",
"ObjectIdentitySet",
"(",
")",
"while",
"ops_to_visit",
":",
"op",
"=",
"ops_to_visit",
".",
"pop",
"(",
")",
"if",
"op",
"in",
"visited_ops",
":",
"continue",
"visited_ops",
".",
"add",
"(",
"op",
")",
"should_raise",
"=",
"False",
"if",
"disallowed_placeholders",
"is",
"not",
"None",
"and",
"op",
"in",
"disallowed_placeholders",
":",
"should_raise",
"=",
"True",
"elif",
"op",
".",
"type",
"==",
"\"Placeholder\"",
":",
"if",
"disallowed_placeholders",
"is",
"None",
"and",
"not",
"add_sources",
":",
"should_raise",
"=",
"True",
"extra_sources",
".",
"update",
"(",
"op",
".",
"outputs",
")",
"if",
"should_raise",
":",
"raise",
"UnliftableError",
"(",
"\"Unable to lift tensor %s because it depends transitively on \"",
"\"placeholder %s via at least one path, e.g.: %s\"",
"%",
"(",
"repr",
"(",
"init_tensor",
")",
",",
"repr",
"(",
"op",
")",
",",
"_path_from",
"(",
"op",
",",
"init_tensor",
",",
"sources",
")",
")",
")",
"for",
"inp",
"in",
"graph_inputs",
"(",
"op",
")",
":",
"op_outputs",
"[",
"inp",
"]",
".",
"add",
"(",
"op",
")",
"if",
"inp",
"not",
"in",
"visited_ops",
"and",
"inp",
"not",
"in",
"(",
"sources",
"or",
"extra_sources",
")",
":",
"ops_to_visit",
".",
"append",
"(",
"inp",
")",
"return",
"extra_sources"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/op_selector.py#L365-L416 |
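Stripped of the TensorFlow types, the traversal above is a stack-based backward walk from a terminal node that stops at declared sources; a sketch on a plain dict-of-inputs graph (node names are illustrative):

graph_inputs = {'z': ['y'], 'y': ['x', 'w'], 'x': [], 'w': []}
sources = {'w'}

visited, stack = set(), ['z']
while stack:
    op = stack.pop()
    if op in visited:
        continue
    visited.add(op)
    for inp in graph_inputs[op]:
        if inp not in visited and inp not in sources:
            stack.append(inp)

print(sorted(visited))  # ['x', 'y', 'z']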
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | GetPasswordFromUser | (*args, **kwargs) | return _misc_.GetPasswordFromUser(*args, **kwargs) | GetPasswordFromUser(String message, String caption=EmptyString, String default_value=EmptyString,
Window parent=None) -> String | GetPasswordFromUser(String message, String caption=EmptyString, String default_value=EmptyString,
Window parent=None) -> String | [
"GetPasswordFromUser",
"(",
"String",
"message",
"String",
"caption",
"=",
"EmptyString",
"String",
"default_value",
"=",
"EmptyString",
"Window",
"parent",
"=",
"None",
")",
"-",
">",
"String"
] | def GetPasswordFromUser(*args, **kwargs):
"""
GetPasswordFromUser(String message, String caption=EmptyString, String default_value=EmptyString,
Window parent=None) -> String
"""
return _misc_.GetPasswordFromUser(*args, **kwargs) | [
"def",
"GetPasswordFromUser",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"GetPasswordFromUser",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L462-L467 |
|
microsoft/TSS.MSR | 0f2516fca2cd9929c31d5450e39301c9bde43688 | TSS.Py/src/TpmTypes.py | python | TPMS_NULL_KDF_SCHEME.GetUnionSelector | (self) | return TPM_ALG_ID.NULL | TpmUnion method | TpmUnion method | [
"TpmUnion",
"method"
] | def GetUnionSelector(self): # TPM_ALG_ID
""" TpmUnion method """
return TPM_ALG_ID.NULL | [
"def",
"GetUnionSelector",
"(",
"self",
")",
":",
"# TPM_ALG_ID",
"return",
"TPM_ALG_ID",
".",
"NULL"
] | https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L6899-L6901 |
|
mamedev/mame | 02cd26d37ee11191f3e311e19e805d872cb1e3a4 | 3rdparty/benchmark/mingw.py | python | find_in_path | (file, path=None) | return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path))) | Attempts to find an executable in the path | Attempts to find an executable in the path | [
"Attempts",
"to",
"find",
"an",
"executable",
"in",
"the",
"path"
] | def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path))) | [
"def",
"find_in_path",
"(",
"file",
",",
"path",
"=",
"None",
")",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"file",
"+=",
"'.exe'",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
"if",
"type",
"(",
"path",
")",
"is",
"type",
"(",
"''",
")",
":",
"path",
"=",
"path",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"return",
"list",
"(",
"filter",
"(",
"os",
".",
"path",
".",
"exists",
",",
"map",
"(",
"lambda",
"dir",
",",
"file",
"=",
"file",
":",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"file",
")",
",",
"path",
")",
")",
")"
] | https://github.com/mamedev/mame/blob/02cd26d37ee11191f3e311e19e805d872cb1e3a4/3rdparty/benchmark/mingw.py#L86-L97 |
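On Python 3.3+ the standard library covers the common case with shutil.which (first match only, and it honors PATHEXT on Windows); keeping find_in_path's all-matches semantics takes a few lines more:

import os
import shutil

print(shutil.which('gcc'))  # first hit on PATH, or None

def all_in_path(name, path=None):
    dirs = (path if path is not None else os.environ.get('PATH', '')).split(os.pathsep)
    return [c for c in (os.path.join(d, name) for d in dirs) if os.path.exists(c)]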
|
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | tools/caffe_translator/scripts/convert_caffe_model.py | python | CaffeModelConverter.add_param | (self, param_name, layer_index, blob_index) | Add a param to the .params file | Add a param to the .params file | [
"Add",
"a",
"param",
"to",
"the",
".",
"params",
"file"
] | def add_param(self, param_name, layer_index, blob_index):
"""Add a param to the .params file"""
blobs = self.layers[layer_index].blobs
self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index])) | [
"def",
"add_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"blobs",
"=",
"self",
".",
"layers",
"[",
"layer_index",
"]",
".",
"blobs",
"self",
".",
"dict_param",
"[",
"param_name",
"]",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"caffe",
".",
"io",
".",
"blobproto_to_array",
"(",
"blobs",
"[",
"blob_index",
"]",
")",
")"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/tools/caffe_translator/scripts/convert_caffe_model.py#L33-L36 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/aui.py | python | AuiToolBar.GetOverflowVisible | (*args, **kwargs) | return _aui.AuiToolBar_GetOverflowVisible(*args, **kwargs) | GetOverflowVisible(self) -> bool | GetOverflowVisible(self) -> bool | [
"GetOverflowVisible",
"(",
"self",
")",
"-",
">",
"bool"
] | def GetOverflowVisible(*args, **kwargs):
"""GetOverflowVisible(self) -> bool"""
return _aui.AuiToolBar_GetOverflowVisible(*args, **kwargs) | [
"def",
"GetOverflowVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiToolBar_GetOverflowVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L2130-L2132 |